Diffstat (limited to 'hw')
-rw-r--r--  hw/9pfs/9p-handle.c | 9
-rw-r--r--  hw/9pfs/9p-local-maru.c | 11
-rw-r--r--  hw/9pfs/9p-local.c | 7
-rw-r--r--  hw/9pfs/9p-maru.c | 308
-rw-r--r--  hw/9pfs/9p-proxy.c | 88
-rw-r--r--  hw/9pfs/9p-synth.h | 10
-rw-r--r--  hw/9pfs/9p.c | 302
-rw-r--r--  hw/9pfs/9p.h | 23
-rw-r--r--  hw/9pfs/codir.c | 17
-rw-r--r--  hw/9pfs/cofile.c | 32
-rw-r--r--  hw/9pfs/cofs.c | 43
-rw-r--r--  hw/9pfs/coth.h | 95
-rw-r--r--  hw/9pfs/coxattr.c | 19
-rw-r--r--  hw/9pfs/trace-events | 2
-rw-r--r--  hw/9pfs/virtio-9p-device.c | 53
-rw-r--r--  hw/9pfs/virtio-9p.h | 2
-rw-r--r--  hw/Makefile.objs | 1
-rw-r--r--  hw/acpi/Makefile.objs | 2
-rw-r--r--  hw/acpi/aml-build.c | 2
-rw-r--r--  hw/acpi/cpu.c | 17
-rw-r--r--  hw/acpi/cpu_hotplug.c | 17
-rw-r--r--  hw/acpi/ich9.c | 8
-rw-r--r--  hw/acpi/ipmi.c | 1
-rw-r--r--  hw/acpi/nvdimm.c | 526
-rw-r--r--  hw/acpi/piix4.c | 7
-rw-r--r--  hw/adc/Makefile.objs | 1
-rw-r--r--  hw/adc/stm32f2xx_adc.c | 306
-rw-r--r--  hw/alpha/dp264.c | 2
-rw-r--r--  hw/alpha/typhoon.c | 2
-rw-r--r--  hw/arm/Makefile.objs | 2
-rw-r--r--  hw/arm/aspeed.c | 197
-rw-r--r--  hw/arm/aspeed_soc.c | 276
-rw-r--r--  hw/arm/ast2400.c | 214
-rw-r--r--  hw/arm/boot.c | 21
-rw-r--r--  hw/arm/cubieboard.c | 1
-rw-r--r--  hw/arm/fsl-imx25.c | 2
-rw-r--r--  hw/arm/fsl-imx31.c | 2
-rw-r--r--  hw/arm/fsl-imx6.c | 2
-rw-r--r--  hw/arm/integratorcp.c | 35
-rw-r--r--  hw/arm/mainstone.c | 5
-rw-r--r--  hw/arm/musicpal.c | 90
-rw-r--r--  hw/arm/nseries.c | 3
-rw-r--r--  hw/arm/omap2.c | 18
-rw-r--r--  hw/arm/palmetto-bmc.c | 102
-rw-r--r--  hw/arm/pxa2xx.c | 27
-rw-r--r--  hw/arm/pxa2xx_gpio.c | 25
-rw-r--r--  hw/arm/spitz.c | 13
-rw-r--r--  hw/arm/stm32f205_soc.c | 92
-rw-r--r--  hw/arm/strongarm.c | 39
-rw-r--r--  hw/arm/sysbus-fdt.c | 4
-rw-r--r--  hw/arm/tosa.c | 12
-rw-r--r--  hw/arm/versatilepb.c | 9
-rw-r--r--  hw/arm/virt-acpi-build.c | 93
-rw-r--r--  hw/arm/virt.c | 103
-rw-r--r--  hw/arm/xlnx-zynqmp.c | 2
-rw-r--r--  hw/audio/gus.c | 9
-rw-r--r--  hw/audio/intel-hda.c | 3
-rw-r--r--  hw/audio/pcspk.c | 27
-rw-r--r--  hw/audio/sb16.c | 4
-rw-r--r--  hw/block/dataplane/virtio-blk.c | 77
-rw-r--r--  hw/block/dataplane/virtio-blk.h | 6
-rw-r--r--  hw/block/fdc.c | 275
-rw-r--r--  hw/block/m25p80.c | 29
-rw-r--r--  hw/block/nvme.c | 10
-rw-r--r--  hw/block/virtio-blk.c | 91
-rw-r--r--  hw/block/xen_disk.c | 274
-rw-r--r--  hw/bt/hci-csr.c | 15
-rw-r--r--  hw/bt/hci.c | 2
-rw-r--r--  hw/char/bcm2835_aux.c | 16
-rw-r--r--  hw/char/cadence_uart.c | 48
-rw-r--r--  hw/char/debugcon.c | 10
-rw-r--r--  hw/char/digic-uart.c | 11
-rw-r--r--  hw/char/escc.c | 24
-rw-r--r--  hw/char/etraxfs_ser.c | 14
-rw-r--r--  hw/char/exynos4210_uart.c | 17
-rw-r--r--  hw/char/grlib_apbuart.c | 19
-rw-r--r--  hw/char/imx_serial.c | 27
-rw-r--r--  hw/char/ipoctal232.c | 21
-rw-r--r--  hw/char/lm32_juart.c | 13
-rw-r--r--  hw/char/lm32_uart.c | 15
-rw-r--r--  hw/char/mcf_uart.c | 18
-rw-r--r--  hw/char/milkymist-uart.c | 13
-rw-r--r--  hw/char/omap_uart.c | 4
-rw-r--r--  hw/char/parallel.c | 51
-rw-r--r--  hw/char/pl011.c | 88
-rw-r--r--  hw/char/sclpconsole-lm.c | 34
-rw-r--r--  hw/char/sclpconsole.c | 14
-rw-r--r--  hw/char/serial-isa.c | 7
-rw-r--r--  hw/char/serial.c | 39
-rw-r--r--  hw/char/sh_serial.c | 18
-rw-r--r--  hw/char/spapr_vty.c | 28
-rw-r--r--  hw/char/stm32f2xx_usart.c | 20
-rw-r--r--  hw/char/trace-events | 9
-rw-r--r--  hw/char/virtio-console.c | 50
-rw-r--r--  hw/char/virtio-serial-bus.c | 79
-rw-r--r--  hw/char/xen_console.c | 73
-rw-r--r--  hw/char/xilinx_uartlite.c | 14
-rw-r--r--  hw/core/Makefile.objs | 3
-rw-r--r--  hw/core/bus.c | 21
-rw-r--r--  hw/core/generic-loader.c | 216
-rw-r--r--  hw/core/loader.c | 93
-rw-r--r--  hw/core/machine.c | 209
-rw-r--r--  hw/core/or-irq.c | 107
-rw-r--r--  hw/core/platform-bus.c | 8
-rw-r--r--  hw/core/ptimer.c | 144
-rw-r--r--  hw/core/qdev-properties-system.c | 76
-rw-r--r--  hw/core/qdev-properties.c | 14
-rw-r--r--  hw/display/cirrus_vga.c | 14
-rw-r--r--  hw/display/milkymist-tmu2.c | 2
-rw-r--r--  hw/display/pl110.c | 8
-rw-r--r--  hw/display/qxl.c | 37
-rw-r--r--  hw/display/ssd0323.c | 102
-rw-r--r--  hw/display/vga-isa.c | 8
-rw-r--r--  hw/display/virtio-gpu-3d.c | 1
-rw-r--r--  hw/display/virtio-gpu-pci.c | 1
-rw-r--r--  hw/display/virtio-gpu.c | 41
-rw-r--r--  hw/display/virtio-vga.c | 15
-rw-r--r--  hw/display/vmware_vga.c | 12
-rw-r--r--  hw/display/xenfb.c | 127
-rw-r--r--  hw/dma/i8257.c | 8
-rw-r--r--  hw/dma/omap_dma.c | 2
-rw-r--r--  hw/dma/pl080.c | 2
-rw-r--r--  hw/dma/rc4030.c | 81
-rw-r--r--  hw/dma/xilinx_axidma.c | 10
-rw-r--r--  hw/gpio/imx_gpio.c | 2
-rw-r--r--  hw/i2c/bitbang_i2c.c | 19
-rw-r--r--  hw/i2c/core.c | 39
-rw-r--r--  hw/i2c/smbus.c | 12
-rw-r--r--  hw/i386/Makefile.objs | 1
-rw-r--r--  hw/i386/acpi-build.c | 214
-rw-r--r--  hw/i386/amd_iommu.c | 1212
-rw-r--r--  hw/i386/amd_iommu.h | 289
-rw-r--r--  hw/i386/intel_iommu.c | 119
-rw-r--r--  hw/i386/intel_iommu_internal.h | 2
-rw-r--r--  hw/i386/kvm/apic.c | 66
-rw-r--r--  hw/i386/kvm/i8259.c | 2
-rw-r--r--  hw/i386/kvm/pci-assign.c | 4
-rw-r--r--  hw/i386/kvmvapic.c | 20
-rw-r--r--  hw/i386/pc.c | 185
-rw-r--r--  hw/i386/pc_piix.c | 33
-rw-r--r--  hw/i386/pc_q35.c | 30
-rw-r--r--  hw/i386/trace-events | 33
-rw-r--r--  hw/i386/x86-iommu.c | 6
-rw-r--r--  hw/i386/xen/xen_apic.c | 6
-rw-r--r--  hw/i386/xen/xen_platform.c | 33
-rw-r--r--  hw/ide/ahci.c | 3
-rw-r--r--  hw/ide/atapi.c | 51
-rw-r--r--  hw/ide/core.c | 16
-rw-r--r--  hw/ide/macio.c | 213
-rw-r--r--  hw/ide/piix.c | 4
-rw-r--r--  hw/ide/qdev.c | 31
-rw-r--r--  hw/input/adb.c | 255
-rw-r--r--  hw/input/hid.c | 4
-rw-r--r--  hw/input/pckbd.c | 4
-rw-r--r--  hw/input/ps2.c | 612
-rw-r--r--  hw/input/tsc2005.c | 190
-rw-r--r--  hw/input/tsc210x.c | 227
-rw-r--r--  hw/input/virtio-input.c | 21
-rw-r--r--  hw/intc/Makefile.objs | 3
-rw-r--r--  hw/intc/apic.c | 34
-rw-r--r--  hw/intc/apic_common.c | 53
-rw-r--r--  hw/intc/arm_gic.c | 22
-rw-r--r--  hw/intc/arm_gic_kvm.c | 26
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c | 23
-rw-r--r--  hw/intc/arm_gicv3_its_common.c | 148
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c | 121
-rw-r--r--  hw/intc/arm_gicv3_kvm.c | 13
-rw-r--r--  hw/intc/i8259.c | 73
-rw-r--r--  hw/intc/i8259_common.c | 5
-rw-r--r--  hw/intc/intc.c | 41
-rw-r--r--  hw/intc/ioapic.c | 2
-rw-r--r--  hw/intc/lm32_pic.c | 63
-rw-r--r--  hw/intc/s390_flic_kvm.c | 42
-rw-r--r--  hw/intc/slavio_intctl.c | 67
-rw-r--r--  hw/intc/trace-events | 29
-rw-r--r--  hw/intc/xics.c | 333
-rw-r--r--  hw/intc/xics_kvm.c | 70
-rw-r--r--  hw/intc/xics_spapr.c | 169
-rw-r--r--  hw/ipmi/Makefile.objs | 2
-rw-r--r--  hw/ipmi/ipmi.c | 10
-rw-r--r--  hw/ipmi/ipmi_bmc_extern.c | 36
-rw-r--r--  hw/ipmi/ipmi_bmc_sim.c | 9
-rw-r--r--  hw/ipmi/isa_ipmi_kcs.c | 6
-rw-r--r--  hw/isa/isa-bus.c | 14
-rw-r--r--  hw/isa/pc87312.c | 4
-rw-r--r--  hw/lm32/lm32_hwsetup.h | 2
-rw-r--r--  hw/m68k/mcf5206.c | 2
-rw-r--r--  hw/m68k/mcf5208.c | 4
-rw-r--r--  hw/mem/nvdimm.c | 4
-rw-r--r--  hw/mem/trace-events | 5
-rw-r--r--  hw/microblaze/boot.c | 1
-rw-r--r--  hw/mips/mips_fulong2e.c | 2
-rw-r--r--  hw/mips/mips_malta.c | 45
-rw-r--r--  hw/mips/mips_r4k.c | 2
-rw-r--r--  hw/misc/Makefile.objs | 2
-rw-r--r--  hw/misc/aspeed_scu.c | 45
-rw-r--r--  hw/misc/aspeed_sdmc.c | 280
-rw-r--r--  hw/misc/edu.c | 18
-rw-r--r--  hw/misc/imx25_ccm.c | 2
-rw-r--r--  hw/misc/imx31_ccm.c | 2
-rw-r--r--  hw/misc/imx6_ccm.c | 4
-rw-r--r--  hw/misc/imx6_src.c | 2
-rw-r--r--  hw/misc/ivshmem.c | 28
-rw-r--r--  hw/misc/macio/macio.c | 26
-rw-r--r--  hw/misc/milkymist-pfpu.c | 2
-rw-r--r--  hw/net/cadence_gem.c | 557
-rw-r--r--  hw/net/e1000e.c | 2
-rw-r--r--  hw/net/e1000e_core.c | 42
-rw-r--r--  hw/net/e1000e_core.h | 3
-rw-r--r--  hw/net/eepro100.c | 1
-rw-r--r--  hw/net/fsl_etsec/etsec.c | 6
-rw-r--r--  hw/net/fsl_etsec/rings.c | 8
-rw-r--r--  hw/net/imx_fec.c | 2
-rw-r--r--  hw/net/lan9118.c | 2
-rw-r--r--  hw/net/mcf_fec.c | 9
-rw-r--r--  hw/net/pcnet.c | 133
-rw-r--r--  hw/net/rocker/rocker.c | 2
-rw-r--r--  hw/net/rtl8139.c | 2
-rw-r--r--  hw/net/spapr_llan.c | 91
-rw-r--r--  hw/net/trace-events | 18
-rw-r--r--  hw/net/virtio-net.c | 133
-rw-r--r--  hw/net/vmxnet3.c | 1
-rw-r--r--  hw/net/xen_nic.c | 36
-rw-r--r--  hw/nvram/Makefile.objs | 1
-rw-r--r--  hw/nvram/chrp_nvram.c | 85
-rw-r--r--  hw/nvram/fw_cfg.c | 8
-rw-r--r--  hw/nvram/mac_nvram.c | 49
-rw-r--r--  hw/nvram/spapr_nvram.c | 6
-rw-r--r--  hw/pci-bridge/pci_expander_bridge.c | 1
-rw-r--r--  hw/pci-host/uninorth.c | 4
-rw-r--r--  hw/ppc/Makefile.objs | 6
-rw-r--r--  hw/ppc/e500.c | 4
-rw-r--r--  hw/ppc/fdt.c | 49
-rw-r--r--  hw/ppc/mac_newworld.c | 1
-rw-r--r--  hw/ppc/mac_oldworld.c | 1
-rw-r--r--  hw/ppc/pnv.c | 821
-rw-r--r--  hw/ppc/pnv_core.c | 233
-rw-r--r--  hw/ppc/pnv_lpc.c | 472
-rw-r--r--  hw/ppc/pnv_xscom.c | 275
-rw-r--r--  hw/ppc/ppc405.h | 6
-rw-r--r--  hw/ppc/ppc405_boards.c | 1
-rw-r--r--  hw/ppc/ppce500_spin.c | 31
-rw-r--r--  hw/ppc/spapr.c | 1172
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 120
-rw-r--r--  hw/ppc/spapr_drc.c | 79
-rw-r--r--  hw/ppc/spapr_events.c | 292
-rw-r--r--  hw/ppc/spapr_hcall.c | 101
-rw-r--r--  hw/ppc/spapr_iommu.c | 22
-rw-r--r--  hw/ppc/spapr_ovec.c | 254
-rw-r--r--  hw/ppc/spapr_pci.c | 147
-rw-r--r--  hw/ppc/spapr_rtas.c | 148
-rw-r--r--  hw/ppc/spapr_vio.c | 62
-rw-r--r--  hw/ppc/trace-events | 33
-rw-r--r--  hw/s390x/css.c | 20
-rw-r--r--  hw/s390x/s390-pci-bus.c | 69
-rw-r--r--  hw/s390x/s390-pci-bus.h | 4
-rw-r--r--  hw/s390x/s390-pci-inst.c | 31
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 40
-rw-r--r--  hw/s390x/s390-virtio.c | 6
-rw-r--r--  hw/s390x/sclp.c | 44
-rw-r--r--  hw/s390x/virtio-ccw.c | 152
-rw-r--r--  hw/s390x/virtio-ccw.h | 21
-rw-r--r--  hw/scsi/esp.c | 2
-rw-r--r--  hw/scsi/lsi53c895a.c | 280
-rw-r--r--  hw/scsi/megasas.c | 64
-rw-r--r--  hw/scsi/mptconfig.c | 6
-rw-r--r--  hw/scsi/mptsas.c | 6
-rw-r--r--  hw/scsi/scsi-disk.c | 33
-rw-r--r--  hw/scsi/spapr_vscsi.c | 88
-rw-r--r--  hw/scsi/trace-events | 27
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c | 67
-rw-r--r--  hw/scsi/virtio-scsi.c | 118
-rw-r--r--  hw/scsi/vmw_pvscsi.c | 35
-rw-r--r--  hw/sd/sd.c | 9
-rw-r--r--  hw/sd/ssi-sd.c | 70
-rw-r--r--  hw/sh4/shix.c | 2
-rw-r--r--  hw/smbios/smbios.c | 13
-rw-r--r--  hw/sparc/sun4m.c | 51
-rw-r--r--  hw/sparc64/sun4u.c | 38
-rw-r--r--  hw/ssi/Makefile.objs | 1
-rw-r--r--  hw/ssi/aspeed_smc.c | 194
-rw-r--r--  hw/ssi/imx_spi.c | 2
-rw-r--r--  hw/ssi/stm32f2xx_spi.c | 225
-rw-r--r--  hw/ssi/xilinx_spips.c | 7
-rw-r--r--  hw/timer/a9gtimer.c | 14
-rw-r--r--  hw/timer/allwinner-a10-pit.c | 2
-rw-r--r--  hw/timer/arm_mptimer.c | 149
-rw-r--r--  hw/timer/arm_timer.c | 2
-rw-r--r--  hw/timer/digic-timer.c | 2
-rw-r--r--  hw/timer/etraxfs_timer.c | 6
-rw-r--r--  hw/timer/exynos4210_mct.c | 7
-rw-r--r--  hw/timer/exynos4210_pwm.c | 2
-rw-r--r--  hw/timer/exynos4210_rtc.c | 4
-rw-r--r--  hw/timer/grlib_gptimer.c | 3
-rw-r--r--  hw/timer/imx_epit.c | 6
-rw-r--r--  hw/timer/imx_gpt.c | 4
-rw-r--r--  hw/timer/lm32_timer.c | 2
-rw-r--r--  hw/timer/mc146818rtc.c | 10
-rw-r--r--  hw/timer/milkymist-sysctl.c | 4
-rw-r--r--  hw/timer/puv3_ost.c | 2
-rw-r--r--  hw/timer/sh_timer.c | 2
-rw-r--r--  hw/timer/slavio_timer.c | 2
-rw-r--r--  hw/timer/stm32f2xx_timer.c | 11
-rw-r--r--  hw/timer/xilinx_timer.c | 2
-rw-r--r--  hw/tpm/tpm_passthrough.c | 6
-rw-r--r--  hw/tpm/tpm_tis.c | 1
-rw-r--r--  hw/tricore/tricore_testboard.c | 2
-rw-r--r--  hw/unicore32/puv3.c | 1
-rw-r--r--  hw/usb/ccid-card-emulated.c | 3
-rw-r--r--  hw/usb/ccid-card-passthru.c | 30
-rw-r--r--  hw/usb/ccid.h | 2
-rw-r--r--  hw/usb/desc.c | 12
-rw-r--r--  hw/usb/dev-mtp.c | 198
-rw-r--r--  hw/usb/dev-serial.c | 33
-rw-r--r--  hw/usb/dev-smartcard-reader.c | 11
-rw-r--r--  hw/usb/hcd-ehci.c | 2
-rw-r--r--  hw/usb/hcd-ohci.c | 2
-rw-r--r--  hw/usb/hcd-xhci.c | 238
-rw-r--r--  hw/usb/host-libusb.c | 7
-rw-r--r--  hw/usb/redirect.c | 37
-rw-r--r--  hw/usb/xen-usb.c | 73
-rw-r--r--  hw/vfio/common.c | 121
-rw-r--r--  hw/vfio/pci-quirks.c | 16
-rw-r--r--  hw/vfio/pci.c | 362
-rw-r--r--  hw/vfio/pci.h | 5
-rw-r--r--  hw/vfio/platform.c | 66
-rw-r--r--  hw/vfio/spapr.c | 2
-rw-r--r--  hw/vfio/trace-events | 2
-rw-r--r--  hw/vigs/vigs_gl_backend_glx.c | 4
-rw-r--r--  hw/vigs/vigs_gl_backend_wgl.c | 4
-rw-r--r--  hw/virtio/Makefile.objs | 4
-rw-r--r--  hw/virtio/trace-events | 7
-rw-r--r--  hw/virtio/vhost-backend.c | 17
-rw-r--r--  hw/virtio/vhost-user.c | 4
-rw-r--r--  hw/virtio/vhost-vsock.c | 417
-rw-r--r--  hw/virtio/vhost.c | 119
-rw-r--r--  hw/virtio/virtio-balloon.c | 62
-rw-r--r--  hw/virtio/virtio-bus.c | 186
-rw-r--r--  hw/virtio/virtio-crypto-pci.c | 77
-rw-r--r--  hw/virtio/virtio-crypto.c | 908
-rw-r--r--  hw/virtio/virtio-mmio.c | 41
-rw-r--r--  hw/virtio/virtio-pci.c | 191
-rw-r--r--  hw/virtio/virtio-pci.h | 54
-rw-r--r--  hw/virtio/virtio-rng.c | 19
-rw-r--r--  hw/virtio/virtio.c | 540
-rw-r--r--  hw/xen/Makefile.objs | 2
-rw-r--r--  hw/xen/xen_backend.c | 400
-rw-r--r--  hw/xen/xen_devconfig.c | 4
-rw-r--r--  hw/xen/xen_pvdev.c | 318
-rw-r--r--  hw/xenpv/xen_domainbuild.c | 8
-rw-r--r--  hw/xtensa/xtfpga.c | 2
351 files changed, 17926 insertions, 6321 deletions
diff --git a/hw/9pfs/9p-handle.c b/hw/9pfs/9p-handle.c
index 3d77594f92..1687661bc9 100644
--- a/hw/9pfs/9p-handle.c
+++ b/hw/9pfs/9p-handle.c
@@ -649,6 +649,14 @@ out:
return ret;
}
+static void handle_cleanup(FsContext *ctx)
+{
+ struct handle_data *data = ctx->private;
+
+ close(data->mountfd);
+ g_free(data);
+}
+
static int handle_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse)
{
const char *sec_model = qemu_opt_get(opts, "security_model");
@@ -671,6 +679,7 @@ static int handle_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse)
FileOperations handle_ops = {
.parse_opts = handle_parse_opts,
.init = handle_init,
+ .cleanup = handle_cleanup,
.lstat = handle_lstat,
.readlink = handle_readlink,
.close = handle_close,
diff --git a/hw/9pfs/9p-local-maru.c b/hw/9pfs/9p-local-maru.c
index 7605f1dd13..cac8784dda 100644
--- a/hw/9pfs/9p-local-maru.c
+++ b/hw/9pfs/9p-local-maru.c
@@ -587,6 +587,7 @@ again:
}
entry->d_type = DT_UNKNOWN;
}
+
#else
if (ctx->export_flags & V9FS_SM_MAPPED_FILE) {
if (!strcmp(entry->d_name, VIRTFS_META_DIR)) {
@@ -1558,20 +1559,16 @@ static int local_name_to_path(FsContext *ctx, V9fsPath *dir_path,
LOG_TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
if (dir_path) {
#ifndef CONFIG_WIN32
- v9fs_string_sprintf((V9fsString *)target, "%s/%s",
- dir_path->data, name);
+ v9fs_path_sprintf(target, "%s/%s", dir_path->data, name);
#else
- v9fs_string_sprintf((V9fsString *)target, "%s\\%s",
- dir_path->data, name);
+ v9fs_path_sprintf(target, "%s\\%s", dir_path->data, name);
while((target->data)[strlen(target->data)-1] == '\\'){
(target->data)[strlen(target->data)-1] = '\0';
}
#endif
} else {
- v9fs_string_sprintf((V9fsString *)target, "%s", name);
+ v9fs_path_sprintf(target, "%s", name);
}
- /* Bump the size for including terminating NULL */
- target->size++;
return 0;
}
diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c
index 3f271fcbd2..845675e7a1 100644
--- a/hw/9pfs/9p-local.c
+++ b/hw/9pfs/9p-local.c
@@ -1060,13 +1060,10 @@ static int local_name_to_path(FsContext *ctx, V9fsPath *dir_path,
const char *name, V9fsPath *target)
{
if (dir_path) {
- v9fs_string_sprintf((V9fsString *)target, "%s/%s",
- dir_path->data, name);
+ v9fs_path_sprintf(target, "%s/%s", dir_path->data, name);
} else {
- v9fs_string_sprintf((V9fsString *)target, "%s", name);
+ v9fs_path_sprintf(target, "%s", name);
}
- /* Bump the size for including terminating NULL */
- target->size++;
return 0;
}
diff --git a/hw/9pfs/9p-maru.c b/hw/9pfs/9p-maru.c
index ebe4987d73..20d0d07a46 100644
--- a/hw/9pfs/9p-maru.c
+++ b/hw/9pfs/9p-maru.c
@@ -29,6 +29,7 @@
*/
#include "qemu/osdep.h"
+#include <glib/gprintf.h>
#include "hw/virtio/virtio.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -58,6 +59,9 @@ extern uint64_t hostBytesPerSector;
#ifdef CONFIG_DARWIN
#define O_DIRECT 040000 /* Direct disk access */
#define O_NOATIME 01000000 /* Do not set atime */
+#ifndef XATTR_SIZE_MAX
+#define XATTR_SIZE_MAX 65536
+#endif
#endif
#include "../../tizen/src/debug_ch.h"
@@ -218,6 +222,20 @@ void v9fs_path_free(V9fsPath *path)
path->size = 0;
}
+
+void GCC_FMT_ATTR(2, 3)
+v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
+{
+ va_list ap;
+
+ v9fs_path_free(path);
+
+ va_start(ap, fmt);
+ /* Bump the size for including terminating NULL */
+ path->size = g_vasprintf(&path->data, fmt, ap) + 1;
+ va_end(ap);
+}
+
void v9fs_path_copy(V9fsPath *lhs, V9fsPath *rhs)
{
v9fs_path_free(lhs);
@@ -265,7 +283,7 @@ static size_t v9fs_string_size(V9fsString *str)
/*
* returns 0 if fid got re-opened, 1 if not, < 0 on error */
-static int v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
+static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int err = 1;
@@ -285,7 +303,7 @@ static int v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
return err;
}
-static V9fsFidState *get_fid(V9fsPDU *pdu, int32_t fid)
+static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int err;
@@ -355,12 +373,12 @@ static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
}
#ifndef CONFIG_WIN32
-static int v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
+static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int retval = 0;
- if (fidp->fs.xattr.copied_len == -1) {
+ if (fidp->fs.xattr.xattrwalk_fid) {
/* getxattr/listxattr fid */
goto free_value;
}
@@ -389,7 +407,7 @@ free_value:
}
#endif
-static int free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
+static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int retval = 0;
@@ -415,7 +433,7 @@ static int free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
return retval;
}
-static int put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
+static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
{
BUG_ON(!fidp->ref);
fidp->ref--;
@@ -459,7 +477,7 @@ static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
return fidp;
}
-void v9fs_reclaim_fd(V9fsPDU *pdu)
+void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
{
int reclaim_count = 0;
V9fsState *s = pdu->s;
@@ -540,7 +558,7 @@ void v9fs_reclaim_fd(V9fsPDU *pdu)
}
}
-static int v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
+static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
{
TRACE("[%d][ >> %s]\n", __LINE__, __func__);
int err;
@@ -574,11 +592,11 @@ static int v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
return 0;
}
-static void virtfs_reset(V9fsPDU *pdu)
+static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsState *s = pdu->s;
- V9fsFidState *fidp = NULL;
+ V9fsFidState *fidp;
/* Free all fids */
while (s->fid_list) {
@@ -591,11 +609,6 @@ static void virtfs_reset(V9fsPDU *pdu)
free_fid(pdu, fidp);
}
}
- if (fidp) {
- /* One or more unclunked fids found... */
- error_report("9pfs:%s: One or more uncluncked fids "
- "found during reset", __func__);
- }
}
#define P9_QID_TYPE_DIR 0x80
@@ -645,7 +658,8 @@ static void stat_to_qid(const struct stat *stbuf, V9fsQID *qidp)
}
}
-static int fid_to_qid(V9fsPDU *pdu, V9fsFidState *fidp, V9fsQID *qidp)
+static int coroutine_fn fid_to_qid(V9fsPDU *pdu, V9fsFidState *fidp,
+ V9fsQID *qidp)
{
TRACE("[%d][ >> %s]\n", __LINE__, __func__);
struct stat stbuf;
@@ -673,17 +687,11 @@ V9fsPDU *pdu_alloc(V9fsState *s)
void pdu_free(V9fsPDU *pdu)
{
- if (pdu) {
- V9fsState *s = pdu->s;
- /*
- * Cancelled pdu are added back to the freelist
- * by flush request .
- */
- if (!pdu->cancelled) {
- QLIST_REMOVE(pdu, next);
- QLIST_INSERT_HEAD(&s->free_list, pdu, next);
- }
- }
+ V9fsState *s = pdu->s;
+
+ g_assert(!pdu->cancelled);
+ QLIST_REMOVE(pdu, next);
+ QLIST_INSERT_HEAD(&s->free_list, pdu, next);
}
/*
@@ -691,7 +699,7 @@ void pdu_free(V9fsPDU *pdu)
* because we always expect to have enough space to encode
* error details
*/
-static void pdu_complete(V9fsPDU *pdu, ssize_t len)
+static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len)
{
int8_t id = pdu->id + 1; /* Response */
V9fsState *s = pdu->s;
@@ -729,9 +737,9 @@ static void pdu_complete(V9fsPDU *pdu, ssize_t len)
pdu_push_and_notify(pdu);
/* Now wakeup anybody waiting in flush for this request */
- qemu_co_queue_next(&pdu->complete);
-
- pdu_free(pdu);
+ if (!qemu_co_queue_next(&pdu->complete)) {
+ pdu_free(pdu);
+ }
}
#ifndef CONFIG_WIN32
@@ -863,9 +871,9 @@ static uint32_t stat_to_v9mode(const struct stat *stbuf)
return mode;
}
-static int stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
- const struct stat *stbuf,
- V9fsStat *v9stat)
+static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
+ const struct stat *stbuf,
+ V9fsStat *v9stat)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int err;
@@ -879,15 +887,15 @@ static int stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
v9stat->mtime = stbuf->st_mtime;
v9stat->length = stbuf->st_size;
- v9fs_string_null(&v9stat->uid);
- v9fs_string_null(&v9stat->gid);
- v9fs_string_null(&v9stat->muid);
+ v9fs_string_free(&v9stat->uid);
+ v9fs_string_free(&v9stat->gid);
+ v9fs_string_free(&v9stat->muid);
v9stat->n_uid = stbuf->st_uid;
v9stat->n_gid = stbuf->st_gid;
v9stat->n_muid = 0;
- v9fs_string_null(&v9stat->extension);
+ v9fs_string_free(&v9stat->extension);
if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
err = v9fs_co_readlink(pdu, name, &v9stat->extension);
@@ -1011,10 +1019,8 @@ static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
V9fsPath str;
v9fs_path_init(&str);
v9fs_path_copy(&str, dst);
- v9fs_string_sprintf((V9fsString *)dst, "%s%s", src->data, str.data+len);
+ v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
v9fs_path_free(&str);
- /* +1 to include terminating NULL */
- dst->size++;
}
static inline bool is_ro_export(FsContext *ctx)
@@ -1022,7 +1028,7 @@ static inline bool is_ro_export(FsContext *ctx)
return ctx->export_flags & V9FS_RDONLY;
}
-static void v9fs_version(void *opaque)
+static void coroutine_fn v9fs_version(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
ssize_t err;
@@ -1061,7 +1067,7 @@ out:
v9fs_string_free(&version);
}
-static void v9fs_attach(void *opaque)
+static void coroutine_fn v9fs_attach(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
@@ -1132,7 +1138,7 @@ out_nofid:
v9fs_string_free(&aname);
}
-static void v9fs_stat(void *opaque)
+static void coroutine_fn v9fs_stat(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -1177,7 +1183,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_getattr(void *opaque)
+static void coroutine_fn v9fs_getattr(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -1254,7 +1260,7 @@ out_nofid:
#define P9_ATTR_MASK 127
-static void v9fs_setattr(void *opaque)
+static void coroutine_fn v9fs_setattr(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int err = 0;
@@ -1387,7 +1393,7 @@ static bool not_same_qid(const V9fsQID *qid1, const V9fsQID *qid2)
qid1->path != qid2->path;
}
-static void v9fs_walk(void *opaque)
+static void coroutine_fn v9fs_walk(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int name_idx;
@@ -1442,13 +1448,14 @@ static void v9fs_walk(void *opaque)
goto out_nofid;
}
+ v9fs_path_init(&dpath);
+ v9fs_path_init(&path);
+
err = fid_to_qid(pdu, fidp, &qid);
if (err < 0) {
goto out;
}
- v9fs_path_init(&dpath);
- v9fs_path_init(&path);
/*
 * Both dpath and path initially point to fidp.
* Needed to handle request with nwnames == 0
@@ -1475,7 +1482,10 @@ static void v9fs_walk(void *opaque)
memcpy(&qids[name_idx], &qid, sizeof(qid));
}
if (fid == newfid) {
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
WARN("[%d][ >> %s]\n", __LINE__, __func__);
v9fs_path_copy(&fidp->path, &path);
} else {
@@ -1508,7 +1518,7 @@ out_nofid:
}
}
-static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path)
+static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path)
{
struct statfs stbuf;
int32_t iounit = 0;
@@ -1528,7 +1538,7 @@ static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path)
return iounit;
}
-static void v9fs_open(void *opaque)
+static void coroutine_fn v9fs_open(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int flags;
@@ -1562,7 +1572,11 @@ static void v9fs_open(void *opaque)
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ ERR("[%d][ >> %s]\n", __LINE__, __func__);
+ err = -EINVAL;
+ goto out;
+ }
err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
if (err < 0) {
@@ -1627,7 +1641,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_lcreate(void *opaque)
+static void coroutine_fn v9fs_lcreate(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t dfid, flags, mode;
@@ -1732,7 +1746,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_clunk(void *opaque)
+static void coroutine_fn v9fs_clunk(void *opaque)
{
int err;
int32_t fid;
@@ -1771,20 +1785,17 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
{
ssize_t err;
size_t offset = 7;
- int read_count;
- int64_t xattr_len;
+ uint64_t read_count;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
- xattr_len = fidp->fs.xattr.len;
- read_count = xattr_len - off;
+ if (fidp->fs.xattr.len < off) {
+ read_count = 0;
+ } else {
+ read_count = fidp->fs.xattr.len - off;
+ }
if (read_count > max_count) {
read_count = max_count;
- } else if (read_count < 0) {
- /*
- * read beyond XATTR value
- */
- read_count = 0;
}
err = pdu_marshal(pdu, offset, "d", read_count);
if (err < 0) {
@@ -1803,8 +1814,9 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
}
#endif
-static int v9fs_do_readdir_with_stat(V9fsPDU *pdu,
- V9fsFidState *fidp, uint32_t max_count)
+static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
+ V9fsFidState *fidp,
+ uint32_t max_count)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsPath path;
@@ -1903,7 +1915,7 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
qemu_iovec_concat(qiov, &elem, skip, size);
}
-static void v9fs_read(void *opaque)
+static void coroutine_fn v9fs_read(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -1973,15 +1985,16 @@ static void v9fs_read(void *opaque)
/* IO error return the error */
ERR("[%d][ >> %s]\n", __LINE__, __func__);
err = len;
- goto out;
+ goto out_free_iovec;
}
} while (count < max_count && len > 0);
err = pdu_marshal(pdu, offset, "d", count);
if (err < 0) {
ERR("[%d][ >> %s]\n", __LINE__, __func__);
- goto out;
+ goto out_free_iovec;
}
err += offset + count;
+out_free_iovec:
qemu_iovec_destroy(&qiov);
qemu_iovec_destroy(&qiov_full);
#ifndef CONFIG_WIN32
@@ -2008,8 +2021,8 @@ static size_t v9fs_readdir_data_size(V9fsString *name)
return 24 + v9fs_string_size(name);
}
-static int v9fs_do_readdir(V9fsPDU *pdu,
- V9fsFidState *fidp, int32_t max_count)
+static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
+ int32_t max_count)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
size_t size;
@@ -2034,7 +2047,6 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
err = v9fs_co_readdir(pdu, fidp, &dent);
if (err || !dent) {
- ERR("[%d][ >> %s]\n", __LINE__, __func__);
break;
}
v9fs_string_init(&name);
@@ -2100,7 +2112,7 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
return count;
}
-static void v9fs_readdir(void *opaque)
+static void coroutine_fn v9fs_readdir(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -2162,23 +2174,18 @@ static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
{
int i, to_copy;
ssize_t err = 0;
- int write_count;
- int64_t xattr_len;
+ uint64_t write_count;
size_t offset = 7;
- xattr_len = fidp->fs.xattr.len;
- write_count = xattr_len - off;
- if (write_count > count) {
- write_count = count;
- } else if (write_count < 0) {
- /*
- * write beyond XATTR value len specified in
- * xattrcreate
- */
+ if (fidp->fs.xattr.len < off) {
err = -ENOSPC;
goto out;
}
+ write_count = fidp->fs.xattr.len - off;
+ if (write_count > count) {
+ write_count = count;
+ }
err = pdu_marshal(pdu, offset, "d", write_count);
if (err < 0) {
return err;
@@ -2204,7 +2211,7 @@ out:
}
#endif
-static void v9fs_write(void *opaque)
+static void coroutine_fn v9fs_write(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
ssize_t err;
@@ -2285,7 +2292,7 @@ static void v9fs_write(void *opaque)
err = pdu_marshal(pdu, offset, "d", total);
if (err < 0) {
ERR("[%d][ >> %s]\n", __LINE__, __func__);
- goto out;
+ goto out_qiov;
}
err += offset;
trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
@@ -2298,7 +2305,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_create(void *opaque)
+static void coroutine_fn v9fs_create(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -2501,7 +2508,7 @@ out_nofid:
v9fs_path_free(&path);
}
-static void v9fs_symlink(void *opaque)
+static void coroutine_fn v9fs_symlink(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
@@ -2596,7 +2603,7 @@ static void v9fs_flush(void *opaque)
pdu_complete(pdu, 7);
}
-static void v9fs_link(void *opaque)
+static void coroutine_fn v9fs_link(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
@@ -2638,6 +2645,7 @@ static void v9fs_link(void *opaque)
if (!err) {
err = offset;
}
+ put_fid(pdu, oldfidp);
out:
put_fid(pdu, dfidp);
out_nofid:
@@ -2646,7 +2654,7 @@ out_nofid:
}
/* Only works with path name based fid */
-static void v9fs_remove(void *opaque)
+static void coroutine_fn v9fs_remove(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -2691,7 +2699,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_unlinkat(void *opaque)
+static void coroutine_fn v9fs_unlinkat(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int err = 0;
@@ -2753,9 +2761,11 @@ out_nofid:
v9fs_string_free(&name);
}
+
/* Only works with path name based fid */
-static int v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
- int32_t newdirfid, V9fsString *name)
+static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
+ int32_t newdirfid,
+ V9fsString *name)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
char *end;
@@ -2773,7 +2783,10 @@ static int v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(dirfidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
} else {
old_name = fidp->path.data;
@@ -2813,7 +2826,7 @@ out_nofid:
}
/* Only works with path name based fid */
-static void v9fs_rename(void *opaque)
+static void coroutine_fn v9fs_rename(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -2846,7 +2859,10 @@ static void v9fs_rename(void *opaque)
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
/* if fs driver is not path based, return EOPNOTSUPP */
if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
err = -EOPNOTSUPP;
@@ -2865,9 +2881,10 @@ out_nofid:
v9fs_string_free(&name);
}
-static void v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
- V9fsString *old_name, V9fsPath *newdir,
- V9fsString *new_name)
+static void coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
+ V9fsString *old_name,
+ V9fsPath *newdir,
+ V9fsString *new_name)
{
V9fsFidState *tfidp;
V9fsPath oldpath, newpath;
@@ -2893,9 +2910,10 @@ static void v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
v9fs_path_free(&newpath);
}
-static int v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
- V9fsString *old_name, int32_t newdirfid,
- V9fsString *new_name)
+static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
+ V9fsString *old_name,
+ int32_t newdirfid,
+ V9fsString *new_name)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int err = 0;
@@ -2937,7 +2955,7 @@ out:
return err;
}
-static void v9fs_renameat(void *opaque)
+static void coroutine_fn v9fs_renameat(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
ssize_t err = 0;
@@ -2980,7 +2998,7 @@ out_err:
v9fs_string_free(&new_name);
}
-static void v9fs_wstat(void *opaque)
+static void coroutine_fn v9fs_wstat(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -3134,7 +3152,7 @@ static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
fsid_val, f_namelen);
}
-static void v9fs_statfs(void *opaque)
+static void coroutine_fn v9fs_statfs(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int32_t fid;
@@ -3169,7 +3187,7 @@ out_nofid:
pdu_complete(pdu, retval);
}
-static void v9fs_mknod(void *opaque)
+static void coroutine_fn v9fs_mknod(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
@@ -3240,7 +3258,7 @@ out_nofid:
* do any thing in * qemu 9p server side lock code path.
* So when a TLOCK request comes, always return success
*/
-static void v9fs_lock(void *opaque)
+static void coroutine_fn v9fs_lock(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
int8_t status;
@@ -3294,7 +3312,7 @@ out_nofid:
* When a TGETLOCK request comes, always return success because all lock
* handling is done by client's VFS layer.
*/
-static void v9fs_getlock(void *opaque)
+static void coroutine_fn v9fs_getlock(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
size_t offset = 7;
@@ -3340,7 +3358,7 @@ out_nofid:
v9fs_string_free(&glock.client_id);
}
-static void v9fs_mkdir(void *opaque)
+static void coroutine_fn v9fs_mkdir(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
@@ -3396,7 +3414,7 @@ out_nofid:
}
#ifndef CONFIG_WIN32
-static void v9fs_xattrwalk(void *opaque)
+static void coroutine_fn v9fs_xattrwalk(void *opaque)
{
int64_t size;
V9fsString name;
@@ -3426,7 +3444,7 @@ static void v9fs_xattrwalk(void *opaque)
goto out;
}
v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
- if (name.data == NULL) {
+ if (!v9fs_string_size(&name)) {
/*
* listxattr request. Get the size first
*/
@@ -3441,7 +3459,7 @@ static void v9fs_xattrwalk(void *opaque)
*/
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fid_type = P9_FID_XATTR;
- xattr_fidp->fs.xattr.copied_len = -1;
+ xattr_fidp->fs.xattr.xattrwalk_fid = true;
if (size) {
xattr_fidp->fs.xattr.value = g_malloc(size);
err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
@@ -3474,7 +3492,7 @@ static void v9fs_xattrwalk(void *opaque)
*/
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fid_type = P9_FID_XATTR;
- xattr_fidp->fs.xattr.copied_len = -1;
+ xattr_fidp->fs.xattr.xattrwalk_fid = true;
if (size) {
xattr_fidp->fs.xattr.value = g_malloc(size);
err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
@@ -3502,11 +3520,11 @@ out_nofid:
v9fs_string_free(&name);
}
-static void v9fs_xattrcreate(void *opaque)
+static void coroutine_fn v9fs_xattrcreate(void *opaque)
{
int flags;
int32_t fid;
- int64_t size;
+ uint64_t size;
ssize_t err = 0;
V9fsString name;
size_t offset = 7;
@@ -3521,21 +3539,33 @@ static void v9fs_xattrcreate(void *opaque)
}
trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
+ if (size > XATTR_SIZE_MAX) {
+ err = -E2BIG;
+ goto out_nofid;
+ }
+
file_fidp = get_fid(pdu, fid);
if (file_fidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
+ if (file_fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out_put_fid;
+ }
+
/* Make the file fid point to xattr */
xattr_fidp = file_fidp;
xattr_fidp->fid_type = P9_FID_XATTR;
xattr_fidp->fs.xattr.copied_len = 0;
+ xattr_fidp->fs.xattr.xattrwalk_fid = false;
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fs.xattr.flags = flags;
v9fs_string_init(&xattr_fidp->fs.xattr.name);
v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
- xattr_fidp->fs.xattr.value = g_malloc(size);
+ xattr_fidp->fs.xattr.value = g_malloc0(size);
err = offset;
+out_put_fid:
put_fid(pdu, file_fidp);
out_nofid:
pdu_complete(pdu, err);
@@ -3543,7 +3573,7 @@ out_nofid:
}
#endif
-static void v9fs_readlink(void *opaque)
+static void coroutine_fn v9fs_readlink(void *opaque)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
@@ -3622,14 +3652,14 @@ static CoroutineEntry *pdu_co_handlers[] = {
[P9_TREMOVE] = v9fs_remove,
};
-static void v9fs_op_not_supp(void *opaque)
+static void coroutine_fn v9fs_op_not_supp(void *opaque)
{
WARN("[%d][%s] >> This operation is not supported.\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
pdu_complete(pdu, -EOPNOTSUPP);
}
-static void v9fs_fs_ro(void *opaque)
+static void coroutine_fn v9fs_fs_ro(void *opaque)
{
WARN("[%d][%s] >> This is the read-only operation.\n", __LINE__, __func__);
V9fsPDU *pdu = opaque;
@@ -3772,8 +3802,11 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
rc = 0;
out:
if (rc) {
- g_free(s->ctx.fs_root);
+ if (s->ops->cleanup && s->ctx.private) {
+ s->ops->cleanup(&s->ctx);
+ }
g_free(s->tag);
+ g_free(s->ctx.fs_root);
v9fs_path_free(&path);
}
return rc;
@@ -3782,8 +3815,41 @@ out:
void v9fs_device_unrealize_common(V9fsState *s, Error **errp)
{
TRACE("[%d][ Enter >> %s]\n", __LINE__, __func__);
- g_free(s->ctx.fs_root);
+ if (s->ops->cleanup) {
+ s->ops->cleanup(&s->ctx);
+ }
g_free(s->tag);
+ g_free(s->ctx.fs_root);
+}
+
+typedef struct VirtfsCoResetData {
+ V9fsPDU pdu;
+ bool done;
+} VirtfsCoResetData;
+
+static void coroutine_fn virtfs_co_reset(void *opaque)
+{
+ VirtfsCoResetData *data = opaque;
+
+ virtfs_reset(&data->pdu);
+ data->done = true;
+}
+
+void v9fs_reset(V9fsState *s)
+{
+ VirtfsCoResetData data = { .pdu = { .s = s }, .done = false };
+ Coroutine *co;
+
+ while (!QLIST_EMPTY(&s->active_list)) {
+ aio_poll(qemu_get_aio_context(), true);
+ }
+
+ co = qemu_coroutine_create(virtfs_co_reset, &data);
+ qemu_coroutine_enter(co);
+
+ while (!data.done) {
+ aio_poll(qemu_get_aio_context(), true);
+ }
}
static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
diff --git a/hw/9pfs/9p-proxy.c b/hw/9pfs/9p-proxy.c
index f265501eac..f4aa7a9d70 100644
--- a/hw/9pfs/9p-proxy.c
+++ b/hw/9pfs/9p-proxy.c
@@ -294,8 +294,7 @@ static int v9fs_receive_status(V9fsProxy *proxy,
* This request read by proxy helper process
* returns 0 on success and -errno on error
*/
-static int v9fs_request(V9fsProxy *proxy, int type,
- void *response, const char *fmt, ...)
+static int v9fs_request(V9fsProxy *proxy, int type, void *response, ...)
{
dev_t rdev;
va_list ap;
@@ -317,7 +316,7 @@ static int v9fs_request(V9fsProxy *proxy, int type,
}
iovec = &proxy->out_iovec;
reply = &proxy->in_iovec;
- va_start(ap, fmt);
+ va_start(ap, response);
switch (type) {
case T_OPEN:
path = va_arg(ap, V9fsString *);
@@ -605,7 +604,7 @@ close_error:
static int proxy_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf)
{
int retval;
- retval = v9fs_request(fs_ctx->private, T_LSTAT, stbuf, "s", fs_path);
+ retval = v9fs_request(fs_ctx->private, T_LSTAT, stbuf, fs_path);
if (retval < 0) {
errno = -retval;
return -1;
@@ -617,8 +616,7 @@ static ssize_t proxy_readlink(FsContext *fs_ctx, V9fsPath *fs_path,
char *buf, size_t bufsz)
{
int retval;
- retval = v9fs_request(fs_ctx->private, T_READLINK, buf, "sd",
- fs_path, bufsz);
+ retval = v9fs_request(fs_ctx->private, T_READLINK, buf, fs_path, bufsz);
if (retval < 0) {
errno = -retval;
return -1;
@@ -639,7 +637,7 @@ static int proxy_closedir(FsContext *ctx, V9fsFidOpenState *fs)
static int proxy_open(FsContext *ctx, V9fsPath *fs_path,
int flags, V9fsFidOpenState *fs)
{
- fs->fd = v9fs_request(ctx->private, T_OPEN, NULL, "sd", fs_path, flags);
+ fs->fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, flags);
if (fs->fd < 0) {
errno = -fs->fd;
fs->fd = -1;
@@ -653,7 +651,7 @@ static int proxy_opendir(FsContext *ctx,
int serrno, fd;
fs->dir.stream = NULL;
- fd = v9fs_request(ctx->private, T_OPEN, NULL, "sd", fs_path, O_DIRECTORY);
+ fd = v9fs_request(ctx->private, T_OPEN, NULL, fs_path, O_DIRECTORY);
if (fd < 0) {
errno = -fd;
return -1;
@@ -735,8 +733,8 @@ static ssize_t proxy_pwritev(FsContext *ctx, V9fsFidOpenState *fs,
static int proxy_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
{
int retval;
- retval = v9fs_request(fs_ctx->private, T_CHMOD, NULL, "sd",
- fs_path, credp->fc_mode);
+ retval = v9fs_request(fs_ctx->private, T_CHMOD, NULL, fs_path,
+ credp->fc_mode);
if (retval < 0) {
errno = -retval;
}
@@ -752,8 +750,8 @@ static int proxy_mknod(FsContext *fs_ctx, V9fsPath *dir_path,
v9fs_string_init(&fullname);
v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
- retval = v9fs_request(fs_ctx->private, T_MKNOD, NULL, "sdqdd",
- &fullname, credp->fc_mode, credp->fc_rdev,
+ retval = v9fs_request(fs_ctx->private, T_MKNOD, NULL, &fullname,
+ credp->fc_mode, credp->fc_rdev,
credp->fc_uid, credp->fc_gid);
v9fs_string_free(&fullname);
if (retval < 0) {
@@ -772,14 +770,13 @@ static int proxy_mkdir(FsContext *fs_ctx, V9fsPath *dir_path,
v9fs_string_init(&fullname);
v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
- retval = v9fs_request(fs_ctx->private, T_MKDIR, NULL, "sddd", &fullname,
+ retval = v9fs_request(fs_ctx->private, T_MKDIR, NULL, &fullname,
credp->fc_mode, credp->fc_uid, credp->fc_gid);
v9fs_string_free(&fullname);
if (retval < 0) {
errno = -retval;
retval = -1;
}
- v9fs_string_free(&fullname);
return retval;
}
@@ -804,9 +801,8 @@ static int proxy_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name,
v9fs_string_init(&fullname);
v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
- fs->fd = v9fs_request(fs_ctx->private, T_CREATE, NULL, "sdddd",
- &fullname, flags, credp->fc_mode,
- credp->fc_uid, credp->fc_gid);
+ fs->fd = v9fs_request(fs_ctx->private, T_CREATE, NULL, &fullname, flags,
+ credp->fc_mode, credp->fc_uid, credp->fc_gid);
v9fs_string_free(&fullname);
if (fs->fd < 0) {
errno = -fs->fd;
@@ -827,8 +823,8 @@ static int proxy_symlink(FsContext *fs_ctx, const char *oldpath,
v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name);
v9fs_string_sprintf(&target, "%s", oldpath);
- retval = v9fs_request(fs_ctx->private, T_SYMLINK, NULL, "ssdd",
- &target, &fullname, credp->fc_uid, credp->fc_gid);
+ retval = v9fs_request(fs_ctx->private, T_SYMLINK, NULL, &target, &fullname,
+ credp->fc_uid, credp->fc_gid);
v9fs_string_free(&fullname);
v9fs_string_free(&target);
if (retval < 0) {
@@ -847,7 +843,7 @@ static int proxy_link(FsContext *ctx, V9fsPath *oldpath,
v9fs_string_init(&newpath);
v9fs_string_sprintf(&newpath, "%s/%s", dirpath->data, name);
- retval = v9fs_request(ctx->private, T_LINK, NULL, "ss", oldpath, &newpath);
+ retval = v9fs_request(ctx->private, T_LINK, NULL, oldpath, &newpath);
v9fs_string_free(&newpath);
if (retval < 0) {
errno = -retval;
@@ -860,7 +856,7 @@ static int proxy_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size)
{
int retval;
- retval = v9fs_request(ctx->private, T_TRUNCATE, NULL, "sq", fs_path, size);
+ retval = v9fs_request(ctx->private, T_TRUNCATE, NULL, fs_path, size);
if (retval < 0) {
errno = -retval;
return -1;
@@ -879,8 +875,7 @@ static int proxy_rename(FsContext *ctx, const char *oldpath,
v9fs_string_sprintf(&oldname, "%s", oldpath);
v9fs_string_sprintf(&newname, "%s", newpath);
- retval = v9fs_request(ctx->private, T_RENAME, NULL, "ss",
- &oldname, &newname);
+ retval = v9fs_request(ctx->private, T_RENAME, NULL, &oldname, &newname);
v9fs_string_free(&oldname);
v9fs_string_free(&newname);
if (retval < 0) {
@@ -892,8 +887,8 @@ static int proxy_rename(FsContext *ctx, const char *oldpath,
static int proxy_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
{
int retval;
- retval = v9fs_request(fs_ctx->private, T_CHOWN, NULL, "sdd",
- fs_path, credp->fc_uid, credp->fc_gid);
+ retval = v9fs_request(fs_ctx->private, T_CHOWN, NULL, fs_path,
+ credp->fc_uid, credp->fc_gid);
if (retval < 0) {
errno = -retval;
}
@@ -904,8 +899,7 @@ static int proxy_utimensat(FsContext *s, V9fsPath *fs_path,
const struct timespec *buf)
{
int retval;
- retval = v9fs_request(s->private, T_UTIME, NULL, "sqqqq",
- fs_path,
+ retval = v9fs_request(s->private, T_UTIME, NULL, fs_path,
buf[0].tv_sec, buf[0].tv_nsec,
buf[1].tv_sec, buf[1].tv_nsec);
if (retval < 0) {
@@ -920,7 +914,7 @@ static int proxy_remove(FsContext *ctx, const char *path)
V9fsString name;
v9fs_string_init(&name);
v9fs_string_sprintf(&name, "%s", path);
- retval = v9fs_request(ctx->private, T_REMOVE, NULL, "s", &name);
+ retval = v9fs_request(ctx->private, T_REMOVE, NULL, &name);
v9fs_string_free(&name);
if (retval < 0) {
errno = -retval;
@@ -949,7 +943,7 @@ static int proxy_fsync(FsContext *ctx, int fid_type,
static int proxy_statfs(FsContext *s, V9fsPath *fs_path, struct statfs *stbuf)
{
int retval;
- retval = v9fs_request(s->private, T_STATFS, stbuf, "s", fs_path);
+ retval = v9fs_request(s->private, T_STATFS, stbuf, fs_path);
if (retval < 0) {
errno = -retval;
return -1;
@@ -965,8 +959,8 @@ static ssize_t proxy_lgetxattr(FsContext *ctx, V9fsPath *fs_path,
v9fs_string_init(&xname);
v9fs_string_sprintf(&xname, "%s", name);
- retval = v9fs_request(ctx->private, T_LGETXATTR, value, "dss", size,
- fs_path, &xname);
+ retval = v9fs_request(ctx->private, T_LGETXATTR, value, size, fs_path,
+ &xname);
v9fs_string_free(&xname);
if (retval < 0) {
errno = -retval;
@@ -978,8 +972,7 @@ static ssize_t proxy_llistxattr(FsContext *ctx, V9fsPath *fs_path,
void *value, size_t size)
{
int retval;
- retval = v9fs_request(ctx->private, T_LLISTXATTR, value, "ds", size,
- fs_path);
+ retval = v9fs_request(ctx->private, T_LLISTXATTR, value, size, fs_path);
if (retval < 0) {
errno = -retval;
}
@@ -1000,8 +993,8 @@ static int proxy_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name,
xvalue.data = g_malloc(size);
memcpy(xvalue.data, value, size);
- retval = v9fs_request(ctx->private, T_LSETXATTR, value, "sssdd",
- fs_path, &xname, &xvalue, size, flags);
+ retval = v9fs_request(ctx->private, T_LSETXATTR, value, fs_path, &xname,
+ &xvalue, size, flags);
v9fs_string_free(&xname);
v9fs_string_free(&xvalue);
if (retval < 0) {
@@ -1018,8 +1011,7 @@ static int proxy_lremovexattr(FsContext *ctx, V9fsPath *fs_path,
v9fs_string_init(&xname);
v9fs_string_sprintf(&xname, "%s", name);
- retval = v9fs_request(ctx->private, T_LREMOVEXATTR, NULL, "ss",
- fs_path, &xname);
+ retval = v9fs_request(ctx->private, T_LREMOVEXATTR, NULL, fs_path, &xname);
v9fs_string_free(&xname);
if (retval < 0) {
errno = -retval;
@@ -1031,13 +1023,10 @@ static int proxy_name_to_path(FsContext *ctx, V9fsPath *dir_path,
const char *name, V9fsPath *target)
{
if (dir_path) {
- v9fs_string_sprintf((V9fsString *)target, "%s/%s",
- dir_path->data, name);
+ v9fs_path_sprintf(target, "%s/%s", dir_path->data, name);
} else {
- v9fs_string_sprintf((V9fsString *)target, "%s", name);
+ v9fs_path_sprintf(target, "%s", name);
}
- /* Bump the size for including terminating NULL */
- target->size++;
return 0;
}
@@ -1086,7 +1075,7 @@ static int proxy_ioc_getversion(FsContext *fs_ctx, V9fsPath *path,
errno = ENOTTY;
return -1;
}
- err = v9fs_request(fs_ctx->private, T_GETVERSION, st_gen, "s", path);
+ err = v9fs_request(fs_ctx->private, T_GETVERSION, st_gen, path);
if (err < 0) {
errno = -err;
err = -1;
@@ -1179,9 +1168,22 @@ static int proxy_init(FsContext *ctx)
return 0;
}
+static void proxy_cleanup(FsContext *ctx)
+{
+ V9fsProxy *proxy = ctx->private;
+
+ g_free(proxy->out_iovec.iov_base);
+ g_free(proxy->in_iovec.iov_base);
+ if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) {
+ close(proxy->sockfd);
+ }
+ g_free(proxy);
+}
+
FileOperations proxy_ops = {
.parse_opts = proxy_parse_opts,
.init = proxy_init,
+ .cleanup = proxy_cleanup,
.lstat = proxy_lstat,
.readlink = proxy_readlink,
.close = proxy_close,
diff --git a/hw/9pfs/9p-synth.h b/hw/9pfs/9p-synth.h
index 6bcb44ace2..49c2fc7b27 100644
--- a/hw/9pfs/9p-synth.h
+++ b/hw/9pfs/9p-synth.h
@@ -43,10 +43,10 @@ typedef struct V9fsSynthOpenState {
struct dirent dent;
} V9fsSynthOpenState;
-extern int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode,
- const char *name, V9fsSynthNode **result);
-extern int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode,
- const char *name, v9fs_synth_read read,
- v9fs_synth_write write, void *arg);
+int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode,
+ const char *name, V9fsSynthNode **result);
+int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode,
+ const char *name, v9fs_synth_read read,
+ v9fs_synth_write write, void *arg);
#endif
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index dfe293d11d..faebd91f5f 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include <glib/gprintf.h>
#include "hw/virtio/virtio.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
@@ -179,6 +180,20 @@ void v9fs_path_free(V9fsPath *path)
path->size = 0;
}
+
+void GCC_FMT_ATTR(2, 3)
+v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...)
+{
+ va_list ap;
+
+ v9fs_path_free(path);
+
+ va_start(ap, fmt);
+ /* Bump the size for including terminating NULL */
+ path->size = g_vasprintf(&path->data, fmt, ap) + 1;
+ va_end(ap);
+}
+
void v9fs_path_copy(V9fsPath *lhs, V9fsPath *rhs)
{
v9fs_path_free(lhs);
@@ -221,7 +236,7 @@ static size_t v9fs_string_size(V9fsString *str)
/*
* returns 0 if fid got re-opened, 1 if not, < 0 on error */
-static int v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
+static int coroutine_fn v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
{
int err = 1;
if (f->fid_type == P9_FID_FILE) {
@@ -240,7 +255,7 @@ static int v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
return err;
}
-static V9fsFidState *get_fid(V9fsPDU *pdu, int32_t fid)
+static V9fsFidState *coroutine_fn get_fid(V9fsPDU *pdu, int32_t fid)
{
int err;
V9fsFidState *f;
@@ -306,11 +321,11 @@ static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
return f;
}
-static int v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
+static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
{
int retval = 0;
- if (fidp->fs.xattr.copied_len == -1) {
+ if (fidp->fs.xattr.xattrwalk_fid) {
/* getxattr/listxattr fid */
goto free_value;
}
@@ -338,7 +353,7 @@ free_value:
return retval;
}
-static int free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
+static int coroutine_fn free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
{
int retval = 0;
@@ -359,7 +374,7 @@ static int free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
return retval;
}
-static int put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
+static int coroutine_fn put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
{
BUG_ON(!fidp->ref);
fidp->ref--;
@@ -403,7 +418,7 @@ static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
return fidp;
}
-void v9fs_reclaim_fd(V9fsPDU *pdu)
+void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu)
{
int reclaim_count = 0;
V9fsState *s = pdu->s;
@@ -484,7 +499,7 @@ void v9fs_reclaim_fd(V9fsPDU *pdu)
}
}
-static int v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
+static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
{
int err;
V9fsState *s = pdu->s;
@@ -517,10 +532,10 @@ static int v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
return 0;
}
-static void virtfs_reset(V9fsPDU *pdu)
+static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
- V9fsFidState *fidp = NULL;
+ V9fsFidState *fidp;
/* Free all fids */
while (s->fid_list) {
@@ -533,11 +548,6 @@ static void virtfs_reset(V9fsPDU *pdu)
free_fid(pdu, fidp);
}
}
- if (fidp) {
- /* One or more unclunked fids found... */
- error_report("9pfs:%s: One or more uncluncked fids "
- "found during reset", __func__);
- }
}
#define P9_QID_TYPE_DIR 0x80
@@ -583,7 +593,8 @@ static void stat_to_qid(const struct stat *stbuf, V9fsQID *qidp)
}
}
-static int fid_to_qid(V9fsPDU *pdu, V9fsFidState *fidp, V9fsQID *qidp)
+static int coroutine_fn fid_to_qid(V9fsPDU *pdu, V9fsFidState *fidp,
+ V9fsQID *qidp)
{
struct stat stbuf;
int err;
@@ -610,17 +621,11 @@ V9fsPDU *pdu_alloc(V9fsState *s)
void pdu_free(V9fsPDU *pdu)
{
- if (pdu) {
- V9fsState *s = pdu->s;
- /*
- * Cancelled pdu are added back to the freelist
- * by flush request .
- */
- if (!pdu->cancelled) {
- QLIST_REMOVE(pdu, next);
- QLIST_INSERT_HEAD(&s->free_list, pdu, next);
- }
- }
+ V9fsState *s = pdu->s;
+
+ g_assert(!pdu->cancelled);
+ QLIST_REMOVE(pdu, next);
+ QLIST_INSERT_HEAD(&s->free_list, pdu, next);
}
/*
@@ -628,7 +633,7 @@ void pdu_free(V9fsPDU *pdu)
* because we always expect to have enough space to encode
* error details
*/
-static void pdu_complete(V9fsPDU *pdu, ssize_t len)
+static void coroutine_fn pdu_complete(V9fsPDU *pdu, ssize_t len)
{
int8_t id = pdu->id + 1; /* Response */
V9fsState *s = pdu->s;
@@ -665,9 +670,9 @@ static void pdu_complete(V9fsPDU *pdu, ssize_t len)
pdu_push_and_notify(pdu);
/* Now wakeup anybody waiting in flush for this request */
- qemu_co_queue_next(&pdu->complete);
-
- pdu_free(pdu);
+ if (!qemu_co_queue_next(&pdu->complete)) {
+ pdu_free(pdu);
+ }
}
static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
@@ -795,9 +800,9 @@ static uint32_t stat_to_v9mode(const struct stat *stbuf)
return mode;
}
-static int stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
- const struct stat *stbuf,
- V9fsStat *v9stat)
+static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
+ const struct stat *stbuf,
+ V9fsStat *v9stat)
{
int err;
const char *str;
@@ -810,15 +815,15 @@ static int stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
v9stat->mtime = stbuf->st_mtime;
v9stat->length = stbuf->st_size;
- v9fs_string_null(&v9stat->uid);
- v9fs_string_null(&v9stat->gid);
- v9fs_string_null(&v9stat->muid);
+ v9fs_string_free(&v9stat->uid);
+ v9fs_string_free(&v9stat->gid);
+ v9fs_string_free(&v9stat->muid);
v9stat->n_uid = stbuf->st_uid;
v9stat->n_gid = stbuf->st_gid;
v9stat->n_muid = 0;
- v9fs_string_null(&v9stat->extension);
+ v9fs_string_free(&v9stat->extension);
if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
err = v9fs_co_readlink(pdu, name, &v9stat->extension);
@@ -917,10 +922,8 @@ static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
V9fsPath str;
v9fs_path_init(&str);
v9fs_path_copy(&str, dst);
- v9fs_string_sprintf((V9fsString *)dst, "%s%s", src->data, str.data+len);
+ v9fs_path_sprintf(dst, "%s%s", src->data, str.data + len);
v9fs_path_free(&str);
- /* +1 to include terminating NULL */
- dst->size++;
}
static inline bool is_ro_export(FsContext *ctx)
@@ -928,7 +931,7 @@ static inline bool is_ro_export(FsContext *ctx)
return ctx->export_flags & V9FS_RDONLY;
}
-static void v9fs_version(void *opaque)
+static void coroutine_fn v9fs_version(void *opaque)
{
ssize_t err;
V9fsPDU *pdu = opaque;
@@ -966,7 +969,7 @@ out:
v9fs_string_free(&version);
}
-static void v9fs_attach(void *opaque)
+static void coroutine_fn v9fs_attach(void *opaque)
{
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
@@ -1032,7 +1035,7 @@ out_nofid:
v9fs_string_free(&aname);
}
-static void v9fs_stat(void *opaque)
+static void coroutine_fn v9fs_stat(void *opaque)
{
int32_t fid;
V9fsStat v9stat;
@@ -1076,7 +1079,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_getattr(void *opaque)
+static void coroutine_fn v9fs_getattr(void *opaque)
{
int32_t fid;
size_t offset = 7;
@@ -1152,7 +1155,7 @@ out_nofid:
#define P9_ATTR_MASK 127
-static void v9fs_setattr(void *opaque)
+static void coroutine_fn v9fs_setattr(void *opaque)
{
int err = 0;
int32_t fid;
@@ -1270,7 +1273,7 @@ static bool not_same_qid(const V9fsQID *qid1, const V9fsQID *qid2)
qid1->path != qid2->path;
}
-static void v9fs_walk(void *opaque)
+static void coroutine_fn v9fs_walk(void *opaque)
{
int name_idx;
V9fsQID *qids = NULL;
@@ -1320,13 +1323,14 @@ static void v9fs_walk(void *opaque)
goto out_nofid;
}
+ v9fs_path_init(&dpath);
+ v9fs_path_init(&path);
+
err = fid_to_qid(pdu, fidp, &qid);
if (err < 0) {
goto out;
}
- v9fs_path_init(&dpath);
- v9fs_path_init(&path);
/*
 * Both dpath and path initially point to fidp.
* Needed to handle request with nwnames == 0
@@ -1352,7 +1356,10 @@ static void v9fs_walk(void *opaque)
memcpy(&qids[name_idx], &qid, sizeof(qid));
}
if (fid == newfid) {
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
v9fs_path_copy(&fidp->path, &path);
} else {
newfidp = alloc_fid(s, newfid);
@@ -1383,7 +1390,7 @@ out_nofid:
}
}
-static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path)
+static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path)
{
struct statfs stbuf;
int32_t iounit = 0;
@@ -1403,7 +1410,7 @@ static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path)
return iounit;
}
-static void v9fs_open(void *opaque)
+static void coroutine_fn v9fs_open(void *opaque)
{
int flags;
int32_t fid;
@@ -1434,7 +1441,10 @@ static void v9fs_open(void *opaque)
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
if (err < 0) {
@@ -1493,7 +1503,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_lcreate(void *opaque)
+static void coroutine_fn v9fs_lcreate(void *opaque)
{
int32_t dfid, flags, mode;
gid_t gid;
@@ -1590,7 +1600,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_clunk(void *opaque)
+static void coroutine_fn v9fs_clunk(void *opaque)
{
int err;
int32_t fid;
@@ -1628,20 +1638,17 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
{
ssize_t err;
size_t offset = 7;
- int read_count;
- int64_t xattr_len;
+ uint64_t read_count;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
- xattr_len = fidp->fs.xattr.len;
- read_count = xattr_len - off;
+ if (fidp->fs.xattr.len < off) {
+ read_count = 0;
+ } else {
+ read_count = fidp->fs.xattr.len - off;
+ }
if (read_count > max_count) {
read_count = max_count;
- } else if (read_count < 0) {
- /*
- * read beyond XATTR value
- */
- read_count = 0;
}
err = pdu_marshal(pdu, offset, "d", read_count);
if (err < 0) {
@@ -1659,8 +1666,9 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
return offset;
}
-static int v9fs_do_readdir_with_stat(V9fsPDU *pdu,
- V9fsFidState *fidp, uint32_t max_count)
+static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
+ V9fsFidState *fidp,
+ uint32_t max_count)
{
V9fsPath path;
V9fsStat v9stat;
@@ -1750,7 +1758,7 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
qemu_iovec_concat(qiov, &elem, skip, size);
}
-static void v9fs_read(void *opaque)
+static void coroutine_fn v9fs_read(void *opaque)
{
int32_t fid;
uint64_t off;
@@ -1812,14 +1820,15 @@ static void v9fs_read(void *opaque)
if (len < 0) {
/* IO error return the error */
err = len;
- goto out;
+ goto out_free_iovec;
}
} while (count < max_count && len > 0);
err = pdu_marshal(pdu, offset, "d", count);
if (err < 0) {
- goto out;
+ goto out_free_iovec;
}
err += offset + count;
+out_free_iovec:
qemu_iovec_destroy(&qiov);
qemu_iovec_destroy(&qiov_full);
} else if (fidp->fid_type == P9_FID_XATTR) {
@@ -1843,8 +1852,8 @@ static size_t v9fs_readdir_data_size(V9fsString *name)
return 24 + v9fs_string_size(name);
}
-static int v9fs_do_readdir(V9fsPDU *pdu,
- V9fsFidState *fidp, int32_t max_count)
+static int coroutine_fn v9fs_do_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
+ int32_t max_count)
{
size_t size;
V9fsQID qid;
@@ -1913,7 +1922,7 @@ static int v9fs_do_readdir(V9fsPDU *pdu,
return count;
}
-static void v9fs_readdir(void *opaque)
+static void coroutine_fn v9fs_readdir(void *opaque)
{
int32_t fid;
V9fsFidState *fidp;
@@ -1968,23 +1977,18 @@ static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
{
int i, to_copy;
ssize_t err = 0;
- int write_count;
- int64_t xattr_len;
+ uint64_t write_count;
size_t offset = 7;
- xattr_len = fidp->fs.xattr.len;
- write_count = xattr_len - off;
- if (write_count > count) {
- write_count = count;
- } else if (write_count < 0) {
- /*
- * write beyond XATTR value len specified in
- * xattrcreate
- */
+ if (fidp->fs.xattr.len < off) {
err = -ENOSPC;
goto out;
}
+ write_count = fidp->fs.xattr.len - off;
+ if (write_count > count) {
+ write_count = count;
+ }
err = pdu_marshal(pdu, offset, "d", write_count);
if (err < 0) {
return err;
@@ -2009,7 +2013,7 @@ out:
return err;
}
-static void v9fs_write(void *opaque)
+static void coroutine_fn v9fs_write(void *opaque)
{
ssize_t err;
int32_t fid;
@@ -2079,7 +2083,7 @@ static void v9fs_write(void *opaque)
offset = 7;
err = pdu_marshal(pdu, offset, "d", total);
if (err < 0) {
- goto out;
+ goto out_qiov;
}
err += offset;
trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
@@ -2092,7 +2096,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_create(void *opaque)
+static void coroutine_fn v9fs_create(void *opaque)
{
int32_t fid;
int err = 0;
@@ -2272,7 +2276,7 @@ out_nofid:
v9fs_path_free(&path);
}
-static void v9fs_symlink(void *opaque)
+static void coroutine_fn v9fs_symlink(void *opaque)
{
V9fsPDU *pdu = opaque;
V9fsString name;
@@ -2361,7 +2365,7 @@ static void v9fs_flush(void *opaque)
pdu_complete(pdu, 7);
}
-static void v9fs_link(void *opaque)
+static void coroutine_fn v9fs_link(void *opaque)
{
V9fsPDU *pdu = opaque;
int32_t dfid, oldfid;
@@ -2402,6 +2406,7 @@ static void v9fs_link(void *opaque)
if (!err) {
err = offset;
}
+ put_fid(pdu, oldfidp);
out:
put_fid(pdu, dfidp);
out_nofid:
@@ -2410,7 +2415,7 @@ out_nofid:
}
/* Only works with path name based fid */
-static void v9fs_remove(void *opaque)
+static void coroutine_fn v9fs_remove(void *opaque)
{
int32_t fid;
int err = 0;
@@ -2454,7 +2459,7 @@ out_nofid:
pdu_complete(pdu, err);
}
-static void v9fs_unlinkat(void *opaque)
+static void coroutine_fn v9fs_unlinkat(void *opaque)
{
int err = 0;
V9fsString name;
@@ -2517,8 +2522,9 @@ out_nofid:
/* Only works with path name based fid */
-static int v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
- int32_t newdirfid, V9fsString *name)
+static int coroutine_fn v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
+ int32_t newdirfid,
+ V9fsString *name)
{
char *end;
int err = 0;
@@ -2535,7 +2541,10 @@ static int v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(dirfidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
} else {
old_name = fidp->path.data;
@@ -2575,7 +2584,7 @@ out_nofid:
}
/* Only works with path name based fid */
-static void v9fs_rename(void *opaque)
+static void coroutine_fn v9fs_rename(void *opaque)
{
int32_t fid;
ssize_t err = 0;
@@ -2607,7 +2616,10 @@ static void v9fs_rename(void *opaque)
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
/* if fs driver is not path based, return EOPNOTSUPP */
if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
err = -EOPNOTSUPP;
@@ -2626,9 +2638,10 @@ out_nofid:
v9fs_string_free(&name);
}
-static void v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
- V9fsString *old_name, V9fsPath *newdir,
- V9fsString *new_name)
+static void coroutine_fn v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
+ V9fsString *old_name,
+ V9fsPath *newdir,
+ V9fsString *new_name)
{
V9fsFidState *tfidp;
V9fsPath oldpath, newpath;
@@ -2654,9 +2667,10 @@ static void v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
v9fs_path_free(&newpath);
}
-static int v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
- V9fsString *old_name, int32_t newdirfid,
- V9fsString *new_name)
+static int coroutine_fn v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
+ V9fsString *old_name,
+ int32_t newdirfid,
+ V9fsString *new_name)
{
int err = 0;
V9fsState *s = pdu->s;
@@ -2697,7 +2711,7 @@ out:
return err;
}
-static void v9fs_renameat(void *opaque)
+static void coroutine_fn v9fs_renameat(void *opaque)
{
ssize_t err = 0;
size_t offset = 7;
@@ -2739,7 +2753,7 @@ out_err:
v9fs_string_free(&new_name);
}
-static void v9fs_wstat(void *opaque)
+static void coroutine_fn v9fs_wstat(void *opaque)
{
int32_t fid;
int err = 0;
@@ -2878,7 +2892,7 @@ static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
fsid_val, f_namelen);
}
-static void v9fs_statfs(void *opaque)
+static void coroutine_fn v9fs_statfs(void *opaque)
{
int32_t fid;
ssize_t retval = 0;
@@ -2912,7 +2926,7 @@ out_nofid:
pdu_complete(pdu, retval);
}
-static void v9fs_mknod(void *opaque)
+static void coroutine_fn v9fs_mknod(void *opaque)
{
int mode;
@@ -2978,7 +2992,7 @@ out_nofid:
* do any thing in * qemu 9p server side lock code path.
* So when a TLOCK request comes, always return success
*/
-static void v9fs_lock(void *opaque)
+static void coroutine_fn v9fs_lock(void *opaque)
{
int8_t status;
V9fsFlock flock;
@@ -3031,7 +3045,7 @@ out_nofid:
* When a TGETLOCK request comes, always return success because all lock
* handling is done by client's VFS layer.
*/
-static void v9fs_getlock(void *opaque)
+static void coroutine_fn v9fs_getlock(void *opaque)
{
size_t offset = 7;
struct stat stbuf;
@@ -3076,7 +3090,7 @@ out_nofid:
v9fs_string_free(&glock.client_id);
}
-static void v9fs_mkdir(void *opaque)
+static void coroutine_fn v9fs_mkdir(void *opaque)
{
V9fsPDU *pdu = opaque;
size_t offset = 7;
@@ -3130,7 +3144,7 @@ out_nofid:
v9fs_string_free(&name);
}
-static void v9fs_xattrwalk(void *opaque)
+static void coroutine_fn v9fs_xattrwalk(void *opaque)
{
int64_t size;
V9fsString name;
@@ -3160,7 +3174,7 @@ static void v9fs_xattrwalk(void *opaque)
goto out;
}
v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
- if (name.data == NULL) {
+ if (!v9fs_string_size(&name)) {
/*
* listxattr request. Get the size first
*/
@@ -3175,7 +3189,7 @@ static void v9fs_xattrwalk(void *opaque)
*/
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fid_type = P9_FID_XATTR;
- xattr_fidp->fs.xattr.copied_len = -1;
+ xattr_fidp->fs.xattr.xattrwalk_fid = true;
if (size) {
xattr_fidp->fs.xattr.value = g_malloc(size);
err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
@@ -3208,7 +3222,7 @@ static void v9fs_xattrwalk(void *opaque)
*/
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fid_type = P9_FID_XATTR;
- xattr_fidp->fs.xattr.copied_len = -1;
+ xattr_fidp->fs.xattr.xattrwalk_fid = true;
if (size) {
xattr_fidp->fs.xattr.value = g_malloc(size);
err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
@@ -3236,11 +3250,11 @@ out_nofid:
v9fs_string_free(&name);
}
-static void v9fs_xattrcreate(void *opaque)
+static void coroutine_fn v9fs_xattrcreate(void *opaque)
{
int flags;
int32_t fid;
- int64_t size;
+ uint64_t size;
ssize_t err = 0;
V9fsString name;
size_t offset = 7;
@@ -3255,28 +3269,40 @@ static void v9fs_xattrcreate(void *opaque)
}
trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
+ if (size > XATTR_SIZE_MAX) {
+ err = -E2BIG;
+ goto out_nofid;
+ }
+
file_fidp = get_fid(pdu, fid);
if (file_fidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
+ if (file_fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out_put_fid;
+ }
+
/* Make the file fid point to xattr */
xattr_fidp = file_fidp;
xattr_fidp->fid_type = P9_FID_XATTR;
xattr_fidp->fs.xattr.copied_len = 0;
+ xattr_fidp->fs.xattr.xattrwalk_fid = false;
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fs.xattr.flags = flags;
v9fs_string_init(&xattr_fidp->fs.xattr.name);
v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
- xattr_fidp->fs.xattr.value = g_malloc(size);
+ xattr_fidp->fs.xattr.value = g_malloc0(size);
err = offset;
+out_put_fid:
put_fid(pdu, file_fidp);
out_nofid:
pdu_complete(pdu, err);
v9fs_string_free(&name);
}
-static void v9fs_readlink(void *opaque)
+static void coroutine_fn v9fs_readlink(void *opaque)
{
V9fsPDU *pdu = opaque;
size_t offset = 7;
@@ -3352,13 +3378,13 @@ static CoroutineEntry *pdu_co_handlers[] = {
[P9_TREMOVE] = v9fs_remove,
};
-static void v9fs_op_not_supp(void *opaque)
+static void coroutine_fn v9fs_op_not_supp(void *opaque)
{
V9fsPDU *pdu = opaque;
pdu_complete(pdu, -EOPNOTSUPP);
}
-static void v9fs_fs_ro(void *opaque)
+static void coroutine_fn v9fs_fs_ro(void *opaque)
{
V9fsPDU *pdu = opaque;
pdu_complete(pdu, -EROFS);
@@ -3495,8 +3521,11 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
rc = 0;
out:
if (rc) {
- g_free(s->ctx.fs_root);
+ if (s->ops->cleanup && s->ctx.private) {
+ s->ops->cleanup(&s->ctx);
+ }
g_free(s->tag);
+ g_free(s->ctx.fs_root);
v9fs_path_free(&path);
}
return rc;
@@ -3504,8 +3533,41 @@ out:
void v9fs_device_unrealize_common(V9fsState *s, Error **errp)
{
- g_free(s->ctx.fs_root);
+ if (s->ops->cleanup) {
+ s->ops->cleanup(&s->ctx);
+ }
g_free(s->tag);
+ g_free(s->ctx.fs_root);
+}
+
+typedef struct VirtfsCoResetData {
+ V9fsPDU pdu;
+ bool done;
+} VirtfsCoResetData;
+
+static void coroutine_fn virtfs_co_reset(void *opaque)
+{
+ VirtfsCoResetData *data = opaque;
+
+ virtfs_reset(&data->pdu);
+ data->done = true;
+}
+
+void v9fs_reset(V9fsState *s)
+{
+ VirtfsCoResetData data = { .pdu = { .s = s }, .done = false };
+ Coroutine *co;
+
+ while (!QLIST_EMPTY(&s->active_list)) {
+ aio_poll(qemu_get_aio_context(), true);
+ }
+
+ co = qemu_coroutine_create(virtfs_co_reset, &data);
+ qemu_coroutine_enter(co);
+
+ while (!data.done) {
+ aio_poll(qemu_get_aio_context(), true);
+ }
}
static void __attribute__((__constructor__)) v9fs_set_fd_limit(void)
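
Note: the 9p.c hunks above mark every request handler as coroutine_fn. For context, here is a simplified sketch (an assumption about pdu_submit() in the same file, which is not part of the hunks shown) of how a request is dispatched into one of these handlers — each PDU runs in its own coroutine, created and entered exactly like the new v9fs_reset() does:

/* Simplified sketch only; the real pdu_submit() lives in hw/9pfs/9p.c. */
void pdu_submit(V9fsPDU *pdu)
{
    Coroutine *co;
    CoroutineEntry *handler;
    V9fsState *s = pdu->s;

    if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
        pdu_co_handlers[pdu->id] == NULL) {
        handler = v9fs_op_not_supp;         /* unknown request type */
    } else {
        handler = pdu_co_handlers[pdu->id];
    }
    if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
        handler = v9fs_fs_ro;               /* refuse writes on a ro export */
    }
    co = qemu_coroutine_create(handler, pdu);
    qemu_coroutine_enter(co);
}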
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index ccd9b41c24..1b5c1a14e3 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -181,11 +181,12 @@ typedef struct V9fsConf
typedef struct V9fsXattr
{
- int64_t copied_len;
- int64_t len;
+ uint64_t copied_len;
+ uint64_t len;
void *value;
V9fsString name;
int flags;
+ bool xattrwalk_fid;
} V9fsXattr;
typedef struct V9fsDir {
@@ -346,19 +347,21 @@ static inline uint8_t v9fs_request_cancelled(V9fsPDU *pdu)
return pdu->cancelled;
}
-extern void v9fs_reclaim_fd(V9fsPDU *pdu);
-extern void v9fs_path_init(V9fsPath *path);
-extern void v9fs_path_free(V9fsPath *path);
-extern void v9fs_path_copy(V9fsPath *lhs, V9fsPath *rhs);
-extern int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
- const char *name, V9fsPath *path);
-extern int v9fs_device_realize_common(V9fsState *s, Error **errp);
-extern void v9fs_device_unrealize_common(V9fsState *s, Error **errp);
+void coroutine_fn v9fs_reclaim_fd(V9fsPDU *pdu);
+void v9fs_path_init(V9fsPath *path);
+void v9fs_path_free(V9fsPath *path);
+void v9fs_path_sprintf(V9fsPath *path, const char *fmt, ...);
+void v9fs_path_copy(V9fsPath *lhs, V9fsPath *rhs);
+int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
+ const char *name, V9fsPath *path);
+int v9fs_device_realize_common(V9fsState *s, Error **errp);
+void v9fs_device_unrealize_common(V9fsState *s, Error **errp);
ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...);
ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...);
V9fsPDU *pdu_alloc(V9fsState *s);
void pdu_free(V9fsPDU *pdu);
void pdu_submit(V9fsPDU *pdu);
+void v9fs_reset(V9fsState *s);
#endif
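
Note: the new xattrwalk_fid flag replaces the old copied_len == -1 convention for telling TXATTRWALK fids (value fetched from the backend) apart from TXATTRCREATE fids (value buffered until clunk). A minimal illustrative sketch of the clunk-time decision this enables, loosely modelled on v9fs_xattr_fid_clunk() and not copied from the patch:

/* Illustrative only; real clunk code also handles the removexattr case. */
static int coroutine_fn xattr_fid_flush(V9fsPDU *pdu, V9fsFidState *fidp)
{
    if (fidp->fs.xattr.xattrwalk_fid) {
        /* TXATTRWALK fid: the value was only read, nothing to write back. */
        return 0;
    }
    /* TXATTRCREATE fid: push the buffered value to the backend on clunk. */
    return v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
                             fidp->fs.xattr.value, fidp->fs.xattr.len,
                             fidp->fs.xattr.flags);
}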
diff --git a/hw/9pfs/codir.c b/hw/9pfs/codir.c
index d91f9ad6eb..7cd6fce1ad 100644
--- a/hw/9pfs/codir.c
+++ b/hw/9pfs/codir.c
@@ -17,7 +17,8 @@
#include "qemu/coroutine.h"
#include "coth.h"
-int v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
+int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
+ struct dirent **dent)
{
int err;
V9fsState *s = pdu->s;
@@ -59,7 +60,8 @@ off_t v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
return err;
}
-void v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp, off_t offset)
+void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
+ off_t offset)
{
V9fsState *s = pdu->s;
if (v9fs_request_cancelled(pdu)) {
@@ -71,7 +73,7 @@ void v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp, off_t offset)
});
}
-void v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
+void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
V9fsState *s = pdu->s;
if (v9fs_request_cancelled(pdu)) {
@@ -83,8 +85,9 @@ void v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
});
}
-int v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name,
- mode_t mode, uid_t uid, gid_t gid, struct stat *stbuf)
+int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
+ V9fsString *name, mode_t mode, uid_t uid,
+ gid_t gid, struct stat *stbuf)
{
int err;
FsCred cred;
@@ -120,7 +123,7 @@ int v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name,
return err;
}
-int v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
+int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
int err;
V9fsState *s = pdu->s;
@@ -148,7 +151,7 @@ int v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
return err;
}
-int v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
+int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
int err;
V9fsState *s = pdu->s;
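
Note: for reference, a trimmed-down sketch of how a caller in 9p.c consumes v9fs_co_readdir() — roughly the loop inside v9fs_do_readdir(), with marshalling, size accounting and error propagation elided:

/* Sketch only; the real loop also checks the reply size budget. */
static int coroutine_fn example_readdir_loop(V9fsPDU *pdu, V9fsFidState *fidp)
{
    struct dirent *dent;
    int err;

    for (;;) {
        err = v9fs_co_readdir(pdu, fidp, &dent);
        if (err < 0 || dent == NULL) {
            break;              /* backend error or end of directory */
        }
        /* ... marshal one Rreaddir record for dent, track reply size ... */
    }
    return err;
}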
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c
index ac725369fa..efab057eba 100644
--- a/hw/9pfs/cofile.c
+++ b/hw/9pfs/cofile.c
@@ -17,8 +17,8 @@
#include "qemu/coroutine.h"
#include "coth.h"
-int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
- V9fsStatDotl *v9stat)
+int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
+ V9fsStatDotl *v9stat)
{
int err = 0;
V9fsState *s = pdu->s;
@@ -41,7 +41,7 @@ int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode,
return err;
}
-int v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
+int coroutine_fn v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
{
int err;
V9fsState *s = pdu->s;
@@ -61,7 +61,8 @@ int v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
return err;
}
-int v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp, struct stat *stbuf)
+int coroutine_fn v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp,
+ struct stat *stbuf)
{
int err;
V9fsState *s = pdu->s;
@@ -93,7 +94,7 @@ int v9fs_co_fstat(V9fsPDU *pdu, V9fsFidState *fidp, struct stat *stbuf)
return err;
}
-int v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
+int coroutine_fn v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
{
int err;
V9fsState *s = pdu->s;
@@ -121,8 +122,9 @@ int v9fs_co_open(V9fsPDU *pdu, V9fsFidState *fidp, int flags)
return err;
}
-int v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, gid_t gid,
- int flags, int mode, struct stat *stbuf)
+int coroutine_fn v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp,
+ V9fsString *name, gid_t gid, int flags, int mode,
+ struct stat *stbuf)
{
int err;
FsCred cred;
@@ -175,7 +177,7 @@ int v9fs_co_open2(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, gid_t gid,
return err;
}
-int v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
+int coroutine_fn v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
int err;
V9fsState *s = pdu->s;
@@ -196,7 +198,7 @@ int v9fs_co_close(V9fsPDU *pdu, V9fsFidOpenState *fs)
return err;
}
-int v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
+int coroutine_fn v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
{
int err;
V9fsState *s = pdu->s;
@@ -214,8 +216,8 @@ int v9fs_co_fsync(V9fsPDU *pdu, V9fsFidState *fidp, int datasync)
return err;
}
-int v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
- V9fsFidState *newdirfid, V9fsString *name)
+int coroutine_fn v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
+ V9fsFidState *newdirfid, V9fsString *name)
{
int err;
V9fsState *s = pdu->s;
@@ -236,8 +238,8 @@ int v9fs_co_link(V9fsPDU *pdu, V9fsFidState *oldfid,
return err;
}
-int v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
- struct iovec *iov, int iovcnt, int64_t offset)
+int coroutine_fn v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
+ struct iovec *iov, int iovcnt, int64_t offset)
{
int err;
V9fsState *s = pdu->s;
@@ -255,8 +257,8 @@ int v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
return err;
}
-int v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
- struct iovec *iov, int iovcnt, int64_t offset)
+int coroutine_fn v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
+ struct iovec *iov, int iovcnt, int64_t offset)
{
int err;
V9fsState *s = pdu->s;
diff --git a/hw/9pfs/cofs.c b/hw/9pfs/cofs.c
index 70f584fcbd..c62103221d 100644
--- a/hw/9pfs/cofs.c
+++ b/hw/9pfs/cofs.c
@@ -49,7 +49,7 @@ static ssize_t __readlink(V9fsState *s, V9fsPath *path, V9fsString *buf)
return len;
}
-int v9fs_co_readlink(V9fsPDU *pdu, V9fsPath *path, V9fsString *buf)
+int coroutine_fn v9fs_co_readlink(V9fsPDU *pdu, V9fsPath *path, V9fsString *buf)
{
int err;
V9fsState *s = pdu->s;
@@ -69,7 +69,8 @@ int v9fs_co_readlink(V9fsPDU *pdu, V9fsPath *path, V9fsString *buf)
return err;
}
-int v9fs_co_statfs(V9fsPDU *pdu, V9fsPath *path, struct statfs *stbuf)
+int coroutine_fn v9fs_co_statfs(V9fsPDU *pdu, V9fsPath *path,
+ struct statfs *stbuf)
{
int err;
V9fsState *s = pdu->s;
@@ -89,7 +90,7 @@ int v9fs_co_statfs(V9fsPDU *pdu, V9fsPath *path, struct statfs *stbuf)
return err;
}
-int v9fs_co_chmod(V9fsPDU *pdu, V9fsPath *path, mode_t mode)
+int coroutine_fn v9fs_co_chmod(V9fsPDU *pdu, V9fsPath *path, mode_t mode)
{
int err;
FsCred cred;
@@ -112,8 +113,8 @@ int v9fs_co_chmod(V9fsPDU *pdu, V9fsPath *path, mode_t mode)
return err;
}
-int v9fs_co_utimensat(V9fsPDU *pdu, V9fsPath *path,
- struct timespec times[2])
+int coroutine_fn v9fs_co_utimensat(V9fsPDU *pdu, V9fsPath *path,
+ struct timespec times[2])
{
int err;
V9fsState *s = pdu->s;
@@ -133,7 +134,8 @@ int v9fs_co_utimensat(V9fsPDU *pdu, V9fsPath *path,
return err;
}
-int v9fs_co_chown(V9fsPDU *pdu, V9fsPath *path, uid_t uid, gid_t gid)
+int coroutine_fn v9fs_co_chown(V9fsPDU *pdu, V9fsPath *path, uid_t uid,
+ gid_t gid)
{
int err;
FsCred cred;
@@ -157,7 +159,7 @@ int v9fs_co_chown(V9fsPDU *pdu, V9fsPath *path, uid_t uid, gid_t gid)
return err;
}
-int v9fs_co_truncate(V9fsPDU *pdu, V9fsPath *path, off_t size)
+int coroutine_fn v9fs_co_truncate(V9fsPDU *pdu, V9fsPath *path, off_t size)
{
int err;
V9fsState *s = pdu->s;
@@ -177,8 +179,9 @@ int v9fs_co_truncate(V9fsPDU *pdu, V9fsPath *path, off_t size)
return err;
}
-int v9fs_co_mknod(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, uid_t uid,
- gid_t gid, dev_t dev, mode_t mode, struct stat *stbuf)
+int coroutine_fn v9fs_co_mknod(V9fsPDU *pdu, V9fsFidState *fidp,
+ V9fsString *name, uid_t uid, gid_t gid,
+ dev_t dev, mode_t mode, struct stat *stbuf)
{
int err;
V9fsPath path;
@@ -216,7 +219,7 @@ int v9fs_co_mknod(V9fsPDU *pdu, V9fsFidState *fidp, V9fsString *name, uid_t uid,
}
/* Only works with path name based fid */
-int v9fs_co_remove(V9fsPDU *pdu, V9fsPath *path)
+int coroutine_fn v9fs_co_remove(V9fsPDU *pdu, V9fsPath *path)
{
int err;
V9fsState *s = pdu->s;
@@ -236,7 +239,8 @@ int v9fs_co_remove(V9fsPDU *pdu, V9fsPath *path)
return err;
}
-int v9fs_co_unlinkat(V9fsPDU *pdu, V9fsPath *path, V9fsString *name, int flags)
+int coroutine_fn v9fs_co_unlinkat(V9fsPDU *pdu, V9fsPath *path,
+ V9fsString *name, int flags)
{
int err;
V9fsState *s = pdu->s;
@@ -257,7 +261,8 @@ int v9fs_co_unlinkat(V9fsPDU *pdu, V9fsPath *path, V9fsString *name, int flags)
}
/* Only work with path name based fid */
-int v9fs_co_rename(V9fsPDU *pdu, V9fsPath *oldpath, V9fsPath *newpath)
+int coroutine_fn v9fs_co_rename(V9fsPDU *pdu, V9fsPath *oldpath,
+ V9fsPath *newpath)
{
int err;
V9fsState *s = pdu->s;
@@ -275,8 +280,9 @@ int v9fs_co_rename(V9fsPDU *pdu, V9fsPath *oldpath, V9fsPath *newpath)
return err;
}
-int v9fs_co_renameat(V9fsPDU *pdu, V9fsPath *olddirpath, V9fsString *oldname,
- V9fsPath *newdirpath, V9fsString *newname)
+int coroutine_fn v9fs_co_renameat(V9fsPDU *pdu, V9fsPath *olddirpath,
+ V9fsString *oldname, V9fsPath *newdirpath,
+ V9fsString *newname)
{
int err;
V9fsState *s = pdu->s;
@@ -295,8 +301,9 @@ int v9fs_co_renameat(V9fsPDU *pdu, V9fsPath *olddirpath, V9fsString *oldname,
return err;
}
-int v9fs_co_symlink(V9fsPDU *pdu, V9fsFidState *dfidp, V9fsString *name,
- const char *oldpath, gid_t gid, struct stat *stbuf)
+int coroutine_fn v9fs_co_symlink(V9fsPDU *pdu, V9fsFidState *dfidp,
+ V9fsString *name, const char *oldpath,
+ gid_t gid, struct stat *stbuf)
{
int err;
FsCred cred;
@@ -337,8 +344,8 @@ int v9fs_co_symlink(V9fsPDU *pdu, V9fsFidState *dfidp, V9fsString *name,
* For path name based fid we don't block. So we can
* directly call the fs driver ops.
*/
-int v9fs_co_name_to_path(V9fsPDU *pdu, V9fsPath *dirpath,
- const char *name, V9fsPath *path)
+int coroutine_fn v9fs_co_name_to_path(V9fsPDU *pdu, V9fsPath *dirpath,
+ const char *name, V9fsPath *path)
{
int err;
V9fsState *s = pdu->s;
diff --git a/hw/9pfs/coth.h b/hw/9pfs/coth.h
index 3c7424e423..19e4d9287e 100644
--- a/hw/9pfs/coth.h
+++ b/hw/9pfs/coth.h
@@ -47,52 +47,53 @@
qemu_coroutine_yield(); \
} while (0)
-extern void co_run_in_worker_bh(void *);
-extern int v9fs_co_readlink(V9fsPDU *, V9fsPath *, V9fsString *);
-extern int v9fs_co_readdir(V9fsPDU *, V9fsFidState *, struct dirent **);
-extern off_t v9fs_co_telldir(V9fsPDU *, V9fsFidState *);
-extern void v9fs_co_seekdir(V9fsPDU *, V9fsFidState *, off_t);
-extern void v9fs_co_rewinddir(V9fsPDU *, V9fsFidState *);
-extern int v9fs_co_statfs(V9fsPDU *, V9fsPath *, struct statfs *);
-extern int v9fs_co_lstat(V9fsPDU *, V9fsPath *, struct stat *);
-extern int v9fs_co_chmod(V9fsPDU *, V9fsPath *, mode_t);
-extern int v9fs_co_utimensat(V9fsPDU *, V9fsPath *, struct timespec [2]);
-extern int v9fs_co_chown(V9fsPDU *, V9fsPath *, uid_t, gid_t);
-extern int v9fs_co_truncate(V9fsPDU *, V9fsPath *, off_t);
-extern int v9fs_co_llistxattr(V9fsPDU *, V9fsPath *, void *, size_t);
-extern int v9fs_co_lgetxattr(V9fsPDU *, V9fsPath *,
- V9fsString *, void *, size_t);
-extern int v9fs_co_mknod(V9fsPDU *, V9fsFidState *, V9fsString *, uid_t,
- gid_t, dev_t, mode_t, struct stat *);
-extern int v9fs_co_mkdir(V9fsPDU *, V9fsFidState *, V9fsString *,
- mode_t, uid_t, gid_t, struct stat *);
-extern int v9fs_co_remove(V9fsPDU *, V9fsPath *);
-extern int v9fs_co_rename(V9fsPDU *, V9fsPath *, V9fsPath *);
-extern int v9fs_co_unlinkat(V9fsPDU *, V9fsPath *, V9fsString *, int flags);
-extern int v9fs_co_renameat(V9fsPDU *, V9fsPath *, V9fsString *,
- V9fsPath *, V9fsString *);
-extern int v9fs_co_fstat(V9fsPDU *, V9fsFidState *, struct stat *);
-extern int v9fs_co_opendir(V9fsPDU *, V9fsFidState *);
-extern int v9fs_co_open(V9fsPDU *, V9fsFidState *, int);
-extern int v9fs_co_open2(V9fsPDU *, V9fsFidState *, V9fsString *,
- gid_t, int, int, struct stat *);
-extern int v9fs_co_lsetxattr(V9fsPDU *, V9fsPath *, V9fsString *,
- void *, size_t, int);
-extern int v9fs_co_lremovexattr(V9fsPDU *, V9fsPath *, V9fsString *);
-extern int v9fs_co_closedir(V9fsPDU *, V9fsFidOpenState *);
-extern int v9fs_co_close(V9fsPDU *, V9fsFidOpenState *);
-extern int v9fs_co_fsync(V9fsPDU *, V9fsFidState *, int);
-extern int v9fs_co_symlink(V9fsPDU *, V9fsFidState *, V9fsString *,
- const char *, gid_t, struct stat *);
-extern int v9fs_co_link(V9fsPDU *, V9fsFidState *,
- V9fsFidState *, V9fsString *);
-extern int v9fs_co_pwritev(V9fsPDU *, V9fsFidState *,
- struct iovec *, int, int64_t);
-extern int v9fs_co_preadv(V9fsPDU *, V9fsFidState *,
- struct iovec *, int, int64_t);
-extern int v9fs_co_name_to_path(V9fsPDU *, V9fsPath *,
- const char *, V9fsPath *);
-extern int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t,
- V9fsStatDotl *v9stat);
+void co_run_in_worker_bh(void *);
+int coroutine_fn v9fs_co_readlink(V9fsPDU *, V9fsPath *, V9fsString *);
+int coroutine_fn v9fs_co_readdir(V9fsPDU *, V9fsFidState *, struct dirent **);
+off_t coroutine_fn v9fs_co_telldir(V9fsPDU *, V9fsFidState *);
+void coroutine_fn v9fs_co_seekdir(V9fsPDU *, V9fsFidState *, off_t);
+void coroutine_fn v9fs_co_rewinddir(V9fsPDU *, V9fsFidState *);
+int coroutine_fn v9fs_co_statfs(V9fsPDU *, V9fsPath *, struct statfs *);
+int coroutine_fn v9fs_co_lstat(V9fsPDU *, V9fsPath *, struct stat *);
+int coroutine_fn v9fs_co_chmod(V9fsPDU *, V9fsPath *, mode_t);
+int coroutine_fn v9fs_co_utimensat(V9fsPDU *, V9fsPath *, struct timespec [2]);
+int coroutine_fn v9fs_co_chown(V9fsPDU *, V9fsPath *, uid_t, gid_t);
+int coroutine_fn v9fs_co_truncate(V9fsPDU *, V9fsPath *, off_t);
+int coroutine_fn v9fs_co_llistxattr(V9fsPDU *, V9fsPath *, void *, size_t);
+int coroutine_fn v9fs_co_lgetxattr(V9fsPDU *, V9fsPath *,
+ V9fsString *, void *, size_t);
+int coroutine_fn v9fs_co_mknod(V9fsPDU *, V9fsFidState *, V9fsString *, uid_t,
+ gid_t, dev_t, mode_t, struct stat *);
+int coroutine_fn v9fs_co_mkdir(V9fsPDU *, V9fsFidState *, V9fsString *,
+ mode_t, uid_t, gid_t, struct stat *);
+int coroutine_fn v9fs_co_remove(V9fsPDU *, V9fsPath *);
+int coroutine_fn v9fs_co_rename(V9fsPDU *, V9fsPath *, V9fsPath *);
+int coroutine_fn v9fs_co_unlinkat(V9fsPDU *, V9fsPath *, V9fsString *,
+ int flags);
+int coroutine_fn v9fs_co_renameat(V9fsPDU *, V9fsPath *, V9fsString *,
+ V9fsPath *, V9fsString *);
+int coroutine_fn v9fs_co_fstat(V9fsPDU *, V9fsFidState *, struct stat *);
+int coroutine_fn v9fs_co_opendir(V9fsPDU *, V9fsFidState *);
+int coroutine_fn v9fs_co_open(V9fsPDU *, V9fsFidState *, int);
+int coroutine_fn v9fs_co_open2(V9fsPDU *, V9fsFidState *, V9fsString *,
+ gid_t, int, int, struct stat *);
+int coroutine_fn v9fs_co_lsetxattr(V9fsPDU *, V9fsPath *, V9fsString *,
+ void *, size_t, int);
+int coroutine_fn v9fs_co_lremovexattr(V9fsPDU *, V9fsPath *, V9fsString *);
+int coroutine_fn v9fs_co_closedir(V9fsPDU *, V9fsFidOpenState *);
+int coroutine_fn v9fs_co_close(V9fsPDU *, V9fsFidOpenState *);
+int coroutine_fn v9fs_co_fsync(V9fsPDU *, V9fsFidState *, int);
+int coroutine_fn v9fs_co_symlink(V9fsPDU *, V9fsFidState *, V9fsString *,
+ const char *, gid_t, struct stat *);
+int coroutine_fn v9fs_co_link(V9fsPDU *, V9fsFidState *,
+ V9fsFidState *, V9fsString *);
+int coroutine_fn v9fs_co_pwritev(V9fsPDU *, V9fsFidState *,
+ struct iovec *, int, int64_t);
+int coroutine_fn v9fs_co_preadv(V9fsPDU *, V9fsFidState *,
+ struct iovec *, int, int64_t);
+int coroutine_fn v9fs_co_name_to_path(V9fsPDU *, V9fsPath *,
+ const char *, V9fsPath *);
+int coroutine_fn v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t,
+ V9fsStatDotl *v9stat);
#endif
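
Note: all of these prototypes now carry coroutine_fn because every wrapper yields back to the main loop while a worker thread performs the blocking syscall. A representative sketch of the common shape, mirroring v9fs_co_lstat() from cofile.c (not new code) and assuming the v9fs_co_run_in_worker() macro defined earlier in this header:

int coroutine_fn v9fs_co_lstat(V9fsPDU *pdu, V9fsPath *path, struct stat *stbuf)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;                 /* the client flushed this request */
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->lstat(&s->ctx, path, stbuf);
            if (err < 0) {
                err = -errno;
            }
        });
    v9fs_path_unlock(s);
    return err;
}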
diff --git a/hw/9pfs/coxattr.c b/hw/9pfs/coxattr.c
index 133c4ead37..154392eade 100644
--- a/hw/9pfs/coxattr.c
+++ b/hw/9pfs/coxattr.c
@@ -17,7 +17,8 @@
#include "qemu/coroutine.h"
#include "coth.h"
-int v9fs_co_llistxattr(V9fsPDU *pdu, V9fsPath *path, void *value, size_t size)
+int coroutine_fn v9fs_co_llistxattr(V9fsPDU *pdu, V9fsPath *path, void *value,
+ size_t size)
{
int err;
V9fsState *s = pdu->s;
@@ -37,9 +38,9 @@ int v9fs_co_llistxattr(V9fsPDU *pdu, V9fsPath *path, void *value, size_t size)
return err;
}
-int v9fs_co_lgetxattr(V9fsPDU *pdu, V9fsPath *path,
- V9fsString *xattr_name,
- void *value, size_t size)
+int coroutine_fn v9fs_co_lgetxattr(V9fsPDU *pdu, V9fsPath *path,
+ V9fsString *xattr_name, void *value,
+ size_t size)
{
int err;
V9fsState *s = pdu->s;
@@ -61,9 +62,9 @@ int v9fs_co_lgetxattr(V9fsPDU *pdu, V9fsPath *path,
return err;
}
-int v9fs_co_lsetxattr(V9fsPDU *pdu, V9fsPath *path,
- V9fsString *xattr_name, void *value,
- size_t size, int flags)
+int coroutine_fn v9fs_co_lsetxattr(V9fsPDU *pdu, V9fsPath *path,
+ V9fsString *xattr_name, void *value,
+ size_t size, int flags)
{
int err;
V9fsState *s = pdu->s;
@@ -85,8 +86,8 @@ int v9fs_co_lsetxattr(V9fsPDU *pdu, V9fsPath *path,
return err;
}
-int v9fs_co_lremovexattr(V9fsPDU *pdu, V9fsPath *path,
- V9fsString *xattr_name)
+int coroutine_fn v9fs_co_lremovexattr(V9fsPDU *pdu, V9fsPath *path,
+ V9fsString *xattr_name)
{
int err;
V9fsState *s = pdu->s;
diff --git a/hw/9pfs/trace-events b/hw/9pfs/trace-events
index 48d3d8abed..fb4de3d465 100644
--- a/hw/9pfs/trace-events
+++ b/hw/9pfs/trace-events
@@ -42,6 +42,6 @@ v9fs_mkdir(uint16_t tag, uint8_t id, int32_t fid, char* name, int mode, uint32_t
v9fs_mkdir_return(uint16_t tag, uint8_t id, int8_t type, int32_t version, int64_t path, int err) "tag %u id %u qid={type %d version %d path %"PRId64"} err %d"
v9fs_xattrwalk(uint16_t tag, uint8_t id, int32_t fid, int32_t newfid, char* name) "tag %d id %d fid %d newfid %d name %s"
v9fs_xattrwalk_return(uint16_t tag, uint8_t id, int64_t size) "tag %d id %d size %"PRId64
-v9fs_xattrcreate(uint16_t tag, uint8_t id, int32_t fid, char* name, int64_t size, int flags) "tag %d id %d fid %d name %s size %"PRId64" flags %d"
+v9fs_xattrcreate(uint16_t tag, uint8_t id, int32_t fid, char* name, uint64_t size, int flags) "tag %d id %d fid %d name %s size %"PRIu64" flags %d"
v9fs_readlink(uint16_t tag, uint8_t id, int32_t fid) "tag %d id %d fid %d"
v9fs_readlink_return(uint16_t tag, uint8_t id, char* target) "tag %d id %d name %s"
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 009b43f6d0..1782e4a227 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -41,6 +41,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
V9fsState *s = &v->state;
V9fsPDU *pdu;
ssize_t len;
+ VirtQueueElement *elem;
while ((pdu = pdu_alloc(s))) {
struct {
@@ -48,21 +49,28 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
uint8_t id;
uint16_t tag_le;
} QEMU_PACKED out;
- VirtQueueElement *elem;
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
- pdu_free(pdu);
- break;
+ goto out_free_pdu;
}
- BUG_ON(elem->out_num == 0 || elem->in_num == 0);
- QEMU_BUILD_BUG_ON(sizeof out != 7);
+ if (elem->in_num == 0) {
+ virtio_error(vdev,
+ "The guest sent a VirtFS request without space for "
+ "the reply");
+ goto out_free_req;
+ }
+ QEMU_BUILD_BUG_ON(sizeof(out) != 7);
v->elems[pdu->idx] = elem;
len = iov_to_buf(elem->out_sg, elem->out_num, 0,
- &out, sizeof out);
- BUG_ON(len != sizeof out);
+ &out, sizeof(out));
+ if (len != sizeof(out)) {
+ virtio_error(vdev, "The guest sent a malformed VirtFS request: "
+ "header size is %zd, should be 7", len);
+ goto out_free_req;
+ }
pdu->size = le32_to_cpu(out.size_le);
@@ -72,6 +80,14 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
qemu_co_queue_init(&pdu->complete);
pdu_submit(pdu);
}
+
+ return;
+
+out_free_req:
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+out_free_pdu:
+ pdu_free(pdu);
}
static uint64_t virtio_9p_get_features(VirtIODevice *vdev, uint64_t features,
@@ -97,11 +113,6 @@ static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config)
g_free(cfg);
}
-static int virtio_9p_load(QEMUFile *f, void *opaque, size_t size)
-{
- return virtio_load(VIRTIO_DEVICE(opaque), f, 1);
-}
-
static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -130,6 +141,13 @@ static void virtio_9p_device_unrealize(DeviceState *dev, Error **errp)
v9fs_device_unrealize_common(s, errp);
}
+static void virtio_9p_reset(VirtIODevice *vdev)
+{
+ V9fsVirtioState *v = (V9fsVirtioState *)vdev;
+
+ v9fs_reset(&v->state);
+}
+
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap)
{
@@ -168,7 +186,15 @@ void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
/* virtio-9p device */
-VMSTATE_VIRTIO_DEVICE(9p, 1, virtio_9p_load, virtio_vmstate_save);
+static const VMStateDescription vmstate_virtio_9p = {
+ .name = "virtio-9p",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
static Property virtio_9p_properties[] = {
DEFINE_PROP_STRING("mount_tag", V9fsVirtioState, state.fsconf.tag),
@@ -188,6 +214,7 @@ static void virtio_9p_class_init(ObjectClass *klass, void *data)
vdc->unrealize = virtio_9p_device_unrealize;
vdc->get_features = virtio_9p_get_features;
vdc->get_config = virtio_9p_get_config;
+ vdc->reset = virtio_9p_reset;
}
static const TypeInfo virtio_device_info = {
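
Note: with the open-coded VMStateDescription replacing the old VMSTATE_VIRTIO_DEVICE(...) one-liner, the description is expected to be registered through DeviceClass::vmsd. A minimal sketch of that wiring (the vdc->reset assignment appears in the hunk above; the dc->vmsd line and the trimming of the other callbacks are assumptions for illustration):

/* Sketch; the real virtio_9p_class_init() also sets realize, unrealize,
 * get_features, get_config and the property list. */
static void virtio_9p_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_virtio_9p;   /* new-style virtio migration section */
    vdc->reset = virtio_9p_reset;    /* drain PDUs and reset all 9p state */
}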
diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
index 7586b792d6..25c47c7cb6 100644
--- a/hw/9pfs/virtio-9p.h
+++ b/hw/9pfs/virtio-9p.h
@@ -15,7 +15,7 @@ typedef struct V9fsVirtioState
V9fsState state;
} V9fsVirtioState;
-extern void virtio_9p_push_and_notify(V9fsPDU *pdu);
+void virtio_9p_push_and_notify(V9fsPDU *pdu);
ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap);
diff --git a/hw/Makefile.objs b/hw/Makefile.objs
index 791e6731d2..3b629a7111 100644
--- a/hw/Makefile.objs
+++ b/hw/Makefile.objs
@@ -1,5 +1,6 @@
devices-dirs-$(call land, $(CONFIG_VIRTIO),$(call land,$(CONFIG_VIRTFS),$(CONFIG_PCI))) += 9pfs/
devices-dirs-$(CONFIG_ACPI) += acpi/
+devices-dirs-$(CONFIG_SOFTMMU) += adc/
devices-dirs-$(CONFIG_SOFTMMU) += audio/
devices-dirs-$(CONFIG_SOFTMMU) += block/
devices-dirs-$(CONFIG_SOFTMMU) += bt/
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 4b7da6639f..489e63bb75 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -3,7 +3,7 @@ common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
+common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index db3e914fb4..b2a1e4033b 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -226,7 +226,7 @@ static void build_extop_package(GArray *package, uint8_t op)
build_prepend_byte(package, 0x5B); /* ExtOpPrefix */
}
-static void build_append_int_noprefix(GArray *table, uint64_t value, int size)
+void build_append_int_noprefix(GArray *table, uint64_t value, int size)
{
int i;
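
Note: dropping static here exports the helper; a matching declaration is assumed (not shown in this diff) to be added to include/hw/acpi/aml-build.h so other ACPI table builders can append raw little-endian fields without the AML integer prefix:

/* Assumed counterpart declaration in include/hw/acpi/aml-build.h: */
void build_append_int_noprefix(GArray *table, uint64_t value, int size);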
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index c13b65c2c9..5ac89fefaf 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -4,6 +4,7 @@
#include "qapi/error.h"
#include "qapi-event.h"
#include "trace.h"
+#include "sysemu/numa.h"
#define ACPI_CPU_HOTPLUG_REG_LEN 12
#define ACPI_CPU_SELECTOR_OFFSET_WR 0
@@ -503,6 +504,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
/* build Processor object for each processor */
for (i = 0; i < arch_ids->len; i++) {
+ int j;
Aml *dev;
Aml *uid = aml_int(i);
GArray *madt_buf = g_array_new(0, 1, 1);
@@ -529,6 +531,11 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
apic->flags = cpu_to_le32(1);
break;
}
+ case ACPI_APIC_LOCAL_X2APIC: {
+ AcpiMadtProcessorX2Apic *apic = (void *)madt_buf->data;
+ apic->flags = cpu_to_le32(1);
+ break;
+ }
default:
assert(0);
}
@@ -546,6 +553,16 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_arg(1), aml_arg(2))
);
aml_append(dev, method);
+
+ /* Linux guests discard SRAT info for non-present CPUs
+ * as a result _PXM is required for all CPUs which might
+ * be hot-plugged. For simplicity, add it for all CPUs.
+ */
+ j = numa_get_node_for_cpu(i);
+ if (j < nb_numa_nodes) {
+ aml_append(dev, aml_name_decl("_PXM", aml_int(j)));
+ }
+
aml_append(cpus_dev, dev);
}
}
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index e19d902063..f15a2402fc 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -15,6 +15,7 @@
#include "qapi/error.h"
#include "qom/cpu.h"
#include "hw/i386/pc.h"
+#include "qemu/error-report.h"
#define CPU_EJECT_METHOD "CPEJ"
#define CPU_MAT_METHOD "CPMA"
@@ -63,7 +64,8 @@ static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu,
cpu_id = k->get_arch_id(cpu);
if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) {
- error_setg(errp, "acpi: invalid cpu id: %" PRIi64, cpu_id);
+ object_property_set_bool(g->device, false, "cpu-hotplug-legacy",
+ &error_abort);
return;
}
@@ -85,13 +87,14 @@ void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
{
CPUState *cpu;
- CPU_FOREACH(cpu) {
- acpi_set_cpu_present_bit(gpe_cpu, cpu, &error_abort);
- }
memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops,
gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN);
memory_region_add_subregion(parent, base, &gpe_cpu->io);
gpe_cpu->device = owner;
+
+ CPU_FOREACH(cpu) {
+ acpi_set_cpu_present_bit(gpe_cpu, cpu, &error_abort);
+ }
}
void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
@@ -234,7 +237,11 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
/* The current AML generator can cover the APIC ID range [0..255],
* inclusive, for VCPU hotplug. */
QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
- g_assert(pcms->apic_id_limit <= ACPI_CPU_HOTPLUG_ID_LIMIT);
+ if (pcms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
+ error_report("max_cpus is too large. APIC ID of last CPU is %u",
+ pcms->apic_id_limit - 1);
+ exit(1);
+ }
/* create PCI0.PRES device and its _CRS to reserve CPU hotplug MMIO */
dev = aml_device("PCI0." stringify(CPU_HOTPLUG_RESOURCE_DEVICE));
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index a2ec217df8..fdd3c91dc0 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -491,8 +491,12 @@ void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
if (lpc->pm.acpi_memory_hotplug.is_enabled &&
object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
- acpi_memory_plug_cb(hotplug_dev, &lpc->pm.acpi_memory_hotplug,
- dev, errp);
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ nvdimm_acpi_plug_cb(hotplug_dev, dev);
+ } else {
+ acpi_memory_plug_cb(hotplug_dev, &lpc->pm.acpi_memory_hotplug,
+ dev, errp);
+ }
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
if (lpc->pm.cpu_hotplug_legacy) {
legacy_acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.gpe_cpu, dev, errp);
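
Note: the ich9 hotplug callback now routes NVDIMMs to nvdimm_acpi_plug_cb() so that a GPE notification, rather than the plain memory-hotplug path, reaches the guest. The hw/acpi/piix4.c change listed in the diffstat is expected to mirror this; a hedged sketch of that branch (assumed shape, not shown in this section):

/* Assumed shape of the matching piix4_device_plug_cb() branch: */
if (s->acpi_memory_hotplug.is_enabled &&
    object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
        nvdimm_acpi_plug_cb(hotplug_dev, dev);
    } else {
        acpi_memory_plug_cb(hotplug_dev, &s->acpi_memory_hotplug, dev, errp);
    }
}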
diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c
index 7e74ce4460..651e2e94ea 100644
--- a/hw/acpi/ipmi.c
+++ b/hw/acpi/ipmi.c
@@ -99,6 +99,7 @@ void build_acpi_ipmi_devices(Aml *scope, BusState *bus)
ii = IPMI_INTERFACE(obj);
iic = IPMI_INTERFACE_GET_CLASS(obj);
+ memset(&info, 0, sizeof(info));
iic->get_fwinfo(ii, &info);
aml_append(scope, aml_ipmi_device(&info));
}
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index e486128aa1..8e7d6ec034 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -33,35 +33,30 @@
#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"
-static int nvdimm_plugged_device_list(Object *obj, void *opaque)
+static int nvdimm_device_list(Object *obj, void *opaque)
{
GSList **list = opaque;
if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
- DeviceState *dev = DEVICE(obj);
-
- if (dev->realized) { /* only realized NVDIMMs matter */
- *list = g_slist_append(*list, DEVICE(obj));
- }
+ *list = g_slist_append(*list, DEVICE(obj));
}
- object_child_foreach(obj, nvdimm_plugged_device_list, opaque);
+ object_child_foreach(obj, nvdimm_device_list, opaque);
return 0;
}
/*
- * inquire plugged NVDIMM devices and link them into the list which is
+ * inquire NVDIMM devices and link them into the list which is
* returned to the caller.
*
* Note: it is the caller's responsibility to free the list to avoid
* memory leak.
*/
-static GSList *nvdimm_get_plugged_device_list(void)
+static GSList *nvdimm_get_device_list(void)
{
GSList *list = NULL;
- object_child_foreach(qdev_get_machine(), nvdimm_plugged_device_list,
- &list);
+ object_child_foreach(qdev_get_machine(), nvdimm_device_list, &list);
return list;
}
@@ -219,7 +214,7 @@ static uint32_t nvdimm_slot_to_dcr_index(int slot)
static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
{
NVDIMMDevice *nvdimm = NULL;
- GSList *list, *device_list = nvdimm_get_plugged_device_list();
+ GSList *list, *device_list = nvdimm_get_device_list();
for (list = device_list; list; list = list->next) {
NVDIMMDevice *nvd = list->data;
@@ -289,8 +284,6 @@ static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
NvdimmNfitMemDev *nfit_memdev;
- uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
- NULL);
uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
NULL);
int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
@@ -314,7 +307,8 @@ nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
/* The memory region on the device. */
nfit_memdev->region_len = cpu_to_le64(size);
- nfit_memdev->region_dpa = cpu_to_le64(addr);
+ /* The device address starts from 0. */
+ nfit_memdev->region_dpa = cpu_to_le64(0);
/* Only one interleave for PMEM. */
nfit_memdev->interleave_ways = cpu_to_le16(1);
@@ -349,8 +343,9 @@ static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
(DSM) in DSM Spec Rev1.*/);
}
-static GArray *nvdimm_build_device_structure(GSList *device_list)
+static GArray *nvdimm_build_device_structure(void)
{
+ GSList *device_list = nvdimm_get_device_list();
GArray *structures = g_array_new(false, true /* clear */, 1);
for (; device_list; device_list = device_list->next) {
@@ -368,14 +363,32 @@ static GArray *nvdimm_build_device_structure(GSList *device_list)
/* build NVDIMM Control Region Structure. */
nvdimm_build_structure_dcr(structures, dev);
}
+ g_slist_free(device_list);
return structures;
}
-static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
+static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+ fit_buf->fit = g_array_new(false, true /* clear */, 1);
+}
+
+static void nvdimm_build_fit_buffer(NvdimmFitBuffer *fit_buf)
+{
+ g_array_free(fit_buf->fit, true);
+ fit_buf->fit = nvdimm_build_device_structure();
+ fit_buf->dirty = true;
+}
+
+void nvdimm_plug(AcpiNVDIMMState *state)
+{
+ nvdimm_build_fit_buffer(&state->fit_buf);
+}
+
+static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets,
GArray *table_data, BIOSLinker *linker)
{
- GArray *structures = nvdimm_build_device_structure(device_list);
+ NvdimmFitBuffer *fit_buf = &state->fit_buf;
unsigned int header;
acpi_add_table(table_offsets, table_data);
@@ -384,14 +397,15 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
header = table_data->len;
acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
/* NVDIMM device structures. */
- g_array_append_vals(table_data, structures->data, structures->len);
+ g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
build_header(linker, table_data,
(void *)(table_data->data + header), "NFIT",
- sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
- g_array_free(structures, true);
+ sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
}
+#define NVDIMM_DSM_MEMORY_SIZE 4096
+
struct NvdimmDsmIn {
uint32_t handle;
uint32_t revision;
@@ -402,7 +416,7 @@ struct NvdimmDsmIn {
};
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
-QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != 4096);
+QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != NVDIMM_DSM_MEMORY_SIZE);
struct NvdimmDsmOut {
/* the size of buffer filled by QEMU. */
@@ -410,7 +424,7 @@ struct NvdimmDsmOut {
uint8_t data[4092];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
-QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != 4096);
+QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != NVDIMM_DSM_MEMORY_SIZE);
struct NvdimmDsmFunc0Out {
/* the size of buffer filled by QEMU. */
@@ -438,7 +452,7 @@ struct NvdimmFuncGetLabelSizeOut {
uint32_t max_xfer;
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
-QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > 4096);
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > NVDIMM_DSM_MEMORY_SIZE);
struct NvdimmFuncGetLabelDataIn {
uint32_t offset; /* the offset in the namespace label data area. */
@@ -446,7 +460,7 @@ struct NvdimmFuncGetLabelDataIn {
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
- offsetof(NvdimmDsmIn, arg3) > 4096);
+ offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);
struct NvdimmFuncGetLabelDataOut {
/* the size of buffer filled by QEMU. */
@@ -455,7 +469,7 @@ struct NvdimmFuncGetLabelDataOut {
uint8_t out_buf[0]; /* the data got via Get Namespace Label function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
-QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > 4096);
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE);
struct NvdimmFuncSetLabelDataIn {
uint32_t offset; /* the offset in the namespace label data area. */
@@ -464,7 +478,23 @@ struct NvdimmFuncSetLabelDataIn {
} QEMU_PACKED;
typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
- offsetof(NvdimmDsmIn, arg3) > 4096);
+ offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);
+
+struct NvdimmFuncReadFITIn {
+ uint32_t offset; /* the offset into FIT buffer. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
+ offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);
+
+struct NvdimmFuncReadFITOut {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t func_ret_status; /* return status code. */
+ uint8_t fit[0]; /* the FIT data. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);
static void
nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
@@ -486,6 +516,79 @@ nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}
+#define NVDIMM_DSM_RET_STATUS_SUCCESS 0 /* Success */
+#define NVDIMM_DSM_RET_STATUS_UNSUPPORT 1 /* Not Supported */
+#define NVDIMM_DSM_RET_STATUS_NOMEMDEV 2 /* Non-Existing Memory Device */
+#define NVDIMM_DSM_RET_STATUS_INVALID 3 /* Invalid Input Parameters */
+#define NVDIMM_DSM_RET_STATUS_FIT_CHANGED 0x100 /* FIT Changed */
+
+#define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
+
+/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
+static void nvdimm_dsm_func_read_fit(AcpiNVDIMMState *state, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ NvdimmFitBuffer *fit_buf = &state->fit_buf;
+ NvdimmFuncReadFITIn *read_fit;
+ NvdimmFuncReadFITOut *read_fit_out;
+ GArray *fit;
+ uint32_t read_len = 0, func_ret_status;
+ int size;
+
+ read_fit = (NvdimmFuncReadFITIn *)in->arg3;
+ le32_to_cpus(&read_fit->offset);
+
+ fit = fit_buf->fit;
+
+ nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n",
+ read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");
+
+ if (read_fit->offset > fit->len) {
+ func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID;
+ goto exit;
+ }
+
+ /* It is the first time to read FIT. */
+ if (!read_fit->offset) {
+ fit_buf->dirty = false;
+ } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
+ func_ret_status = NVDIMM_DSM_RET_STATUS_FIT_CHANGED;
+ goto exit;
+ }
+
+ func_ret_status = NVDIMM_DSM_RET_STATUS_SUCCESS;
+ read_len = MIN(fit->len - read_fit->offset,
+ NVDIMM_DSM_MEMORY_SIZE - sizeof(NvdimmFuncReadFITOut));
+
+exit:
+ size = sizeof(NvdimmFuncReadFITOut) + read_len;
+ read_fit_out = g_malloc(size);
+
+ read_fit_out->len = cpu_to_le32(size);
+ read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
+ memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);
+
+ cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);
+
+ g_free(read_fit_out);
+}
+
+static void
+nvdimm_dsm_handle_reserved_root_method(AcpiNVDIMMState *state,
+ NvdimmDsmIn *in, hwaddr dsm_mem_addr)
+{
+ switch (in->function) {
+ case 0x0:
+ nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
+ return;
+ case 0x1 /* Read FIT */:
+ nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
+ return;
+ }
+
+ nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
+}
+
static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
/*
@@ -499,7 +602,7 @@ static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
}
/* No function except function 0 is supported yet. */
- nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+ nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}
/*
@@ -509,7 +612,9 @@ static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
*/
static uint32_t nvdimm_get_max_xfer_label_size(void)
{
- uint32_t max_get_size, max_set_size, dsm_memory_size = 4096;
+ uint32_t max_get_size, max_set_size, dsm_memory_size;
+
+ dsm_memory_size = NVDIMM_DSM_MEMORY_SIZE;
/*
* the max data ACPI can read one time which is transferred by
@@ -545,7 +650,7 @@ static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);
- label_size_out.func_ret_status = cpu_to_le32(0 /* Success */);
+ label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
label_size_out.label_size = cpu_to_le32(label_size);
label_size_out.max_xfer = cpu_to_le32(mxfer);
@@ -556,7 +661,7 @@ static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
uint32_t offset, uint32_t length)
{
- uint32_t ret = 3 /* Invalid Input Parameters */;
+ uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID;
if (offset + length < offset) {
nvdimm_debug("offset %#x + length %#x is overflow.\n", offset,
@@ -576,7 +681,7 @@ static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
return ret;
}
- return 0 /* Success */;
+ return NVDIMM_DSM_RET_STATUS_SUCCESS;
}
/*
@@ -600,17 +705,18 @@ static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
get_label_data->length);
- if (status != 0 /* Success */) {
+ if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
nvdimm_dsm_no_payload(status, dsm_mem_addr);
return;
}
size = sizeof(*get_label_data_out) + get_label_data->length;
- assert(size <= 4096);
+ assert(size <= NVDIMM_DSM_MEMORY_SIZE);
get_label_data_out = g_malloc(size);
get_label_data_out->len = cpu_to_le32(size);
- get_label_data_out->func_ret_status = cpu_to_le32(0 /* Success */);
+ get_label_data_out->func_ret_status =
+ cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
get_label_data->length, get_label_data->offset);
@@ -638,17 +744,17 @@ static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
set_label_data->length);
- if (status != 0 /* Success */) {
+ if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
nvdimm_dsm_no_payload(status, dsm_mem_addr);
return;
}
- assert(sizeof(*in) + sizeof(*set_label_data) + set_label_data->length <=
- 4096);
+ assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) +
+ set_label_data->length <= NVDIMM_DSM_MEMORY_SIZE);
nvc->write_label_data(nvdimm, set_label_data->in_buf,
set_label_data->length, set_label_data->offset);
- nvdimm_dsm_no_payload(0 /* Success */, dsm_mem_addr);
+ nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_SUCCESS, dsm_mem_addr);
}
static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
@@ -672,7 +778,7 @@ static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
}
if (!nvdimm) {
- nvdimm_dsm_no_payload(2 /* Non-Existing Memory Device */,
+ nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_NOMEMDEV,
dsm_mem_addr);
return;
}
@@ -699,7 +805,7 @@ static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
break;
}
- nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+ nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}
static uint64_t
@@ -712,6 +818,7 @@ nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
+ AcpiNVDIMMState *state = opaque;
NvdimmDsmIn *in;
hwaddr dsm_mem_addr = val;
@@ -735,7 +842,12 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
nvdimm_debug("Revision %#x is not supported, expect %#x.\n",
in->revision, 0x1);
- nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+ nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
+ goto exit;
+ }
+
+ if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
+ nvdimm_dsm_handle_reserved_root_method(state, in, dsm_mem_addr);
goto exit;
}
@@ -761,6 +873,13 @@ static const MemoryRegionOps nvdimm_dsm_ops = {
},
};
+void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
+{
+ if (dev->hotplugged) {
+ acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS);
+ }
+}
+
void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
FWCfgState *fw_cfg, Object *owner)
{
@@ -772,23 +891,105 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
state->dsm_mem->len);
+
+ nvdimm_init_fit_buffer(&state->fit_buf);
}
-#define NVDIMM_COMMON_DSM "NCAL"
-#define NVDIMM_ACPI_MEM_ADDR "MEMA"
+#define NVDIMM_COMMON_DSM "NCAL"
+#define NVDIMM_ACPI_MEM_ADDR "MEMA"
+
+#define NVDIMM_DSM_MEMORY "NRAM"
+#define NVDIMM_DSM_IOPORT "NPIO"
+
+#define NVDIMM_DSM_NOTIFY "NTFI"
+#define NVDIMM_DSM_HANDLE "HDLE"
+#define NVDIMM_DSM_REVISION "REVS"
+#define NVDIMM_DSM_FUNCTION "FUNC"
+#define NVDIMM_DSM_ARG3 "FARG"
+
+#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
+#define NVDIMM_DSM_OUT_BUF "ODAT"
+
+#define NVDIMM_DSM_RFIT_STATUS "RSTA"
+
+#define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
static void nvdimm_build_common_dsm(Aml *dev)
{
- Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *result_size;
+ Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
- Aml *pckg, *pckg_index, *pckg_buf;
+ Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
uint8_t byte_list[1];
method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
uuid = aml_arg(0);
function = aml_arg(2);
handle = aml_arg(4);
- dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
+ dsm_mem = aml_local(6);
+ dsm_out_buf = aml_local(7);
+
+ aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));
+
+ /* map DSM memory and IO into ACPI namespace. */
+ aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, AML_SYSTEM_IO,
+ aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
+ aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
+ AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));
+
+ /*
+ * DSM notifier:
+ * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
+ * emulate the access.
+ *
+ * It is the IO port so that accessing them will cause VM-exit, the
+ * control will be transferred to QEMU.
+ */
+ field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
+ sizeof(uint32_t) * BITS_PER_BYTE));
+ aml_append(method, field);
+
+ /*
+ * DSM input:
+ * NVDIMM_DSM_HANDLE: store the device's handle; it is zero if the _DSM
+ * call happens on the NVDIMM Root Device.
+ * NVDIMM_DSM_REVISION: store the Arg1 of the _DSM call.
+ * NVDIMM_DSM_FUNCTION: store the Arg2 of the _DSM call.
+ * NVDIMM_DSM_ARG3: store the Arg3 of the _DSM call, which is a Package
+ * containing function-specific arguments.
+ *
+ * They are RAM mappings on the host, so these accesses never cause
+ * a VM-exit.
+ */
+ field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
+ sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
+ sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
+ sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
+ (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
+ aml_append(method, field);
+
+ /*
+ * DSM output:
+ * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
+ * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
+ *
+ * Since the page is reused for both input and output, the input data
+ * is lost once the new result is stored into ODAT, so we must fetch
+ * all the input data before writing the result.
+ */
+ field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
+ AML_PRESERVE);
+ aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
+ sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
+ aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
+ (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
+ aml_append(method, field);
/*
* do not support any method if DSM memory address has not been
@@ -804,9 +1005,15 @@ static void nvdimm_build_common_dsm(Aml *dev)
/* UUID for NVDIMM Root Device */, expected_uuid));
aml_append(method, ifctx);
elsectx = aml_else();
- aml_append(elsectx, aml_store(
+ ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
+ aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
+ /* UUID for QEMU internal use */), expected_uuid));
+ aml_append(elsectx, ifctx);
+ elsectx2 = aml_else();
+ aml_append(elsectx2, aml_store(
aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
/* UUID for NVDIMM Devices */, expected_uuid));
+ aml_append(elsectx, elsectx2);
aml_append(method, elsectx);
uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
@@ -823,7 +1030,7 @@ static void nvdimm_build_common_dsm(Aml *dev)
aml_append(unsupport, ifctx);
/* No function is supported yet. */
- byte_list[0] = 1 /* Not Supported */;
+ byte_list[0] = NVDIMM_DSM_RET_STATUS_UNSUPPORT;
aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
aml_append(method, unsupport);
@@ -832,9 +1039,9 @@ static void nvdimm_build_common_dsm(Aml *dev)
* it reserves 0 for root device and is the handle for NVDIMM devices.
* See the comments in nvdimm_slot_to_handle().
*/
- aml_append(method, aml_store(handle, aml_name("HDLE")));
- aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
- aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
+ aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
+ aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
+ aml_append(method, aml_store(aml_arg(2), aml_name(NVDIMM_DSM_FUNCTION)));
/*
* The fourth parameter (Arg3) of _DSM is a package which contains
@@ -852,24 +1059,26 @@ static void nvdimm_build_common_dsm(Aml *dev)
pckg_buf = aml_local(3);
aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
- aml_append(ifctx, aml_store(pckg_buf, aml_name("ARG3")));
+ aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
aml_append(method, ifctx);
/*
* tell QEMU about the real address of DSM memory, then QEMU
* gets the control and fills the result in DSM memory.
*/
- aml_append(method, aml_store(dsm_mem, aml_name("NTFI")));
-
- result_size = aml_local(1);
- aml_append(method, aml_store(aml_name("RLEN"), result_size));
- aml_append(method, aml_store(aml_shiftleft(result_size, aml_int(3)),
- result_size));
- aml_append(method, aml_create_field(aml_name("ODAT"), aml_int(0),
- result_size, "OBUF"));
+ aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));
+
+ dsm_out_buf_size = aml_local(1);
+ /* RLEN is not included in the payload returned to the guest. */
+ aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
+ aml_int(4), dsm_out_buf_size));
+ aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
+ dsm_out_buf_size));
+ aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
+ aml_int(0), dsm_out_buf_size, "OBUF"));
aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
- aml_arg(6)));
- aml_append(method, aml_return(aml_arg(6)));
+ dsm_out_buf));
+ aml_append(method, aml_return(dsm_out_buf));
aml_append(dev, method);
}
@@ -884,12 +1093,107 @@ static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
aml_append(dev, method);
}
-static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
+static void nvdimm_build_fit(Aml *dev)
{
- for (; device_list; device_list = device_list->next) {
- DeviceState *dev = device_list->data;
- int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
- NULL);
+ Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
+ Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;
+
+ buf = aml_local(0);
+ buf_size = aml_local(1);
+ fit = aml_local(2);
+
+ aml_append(dev, aml_name_decl(NVDIMM_DSM_RFIT_STATUS, aml_int(0)));
+
+ /* build helper function, RFIT. */
+ method = aml_method("RFIT", 1, AML_SERIALIZED);
+ aml_append(method, aml_name_decl("OFST", aml_int(0)));
+
+ /* prepare input package. */
+ pkg = aml_package(1);
+ aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
+ aml_append(pkg, aml_name("OFST"));
+
+ /* call Read_FIT function. */
+ call_result = aml_call5(NVDIMM_COMMON_DSM,
+ aml_touuid(NVDIMM_QEMU_RSVD_UUID),
+ aml_int(1) /* Revision 1 */,
+ aml_int(0x1) /* Read FIT */,
+ pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
+ aml_append(method, aml_store(call_result, buf));
+
+ /* handle _DSM result. */
+ aml_append(method, aml_create_dword_field(buf,
+ aml_int(0) /* offset at byte 0 */, "STAU"));
+
+ aml_append(method, aml_store(aml_name("STAU"),
+ aml_name(NVDIMM_DSM_RFIT_STATUS)));
+
+ /* if something went wrong during _DSM, return an empty buffer. */
+ ifcond = aml_equal(aml_int(NVDIMM_DSM_RET_STATUS_SUCCESS),
+ aml_name("STAU"));
+ ifctx = aml_if(aml_lnot(ifcond));
+ aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_store(aml_sizeof(buf), buf_size));
+ aml_append(method, aml_subtract(buf_size,
+ aml_int(4) /* the size of "STAU" */,
+ buf_size));
+
+ /* return if we have reached the end of the FIT. */
+ ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
+ aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_create_field(buf,
+ aml_int(4 * BITS_PER_BYTE), /* offset at byte 4. */
+ aml_shiftleft(buf_size, aml_int(3)), "BUFF"));
+ aml_append(method, aml_return(aml_name("BUFF")));
+ aml_append(dev, method);
+
+ /* build _FIT. */
+ method = aml_method("_FIT", 0, AML_SERIALIZED);
+ offset = aml_local(3);
+
+ aml_append(method, aml_store(aml_buffer(0, NULL), fit));
+ aml_append(method, aml_store(aml_int(0), offset));
+
+ whilectx = aml_while(aml_int(1));
+ aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
+ aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));
+
+ /*
+ * if the FIT buffer was changed during RFIT, read it again from
+ * the beginning.
+ */
+ ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
+ aml_int(NVDIMM_DSM_RET_STATUS_FIT_CHANGED)));
+ aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
+ aml_append(ifctx, aml_store(aml_int(0), offset));
+ aml_append(whilectx, ifctx);
+
+ elsectx = aml_else();
+
+ /* finish the FIT read if no data was read out. */
+ ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
+ aml_append(ifctx, aml_return(fit));
+ aml_append(elsectx, ifctx);
+
+ /* update the offset. */
+ aml_append(elsectx, aml_add(offset, buf_size, offset));
+ /* append the data we read out to the FIT buffer. */
+ aml_append(elsectx, aml_concatenate(fit, buf, fit));
+ aml_append(whilectx, elsectx);
+ aml_append(method, whilectx);
+
+ aml_append(dev, method);
+}
+
+static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
+{
+ uint32_t slot;
+
+ for (slot = 0; slot < ram_slots; slot++) {
uint32_t handle = nvdimm_slot_to_handle(slot);
Aml *nvdimm_dev;
@@ -910,11 +1214,11 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
}
}
-static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
- GArray *table_data, BIOSLinker *linker,
- GArray *dsm_dma_arrea)
+static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
+ BIOSLinker *linker, GArray *dsm_dma_arrea,
+ uint32_t ram_slots)
{
- Aml *ssdt, *sb_scope, *dev, *field;
+ Aml *ssdt, *sb_scope, *dev;
int mem_addr_offset, nvdimm_ssdt;
acpi_add_table(table_offsets, table_data);
@@ -939,69 +1243,13 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
*/
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));
- /* map DSM memory and IO into ACPI namespace. */
- aml_append(dev, aml_operation_region("NPIO", AML_SYSTEM_IO,
- aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
- aml_append(dev, aml_operation_region("NRAM", AML_SYSTEM_MEMORY,
- aml_name(NVDIMM_ACPI_MEM_ADDR), sizeof(NvdimmDsmIn)));
-
- /*
- * DSM notifier:
- * NTFI: write the address of DSM memory and notify QEMU to emulate
- * the access.
- *
- * It is the IO port so that accessing them will cause VM-exit, the
- * control will be transferred to QEMU.
- */
- field = aml_field("NPIO", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("NTFI",
- sizeof(uint32_t) * BITS_PER_BYTE));
- aml_append(dev, field);
-
- /*
- * DSM input:
- * HDLE: store device's handle, it's zero if the _DSM call happens
- * on NVDIMM Root Device.
- * REVS: store the Arg1 of _DSM call.
- * FUNC: store the Arg2 of _DSM call.
- * ARG3: store the Arg3 of _DSM call.
- *
- * They are RAM mapping on host so that these accesses never cause
- * VM-EXIT.
- */
- field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("HDLE",
- sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("REVS",
- sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("FUNC",
- sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("ARG3",
- (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
- aml_append(dev, field);
-
- /*
- * DSM output:
- * RLEN: the size of the buffer filled by QEMU.
- * ODAT: the buffer QEMU uses to store the result.
- *
- * Since the page is reused by both input and out, the input data
- * will be lost after storing new result into ODAT so we should fetch
- * all the input data before writing the result.
- */
- field = aml_field("NRAM", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
- aml_append(field, aml_named_field("RLEN",
- sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
- aml_append(field, aml_named_field("ODAT",
- (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
- aml_append(dev, field);
-
nvdimm_build_common_dsm(dev);
/* 0 is reserved for root device. */
nvdimm_build_device_dsm(dev, 0);
+ nvdimm_build_fit(dev);
- nvdimm_build_nvdimm_devices(device_list, dev);
+ nvdimm_build_nvdimm_devices(dev, ram_slots);
aml_append(sb_scope, dev);
aml_append(ssdt, sb_scope);
@@ -1026,17 +1274,25 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
}
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
- BIOSLinker *linker, GArray *dsm_dma_arrea)
+ BIOSLinker *linker, AcpiNVDIMMState *state,
+ uint32_t ram_slots)
{
GSList *device_list;
+ /* no NVDIMM device can be plugged. */
+ if (!ram_slots) {
+ return;
+ }
+
+ nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
+ ram_slots);
+
+ device_list = nvdimm_get_device_list();
/* no NVDIMM device is plugged. */
- device_list = nvdimm_get_plugged_device_list();
if (!device_list) {
return;
}
- nvdimm_build_nfit(device_list, table_offsets, table_data, linker);
- nvdimm_build_ssdt(device_list, table_offsets, table_data, linker,
- dsm_dma_arrea);
+
+ nvdimm_build_nfit(state, table_offsets, table_data, linker);
g_slist_free(device_list);
}
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index afdb5a9a2c..8d035740ac 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -384,7 +384,12 @@ static void piix4_device_plug_cb(HotplugHandler *hotplug_dev,
if (s->acpi_memory_hotplug.is_enabled &&
object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
- acpi_memory_plug_cb(hotplug_dev, &s->acpi_memory_hotplug, dev, errp);
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ nvdimm_acpi_plug_cb(hotplug_dev, dev);
+ } else {
+ acpi_memory_plug_cb(hotplug_dev, &s->acpi_memory_hotplug,
+ dev, errp);
+ }
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_plug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
diff --git a/hw/adc/Makefile.objs b/hw/adc/Makefile.objs
new file mode 100644
index 0000000000..3f6dfdedae
--- /dev/null
+++ b/hw/adc/Makefile.objs
@@ -0,0 +1 @@
+obj-$(CONFIG_STM32F2XX_ADC) += stm32f2xx_adc.o
diff --git a/hw/adc/stm32f2xx_adc.c b/hw/adc/stm32f2xx_adc.c
new file mode 100644
index 0000000000..90fe9de299
--- /dev/null
+++ b/hw/adc/stm32f2xx_adc.c
@@ -0,0 +1,306 @@
+/*
+ * STM32F2XX ADC
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/hw.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "hw/adc/stm32f2xx_adc.h"
+
+#ifndef STM_ADC_ERR_DEBUG
+#define STM_ADC_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do { \
+ if (STM_ADC_ERR_DEBUG >= lvl) { \
+ qemu_log("%s: " fmt, __func__, ## args); \
+ } \
+} while (0);
+
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+
+static void stm32f2xx_adc_reset(DeviceState *dev)
+{
+ STM32F2XXADCState *s = STM32F2XX_ADC(dev);
+
+ s->adc_sr = 0x00000000;
+ s->adc_cr1 = 0x00000000;
+ s->adc_cr2 = 0x00000000;
+ s->adc_smpr1 = 0x00000000;
+ s->adc_smpr2 = 0x00000000;
+ s->adc_jofr[0] = 0x00000000;
+ s->adc_jofr[1] = 0x00000000;
+ s->adc_jofr[2] = 0x00000000;
+ s->adc_jofr[3] = 0x00000000;
+ s->adc_htr = 0x00000FFF;
+ s->adc_ltr = 0x00000000;
+ s->adc_sqr1 = 0x00000000;
+ s->adc_sqr2 = 0x00000000;
+ s->adc_sqr3 = 0x00000000;
+ s->adc_jsqr = 0x00000000;
+ s->adc_jdr[0] = 0x00000000;
+ s->adc_jdr[1] = 0x00000000;
+ s->adc_jdr[2] = 0x00000000;
+ s->adc_jdr[3] = 0x00000000;
+ s->adc_dr = 0x00000000;
+}
+
+static uint32_t stm32f2xx_adc_generate_value(STM32F2XXADCState *s)
+{
+ /* Attempts to fake some ADC values */
+ s->adc_dr = s->adc_dr + 7;
+
+ switch ((s->adc_cr1 & ADC_CR1_RES) >> 24) {
+ case 0:
+ /* 12-bit */
+ s->adc_dr &= 0xFFF;
+ break;
+ case 1:
+ /* 10-bit */
+ s->adc_dr &= 0x3FF;
+ break;
+ case 2:
+ /* 8-bit */
+ s->adc_dr &= 0xFF;
+ break;
+ default:
+ /* 6-bit */
+ s->adc_dr &= 0x3F;
+ }
+
+ if (s->adc_cr2 & ADC_CR2_ALIGN) {
+ return (s->adc_dr << 1) & 0xFFF0;
+ } else {
+ return s->adc_dr;
+ }
+}
+
+static uint64_t stm32f2xx_adc_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ STM32F2XXADCState *s = opaque;
+
+ DB_PRINT("Address: 0x%" HWADDR_PRIx "\n", addr);
+
+ if (addr >= ADC_COMMON_ADDRESS) {
+ qemu_log_mask(LOG_UNIMP,
+ "%s: ADC Common Register Unsupported\n", __func__);
+ }
+
+ switch (addr) {
+ case ADC_SR:
+ return s->adc_sr;
+ case ADC_CR1:
+ return s->adc_cr1;
+ case ADC_CR2:
+ return s->adc_cr2 & 0xFFFFFFF;
+ case ADC_SMPR1:
+ return s->adc_smpr1;
+ case ADC_SMPR2:
+ return s->adc_smpr2;
+ case ADC_JOFR1:
+ case ADC_JOFR2:
+ case ADC_JOFR3:
+ case ADC_JOFR4:
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Injection ADC is not implemented, the registers are " \
+ "included for compatibility\n", __func__);
+ return s->adc_jofr[(addr - ADC_JOFR1) / 4];
+ case ADC_HTR:
+ return s->adc_htr;
+ case ADC_LTR:
+ return s->adc_ltr;
+ case ADC_SQR1:
+ return s->adc_sqr1;
+ case ADC_SQR2:
+ return s->adc_sqr2;
+ case ADC_SQR3:
+ return s->adc_sqr3;
+ case ADC_JSQR:
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Injection ADC is not implemented, the registers are " \
+ "included for compatibility\n", __func__);
+ return s->adc_jsqr;
+ case ADC_JDR1:
+ case ADC_JDR2:
+ case ADC_JDR3:
+ case ADC_JDR4:
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Injection ADC is not implemented, the registers are " \
+ "included for compatibility\n", __func__);
+ return s->adc_jdr[(addr - ADC_JDR1) / 4] -
+ s->adc_jofr[(addr - ADC_JDR1) / 4];
+ case ADC_DR:
+ if ((s->adc_cr2 & ADC_CR2_ADON) && (s->adc_cr2 & ADC_CR2_SWSTART)) {
+ s->adc_cr2 ^= ADC_CR2_SWSTART;
+ return stm32f2xx_adc_generate_value(s);
+ } else {
+ return 0;
+ }
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr);
+ }
+
+ return 0;
+}
+
+static void stm32f2xx_adc_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ STM32F2XXADCState *s = opaque;
+ uint32_t value = (uint32_t) val64;
+
+ DB_PRINT("Address: 0x%" HWADDR_PRIx ", Value: 0x%x\n",
+ addr, value);
+
+ if (addr >= 0x100) {
+ qemu_log_mask(LOG_UNIMP,
+ "%s: ADC Common Register Unsupported\n", __func__);
+ }
+
+ switch (addr) {
+ case ADC_SR:
+ s->adc_sr &= (value & 0x3F);
+ break;
+ case ADC_CR1:
+ s->adc_cr1 = value;
+ break;
+ case ADC_CR2:
+ s->adc_cr2 = value;
+ break;
+ case ADC_SMPR1:
+ s->adc_smpr1 = value;
+ break;
+ case ADC_SMPR2:
+ s->adc_smpr2 = value;
+ break;
+ case ADC_JOFR1:
+ case ADC_JOFR2:
+ case ADC_JOFR3:
+ case ADC_JOFR4:
+ s->adc_jofr[(addr - ADC_JOFR1) / 4] = (value & 0xFFF);
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Injection ADC is not implemented, the registers are " \
+ "included for compatibility\n", __func__);
+ break;
+ case ADC_HTR:
+ s->adc_htr = value;
+ break;
+ case ADC_LTR:
+ s->adc_ltr = value;
+ break;
+ case ADC_SQR1:
+ s->adc_sqr1 = value;
+ break;
+ case ADC_SQR2:
+ s->adc_sqr2 = value;
+ break;
+ case ADC_SQR3:
+ s->adc_sqr3 = value;
+ break;
+ case ADC_JSQR:
+ s->adc_jsqr = value;
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Injection ADC is not implemented, the registers are " \
+ "included for compatibility\n", __func__);
+ break;
+ case ADC_JDR1:
+ case ADC_JDR2:
+ case ADC_JDR3:
+ case ADC_JDR4:
+ s->adc_jdr[(addr - ADC_JDR1) / 4] = value;
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Injection ADC is not implemented, the registers are " \
+ "included for compatibility\n", __func__);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr);
+ }
+}
+
+static const MemoryRegionOps stm32f2xx_adc_ops = {
+ .read = stm32f2xx_adc_read,
+ .write = stm32f2xx_adc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_stm32f2xx_adc = {
+ .name = TYPE_STM32F2XX_ADC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(adc_sr, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_cr1, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_cr2, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_smpr1, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_smpr2, STM32F2XXADCState),
+ VMSTATE_UINT32_ARRAY(adc_jofr, STM32F2XXADCState, 4),
+ VMSTATE_UINT32(adc_htr, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_ltr, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_sqr1, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_sqr2, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_sqr3, STM32F2XXADCState),
+ VMSTATE_UINT32(adc_jsqr, STM32F2XXADCState),
+ VMSTATE_UINT32_ARRAY(adc_jdr, STM32F2XXADCState, 4),
+ VMSTATE_UINT32(adc_dr, STM32F2XXADCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void stm32f2xx_adc_init(Object *obj)
+{
+ STM32F2XXADCState *s = STM32F2XX_ADC(obj);
+
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+ memory_region_init_io(&s->mmio, obj, &stm32f2xx_adc_ops, s,
+ TYPE_STM32F2XX_ADC, 0xFF);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void stm32f2xx_adc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = stm32f2xx_adc_reset;
+ dc->vmsd = &vmstate_stm32f2xx_adc;
+}
+
+static const TypeInfo stm32f2xx_adc_info = {
+ .name = TYPE_STM32F2XX_ADC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(STM32F2XXADCState),
+ .instance_init = stm32f2xx_adc_init,
+ .class_init = stm32f2xx_adc_class_init,
+};
+
+static void stm32f2xx_adc_register_types(void)
+{
+ type_register_static(&stm32f2xx_adc_info);
+}
+
+type_init(stm32f2xx_adc_register_types)
diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c
index f1267b5441..d6431fd586 100644
--- a/hw/alpha/dp264.c
+++ b/hw/alpha/dp264.c
@@ -88,7 +88,7 @@ static void clipper_init(MachineState *machine)
pci_vga_init(pci_bus);
/* Serial code setup. */
- serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS);
+ serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);
/* Network setup. e1000 is good enough, failing Tulip support. */
for (i = 0; i < nb_nics; i++) {
diff --git a/hw/alpha/typhoon.c b/hw/alpha/typhoon.c
index 883db13f96..f50f5cf186 100644
--- a/hw/alpha/typhoon.c
+++ b/hw/alpha/typhoon.c
@@ -376,7 +376,7 @@ static void cchip_write(void *opaque, hwaddr addr,
break;
case 0x0240: /* DIM1 */
/* DIM: Device Interrupt Mask Register, CPU1. */
- s->cchip.dim[0] = val;
+ s->cchip.dim[1] = val;
cpu_irq_change(s->cchip.cpu[1], val & s->cchip.drir);
break;
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index f36884a23a..65022d0df2 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -45,4 +45,4 @@ obj-$(CONFIG_XLNX_ZYNQMP) += xlnx-zynqmp.o xlnx-ep108.o
obj-$(CONFIG_FSL_IMX25) += fsl-imx25.o imx25_pdk.o
obj-$(CONFIG_FSL_IMX31) += fsl-imx31.o kzm.o
obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o sabrelite.o
-obj-$(CONFIG_ASPEED_SOC) += ast2400.o palmetto-bmc.o
+obj-$(CONFIG_ASPEED_SOC) += aspeed_soc.o aspeed.o
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
new file mode 100644
index 0000000000..c7206fda6d
--- /dev/null
+++ b/hw/arm/aspeed.c
@@ -0,0 +1,197 @@
+/*
+ * OpenPOWER Palmetto BMC
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "hw/arm/arm.h"
+#include "hw/arm/aspeed_soc.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+
+static struct arm_boot_info aspeed_board_binfo = {
+ .board_id = -1, /* device-tree-only board */
+ .nb_cpus = 1,
+};
+
+typedef struct AspeedBoardState {
+ AspeedSoCState soc;
+ MemoryRegion ram;
+} AspeedBoardState;
+
+typedef struct AspeedBoardConfig {
+ const char *soc_name;
+ uint32_t hw_strap1;
+} AspeedBoardConfig;
+
+enum {
+ PALMETTO_BMC,
+ AST2500_EVB,
+};
+
+#define PALMETTO_BMC_HW_STRAP1 ( \
+ SCU_AST2400_HW_STRAP_DRAM_SIZE(DRAM_SIZE_256MB) | \
+ SCU_AST2400_HW_STRAP_DRAM_CONFIG(2 /* DDR3 with CL=6, CWL=5 */) | \
+ SCU_AST2400_HW_STRAP_ACPI_DIS | \
+ SCU_AST2400_HW_STRAP_SET_CLK_SOURCE(AST2400_CLK_48M_IN) | \
+ SCU_HW_STRAP_VGA_CLASS_CODE | \
+ SCU_HW_STRAP_LPC_RESET_PIN | \
+ SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_M_S_EN) | \
+ SCU_AST2400_HW_STRAP_SET_CPU_AHB_RATIO(AST2400_CPU_AHB_RATIO_2_1) | \
+ SCU_HW_STRAP_SPI_WIDTH | \
+ SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \
+ SCU_AST2400_HW_STRAP_BOOT_MODE(AST2400_SPI_BOOT))
+
+#define AST2500_EVB_HW_STRAP1 (( \
+ AST2500_HW_STRAP1_DEFAULTS | \
+ SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \
+ SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \
+ SCU_AST2500_HW_STRAP_UART_DEBUG | \
+ SCU_AST2500_HW_STRAP_DDR4_ENABLE | \
+ SCU_HW_STRAP_MAC1_RGMII | \
+ SCU_HW_STRAP_MAC0_RGMII) & \
+ ~SCU_HW_STRAP_2ND_BOOT_WDT)
+
+static const AspeedBoardConfig aspeed_boards[] = {
+ [PALMETTO_BMC] = { "ast2400-a0", PALMETTO_BMC_HW_STRAP1 },
+ [AST2500_EVB] = { "ast2500-a1", AST2500_EVB_HW_STRAP1 },
+};
+
+static void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
+ Error **errp)
+{
+ int i;
+
+ for (i = 0; i < s->num_cs; ++i) {
+ AspeedSMCFlash *fl = &s->flashes[i];
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
+ qemu_irq cs_line;
+
+ /*
+ * FIXME: check that we are not using a flash module exceeding
+ * the controller segment size
+ */
+ fl->flash = ssi_create_slave_no_init(s->spi, flashtype);
+ if (dinfo) {
+ qdev_prop_set_drive(fl->flash, "drive", blk_by_legacy_dinfo(dinfo),
+ errp);
+ }
+ qdev_init_nofail(fl->flash);
+
+ cs_line = qdev_get_gpio_in_named(fl->flash, SSI_GPIO_CS, 0);
+ sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line);
+ }
+}
+
+static void aspeed_board_init(MachineState *machine,
+ const AspeedBoardConfig *cfg)
+{
+ AspeedBoardState *bmc;
+ AspeedSoCClass *sc;
+
+ bmc = g_new0(AspeedBoardState, 1);
+ object_initialize(&bmc->soc, (sizeof(bmc->soc)), cfg->soc_name);
+ object_property_add_child(OBJECT(machine), "soc", OBJECT(&bmc->soc),
+ &error_abort);
+
+ sc = ASPEED_SOC_GET_CLASS(&bmc->soc);
+
+ object_property_set_int(OBJECT(&bmc->soc), ram_size, "ram-size",
+ &error_abort);
+ object_property_set_int(OBJECT(&bmc->soc), cfg->hw_strap1, "hw-strap1",
+ &error_abort);
+ object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
+ &error_abort);
+
+ /*
+ * Allocate RAM after the memory controller has checked that the
+ * size is valid; if it is not, a default value is used.
+ */
+ ram_size = object_property_get_int(OBJECT(&bmc->soc), "ram-size",
+ &error_abort);
+
+ memory_region_allocate_system_memory(&bmc->ram, NULL, "ram", ram_size);
+ memory_region_add_subregion(get_system_memory(), sc->info->sdram_base,
+ &bmc->ram);
+ object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
+ &error_abort);
+
+ aspeed_board_init_flashes(&bmc->soc.fmc, "n25q256a", &error_abort);
+ aspeed_board_init_flashes(&bmc->soc.spi[0], "mx25l25635e", &error_abort);
+
+ aspeed_board_binfo.kernel_filename = machine->kernel_filename;
+ aspeed_board_binfo.initrd_filename = machine->initrd_filename;
+ aspeed_board_binfo.kernel_cmdline = machine->kernel_cmdline;
+ aspeed_board_binfo.ram_size = ram_size;
+ aspeed_board_binfo.loader_start = sc->info->sdram_base;
+
+ arm_load_kernel(ARM_CPU(first_cpu), &aspeed_board_binfo);
+}
+
+static void palmetto_bmc_init(MachineState *machine)
+{
+ aspeed_board_init(machine, &aspeed_boards[PALMETTO_BMC]);
+}
+
+static void palmetto_bmc_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "OpenPOWER Palmetto BMC (ARM926EJ-S)";
+ mc->init = palmetto_bmc_init;
+ mc->max_cpus = 1;
+ mc->no_sdcard = 1;
+ mc->no_floppy = 1;
+ mc->no_cdrom = 1;
+ mc->no_parallel = 1;
+}
+
+static const TypeInfo palmetto_bmc_type = {
+ .name = MACHINE_TYPE_NAME("palmetto-bmc"),
+ .parent = TYPE_MACHINE,
+ .class_init = palmetto_bmc_class_init,
+};
+
+static void ast2500_evb_init(MachineState *machine)
+{
+ aspeed_board_init(machine, &aspeed_boards[AST2500_EVB]);
+}
+
+static void ast2500_evb_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "Aspeed AST2500 EVB (ARM1176)";
+ mc->init = ast2500_evb_init;
+ mc->max_cpus = 1;
+ mc->no_sdcard = 1;
+ mc->no_floppy = 1;
+ mc->no_cdrom = 1;
+ mc->no_parallel = 1;
+}
+
+static const TypeInfo ast2500_evb_type = {
+ .name = MACHINE_TYPE_NAME("ast2500-evb"),
+ .parent = TYPE_MACHINE,
+ .class_init = ast2500_evb_class_init,
+};
+
+static void aspeed_machine_init(void)
+{
+ type_register_static(&palmetto_bmc_type);
+ type_register_static(&ast2500_evb_type);
+}
+
+type_init(aspeed_machine_init)
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
new file mode 100644
index 0000000000..e14f5c217e
--- /dev/null
+++ b/hw/arm/aspeed_soc.c
@@ -0,0 +1,276 @@
+/*
+ * ASPEED SoC family
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ * Jeremy Kerr <jk@ozlabs.org>
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "hw/arm/aspeed_soc.h"
+#include "hw/char/serial.h"
+#include "qemu/log.h"
+#include "hw/i2c/aspeed_i2c.h"
+
+#define ASPEED_SOC_UART_5_BASE 0x00184000
+#define ASPEED_SOC_IOMEM_SIZE 0x00200000
+#define ASPEED_SOC_IOMEM_BASE 0x1E600000
+#define ASPEED_SOC_FMC_BASE 0x1E620000
+#define ASPEED_SOC_SPI_BASE 0x1E630000
+#define ASPEED_SOC_SPI2_BASE 0x1E631000
+#define ASPEED_SOC_VIC_BASE 0x1E6C0000
+#define ASPEED_SOC_SDMC_BASE 0x1E6E0000
+#define ASPEED_SOC_SCU_BASE 0x1E6E2000
+#define ASPEED_SOC_TIMER_BASE 0x1E782000
+#define ASPEED_SOC_I2C_BASE 0x1E78A000
+
+static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
+static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, };
+
+#define AST2400_SDRAM_BASE 0x40000000
+#define AST2500_SDRAM_BASE 0x80000000
+
+static const hwaddr aspeed_soc_ast2400_spi_bases[] = { ASPEED_SOC_SPI_BASE };
+static const char *aspeed_soc_ast2400_typenames[] = { "aspeed.smc.spi" };
+
+static const hwaddr aspeed_soc_ast2500_spi_bases[] = { ASPEED_SOC_SPI_BASE,
+ ASPEED_SOC_SPI2_BASE};
+static const char *aspeed_soc_ast2500_typenames[] = {
+ "aspeed.smc.ast2500-spi1", "aspeed.smc.ast2500-spi2" };
+
+static const AspeedSoCInfo aspeed_socs[] = {
+ { "ast2400-a0", "arm926", AST2400_A0_SILICON_REV, AST2400_SDRAM_BASE,
+ 1, aspeed_soc_ast2400_spi_bases,
+ "aspeed.smc.fmc", aspeed_soc_ast2400_typenames },
+ { "ast2400", "arm926", AST2400_A0_SILICON_REV, AST2400_SDRAM_BASE,
+ 1, aspeed_soc_ast2400_spi_bases,
+ "aspeed.smc.fmc", aspeed_soc_ast2400_typenames },
+ { "ast2500-a1", "arm1176", AST2500_A1_SILICON_REV, AST2500_SDRAM_BASE,
+ 2, aspeed_soc_ast2500_spi_bases,
+ "aspeed.smc.ast2500-fmc", aspeed_soc_ast2500_typenames },
+};
+
+/*
+ * IO handlers: simply catch any reads/writes to IO addresses that aren't
+ * handled by a device mapping.
+ */
+
+static uint64_t aspeed_soc_io_read(void *p, hwaddr offset, unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " [%u]\n",
+ __func__, offset, size);
+ return 0;
+}
+
+static void aspeed_soc_io_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " <- 0x%" PRIx64 " [%u]\n",
+ __func__, offset, value, size);
+}
+
+static const MemoryRegionOps aspeed_soc_io_ops = {
+ .read = aspeed_soc_io_read,
+ .write = aspeed_soc_io_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void aspeed_soc_init(Object *obj)
+{
+ AspeedSoCState *s = ASPEED_SOC(obj);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ int i;
+
+ s->cpu = cpu_arm_init(sc->info->cpu_model);
+
+ object_initialize(&s->vic, sizeof(s->vic), TYPE_ASPEED_VIC);
+ object_property_add_child(obj, "vic", OBJECT(&s->vic), NULL);
+ qdev_set_parent_bus(DEVICE(&s->vic), sysbus_get_default());
+
+ object_initialize(&s->timerctrl, sizeof(s->timerctrl), TYPE_ASPEED_TIMER);
+ object_property_add_child(obj, "timerctrl", OBJECT(&s->timerctrl), NULL);
+ qdev_set_parent_bus(DEVICE(&s->timerctrl), sysbus_get_default());
+
+ object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C);
+ object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL);
+ qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default());
+
+ object_initialize(&s->scu, sizeof(s->scu), TYPE_ASPEED_SCU);
+ object_property_add_child(obj, "scu", OBJECT(&s->scu), NULL);
+ qdev_set_parent_bus(DEVICE(&s->scu), sysbus_get_default());
+ qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev",
+ sc->info->silicon_rev);
+ object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu),
+ "hw-strap1", &error_abort);
+ object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu),
+ "hw-strap2", &error_abort);
+
+ object_initialize(&s->fmc, sizeof(s->fmc), sc->info->fmc_typename);
+ object_property_add_child(obj, "fmc", OBJECT(&s->fmc), NULL);
+ qdev_set_parent_bus(DEVICE(&s->fmc), sysbus_get_default());
+
+ for (i = 0; i < sc->info->spis_num; i++) {
+ object_initialize(&s->spi[i], sizeof(s->spi[i]),
+ sc->info->spi_typename[i]);
+ object_property_add_child(obj, "spi", OBJECT(&s->spi[i]), NULL);
+ qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
+ }
+
+ object_initialize(&s->sdmc, sizeof(s->sdmc), TYPE_ASPEED_SDMC);
+ object_property_add_child(obj, "sdmc", OBJECT(&s->sdmc), NULL);
+ qdev_set_parent_bus(DEVICE(&s->sdmc), sysbus_get_default());
+ qdev_prop_set_uint32(DEVICE(&s->sdmc), "silicon-rev",
+ sc->info->silicon_rev);
+ object_property_add_alias(obj, "ram-size", OBJECT(&s->sdmc),
+ "ram-size", &error_abort);
+}
+
+static void aspeed_soc_realize(DeviceState *dev, Error **errp)
+{
+ int i;
+ AspeedSoCState *s = ASPEED_SOC(dev);
+ AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ Error *err = NULL, *local_err = NULL;
+
+ /* IO space */
+ memory_region_init_io(&s->iomem, NULL, &aspeed_soc_io_ops, NULL,
+ "aspeed_soc.io", ASPEED_SOC_IOMEM_SIZE);
+ memory_region_add_subregion_overlap(get_system_memory(),
+ ASPEED_SOC_IOMEM_BASE, &s->iomem, -1);
+
+ /* VIC */
+ object_property_set_bool(OBJECT(&s->vic), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, ASPEED_SOC_VIC_BASE);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0,
+ qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1,
+ qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ));
+
+ /* Timer */
+ object_property_set_bool(OBJECT(&s->timerctrl), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, ASPEED_SOC_TIMER_BASE);
+ for (i = 0; i < ARRAY_SIZE(timer_irqs); i++) {
+ qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->vic), timer_irqs[i]);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
+ }
+
+ /* SCU */
+ object_property_set_bool(OBJECT(&s->scu), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, ASPEED_SOC_SCU_BASE);
+
+ /* UART - attach an 8250 to the IO space as our UART5 */
+ if (serial_hds[0]) {
+ qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]);
+ serial_mm_init(&s->iomem, ASPEED_SOC_UART_5_BASE, 2,
+ uart5, 38400, serial_hds[0], DEVICE_LITTLE_ENDIAN);
+ }
+
+ /* I2C */
+ object_property_set_bool(OBJECT(&s->i2c), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, ASPEED_SOC_I2C_BASE);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
+ qdev_get_gpio_in(DEVICE(&s->vic), 12));
+
+ /* FMC */
+ object_property_set_int(OBJECT(&s->fmc), 1, "num-cs", &err);
+ object_property_set_bool(OBJECT(&s->fmc), true, "realized", &local_err);
+ error_propagate(&err, local_err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, ASPEED_SOC_FMC_BASE);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1,
+ s->fmc.ctrl->flash_window_base);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0,
+ qdev_get_gpio_in(DEVICE(&s->vic), 19));
+
+ /* SPI */
+ for (i = 0; i < sc->info->spis_num; i++) {
+ object_property_set_int(OBJECT(&s->spi[i]), 1, "num-cs", &err);
+ object_property_set_bool(OBJECT(&s->spi[i]), true, "realized",
+ &local_err);
+ error_propagate(&err, local_err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, sc->info->spi_bases[i]);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1,
+ s->spi[i].ctrl->flash_window_base);
+ }
+
+ /* SDMC - SDRAM Memory Controller */
+ object_property_set_bool(OBJECT(&s->sdmc), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->sdmc), 0, ASPEED_SOC_SDMC_BASE);
+}
+
+static void aspeed_soc_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ AspeedSoCClass *sc = ASPEED_SOC_CLASS(oc);
+
+ sc->info = (AspeedSoCInfo *) data;
+ dc->realize = aspeed_soc_realize;
+
+ /*
+ * Reason: creates an ARM CPU, thus use after free(), see
+ * arm_cpu_class_init()
+ */
+ dc->cannot_destroy_with_object_finalize_yet = true;
+}
+
+static const TypeInfo aspeed_soc_type_info = {
+ .name = TYPE_ASPEED_SOC,
+ .parent = TYPE_DEVICE,
+ .instance_init = aspeed_soc_init,
+ .instance_size = sizeof(AspeedSoCState),
+ .class_size = sizeof(AspeedSoCClass),
+ .abstract = true,
+};
+
+static void aspeed_soc_register_types(void)
+{
+ int i;
+
+ type_register_static(&aspeed_soc_type_info);
+ for (i = 0; i < ARRAY_SIZE(aspeed_socs); ++i) {
+ TypeInfo ti = {
+ .name = aspeed_socs[i].name,
+ .parent = TYPE_ASPEED_SOC,
+ .class_init = aspeed_soc_class_init,
+ .class_data = (void *) &aspeed_socs[i],
+ };
+ type_register(&ti);
+ }
+}
+
+type_init(aspeed_soc_register_types)
diff --git a/hw/arm/ast2400.c b/hw/arm/ast2400.c
deleted file mode 100644
index 326fdb36ee..0000000000
--- a/hw/arm/ast2400.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * AST2400 SoC
- *
- * Andrew Jeffery <andrew@aj.id.au>
- * Jeremy Kerr <jk@ozlabs.org>
- *
- * Copyright 2016 IBM Corp.
- *
- * This code is licensed under the GPL version 2 or later. See
- * the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu-common.h"
-#include "cpu.h"
-#include "exec/address-spaces.h"
-#include "hw/arm/ast2400.h"
-#include "hw/char/serial.h"
-#include "qemu/log.h"
-#include "hw/i2c/aspeed_i2c.h"
-
-#define AST2400_UART_5_BASE 0x00184000
-#define AST2400_IOMEM_SIZE 0x00200000
-#define AST2400_IOMEM_BASE 0x1E600000
-#define AST2400_SMC_BASE AST2400_IOMEM_BASE /* Legacy SMC */
-#define AST2400_FMC_BASE 0X1E620000
-#define AST2400_SPI_BASE 0X1E630000
-#define AST2400_VIC_BASE 0x1E6C0000
-#define AST2400_SCU_BASE 0x1E6E2000
-#define AST2400_TIMER_BASE 0x1E782000
-#define AST2400_I2C_BASE 0x1E78A000
-
-#define AST2400_FMC_FLASH_BASE 0x20000000
-#define AST2400_SPI_FLASH_BASE 0x30000000
-
-static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
-static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, };
-
-/*
- * IO handlers: simply catch any reads/writes to IO addresses that aren't
- * handled by a device mapping.
- */
-
-static uint64_t ast2400_io_read(void *p, hwaddr offset, unsigned size)
-{
- qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " [%u]\n",
- __func__, offset, size);
- return 0;
-}
-
-static void ast2400_io_write(void *opaque, hwaddr offset, uint64_t value,
- unsigned size)
-{
- qemu_log_mask(LOG_UNIMP, "%s: 0x%" HWADDR_PRIx " <- 0x%" PRIx64 " [%u]\n",
- __func__, offset, value, size);
-}
-
-static const MemoryRegionOps ast2400_io_ops = {
- .read = ast2400_io_read,
- .write = ast2400_io_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
-};
-
-static void ast2400_init(Object *obj)
-{
- AST2400State *s = AST2400(obj);
-
- s->cpu = cpu_arm_init("arm926");
-
- object_initialize(&s->vic, sizeof(s->vic), TYPE_ASPEED_VIC);
- object_property_add_child(obj, "vic", OBJECT(&s->vic), NULL);
- qdev_set_parent_bus(DEVICE(&s->vic), sysbus_get_default());
-
- object_initialize(&s->timerctrl, sizeof(s->timerctrl), TYPE_ASPEED_TIMER);
- object_property_add_child(obj, "timerctrl", OBJECT(&s->timerctrl), NULL);
- qdev_set_parent_bus(DEVICE(&s->timerctrl), sysbus_get_default());
-
- object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C);
- object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL);
- qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default());
-
- object_initialize(&s->scu, sizeof(s->scu), TYPE_ASPEED_SCU);
- object_property_add_child(obj, "scu", OBJECT(&s->scu), NULL);
- qdev_set_parent_bus(DEVICE(&s->scu), sysbus_get_default());
- qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev",
- AST2400_A0_SILICON_REV);
- object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu),
- "hw-strap1", &error_abort);
- object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu),
- "hw-strap2", &error_abort);
-
- object_initialize(&s->smc, sizeof(s->smc), "aspeed.smc.fmc");
- object_property_add_child(obj, "smc", OBJECT(&s->smc), NULL);
- qdev_set_parent_bus(DEVICE(&s->smc), sysbus_get_default());
-
- object_initialize(&s->spi, sizeof(s->spi), "aspeed.smc.spi");
- object_property_add_child(obj, "spi", OBJECT(&s->spi), NULL);
- qdev_set_parent_bus(DEVICE(&s->spi), sysbus_get_default());
-}
-
-static void ast2400_realize(DeviceState *dev, Error **errp)
-{
- int i;
- AST2400State *s = AST2400(dev);
- Error *err = NULL, *local_err = NULL;
-
- /* IO space */
- memory_region_init_io(&s->iomem, NULL, &ast2400_io_ops, NULL,
- "ast2400.io", AST2400_IOMEM_SIZE);
- memory_region_add_subregion_overlap(get_system_memory(), AST2400_IOMEM_BASE,
- &s->iomem, -1);
-
- /* VIC */
- object_property_set_bool(OBJECT(&s->vic), true, "realized", &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, AST2400_VIC_BASE);
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0,
- qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1,
- qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ));
-
- /* Timer */
- object_property_set_bool(OBJECT(&s->timerctrl), true, "realized", &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->timerctrl), 0, AST2400_TIMER_BASE);
- for (i = 0; i < ARRAY_SIZE(timer_irqs); i++) {
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->vic), timer_irqs[i]);
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
- }
-
- /* SCU */
- object_property_set_bool(OBJECT(&s->scu), true, "realized", &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, AST2400_SCU_BASE);
-
- /* UART - attach an 8250 to the IO space as our UART5 */
- if (serial_hds[0]) {
- qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]);
- serial_mm_init(&s->iomem, AST2400_UART_5_BASE, 2,
- uart5, 38400, serial_hds[0], DEVICE_LITTLE_ENDIAN);
- }
-
- /* I2C */
- object_property_set_bool(OBJECT(&s->i2c), true, "realized", &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, AST2400_I2C_BASE);
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
- qdev_get_gpio_in(DEVICE(&s->vic), 12));
-
- /* SMC */
- object_property_set_int(OBJECT(&s->smc), 1, "num-cs", &err);
- object_property_set_bool(OBJECT(&s->smc), true, "realized", &local_err);
- error_propagate(&err, local_err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->smc), 0, AST2400_FMC_BASE);
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->smc), 1, AST2400_FMC_FLASH_BASE);
- sysbus_connect_irq(SYS_BUS_DEVICE(&s->smc), 0,
- qdev_get_gpio_in(DEVICE(&s->vic), 19));
-
- /* SPI */
- object_property_set_int(OBJECT(&s->spi), 1, "num-cs", &err);
- object_property_set_bool(OBJECT(&s->spi), true, "realized", &local_err);
- error_propagate(&err, local_err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi), 0, AST2400_SPI_BASE);
- sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi), 1, AST2400_SPI_FLASH_BASE);
-}
-
-static void ast2400_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
-
- dc->realize = ast2400_realize;
-
- /*
- * Reason: creates an ARM CPU, thus use after free(), see
- * arm_cpu_class_init()
- */
- dc->cannot_destroy_with_object_finalize_yet = true;
-}
-
-static const TypeInfo ast2400_type_info = {
- .name = TYPE_AST2400,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(AST2400State),
- .instance_init = ast2400_init,
- .class_init = ast2400_class_init,
-};
-
-static void ast2400_register_types(void)
-{
- type_register_static(&ast2400_type_info);
-}
-
-type_init(ast2400_register_types)
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 1b913a43ca..ff621e4b6a 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -9,6 +9,7 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include <libfdt.h>
#include "hw/hw.h"
#include "hw/arm/arm.h"
#include "hw/arm/linux-boot-if.h"
@@ -486,6 +487,17 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
g_free(nodename);
}
} else {
+ Error *err = NULL;
+
+ rc = fdt_path_offset(fdt, "/memory");
+ if (rc < 0) {
+ qemu_fdt_add_subnode(fdt, "/memory");
+ }
+
+ if (!qemu_fdt_getprop(fdt, "/memory", "device_type", NULL, &err)) {
+ qemu_fdt_setprop_string(fdt, "/memory", "device_type", "memory");
+ }
+
rc = qemu_fdt_setprop_sized_cells(fdt, "/memory", "reg",
acells, binfo->loader_start,
scells, binfo->ram_size);
@@ -495,6 +507,11 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
}
}
+ rc = fdt_path_offset(fdt, "/chosen");
+ if (rc < 0) {
+ qemu_fdt_add_subnode(fdt, "/chosen");
+ }
+
if (binfo->kernel_cmdline && *binfo->kernel_cmdline) {
rc = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
binfo->kernel_cmdline);
@@ -773,6 +790,8 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
*/
assert(!(info->secure_board_setup && kvm_enabled()));
+ info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
+
/* Load the kernel. */
if (!info->kernel_filename || info->firmware_loaded) {
@@ -833,8 +852,6 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
elf_machine = EM_ARM;
}
- info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
-
if (!info->secondary_cpu_reset_hook) {
info->secondary_cpu_reset_hook = default_reset_secondary;
}
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index fbd78ed01c..dd19ba3c99 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -74,6 +74,7 @@ static void cubieboard_init(MachineState *machine)
cubieboard_binfo.ram_size = machine->ram_size;
cubieboard_binfo.kernel_filename = machine->kernel_filename;
cubieboard_binfo.kernel_cmdline = machine->kernel_cmdline;
+ cubieboard_binfo.initrd_filename = machine->initrd_filename;
arm_load_kernel(&s->a10->cpu, &cubieboard_binfo);
}
diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c
index b4e358db65..7bb7be76b6 100644
--- a/hw/arm/fsl-imx25.c
+++ b/hw/arm/fsl-imx25.c
@@ -125,7 +125,7 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp)
if (!chr) {
char label[20];
snprintf(label, sizeof(label), "imx31.uart%d", i);
- chr = qemu_chr_new(label, "null", NULL);
+ chr = qemu_chr_new(label, "null");
}
qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr);
diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c
index fe204ace62..f23672b222 100644
--- a/hw/arm/fsl-imx31.c
+++ b/hw/arm/fsl-imx31.c
@@ -114,7 +114,7 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp)
if (!chr) {
char label[20];
snprintf(label, sizeof(label), "imx31.uart%d", i);
- chr = qemu_chr_new(label, "null", NULL);
+ chr = qemu_chr_new(label, "null");
}
qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr);
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
index 6a1bf263a5..e93532fb57 100644
--- a/hw/arm/fsl-imx6.c
+++ b/hw/arm/fsl-imx6.c
@@ -193,7 +193,7 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
if (!chr) {
char *label = g_strdup_printf("imx6.uart%d", i + 1);
- chr = qemu_chr_new(label, "null", NULL);
+ chr = qemu_chr_new(label, "null");
g_free(label);
serial_hds[i] = chr;
}
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
index 96dc150025..039812a3fd 100644
--- a/hw/arm/integratorcp.c
+++ b/hw/arm/integratorcp.c
@@ -252,6 +252,26 @@ static void integratorcm_init(Object *obj)
/* ??? What should the high bits of this value be? */
s->cm_auxosc = 0x0007feff;
s->cm_sdram = 0x00011122;
+ memcpy(integrator_spd + 73, "QEMU-MEMORY", 11);
+ s->cm_init = 0x00000112;
+ s->cm_refcnt_offset = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24,
+ 1000);
+ memory_region_init_ram(&s->flash, obj, "integrator.flash", 0x100000,
+ &error_fatal);
+ vmstate_register_ram_global(&s->flash);
+
+ memory_region_init_io(&s->iomem, obj, &integratorcm_ops, s,
+ "integratorcm", 0x00800000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ integratorcm_do_remap(s);
+ /* ??? Save/restore. */
+}
+
+static void integratorcm_realize(DeviceState *d, Error **errp)
+{
+ IntegratorCMState *s = INTEGRATOR_CM(d);
+
if (s->memsz >= 256) {
integrator_spd[31] = 64;
s->cm_sdram |= 0x10;
@@ -267,20 +287,6 @@ static void integratorcm_init(Object *obj)
} else {
integrator_spd[31] = 2;
}
- memcpy(integrator_spd + 73, "QEMU-MEMORY", 11);
- s->cm_init = 0x00000112;
- s->cm_refcnt_offset = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24,
- 1000);
- memory_region_init_ram(&s->flash, obj, "integrator.flash", 0x100000,
- &error_fatal);
- vmstate_register_ram_global(&s->flash);
-
- memory_region_init_io(&s->iomem, obj, &integratorcm_ops, s,
- "integratorcm", 0x00800000);
- sysbus_init_mmio(dev, &s->iomem);
-
- integratorcm_do_remap(s);
- /* ??? Save/restore. */
}
/* Integrator/CP hardware emulation. */
@@ -633,6 +639,7 @@ static void core_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->props = core_properties;
+ dc->realize = integratorcm_realize;
}
static const TypeInfo core_info = {
diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c
index 454acc5d2b..f962236cf4 100644
--- a/hw/arm/mainstone.c
+++ b/hw/arm/mainstone.c
@@ -73,8 +73,10 @@ static const struct keymap map[0xE0] = {
[0x2f] = {3,3}, /* v */
[0x11] = {3,4}, /* w */
[0x2d] = {3,5}, /* x */
+ [0x34] = {4,0}, /* . */
[0x15] = {4,2}, /* y */
[0x2c] = {4,3}, /* z */
+ [0x35] = {4,4}, /* / */
[0xc7] = {5,0}, /* Home */
[0x2a] = {5,1}, /* shift */
/*
@@ -88,7 +90,8 @@ static const struct keymap map[0xE0] = {
* Matrix position {5,4} and other keys are missing here.
* TODO: Compare with Linux code and test real hardware.
*/
- [0x1c] = {5,5}, /* enter (TODO: might be wrong) */
+ [0x1c] = {5,4}, /* enter */
+ [0x0e] = {5,5}, /* backspace */
[0xc8] = {6,0}, /* up */
[0xd0] = {6,1}, /* down */
[0xcb] = {6,2}, /* left */
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index cc50ace13d..cbbca4e17a 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -384,18 +384,24 @@ static NetClientInfo net_mv88w8618_info = {
.cleanup = eth_cleanup,
};
-static int mv88w8618_eth_init(SysBusDevice *sbd)
+static void mv88w8618_eth_init(Object *obj)
{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
DeviceState *dev = DEVICE(sbd);
mv88w8618_eth_state *s = MV88W8618_ETH(dev);
sysbus_init_irq(sbd, &s->irq);
- s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf,
- object_get_typename(OBJECT(dev)), dev->id, s);
- memory_region_init_io(&s->iomem, OBJECT(s), &mv88w8618_eth_ops, s,
+ memory_region_init_io(&s->iomem, obj, &mv88w8618_eth_ops, s,
"mv88w8618-eth", MP_ETH_SIZE);
sysbus_init_mmio(sbd, &s->iomem);
- return 0;
+}
+
+static void mv88w8618_eth_realize(DeviceState *dev, Error **errp)
+{
+ mv88w8618_eth_state *s = MV88W8618_ETH(dev);
+
+ s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
}
static const VMStateDescription mv88w8618_eth_vmsd = {
@@ -423,17 +429,17 @@ static Property mv88w8618_eth_properties[] = {
static void mv88w8618_eth_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = mv88w8618_eth_init;
dc->vmsd = &mv88w8618_eth_vmsd;
dc->props = mv88w8618_eth_properties;
+ dc->realize = mv88w8618_eth_realize;
}
static const TypeInfo mv88w8618_eth_info = {
.name = TYPE_MV88W8618_ETH,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(mv88w8618_eth_state),
+ .instance_init = mv88w8618_eth_init,
.class_init = mv88w8618_eth_class_init,
};
@@ -615,23 +621,26 @@ static const GraphicHwOps musicpal_gfx_ops = {
.gfx_update = lcd_refresh,
};
-static int musicpal_lcd_init(SysBusDevice *sbd)
+static void musicpal_lcd_realize(DeviceState *dev, Error **errp)
+{
+ musicpal_lcd_state *s = MUSICPAL_LCD(dev);
+ s->con = graphic_console_init(dev, 0, &musicpal_gfx_ops, s);
+ qemu_console_resize(s->con, 128 * 3, 64 * 3);
+}
+
+static void musicpal_lcd_init(Object *obj)
{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
DeviceState *dev = DEVICE(sbd);
musicpal_lcd_state *s = MUSICPAL_LCD(dev);
s->brightness = 7;
- memory_region_init_io(&s->iomem, OBJECT(s), &musicpal_lcd_ops, s,
+ memory_region_init_io(&s->iomem, obj, &musicpal_lcd_ops, s,
"musicpal-lcd", MP_LCD_SIZE);
sysbus_init_mmio(sbd, &s->iomem);
- s->con = graphic_console_init(dev, 0, &musicpal_gfx_ops, s);
- qemu_console_resize(s->con, 128*3, 64*3);
-
qdev_init_gpio_in(dev, musicpal_lcd_gpio_brightness_in, 3);
-
- return 0;
}
static const VMStateDescription musicpal_lcd_vmsd = {
@@ -652,16 +661,16 @@ static const VMStateDescription musicpal_lcd_vmsd = {
static void musicpal_lcd_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = musicpal_lcd_init;
dc->vmsd = &musicpal_lcd_vmsd;
+ dc->realize = musicpal_lcd_realize;
}
static const TypeInfo musicpal_lcd_info = {
.name = TYPE_MUSICPAL_LCD,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(musicpal_lcd_state),
+ .instance_init = musicpal_lcd_init,
.class_init = musicpal_lcd_class_init,
};
@@ -748,16 +757,16 @@ static const MemoryRegionOps mv88w8618_pic_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int mv88w8618_pic_init(SysBusDevice *dev)
+static void mv88w8618_pic_init(Object *obj)
{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
mv88w8618_pic_state *s = MV88W8618_PIC(dev);
qdev_init_gpio_in(DEVICE(dev), mv88w8618_pic_set_irq, 32);
sysbus_init_irq(dev, &s->parent_irq);
- memory_region_init_io(&s->iomem, OBJECT(s), &mv88w8618_pic_ops, s,
+ memory_region_init_io(&s->iomem, obj, &mv88w8618_pic_ops, s,
"musicpal-pic", MP_PIC_SIZE);
sysbus_init_mmio(dev, &s->iomem);
- return 0;
}
static const VMStateDescription mv88w8618_pic_vmsd = {
@@ -774,9 +783,7 @@ static const VMStateDescription mv88w8618_pic_vmsd = {
static void mv88w8618_pic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = mv88w8618_pic_init;
dc->reset = mv88w8618_pic_reset;
dc->vmsd = &mv88w8618_pic_vmsd;
}
@@ -785,6 +792,7 @@ static const TypeInfo mv88w8618_pic_info = {
.name = TYPE_MV88W8618_PIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(mv88w8618_pic_state),
+ .instance_init = mv88w8618_pic_init,
.class_init = mv88w8618_pic_class_init,
};
@@ -837,7 +845,7 @@ static void mv88w8618_timer_init(SysBusDevice *dev, mv88w8618_timer_state *s,
s->freq = freq;
bh = qemu_bh_new(mv88w8618_timer_tick, s);
- s->ptimer = ptimer_init(bh);
+ s->ptimer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
}
static uint64_t mv88w8618_pit_read(void *opaque, hwaddr offset,
@@ -913,8 +921,9 @@ static const MemoryRegionOps mv88w8618_pit_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int mv88w8618_pit_init(SysBusDevice *dev)
+static void mv88w8618_pit_init(Object *obj)
{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
mv88w8618_pit_state *s = MV88W8618_PIT(dev);
int i;
@@ -924,10 +933,9 @@ static int mv88w8618_pit_init(SysBusDevice *dev)
mv88w8618_timer_init(dev, &s->timer[i], 1000000);
}
- memory_region_init_io(&s->iomem, OBJECT(s), &mv88w8618_pit_ops, s,
+ memory_region_init_io(&s->iomem, obj, &mv88w8618_pit_ops, s,
"musicpal-pit", MP_PIT_SIZE);
sysbus_init_mmio(dev, &s->iomem);
- return 0;
}
static const VMStateDescription mv88w8618_timer_vmsd = {
@@ -955,9 +963,7 @@ static const VMStateDescription mv88w8618_pit_vmsd = {
static void mv88w8618_pit_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = mv88w8618_pit_init;
dc->reset = mv88w8618_pit_reset;
dc->vmsd = &mv88w8618_pit_vmsd;
}
@@ -966,6 +972,7 @@ static const TypeInfo mv88w8618_pit_info = {
.name = TYPE_MV88W8618_PIT,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(mv88w8618_pit_state),
+ .instance_init = mv88w8618_pit_init,
.class_init = mv88w8618_pit_class_init,
};
@@ -1018,15 +1025,15 @@ static const MemoryRegionOps mv88w8618_flashcfg_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int mv88w8618_flashcfg_init(SysBusDevice *dev)
+static void mv88w8618_flashcfg_init(Object *obj)
{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
mv88w8618_flashcfg_state *s = MV88W8618_FLASHCFG(dev);
s->cfgr0 = 0xfffe4285; /* Default as set by U-Boot for 8 MB flash */
- memory_region_init_io(&s->iomem, OBJECT(s), &mv88w8618_flashcfg_ops, s,
+ memory_region_init_io(&s->iomem, obj, &mv88w8618_flashcfg_ops, s,
"musicpal-flashcfg", MP_FLASHCFG_SIZE);
sysbus_init_mmio(dev, &s->iomem);
- return 0;
}
static const VMStateDescription mv88w8618_flashcfg_vmsd = {
@@ -1042,9 +1049,7 @@ static const VMStateDescription mv88w8618_flashcfg_vmsd = {
static void mv88w8618_flashcfg_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = mv88w8618_flashcfg_init;
dc->vmsd = &mv88w8618_flashcfg_vmsd;
}
@@ -1052,6 +1057,7 @@ static const TypeInfo mv88w8618_flashcfg_info = {
.name = TYPE_MV88W8618_FLASHCFG,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(mv88w8618_flashcfg_state),
+ .instance_init = mv88w8618_flashcfg_init,
.class_init = mv88w8618_flashcfg_class_init,
};
@@ -1350,22 +1356,21 @@ static void musicpal_gpio_reset(DeviceState *d)
s->isr = 0;
}
-static int musicpal_gpio_init(SysBusDevice *sbd)
+static void musicpal_gpio_init(Object *obj)
{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
DeviceState *dev = DEVICE(sbd);
musicpal_gpio_state *s = MUSICPAL_GPIO(dev);
sysbus_init_irq(sbd, &s->irq);
- memory_region_init_io(&s->iomem, OBJECT(s), &musicpal_gpio_ops, s,
+ memory_region_init_io(&s->iomem, obj, &musicpal_gpio_ops, s,
"musicpal-gpio", MP_GPIO_SIZE);
sysbus_init_mmio(sbd, &s->iomem);
qdev_init_gpio_out(dev, s->out, ARRAY_SIZE(s->out));
qdev_init_gpio_in(dev, musicpal_gpio_pin_event, 32);
-
- return 0;
}
static const VMStateDescription musicpal_gpio_vmsd = {
@@ -1386,9 +1391,7 @@ static const VMStateDescription musicpal_gpio_vmsd = {
static void musicpal_gpio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = musicpal_gpio_init;
dc->reset = musicpal_gpio_reset;
dc->vmsd = &musicpal_gpio_vmsd;
}
@@ -1397,6 +1400,7 @@ static const TypeInfo musicpal_gpio_info = {
.name = TYPE_MUSICPAL_GPIO,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(musicpal_gpio_state),
+ .instance_init = musicpal_gpio_init,
.class_init = musicpal_gpio_class_init,
};
@@ -1516,12 +1520,13 @@ static void musicpal_key_event(void *opaque, int keycode)
s->kbd_extended = 0;
}
-static int musicpal_key_init(SysBusDevice *sbd)
+static void musicpal_key_init(Object *obj)
{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
DeviceState *dev = DEVICE(sbd);
musicpal_key_state *s = MUSICPAL_KEY(dev);
- memory_region_init(&s->iomem, OBJECT(s), "dummy", 0);
+ memory_region_init(&s->iomem, obj, "dummy", 0);
sysbus_init_mmio(sbd, &s->iomem);
s->kbd_extended = 0;
@@ -1530,8 +1535,6 @@ static int musicpal_key_init(SysBusDevice *sbd)
qdev_init_gpio_out(dev, s->out, ARRAY_SIZE(s->out));
qemu_add_kbd_event_handler(musicpal_key_event, s);
-
- return 0;
}
static const VMStateDescription musicpal_key_vmsd = {
@@ -1548,9 +1551,7 @@ static const VMStateDescription musicpal_key_vmsd = {
static void musicpal_key_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = musicpal_key_init;
dc->vmsd = &musicpal_key_vmsd;
}
@@ -1558,6 +1559,7 @@ static const TypeInfo musicpal_key_info = {
.name = TYPE_MUSICPAL_KEY,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(musicpal_key_state),
+ .instance_init = musicpal_key_init,
.class_init = musicpal_key_class_init,
};
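
The musicpal hunks above are all instances of the same mechanical conversion: the legacy SysBusDeviceClass::init hook (an int-returning callback) is replaced by a TypeInfo.instance_init function taking an Object *. A minimal sketch of the pattern in isolation; "foo"/"FooState" and the register size are placeholder names, not part of the patch, and the MemoryRegionOps handlers are elided.

    #include "qemu/osdep.h"
    #include "hw/sysbus.h"

    #define TYPE_FOO "foo"
    #define FOO(obj) OBJECT_CHECK(FooState, (obj), TYPE_FOO)

    typedef struct FooState {
        SysBusDevice parent_obj;
        MemoryRegion iomem;
        qemu_irq irq;
    } FooState;

    static const MemoryRegionOps foo_ops;   /* read/write handlers elided */

    /* No int return value and no SysBusDevice argument any more */
    static void foo_instance_init(Object *obj)
    {
        SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
        FooState *s = FOO(obj);

        memory_region_init_io(&s->iomem, obj, &foo_ops, s, "foo", 0x1000);
        sysbus_init_mmio(sbd, &s->iomem);
        sysbus_init_irq(sbd, &s->irq);
    }

    static const TypeInfo foo_info = {
        .name          = TYPE_FOO,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(FooState),
        .instance_init = foo_instance_init,  /* replaces k->init */
        /* .class_init still wires up dc->vmsd, dc->reset, dc->realize */
    };
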
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index fea911e3e3..c86cf80514 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -786,8 +786,7 @@ static void n8x0_cbus_setup(struct n800_s *s)
static void n8x0_uart_setup(struct n800_s *s)
{
- CharDriverState *radio = uart_hci_init(
- qdev_get_gpio_in(s->mpu->gpio, N8X0_BT_HOST_WKUP_GPIO));
+ CharDriverState *radio = uart_hci_init();
qdev_connect_gpio_out(s->mpu->gpio, N8X0_BT_RESET_GPIO,
csrhci_pins_get(radio)[csrhci_pin_reset]);
diff --git a/hw/arm/omap2.c b/hw/arm/omap2.c
index 3a0d77714a..6f05c98d3e 100644
--- a/hw/arm/omap2.c
+++ b/hw/arm/omap2.c
@@ -621,7 +621,7 @@ struct omap_sti_s {
qemu_irq irq;
MemoryRegion iomem;
MemoryRegion iomem_fifo;
- CharDriverState *chr;
+ CharBackend chr;
uint32_t sysconfig;
uint32_t systest;
@@ -769,14 +769,17 @@ static void omap_sti_fifo_write(void *opaque, hwaddr addr,
if (ch == STI_TRACE_CONTROL_CHANNEL) {
/* Flush channel <i>value</i>. */
- qemu_chr_fe_write(s->chr, (const uint8_t *) "\r", 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\r", 1);
} else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) {
if (value == 0xc0 || value == 0xc3) {
/* Open channel <i>ch</i>. */
- } else if (value == 0x00)
- qemu_chr_fe_write(s->chr, (const uint8_t *) "\n", 1);
- else
- qemu_chr_fe_write(s->chr, &byte, 1);
+ } else if (value == 0x00) {
+ qemu_chr_fe_write_all(&s->chr, (const uint8_t *) "\n", 1);
+ } else {
+ qemu_chr_fe_write_all(&s->chr, &byte, 1);
+ }
}
}
@@ -796,7 +799,8 @@ static struct omap_sti_s *omap_sti_init(struct omap_target_agent_s *ta,
s->irq = irq;
omap_sti_reset(s);
- s->chr = chr ?: qemu_chr_new("null", "null", NULL);
+ qemu_chr_fe_init(&s->chr, chr ?: qemu_chr_new("null", "null"),
+ &error_abort);
memory_region_init_io(&s->iomem, NULL, &omap_sti_ops, s, "omap.sti",
omap_l4_region_size(ta, 0));
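
The omap_sti change is one instance of the chardev front-end conversion running through this series: the device embeds a CharBackend instead of holding a CharDriverState pointer, binds it with qemu_chr_fe_init(), and writes through qemu_chr_fe_write_all(), which copes with an unconnected backend (several later hunks drop their explicit s->chr checks for that reason). A condensed sketch of the calling pattern; the device name and the 2.8-era header path are assumptions.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/char.h"      /* CharBackend / qemu_chr_fe_* (2.8-era path) */

    typedef struct BarState {
        CharBackend chr;          /* embedded front end, not a pointer */
    } BarState;

    static void bar_attach_chardev(BarState *s, CharDriverState *chr)
    {
        /* Bind the front end to the driver (may be a "null" chardev) */
        qemu_chr_fe_init(&s->chr, chr, &error_abort);
    }

    static void bar_send(BarState *s, uint8_t byte)
    {
        /*
         * Blocking write, hence the "XXX this blocks entire thread"
         * comments added above; safe even if no driver is attached.
         */
        qemu_chr_fe_write_all(&s->chr, &byte, 1);
    }
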
diff --git a/hw/arm/palmetto-bmc.c b/hw/arm/palmetto-bmc.c
deleted file mode 100644
index 54e29a865d..0000000000
--- a/hw/arm/palmetto-bmc.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * OpenPOWER Palmetto BMC
- *
- * Andrew Jeffery <andrew@aj.id.au>
- *
- * Copyright 2016 IBM Corp.
- *
- * This code is licensed under the GPL version 2 or later. See
- * the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "qapi/error.h"
-#include "qemu-common.h"
-#include "cpu.h"
-#include "exec/address-spaces.h"
-#include "hw/arm/arm.h"
-#include "hw/arm/ast2400.h"
-#include "hw/boards.h"
-#include "qemu/log.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-
-static struct arm_boot_info palmetto_bmc_binfo = {
- .loader_start = AST2400_SDRAM_BASE,
- .board_id = 0,
- .nb_cpus = 1,
-};
-
-typedef struct PalmettoBMCState {
- AST2400State soc;
- MemoryRegion ram;
-} PalmettoBMCState;
-
-static void palmetto_bmc_init_flashes(AspeedSMCState *s, const char *flashtype,
- Error **errp)
-{
- int i ;
-
- for (i = 0; i < s->num_cs; ++i) {
- AspeedSMCFlash *fl = &s->flashes[i];
- DriveInfo *dinfo = drive_get_next(IF_MTD);
- qemu_irq cs_line;
-
- /*
- * FIXME: check that we are not using a flash module exceeding
- * the controller segment size
- */
- fl->flash = ssi_create_slave_no_init(s->spi, flashtype);
- if (dinfo) {
- qdev_prop_set_drive(fl->flash, "drive", blk_by_legacy_dinfo(dinfo),
- errp);
- }
- qdev_init_nofail(fl->flash);
-
- cs_line = qdev_get_gpio_in_named(fl->flash, SSI_GPIO_CS, 0);
- sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line);
- }
-}
-
-static void palmetto_bmc_init(MachineState *machine)
-{
- PalmettoBMCState *bmc;
-
- bmc = g_new0(PalmettoBMCState, 1);
- object_initialize(&bmc->soc, (sizeof(bmc->soc)), TYPE_AST2400);
- object_property_add_child(OBJECT(machine), "soc", OBJECT(&bmc->soc),
- &error_abort);
-
- memory_region_allocate_system_memory(&bmc->ram, NULL, "ram", ram_size);
- memory_region_add_subregion(get_system_memory(), AST2400_SDRAM_BASE,
- &bmc->ram);
- object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
- &error_abort);
- object_property_set_int(OBJECT(&bmc->soc), 0x120CE416, "hw-strap1",
- &error_abort);
- object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
- &error_abort);
-
- palmetto_bmc_init_flashes(&bmc->soc.smc, "n25q256a", &error_abort);
- palmetto_bmc_init_flashes(&bmc->soc.spi, "mx25l25635e", &error_abort);
-
- palmetto_bmc_binfo.kernel_filename = machine->kernel_filename;
- palmetto_bmc_binfo.initrd_filename = machine->initrd_filename;
- palmetto_bmc_binfo.kernel_cmdline = machine->kernel_cmdline;
- palmetto_bmc_binfo.ram_size = ram_size;
- arm_load_kernel(ARM_CPU(first_cpu), &palmetto_bmc_binfo);
-}
-
-static void palmetto_bmc_machine_init(MachineClass *mc)
-{
- mc->desc = "OpenPOWER Palmetto BMC";
- mc->init = palmetto_bmc_init;
- mc->max_cpus = 1;
- mc->no_sdcard = 1;
- mc->no_floppy = 1;
- mc->no_cdrom = 1;
- mc->no_sdcard = 1;
- mc->no_parallel = 1;
-}
-
-DEFINE_MACHINE("palmetto-bmc", palmetto_bmc_machine_init);
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index cb55704687..21ea1d6210 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -1505,7 +1505,7 @@ static void pxa2xx_i2c_initfn(Object *obj)
PXA2xxI2CState *s = PXA2XX_I2C(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- s->bus = i2c_init_bus(dev, "i2c");
+ s->bus = i2c_init_bus(dev, NULL);
memory_region_init_io(&s->iomem, obj, &pxa2xx_i2c_ops, s,
"pxa2xx-i2c", s->region_size);
@@ -1764,7 +1764,7 @@ struct PXA2xxFIrState {
qemu_irq rx_dma;
qemu_irq tx_dma;
uint32_t enable;
- CharDriverState *chr;
+ CharBackend chr;
uint8_t control[3];
uint8_t status[2];
@@ -1898,12 +1898,16 @@ static void pxa2xx_fir_write(void *opaque, hwaddr addr,
pxa2xx_fir_update(s);
break;
case ICDR:
- if (s->control[2] & (1 << 2)) /* TXP */
+ if (s->control[2] & (1 << 2)) { /* TXP */
ch = value;
- else
+ } else {
ch = ~value;
- if (s->chr && s->enable && (s->control[0] & (1 << 3))) /* TXE */
- qemu_chr_fe_write(s->chr, &ch, 1);
+ }
+ if (s->enable && (s->control[0] & (1 << 3))) { /* TXE */
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
+ }
break;
case ICSR0:
s->status[0] &= ~(value & 0x66);
@@ -1971,11 +1975,8 @@ static void pxa2xx_fir_realize(DeviceState *dev, Error **errp)
{
PXA2xxFIrState *s = PXA2XX_FIR(dev);
- if (s->chr) {
- qemu_chr_fe_claim_no_fail(s->chr);
- qemu_chr_add_handlers(s->chr, pxa2xx_fir_is_empty,
- pxa2xx_fir_rx, pxa2xx_fir_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, pxa2xx_fir_is_empty,
+ pxa2xx_fir_rx, pxa2xx_fir_event, s, NULL, true);
}
static bool pxa2xx_fir_vmstate_validate(void *opaque, int version_id)
@@ -2266,7 +2267,9 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD));
s->cm_base = 0x41300000;
- s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */
+ s->cm_regs[CCCR >> 2] = 0x00000121; /* from datasheet */
+ s->cm_regs[CKEN >> 2] = 0x00017def; /* from datasheet */
+
s->clkcfg = 0x00000009; /* Turbo mode active */
memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000);
memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem);
diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c
index 576a8eb91f..521dbad039 100644
--- a/hw/arm/pxa2xx_gpio.c
+++ b/hw/arm/pxa2xx_gpio.c
@@ -280,23 +280,28 @@ DeviceState *pxa2xx_gpio_init(hwaddr base,
return dev;
}
-static int pxa2xx_gpio_initfn(SysBusDevice *sbd)
+static void pxa2xx_gpio_initfn(Object *obj)
{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
DeviceState *dev = DEVICE(sbd);
PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev);
- s->cpu = ARM_CPU(qemu_get_cpu(s->ncpu));
-
- qdev_init_gpio_in(dev, pxa2xx_gpio_set, s->lines);
- qdev_init_gpio_out(dev, s->handler, s->lines);
-
- memory_region_init_io(&s->iomem, OBJECT(s), &pxa_gpio_ops, s, "pxa2xx-gpio", 0x1000);
+ memory_region_init_io(&s->iomem, obj, &pxa_gpio_ops,
+ s, "pxa2xx-gpio", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq0);
sysbus_init_irq(sbd, &s->irq1);
sysbus_init_irq(sbd, &s->irqX);
+}
- return 0;
+static void pxa2xx_gpio_realize(DeviceState *dev, Error **errp)
+{
+ PXA2xxGPIOInfo *s = PXA2XX_GPIO(dev);
+
+ s->cpu = ARM_CPU(qemu_get_cpu(s->ncpu));
+
+ qdev_init_gpio_in(dev, pxa2xx_gpio_set, s->lines);
+ qdev_init_gpio_out(dev, s->handler, s->lines);
}
/*
@@ -336,18 +341,18 @@ static Property pxa2xx_gpio_properties[] = {
static void pxa2xx_gpio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pxa2xx_gpio_initfn;
dc->desc = "PXA2xx GPIO controller";
dc->props = pxa2xx_gpio_properties;
dc->vmsd = &vmstate_pxa2xx_gpio_regs;
+ dc->realize = pxa2xx_gpio_realize;
}
static const TypeInfo pxa2xx_gpio_info = {
.name = TYPE_PXA2XX_GPIO,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PXA2xxGPIOInfo),
+ .instance_init = pxa2xx_gpio_initfn,
.class_init = pxa2xx_gpio_class_init,
};
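
pxa2xx_gpio shows the other half of the QOM conversion: object-local setup (the MMIO region, sysbus IRQ pins) moves to instance_init, while anything that reads a qdev property — here ncpu and lines — is deferred to a new realize hook, because property values are applied after instance_init and are only guaranteed final at realize time. A sketch of the split, continuing the placeholder names from the earlier sketch ("baz_set_pin", "baz_properties" and the state struct are illustrative and elided):

    static void baz_realize(DeviceState *dev, Error **errp)
    {
        BazState *s = BAZ(dev);

        /* s->lines is a qdev property; its value is only final here */
        qdev_init_gpio_in(dev, baz_set_pin, s->lines);
        qdev_init_gpio_out(dev, s->handler, s->lines);
    }

    static void baz_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);

        dc->realize = baz_realize;    /* property-dependent work moved here */
        dc->props   = baz_properties; /* e.g. DEFINE_PROP_UINT32("lines", ...) */
    }
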
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index 41cc2eeeb1..949a15ae64 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -29,6 +29,7 @@
#include "sysemu/block-backend.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
+#include "sysemu/sysemu.h"
#undef REG_FMT
#define REG_FMT "0x%02lx"
@@ -844,9 +845,18 @@ static void spitz_lcd_hsync_handler(void *opaque, int line, int level)
spitz_hsync ^= 1;
}
+static void spitz_reset(void *opaque, int line, int level)
+{
+ if (level) {
+ qemu_system_reset_request();
+ }
+}
+
static void spitz_gpio_setup(PXA2xxState *cpu, int slots)
{
qemu_irq lcd_hsync;
+ qemu_irq reset;
+
/*
* Bad hack: We toggle the LCD hsync GPIO on every GPIO status
* read to satisfy broken guests that poll-wait for hsync.
@@ -867,7 +877,8 @@ static void spitz_gpio_setup(PXA2xxState *cpu, int slots)
qemu_irq_raise(qdev_get_gpio_in(cpu->gpio, SPITZ_GPIO_BAT_COVER));
/* Handle reset */
- qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_ON_RESET, cpu->reset);
+ reset = qemu_allocate_irq(spitz_reset, cpu, 0);
+ qdev_connect_gpio_out(cpu->gpio, SPITZ_GPIO_ON_RESET, reset);
/* PCMCIA signals: card's IRQ and Card-Detect */
if (slots >= 1)
diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c
index de26b8caff..38425bda6c 100644
--- a/hw/arm/stm32f205_soc.c
+++ b/hw/arm/stm32f205_soc.c
@@ -34,9 +34,15 @@ static const uint32_t timer_addr[STM_NUM_TIMERS] = { 0x40000000, 0x40000400,
0x40000800, 0x40000C00 };
static const uint32_t usart_addr[STM_NUM_USARTS] = { 0x40011000, 0x40004400,
0x40004800, 0x40004C00, 0x40005000, 0x40011400 };
+static const uint32_t adc_addr[STM_NUM_ADCS] = { 0x40012000, 0x40012100,
+ 0x40012200 };
+static const uint32_t spi_addr[STM_NUM_SPIS] = { 0x40013000, 0x40003800,
+ 0x40003C00 };
static const int timer_irq[STM_NUM_TIMERS] = {28, 29, 30, 50};
static const int usart_irq[STM_NUM_USARTS] = {37, 38, 39, 52, 53, 71};
+#define ADC_IRQ 18
+static const int spi_irq[STM_NUM_SPIS] = {35, 36, 51};
static void stm32f205_soc_initfn(Object *obj)
{
@@ -57,13 +63,27 @@ static void stm32f205_soc_initfn(Object *obj)
TYPE_STM32F2XX_TIMER);
qdev_set_parent_bus(DEVICE(&s->timer[i]), sysbus_get_default());
}
+
+ s->adc_irqs = OR_IRQ(object_new(TYPE_OR_IRQ));
+
+ for (i = 0; i < STM_NUM_ADCS; i++) {
+ object_initialize(&s->adc[i], sizeof(s->adc[i]),
+ TYPE_STM32F2XX_ADC);
+ qdev_set_parent_bus(DEVICE(&s->adc[i]), sysbus_get_default());
+ }
+
+ for (i = 0; i < STM_NUM_SPIS; i++) {
+ object_initialize(&s->spi[i], sizeof(s->spi[i]),
+ TYPE_STM32F2XX_SPI);
+ qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
+ }
}
static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp)
{
STM32F205State *s = STM32F205_SOC(dev_soc);
- DeviceState *syscfgdev, *usartdev, *timerdev, *nvic;
- SysBusDevice *syscfgbusdev, *usartbusdev, *timerbusdev;
+ DeviceState *dev, *nvic;
+ SysBusDevice *busdev;
Error *err = NULL;
int i;
@@ -94,44 +114,80 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp)
s->kernel_filename, s->cpu_model);
/* System configuration controller */
- syscfgdev = DEVICE(&s->syscfg);
+ dev = DEVICE(&s->syscfg);
object_property_set_bool(OBJECT(&s->syscfg), true, "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
- syscfgbusdev = SYS_BUS_DEVICE(syscfgdev);
- sysbus_mmio_map(syscfgbusdev, 0, 0x40013800);
- sysbus_connect_irq(syscfgbusdev, 0, qdev_get_gpio_in(nvic, 71));
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, 0x40013800);
+ sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(nvic, 71));
/* Attach UART (uses USART registers) and USART controllers */
for (i = 0; i < STM_NUM_USARTS; i++) {
- usartdev = DEVICE(&(s->usart[i]));
- qdev_prop_set_chr(usartdev, "chardev", i < MAX_SERIAL_PORTS ? serial_hds[i] : NULL);
+ dev = DEVICE(&(s->usart[i]));
+ qdev_prop_set_chr(dev, "chardev",
+ i < MAX_SERIAL_PORTS ? serial_hds[i] : NULL);
object_property_set_bool(OBJECT(&s->usart[i]), true, "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
- usartbusdev = SYS_BUS_DEVICE(usartdev);
- sysbus_mmio_map(usartbusdev, 0, usart_addr[i]);
- sysbus_connect_irq(usartbusdev, 0,
- qdev_get_gpio_in(nvic, usart_irq[i]));
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, usart_addr[i]);
+ sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(nvic, usart_irq[i]));
}
/* Timer 2 to 5 */
for (i = 0; i < STM_NUM_TIMERS; i++) {
- timerdev = DEVICE(&(s->timer[i]));
- qdev_prop_set_uint64(timerdev, "clock-frequency", 1000000000);
+ dev = DEVICE(&(s->timer[i]));
+ qdev_prop_set_uint64(dev, "clock-frequency", 1000000000);
object_property_set_bool(OBJECT(&s->timer[i]), true, "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
- timerbusdev = SYS_BUS_DEVICE(timerdev);
- sysbus_mmio_map(timerbusdev, 0, timer_addr[i]);
- sysbus_connect_irq(timerbusdev, 0,
- qdev_get_gpio_in(nvic, timer_irq[i]));
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, timer_addr[i]);
+ sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(nvic, timer_irq[i]));
+ }
+
+ /* ADC 1 to 3 */
+ object_property_set_int(OBJECT(s->adc_irqs), STM_NUM_ADCS,
+ "num-lines", &err);
+ object_property_set_bool(OBJECT(s->adc_irqs), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ qdev_connect_gpio_out(DEVICE(s->adc_irqs), 0,
+ qdev_get_gpio_in(nvic, ADC_IRQ));
+
+ for (i = 0; i < STM_NUM_ADCS; i++) {
+ dev = DEVICE(&(s->adc[i]));
+ object_property_set_bool(OBJECT(&s->adc[i]), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, adc_addr[i]);
+ sysbus_connect_irq(busdev, 0,
+ qdev_get_gpio_in(DEVICE(s->adc_irqs), i));
+ }
+
+ /* SPI 1 and 2 */
+ for (i = 0; i < STM_NUM_SPIS; i++) {
+ dev = DEVICE(&(s->spi[i]));
+ object_property_set_bool(OBJECT(&s->spi[i]), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, spi_addr[i]);
+ sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(nvic, spi_irq[i]));
}
}
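
The stm32f205 additions funnel the three ADC interrupt outputs through a single TYPE_OR_IRQ device (new in hw/core/or-irq.c per the diffstat) before they reach NVIC input 18, since the SoC exposes only one shared ADC interrupt. The wiring pattern reduced to its essentials, assuming the usual sysbus/qdev headers; the or_gate object is created with object_new(TYPE_OR_IRQ) as in stm32f205_soc_initfn() above, and num_src/irq_line are illustrative parameters.

    static void wire_shared_irq(DeviceState *or_gate, DeviceState *nvic,
                                SysBusDevice **src, int num_src, int irq_line,
                                Error **errp)
    {
        int i;

        object_property_set_int(OBJECT(or_gate), num_src, "num-lines", errp);
        object_property_set_bool(OBJECT(or_gate), true, "realized", errp);

        /* The single output of the OR gate drives the shared CPU interrupt */
        qdev_connect_gpio_out(or_gate, 0, qdev_get_gpio_in(nvic, irq_line));

        /* Each source device drives one input line of the OR gate */
        for (i = 0; i < num_src; i++) {
            sysbus_connect_irq(src[i], 0, qdev_get_gpio_in(or_gate, i));
        }
    }
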
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
index f1b2c6c966..3311cc38a4 100644
--- a/hw/arm/strongarm.c
+++ b/hw/arm/strongarm.c
@@ -912,7 +912,7 @@ typedef struct StrongARMUARTState {
SysBusDevice parent_obj;
MemoryRegion iomem;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
uint8_t utcr0;
@@ -1020,9 +1020,7 @@ static void strongarm_uart_update_parameters(StrongARMUARTState *s)
ssp.data_bits = data_bits;
ssp.stop_bits = stop_bits;
s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size;
- if (s->chr) {
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
- }
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
DPRINTF(stderr, "%s speed=%d parity=%c data=%d stop=%d\n", s->chr->label,
speed, parity, data_bits, stop_bits);
@@ -1107,8 +1105,10 @@ static void strongarm_uart_tx(void *opaque)
if (s->utcr3 & UTCR3_LBM) /* loopback */ {
strongarm_uart_receive(s, &s->tx_fifo[s->tx_start], 1);
- } else if (s->chr) {
- qemu_chr_fe_write(s->chr, &s->tx_fifo[s->tx_start], 1);
+ } else if (qemu_chr_fe_get_driver(&s->chr)) {
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &s->tx_fifo[s->tx_start], 1);
}
s->tx_start = (s->tx_start + 1) % 8;
@@ -1236,14 +1236,17 @@ static void strongarm_uart_init(Object *obj)
s->rx_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, strongarm_uart_rx_to, s);
s->tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, strongarm_uart_tx, s);
+}
- if (s->chr) {
- qemu_chr_add_handlers(s->chr,
- strongarm_uart_can_receive,
- strongarm_uart_receive,
- strongarm_uart_event,
- s);
- }
+static void strongarm_uart_realize(DeviceState *dev, Error **errp)
+{
+ StrongARMUARTState *s = STRONGARM_UART(dev);
+
+ qemu_chr_fe_set_handlers(&s->chr,
+ strongarm_uart_can_receive,
+ strongarm_uart_receive,
+ strongarm_uart_event,
+ s, NULL, true);
}
static void strongarm_uart_reset(DeviceState *dev)
@@ -1318,6 +1321,7 @@ static void strongarm_uart_class_init(ObjectClass *klass, void *data)
dc->reset = strongarm_uart_reset;
dc->vmsd = &vmstate_strongarm_uart_regs;
dc->props = strongarm_uart_properties;
+ dc->realize = strongarm_uart_realize;
}
static const TypeInfo strongarm_uart_info = {
@@ -1518,19 +1522,19 @@ static int strongarm_ssp_post_load(void *opaque, int version_id)
return 0;
}
-static int strongarm_ssp_init(SysBusDevice *sbd)
+static void strongarm_ssp_init(Object *obj)
{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
DeviceState *dev = DEVICE(sbd);
StrongARMSSPState *s = STRONGARM_SSP(dev);
sysbus_init_irq(sbd, &s->irq);
- memory_region_init_io(&s->iomem, OBJECT(s), &strongarm_ssp_ops, s,
+ memory_region_init_io(&s->iomem, obj, &strongarm_ssp_ops, s,
"ssp", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
s->bus = ssi_create_bus(dev, "ssi");
- return 0;
}
static void strongarm_ssp_reset(DeviceState *dev)
@@ -1560,9 +1564,7 @@ static const VMStateDescription vmstate_strongarm_ssp_regs = {
static void strongarm_ssp_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = strongarm_ssp_init;
dc->desc = "StrongARM SSP controller";
dc->reset = strongarm_ssp_reset;
dc->vmsd = &vmstate_strongarm_ssp_regs;
@@ -1572,6 +1574,7 @@ static const TypeInfo strongarm_ssp_info = {
.name = TYPE_STRONGARM_SSP,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(StrongARMSSPState),
+ .instance_init = strongarm_ssp_init,
.class_init = strongarm_ssp_class_init,
};
diff --git a/hw/arm/sysbus-fdt.c b/hw/arm/sysbus-fdt.c
index 5debb3348c..d68e3dcdbd 100644
--- a/hw/arm/sysbus-fdt.c
+++ b/hw/arm/sysbus-fdt.c
@@ -436,7 +436,7 @@ static const NodeCreationPair add_fdt_node_functions[] = {
* are dynamically instantiable and if so call the node creation
* function.
*/
-static int add_fdt_node(SysBusDevice *sbdev, void *opaque)
+static void add_fdt_node(SysBusDevice *sbdev, void *opaque)
{
int i, ret;
@@ -445,7 +445,7 @@ static int add_fdt_node(SysBusDevice *sbdev, void *opaque)
add_fdt_node_functions[i].typename)) {
ret = add_fdt_node_functions[i].add_fdt_node_fn(sbdev, opaque);
assert(!ret);
- return 0;
+ return;
}
}
error_report("Device %s can not be dynamically instantiated",
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
index 2db66508b5..1ee12f49b3 100644
--- a/hw/arm/tosa.c
+++ b/hw/arm/tosa.c
@@ -25,6 +25,7 @@
#include "sysemu/block-backend.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
+#include "sysemu/sysemu.h"
#define TOSA_RAM 0x04000000
#define TOSA_ROM 0x00800000
@@ -86,6 +87,12 @@ static void tosa_out_switch(void *opaque, int line, int level)
}
}
+static void tosa_reset(void *opaque, int line, int level)
+{
+ if (level) {
+ qemu_system_reset_request();
+ }
+}
static void tosa_gpio_setup(PXA2xxState *cpu,
DeviceState *scp0,
@@ -93,13 +100,16 @@ static void tosa_gpio_setup(PXA2xxState *cpu,
TC6393xbState *tmio)
{
qemu_irq *outsignals = qemu_allocate_irqs(tosa_out_switch, cpu, 4);
+ qemu_irq reset;
+
/* MMC/SD host */
pxa2xx_mmci_handlers(cpu->mmc,
qdev_get_gpio_in(scp0, TOSA_GPIO_SD_WP),
qemu_irq_invert(qdev_get_gpio_in(cpu->gpio, TOSA_GPIO_nSD_DETECT)));
/* Handle reset */
- qdev_connect_gpio_out(cpu->gpio, TOSA_GPIO_ON_RESET, cpu->reset);
+ reset = qemu_allocate_irq(tosa_reset, cpu, 0);
+ qdev_connect_gpio_out(cpu->gpio, TOSA_GPIO_ON_RESET, reset);
/* PCMCIA signals: card's IRQ and Card-Detect */
pxa2xx_pcmcia_set_irq_cb(cpu->pcmcia[0],
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
index 8ae5392bcc..7b5cb36d5a 100644
--- a/hw/arm/versatilepb.c
+++ b/hw/arm/versatilepb.c
@@ -198,6 +198,15 @@ static void versatile_init(MachineState *machine, int board_id)
int done_smc = 0;
DriveInfo *dinfo;
+ if (machine->ram_size > 0x10000000) {
+ /* Device starting at address 0x10000000,
+ * and memory cannot overlap with devices.
+ * Refuse to run rather than behaving very confusingly.
+ */
+ error_report("versatilepb: memory size must not exceed 256MB");
+ exit(1);
+ }
+
if (!machine->cpu_model) {
machine->cpu_model = "arm926";
}
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 28fc59c665..d4160dfa7d 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -44,6 +44,7 @@
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "sysemu/numa.h"
+#include "kvm_arm.h"
#define ARM_SPI_BASE 32
#define ACPI_POWER_BUTTON_DEVICE "PWRB"
@@ -53,7 +54,7 @@ static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
uint16_t i;
for (i = 0; i < smp_cpus; i++) {
- Aml *dev = aml_device("C%03x", i);
+ Aml *dev = aml_device("C%.03X", i);
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
aml_append(dev, aml_name_decl("_UID", aml_int(i)));
aml_append(scope, dev);
@@ -383,6 +384,61 @@ build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned rsdt_tbl_offset)
}
static void
+build_iort(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+{
+ int iort_start = table_data->len;
+ AcpiIortIdMapping *idmap;
+ AcpiIortItsGroup *its;
+ AcpiIortTable *iort;
+ size_t node_size, iort_length;
+ AcpiIortRC *rc;
+
+ iort = acpi_data_push(table_data, sizeof(*iort));
+
+ iort_length = sizeof(*iort);
+ iort->node_count = cpu_to_le32(2); /* RC and ITS nodes */
+ iort->node_offset = cpu_to_le32(sizeof(*iort));
+
+ /* ITS group node */
+ node_size = sizeof(*its) + sizeof(uint32_t);
+ iort_length += node_size;
+ its = acpi_data_push(table_data, node_size);
+
+ its->type = ACPI_IORT_NODE_ITS_GROUP;
+ its->length = cpu_to_le16(node_size);
+ its->its_count = cpu_to_le32(1);
+ its->identifiers[0] = 0; /* MADT translation_id */
+
+ /* Root Complex Node */
+ node_size = sizeof(*rc) + sizeof(*idmap);
+ iort_length += node_size;
+ rc = acpi_data_push(table_data, node_size);
+
+ rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
+ rc->length = cpu_to_le16(node_size);
+ rc->mapping_count = cpu_to_le32(1);
+ rc->mapping_offset = cpu_to_le32(sizeof(*rc));
+
+ /* fully coherent device */
+ rc->memory_properties.cache_coherency = cpu_to_le32(1);
+ rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
+ rc->pci_segment_number = 0; /* MCFG pci_segment */
+
+ /* Identity RID mapping covering the whole input RID range */
+ idmap = &rc->id_mapping_array[0];
+ idmap->input_base = 0;
+ idmap->id_count = cpu_to_le32(0xFFFF);
+ idmap->output_base = 0;
+ /* output IORT node is the ITS group node (the first node) */
+ idmap->output_reference = cpu_to_le32(iort->node_offset);
+
+ iort->length = cpu_to_le32(iort_length);
+
+ build_header(linker, table_data, (void *)(table_data->data + iort_start),
+ "IORT", table_data->len - iort_start, 0, NULL, NULL);
+}
+
+static void
build_spcr(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
{
AcpiSerialPortConsoleRedirection *spcr;
@@ -426,11 +482,9 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
uint32_t *cpu_node = g_malloc0(guest_info->smp_cpus * sizeof(uint32_t));
for (i = 0; i < guest_info->smp_cpus; i++) {
- for (j = 0; j < nb_numa_nodes; j++) {
- if (test_bit(i, numa_info[j].node_cpu)) {
+ j = numa_get_node_for_cpu(i);
+ if (j < nb_numa_nodes) {
cpu_node[i] = j;
- break;
- }
}
}
@@ -540,12 +594,13 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
gicc->uid = i;
gicc->flags = cpu_to_le32(ACPI_GICC_ENABLED);
- if (armcpu->has_pmu) {
+ if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
}
}
if (guest_info->gic_version == 3) {
+ AcpiMadtGenericTranslator *gic_its;
AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
sizeof *gicr);
@@ -553,6 +608,14 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
gicr->length = sizeof(*gicr);
gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);
+
+ if (its_class_name() && !guest_info->no_its) {
+ gic_its = acpi_data_push(table_data, sizeof *gic_its);
+ gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
+ gic_its->length = sizeof(*gic_its);
+ gic_its->translation_id = 0;
+ gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base);
+ }
} else {
gic_msi = acpi_data_push(table_data, sizeof *gic_msi);
gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME;
@@ -659,17 +722,6 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
ACPI_BUILD_TABLE_FILE, tables_blob,
64, false /* high memory */);
- /*
- * The ACPI v5.1 tables for Hardware-reduced ACPI platform are:
- * RSDP
- * RSDT
- * FADT
- * GTDT
- * MADT
- * MCFG
- * DSDT
- */
-
/* DSDT is pointed to by FADT */
dsdt = tables_blob->len;
build_dsdt(tables_blob, tables->linker, guest_info);
@@ -695,6 +747,11 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
build_srat(tables_blob, tables->linker, guest_info);
}
+ if (its_class_name() && !guest_info->no_its) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_iort(tables_blob, tables->linker, guest_info);
+ }
+
/* RSDT is pointed to by RSDP */
rsdt = tables_blob->len;
build_rsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);
@@ -752,7 +809,7 @@ static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
uint64_t max_size)
{
return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
- name, virt_acpi_build_update, build_state);
+ name, virt_acpi_build_update, build_state, NULL);
}
static const VMStateDescription vmstate_virt_acpi_build = {
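
build_iort() above follows the standard pattern for emitting an ACPI table into the firmware blob: remember the start offset, grow the GArray with acpi_data_push() for each node, then let build_header() fill in signature, length and checksum over the whole range. Stripped down to the skeleton below; the "FOO " signature, revision and payload are placeholders, and the usual acpi build headers are assumed.

    static void build_foo(GArray *table_data, BIOSLinker *linker)
    {
        int start = table_data->len;   /* this table's offset in the blob */
        uint32_t *payload;

        /* Reserve room for the standard header; build_header() fills it in */
        acpi_data_push(table_data, sizeof(AcpiTableHeader));

        /* Append table-specific data; acpi_data_push() may reallocate the
         * blob, so pointers into it should not be kept across calls */
        payload = acpi_data_push(table_data, sizeof(*payload));
        *payload = cpu_to_le32(42);

        build_header(linker, table_data, (void *)(table_data->data + start),
                     "FOO ", table_data->len - start, 1, NULL, NULL);
    }
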
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index a193b5a95b..d04e4acbd9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -76,7 +76,7 @@ typedef struct VirtBoardInfo {
int fdt_size;
uint32_t clock_phandle;
uint32_t gic_phandle;
- uint32_t v2m_phandle;
+ uint32_t msi_phandle;
bool using_psci;
} VirtBoardInfo;
@@ -84,6 +84,8 @@ typedef struct {
MachineClass parent;
VirtBoardInfo *daughterboard;
bool disallow_affinity_adjustment;
+ bool no_its;
+ bool no_pmu;
} VirtMachineClass;
typedef struct {
@@ -413,19 +415,31 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
armcpu->mp_affinity);
}
- for (i = 0; i < nb_numa_nodes; i++) {
- if (test_bit(cpu, numa_info[i].node_cpu)) {
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "numa-node-id", i);
- }
+ i = numa_get_node_for_cpu(cpu);
+ if (i < nb_numa_nodes) {
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "numa-node-id", i);
}
g_free(nodename);
}
}
+static void fdt_add_its_gic_node(VirtBoardInfo *vbi)
+{
+ vbi->msi_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
+ qemu_fdt_add_subnode(vbi->fdt, "/intc/its");
+ qemu_fdt_setprop_string(vbi->fdt, "/intc/its", "compatible",
+ "arm,gic-v3-its");
+ qemu_fdt_setprop(vbi->fdt, "/intc/its", "msi-controller", NULL, 0);
+ qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc/its", "reg",
+ 2, vbi->memmap[VIRT_GIC_ITS].base,
+ 2, vbi->memmap[VIRT_GIC_ITS].size);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc/its", "phandle", vbi->msi_phandle);
+}
+
static void fdt_add_v2m_gic_node(VirtBoardInfo *vbi)
{
- vbi->v2m_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
+ vbi->msi_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
qemu_fdt_add_subnode(vbi->fdt, "/intc/v2m");
qemu_fdt_setprop_string(vbi->fdt, "/intc/v2m", "compatible",
"arm,gic-v2m-frame");
@@ -433,7 +447,7 @@ static void fdt_add_v2m_gic_node(VirtBoardInfo *vbi)
qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc/v2m", "reg",
2, vbi->memmap[VIRT_GIC_V2M].base,
2, vbi->memmap[VIRT_GIC_V2M].size);
- qemu_fdt_setprop_cell(vbi->fdt, "/intc/v2m", "phandle", vbi->v2m_phandle);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc/v2m", "phandle", vbi->msi_phandle);
}
static void fdt_add_gic_node(VirtBoardInfo *vbi, int type)
@@ -477,7 +491,7 @@ static void fdt_add_pmu_nodes(const VirtBoardInfo *vbi, int gictype)
CPU_FOREACH(cpu) {
armcpu = ARM_CPU(cpu);
- if (!armcpu->has_pmu ||
+ if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU) ||
!kvm_arm_pmu_create(cpu, PPI(VIRTUAL_PMU_IRQ))) {
return;
}
@@ -500,6 +514,26 @@ static void fdt_add_pmu_nodes(const VirtBoardInfo *vbi, int gictype)
}
}
+static void create_its(VirtBoardInfo *vbi, DeviceState *gicdev)
+{
+ const char *itsclass = its_class_name();
+ DeviceState *dev;
+
+ if (!itsclass) {
+ /* Do nothing if not supported */
+ return;
+ }
+
+ dev = qdev_create(NULL, itsclass);
+
+ object_property_set_link(OBJECT(dev), OBJECT(gicdev), "parent-gicv3",
+ &error_abort);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vbi->memmap[VIRT_GIC_ITS].base);
+
+ fdt_add_its_gic_node(vbi);
+}
+
static void create_v2m(VirtBoardInfo *vbi, qemu_irq *pic)
{
int i;
@@ -519,7 +553,8 @@ static void create_v2m(VirtBoardInfo *vbi, qemu_irq *pic)
fdt_add_v2m_gic_node(vbi);
}
-static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, int type, bool secure)
+static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, int type,
+ bool secure, bool no_its)
{
/* We create a standalone GIC */
DeviceState *gicdev;
@@ -583,7 +618,9 @@ static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, int type, bool secure)
fdt_add_gic_node(vbi, type);
- if (type == 2) {
+ if (type == 3 && !no_its) {
+ create_its(vbi, gicdev);
+ } else if (type == 2) {
create_v2m(vbi, pic);
}
}
@@ -892,9 +929,11 @@ static void create_fw_cfg(const VirtBoardInfo *vbi, AddressSpace *as)
{
hwaddr base = vbi->memmap[VIRT_FW_CFG].base;
hwaddr size = vbi->memmap[VIRT_FW_CFG].size;
+ FWCfgState *fw_cfg;
char *nodename;
- fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, as);
+ fw_cfg = fw_cfg_init_mem_wide(base + 8, base, 8, base + 16, as);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
qemu_fdt_add_subnode(vbi->fdt, nodename);
@@ -1025,9 +1064,9 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
nr_pcie_buses - 1);
qemu_fdt_setprop(vbi->fdt, nodename, "dma-coherent", NULL, 0);
- if (vbi->v2m_phandle) {
+ if (vbi->msi_phandle) {
qemu_fdt_setprop_cells(vbi->fdt, nodename, "msi-parent",
- vbi->v2m_phandle);
+ vbi->msi_phandle);
}
qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
@@ -1317,6 +1356,10 @@ static void machvirt_init(MachineState *machine)
}
}
+ if (vmc->no_pmu && object_property_find(cpuobj, "pmu", NULL)) {
+ object_property_set_bool(cpuobj, false, "pmu", NULL);
+ }
+
if (object_property_find(cpuobj, "reset-cbar", NULL)) {
object_property_set_int(cpuobj, vbi->memmap[VIRT_CPUPERIPHS].base,
"reset-cbar", &error_abort);
@@ -1341,7 +1384,7 @@ static void machvirt_init(MachineState *machine)
create_flash(vbi, sysmem, secure_sysmem ? secure_sysmem : sysmem);
- create_gic(vbi, pic, gic_version, vms->secure);
+ create_gic(vbi, pic, gic_version, vms->secure, vmc->no_its);
fdt_add_pmu_nodes(vbi, gic_version);
@@ -1373,6 +1416,7 @@ static void machvirt_init(MachineState *machine)
guest_info->irqmap = vbi->irqmap;
guest_info->use_highmem = vms->highmem;
guest_info->gic_version = gic_version;
+ guest_info->no_its = vmc->no_its;
guest_info_state->machine_done.notify = virt_guest_info_machine_done;
qemu_add_machine_init_done_notifier(&guest_info_state->machine_done);
@@ -1457,11 +1501,13 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
* it later in machvirt_init, where we have more information about the
* configuration of the particular instance.
*/
- mc->max_cpus = MAX_CPUMASK_BITS;
+ mc->max_cpus = 255;
mc->has_dynamic_sysbus = true;
mc->block_default_type = IF_VIRTIO;
mc->no_cdrom = 1;
mc->pci_allow_0_address = true;
+ /* We know we will never create a pre-ARMv7 CPU which needs 1K pages */
+ mc->minimum_page_bits = 12;
}
static const TypeInfo virt_machine_info = {
@@ -1479,7 +1525,7 @@ static void machvirt_machine_init(void)
}
type_init(machvirt_machine_init);
-static void virt_2_7_instance_init(Object *obj)
+static void virt_2_8_instance_init(Object *obj)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1512,10 +1558,31 @@ static void virt_2_7_instance_init(Object *obj)
"Valid values are 2, 3 and host", NULL);
}
+static void virt_machine_2_8_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(2, 8)
+
+#define VIRT_COMPAT_2_7 \
+ HW_COMPAT_2_7
+
+static void virt_2_7_instance_init(Object *obj)
+{
+ virt_2_8_instance_init(obj);
+}
+
static void virt_machine_2_7_options(MachineClass *mc)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+ virt_machine_2_8_options(mc);
+ SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_7);
+ /* ITS was introduced with 2.8 */
+ vmc->no_its = true;
+ /* Stick with 1K pages for migration compatibility */
+ mc->minimum_page_bits = 0;
}
-DEFINE_VIRT_MACHINE_AS_LATEST(2, 7)
+DEFINE_VIRT_MACHINE(2, 7)
#define VIRT_COMPAT_2_6 \
HW_COMPAT_2_6
@@ -1532,5 +1599,7 @@ static void virt_machine_2_6_options(MachineClass *mc)
virt_machine_2_7_options(mc);
SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_6);
vmc->disallow_affinity_adjustment = true;
+ /* Disable PMU for 2.6 as PMU support was first introduced in 2.7 */
+ vmc->no_pmu = true;
}
DEFINE_VIRT_MACHINE(2, 6)
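
The virt changes are the usual recipe for introducing a new versioned machine type: 2.8 becomes the latest, while 2.7 and 2.6 first apply the newer options and then mask features that did not exist in their release through VirtMachineClass flags, so existing guests keep their behaviour and migration format. A condensed view of the chain, collecting only the flags shown in the hunks above:

    static void virt_machine_2_8_options(MachineClass *mc)
    {
        /* latest version: nothing to mask */
    }

    static void virt_machine_2_7_options(MachineClass *mc)
    {
        VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

        virt_machine_2_8_options(mc);
        SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_7);
        vmc->no_its = true;              /* ITS is new in 2.8 */
        mc->minimum_page_bits = 0;       /* keep pre-2.8 migration format */
    }

    static void virt_machine_2_6_options(MachineClass *mc)
    {
        VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));

        virt_machine_2_7_options(mc);
        SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_6);
        vmc->disallow_affinity_adjustment = true;
        vmc->no_pmu = true;              /* PMU support is new in 2.7 */
    }
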
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 23c7199867..0d86ba35ae 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -332,6 +332,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
qemu_check_nic_model(nd, TYPE_CADENCE_GEM);
qdev_set_nic_properties(DEVICE(&s->gem[i]), nd);
}
+ object_property_set_int(OBJECT(&s->gem[i]), 2, "num-priority-queues",
+ &error_abort);
object_property_set_bool(OBJECT(&s->gem[i]), true, "realized", &err);
if (err) {
error_propagate(errp, err);
diff --git a/hw/audio/gus.c b/hw/audio/gus.c
index 6c02646773..3d08a6576a 100644
--- a/hw/audio/gus.c
+++ b/hw/audio/gus.c
@@ -60,6 +60,8 @@ typedef struct GUSState {
int64_t last_ticks;
qemu_irq pic;
IsaDma *isa_dma;
+ PortioList portio_list1;
+ PortioList portio_list2;
} GUSState;
static uint32_t gus_readb(void *opaque, uint32_t nport)
@@ -265,9 +267,10 @@ static void gus_realizefn (DeviceState *dev, Error **errp)
s->samples = AUD_get_buffer_size_out (s->voice) >> s->shift;
s->mixbuf = g_malloc0 (s->samples << s->shift);
- isa_register_portio_list (d, s->port, gus_portio_list1, s, "gus");
- isa_register_portio_list (d, (s->port + 0x100) & 0xf00,
- gus_portio_list2, s, "gus");
+ isa_register_portio_list(d, &s->portio_list1, s->port,
+ gus_portio_list1, s, "gus");
+ isa_register_portio_list(d, &s->portio_list2, (s->port + 0x100) & 0xf00,
+ gus_portio_list2, s, "gus");
s->isa_dma = isa_get_dma(isa_bus_from_device(d), s->emu.gusdma);
k = ISADMA_GET_CLASS(s->isa_dma);
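
gus (and sb16 and the ISA fdc below) adapt to the new isa_register_portio_list() signature, which takes a PortioList embedded in the device state instead of allocating one behind the caller's back, tying its lifetime to the device. A sketch of the new convention; the device type and port range are illustrative and the I/O handlers are left out.

    #include "qemu/osdep.h"
    #include "hw/isa/isa.h"

    #define TYPE_QUX "qux"
    #define QUX(obj) OBJECT_CHECK(QuxState, (obj), TYPE_QUX)

    typedef struct QuxState {
        ISADevice parent_obj;
        uint32_t iobase;
        PortioList portio_list;   /* owned by the device, not by the helper */
    } QuxState;

    static const MemoryRegionPortio qux_portio_list[] = {
        { 0x0, 8, 1, .read = NULL, .write = NULL },   /* handlers elided */
        PORTIO_END_OF_LIST(),
    };

    static void qux_realizefn(DeviceState *dev, Error **errp)
    {
        QuxState *s = QUX(dev);

        isa_register_portio_list(ISA_DEVICE(dev), &s->portio_list,
                                 s->iobase, qux_portio_list, s, "qux");
    }
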
diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c
index cd95340cd9..537face94d 100644
--- a/hw/audio/intel-hda.c
+++ b/hw/audio/intel-hda.c
@@ -416,7 +416,8 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output,
}
left = len;
- while (left > 0) {
+ s = st->bentries;
+ while (left > 0 && s-- > 0) {
copy = left;
if (copy > st->bsize - st->lpib)
copy = st->bsize - st->lpib;
diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c
index 42a6f4885a..798002277b 100644
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -52,8 +52,9 @@ typedef struct {
unsigned int pit_count;
unsigned int samples;
unsigned int play_pos;
- int data_on;
- int dummy_refresh_clock;
+ uint8_t data_on;
+ uint8_t dummy_refresh_clock;
+ bool migrate;
} PCSpkState;
static const char *s_spk = "pcspk";
@@ -187,8 +188,29 @@ static void pcspk_realizefn(DeviceState *dev, Error **errp)
pcspk_state = s;
}
+static bool migrate_needed(void *opaque)
+{
+ PCSpkState *s = opaque;
+
+ return s->migrate;
+}
+
+static const VMStateDescription vmstate_spk = {
+ .name = "pcspk",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = migrate_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(data_on, PCSpkState),
+ VMSTATE_UINT8(dummy_refresh_clock, PCSpkState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static Property pcspk_properties[] = {
DEFINE_PROP_UINT32("iobase", PCSpkState, iobase, -1),
+ DEFINE_PROP_BOOL("migrate", PCSpkState, migrate, true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -198,6 +220,7 @@ static void pcspk_class_initfn(ObjectClass *klass, void *data)
dc->realize = pcspk_realizefn;
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
+ dc->vmsd = &vmstate_spk;
dc->props = pcspk_properties;
/* Reason: realize sets global pcspk_state */
dc->cannot_instantiate_with_device_add_yet = true;
diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c
index 3a4a57ac31..6b4427f242 100644
--- a/hw/audio/sb16.c
+++ b/hw/audio/sb16.c
@@ -106,6 +106,7 @@ typedef struct SB16State {
/* mixer state */
int mixer_nreg;
uint8_t mixer_regs[256];
+ PortioList portio_list;
} SB16State;
static void SB_audio_callback (void *opaque, int free);
@@ -1378,7 +1379,8 @@ static void sb16_realizefn (DeviceState *dev, Error **errp)
dolog ("warning: Could not create auxiliary timer\n");
}
- isa_register_portio_list (isadev, s->port, sb16_ioport_list, s, "sb16");
+ isa_register_portio_list(isadev, &s->portio_list, s->port,
+ sb16_ioport_list, s, "sb16");
s->isa_hdma = isa_get_dma(isa_bus_from_device(isadev), s->hdma);
k = ISADMA_GET_CLASS(s->isa_hdma);
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 704a763603..d1f9f63eaf 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -68,9 +68,7 @@ static void notify_guest_bh(void *opaque)
unsigned i = j + ctzl(bits);
VirtQueue *vq = virtio_get_queue(s->vdev, i);
- if (virtio_should_notify(s->vdev, vq)) {
- event_notifier_set(virtio_queue_get_guest_notifier(vq));
- }
+ virtio_notify_irqfd(s->vdev, vq);
bits &= bits - 1; /* clear right-most bit */
}
@@ -88,23 +86,28 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
*dataplane = NULL;
- if (!conf->iothread) {
- return;
- }
+ if (conf->iothread) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
+ error_setg(errp,
+ "device is incompatible with iothread "
+ "(transport does not support notifiers)");
+ return;
+ }
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ error_setg(errp, "ioeventfd is required for iothread");
+ return;
+ }
- /* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->ioeventfd_started) {
- error_setg(errp,
- "device is incompatible with dataplane "
- "(transport does not support notifiers)");
- return;
+ /* If dataplane is (re-)enabled while the guest is running there could
+ * be block jobs that can conflict.
+ */
+ if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
+ error_prepend(errp, "cannot start virtio-blk dataplane: ");
+ return;
+ }
}
-
- /* If dataplane is (re-)enabled while the guest is running there could be
- * block jobs that can conflict.
- */
- if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
- error_prepend(errp, "cannot start dataplane thread: ");
+ /* Don't try if transport does not support notifiers. */
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
return;
}
@@ -112,9 +115,13 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
s->vdev = vdev;
s->conf = conf;
- s->iothread = conf->iothread;
- object_ref(OBJECT(s->iothread));
- s->ctx = iothread_get_aio_context(s->iothread);
+ if (conf->iothread) {
+ s->iothread = conf->iothread;
+ object_ref(OBJECT(s->iothread));
+ s->ctx = iothread_get_aio_context(s->iothread);
+ } else {
+ s->ctx = qemu_get_aio_context();
+ }
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
s->batch_notify_vqs = bitmap_new(conf->num_queues);
@@ -124,14 +131,19 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
+ VirtIOBlock *vblk;
+
if (!s) {
return;
}
- virtio_blk_data_plane_stop(s);
+ vblk = VIRTIO_BLK(s->vdev);
+ assert(!vblk->dataplane_started);
g_free(s->batch_notify_vqs);
qemu_bh_delete(s->bh);
- object_unref(OBJECT(s->iothread));
+ if (s->iothread) {
+ object_unref(OBJECT(s->iothread));
+ }
g_free(s);
}
@@ -147,17 +159,18 @@ static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
+int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
+ VirtIOBlock *vblk = VIRTIO_BLK(vdev);
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
unsigned i;
unsigned nvqs = s->conf->num_queues;
int r;
if (vblk->dataplane_started || s->starting) {
- return;
+ return 0;
}
s->starting = true;
@@ -204,20 +217,22 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
virtio_blk_data_plane_handle_output);
}
aio_context_release(s->ctx);
- return;
+ return 0;
fail_guest_notifiers:
vblk->dataplane_disabled = true;
s->starting = false;
vblk->dataplane_started = true;
+ return -ENOSYS;
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
+void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
+ VirtIOBlock *vblk = VIRTIO_BLK(vdev);
+ VirtIOBlockDataPlane *s = vblk->dataplane;
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
unsigned i;
unsigned nvqs = s->conf->num_queues;
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index b1f0b95b32..db3f47b173 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -23,9 +23,9 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane,
Error **errp);
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
+int virtio_blk_data_plane_start(VirtIODevice *vdev);
+void virtio_blk_data_plane_stop(VirtIODevice *vdev);
+
#endif /* HW_DATAPLANE_VIRTIO_BLK_H */
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index f73af7db46..17d29e7bc5 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -35,6 +35,7 @@
#include "qemu/timer.h"
#include "hw/isa/isa.h"
#include "hw/sysbus.h"
+#include "hw/block/block.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
@@ -52,6 +53,35 @@
} \
} while (0)
+
+/********************************************************/
+/* qdev floppy bus */
+
+#define TYPE_FLOPPY_BUS "floppy-bus"
+#define FLOPPY_BUS(obj) OBJECT_CHECK(FloppyBus, (obj), TYPE_FLOPPY_BUS)
+
+typedef struct FDCtrl FDCtrl;
+typedef struct FDrive FDrive;
+static FDrive *get_drv(FDCtrl *fdctrl, int unit);
+
+typedef struct FloppyBus {
+ BusState bus;
+ FDCtrl *fdc;
+} FloppyBus;
+
+static const TypeInfo floppy_bus_info = {
+ .name = TYPE_FLOPPY_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(FloppyBus),
+};
+
+static void floppy_bus_create(FDCtrl *fdc, FloppyBus *bus, DeviceState *dev)
+{
+ qbus_create_inplace(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL);
+ bus->fdc = fdc;
+}
+
+
/********************************************************/
/* Floppy drive emulation */
@@ -148,14 +178,12 @@ static FDriveSize drive_size(FloppyDriveType drive)
#define FD_SECTOR_SC 2 /* Sector size code */
#define FD_RESET_SENSEI_COUNT 4 /* Number of sense interrupts on RESET */
-typedef struct FDCtrl FDCtrl;
-
/* Floppy disk drive emulation */
typedef enum FDiskFlags {
FDISK_DBL_SIDES = 0x01,
} FDiskFlags;
-typedef struct FDrive {
+struct FDrive {
FDCtrl *fdctrl;
BlockBackend *blk;
/* Drive status */
@@ -176,7 +204,7 @@ typedef struct FDrive {
uint8_t media_rate; /* Data rate of medium */
bool media_validated; /* Have we validated the media? */
-} FDrive;
+};
static FloppyDriveType get_fallback_drive_type(FDrive *drv);
@@ -441,6 +469,135 @@ static void fd_revalidate(FDrive *drv)
}
}
+static void fd_change_cb(void *opaque, bool load)
+{
+ FDrive *drive = opaque;
+
+ drive->media_changed = 1;
+ drive->media_validated = false;
+ fd_revalidate(drive);
+}
+
+static const BlockDevOps fd_block_ops = {
+ .change_media_cb = fd_change_cb,
+};
+
+
+#define TYPE_FLOPPY_DRIVE "floppy"
+#define FLOPPY_DRIVE(obj) \
+ OBJECT_CHECK(FloppyDrive, (obj), TYPE_FLOPPY_DRIVE)
+
+typedef struct FloppyDrive {
+ DeviceState qdev;
+ uint32_t unit;
+ BlockConf conf;
+ FloppyDriveType type;
+} FloppyDrive;
+
+static Property floppy_drive_properties[] = {
+ DEFINE_PROP_UINT32("unit", FloppyDrive, unit, -1),
+ DEFINE_BLOCK_PROPERTIES(FloppyDrive, conf),
+ DEFINE_PROP_DEFAULT("drive-type", FloppyDrive, type,
+ FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
+ FloppyDriveType),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static int floppy_drive_init(DeviceState *qdev)
+{
+ FloppyDrive *dev = FLOPPY_DRIVE(qdev);
+ FloppyBus *bus = FLOPPY_BUS(qdev->parent_bus);
+ FDrive *drive;
+ int ret;
+
+ if (dev->unit == -1) {
+ for (dev->unit = 0; dev->unit < MAX_FD; dev->unit++) {
+ drive = get_drv(bus->fdc, dev->unit);
+ if (!drive->blk) {
+ break;
+ }
+ }
+ }
+
+ if (dev->unit >= MAX_FD) {
+ error_report("Can't create floppy unit %d, bus supports only %d units",
+ dev->unit, MAX_FD);
+ return -1;
+ }
+
+ drive = get_drv(bus->fdc, dev->unit);
+ if (drive->blk) {
+ error_report("Floppy unit %d is in use", dev->unit);
+ return -1;
+ }
+
+ if (!dev->conf.blk) {
+ /* Anonymous BlockBackend for an empty drive */
+ dev->conf.blk = blk_new();
+ ret = blk_attach_dev(dev->conf.blk, qdev);
+ assert(ret == 0);
+ }
+
+ blkconf_blocksizes(&dev->conf);
+ if (dev->conf.logical_block_size != 512 ||
+ dev->conf.physical_block_size != 512)
+ {
+ error_report("Physical and logical block size must be 512 for floppy");
+ return -1;
+ }
+
+ /* rerror/werror aren't supported by fdc and therefore not even registered
+ * with qdev. So set the defaults manually before they are used in
+ * blkconf_apply_backend_options(). */
+ dev->conf.rerror = BLOCKDEV_ON_ERROR_AUTO;
+ dev->conf.werror = BLOCKDEV_ON_ERROR_AUTO;
+ blkconf_apply_backend_options(&dev->conf);
+
+ /* 'enospc' is the default for -drive, 'report' is what blk_new() gives us
+ * for empty drives. */
+ if (blk_get_on_error(dev->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
+ blk_get_on_error(dev->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
+ error_report("fdc doesn't support drive option werror");
+ return -1;
+ }
+ if (blk_get_on_error(dev->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
+ error_report("fdc doesn't support drive option rerror");
+ return -1;
+ }
+
+ drive->blk = dev->conf.blk;
+ drive->fdctrl = bus->fdc;
+
+ fd_init(drive);
+ blk_set_dev_ops(drive->blk, &fd_block_ops, drive);
+
+ /* Keep 'type' qdev property and FDrive->drive in sync */
+ drive->drive = dev->type;
+ pick_drive_type(drive);
+ dev->type = drive->drive;
+
+ fd_revalidate(drive);
+
+ return 0;
+}
+
+static void floppy_drive_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *k = DEVICE_CLASS(klass);
+ k->init = floppy_drive_init;
+ set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
+ k->bus_type = TYPE_FLOPPY_BUS;
+ k->props = floppy_drive_properties;
+ k->desc = "virtual floppy drive";
+}
+
+static const TypeInfo floppy_drive_info = {
+ .name = TYPE_FLOPPY_DRIVE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(FloppyDrive),
+ .class_init = floppy_drive_class_init,
+};
+
/********************************************************/
/* Intel 82078 floppy disk controller emulation */
@@ -684,14 +841,20 @@ struct FDCtrl {
/* Power down config (also with status regB access mode */
uint8_t pwrd;
/* Floppy drives */
+ FloppyBus bus;
uint8_t num_floppies;
FDrive drives[MAX_FD];
+ struct {
+ BlockBackend *blk;
+ FloppyDriveType type;
+ } qdev_for_drives[MAX_FD];
int reset_sensei;
uint32_t check_media_rate;
FloppyDriveType fallback; /* type=auto failure fallback */
/* Timers state */
uint8_t timer0;
uint8_t timer1;
+ PortioList portio_list;
};
static FloppyDriveType get_fallback_drive_type(FDrive *drv)
@@ -1158,9 +1321,9 @@ static inline FDrive *drv3(FDCtrl *fdctrl)
}
#endif
-static FDrive *get_cur_drv(FDCtrl *fdctrl)
+static FDrive *get_drv(FDCtrl *fdctrl, int unit)
{
- switch (fdctrl->cur_drv) {
+ switch (unit) {
case 0: return drv0(fdctrl);
case 1: return drv1(fdctrl);
#if MAX_FD == 4
@@ -1171,6 +1334,11 @@ static FDrive *get_cur_drv(FDCtrl *fdctrl)
}
}
+static FDrive *get_cur_drv(FDCtrl *fdctrl)
+{
+ return get_drv(fdctrl, fdctrl->cur_drv);
+}
+
/* Status A register : 0x00 (read-only) */
static uint32_t fdctrl_read_statusA(FDCtrl *fdctrl)
{
@@ -2330,46 +2498,49 @@ static void fdctrl_result_timer(void *opaque)
}
}
-static void fdctrl_change_cb(void *opaque, bool load)
-{
- FDrive *drive = opaque;
-
- drive->media_changed = 1;
- drive->media_validated = false;
- fd_revalidate(drive);
-}
-
-static const BlockDevOps fdctrl_block_ops = {
- .change_media_cb = fdctrl_change_cb,
-};
-
/* Init functions */
-static void fdctrl_connect_drives(FDCtrl *fdctrl, Error **errp)
+static void fdctrl_connect_drives(FDCtrl *fdctrl, Error **errp,
+ DeviceState *fdc_dev)
{
unsigned int i;
FDrive *drive;
+ DeviceState *dev;
+ BlockBackend *blk;
+ Error *local_err = NULL;
for (i = 0; i < MAX_FD; i++) {
drive = &fdctrl->drives[i];
drive->fdctrl = fdctrl;
- if (drive->blk) {
- if (blk_get_on_error(drive->blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
- error_setg(errp, "fdc doesn't support drive option werror");
- return;
- }
- if (blk_get_on_error(drive->blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
- error_setg(errp, "fdc doesn't support drive option rerror");
- return;
- }
+ /* If the drive is not present, we skip creating the qdev device, but
+ * still have to initialise the controller. */
+ blk = fdctrl->qdev_for_drives[i].blk;
+ if (!blk) {
+ fd_init(drive);
+ fd_revalidate(drive);
+ continue;
+ }
+
+ dev = qdev_create(&fdctrl->bus.bus, "floppy");
+ qdev_prop_set_uint32(dev, "unit", i);
+ qdev_prop_set_enum(dev, "drive-type", fdctrl->qdev_for_drives[i].type);
+
+ blk_ref(blk);
+ blk_detach_dev(blk, fdc_dev);
+ fdctrl->qdev_for_drives[i].blk = NULL;
+ qdev_prop_set_drive(dev, "drive", blk, &local_err);
+ blk_unref(blk);
+
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
}
- fd_init(drive);
- if (drive->blk) {
- blk_set_dev_ops(drive->blk, &fdctrl_block_ops, drive);
- pick_drive_type(drive);
+ object_property_set_bool(OBJECT(dev), true, "realized", &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
}
- fd_revalidate(drive);
}
}
@@ -2441,7 +2612,8 @@ void sun4m_fdctrl_init(qemu_irq irq, hwaddr io_base,
*fdc_tc = qdev_get_gpio_in(dev, 0);
}
-static void fdctrl_realize_common(FDCtrl *fdctrl, Error **errp)
+static void fdctrl_realize_common(DeviceState *dev, FDCtrl *fdctrl,
+ Error **errp)
{
int i, j;
static int command_tables_inited = 0;
@@ -2479,7 +2651,9 @@ static void fdctrl_realize_common(FDCtrl *fdctrl, Error **errp)
k->register_channel(fdctrl->dma, fdctrl->dma_chann,
&fdctrl_transfer_handler, fdctrl);
}
- fdctrl_connect_drives(fdctrl, errp);
+
+ floppy_bus_create(fdctrl, &fdctrl->bus, dev);
+ fdctrl_connect_drives(fdctrl, errp, dev);
}
static const MemoryRegionPortio fdc_portio_list[] = {
@@ -2495,7 +2669,8 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp)
FDCtrl *fdctrl = &isa->state;
Error *err = NULL;
- isa_register_portio_list(isadev, isa->iobase, fdc_portio_list, fdctrl,
+ isa_register_portio_list(isadev, &fdctrl->portio_list,
+ isa->iobase, fdc_portio_list, fdctrl,
"fdc");
isa_init_irq(isadev, &fdctrl->irq, isa->irq);
@@ -2506,7 +2681,7 @@ static void isabus_fdc_realize(DeviceState *dev, Error **errp)
}
qdev_set_legacy_instance_id(dev, isa->iobase, 2);
- fdctrl_realize_common(fdctrl, &err);
+ fdctrl_realize_common(dev, fdctrl, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
@@ -2557,7 +2732,7 @@ static void sysbus_fdc_common_realize(DeviceState *dev, Error **errp)
FDCtrlSysBus *sys = SYSBUS_FDC(dev);
FDCtrl *fdctrl = &sys->state;
- fdctrl_realize_common(fdctrl, errp);
+ fdctrl_realize_common(dev, fdctrl, errp);
}
FloppyDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i)
@@ -2604,14 +2779,14 @@ static Property isa_fdc_properties[] = {
DEFINE_PROP_UINT32("iobase", FDCtrlISABus, iobase, 0x3f0),
DEFINE_PROP_UINT32("irq", FDCtrlISABus, irq, 6),
DEFINE_PROP_UINT32("dma", FDCtrlISABus, dma, 2),
- DEFINE_PROP_DRIVE("driveA", FDCtrlISABus, state.drives[0].blk),
- DEFINE_PROP_DRIVE("driveB", FDCtrlISABus, state.drives[1].blk),
+ DEFINE_PROP_DRIVE("driveA", FDCtrlISABus, state.qdev_for_drives[0].blk),
+ DEFINE_PROP_DRIVE("driveB", FDCtrlISABus, state.qdev_for_drives[1].blk),
DEFINE_PROP_BIT("check_media_rate", FDCtrlISABus, state.check_media_rate,
0, true),
- DEFINE_PROP_DEFAULT("fdtypeA", FDCtrlISABus, state.drives[0].drive,
+ DEFINE_PROP_DEFAULT("fdtypeA", FDCtrlISABus, state.qdev_for_drives[0].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
- DEFINE_PROP_DEFAULT("fdtypeB", FDCtrlISABus, state.drives[1].drive,
+ DEFINE_PROP_DEFAULT("fdtypeB", FDCtrlISABus, state.qdev_for_drives[1].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
DEFINE_PROP_DEFAULT("fallback", FDCtrlISABus, state.fallback,
@@ -2663,12 +2838,12 @@ static const VMStateDescription vmstate_sysbus_fdc ={
};
static Property sysbus_fdc_properties[] = {
- DEFINE_PROP_DRIVE("driveA", FDCtrlSysBus, state.drives[0].blk),
- DEFINE_PROP_DRIVE("driveB", FDCtrlSysBus, state.drives[1].blk),
- DEFINE_PROP_DEFAULT("fdtypeA", FDCtrlSysBus, state.drives[0].drive,
+ DEFINE_PROP_DRIVE("driveA", FDCtrlSysBus, state.qdev_for_drives[0].blk),
+ DEFINE_PROP_DRIVE("driveB", FDCtrlSysBus, state.qdev_for_drives[1].blk),
+ DEFINE_PROP_DEFAULT("fdtypeA", FDCtrlSysBus, state.qdev_for_drives[0].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
- DEFINE_PROP_DEFAULT("fdtypeB", FDCtrlSysBus, state.drives[1].drive,
+ DEFINE_PROP_DEFAULT("fdtypeB", FDCtrlSysBus, state.qdev_for_drives[1].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
DEFINE_PROP_DEFAULT("fallback", FDCtrlISABus, state.fallback,
@@ -2693,8 +2868,8 @@ static const TypeInfo sysbus_fdc_info = {
};
static Property sun4m_fdc_properties[] = {
- DEFINE_PROP_DRIVE("drive", FDCtrlSysBus, state.drives[0].blk),
- DEFINE_PROP_DEFAULT("fdtype", FDCtrlSysBus, state.drives[0].drive,
+ DEFINE_PROP_DRIVE("drive", FDCtrlSysBus, state.qdev_for_drives[0].blk),
+ DEFINE_PROP_DEFAULT("fdtype", FDCtrlSysBus, state.qdev_for_drives[0].type,
FLOPPY_DRIVE_TYPE_AUTO, qdev_prop_fdc_drive_type,
FloppyDriveType),
DEFINE_PROP_DEFAULT("fallback", FDCtrlISABus, state.fallback,
@@ -2742,6 +2917,8 @@ static void fdc_register_types(void)
type_register_static(&sysbus_fdc_type_info);
type_register_static(&sysbus_fdc_info);
type_register_static(&sun4m_fdc_info);
+ type_register_static(&floppy_bus_info);
+ type_register_static(&floppy_drive_info);
}
type_init(fdc_register_types)
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 9828ee61d5..d29ff4cb4f 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -1189,9 +1189,9 @@ static Property m25p80_properties[] = {
};
static const VMStateDescription vmstate_m25p80 = {
- .name = "xilinx_spi",
- .version_id = 3,
- .minimum_version_id = 1,
+ .name = "m25p80",
+ .version_id = 0,
+ .minimum_version_id = 0,
.pre_save = m25p80_pre_save,
.fields = (VMStateField[]) {
VMSTATE_UINT8(state, Flash),
@@ -1200,20 +1200,19 @@ static const VMStateDescription vmstate_m25p80 = {
VMSTATE_UINT32(pos, Flash),
VMSTATE_UINT8(needed_bytes, Flash),
VMSTATE_UINT8(cmd_in_progress, Flash),
- VMSTATE_UNUSED(4),
VMSTATE_UINT32(cur_addr, Flash),
VMSTATE_BOOL(write_enable, Flash),
- VMSTATE_BOOL_V(reset_enable, Flash, 2),
- VMSTATE_UINT8_V(ear, Flash, 2),
- VMSTATE_BOOL_V(four_bytes_address_mode, Flash, 2),
- VMSTATE_UINT32_V(nonvolatile_cfg, Flash, 2),
- VMSTATE_UINT32_V(volatile_cfg, Flash, 2),
- VMSTATE_UINT32_V(enh_volatile_cfg, Flash, 2),
- VMSTATE_BOOL_V(quad_enable, Flash, 3),
- VMSTATE_UINT8_V(spansion_cr1nv, Flash, 3),
- VMSTATE_UINT8_V(spansion_cr2nv, Flash, 3),
- VMSTATE_UINT8_V(spansion_cr3nv, Flash, 3),
- VMSTATE_UINT8_V(spansion_cr4nv, Flash, 3),
+ VMSTATE_BOOL(reset_enable, Flash),
+ VMSTATE_UINT8(ear, Flash),
+ VMSTATE_BOOL(four_bytes_address_mode, Flash),
+ VMSTATE_UINT32(nonvolatile_cfg, Flash),
+ VMSTATE_UINT32(volatile_cfg, Flash),
+ VMSTATE_UINT32(enh_volatile_cfg, Flash),
+ VMSTATE_BOOL(quad_enable, Flash),
+ VMSTATE_UINT8(spansion_cr1nv, Flash),
+ VMSTATE_UINT8(spansion_cr2nv, Flash),
+ VMSTATE_UINT8(spansion_cr3nv, Flash),
+ VMSTATE_UINT8(spansion_cr4nv, Flash),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index cef3bb42f1..d479fd22f5 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -258,8 +258,10 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
req->has_sg = true;
dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
req->aiocb = is_write ?
- dma_blk_write(n->conf.blk, &req->qsg, data_offset, nvme_rw_cb, req) :
- dma_blk_read(n->conf.blk, &req->qsg, data_offset, nvme_rw_cb, req);
+ dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
+ nvme_rw_cb, req) :
+ dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
+ nvme_rw_cb, req);
return NVME_NO_COMPLETE;
}
@@ -373,7 +375,7 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
if (!cqid || nvme_check_cqid(n, cqid)) {
return NVME_INVALID_CQID | NVME_DNR;
}
- if (!sqid || (sqid && !nvme_check_sqid(n, sqid))) {
+ if (!sqid || !nvme_check_sqid(n, sqid)) {
return NVME_INVALID_QID | NVME_DNR;
}
if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
@@ -447,7 +449,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->cq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- if (!cqid || (cqid && !nvme_check_cqid(n, cqid))) {
+ if (!cqid || !nvme_check_cqid(n, cqid)) {
return NVME_INVALID_CQID | NVME_DNR;
}
if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 331d7667ec..0c5fd27593 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -29,8 +29,8 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
-void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
- VirtIOBlockReq *req)
+static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
+ VirtIOBlockReq *req)
{
req->dev = s;
req->vq = vq;
@@ -40,7 +40,7 @@ void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
req->mr_next = NULL;
}
-void virtio_blk_free_request(VirtIOBlockReq *req)
+static void virtio_blk_free_request(VirtIOBlockReq *req)
{
if (req) {
g_free(req);
@@ -381,7 +381,7 @@ static int multireq_compare(const void *a, const void *b)
}
}
-void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
+static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
uint32_t max_transfer;
@@ -468,30 +468,32 @@ static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
return true;
}
-void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
+static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
uint32_t type;
struct iovec *in_iov = req->elem.in_sg;
struct iovec *iov = req->elem.out_sg;
unsigned in_num = req->elem.in_num;
unsigned out_num = req->elem.out_num;
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
if (req->elem.out_num < 1 || req->elem.in_num < 1) {
- error_report("virtio-blk missing headers");
- exit(1);
+ virtio_error(vdev, "virtio-blk missing headers");
+ return -1;
}
if (unlikely(iov_to_buf(iov, out_num, 0, &req->out,
sizeof(req->out)) != sizeof(req->out))) {
- error_report("virtio-blk request outhdr too short");
- exit(1);
+ virtio_error(vdev, "virtio-blk request outhdr too short");
+ return -1;
}
iov_discard_front(&iov, &out_num, sizeof(req->out));
if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
- error_report("virtio-blk request inhdr too short");
- exit(1);
+ virtio_error(vdev, "virtio-blk request inhdr too short");
+ return -1;
}
/* We always touch the last byte, so just see how big in_iov is. */
@@ -529,7 +531,7 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
block_acct_invalid(blk_get_stats(req->dev->blk),
is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
virtio_blk_free_request(req);
- return;
+ return 0;
}
block_acct_start(blk_get_stats(req->dev->blk),
@@ -576,6 +578,7 @@ void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
virtio_blk_free_request(req);
}
+ return 0;
}
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
@@ -586,7 +589,11 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
blk_io_plug(s->blk);
while ((req = virtio_blk_get_request(s, vq))) {
- virtio_blk_handle_request(req, &mrb);
+ if (virtio_blk_handle_request(req, &mrb)) {
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_blk_free_request(req);
+ break;
+ }
}
if (mrb.num_reqs) {
@@ -604,7 +611,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* dataplane here instead of waiting for .set_status().
*/
- virtio_blk_data_plane_start(s->dataplane);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_disabled) {
return;
}
@@ -625,7 +632,18 @@ static void virtio_blk_dma_restart_bh(void *opaque)
while (req) {
VirtIOBlockReq *next = req->next;
- virtio_blk_handle_request(req, &mrb);
+ if (virtio_blk_handle_request(req, &mrb)) {
+ /* Device is now broken and won't do any processing until it gets
+ * reset. Already queued requests will be lost: let's purge them.
+ */
+ while (req) {
+ next = req->next;
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_blk_free_request(req);
+ req = next;
+ }
+ break;
+ }
req = next;
}
@@ -665,14 +683,13 @@ static void virtio_blk_reset(VirtIODevice *vdev)
while (s->rq) {
req = s->rq;
s->rq = req->next;
+ virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_blk_free_request(req);
}
- if (s->dataplane) {
- virtio_blk_data_plane_stop(s->dataplane);
- }
aio_context_release(ctx);
+ assert(!s->dataplane_started);
blk_set_enable_write_cache(s->blk, s->original_wce);
}
@@ -770,9 +787,8 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
- if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
- VIRTIO_CONFIG_S_DRIVER_OK))) {
- virtio_blk_data_plane_stop(s->dataplane);
+ if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
+ assert(!s->dataplane_started);
}
if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
@@ -803,13 +819,6 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
}
}
-static void virtio_blk_save(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-
- virtio_save(vdev, f);
-}
-
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
@@ -828,14 +837,6 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
qemu_put_sbyte(f, 0);
}
-static int virtio_blk_load(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIOBlock *s = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(s);
-
- return virtio_load(vdev, f, 2);
-}
-
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@@ -915,7 +916,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
for (i = 0; i < conf->num_queues; i++) {
- virtio_add_queue_aio(vdev, 128, virtio_blk_handle_output);
+ virtio_add_queue(vdev, 128, virtio_blk_handle_output);
}
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
@@ -956,7 +957,15 @@ static void virtio_blk_instance_init(Object *obj)
DEVICE(obj), NULL);
}
-VMSTATE_VIRTIO_DEVICE(blk, 2, virtio_blk_load, virtio_blk_save);
+static const VMStateDescription vmstate_virtio_blk = {
+ .name = "virtio-blk",
+ .minimum_version_id = 2,
+ .version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
static Property virtio_blk_properties[] = {
DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
@@ -990,9 +999,11 @@ static void virtio_blk_class_init(ObjectClass *klass, void *data)
vdc->reset = virtio_blk_reset;
vdc->save = virtio_blk_save_device;
vdc->load = virtio_blk_load_device;
+ vdc->start_ioeventfd = virtio_blk_data_plane_start;
+ vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}
-static const TypeInfo virtio_device_info = {
+static const TypeInfo virtio_blk_info = {
.name = TYPE_VIRTIO_BLK,
.parent = TYPE_VIRTIO_DEVICE,
.instance_size = sizeof(VirtIOBlock),
@@ -1002,7 +1013,7 @@ static const TypeInfo virtio_device_info = {
static void virtio_register_types(void)
{
- type_register_static(&virtio_device_info);
+ type_register_static(&virtio_blk_info);
}
type_init(virtio_register_types)
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 3b8ad33fc5..456a2d5694 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -119,6 +119,9 @@ struct XenBlkDev {
unsigned int persistent_gnt_count;
unsigned int max_grants;
+ /* Grant copy */
+ gboolean feature_grant_copy;
+
/* qemu block driver */
DriveInfo *dinfo;
BlockBackend *blk;
@@ -164,12 +167,12 @@ static void destroy_grant(gpointer pgnt)
xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
- xen_be_printf(&grant->blkdev->xendev, 0,
+ xen_pv_printf(&grant->blkdev->xendev, 0,
"xengnttab_unmap failed: %s\n",
strerror(errno));
}
grant->blkdev->persistent_gnt_count--;
- xen_be_printf(&grant->blkdev->xendev, 3,
+ xen_pv_printf(&grant->blkdev->xendev, 3,
"unmapped grant %p\n", grant->page);
g_free(grant);
}
@@ -181,11 +184,11 @@ static void remove_persistent_region(gpointer data, gpointer dev)
xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
- xen_be_printf(&blkdev->xendev, 0,
+ xen_pv_printf(&blkdev->xendev, 0,
"xengnttab_unmap region %p failed: %s\n",
region->addr, strerror(errno));
}
- xen_be_printf(&blkdev->xendev, 3,
+ xen_pv_printf(&blkdev->xendev, 3,
"unmapped grant region %p with %d pages\n",
region->addr, region->num);
g_free(region);
@@ -252,7 +255,7 @@ static int ioreq_parse(struct ioreq *ioreq)
size_t len;
int i;
- xen_be_printf(&blkdev->xendev, 3,
+ xen_pv_printf(&blkdev->xendev, 3,
"op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
ioreq->req.operation, ioreq->req.nr_segments,
ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
@@ -272,28 +275,28 @@ static int ioreq_parse(struct ioreq *ioreq)
case BLKIF_OP_DISCARD:
return 0;
default:
- xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
+ xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
ioreq->req.operation);
goto err;
};
if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
- xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
+ xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
goto err;
}
ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
for (i = 0; i < ioreq->req.nr_segments; i++) {
if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
+ xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
goto err;
}
if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
- xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
+ xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
goto err;
}
if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
- xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
+ xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
goto err;
}
@@ -305,7 +308,7 @@ static int ioreq_parse(struct ioreq *ioreq)
qemu_iovec_add(&ioreq->v, (void*)mem, len);
}
if (ioreq->start + ioreq->v.size > blkdev->file_size) {
- xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
+ xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
goto err;
}
return 0;
@@ -328,7 +331,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
return;
}
if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
- xen_be_printf(&ioreq->blkdev->xendev, 0,
+ xen_pv_printf(&ioreq->blkdev->xendev, 0,
"xengnttab_unmap failed: %s\n",
strerror(errno));
}
@@ -340,7 +343,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
continue;
}
if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
- xen_be_printf(&ioreq->blkdev->xendev, 0,
+ xen_pv_printf(&ioreq->blkdev->xendev, 0,
"xengnttab_unmap failed: %s\n",
strerror(errno));
}
@@ -378,7 +381,7 @@ static int ioreq_map(struct ioreq *ioreq)
if (grant != NULL) {
page[i] = grant->page;
- xen_be_printf(&ioreq->blkdev->xendev, 3,
+ xen_pv_printf(&ioreq->blkdev->xendev, 3,
"using persistent-grant %" PRIu32 "\n",
ioreq->refs[i]);
} else {
@@ -407,7 +410,7 @@ static int ioreq_map(struct ioreq *ioreq)
ioreq->pages = xengnttab_map_grant_refs
(gnt, new_maps, domids, refs, ioreq->prot);
if (ioreq->pages == NULL) {
- xen_be_printf(&ioreq->blkdev->xendev, 0,
+ xen_pv_printf(&ioreq->blkdev->xendev, 0,
"can't map %d grant refs (%s, %d maps)\n",
new_maps, strerror(errno), ioreq->blkdev->cnt_map);
return -1;
@@ -423,7 +426,7 @@ static int ioreq_map(struct ioreq *ioreq)
ioreq->page[i] = xengnttab_map_grant_ref
(gnt, domids[i], refs[i], ioreq->prot);
if (ioreq->page[i] == NULL) {
- xen_be_printf(&ioreq->blkdev->xendev, 0,
+ xen_pv_printf(&ioreq->blkdev->xendev, 0,
"can't map grant ref %d (%s, %d maps)\n",
refs[i], strerror(errno), ioreq->blkdev->cnt_map);
ioreq->mapped = 1;
@@ -471,7 +474,7 @@ static int ioreq_map(struct ioreq *ioreq)
grant->page = ioreq->page[new_maps];
}
grant->blkdev = ioreq->blkdev;
- xen_be_printf(&ioreq->blkdev->xendev, 3,
+ xen_pv_printf(&ioreq->blkdev->xendev, 3,
"adding grant %" PRIu32 " page: %p\n",
refs[new_maps], grant->page);
g_tree_insert(ioreq->blkdev->persistent_gnts,
@@ -489,6 +492,106 @@ static int ioreq_map(struct ioreq *ioreq)
return 0;
}
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 480
+
+static void ioreq_free_copy_buffers(struct ioreq *ioreq)
+{
+ int i;
+
+ for (i = 0; i < ioreq->v.niov; i++) {
+ ioreq->page[i] = NULL;
+ }
+
+ qemu_vfree(ioreq->pages);
+}
+
+static int ioreq_init_copy_buffers(struct ioreq *ioreq)
+{
+ int i;
+
+ if (ioreq->v.niov == 0) {
+ return 0;
+ }
+
+ ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
+
+ for (i = 0; i < ioreq->v.niov; i++) {
+ ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
+ ioreq->v.iov[i].iov_base = ioreq->page[i];
+ }
+
+ return 0;
+}
+
+static int ioreq_grant_copy(struct ioreq *ioreq)
+{
+ xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
+ xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int i, count, rc;
+ int64_t file_blk = ioreq->blkdev->file_blk;
+
+ if (ioreq->v.niov == 0) {
+ return 0;
+ }
+
+ count = ioreq->v.niov;
+
+ for (i = 0; i < count; i++) {
+ if (ioreq->req.operation == BLKIF_OP_READ) {
+ segs[i].flags = GNTCOPY_dest_gref;
+ segs[i].dest.foreign.ref = ioreq->refs[i];
+ segs[i].dest.foreign.domid = ioreq->domids[i];
+ segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
+ segs[i].source.virt = ioreq->v.iov[i].iov_base;
+ } else {
+ segs[i].flags = GNTCOPY_source_gref;
+ segs[i].source.foreign.ref = ioreq->refs[i];
+ segs[i].source.foreign.domid = ioreq->domids[i];
+ segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
+ segs[i].dest.virt = ioreq->v.iov[i].iov_base;
+ }
+ segs[i].len = (ioreq->req.seg[i].last_sect
+ - ioreq->req.seg[i].first_sect + 1) * file_blk;
+ }
+
+ rc = xengnttab_grant_copy(gnt, count, segs);
+
+ if (rc) {
+ xen_pv_printf(&ioreq->blkdev->xendev, 0,
+ "failed to copy data %d\n", rc);
+ ioreq->aio_errors++;
+ return -1;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (segs[i].status != GNTST_okay) {
+ xen_pv_printf(&ioreq->blkdev->xendev, 3,
+ "failed to copy data %d for gref %d, domid %d\n",
+ segs[i].status, ioreq->refs[i], ioreq->domids[i]);
+ ioreq->aio_errors++;
+ rc = -1;
+ }
+ }
+
+ return rc;
+}
+#else
+static void ioreq_free_copy_buffers(struct ioreq *ioreq)
+{
+ abort();
+}
+
+static int ioreq_init_copy_buffers(struct ioreq *ioreq)
+{
+ abort();
+}
+
+static int ioreq_grant_copy(struct ioreq *ioreq)
+{
+ abort();
+}
+#endif
+
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
static void qemu_aio_complete(void *opaque, int ret)
@@ -496,7 +599,7 @@ static void qemu_aio_complete(void *opaque, int ret)
struct ioreq *ioreq = opaque;
if (ret != 0) {
- xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
+ xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
ioreq->aio_errors++;
}
@@ -511,8 +614,31 @@ static void qemu_aio_complete(void *opaque, int ret)
return;
}
+ if (ioreq->blkdev->feature_grant_copy) {
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ /* in case of failure, ioreq->aio_errors is increased */
+ if (ret == 0) {
+ ioreq_grant_copy(ioreq);
+ }
+ ioreq_free_copy_buffers(ioreq);
+ break;
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ if (!ioreq->req.nr_segments) {
+ break;
+ }
+ ioreq_free_copy_buffers(ioreq);
+ break;
+ default:
+ break;
+ }
+ }
+
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
- ioreq_unmap(ioreq);
+ if (!ioreq->blkdev->feature_grant_copy) {
+ ioreq_unmap(ioreq);
+ }
ioreq_finish(ioreq);
switch (ioreq->req.operation) {
case BLKIF_OP_WRITE:
@@ -534,12 +660,54 @@ static void qemu_aio_complete(void *opaque, int ret)
qemu_bh_schedule(ioreq->blkdev->bh);
}
+static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
+ uint64_t nr_sectors)
+{
+ struct XenBlkDev *blkdev = ioreq->blkdev;
+ int64_t byte_offset;
+ int byte_chunk;
+ uint64_t byte_remaining, limit;
+ uint64_t sec_start = sector_number;
+ uint64_t sec_count = nr_sectors;
+
+ /* Wrap around, or overflowing byte limit? */
+ if (sec_start + sec_count < sec_count ||
+ sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
+ return false;
+ }
+
+ limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
+ byte_offset = sec_start << BDRV_SECTOR_BITS;
+ byte_remaining = sec_count << BDRV_SECTOR_BITS;
+
+ do {
+ byte_chunk = byte_remaining > limit ? limit : byte_remaining;
+ ioreq->aio_inflight++;
+ blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
+ qemu_aio_complete, ioreq);
+ byte_remaining -= byte_chunk;
+ byte_offset += byte_chunk;
+ } while (byte_remaining > 0);
+
+ return true;
+}
+
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
struct XenBlkDev *blkdev = ioreq->blkdev;
- if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
- goto err_no_map;
+ if (ioreq->blkdev->feature_grant_copy) {
+ ioreq_init_copy_buffers(ioreq);
+ if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
+ ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
+ ioreq_grant_copy(ioreq)) {
+ ioreq_free_copy_buffers(ioreq);
+ goto err;
+ }
+ } else {
+ if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
+ goto err;
+ }
}
ioreq->aio_inflight++;
@@ -572,16 +740,17 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
break;
case BLKIF_OP_DISCARD:
{
- struct blkif_request_discard *discard_req = (void *)&ioreq->req;
- ioreq->aio_inflight++;
- blk_aio_pdiscard(blkdev->blk,
- discard_req->sector_number << BDRV_SECTOR_BITS,
- discard_req->nr_sectors << BDRV_SECTOR_BITS,
- qemu_aio_complete, ioreq);
+ struct blkif_request_discard *req = (void *)&ioreq->req;
+ if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
+ goto err;
+ }
break;
}
default:
/* unknown operation (shouldn't happen -- parse catches this) */
+ if (!ioreq->blkdev->feature_grant_copy) {
+ ioreq_unmap(ioreq);
+ }
goto err;
}
@@ -590,8 +759,6 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
return 0;
err:
- ioreq_unmap(ioreq);
-err_no_map:
ioreq_finish(ioreq);
ioreq->status = BLKIF_RSP_ERROR;
return -1;
@@ -659,7 +826,7 @@ static void blk_send_response_all(struct XenBlkDev *blkdev)
ioreq_release(ioreq, true);
}
if (send_notify) {
- xen_be_send_notify(&blkdev->xendev);
+ xen_pv_send_notify(&blkdev->xendev);
}
}
@@ -729,7 +896,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
};
if (blk_send_response_one(ioreq)) {
- xen_be_send_notify(&blkdev->xendev);
+ xen_pv_send_notify(&blkdev->xendev);
}
ioreq_release(ioreq, false);
continue;
@@ -773,7 +940,7 @@ static void blk_alloc(struct XenDevice *xendev)
}
if (xengnttab_set_max_grants(xendev->gnttabdev,
MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
- xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
+ xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
strerror(errno));
}
}
@@ -919,11 +1086,11 @@ static int blk_connect(struct XenDevice *xendev)
}
/* setup via xenbus -> create new block driver instance */
- xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
+ xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
qflags, &local_err);
if (!blkdev->blk) {
- xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
+ xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
error_get_pretty(local_err));
error_free(local_err);
return -1;
@@ -931,10 +1098,11 @@ static int blk_connect(struct XenDevice *xendev)
blk_set_enable_write_cache(blkdev->blk, !writethrough);
} else {
/* setup via qemu cmdline -> already setup for us */
- xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
+ xen_pv_printf(&blkdev->xendev, 2,
+ "get configured bdrv (cmdline setup)\n");
blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
if (blk_is_read_only(blkdev->blk) && !readonly) {
- xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
+ xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
blkdev->blk = NULL;
return -1;
}
@@ -942,18 +1110,18 @@ static int blk_connect(struct XenDevice *xendev)
* so we can blk_unref() unconditionally */
blk_ref(blkdev->blk);
}
- blk_attach_dev_nofail(blkdev->blk, blkdev);
+ blk_attach_dev_legacy(blkdev->blk, blkdev);
blkdev->file_size = blk_getlength(blkdev->blk);
if (blkdev->file_size < 0) {
BlockDriverState *bs = blk_bs(blkdev->blk);
const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
- xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
+ xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
(int)blkdev->file_size, strerror(-blkdev->file_size),
drv_name ?: "-");
blkdev->file_size = 0;
}
- xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
+ xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
" size %" PRId64 " (%" PRId64 " MB)\n",
blkdev->type, blkdev->fileproto, blkdev->filename,
blkdev->file_size, blkdev->file_size >> 20);
@@ -976,14 +1144,16 @@ static int blk_connect(struct XenDevice *xendev)
blkdev->feature_persistent = !!pers;
}
- blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
- if (blkdev->xendev.protocol) {
- if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
- blkdev->protocol = BLKIF_PROTOCOL_X86_32;
- }
- if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
- blkdev->protocol = BLKIF_PROTOCOL_X86_64;
- }
+ if (!blkdev->xendev.protocol) {
+ blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
+ } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
+ blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
+ } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
+ blkdev->protocol = BLKIF_PROTOCOL_X86_32;
+ } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
+ blkdev->protocol = BLKIF_PROTOCOL_X86_64;
+ } else {
+ blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
}
blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
@@ -1032,7 +1202,13 @@ static int blk_connect(struct XenDevice *xendev)
xen_be_bind_evtchn(&blkdev->xendev);
- xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
+ blkdev->feature_grant_copy =
+ (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
+
+ xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
+ blkdev->feature_grant_copy ? "enabled" : "disabled");
+
+ xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
"remote port %d, local port %d\n",
blkdev->xendev.protocol, blkdev->ring_ref,
blkdev->xendev.remote_port, blkdev->xendev.local_port);
@@ -1048,7 +1224,7 @@ static void blk_disconnect(struct XenDevice *xendev)
blk_unref(blkdev->blk);
blkdev->blk = NULL;
}
- xen_be_unbind_evtchn(&blkdev->xendev);
+ xen_pv_unbind_evtchn(&blkdev->xendev);
if (blkdev->sring) {
xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
diff --git a/hw/bt/hci-csr.c b/hw/bt/hci-csr.c
index d688372ca3..fbb3109cc1 100644
--- a/hw/bt/hci-csr.c
+++ b/hw/bt/hci-csr.c
@@ -78,15 +78,17 @@ enum {
static inline void csrhci_fifo_wake(struct csrhci_s *s)
{
+ CharBackend *be = s->chr.be;
+
if (!s->enable || !s->out_len)
return;
/* XXX: Should wait for s->modem_state & CHR_TIOCM_RTS? */
- if (s->chr.chr_can_read && s->chr.chr_can_read(s->chr.handler_opaque) &&
- s->chr.chr_read) {
- s->chr.chr_read(s->chr.handler_opaque,
- s->outfifo + s->out_start ++, 1);
- s->out_len --;
+ if (be && be->chr_can_read && be->chr_can_read(be->opaque) &&
+ be->chr_read) {
+ be->chr_read(be->opaque,
+ s->outfifo + s->out_start++, 1);
+ s->out_len--;
if (s->out_start >= s->out_size) {
s->out_start = 0;
s->out_size = FIFO_LEN;
@@ -458,7 +460,7 @@ qemu_irq *csrhci_pins_get(CharDriverState *chr)
return s->pins;
}
-CharDriverState *uart_hci_init(qemu_irq wakeup)
+CharDriverState *uart_hci_init(void)
{
struct csrhci_s *s = (struct csrhci_s *)
g_malloc0(sizeof(struct csrhci_s));
@@ -466,7 +468,6 @@ CharDriverState *uart_hci_init(qemu_irq wakeup)
s->chr.opaque = s;
s->chr.chr_write = csrhci_write;
s->chr.chr_ioctl = csrhci_ioctl;
- s->chr.avail_connections = 1;
s->hci = qemu_next_hci();
s->hci->opaque = s;
diff --git a/hw/bt/hci.c b/hw/bt/hci.c
index 351123fab7..476ebec0ab 100644
--- a/hw/bt/hci.c
+++ b/hw/bt/hci.c
@@ -421,7 +421,7 @@ static void bt_submit_raw_acl(struct bt_piconet_s *net, int length, uint8_t *dat
/* HCI layer emulation */
-/* Note: we could ignore endiannes because unswapped handles will still
+/* Note: we could ignore endianness because unswapped handles will still
* be valid as connection identifiers for the guest - they don't have to
* be continuously allocated. We do it though, to preserve similar
* behaviour between hosts. Some things, like the BD_ADDR cannot be
diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c
index 319f1652f6..4d46ad60ae 100644
--- a/hw/char/bcm2835_aux.c
+++ b/hw/char/bcm2835_aux.c
@@ -79,9 +79,7 @@ static uint64_t bcm2835_aux_read(void *opaque, hwaddr offset, unsigned size)
s->read_pos = 0;
}
}
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
bcm2835_aux_update(s);
return c;
@@ -168,9 +166,9 @@ static void bcm2835_aux_write(void *opaque, hwaddr offset, uint64_t value,
case AUX_MU_IO_REG:
/* "DLAB bit set means access baudrate register" is NYI */
ch = value;
- if (s->chr) {
- qemu_chr_fe_write(s->chr, &ch, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
break;
case AUX_MU_IER_REG:
@@ -280,10 +278,8 @@ static void bcm2835_aux_realize(DeviceState *dev, Error **errp)
{
BCM2835AuxState *s = BCM2835_AUX(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, bcm2835_aux_can_receive,
- bcm2835_aux_receive, NULL, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, bcm2835_aux_can_receive,
+ bcm2835_aux_receive, NULL, s, NULL, true);
}
static Property bcm2835_aux_props[] = {
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index e3bc52f7df..0215d6518d 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -1,6 +1,11 @@
/*
* Device model for Cadence UART
*
+ * Reference: Xilinx Zynq 7000 reference manual
+ * - http://www.xilinx.com/support/documentation/user_guides/ug585-Zynq-7000-TRM.pdf
+ * - Chapter 19 UART Controller
+ * - Appendix B for Register details
+ *
* Copyright (c) 2010 Xilinx Inc.
* Copyright (c) 2012 Peter A.G. Crosthwaite (peter.crosthwaite@petalogix.com)
* Copyright (c) 2012 PetaLogix Pty Ltd.
@@ -142,9 +147,7 @@ static void uart_rx_reset(CadenceUARTState *s)
{
s->rx_wpos = 0;
s->rx_count = 0;
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
}
static void uart_tx_reset(CadenceUARTState *s)
@@ -156,10 +159,8 @@ static void uart_send_breaks(CadenceUARTState *s)
{
int break_enabled = 1;
- if (s->chr) {
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
- &break_enabled);
- }
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
+ &break_enabled);
}
static void uart_parameters_setup(CadenceUARTState *s)
@@ -210,9 +211,7 @@ static void uart_parameters_setup(CadenceUARTState *s)
packet_size += ssp.data_bits + ssp.stop_bits;
s->char_tx_time = (NANOSECONDS_PER_SECOND / ssp.speed) * packet_size;
- if (s->chr) {
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
- }
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
}
static int uart_can_receive(void *opaque)
@@ -278,7 +277,7 @@ static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond,
int ret;
/* instant drain the fifo when there's no back-end */
- if (!s->chr) {
+ if (!qemu_chr_fe_get_driver(&s->chr)) {
s->tx_count = 0;
return FALSE;
}
@@ -287,7 +286,7 @@ static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond,
return FALSE;
}
- ret = qemu_chr_fe_write(s->chr, s->tx_fifo, s->tx_count);
+ ret = qemu_chr_fe_write(&s->chr, s->tx_fifo, s->tx_count);
if (ret >= 0) {
s->tx_count -= ret;
@@ -295,7 +294,7 @@ static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond,
}
if (s->tx_count) {
- guint r = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
+ guint r = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP,
cadence_uart_xmit, s);
if (!r) {
s->tx_count = 0;
@@ -368,9 +367,7 @@ static void uart_read_rx_fifo(CadenceUARTState *s, uint32_t *c)
*c = s->rx_fifo[rx_rpos];
s->rx_count--;
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
} else {
*c = 0;
}
@@ -410,6 +407,16 @@ static void uart_write(void *opaque, hwaddr offset,
break;
}
break;
+ case R_BRGR: /* Baud rate generator */
+ if (value >= 0x01) {
+ s->r[offset] = value & 0xFFFF;
+ }
+ break;
+ case R_BDIV: /* Baud rate divider */
+ if (value >= 0x04) {
+ s->r[offset] = value & 0xFF;
+ }
+ break;
default:
s->r[offset] = value;
}
@@ -458,7 +465,8 @@ static void cadence_uart_reset(DeviceState *dev)
s->r[R_IMR] = 0;
s->r[R_CISR] = 0;
s->r[R_RTRIG] = 0x00000020;
- s->r[R_BRGR] = 0x0000000F;
+ s->r[R_BRGR] = 0x0000028B;
+ s->r[R_BDIV] = 0x0000000F;
s->r[R_TTRIG] = 0x00000020;
uart_rx_reset(s);
@@ -474,10 +482,8 @@ static void cadence_uart_realize(DeviceState *dev, Error **errp)
s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
fifo_trigger_update, s);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, uart_can_receive, uart_receive,
- uart_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, uart_can_receive, uart_receive,
+ uart_event, s, NULL, true);
}
static void cadence_uart_init(Object *obj)
diff --git a/hw/char/debugcon.c b/hw/char/debugcon.c
index e7f025ec67..80dce07e7f 100644
--- a/hw/char/debugcon.c
+++ b/hw/char/debugcon.c
@@ -39,7 +39,7 @@
typedef struct DebugconState {
MemoryRegion io;
- CharDriverState *chr;
+ CharBackend chr;
uint32_t readback;
} DebugconState;
@@ -60,7 +60,9 @@ static void debugcon_ioport_write(void *opaque, hwaddr addr, uint64_t val,
printf(" [debugcon: write addr=0x%04" HWADDR_PRIx " val=0x%02" PRIx64 "]\n", addr, val);
#endif
- qemu_chr_fe_write(s->chr, &ch, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
}
@@ -85,12 +87,12 @@ static const MemoryRegionOps debugcon_ops = {
static void debugcon_realize_core(DebugconState *s, Error **errp)
{
- if (!s->chr) {
+ if (!qemu_chr_fe_get_driver(&s->chr)) {
error_setg(errp, "Can't create debugcon device, empty char device");
return;
}
- qemu_chr_add_handlers(s->chr, NULL, NULL, NULL, s);
+ qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL, s, NULL, true);
}
static void debugcon_isa_realizefn(DeviceState *dev, Error **errp)
diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c
index c7604e6766..029f5bbf5e 100644
--- a/hw/char/digic-uart.c
+++ b/hw/char/digic-uart.c
@@ -76,9 +76,9 @@ static void digic_uart_write(void *opaque, hwaddr addr, uint64_t value,
switch (addr) {
case R_TX:
- if (s->chr) {
- qemu_chr_fe_write_all(s->chr, &ch, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
break;
case R_ST:
@@ -145,9 +145,8 @@ static void digic_uart_realize(DeviceState *dev, Error **errp)
{
DigicUartState *s = DIGIC_UART(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, uart_can_rx, uart_rx, uart_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx,
+ uart_event, s, NULL, true);
}
static void digic_uart_init(Object *obj)
diff --git a/hw/char/escc.c b/hw/char/escc.c
index 31a5f902f9..d6662dc77d 100644
--- a/hw/char/escc.c
+++ b/hw/char/escc.c
@@ -88,7 +88,7 @@ typedef struct ChannelState {
uint32_t reg;
uint8_t wregs[SERIAL_REGS], rregs[SERIAL_REGS];
SERIOQueue queue;
- CharDriverState *chr;
+ CharBackend chr;
int e0_mode, led_mode, caps_lock_mode, num_lock_mode;
int disabled;
int clock;
@@ -416,7 +416,7 @@ static void escc_update_parameters(ChannelState *s)
int speed, parity, data_bits, stop_bits;
QEMUSerialSetParams ssp;
- if (!s->chr || s->type != ser)
+ if (!qemu_chr_fe_get_driver(&s->chr) || s->type != ser)
return;
if (s->wregs[W_TXCTRL1] & TXCTRL1_PAREN) {
@@ -466,7 +466,7 @@ static void escc_update_parameters(ChannelState *s)
ssp.data_bits = data_bits;
ssp.stop_bits = stop_bits;
trace_escc_update_parameters(CHN_C(s), speed, parity, data_bits, stop_bits);
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
}
static void escc_mem_write(void *opaque, hwaddr addr,
@@ -556,9 +556,11 @@ static void escc_mem_write(void *opaque, hwaddr addr,
trace_escc_mem_writeb_data(CHN_C(s), val);
s->tx = val;
if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { // tx enabled
- if (s->chr)
- qemu_chr_fe_write(s->chr, &s->tx, 1);
- else if (s->type == kbd && !s->disabled) {
+ if (qemu_chr_fe_get_driver(&s->chr)) {
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &s->tx, 1);
+ } else if (s->type == kbd && !s->disabled) {
handle_kbd_command(s, val);
}
}
@@ -597,8 +599,7 @@ static uint64_t escc_mem_read(void *opaque, hwaddr addr,
else
ret = s->rx;
trace_escc_mem_readb_data(CHN_C(s), ret);
- if (s->chr)
- qemu_chr_accept_input(s->chr);
+ qemu_chr_fe_accept_input(&s->chr);
return ret;
default:
break;
@@ -1011,10 +1012,11 @@ static void escc_realize(DeviceState *dev, Error **errp)
ESCC_SIZE << s->it_shift);
for (i = 0; i < 2; i++) {
- if (s->chn[i].chr) {
+ if (qemu_chr_fe_get_driver(&s->chn[i].chr)) {
s->chn[i].clock = s->frequency / 2;
- qemu_chr_add_handlers(s->chn[i].chr, serial_can_receive,
- serial_receive1, serial_event, &s->chn[i]);
+ qemu_chr_fe_set_handlers(&s->chn[i].chr, serial_can_receive,
+ serial_receive1, serial_event,
+ &s->chn[i], NULL, true);
}
}
diff --git a/hw/char/etraxfs_ser.c b/hw/char/etraxfs_ser.c
index 04ca04fe2c..54383878e0 100644
--- a/hw/char/etraxfs_ser.c
+++ b/hw/char/etraxfs_ser.c
@@ -53,7 +53,7 @@ typedef struct ETRAXSerial {
SysBusDevice parent_obj;
MemoryRegion mmio;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
int pending_tx;
@@ -126,7 +126,9 @@ ser_write(void *opaque, hwaddr addr,
switch (addr)
{
case RW_DOUT:
- qemu_chr_fe_write(s->chr, &ch, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
s->regs[R_INTR] |= 3;
s->pending_tx = 1;
s->regs[addr] = value;
@@ -229,11 +231,9 @@ static void etraxfs_ser_realize(DeviceState *dev, Error **errp)
{
ETRAXSerial *s = ETRAX_SERIAL(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr,
- serial_can_receive, serial_receive,
- serial_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr,
+ serial_can_receive, serial_receive,
+ serial_event, s, NULL, true);
}
static void etraxfs_ser_class_init(ObjectClass *klass, void *data)
diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c
index 885ecc027b..571c324004 100644
--- a/hw/char/exynos4210_uart.c
+++ b/hw/char/exynos4210_uart.c
@@ -181,7 +181,7 @@ typedef struct Exynos4210UartState {
Exynos4210UartFIFO rx;
Exynos4210UartFIFO tx;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
uint32_t channel;
@@ -346,7 +346,7 @@ static void exynos4210_uart_update_parameters(Exynos4210UartState *s)
ssp.data_bits = data_bits;
ssp.stop_bits = stop_bits;
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
PRINT_DEBUG("UART%d: speed: %d, parity: %c, data: %d, stop: %d\n",
s->channel, speed, parity, data_bits, stop_bits);
@@ -383,11 +383,13 @@ static void exynos4210_uart_write(void *opaque, hwaddr offset,
break;
case UTXH:
- if (s->chr) {
+ if (qemu_chr_fe_get_driver(&s->chr)) {
s->reg[I_(UTRSTAT)] &= ~(UTRSTAT_TRANSMITTER_EMPTY |
UTRSTAT_Tx_BUFFER_EMPTY);
ch = (uint8_t)val;
- qemu_chr_fe_write(s->chr, &ch, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
#if DEBUG_Tx_DATA
fprintf(stderr, "%c", ch);
#endif
@@ -604,7 +606,7 @@ DeviceState *exynos4210_uart_create(hwaddr addr,
chr = serial_hds[channel];
if (!chr) {
snprintf(label, ARRAY_SIZE(label), "%s%d", chr_name, channel);
- chr = qemu_chr_new(label, "null", NULL);
+ chr = qemu_chr_new(label, "null");
if (!(chr)) {
error_report("Can't assign serial port to UART%d", channel);
exit(1);
@@ -638,8 +640,9 @@ static int exynos4210_uart_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->irq);
- qemu_chr_add_handlers(s->chr, exynos4210_uart_can_receive,
- exynos4210_uart_receive, exynos4210_uart_event, s);
+ qemu_chr_fe_set_handlers(&s->chr, exynos4210_uart_can_receive,
+ exynos4210_uart_receive, exynos4210_uart_event,
+ s, NULL, true);
return 0;
}
diff --git a/hw/char/grlib_apbuart.c b/hw/char/grlib_apbuart.c
index 871524c82f..db686e6a6f 100644
--- a/hw/char/grlib_apbuart.c
+++ b/hw/char/grlib_apbuart.c
@@ -78,7 +78,7 @@ typedef struct UART {
MemoryRegion iomem;
qemu_irq irq;
- CharDriverState *chr;
+ CharBackend chr;
/* registers */
uint32_t status;
@@ -201,9 +201,12 @@ static void grlib_apbuart_write(void *opaque, hwaddr addr,
case DATA_OFFSET:
case DATA_OFFSET + 3: /* When only one byte write */
/* Transmit when character device available and transmitter enabled */
- if ((uart->chr) && (uart->control & UART_TRANSMIT_ENABLE)) {
+ if (qemu_chr_fe_get_driver(&uart->chr) &&
+ (uart->control & UART_TRANSMIT_ENABLE)) {
c = value & 0xFF;
- qemu_chr_fe_write(uart->chr, &c, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&uart->chr, &c, 1);
/* Generate interrupt */
if (uart->control & UART_TRANSMIT_INTERRUPT) {
qemu_irq_pulse(uart->irq);
@@ -240,11 +243,11 @@ static int grlib_apbuart_init(SysBusDevice *dev)
{
UART *uart = GRLIB_APB_UART(dev);
- qemu_chr_add_handlers(uart->chr,
- grlib_apbuart_can_receive,
- grlib_apbuart_receive,
- grlib_apbuart_event,
- uart);
+ qemu_chr_fe_set_handlers(&uart->chr,
+ grlib_apbuart_can_receive,
+ grlib_apbuart_receive,
+ grlib_apbuart_event,
+ uart, NULL, true);
sysbus_init_irq(dev, &uart->irq);
diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c
index 44856d671e..99545fc359 100644
--- a/hw/char/imx_serial.c
+++ b/hw/char/imx_serial.c
@@ -121,9 +121,7 @@ static uint64_t imx_serial_read(void *opaque, hwaddr offset,
s->usr2 &= ~USR2_RDR;
s->uts1 |= UTS1_RXEMPTY;
imx_update(s);
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
}
return c;
@@ -172,18 +170,19 @@ static void imx_serial_write(void *opaque, hwaddr offset,
uint64_t value, unsigned size)
{
IMXSerialState *s = (IMXSerialState *)opaque;
+ CharDriverState *chr = qemu_chr_fe_get_driver(&s->chr);
unsigned char ch;
DPRINTF("write(offset=0x%" HWADDR_PRIx ", value = 0x%x) to %s\n",
- offset, (unsigned int)value, s->chr ? s->chr->label : "NODEV");
+ offset, (unsigned int)value, chr ? chr->label : "NODEV");
switch (offset >> 2) {
case 0x10: /* UTXD */
ch = value;
if (s->ucr2 & UCR2_TXEN) {
- if (s->chr) {
- qemu_chr_fe_write(s->chr, &ch, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
s->usr1 &= ~USR1_TRDY;
imx_update(s);
s->usr1 |= USR1_TRDY;
@@ -212,9 +211,7 @@ static void imx_serial_write(void *opaque, hwaddr offset,
}
if (value & UCR2_RXEN) {
if (!(s->ucr2 & UCR2_RXEN)) {
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
}
}
s->ucr2 = value & 0xffff;
@@ -316,12 +313,10 @@ static void imx_serial_realize(DeviceState *dev, Error **errp)
{
IMXSerialState *s = IMX_SERIAL(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, imx_can_receive, imx_receive,
- imx_event, s);
- } else {
- DPRINTF("No char dev for uart\n");
- }
+ DPRINTF("char dev for uart: %p\n", qemu_chr_fe_get_driver(&s->chr));
+
+ qemu_chr_fe_set_handlers(&s->chr, imx_can_receive, imx_receive,
+ imx_event, s, NULL, true);
}
static void imx_serial_init(Object *obj)
diff --git a/hw/char/ipoctal232.c b/hw/char/ipoctal232.c
index 9ead32af60..93929c2880 100644
--- a/hw/char/ipoctal232.c
+++ b/hw/char/ipoctal232.c
@@ -93,7 +93,7 @@ typedef struct SCC2698Block SCC2698Block;
struct SCC2698Channel {
IPOctalState *ipoctal;
- CharDriverState *dev;
+ CharBackend dev;
bool rx_enabled;
uint8_t mr[2];
uint8_t mr_idx;
@@ -288,9 +288,7 @@ static uint16_t io_read(IPackDevice *ip, uint8_t addr)
if (ch->rx_pending == 0) {
ch->sr &= ~SR_RXRDY;
blk->isr &= ~ISR_RXRDY(channel);
- if (ch->dev) {
- qemu_chr_accept_input(ch->dev);
- }
+ qemu_chr_fe_accept_input(&ch->dev);
} else {
ch->rhr_idx = (ch->rhr_idx + 1) % RX_FIFO_SIZE;
}
@@ -357,11 +355,11 @@ static void io_write(IPackDevice *ip, uint8_t addr, uint16_t val)
case REG_THRa:
case REG_THRb:
if (ch->sr & SR_TXRDY) {
+ uint8_t thr = reg;
DPRINTF("Write THR%c (0x%x)\n", channel + 'a', reg);
- if (ch->dev) {
- uint8_t thr = reg;
- qemu_chr_fe_write(ch->dev, &thr, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&ch->dev, &thr, 1);
} else {
DPRINTF("Write THR%c (0x%x), Tx disabled\n", channel + 'a', reg);
}
@@ -544,9 +542,10 @@ static void ipoctal_realize(DeviceState *dev, Error **errp)
ch->ipoctal = s;
/* Redirect IP-Octal channels to host character devices */
- if (ch->dev) {
- qemu_chr_add_handlers(ch->dev, hostdev_can_receive,
- hostdev_receive, hostdev_event, ch);
+ if (qemu_chr_fe_get_driver(&ch->dev)) {
+ qemu_chr_fe_set_handlers(&ch->dev, hostdev_can_receive,
+ hostdev_receive, hostdev_event,
+ ch, NULL, true);
DPRINTF("Redirecting channel %u to %s\n", i, ch->dev->label);
} else {
DPRINTF("Could not redirect channel %u, no chardev set\n", i);
diff --git a/hw/char/lm32_juart.c b/hw/char/lm32_juart.c
index 28c2cf702d..f8c1e0d076 100644
--- a/hw/char/lm32_juart.c
+++ b/hw/char/lm32_juart.c
@@ -44,7 +44,7 @@ enum {
struct LM32JuartState {
SysBusDevice parent_obj;
- CharDriverState *chr;
+ CharBackend chr;
uint32_t jtx;
uint32_t jrx;
@@ -75,9 +75,9 @@ void lm32_juart_set_jtx(DeviceState *d, uint32_t jtx)
trace_lm32_juart_set_jtx(s->jtx);
s->jtx = jtx;
- if (s->chr) {
- qemu_chr_fe_write_all(s->chr, &ch, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
}
void lm32_juart_set_jrx(DeviceState *d, uint32_t jtx)
@@ -118,9 +118,8 @@ static void lm32_juart_realize(DeviceState *dev, Error **errp)
{
LM32JuartState *s = LM32_JUART(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, juart_can_rx, juart_rx, juart_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, juart_can_rx, juart_rx,
+ juart_event, s, NULL, true);
}
static const VMStateDescription vmstate_lm32_juart = {
diff --git a/hw/char/lm32_uart.c b/hw/char/lm32_uart.c
index b5c760dda3..7f3597c4b0 100644
--- a/hw/char/lm32_uart.c
+++ b/hw/char/lm32_uart.c
@@ -97,7 +97,7 @@ struct LM32UartState {
SysBusDevice parent_obj;
MemoryRegion iomem;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
uint32_t regs[R_MAX];
@@ -142,7 +142,7 @@ static uint64_t uart_read(void *opaque, hwaddr addr,
r = s->regs[R_RXTX];
s->regs[R_LSR] &= ~LSR_DR;
uart_update_irq(s);
- qemu_chr_accept_input(s->chr);
+ qemu_chr_fe_accept_input(&s->chr);
break;
case R_IIR:
case R_LSR:
@@ -177,9 +177,9 @@ static void uart_write(void *opaque, hwaddr addr,
addr >>= 2;
switch (addr) {
case R_RXTX:
- if (s->chr) {
- qemu_chr_fe_write_all(s->chr, &ch, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
break;
case R_IER:
case R_LCR:
@@ -265,9 +265,8 @@ static void lm32_uart_realize(DeviceState *dev, Error **errp)
{
LM32UartState *s = LM32_UART(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, uart_can_rx, uart_rx, uart_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx,
+ uart_event, s, NULL, true);
}
static const VMStateDescription vmstate_lm32_uart = {
diff --git a/hw/char/mcf_uart.c b/hw/char/mcf_uart.c
index 3c0438fd79..ecaa091190 100644
--- a/hw/char/mcf_uart.c
+++ b/hw/char/mcf_uart.c
@@ -10,6 +10,7 @@
#include "hw/m68k/mcf.h"
#include "sysemu/char.h"
#include "exec/address-spaces.h"
+#include "qapi/error.h"
typedef struct {
MemoryRegion iomem;
@@ -26,7 +27,7 @@ typedef struct {
int tx_enabled;
int rx_enabled;
qemu_irq irq;
- CharDriverState *chr;
+ CharBackend chr;
} mcf_uart_state;
/* UART Status Register bits. */
@@ -92,7 +93,7 @@ uint64_t mcf_uart_read(void *opaque, hwaddr addr,
if (s->fifo_len == 0)
s->sr &= ~MCF_UART_RxRDY;
mcf_uart_update(s);
- qemu_chr_accept_input(s->chr);
+ qemu_chr_fe_accept_input(&s->chr);
return val;
}
case 0x10:
@@ -113,8 +114,9 @@ uint64_t mcf_uart_read(void *opaque, hwaddr addr,
static void mcf_uart_do_tx(mcf_uart_state *s)
{
if (s->tx_enabled && (s->sr & MCF_UART_TxEMP) == 0) {
- if (s->chr)
- qemu_chr_fe_write(s->chr, (unsigned char *)&s->tb, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, (unsigned char *)&s->tb, 1);
s->sr |= MCF_UART_TxEMP;
}
if (s->tx_enabled) {
@@ -278,12 +280,12 @@ void *mcf_uart_init(qemu_irq irq, CharDriverState *chr)
mcf_uart_state *s;
s = g_malloc0(sizeof(mcf_uart_state));
- s->chr = chr;
s->irq = irq;
if (chr) {
- qemu_chr_fe_claim_no_fail(chr);
- qemu_chr_add_handlers(chr, mcf_uart_can_receive, mcf_uart_receive,
- mcf_uart_event, s);
+ qemu_chr_fe_init(&s->chr, chr, &error_abort);
+ qemu_chr_fe_set_handlers(&s->chr, mcf_uart_can_receive,
+ mcf_uart_receive, mcf_uart_event,
+ s, NULL, true);
}
mcf_uart_reset(s);
return s;
diff --git a/hw/char/milkymist-uart.c b/hw/char/milkymist-uart.c
index baddb37648..ae8e2f3554 100644
--- a/hw/char/milkymist-uart.c
+++ b/hw/char/milkymist-uart.c
@@ -61,7 +61,7 @@ struct MilkymistUartState {
SysBusDevice parent_obj;
MemoryRegion regs_region;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
uint32_t regs[R_MAX];
@@ -124,9 +124,7 @@ static void uart_write(void *opaque, hwaddr addr, uint64_t value,
addr >>= 2;
switch (addr) {
case R_RXTX:
- if (s->chr) {
- qemu_chr_fe_write_all(s->chr, &ch, 1);
- }
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
s->regs[R_STAT] |= STAT_TX_EVT;
break;
case R_DIV:
@@ -138,7 +136,7 @@ static void uart_write(void *opaque, hwaddr addr, uint64_t value,
case R_STAT:
/* write one to clear bits */
s->regs[addr] &= ~(value & (STAT_RX_EVT | STAT_TX_EVT));
- qemu_chr_accept_input(s->chr);
+ qemu_chr_fe_accept_input(&s->chr);
break;
default:
@@ -200,9 +198,8 @@ static void milkymist_uart_realize(DeviceState *dev, Error **errp)
{
MilkymistUartState *s = MILKYMIST_UART(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, uart_can_rx, uart_rx, uart_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx,
+ uart_event, s, NULL, true);
}
static void milkymist_uart_init(Object *obj)
diff --git a/hw/char/omap_uart.c b/hw/char/omap_uart.c
index 415bec5fac..893ab108bc 100644
--- a/hw/char/omap_uart.c
+++ b/hw/char/omap_uart.c
@@ -63,7 +63,7 @@ struct omap_uart_s *omap_uart_init(hwaddr base,
s->irq = irq;
s->serial = serial_mm_init(get_system_memory(), base, 2, irq,
omap_clk_getrate(fclk)/16,
- chr ?: qemu_chr_new(label, "null", NULL),
+ chr ?: qemu_chr_new(label, "null"),
DEVICE_NATIVE_ENDIAN);
return s;
}
@@ -183,6 +183,6 @@ void omap_uart_attach(struct omap_uart_s *s, CharDriverState *chr)
/* TODO: Should reuse or destroy current s->serial */
s->serial = serial_mm_init(get_system_memory(), s->base, 2, s->irq,
omap_clk_getrate(s->fclk) / 16,
- chr ?: qemu_chr_new("null", "null", NULL),
+ chr ?: qemu_chr_new("null", "null"),
DEVICE_NATIVE_ENDIAN);
}
diff --git a/hw/char/parallel.c b/hw/char/parallel.c
index 11c78fed88..f2d56666b7 100644
--- a/hw/char/parallel.c
+++ b/hw/char/parallel.c
@@ -74,12 +74,13 @@ typedef struct ParallelState {
uint8_t control;
qemu_irq irq;
int irq_pending;
- CharDriverState *chr;
+ CharBackend chr;
int hw_driver;
int epp_timeout;
uint32_t last_read_offset; /* For debugging */
/* Memory-mapped interface */
int it_shift;
+ PortioList portio_list;
} ParallelState;
#define TYPE_ISA_PARALLEL "isa-parallel"
@@ -128,7 +129,9 @@ parallel_ioport_write_sw(void *opaque, uint32_t addr, uint32_t val)
if (val & PARA_CTR_STROBE) {
s->status &= ~PARA_STS_BUSY;
if ((s->control & PARA_CTR_STROBE) == 0)
- qemu_chr_fe_write(s->chr, &s->dataw, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &s->dataw, 1);
} else {
if (s->control & PARA_CTR_INTEN) {
s->irq_pending = 1;
@@ -158,7 +161,7 @@ static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
if (s->dataw == val)
return;
pdebug("wd%02x\n", val);
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_WRITE_DATA, &parm);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_DATA, &parm);
s->dataw = val;
break;
case PARA_REG_STS:
@@ -178,11 +181,11 @@ static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
} else {
dir = 0;
}
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_DATA_DIR, &dir);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_DATA_DIR, &dir);
parm &= ~PARA_CTR_DIR;
}
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_WRITE_CONTROL, &parm);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_CONTROL, &parm);
s->control = val;
break;
case PARA_REG_EPP_ADDR:
@@ -191,7 +194,8 @@ static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
pdebug("wa%02x s\n", val);
else {
struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
- if (qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE_ADDR, &ioarg)) {
+ if (qemu_chr_fe_ioctl(&s->chr,
+ CHR_IOCTL_PP_EPP_WRITE_ADDR, &ioarg)) {
s->epp_timeout = 1;
pdebug("wa%02x t\n", val);
}
@@ -205,7 +209,7 @@ static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
pdebug("we%02x s\n", val);
else {
struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
- if (qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg)) {
+ if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg)) {
s->epp_timeout = 1;
pdebug("we%02x t\n", val);
}
@@ -230,7 +234,7 @@ parallel_ioport_eppdata_write_hw2(void *opaque, uint32_t addr, uint32_t val)
pdebug("we%04x s\n", val);
return;
}
- err = qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
+ err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
if (err) {
s->epp_timeout = 1;
pdebug("we%04x t\n", val);
@@ -253,7 +257,7 @@ parallel_ioport_eppdata_write_hw4(void *opaque, uint32_t addr, uint32_t val)
pdebug("we%08x s\n", val);
return;
}
- err = qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
+ err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
if (err) {
s->epp_timeout = 1;
pdebug("we%08x t\n", val);
@@ -305,13 +309,13 @@ static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
addr &= 7;
switch(addr) {
case PARA_REG_DATA:
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_READ_DATA, &ret);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_DATA, &ret);
if (s->last_read_offset != addr || s->datar != ret)
pdebug("rd%02x\n", ret);
s->datar = ret;
break;
case PARA_REG_STS:
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_READ_STATUS, &ret);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &ret);
ret &= ~PARA_STS_TMOUT;
if (s->epp_timeout)
ret |= PARA_STS_TMOUT;
@@ -323,7 +327,7 @@ static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
/* s->control has some bits fixed to 1. It is zero only when
it has not been yet written to. */
if (s->control == 0) {
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_READ_CONTROL, &ret);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_CONTROL, &ret);
if (s->last_read_offset != addr)
pdebug("rc%02x\n", ret);
s->control = ret;
@@ -335,12 +339,14 @@ static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
}
break;
case PARA_REG_EPP_ADDR:
- if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT))
+ if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) !=
+ (PARA_CTR_DIR | PARA_CTR_INIT))
/* Controls not correct for EPP addr cycle, so do nothing */
pdebug("ra%02x s\n", ret);
else {
struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
- if (qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ_ADDR, &ioarg)) {
+ if (qemu_chr_fe_ioctl(&s->chr,
+ CHR_IOCTL_PP_EPP_READ_ADDR, &ioarg)) {
s->epp_timeout = 1;
pdebug("ra%02x t\n", ret);
}
@@ -349,12 +355,13 @@ static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
}
break;
case PARA_REG_EPP_DATA:
- if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT))
+ if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) !=
+ (PARA_CTR_DIR | PARA_CTR_INIT))
/* Controls not correct for EPP data cycle, so do nothing */
pdebug("re%02x s\n", ret);
else {
struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
- if (qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg)) {
+ if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg)) {
s->epp_timeout = 1;
pdebug("re%02x t\n", ret);
}
@@ -382,7 +389,7 @@ parallel_ioport_eppdata_read_hw2(void *opaque, uint32_t addr)
pdebug("re%04x s\n", eppdata);
return eppdata;
}
- err = qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
+ err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
ret = le16_to_cpu(eppdata);
if (err) {
@@ -409,7 +416,7 @@ parallel_ioport_eppdata_read_hw4(void *opaque, uint32_t addr)
pdebug("re%08x s\n", eppdata);
return eppdata;
}
- err = qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
+ err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
ret = le32_to_cpu(eppdata);
if (err) {
@@ -505,7 +512,7 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp)
int base;
uint8_t dummy;
- if (!s->chr) {
+ if (!qemu_chr_fe_get_driver(&s->chr)) {
error_setg(errp, "Can't create parallel device, empty char device");
return;
}
@@ -527,12 +534,12 @@ static void parallel_isa_realizefn(DeviceState *dev, Error **errp)
isa_init_irq(isadev, &s->irq, isa->isairq);
qemu_register_reset(parallel_reset, s);
- if (qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) {
+ if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) {
s->hw_driver = 1;
s->status = dummy;
}
- isa_register_portio_list(isadev, base,
+ isa_register_portio_list(isadev, &s->portio_list, base,
(s->hw_driver
? &isa_parallel_portio_hw_list[0]
: &isa_parallel_portio_sw_list[0]),
@@ -602,7 +609,7 @@ bool parallel_mm_init(MemoryRegion *address_space,
s = g_malloc0(sizeof(ParallelState));
s->irq = irq;
- s->chr = chr;
+ qemu_chr_fe_init(&s->chr, chr, &error_abort);
s->it_shift = it_shift;
qemu_register_reset(parallel_reset, s);
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index c0fbf8a874..24ea9738b6 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -11,6 +11,7 @@
#include "hw/sysbus.h"
#include "sysemu/char.h"
#include "qemu/log.h"
+#include "trace.h"
#define TYPE_PL011 "pl011"
#define PL011(obj) OBJECT_CHECK(PL011State, (obj), TYPE_PL011)
@@ -35,7 +36,7 @@ typedef struct PL011State {
int read_pos;
int read_count;
int read_trigger;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
const unsigned char *id;
} PL011State;
@@ -58,6 +59,7 @@ static void pl011_update(PL011State *s)
uint32_t flags;
flags = s->int_level & s->int_enabled;
+ trace_pl011_irq_state(flags != 0);
qemu_set_irq(s->irq, flags != 0);
}
@@ -66,10 +68,8 @@ static uint64_t pl011_read(void *opaque, hwaddr offset,
{
PL011State *s = (PL011State *)opaque;
uint32_t c;
+ uint64_t r;
- if (offset >= 0xfe0 && offset < 0x1000) {
- return s->id[(offset - 0xfe0) >> 2];
- }
switch (offset >> 2) {
case 0: /* UARTDR */
s->flags &= ~PL011_FLAG_RXFF;
@@ -84,41 +84,60 @@ static uint64_t pl011_read(void *opaque, hwaddr offset,
}
if (s->read_count == s->read_trigger - 1)
s->int_level &= ~ PL011_INT_RX;
+ trace_pl011_read_fifo(s->read_count);
s->rsr = c >> 8;
pl011_update(s);
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
- return c;
+ qemu_chr_fe_accept_input(&s->chr);
+ r = c;
+ break;
case 1: /* UARTRSR */
- return s->rsr;
+ r = s->rsr;
+ break;
case 6: /* UARTFR */
- return s->flags;
+ r = s->flags;
+ break;
case 8: /* UARTILPR */
- return s->ilpr;
+ r = s->ilpr;
+ break;
case 9: /* UARTIBRD */
- return s->ibrd;
+ r = s->ibrd;
+ break;
case 10: /* UARTFBRD */
- return s->fbrd;
+ r = s->fbrd;
+ break;
case 11: /* UARTLCR_H */
- return s->lcr;
+ r = s->lcr;
+ break;
case 12: /* UARTCR */
- return s->cr;
+ r = s->cr;
+ break;
case 13: /* UARTIFLS */
- return s->ifl;
+ r = s->ifl;
+ break;
case 14: /* UARTIMSC */
- return s->int_enabled;
+ r = s->int_enabled;
+ break;
case 15: /* UARTRIS */
- return s->int_level;
+ r = s->int_level;
+ break;
case 16: /* UARTMIS */
- return s->int_level & s->int_enabled;
+ r = s->int_level & s->int_enabled;
+ break;
case 18: /* UARTDMACR */
- return s->dmacr;
+ r = s->dmacr;
+ break;
+ case 0x3f8 ... 0x400:
+ r = s->id[(offset - 0xfe0) >> 2];
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"pl011_read: Bad offset %x\n", (int)offset);
- return 0;
+ r = 0;
+ break;
}
+
+ trace_pl011_read(offset, r);
+ return r;
}
static void pl011_set_read_trigger(PL011State *s)
@@ -141,12 +160,15 @@ static void pl011_write(void *opaque, hwaddr offset,
PL011State *s = (PL011State *)opaque;
unsigned char ch;
+ trace_pl011_write(offset, value);
+
switch (offset >> 2) {
case 0: /* UARTDR */
/* ??? Check if transmitter is enabled. */
ch = value;
- if (s->chr)
- qemu_chr_fe_write(s->chr, &ch, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
s->int_level |= PL011_INT_TX;
pl011_update(s);
break;
@@ -205,11 +227,15 @@ static void pl011_write(void *opaque, hwaddr offset,
static int pl011_can_receive(void *opaque)
{
PL011State *s = (PL011State *)opaque;
+ int r;
- if (s->lcr & 0x10)
- return s->read_count < 16;
- else
- return s->read_count < 1;
+ if (s->lcr & 0x10) {
+ r = s->read_count < 16;
+ } else {
+ r = s->read_count < 1;
+ }
+ trace_pl011_can_receive(s->lcr, s->read_count, r);
+ return r;
}
static void pl011_put_fifo(void *opaque, uint32_t value)
@@ -223,7 +249,9 @@ static void pl011_put_fifo(void *opaque, uint32_t value)
s->read_fifo[slot] = value;
s->read_count++;
s->flags &= ~PL011_FLAG_RXFE;
+ trace_pl011_put_fifo(value, s->read_count);
if (!(s->lcr & 0x10) || s->read_count == 16) {
+ trace_pl011_put_fifo_full();
s->flags |= PL011_FLAG_RXFF;
}
if (s->read_count == s->read_trigger) {
@@ -300,10 +328,8 @@ static void pl011_realize(DeviceState *dev, Error **errp)
{
PL011State *s = PL011(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, pl011_can_receive, pl011_receive,
- pl011_event, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, pl011_can_receive, pl011_receive,
+ pl011_event, s, NULL, true);
}
static void pl011_class_init(ObjectClass *oc, void *data)
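The conversions above all follow the same front-end pattern: the device embeds a CharBackend instead of holding a CharDriverState pointer, handler registration goes through qemu_chr_fe_set_handlers(), and blocking output goes through qemu_chr_fe_write_all() on the backend's address. A minimal sketch of the pattern for a hypothetical device (DemoUART, DEMO_UART and the demo_* callbacks are invented for illustration; only the qemu_chr_fe_* calls are taken from the hunks above):

    typedef struct DemoUART {
        SysBusDevice parent_obj;
        CharBackend chr;              /* was: CharDriverState *chr */
    } DemoUART;

    static void demo_uart_realize(DeviceState *dev, Error **errp)
    {
        DemoUART *s = DEMO_UART(dev);

        /* Safe even with no chardev connected: the handlers simply never
         * fire, so the old "if (s->chr)" guards become unnecessary. */
        qemu_chr_fe_set_handlers(&s->chr, demo_can_receive, demo_receive,
                                 demo_event, s, NULL, true);
    }

    static void demo_uart_tx(DemoUART *s, uint8_t ch)
    {
        /* Blocking write, mirroring the converted devices above. */
        qemu_chr_fe_write_all(&s->chr, &ch, 1);
    }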
diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c
index a22ad8d016..07d6ebd112 100644
--- a/hw/char/sclpconsole-lm.c
+++ b/hw/char/sclpconsole-lm.c
@@ -37,7 +37,7 @@ typedef struct OprtnsCommand {
typedef struct SCLPConsoleLM {
SCLPEvent event;
- CharDriverState *chr;
+ CharBackend chr;
bool echo; /* immediate echo of input if true */
uint32_t write_errors; /* errors writing to char layer */
uint32_t length; /* length of byte stream in buffer */
@@ -89,7 +89,9 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
scon->buf[scon->length] = *buf;
scon->length += 1;
if (scon->echo) {
- qemu_chr_fe_write(scon->chr, buf, size);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&scon->chr, buf, size);
}
}
@@ -191,31 +193,16 @@ static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
*/
static int write_console_data(SCLPEvent *event, const uint8_t *buf, int len)
{
- int ret = 0;
- const uint8_t *buf_offset;
-
SCLPConsoleLM *scon = SCLPLM_CONSOLE(event);
- if (!scon->chr) {
+ if (!qemu_chr_fe_get_driver(&scon->chr)) {
/* If there's no backend, we can just say we consumed all data. */
return len;
}
- buf_offset = buf;
- while (len > 0) {
- ret = qemu_chr_fe_write(scon->chr, buf, len);
- if (ret == 0) {
- /* a pty doesn't seem to be connected - no error */
- len = 0;
- } else if (ret == -EAGAIN || (ret > 0 && ret < len)) {
- len -= ret;
- buf_offset += ret;
- } else {
- len = 0;
- }
- }
-
- return ret;
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ return qemu_chr_fe_write_all(&scon->chr, buf, len);
}
static int process_mdb(SCLPEvent *event, MDBO *mdbo)
@@ -325,9 +312,8 @@ static int console_init(SCLPEvent *event)
}
console_available = true;
- if (scon->chr) {
- qemu_chr_add_handlers(scon->chr, chr_can_read, chr_read, NULL, scon);
- }
+ qemu_chr_fe_set_handlers(&scon->chr, chr_can_read,
+ chr_read, NULL, scon, NULL, true);
return 0;
}
diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c
index d22464826b..b78f240a73 100644
--- a/hw/char/sclpconsole.c
+++ b/hw/char/sclpconsole.c
@@ -31,7 +31,7 @@ typedef struct ASCIIConsoleData {
typedef struct SCLPConsole {
SCLPEvent event;
- CharDriverState *chr;
+ CharBackend chr;
uint8_t iov[SIZE_BUFFER_VT220];
uint32_t iov_sclp; /* offset in buf for SCLP read operation */
uint32_t iov_bs; /* offset in buf for char layer read operation */
@@ -163,12 +163,14 @@ static ssize_t write_console_data(SCLPEvent *event, const uint8_t *buf,
{
SCLPConsole *scon = SCLP_CONSOLE(event);
- if (!scon->chr) {
+ if (!qemu_chr_fe_get_driver(&scon->chr)) {
/* If there's no backend, we can just say we consumed all data. */
return len;
}
- return qemu_chr_fe_write_all(scon->chr, buf, len);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ return qemu_chr_fe_write_all(&scon->chr, buf, len);
}
static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr)
@@ -225,10 +227,8 @@ static int console_init(SCLPEvent *event)
return -1;
}
console_available = true;
- if (scon->chr) {
- qemu_chr_add_handlers(scon->chr, chr_can_read,
- chr_read, NULL, scon);
- }
+ qemu_chr_fe_set_handlers(&scon->chr, chr_can_read,
+ chr_read, NULL, scon, NULL, true);
return 0;
}
diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c
index 1594ec4db3..54d3a12f51 100644
--- a/hw/char/serial-isa.c
+++ b/hw/char/serial-isa.c
@@ -133,13 +133,14 @@ static void serial_isa_init(ISABus *bus, int index, CharDriverState *chr)
qdev_init_nofail(dev);
}
-void serial_hds_isa_init(ISABus *bus, int n)
+void serial_hds_isa_init(ISABus *bus, int from, int to)
{
int i;
- assert(n <= MAX_SERIAL_PORTS);
+ assert(from >= 0);
+ assert(to <= MAX_SERIAL_PORTS);
- for (i = 0; i < n; ++i) {
+ for (i = from; i < to; ++i) {
if (serial_hds[i]) {
serial_isa_init(bus, i, serial_hds[i]);
}
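With the new signature, callers hand serial_hds_isa_init() a half-open port range instead of a count, so a board that previously registered n ports now passes [0, n). A hedged sketch of an updated call site (the isa_bus variable name is assumed, not taken from this diff):

    /* Register ISA serial ports 0 .. MAX_SERIAL_PORTS-1 on an existing bus. */
    serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);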
diff --git a/hw/char/serial.c b/hw/char/serial.c
index 3442f47d36..ffbacd8227 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -153,8 +153,9 @@ static void serial_update_parameters(SerialState *s)
int speed, parity, data_bits, stop_bits, frame_size;
QEMUSerialSetParams ssp;
- if (s->divider == 0)
+ if (s->divider == 0 || s->divider > s->baudbase) {
return;
+ }
/* Start bit. */
frame_size = 1;
@@ -181,7 +182,7 @@ static void serial_update_parameters(SerialState *s)
ssp.data_bits = data_bits;
ssp.stop_bits = stop_bits;
s->char_transmit_time = (NANOSECONDS_PER_SECOND / speed) * frame_size;
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_PARAMS, &ssp);
DPRINTF("speed=%d parity=%c data=%d stop=%d\n",
speed, parity, data_bits, stop_bits);
@@ -194,7 +195,8 @@ static void serial_update_msl(SerialState *s)
timer_del(s->modem_status_poll);
- if (qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_GET_TIOCM, &flags) == -ENOTSUP) {
+ if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_GET_TIOCM,
+ &flags) == -ENOTSUP) {
s->poll_msl = -1;
return;
}
@@ -259,11 +261,12 @@ static void serial_xmit(SerialState *s)
if (s->mcr & UART_MCR_LOOP) {
/* in loopback mode, say that we just received a char */
serial_receive1(s, &s->tsr, 1);
- } else if (qemu_chr_fe_write(s->chr, &s->tsr, 1) != 1 &&
+ } else if (qemu_chr_fe_write(&s->chr, &s->tsr, 1) != 1 &&
s->tsr_retry < MAX_XMIT_RETRY) {
assert(s->watch_tag == 0);
- s->watch_tag = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
- serial_watch_cb, s);
+ s->watch_tag =
+ qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP,
+ serial_watch_cb, s);
if (s->watch_tag > 0) {
s->tsr_retry++;
return;
@@ -416,8 +419,8 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
break_enable = (val >> 6) & 1;
if (break_enable != s->last_break_enable) {
s->last_break_enable = break_enable;
- qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
- &break_enable);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_BREAK,
+ &break_enable);
}
}
break;
@@ -431,7 +434,7 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
if (s->poll_msl >= 0 && old_mcr != s->mcr) {
- qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_GET_TIOCM, &flags);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_GET_TIOCM, &flags);
flags &= ~(CHR_TIOCM_RTS | CHR_TIOCM_DTR);
@@ -440,7 +443,7 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
if (val & UART_MCR_DTR)
flags |= CHR_TIOCM_DTR;
- qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_SET_TIOCM, &flags);
+ qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_SERIAL_SET_TIOCM, &flags);
/* Update the modem status after a one-character-send wait-time, since there may be a response
from the device/computer at the other end of the serial line */
timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time);
@@ -485,7 +488,7 @@ static uint64_t serial_ioport_read(void *opaque, hwaddr addr, unsigned size)
serial_update_irq(s);
if (!(s->mcr & UART_MCR_LOOP)) {
/* in loopback mode, don't receive any data */
- qemu_chr_accept_input(s->chr);
+ qemu_chr_fe_accept_input(&s->chr);
}
}
break;
@@ -658,7 +661,7 @@ static int serial_post_load(void *opaque, int version_id)
}
assert(s->watch_tag == 0);
- s->watch_tag = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
+ s->watch_tag = qemu_chr_fe_add_watch(&s->chr, G_IO_OUT | G_IO_HUP,
serial_watch_cb, s);
} else {
/* tsr_retry == 0 implies LSR.TEMT = 1 (transmitter empty). */
@@ -883,7 +886,7 @@ static void serial_reset(void *opaque)
void serial_realize_core(SerialState *s, Error **errp)
{
- if (!s->chr) {
+ if (!qemu_chr_fe_get_driver(&s->chr)) {
error_setg(errp, "Can't create serial device, empty char device");
return;
}
@@ -893,8 +896,8 @@ void serial_realize_core(SerialState *s, Error **errp)
s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, (QEMUTimerCB *) fifo_timeout_int, s);
qemu_register_reset(serial_reset, s);
- qemu_chr_add_handlers(s->chr, serial_can_receive1, serial_receive1,
- serial_event, s);
+ qemu_chr_fe_set_handlers(&s->chr, serial_can_receive1, serial_receive1,
+ serial_event, s, NULL, true);
fifo8_create(&s->recv_fifo, UART_FIFO_LENGTH);
fifo8_create(&s->xmit_fifo, UART_FIFO_LENGTH);
serial_reset(s);
@@ -902,7 +905,7 @@ void serial_realize_core(SerialState *s, Error **errp)
void serial_exit_core(SerialState *s)
{
- qemu_chr_add_handlers(s->chr, NULL, NULL, NULL, NULL);
+ qemu_chr_fe_deinit(&s->chr);
qemu_unregister_reset(serial_reset, s);
}
@@ -932,7 +935,7 @@ SerialState *serial_init(int base, qemu_irq irq, int baudbase,
s->irq = irq;
s->baudbase = baudbase;
- s->chr = chr;
+ qemu_chr_fe_init(&s->chr, chr, &error_abort);
serial_realize_core(s, &error_fatal);
vmstate_register(NULL, base, &vmstate_serial, s);
@@ -989,7 +992,7 @@ SerialState *serial_mm_init(MemoryRegion *address_space,
s->it_shift = it_shift;
s->irq = irq;
s->baudbase = baudbase;
- s->chr = chr;
+ qemu_chr_fe_init(&s->chr, chr, &error_abort);
serial_realize_core(s, &error_fatal);
vmstate_register(NULL, base, &vmstate_serial, s);
diff --git a/hw/char/sh_serial.c b/hw/char/sh_serial.c
index 4c55dcb7dc..9d35564bcf 100644
--- a/hw/char/sh_serial.c
+++ b/hw/char/sh_serial.c
@@ -29,6 +29,7 @@
#include "hw/sh4/sh.h"
#include "sysemu/char.h"
#include "exec/address-spaces.h"
+#include "qapi/error.h"
//#define DEBUG_SERIAL
@@ -62,7 +63,7 @@ typedef struct {
int flags;
int rtrg;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq eri;
qemu_irq rxi;
@@ -109,9 +110,11 @@ static void sh_serial_write(void *opaque, hwaddr offs,
}
return;
case 0x0c: /* FTDR / TDR */
- if (s->chr) {
+ if (qemu_chr_fe_get_driver(&s->chr)) {
ch = val;
- qemu_chr_fe_write(s->chr, &ch, 1);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
}
s->dr = val;
s->flags &= ~SH_SERIAL_FLAG_TDE;
@@ -393,12 +396,11 @@ void sh_serial_init(MemoryRegion *sysmem,
0, 0x28);
memory_region_add_subregion(sysmem, A7ADDR(base), &s->iomem_a7);
- s->chr = chr;
-
if (chr) {
- qemu_chr_fe_claim_no_fail(chr);
- qemu_chr_add_handlers(chr, sh_serial_can_receive1, sh_serial_receive1,
- sh_serial_event, s);
+ qemu_chr_fe_init(&s->chr, chr, &error_abort);
+ qemu_chr_fe_set_handlers(&s->chr, sh_serial_can_receive1,
+ sh_serial_receive1,
+ sh_serial_event, s, NULL, true);
}
s->eri = eri_source;
diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c
index 3498d7b052..7c22b8bd0e 100644
--- a/hw/char/spapr_vty.c
+++ b/hw/char/spapr_vty.c
@@ -1,4 +1,5 @@
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
@@ -11,7 +12,7 @@
typedef struct VIOsPAPRVTYDevice {
VIOsPAPRDevice sdev;
- CharDriverState *chardev;
+ CharBackend chardev;
uint32_t in, out;
uint8_t buf[VTERM_BUFSIZE];
} VIOsPAPRVTYDevice;
@@ -24,7 +25,7 @@ static int vty_can_receive(void *opaque)
{
VIOsPAPRVTYDevice *dev = VIO_SPAPR_VTY_DEVICE(opaque);
- return (dev->in - dev->out) < VTERM_BUFSIZE;
+ return VTERM_BUFSIZE - (dev->in - dev->out);
}
static void vty_receive(void *opaque, const uint8_t *buf, int size)
@@ -37,7 +38,15 @@ static void vty_receive(void *opaque, const uint8_t *buf, int size)
qemu_irq_pulse(spapr_vio_qirq(&dev->sdev));
}
for (i = 0; i < size; i++) {
- assert((dev->in - dev->out) < VTERM_BUFSIZE);
+ if (dev->in - dev->out >= VTERM_BUFSIZE) {
+ static bool reported;
+ if (!reported) {
+ error_report("VTY input buffer exhausted - characters dropped."
+ " (input size = %i)", size);
+ reported = true;
+ }
+ break;
+ }
dev->buf[dev->in++ % VTERM_BUFSIZE] = buf[i];
}
}
@@ -51,7 +60,7 @@ static int vty_getchars(VIOsPAPRDevice *sdev, uint8_t *buf, int max)
buf[n++] = dev->buf[dev->out++ % VTERM_BUFSIZE];
}
- qemu_chr_accept_input(dev->chardev);
+ qemu_chr_fe_accept_input(&dev->chardev);
return n;
}
@@ -60,21 +69,22 @@ void vty_putchars(VIOsPAPRDevice *sdev, uint8_t *buf, int len)
{
VIOsPAPRVTYDevice *dev = VIO_SPAPR_VTY_DEVICE(sdev);
- /* FIXME: should check the qemu_chr_fe_write() return value */
- qemu_chr_fe_write(dev->chardev, buf, len);
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&dev->chardev, buf, len);
}
static void spapr_vty_realize(VIOsPAPRDevice *sdev, Error **errp)
{
VIOsPAPRVTYDevice *dev = VIO_SPAPR_VTY_DEVICE(sdev);
- if (!dev->chardev) {
+ if (!qemu_chr_fe_get_driver(&dev->chardev)) {
error_setg(errp, "chardev property not set");
return;
}
- qemu_chr_add_handlers(dev->chardev, vty_can_receive,
- vty_receive, NULL, dev);
+ qemu_chr_fe_set_handlers(&dev->chardev, vty_can_receive,
+ vty_receive, NULL, dev, NULL, true);
}
/* Forward declaration */
diff --git a/hw/char/stm32f2xx_usart.c b/hw/char/stm32f2xx_usart.c
index 15657abda9..59872e6d3b 100644
--- a/hw/char/stm32f2xx_usart.c
+++ b/hw/char/stm32f2xx_usart.c
@@ -97,17 +97,13 @@ static uint64_t stm32f2xx_usart_read(void *opaque, hwaddr addr,
case USART_SR:
retvalue = s->usart_sr;
s->usart_sr &= ~USART_SR_TC;
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
return retvalue;
case USART_DR:
DB_PRINT("Value: 0x%" PRIx32 ", %c\n", s->usart_dr, (char) s->usart_dr);
s->usart_sr |= USART_SR_TXE;
s->usart_sr &= ~USART_SR_RXNE;
- if (s->chr) {
- qemu_chr_accept_input(s->chr);
- }
+ qemu_chr_fe_accept_input(&s->chr);
qemu_set_irq(s->irq, 0);
return s->usart_dr & 0x3FF;
case USART_BRR:
@@ -152,9 +148,9 @@ static void stm32f2xx_usart_write(void *opaque, hwaddr addr,
case USART_DR:
if (value < 0xF000) {
ch = value;
- if (s->chr) {
- qemu_chr_fe_write_all(s->chr, &ch, 1);
- }
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
s->usart_sr |= USART_SR_TC;
s->usart_sr &= ~USART_SR_TXE;
}
@@ -210,10 +206,8 @@ static void stm32f2xx_usart_realize(DeviceState *dev, Error **errp)
{
STM32F2XXUsartState *s = STM32F2XX_USART(dev);
- if (s->chr) {
- qemu_chr_add_handlers(s->chr, stm32f2xx_usart_can_receive,
- stm32f2xx_usart_receive, NULL, s);
- }
+ qemu_chr_fe_set_handlers(&s->chr, stm32f2xx_usart_can_receive,
+ stm32f2xx_usart_receive, NULL, s, NULL, true);
}
static void stm32f2xx_usart_class_init(ObjectClass *klass, void *data)
diff --git a/hw/char/trace-events b/hw/char/trace-events
index d53577c99d..7fd48bb80d 100644
--- a/hw/char/trace-events
+++ b/hw/char/trace-events
@@ -47,3 +47,12 @@ escc_sunkbd_event_in(int ch, const char *name, int down) "QKeyCode 0x%2.2x [%s],
escc_sunkbd_event_out(int ch) "Translated keycode 0x%2.2x"
escc_kbd_command(int val) "Command %d"
escc_sunmouse_event(int dx, int dy, int buttons_state) "dx=%d dy=%d buttons=%01x"
+
+# hw/char/pl011.c
+pl011_irq_state(int level) "irq state %d"
+pl011_read(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
+pl011_read_fifo(int read_count) "FIFO read, read_count now %d"
+pl011_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
+pl011_can_receive(uint32_t lcr, int read_count, int r) "LCR %08x read_count %d returning %d"
+pl011_put_fifo(uint32_t c, int read_count) "new char 0x%x read_count now %d"
+pl011_put_fifo_full(void) "FIFO now full, RXFF set"
diff --git a/hw/char/virtio-console.c b/hw/char/virtio-console.c
index 4f0e03d3b7..776205b4a9 100644
--- a/hw/char/virtio-console.c
+++ b/hw/char/virtio-console.c
@@ -24,7 +24,7 @@
typedef struct VirtConsole {
VirtIOSerialPort parent_obj;
- CharDriverState *chr;
+ CharBackend chr;
guint watch;
} VirtConsole;
@@ -49,12 +49,12 @@ static ssize_t flush_buf(VirtIOSerialPort *port,
VirtConsole *vcon = VIRTIO_CONSOLE(port);
ssize_t ret;
- if (!vcon->chr) {
+ if (!qemu_chr_fe_get_driver(&vcon->chr)) {
/* If there's no backend, we can just say we consumed all data. */
return len;
}
- ret = qemu_chr_fe_write(vcon->chr, buf, len);
+ ret = qemu_chr_fe_write(&vcon->chr, buf, len);
trace_virtio_console_flush_buf(port->id, len, ret);
if (ret < len) {
@@ -68,10 +68,31 @@ static ssize_t flush_buf(VirtIOSerialPort *port,
*/
if (ret < 0)
ret = 0;
+
+ /* XXX we should be queuing data to send later for the
+ * console devices too rather than silently dropping
+ * console data on EAGAIN. The Linux virtio-console
+ * hvc driver, though, performs its sends with spinlocks held,
+ * so if we enable throttling that'll stall the entire
+ * guest kernel, not merely the process writing to the
+ * console.
+ *
+ * While we could queue data for later write without
+ * enabling throttling, this would result in the guest
+ * being able to trigger arbitrary memory usage in QEMU
+ * buffering data for later writes.
+ *
+ * So fixing this problem likely requires fixing the
+ * Linux virtio-console hvc driver to not hold spinlocks
+ * while writing, and instead merely block the process
+ * that's writing. QEMU would then need some way to detect
+ * if the guest had the fixed driver too, before we can
+ * use throttling on host side.
+ */
if (!k->is_console) {
virtio_serial_throttle_port(port, true);
if (!vcon->watch) {
- vcon->watch = qemu_chr_fe_add_watch(vcon->chr,
+ vcon->watch = qemu_chr_fe_add_watch(&vcon->chr,
G_IO_OUT|G_IO_HUP,
chr_write_unblocked, vcon);
}
@@ -87,8 +108,8 @@ static void set_guest_connected(VirtIOSerialPort *port, int guest_connected)
DeviceState *dev = DEVICE(port);
VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port);
- if (vcon->chr && !k->is_console) {
- qemu_chr_fe_set_open(vcon->chr, guest_connected);
+ if (!k->is_console) {
+ qemu_chr_fe_set_open(&vcon->chr, guest_connected);
}
if (dev->id) {
@@ -101,9 +122,7 @@ static void guest_writable(VirtIOSerialPort *port)
{
VirtConsole *vcon = VIRTIO_CONSOLE(port);
- if (vcon->chr) {
- qemu_chr_accept_input(vcon->chr);
- }
+ qemu_chr_fe_accept_input(&vcon->chr);
}
/* Readiness of the guest to accept data on a port */
@@ -149,6 +168,7 @@ static void virtconsole_realize(DeviceState *dev, Error **errp)
VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
VirtConsole *vcon = VIRTIO_CONSOLE(dev);
VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(dev);
+ CharDriverState *chr = qemu_chr_fe_get_driver(&vcon->chr);
if (port->id == 0 && !k->is_console) {
error_setg(errp, "Port number 0 on virtio-serial devices reserved "
@@ -156,7 +176,7 @@ static void virtconsole_realize(DeviceState *dev, Error **errp)
return;
}
- if (vcon->chr) {
+ if (chr) {
/*
* For consoles we don't block guest data transfer just
* because nothing is connected - we'll just let it go
@@ -167,14 +187,12 @@ static void virtconsole_realize(DeviceState *dev, Error **errp)
* trigger open/close of the device
*/
if (k->is_console) {
- vcon->chr->explicit_fe_open = 0;
- qemu_chr_add_handlers(vcon->chr, chr_can_read, chr_read,
- NULL, vcon);
+ qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read,
+ NULL, vcon, NULL, true);
virtio_serial_open(port);
} else {
- vcon->chr->explicit_fe_open = 1;
- qemu_chr_add_handlers(vcon->chr, chr_can_read, chr_read,
- chr_event, vcon);
+ qemu_chr_fe_set_handlers(&vcon->chr, chr_can_read, chr_read,
+ chr_event, vcon, NULL, false);
}
}
}
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index db57a38546..7975c2cda1 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -75,6 +75,19 @@ static VirtIOSerialPort *find_port_by_name(char *name)
return NULL;
}
+static VirtIOSerialPort *find_first_connected_console(VirtIOSerial *vser)
+{
+ VirtIOSerialPort *port;
+
+ QTAILQ_FOREACH(port, &vser->ports, next) {
+ VirtIOSerialPortClass const *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
+ if (vsc->is_console && port->host_connected) {
+ return port;
+ }
+ }
+ return NULL;
+}
+
static bool use_multiport(VirtIOSerial *vser)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vser);
@@ -132,6 +145,15 @@ static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev)
virtio_notify(vdev, vq);
}
+static void discard_throttle_data(VirtIOSerialPort *port)
+{
+ if (port->elem) {
+ virtqueue_detach_element(port->ovq, port->elem, 0);
+ g_free(port->elem);
+ port->elem = NULL;
+ }
+}
+
static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
VirtIODevice *vdev)
{
@@ -254,6 +276,7 @@ int virtio_serial_close(VirtIOSerialPort *port)
* consume, reset the throttling flag and discard the data.
*/
port->throttled = false;
+ discard_throttle_data(port);
discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser));
send_control_event(port->vser, port->id, VIRTIO_CONSOLE_PORT_OPEN, 0);
@@ -528,6 +551,7 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t features,
vser = VIRTIO_SERIAL(vdev);
+ features |= vser->host_features;
if (vser->bus.max_nr_ports > 1) {
virtio_add_feature(&features, VIRTIO_CONSOLE_F_MULTIPORT);
}
@@ -547,6 +571,29 @@ static void get_config(VirtIODevice *vdev, uint8_t *config_data)
vser->serial.max_virtserial_ports);
}
+/* Guest sent new config info */
+static void set_config(VirtIODevice *vdev, const uint8_t *config_data)
+{
+ VirtIOSerial *vser = VIRTIO_SERIAL(vdev);
+ struct virtio_console_config *config =
+ (struct virtio_console_config *)config_data;
+ uint8_t emerg_wr_lo = le32_to_cpu(config->emerg_wr);
+ VirtIOSerialPort *port = find_first_connected_console(vser);
+ VirtIOSerialPortClass *vsc;
+
+ if (!config->emerg_wr) {
+ return;
+ }
+ /* Make sure we don't misdetect an emergency write when the guest
+ * does a short config write after an emergency write. */
+ config->emerg_wr = 0;
+ if (!port) {
+ return;
+ }
+ vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
+ (void)vsc->have_data(port, &emerg_wr_lo, 1);
+}
+
static void guest_reset(VirtIOSerial *vser)
{
VirtIOSerialPort *port;
@@ -554,6 +601,9 @@ static void guest_reset(VirtIOSerial *vser)
QTAILQ_FOREACH(port, &vser->ports, next) {
vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
+
+ discard_throttle_data(port);
+
if (port->guest_connected) {
port->guest_connected = false;
if (vsc->set_guest_connected) {
@@ -728,12 +778,6 @@ static int fetch_active_ports_list(QEMUFile *f,
return 0;
}
-static int virtio_serial_load(QEMUFile *f, void *opaque, size_t size)
-{
- /* The virtio device */
- return virtio_load(VIRTIO_DEVICE(opaque), f, 3);
-}
-
static int virtio_serial_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@@ -864,6 +908,7 @@ static void remove_port(VirtIOSerial *vser, uint32_t port_id)
assert(port);
/* Flush out any unconsumed buffers first */
+ discard_throttle_data(port);
discard_vq_data(port->ovq, VIRTIO_DEVICE(port->vser));
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_REMOVE, 1);
@@ -967,6 +1012,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtIOSerial *vser = VIRTIO_SERIAL(dev);
uint32_t i, max_supported_ports;
+ size_t config_size = sizeof(struct virtio_console_config);
if (!vser->serial.max_virtserial_ports) {
error_setg(errp, "Maximum number of serial ports not specified");
@@ -981,10 +1027,12 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
return;
}
- /* We don't support emergency write, skip it for now. */
- /* TODO: cleaner fix, depending on host features. */
+ if (!virtio_has_feature(vser->host_features,
+ VIRTIO_CONSOLE_F_EMERG_WRITE)) {
+ config_size = offsetof(struct virtio_console_config, emerg_wr);
+ }
virtio_init(vdev, "virtio-serial", VIRTIO_ID_CONSOLE,
- offsetof(struct virtio_console_config, emerg_wr));
+ config_size);
/* Spawn a new virtio-serial bus on which the ports will ride as devices */
qbus_create_inplace(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS,
@@ -1075,11 +1123,21 @@ static void virtio_serial_device_unrealize(DeviceState *dev, Error **errp)
}
/* Note: 'console' is used for backwards compatibility */
-VMSTATE_VIRTIO_DEVICE(console, 3, virtio_serial_load, virtio_vmstate_save);
+static const VMStateDescription vmstate_virtio_console = {
+ .name = "virtio-console",
+ .minimum_version_id = 3,
+ .version_id = 3,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
static Property virtio_serial_properties[] = {
DEFINE_PROP_UINT32("max_ports", VirtIOSerial, serial.max_virtserial_ports,
31),
+ DEFINE_PROP_BIT64("emergency-write", VirtIOSerial, host_features,
+ VIRTIO_CONSOLE_F_EMERG_WRITE, true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1098,6 +1156,7 @@ static void virtio_serial_class_init(ObjectClass *klass, void *data)
vdc->unrealize = virtio_serial_device_unrealize;
vdc->get_features = get_features;
vdc->get_config = get_config;
+ vdc->set_config = set_config;
vdc->set_status = set_status;
vdc->reset = vser_reset;
vdc->save = virtio_serial_save_device;
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index 83108b0bdb..c01f41090e 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -23,9 +23,10 @@
#include <sys/select.h>
#include <termios.h>
+#include "qapi/error.h"
#include "hw/hw.h"
#include "sysemu/char.h"
#include "hw/xen/xen_backend.h"
+#include "qapi/error.h"
#include <xen/io/console.h>
@@ -43,7 +45,7 @@ struct XenConsole {
char console[XEN_BUFSIZE];
int ring_ref;
void *sring;
- CharDriverState *chr;
+ CharBackend chr;
int backlog;
};
@@ -72,7 +74,7 @@ static void buffer_append(struct XenConsole *con)
xen_mb();
intf->out_cons = cons;
- xen_be_send_notify(&con->xendev);
+ xen_pv_send_notify(&con->xendev);
if (buffer->max_capacity &&
buffer->size > buffer->max_capacity) {
@@ -140,7 +142,7 @@ static void xencons_receive(void *opaque, const uint8_t *buf, int len)
}
xen_wmb();
intf->in_prod = prod;
- xen_be_send_notify(&con->xendev);
+ xen_pv_send_notify(&con->xendev);
}
static void xencons_send(struct XenConsole *con)
@@ -148,22 +150,25 @@ static void xencons_send(struct XenConsole *con)
ssize_t len, size;
size = con->buffer.size - con->buffer.consumed;
- if (con->chr)
- len = qemu_chr_fe_write(con->chr, con->buffer.data + con->buffer.consumed,
- size);
- else
+ if (qemu_chr_fe_get_driver(&con->chr)) {
+ len = qemu_chr_fe_write(&con->chr,
+ con->buffer.data + con->buffer.consumed,
+ size);
+ } else {
len = size;
+ }
if (len < 1) {
- if (!con->backlog) {
- con->backlog = 1;
- xen_be_printf(&con->xendev, 1, "backlog piling up, nobody listening?\n");
- }
+ if (!con->backlog) {
+ con->backlog = 1;
+ xen_pv_printf(&con->xendev, 1,
+ "backlog piling up, nobody listening?\n");
+ }
} else {
- buffer_advance(&con->buffer, len);
- if (con->backlog && len == size) {
- con->backlog = 0;
- xen_be_printf(&con->xendev, 1, "backlog is gone\n");
- }
+ buffer_advance(&con->buffer, len);
+ if (con->backlog && len == size) {
+ con->backlog = 0;
+ xen_pv_printf(&con->xendev, 1, "backlog is gone\n");
+ }
}
}
@@ -187,7 +192,7 @@ static int con_init(struct XenDevice *xendev)
type = xenstore_read_str(con->console, "type");
if (!type || strcmp(type, "ioemu") != 0) {
- xen_be_printf(xendev, 1, "not for me (type=%s)\n", type);
+ xen_pv_printf(xendev, 1, "not for me (type=%s)\n", type);
ret = -1;
goto out;
}
@@ -196,13 +201,18 @@ static int con_init(struct XenDevice *xendev)
/* no Xen override, use qemu output device */
if (output == NULL) {
- con->chr = serial_hds[con->xendev.dev];
+ if (con->xendev.dev) {
+ qemu_chr_fe_init(&con->chr, serial_hds[con->xendev.dev],
+ &error_abort);
+ }
} else {
snprintf(label, sizeof(label), "xencons%d", con->xendev.dev);
- con->chr = qemu_chr_new(label, output, NULL);
+ qemu_chr_fe_init(&con->chr,
+ qemu_chr_new(label, output), &error_abort);
}
- xenstore_store_pv_console_info(con->xendev.dev, con->chr);
+ xenstore_store_pv_console_info(con->xendev.dev,
+ qemu_chr_fe_get_driver(&con->chr));
out:
g_free(type);
@@ -235,19 +245,11 @@ static int con_initialise(struct XenDevice *xendev)
return -1;
xen_be_bind_evtchn(&con->xendev);
- if (con->chr) {
- if (qemu_chr_fe_claim(con->chr) == 0) {
- qemu_chr_add_handlers(con->chr, xencons_can_receive,
- xencons_receive, NULL, con);
- } else {
- xen_be_printf(xendev, 0,
- "xen_console_init error chardev %s already used\n",
- con->chr->label);
- con->chr = NULL;
- }
- }
+ qemu_chr_fe_set_handlers(&con->chr, xencons_can_receive,
+ xencons_receive, NULL, con, NULL, true);
- xen_be_printf(xendev, 1, "ring mfn %d, remote port %d, local port %d, limit %zd\n",
+ xen_pv_printf(xendev, 1,
+ "ring mfn %d, remote port %d, local port %d, limit %zd\n",
con->ring_ref,
con->xendev.remote_port,
con->xendev.local_port,
@@ -259,11 +261,8 @@ static void con_disconnect(struct XenDevice *xendev)
{
struct XenConsole *con = container_of(xendev, struct XenConsole, xendev);
- if (con->chr) {
- qemu_chr_add_handlers(con->chr, NULL, NULL, NULL, NULL);
- qemu_chr_fe_release(con->chr);
- }
- xen_be_unbind_evtchn(&con->xendev);
+ qemu_chr_fe_deinit(&con->chr);
+ xen_pv_unbind_evtchn(&con->xendev);
if (con->sring) {
if (!xendev->dev) {
diff --git a/hw/char/xilinx_uartlite.c b/hw/char/xilinx_uartlite.c
index 4847efb29f..37d313b429 100644
--- a/hw/char/xilinx_uartlite.c
+++ b/hw/char/xilinx_uartlite.c
@@ -55,7 +55,7 @@ typedef struct XilinxUARTLite {
SysBusDevice parent_obj;
MemoryRegion mmio;
- CharDriverState *chr;
+ CharBackend chr;
qemu_irq irq;
uint8_t rx_fifo[8];
@@ -107,7 +107,7 @@ uart_read(void *opaque, hwaddr addr, unsigned int size)
s->rx_fifo_len--;
uart_update_status(s);
uart_update_irq(s);
- qemu_chr_accept_input(s->chr);
+ qemu_chr_fe_accept_input(&s->chr);
break;
default:
@@ -143,9 +143,9 @@ uart_write(void *opaque, hwaddr addr,
break;
case R_TX:
- if (s->chr)
- qemu_chr_fe_write(s->chr, &ch, 1);
-
+ /* XXX this blocks entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->chr, &ch, 1);
s->regs[addr] = value;
/* hax. */
@@ -211,8 +211,8 @@ static void xilinx_uartlite_realize(DeviceState *dev, Error **errp)
{
XilinxUARTLite *s = XILINX_UARTLITE(dev);
- if (s->chr)
- qemu_chr_add_handlers(s->chr, uart_can_rx, uart_rx, uart_event, s);
+ qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx,
+ uart_event, s, NULL, true);
}
static void xilinx_uartlite_init(Object *obj)
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index cfd4840397..a4c94e522d 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -16,4 +16,7 @@ common-obj-$(CONFIG_SOFTMMU) += null-machine.o
common-obj-$(CONFIG_SOFTMMU) += loader.o
common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o
common-obj-$(CONFIG_SOFTMMU) += register.o
+common-obj-$(CONFIG_SOFTMMU) += or-irq.o
common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o
+
+obj-$(CONFIG_SOFTMMU) += generic-loader.o
diff --git a/hw/core/bus.c b/hw/core/bus.c
index 3e3f8ac740..cf383fc1af 100644
--- a/hw/core/bus.c
+++ b/hw/core/bus.c
@@ -78,8 +78,7 @@ static void qbus_realize(BusState *bus, DeviceState *parent, const char *name)
{
const char *typename = object_get_typename(OBJECT(bus));
BusClass *bc;
- char *buf;
- int i, len, bus_id;
+ int i, bus_id;
bus->parent = parent;
@@ -88,23 +87,15 @@ static void qbus_realize(BusState *bus, DeviceState *parent, const char *name)
} else if (bus->parent && bus->parent->id) {
/* parent device has id -> use it plus parent-bus-id for bus name */
bus_id = bus->parent->num_child_bus;
-
- len = strlen(bus->parent->id) + 16;
- buf = g_malloc(len);
- snprintf(buf, len, "%s.%d", bus->parent->id, bus_id);
- bus->name = buf;
+ bus->name = g_strdup_printf("%s.%d", bus->parent->id, bus_id);
} else {
/* no id -> use lowercase bus type plus global bus-id for bus name */
bc = BUS_GET_CLASS(bus);
bus_id = bc->automatic_ids++;
-
- len = strlen(typename) + 16;
- buf = g_malloc(len);
- len = snprintf(buf, len, "%s.%d", typename, bus_id);
- for (i = 0; i < len; i++) {
- buf[i] = qemu_tolower(buf[i]);
+ bus->name = g_strdup_printf("%s.%d", typename, bus_id);
+ for (i = 0; bus->name[i]; i++) {
+ bus->name[i] = qemu_tolower(bus->name[i]);
}
- bus->name = buf;
}
if (bus->parent) {
@@ -229,7 +220,7 @@ static void qbus_finalize(Object *obj)
{
BusState *bus = BUS(obj);
- g_free((char *)bus->name);
+ g_free(bus->name);
}
static const TypeInfo bus_info = {
diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c
new file mode 100644
index 0000000000..208f549dff
--- /dev/null
+++ b/hw/core/generic-loader.c
@@ -0,0 +1,216 @@
+/*
+ * Generic Loader
+ *
+ * Copyright (C) 2014 Li Guang
+ * Copyright (C) 2016 Xilinx Inc.
+ * Written by Li Guang <lig.fnst@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+/*
+ * Internally inside QEMU this is a device. It is a strange device that
+ * provides no hardware interface but allows QEMU to monkey patch memory
+ * specified when it is created. To be able to do this it has a reset
+ * callback that does the memory operations.
+ *
+ * This device allows the user to monkey patch memory. To be able to do
+ * this it needs a backend to manage the data, the same as other
+ * memory-related devices. In this case, as the backend is so trivial, we
+ * have merged it with the frontend instead of creating and maintaining a
+ * separate backend.
+ */
+
+#include "qemu/osdep.h"
+#include "qom/cpu.h"
+#include "hw/sysbus.h"
+#include "sysemu/dma.h"
+#include "hw/loader.h"
+#include "qapi/error.h"
+#include "hw/core/generic-loader.h"
+
+#define CPU_NONE 0xFFFFFFFF
+
+static void generic_loader_reset(void *opaque)
+{
+ GenericLoaderState *s = GENERIC_LOADER(opaque);
+
+ if (s->set_pc) {
+ CPUClass *cc = CPU_GET_CLASS(s->cpu);
+ cpu_reset(s->cpu);
+ if (cc) {
+ cc->set_pc(s->cpu, s->addr);
+ }
+ }
+
+ if (s->data_len) {
+ assert(s->data_len < sizeof(s->data));
+ dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len);
+ }
+}
+
+static void generic_loader_realize(DeviceState *dev, Error **errp)
+{
+ GenericLoaderState *s = GENERIC_LOADER(dev);
+ hwaddr entry;
+ int big_endian;
+ int size = 0;
+
+ s->set_pc = false;
+
+ /* Perform some error checking on the user's options */
+ if (s->data || s->data_len || s->data_be) {
+ /* User is loading memory values */
+ if (s->file) {
+ error_setg(errp, "Specifying a file is not supported when loading "
+ "memory values");
+ return;
+ } else if (s->force_raw) {
+ error_setg(errp, "Specifying force-raw is not supported when "
+ "loading memory values");
+ return;
+ } else if (!s->data_len) {
+ /* We can't check for !data here, as a value of 0 is still valid. */
+ error_setg(errp, "Both data and data-len must be specified");
+ return;
+ } else if (s->data_len > 8) {
+ error_setg(errp, "data-len cannot be greater then 8 bytes");
+ return;
+ }
+ } else if (s->file || s->force_raw) {
+ /* User is loading an image */
+ if (s->data || s->data_len || s->data_be) {
+ error_setg(errp, "data can not be specified when loading an "
+ "image");
+ return;
+ }
+ /* The user specified a file, only set the PC if they also specified
+ * a CPU to use.
+ */
+ if (s->cpu_num != CPU_NONE) {
+ s->set_pc = true;
+ }
+ } else if (s->addr) {
+ /* User is setting the PC */
+ if (s->data || s->data_len || s->data_be) {
+ error_setg(errp, "data can not be specified when setting a "
+ "program counter");
+ return;
+ } else if (!s->cpu_num) {
+ error_setg(errp, "cpu_num must be specified when setting a "
+ "program counter");
+ return;
+ }
+ s->set_pc = true;
+ } else {
+ /* Did the user specify anything? */
+ error_setg(errp, "please include valid arguments");
+ return;
+ }
+
+ qemu_register_reset(generic_loader_reset, dev);
+
+ if (s->cpu_num != CPU_NONE) {
+ s->cpu = qemu_get_cpu(s->cpu_num);
+ if (!s->cpu) {
+ error_setg(errp, "Specified boot CPU#%d is nonexistent",
+ s->cpu_num);
+ return;
+ }
+ } else {
+ s->cpu = first_cpu;
+ }
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ big_endian = 1;
+#else
+ big_endian = 0;
+#endif
+
+ if (s->file) {
+ if (!s->force_raw) {
+ size = load_elf_as(s->file, NULL, NULL, &entry, NULL, NULL,
+ big_endian, 0, 0, 0, s->cpu->as);
+
+ if (size < 0) {
+ size = load_uimage_as(s->file, &entry, NULL, NULL, NULL, NULL,
+ s->cpu->as);
+ }
+ }
+
+ if (size < 0 || s->force_raw) {
+ /* Default to the maximum size being the machine's ram size */
+ size = load_image_targphys_as(s->file, s->addr, ram_size,
+ s->cpu->as);
+ } else {
+ s->addr = entry;
+ }
+
+ if (size < 0) {
+ error_setg(errp, "Cannot load specified image %s", s->file);
+ return;
+ }
+ }
+
+ /* Convert the data endianness */
+ if (s->data_be) {
+ s->data = cpu_to_be64(s->data);
+ } else {
+ s->data = cpu_to_le64(s->data);
+ }
+}
+
+static void generic_loader_unrealize(DeviceState *dev, Error **errp)
+{
+ qemu_unregister_reset(generic_loader_reset, dev);
+}
+
+static Property generic_loader_props[] = {
+ DEFINE_PROP_UINT64("addr", GenericLoaderState, addr, 0),
+ DEFINE_PROP_UINT64("data", GenericLoaderState, data, 0),
+ DEFINE_PROP_UINT8("data-len", GenericLoaderState, data_len, 0),
+ DEFINE_PROP_BOOL("data-be", GenericLoaderState, data_be, false),
+ DEFINE_PROP_UINT32("cpu-num", GenericLoaderState, cpu_num, CPU_NONE),
+ DEFINE_PROP_BOOL("force-raw", GenericLoaderState, force_raw, false),
+ DEFINE_PROP_STRING("file", GenericLoaderState, file),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void generic_loader_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ /* The reset function is not registered here and is instead registered in
+ * the realize function to allow this device to be added via the device_add
+ * command in the QEMU monitor.
+ * TODO: Improve the device_add functionality to allow resets to be
+ * connected
+ */
+ dc->realize = generic_loader_realize;
+ dc->unrealize = generic_loader_unrealize;
+ dc->props = generic_loader_props;
+ dc->desc = "Generic Loader";
+}
+
+static TypeInfo generic_loader_info = {
+ .name = TYPE_GENERIC_LOADER,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(GenericLoaderState),
+ .class_init = generic_loader_class_init,
+};
+
+static void generic_loader_register_type(void)
+{
+ type_register_static(&generic_loader_info);
+}
+
+type_init(generic_loader_register_type)
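For orientation, a hedged sketch of instantiating the loader from board or test code. The property names come from generic_loader_props above; TYPE_GENERIC_LOADER is assumed to be provided by the included hw/core/generic-loader.h, and the qdev_* helpers are the usual property setters:

    /* Sketch only: patch a 4-byte value into guest memory at every reset,
     * roughly what "-device loader,addr=...,data=...,data-len=4" would do,
     * assuming the type registers under a user-creatable name. */
    DeviceState *dev = qdev_create(NULL, TYPE_GENERIC_LOADER);
    qdev_prop_set_uint64(dev, "addr", 0x40000000);
    qdev_prop_set_uint64(dev, "data", 0xdeadbeef);
    qdev_prop_set_uint8(dev, "data-len", 4);
    qdev_init_nofail(dev);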
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 53e0e41554..45742494e6 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -133,10 +133,16 @@ ssize_t read_targphys(const char *name,
return did;
}
-/* return the size or -1 if error */
int load_image_targphys(const char *filename,
hwaddr addr, uint64_t max_sz)
{
+ return load_image_targphys_as(filename, addr, max_sz, NULL);
+}
+
+/* return the size or -1 if error */
+int load_image_targphys_as(const char *filename,
+ hwaddr addr, uint64_t max_sz, AddressSpace *as)
+{
int size;
size = get_image_size(filename);
@@ -144,7 +150,7 @@ int load_image_targphys(const char *filename,
return -1;
}
if (size > 0) {
- rom_add_file_fixed(filename, addr, -1);
+ rom_add_file_fixed_as(filename, addr, -1, as);
}
return size;
}
@@ -417,6 +423,18 @@ int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t),
uint64_t *highaddr, int big_endian, int elf_machine,
int clear_lsb, int data_swab)
{
+ return load_elf_as(filename, translate_fn, translate_opaque, pentry,
+ lowaddr, highaddr, big_endian, elf_machine, clear_lsb,
+ data_swab, NULL);
+}
+
+/* return < 0 if error, otherwise the number of bytes loaded in memory */
+int load_elf_as(const char *filename,
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
+ uint64_t *highaddr, int big_endian, int elf_machine,
+ int clear_lsb, int data_swab, AddressSpace *as)
+{
int fd, data_order, target_data_order, must_swab, ret = ELF_LOAD_FAILED;
uint8_t e_ident[EI_NIDENT];
@@ -455,11 +473,11 @@ int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t),
if (e_ident[EI_CLASS] == ELFCLASS64) {
ret = load_elf64(filename, fd, translate_fn, translate_opaque, must_swab,
pentry, lowaddr, highaddr, elf_machine, clear_lsb,
- data_swab);
+ data_swab, as);
} else {
ret = load_elf32(filename, fd, translate_fn, translate_opaque, must_swab,
pentry, lowaddr, highaddr, elf_machine, clear_lsb,
- data_swab);
+ data_swab, as);
}
fail:
@@ -569,7 +587,7 @@ static ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src,
static int load_uboot_image(const char *filename, hwaddr *ep, hwaddr *loadaddr,
int *is_linux, uint8_t image_type,
uint64_t (*translate_fn)(void *, uint64_t),
- void *translate_opaque)
+ void *translate_opaque, AddressSpace *as)
{
int fd;
int size;
@@ -670,7 +688,7 @@ static int load_uboot_image(const char *filename, hwaddr *ep, hwaddr *loadaddr,
hdr->ih_size = bytes;
}
- rom_add_blob_fixed(filename, data, hdr->ih_size, address);
+ rom_add_blob_fixed_as(filename, data, hdr->ih_size, address, as);
ret = hdr->ih_size;
@@ -686,14 +704,23 @@ int load_uimage(const char *filename, hwaddr *ep, hwaddr *loadaddr,
void *translate_opaque)
{
return load_uboot_image(filename, ep, loadaddr, is_linux, IH_TYPE_KERNEL,
- translate_fn, translate_opaque);
+ translate_fn, translate_opaque, NULL);
+}
+
+int load_uimage_as(const char *filename, hwaddr *ep, hwaddr *loadaddr,
+ int *is_linux,
+ uint64_t (*translate_fn)(void *, uint64_t),
+ void *translate_opaque, AddressSpace *as)
+{
+ return load_uboot_image(filename, ep, loadaddr, is_linux, IH_TYPE_KERNEL,
+ translate_fn, translate_opaque, as);
}
/* Load a ramdisk. */
int load_ramdisk(const char *filename, hwaddr addr, uint64_t max_sz)
{
return load_uboot_image(filename, NULL, &addr, NULL, IH_TYPE_RAMDISK,
- NULL, NULL);
+ NULL, NULL, NULL);
}
/* Load a gzip-compressed kernel to a dynamically allocated buffer. */
@@ -777,6 +804,7 @@ struct Rom {
uint8_t *data;
MemoryRegion *mr;
+ AddressSpace *as;
int isrom;
char *fw_dir;
char *fw_file;
@@ -788,6 +816,12 @@ struct Rom {
static FWCfgState *fw_cfg;
static QTAILQ_HEAD(, Rom) roms = QTAILQ_HEAD_INITIALIZER(roms);
+static inline bool rom_order_compare(Rom *rom, Rom *item)
+{
+ return ((uintptr_t)(void *)rom->as > (uintptr_t)(void *)item->as) ||
+ (rom->as == item->as && rom->addr >= item->addr);
+}
+
static void rom_insert(Rom *rom)
{
Rom *item;
@@ -796,10 +830,16 @@ static void rom_insert(Rom *rom)
hw_error ("ROM images must be loaded at startup\n");
}
- /* list is ordered by load address */
+ /* The user didn't specify an address space, this is the default */
+ if (!rom->as) {
+ rom->as = &address_space_memory;
+ }
+
+ /* List is ordered by load address in the same address space */
QTAILQ_FOREACH(item, &roms, next) {
- if (rom->addr >= item->addr)
+ if (rom_order_compare(rom, item)) {
continue;
+ }
QTAILQ_INSERT_BEFORE(item, rom, next);
return;
}
@@ -833,16 +873,25 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name)
int rom_add_file(const char *file, const char *fw_dir,
hwaddr addr, int32_t bootindex,
- bool option_rom, MemoryRegion *mr)
+ bool option_rom, MemoryRegion *mr,
+ AddressSpace *as)
{
MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
Rom *rom;
int rc, fd = -1;
char devpath[100];
+ if (as && mr) {
+ fprintf(stderr, "Specifying an Address Space and Memory Region is " \
+ "not valid when loading a rom\n");
+ /* We haven't allocated anything so we don't need any cleanup */
+ return -1;
+ }
+
rom = g_malloc0(sizeof(*rom));
rom->name = g_strdup(file);
rom->path = qemu_find_file(QEMU_FILE_TYPE_BIOS, rom->name);
+ rom->as = as;
if (rom->path == NULL) {
rom->path = g_strdup(file);
}
@@ -929,7 +978,8 @@ err:
MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
size_t max_len, hwaddr addr, const char *fw_file_name,
- FWCfgReadCallback fw_callback, void *callback_opaque)
+ FWCfgReadCallback fw_callback, void *callback_opaque,
+ AddressSpace *as)
{
MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
Rom *rom;
@@ -937,6 +987,7 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
rom = g_malloc0(sizeof(*rom));
rom->name = g_strdup(name);
+ rom->as = as;
rom->addr = addr;
rom->romsize = max_len ? max_len : len;
rom->datasize = len;
@@ -969,7 +1020,7 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
* memory ownership of "data", so we don't have to allocate and copy the buffer.
*/
int rom_add_elf_program(const char *name, void *data, size_t datasize,
- size_t romsize, hwaddr addr)
+ size_t romsize, hwaddr addr, AddressSpace *as)
{
Rom *rom;
@@ -979,18 +1030,19 @@ int rom_add_elf_program(const char *name, void *data, size_t datasize,
rom->datasize = datasize;
rom->romsize = romsize;
rom->data = data;
+ rom->as = as;
rom_insert(rom);
return 0;
}
int rom_add_vga(const char *file)
{
- return rom_add_file(file, "vgaroms", 0, -1, true, NULL);
+ return rom_add_file(file, "vgaroms", 0, -1, true, NULL, NULL);
}
int rom_add_option(const char *file, int32_t bootindex)
{
- return rom_add_file(file, "genroms", 0, bootindex, true, NULL);
+ return rom_add_file(file, "genroms", 0, bootindex, true, NULL, NULL);
}
static void rom_reset(void *unused)
@@ -1008,8 +1060,8 @@ static void rom_reset(void *unused)
void *host = memory_region_get_ram_ptr(rom->mr);
memcpy(host, rom->data, rom->datasize);
} else {
- cpu_physical_memory_write_rom(&address_space_memory,
- rom->addr, rom->data, rom->datasize);
+ cpu_physical_memory_write_rom(rom->as, rom->addr, rom->data,
+ rom->datasize);
}
if (rom->isrom) {
/* rom needs to be written only once */
@@ -1031,12 +1083,13 @@ int rom_check_and_register_reset(void)
hwaddr addr = 0;
MemoryRegionSection section;
Rom *rom;
+ AddressSpace *as = NULL;
QTAILQ_FOREACH(rom, &roms, next) {
if (rom->fw_file) {
continue;
}
- if (addr > rom->addr) {
+ if ((addr > rom->addr) && (as == rom->as)) {
fprintf(stderr, "rom: requested regions overlap "
"(rom %s. free=0x" TARGET_FMT_plx
", addr=0x" TARGET_FMT_plx ")\n",
@@ -1045,9 +1098,11 @@ int rom_check_and_register_reset(void)
}
addr = rom->addr;
addr += rom->romsize;
- section = memory_region_find(get_system_memory(), rom->addr, 1);
+ section = memory_region_find(rom->mr ? rom->mr : get_system_memory(),
+ rom->addr, 1);
rom->isrom = int128_nz(section.size) && memory_region_is_rom(section.mr);
memory_region_unref(section.mr);
+ as = rom->as;
}
qemu_register_reset(rom_reset, NULL);
roms_loaded = 1;
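The new *_as entry points let callers direct a load at a specific address space rather than global system memory, which is what generic_loader_realize above relies on. A hedged sketch of a direct call (the file name is a placeholder; qemu_get_cpu(), ram_size and error_report() are assumed from the usual QEMU globals):

    /* Sketch only: load a raw image into CPU 0's view of memory. */
    CPUState *cpu = qemu_get_cpu(0);
    if (load_image_targphys_as("firmware.bin", 0x0, ram_size, cpu->as) < 0) {
        error_report("could not load firmware.bin");
    }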
diff --git a/hw/core/machine.c b/hw/core/machine.c
index e5a456f21d..b0fd91f6cd 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -332,7 +332,7 @@ static bool machine_get_enforce_config_section(Object *obj, Error **errp)
return ms->enforce_config_section;
}
-static int error_on_sysbus_device(SysBusDevice *sbdev, void *opaque)
+static void error_on_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
error_report("Option '-device %s' cannot be handled by this machine",
object_class_get_name(object_get_class(OBJECT(sbdev))));
@@ -364,6 +364,104 @@ static void machine_class_init(ObjectClass *oc, void *data)
/* Default 128 MB as guest ram size */
mc->default_ram_size = 128 * M_BYTE;
mc->rom_file_has_mr = true;
+
+ object_class_property_add_str(oc, "accel",
+ machine_get_accel, machine_set_accel, &error_abort);
+ object_class_property_set_description(oc, "accel",
+ "Accelerator list", &error_abort);
+
+ object_class_property_add(oc, "kernel-irqchip", "OnOffSplit",
+ NULL, machine_set_kernel_irqchip,
+ NULL, NULL, &error_abort);
+ object_class_property_set_description(oc, "kernel-irqchip",
+ "Configure KVM in-kernel irqchip", &error_abort);
+
+ object_class_property_add(oc, "kvm-shadow-mem", "int",
+ machine_get_kvm_shadow_mem, machine_set_kvm_shadow_mem,
+ NULL, NULL, &error_abort);
+ object_class_property_set_description(oc, "kvm-shadow-mem",
+ "KVM shadow MMU size", &error_abort);
+
+ object_class_property_add_str(oc, "kernel",
+ machine_get_kernel, machine_set_kernel, &error_abort);
+ object_class_property_set_description(oc, "kernel",
+ "Linux kernel image file", &error_abort);
+
+ object_class_property_add_str(oc, "initrd",
+ machine_get_initrd, machine_set_initrd, &error_abort);
+ object_class_property_set_description(oc, "initrd",
+ "Linux initial ramdisk file", &error_abort);
+
+ object_class_property_add_str(oc, "append",
+ machine_get_append, machine_set_append, &error_abort);
+ object_class_property_set_description(oc, "append",
+ "Linux kernel command line", &error_abort);
+
+ object_class_property_add_str(oc, "dtb",
+ machine_get_dtb, machine_set_dtb, &error_abort);
+ object_class_property_set_description(oc, "dtb",
+ "Linux kernel device tree file", &error_abort);
+
+ object_class_property_add_str(oc, "dumpdtb",
+ machine_get_dumpdtb, machine_set_dumpdtb, &error_abort);
+ object_class_property_set_description(oc, "dumpdtb",
+ "Dump current dtb to a file and quit", &error_abort);
+
+ object_class_property_add(oc, "phandle-start", "int",
+ machine_get_phandle_start, machine_set_phandle_start,
+ NULL, NULL, &error_abort);
+ object_class_property_set_description(oc, "phandle-start",
+ "The first phandle ID we may generate dynamically", &error_abort);
+
+ object_class_property_add_str(oc, "dt-compatible",
+ machine_get_dt_compatible, machine_set_dt_compatible, &error_abort);
+ object_class_property_set_description(oc, "dt-compatible",
+ "Overrides the \"compatible\" property of the dt root node",
+ &error_abort);
+
+ object_class_property_add_bool(oc, "dump-guest-core",
+ machine_get_dump_guest_core, machine_set_dump_guest_core, &error_abort);
+ object_class_property_set_description(oc, "dump-guest-core",
+ "Include guest memory in a core dump", &error_abort);
+
+ object_class_property_add_bool(oc, "mem-merge",
+ machine_get_mem_merge, machine_set_mem_merge, &error_abort);
+ object_class_property_set_description(oc, "mem-merge",
+ "Enable/disable memory merge support", &error_abort);
+
+ object_class_property_add_bool(oc, "usb",
+ machine_get_usb, machine_set_usb, &error_abort);
+ object_class_property_set_description(oc, "usb",
+ "Set on/off to enable/disable usb", &error_abort);
+
+ object_class_property_add_bool(oc, "graphics",
+ machine_get_graphics, machine_set_graphics, &error_abort);
+ object_class_property_set_description(oc, "graphics",
+ "Set on/off to enable/disable graphics emulation", &error_abort);
+
+ object_class_property_add_bool(oc, "igd-passthru",
+ machine_get_igd_gfx_passthru, machine_set_igd_gfx_passthru,
+ &error_abort);
+ object_class_property_set_description(oc, "igd-passthru",
+ "Set on/off to enable/disable igd passthrou", &error_abort);
+
+ object_class_property_add_str(oc, "firmware",
+ machine_get_firmware, machine_set_firmware,
+ &error_abort);
+ object_class_property_set_description(oc, "firmware",
+ "Firmware image", &error_abort);
+
+ object_class_property_add_bool(oc, "suppress-vmdesc",
+ machine_get_suppress_vmdesc, machine_set_suppress_vmdesc,
+ &error_abort);
+ object_class_property_set_description(oc, "suppress-vmdesc",
+ "Set on to disable self-describing migration", &error_abort);
+
+ object_class_property_add_bool(oc, "enforce-config-section",
+ machine_get_enforce_config_section, machine_set_enforce_config_section,
+ &error_abort);
+ object_class_property_set_description(oc, "enforce-config-section",
+ "Set on to enforce configuration section migration", &error_abort);
}
static void machine_class_base_init(ObjectClass *oc, void *data)
@@ -387,114 +485,6 @@ static void machine_initfn(Object *obj)
ms->mem_merge = true;
ms->enable_graphics = true;
- object_property_add_str(obj, "accel",
- machine_get_accel, machine_set_accel, NULL);
- object_property_set_description(obj, "accel",
- "Accelerator list",
- NULL);
- object_property_add(obj, "kernel-irqchip", "OnOffSplit",
- NULL,
- machine_set_kernel_irqchip,
- NULL, NULL, NULL);
- object_property_set_description(obj, "kernel-irqchip",
- "Configure KVM in-kernel irqchip",
- NULL);
- object_property_add(obj, "kvm-shadow-mem", "int",
- machine_get_kvm_shadow_mem,
- machine_set_kvm_shadow_mem,
- NULL, NULL, NULL);
- object_property_set_description(obj, "kvm-shadow-mem",
- "KVM shadow MMU size",
- NULL);
- object_property_add_str(obj, "kernel",
- machine_get_kernel, machine_set_kernel, NULL);
- object_property_set_description(obj, "kernel",
- "Linux kernel image file",
- NULL);
- object_property_add_str(obj, "initrd",
- machine_get_initrd, machine_set_initrd, NULL);
- object_property_set_description(obj, "initrd",
- "Linux initial ramdisk file",
- NULL);
- object_property_add_str(obj, "append",
- machine_get_append, machine_set_append, NULL);
- object_property_set_description(obj, "append",
- "Linux kernel command line",
- NULL);
- object_property_add_str(obj, "dtb",
- machine_get_dtb, machine_set_dtb, NULL);
- object_property_set_description(obj, "dtb",
- "Linux kernel device tree file",
- NULL);
- object_property_add_str(obj, "dumpdtb",
- machine_get_dumpdtb, machine_set_dumpdtb, NULL);
- object_property_set_description(obj, "dumpdtb",
- "Dump current dtb to a file and quit",
- NULL);
- object_property_add(obj, "phandle-start", "int",
- machine_get_phandle_start,
- machine_set_phandle_start,
- NULL, NULL, NULL);
- object_property_set_description(obj, "phandle-start",
- "The first phandle ID we may generate dynamically",
- NULL);
- object_property_add_str(obj, "dt-compatible",
- machine_get_dt_compatible,
- machine_set_dt_compatible,
- NULL);
- object_property_set_description(obj, "dt-compatible",
- "Overrides the \"compatible\" property of the dt root node",
- NULL);
- object_property_add_bool(obj, "dump-guest-core",
- machine_get_dump_guest_core,
- machine_set_dump_guest_core,
- NULL);
- object_property_set_description(obj, "dump-guest-core",
- "Include guest memory in a core dump",
- NULL);
- object_property_add_bool(obj, "mem-merge",
- machine_get_mem_merge,
- machine_set_mem_merge, NULL);
- object_property_set_description(obj, "mem-merge",
- "Enable/disable memory merge support",
- NULL);
- object_property_add_bool(obj, "usb",
- machine_get_usb,
- machine_set_usb, NULL);
- object_property_set_description(obj, "usb",
- "Set on/off to enable/disable usb",
- NULL);
- object_property_add_bool(obj, "graphics",
- machine_get_graphics,
- machine_set_graphics, NULL);
- object_property_set_description(obj, "graphics",
- "Set on/off to enable/disable graphics emulation",
- NULL);
- object_property_add_bool(obj, "igd-passthru",
- machine_get_igd_gfx_passthru,
- machine_set_igd_gfx_passthru, NULL);
- object_property_set_description(obj, "igd-passthru",
- "Set on/off to enable/disable igd passthrou",
- NULL);
- object_property_add_str(obj, "firmware",
- machine_get_firmware,
- machine_set_firmware, NULL);
- object_property_set_description(obj, "firmware",
- "Firmware image",
- NULL);
- object_property_add_bool(obj, "suppress-vmdesc",
- machine_get_suppress_vmdesc,
- machine_set_suppress_vmdesc, NULL);
- object_property_set_description(obj, "suppress-vmdesc",
- "Set on to disable self-describing migration",
- NULL);
- object_property_add_bool(obj, "enforce-config-section",
- machine_get_enforce_config_section,
- machine_set_enforce_config_section, NULL);
- object_property_set_description(obj, "enforce-config-section",
- "Set on to enforce configuration section migration",
- NULL);
-
/* Register notifier when init is done for sysbus sanity checks */
ms->sysbus_notifier.notify = machine_init_notify;
qemu_add_machine_init_done_notifier(&ms->sysbus_notifier);
@@ -561,6 +551,7 @@ static void machine_class_finalize(ObjectClass *klass, void *data)
if (mc->compat_props) {
g_array_free(mc->compat_props, true);
}
+ g_free(mc->name);
}
void machine_register_compat_props(MachineState *machine)
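The machine.c hunks above move every machine option from per-instance registration in machine_initfn() to per-class registration in machine_class_init(), using the object_class_property_* counterparts of the instance calls. Roughly, for a single boolean property the conversion looks like this (the mydev_* names and "feature-x" are placeholders):

    /* Before: added on each instance from the instance_init hook. */
    static void mydev_initfn(Object *obj)
    {
        object_property_add_bool(obj, "feature-x",
                                 mydev_get_feature_x, mydev_set_feature_x,
                                 NULL);
        object_property_set_description(obj, "feature-x",
                                        "Enable feature X", NULL);
    }

    /* After: added once on the class from the class_init hook. */
    static void mydev_class_init(ObjectClass *oc, void *data)
    {
        object_class_property_add_bool(oc, "feature-x",
                                       mydev_get_feature_x,
                                       mydev_set_feature_x, &error_abort);
        object_class_property_set_description(oc, "feature-x",
                                              "Enable feature X",
                                              &error_abort);
    }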
diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c
new file mode 100644
index 0000000000..1ac090d1a4
--- /dev/null
+++ b/hw/core/or-irq.c
@@ -0,0 +1,107 @@
+/*
+ * QEMU IRQ/GPIO common code.
+ *
+ * Copyright (c) 2016 Alistair Francis <alistair@alistair23.me>.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/or-irq.h"
+
+static void or_irq_handler(void *opaque, int n, int level)
+{
+ qemu_or_irq *s = OR_IRQ(opaque);
+ int or_level = 0;
+ int i;
+
+ s->levels[n] = level;
+
+ for (i = 0; i < s->num_lines; i++) {
+ or_level |= s->levels[i];
+ }
+
+ qemu_set_irq(s->out_irq, or_level);
+}
+
+static void or_irq_reset(DeviceState *dev)
+{
+ qemu_or_irq *s = OR_IRQ(dev);
+ int i;
+
+ for (i = 0; i < MAX_OR_LINES; i++) {
+ s->levels[i] = false;
+ }
+}
+
+static void or_irq_realize(DeviceState *dev, Error **errp)
+{
+ qemu_or_irq *s = OR_IRQ(dev);
+
+ assert(s->num_lines < MAX_OR_LINES);
+
+ qdev_init_gpio_in(dev, or_irq_handler, s->num_lines);
+}
+
+static void or_irq_init(Object *obj)
+{
+ qemu_or_irq *s = OR_IRQ(obj);
+
+ qdev_init_gpio_out(DEVICE(obj), &s->out_irq, 1);
+}
+
+static const VMStateDescription vmstate_or_irq = {
+ .name = TYPE_OR_IRQ,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL_ARRAY(levels, qemu_or_irq, MAX_OR_LINES),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static Property or_irq_properties[] = {
+ DEFINE_PROP_UINT16("num-lines", qemu_or_irq, num_lines, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void or_irq_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = or_irq_reset;
+ dc->props = or_irq_properties;
+ dc->realize = or_irq_realize;
+ dc->vmsd = &vmstate_or_irq;
+}
+
+static const TypeInfo or_irq_type_info = {
+ .name = TYPE_OR_IRQ,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(qemu_or_irq),
+ .instance_init = or_irq_init,
+ .class_init = or_irq_class_init,
+};
+
+static void or_irq_register_types(void)
+{
+ type_register_static(&or_irq_type_info);
+}
+
+type_init(or_irq_register_types)
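A board that wants to fan several interrupt sources into one CPU input could wire up the new TYPE_OR_IRQ device roughly as follows. This is a sketch for board code of this QEMU generation; cpu_irq, source_dev and num_sources are assumed to exist in the surrounding board init, and includes are omitted:

    DeviceState *orgate = qdev_create(NULL, TYPE_OR_IRQ);
    int i;

    /* The input count must be set before the device is realized. */
    qdev_prop_set_uint16(orgate, "num-lines", num_sources);
    qdev_init_nofail(orgate);

    /* The single output drives the CPU interrupt line. */
    qdev_connect_gpio_out(orgate, 0, cpu_irq);

    /* Each source feeds one OR-gate input. */
    for (i = 0; i < num_sources; i++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(source_dev[i]), 0,
                           qdev_get_gpio_in(orgate, i));
    }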
diff --git a/hw/core/platform-bus.c b/hw/core/platform-bus.c
index 36f84ab72f..329ac670c0 100644
--- a/hw/core/platform-bus.c
+++ b/hw/core/platform-bus.c
@@ -74,7 +74,7 @@ hwaddr platform_bus_get_mmio_addr(PlatformBusDevice *pbus, SysBusDevice *sbdev,
return object_property_get_int(OBJECT(sbdev_mr), "addr", NULL);
}
-static int platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque)
+static void platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque)
{
PlatformBusDevice *pbus = opaque;
qemu_irq sbirq;
@@ -93,8 +93,6 @@ static int platform_bus_count_irqs(SysBusDevice *sbdev, void *opaque)
}
}
}
-
- return 0;
}
/*
@@ -168,7 +166,7 @@ static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev,
* For each sysbus device, look for unassigned IRQ lines as well as
* unassociated MMIO regions. Connect them to the platform bus if available.
*/
-static int link_sysbus_device(SysBusDevice *sbdev, void *opaque)
+static void link_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
PlatformBusDevice *pbus = opaque;
int i;
@@ -180,8 +178,6 @@ static int link_sysbus_device(SysBusDevice *sbdev, void *opaque)
for (i = 0; sysbus_has_mmio(sbdev, i); i++) {
platform_bus_map_mmio(pbus, sbdev, i);
}
-
- return 0;
}
static void platform_bus_init_notify(Notifier *notifier, void *data)
diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c
index 30829ee97b..3af82afe78 100644
--- a/hw/core/ptimer.c
+++ b/hw/core/ptimer.c
@@ -11,6 +11,10 @@
#include "hw/ptimer.h"
#include "qemu/host-utils.h"
#include "sysemu/replay.h"
+#include "sysemu/qtest.h"
+
+#define DELTA_ADJUST 1
+#define DELTA_NO_ADJUST -1
struct ptimer_state
{
@@ -21,6 +25,7 @@ struct ptimer_state
int64_t period;
int64_t last_event;
int64_t next_event;
+ uint8_t policy_mask;
QEMUBH *bh;
QEMUTimer *timer;
};
@@ -33,17 +38,58 @@ static void ptimer_trigger(ptimer_state *s)
}
}
-static void ptimer_reload(ptimer_state *s)
+static void ptimer_reload(ptimer_state *s, int delta_adjust)
{
uint32_t period_frac = s->period_frac;
uint64_t period = s->period;
+ uint64_t delta = s->delta;
- if (s->delta == 0) {
+ if (delta == 0 && !(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) {
ptimer_trigger(s);
- s->delta = s->limit;
}
- if (s->delta == 0 || s->period == 0) {
- fprintf(stderr, "Timer with period zero, disabling\n");
+
+ if (delta == 0 && !(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_RELOAD)) {
+ delta = s->delta = s->limit;
+ }
+
+ if (s->period == 0) {
+ if (!qtest_enabled()) {
+ fprintf(stderr, "Timer with period zero, disabling\n");
+ }
+ timer_del(s->timer);
+ s->enabled = 0;
+ return;
+ }
+
+ if (s->policy_mask & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) {
+ if (delta_adjust != DELTA_NO_ADJUST) {
+ delta += delta_adjust;
+ }
+ }
+
+ if (delta == 0 && (s->policy_mask & PTIMER_POLICY_CONTINUOUS_TRIGGER)) {
+ if (s->enabled == 1 && s->limit == 0) {
+ delta = 1;
+ }
+ }
+
+ if (delta == 0 && (s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) {
+ if (delta_adjust != DELTA_NO_ADJUST) {
+ delta = 1;
+ }
+ }
+
+ if (delta == 0 && (s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_RELOAD)) {
+ if (s->enabled == 1 && s->limit != 0) {
+ delta = 1;
+ }
+ }
+
+ if (delta == 0) {
+ if (!qtest_enabled()) {
+ fprintf(stderr, "Timer with delta zero, disabling\n");
+ }
+ timer_del(s->timer);
s->enabled = 0;
return;
}
@@ -57,15 +103,15 @@ static void ptimer_reload(ptimer_state *s)
* on the current generation of host machines.
*/
- if (s->enabled == 1 && (s->delta * period < 10000) && !use_icount) {
- period = 10000 / s->delta;
+ if (s->enabled == 1 && (delta * period < 10000) && !use_icount) {
+ period = 10000 / delta;
period_frac = 0;
}
s->last_event = s->next_event;
- s->next_event = s->last_event + s->delta * period;
+ s->next_event = s->last_event + delta * period;
if (period_frac) {
- s->next_event += ((int64_t)period_frac * s->delta) >> 32;
+ s->next_event += ((int64_t)period_frac * delta) >> 32;
}
timer_mod(s->timer, s->next_event);
}
@@ -73,12 +119,35 @@ static void ptimer_reload(ptimer_state *s)
static void ptimer_tick(void *opaque)
{
ptimer_state *s = (ptimer_state *)opaque;
- ptimer_trigger(s);
- s->delta = 0;
+ bool trigger = true;
+
if (s->enabled == 2) {
+ s->delta = 0;
s->enabled = 0;
} else {
- ptimer_reload(s);
+ int delta_adjust = DELTA_ADJUST;
+
+ if (s->delta == 0 || s->limit == 0) {
+ /* If a "continuous trigger" policy is not used and limit == 0,
+ we should error out. delta == 0 means that this tick is
+ caused by a "no immediate reload" policy, so it shouldn't
+ be adjusted. */
+ delta_adjust = DELTA_NO_ADJUST;
+ }
+
+ if (!(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) {
+ /* Avoid re-trigger on deferred reload if "no immediate trigger"
+ policy isn't used. */
+ trigger = (delta_adjust == DELTA_ADJUST);
+ }
+
+ s->delta = s->limit;
+
+ ptimer_reload(s, delta_adjust);
+ }
+
+ if (trigger) {
+ ptimer_trigger(s);
}
}
@@ -86,9 +155,10 @@ uint64_t ptimer_get_count(ptimer_state *s)
{
uint64_t counter;
- if (s->enabled) {
+ if (s->enabled && s->delta != 0) {
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t next = s->next_event;
+ int64_t last = s->last_event;
bool expired = (now - next >= 0);
bool oneshot = (s->enabled == 2);
@@ -113,7 +183,7 @@ uint64_t ptimer_get_count(ptimer_state *s)
/* We need to divide time by period, where time is stored in
rem (64-bit integer) and period is stored in period/period_frac
(64.32 fixed point).
-
+
Doing full precision division is hard, so scale values and
do a 64-bit division. The result should be rounded down,
so that the rounding error never causes the timer to go
@@ -140,6 +210,35 @@ uint64_t ptimer_get_count(ptimer_state *s)
div += 1;
}
counter = rem / div;
+
+ if (s->policy_mask & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) {
+ /* Before wrapping around, the timer should stay with counter = 0
+ for one period. */
+ if (!oneshot && s->delta == s->limit) {
+ if (now == last) {
+ /* Counter == delta here, check whether it was
+ adjusted and if it was, then right now it is
+ that "one period". */
+ if (counter == s->limit + DELTA_ADJUST) {
+ return 0;
+ }
+ } else if (counter == s->limit) {
+ /* Since the counter is rounded down and now != last,
+ the counter == limit means that delta was adjusted
+ by +1 and right now it is that adjusted period. */
+ return 0;
+ }
+ }
+ }
+ }
+
+ if (s->policy_mask & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN) {
+ /* If now == last then delta == limit, i.e. the counter already
+ represents the correct value. It would be rounded down 1 ns
+ later. */
+ if (now != last) {
+ counter += 1;
+ }
}
} else {
counter = s->delta;
@@ -152,7 +251,7 @@ void ptimer_set_count(ptimer_state *s, uint64_t count)
s->delta = count;
if (s->enabled) {
s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ptimer_reload(s);
+ ptimer_reload(s, 0);
}
}
@@ -161,13 +260,15 @@ void ptimer_run(ptimer_state *s, int oneshot)
bool was_disabled = !s->enabled;
if (was_disabled && s->period == 0) {
- fprintf(stderr, "Timer with period zero, disabling\n");
+ if (!qtest_enabled()) {
+ fprintf(stderr, "Timer with period zero, disabling\n");
+ }
return;
}
s->enabled = oneshot ? 2 : 1;
if (was_disabled) {
s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ptimer_reload(s);
+ ptimer_reload(s, 0);
}
}
@@ -191,7 +292,7 @@ void ptimer_set_period(ptimer_state *s, int64_t period)
s->period_frac = 0;
if (s->enabled) {
s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ptimer_reload(s);
+ ptimer_reload(s, 0);
}
}
@@ -203,7 +304,7 @@ void ptimer_set_freq(ptimer_state *s, uint32_t freq)
s->period_frac = (1000000000ll << 32) / freq;
if (s->enabled) {
s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ptimer_reload(s);
+ ptimer_reload(s, 0);
}
}
@@ -216,7 +317,7 @@ void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload)
s->delta = limit;
if (s->enabled && reload) {
s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- ptimer_reload(s);
+ ptimer_reload(s, 0);
}
}
@@ -242,12 +343,13 @@ const VMStateDescription vmstate_ptimer = {
}
};
-ptimer_state *ptimer_init(QEMUBH *bh)
+ptimer_state *ptimer_init(QEMUBH *bh, uint8_t policy_mask)
{
ptimer_state *s;
s = (ptimer_state *)g_malloc0(sizeof(ptimer_state));
s->bh = bh;
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ptimer_tick, s);
+ s->policy_mask = policy_mask;
return s;
}
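With this change every ptimer user passes a policy mask to ptimer_init(). Code that wants the historic behaviour passes PTIMER_POLICY_DEFAULT, as the xilinx_axidma hunk further down does; devices modelling specific hardware OR the policy bits together. A sketch, with my_timer_hit and the frequency as placeholders:

    QEMUBH *bh = qemu_bh_new(my_timer_hit, s);

    /* Legacy behaviour: no policy bits set. */
    s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);

    /* Hardware that holds the counter at 0 for one period before
     * wrapping and never triggers on a zero delta: */
    s->timer = ptimer_init(bh,
                           PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD |
                           PTIMER_POLICY_NO_IMMEDIATE_TRIGGER);
    ptimer_set_freq(s->timer, 1000000);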
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index e55afe6bf2..1b7ea50e9f 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -160,55 +160,63 @@ PropertyInfo qdev_prop_drive = {
/* --- character device --- */
-static void parse_chr(DeviceState *dev, const char *str, void **ptr,
- const char *propname, Error **errp)
+static void get_chr(Object *obj, Visitor *v, const char *name, void *opaque,
+ Error **errp)
{
- CharDriverState *chr = qemu_chr_find(str);
- if (chr == NULL) {
- error_setg(errp, "Property '%s.%s' can't find value '%s'",
- object_get_typename(OBJECT(dev)), propname, str);
- return;
- }
- if (qemu_chr_fe_claim(chr) != 0) {
- error_setg(errp, "Property '%s.%s' can't take value '%s', it's in use",
- object_get_typename(OBJECT(dev)), propname, str);
- return;
- }
- *ptr = chr;
+ DeviceState *dev = DEVICE(obj);
+ CharBackend *be = qdev_get_prop_ptr(dev, opaque);
+ char *p;
+
+ p = g_strdup(be->chr && be->chr->label ? be->chr->label : "");
+ visit_type_str(v, name, &p, errp);
+ g_free(p);
}
-static void release_chr(Object *obj, const char *name, void *opaque)
+static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque,
+ Error **errp)
{
DeviceState *dev = DEVICE(obj);
+ Error *local_err = NULL;
Property *prop = opaque;
- CharDriverState **ptr = qdev_get_prop_ptr(dev, prop);
- CharDriverState *chr = *ptr;
+ CharBackend *be = qdev_get_prop_ptr(dev, prop);
+ CharDriverState *s;
+ char *str;
- if (chr) {
- qemu_chr_add_handlers(chr, NULL, NULL, NULL, NULL);
- qemu_chr_fe_release(chr);
+ if (dev->realized) {
+ qdev_prop_set_after_realize(dev, name, errp);
+ return;
}
-}
+ visit_type_str(v, name, &str, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
-static char *print_chr(void *ptr)
-{
- CharDriverState *chr = ptr;
- const char *val = chr->label ? chr->label : "";
+ if (!*str) {
+ g_free(str);
+ be->chr = NULL;
+ return;
+ }
- return g_strdup(val);
+ s = qemu_chr_find(str);
+ if (s == NULL) {
+ error_setg(errp, "Property '%s.%s' can't find value '%s'",
+ object_get_typename(obj), prop->name, str);
+ } else if (!qemu_chr_fe_init(be, s, errp)) {
+ error_prepend(errp, "Property '%s.%s' can't take value '%s': ",
+ object_get_typename(obj), prop->name, str);
+ }
+ g_free(str);
}
-static void get_chr(Object *obj, Visitor *v, const char *name, void *opaque,
- Error **errp)
+static void release_chr(Object *obj, const char *name, void *opaque)
{
- get_pointer(obj, v, opaque, print_chr, name, errp);
-}
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ CharBackend *be = qdev_get_prop_ptr(dev, prop);
-static void set_chr(Object *obj, Visitor *v, const char *name, void *opaque,
- Error **errp)
-{
- set_pointer(obj, v, opaque, parse_chr, name, errp);
+ qemu_chr_fe_deinit(be);
}
PropertyInfo qdev_prop_chr = {
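The chardev property rework above replaces the bare CharDriverState pointer in device state with an embedded CharBackend that is initialised via qemu_chr_fe_init() and torn down with qemu_chr_fe_deinit(). On the device side the property declaration keeps the same macro, only the field type changes; a minimal sketch with MyDevice as a placeholder type:

    typedef struct MyDevice {
        DeviceState parent_obj;
        CharBackend chr;            /* was: CharDriverState *chr */
    } MyDevice;

    static Property my_device_properties[] = {
        DEFINE_PROP_CHR("chardev", MyDevice, chr),
        DEFINE_PROP_END_OF_LIST(),
    };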
diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c
index 311af6da76..2a82768067 100644
--- a/hw/core/qdev-properties.c
+++ b/hw/core/qdev-properties.c
@@ -705,13 +705,19 @@ static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name,
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
PCIHostDeviceAddress *addr = qdev_get_prop_ptr(dev, prop);
- char buffer[] = "xxxx:xx:xx.x";
+ char buffer[] = "ffff:ff:ff.f";
char *p = buffer;
int rc = 0;
- rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%d",
- addr->domain, addr->bus, addr->slot, addr->function);
- assert(rc == sizeof(buffer) - 1);
+ /*
+ * Catch "invalid" device reference from vfio-pci and allow the
+ * default buffer representing the non-existent device to be used.
+ */
+ if (~addr->domain || ~addr->bus || ~addr->slot || ~addr->function) {
+ rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%0d",
+ addr->domain, addr->bus, addr->slot, addr->function);
+ assert(rc == sizeof(buffer) - 1);
+ }
visit_type_str(v, name, &p, errp);
}
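The guard above relies on vfio-pci filling the address with all-ones to mean "no device": when every field is all-ones, each ~field evaluates to 0, the snprintf is skipped and the pre-initialised "ffff:ff:ff.f" placeholder is returned. A self-contained illustration of the idiom (field widths simplified):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* All-ones in every field is the "no device" sentinel. */
        uint32_t domain = UINT32_MAX, bus = UINT32_MAX,
                 slot = UINT32_MAX, function = UINT32_MAX;
        char buffer[] = "ffff:ff:ff.f";

        /* ~x is non-zero unless x is all-ones, so the snprintf only runs
         * for a real address and the placeholder survives otherwise. */
        if (~domain || ~bus || ~slot || ~function) {
            snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%d",
                     domain & 0xffff, bus & 0xff, slot & 0x1f,
                     (int)(function & 0x7));
        }
        printf("%s\n", buffer);     /* prints "ffff:ff:ff.f" */
        return 0;
    }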
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index 3d712d592f..bdb092ee9d 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -272,6 +272,9 @@ static void cirrus_update_memory_access(CirrusVGAState *s);
static bool blit_region_is_unsafe(struct CirrusVGAState *s,
int32_t pitch, int32_t addr)
{
+ if (!pitch) {
+ return true;
+ }
if (pitch < 0) {
int64_t min = addr
+ ((int64_t)s->cirrus_blt_height-1) * pitch;
@@ -715,7 +718,7 @@ static int cirrus_bitblt_videotovideo_patterncopy(CirrusVGAState * s)
s->cirrus_addr_mask));
}
-static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
+static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
{
int sx = 0, sy = 0;
int dx = 0, dy = 0;
@@ -729,6 +732,9 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
int width, height;
depth = s->vga.get_bpp(&s->vga) / 8;
+ if (!depth) {
+ return 0;
+ }
s->vga.get_resolution(&s->vga, &width, &height);
/* extra x, y */
@@ -783,6 +789,8 @@ static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
cirrus_invalidate_region(s, s->cirrus_blt_dstaddr,
s->cirrus_blt_dstpitch, s->cirrus_blt_width,
s->cirrus_blt_height);
+
+ return 1;
}
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
@@ -790,11 +798,9 @@ static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
if (blit_is_unsafe(s))
return 0;
- cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr,
+ return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr,
s->cirrus_blt_srcaddr - s->vga.start_addr,
s->cirrus_blt_width, s->cirrus_blt_height);
-
- return 1;
}
/***************************************
diff --git a/hw/display/milkymist-tmu2.c b/hw/display/milkymist-tmu2.c
index 9c0018448a..5c666f9b24 100644
--- a/hw/display/milkymist-tmu2.c
+++ b/hw/display/milkymist-tmu2.c
@@ -213,7 +213,7 @@ static void tmu2_start(MilkymistTMU2State *s)
/* Read the QEMU source framebuffer into an OpenGL texture */
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
- fb_len = 2*s->regs[R_TEXHRES]*s->regs[R_TEXVRES];
+ fb_len = 2ULL * s->regs[R_TEXHRES] * s->regs[R_TEXVRES];
fb = cpu_physical_memory_map(s->regs[R_TEXFBUF], &fb_len, 0);
if (fb == NULL) {
glDeleteTextures(1, &texture);
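The milkymist-tmu2 fix above is an integer-width issue: both registers are 32-bit, so 2*h*v is evaluated in 32-bit arithmetic and can wrap before it ever reaches the wider fb_len variable; writing 2ULL promotes the whole product to 64 bits. A self-contained illustration with deliberately pathological values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hres = 65536, vres = 65536;

        uint64_t wrong = 2 * hres * vres;     /* 32-bit product, wraps to 0 */
        uint64_t right = 2ULL * hres * vres;  /* 64-bit product */

        printf("%llu %llu\n", (unsigned long long)wrong,
               (unsigned long long)right);    /* 0 8589934592 */
        return 0;
    }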
diff --git a/hw/display/pl110.c b/hw/display/pl110.c
index c069c0b7fd..8c7dcc6f0a 100644
--- a/hw/display/pl110.c
+++ b/hw/display/pl110.c
@@ -466,17 +466,16 @@ static const GraphicHwOps pl110_gfx_ops = {
.gfx_update = pl110_update_display,
};
-static int pl110_initfn(SysBusDevice *sbd)
+static void pl110_realize(DeviceState *dev, Error **errp)
{
- DeviceState *dev = DEVICE(sbd);
PL110State *s = PL110(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
memory_region_init_io(&s->iomem, OBJECT(s), &pl110_ops, s, "pl110", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq);
qdev_init_gpio_in(dev, pl110_mux_ctrl_set, 1);
s->con = graphic_console_init(dev, 0, &pl110_gfx_ops, s);
- return 0;
}
static void pl110_init(Object *obj)
@@ -503,11 +502,10 @@ static void pl111_init(Object *obj)
static void pl110_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pl110_initfn;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
dc->vmsd = &vmstate_pl110;
+ dc->realize = pl110_realize;
}
static const TypeInfo pl110_info = {
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 0e2682d28b..62d0c80dcf 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -992,6 +992,34 @@ static uint32_t qxl_crc32(const uint8_t *p, unsigned len)
return crc32(0xffffffff, p, len) ^ 0xffffffff;
}
+static bool qxl_rom_monitors_config_changed(QXLRom *rom,
+ VDAgentMonitorsConfig *monitors_config,
+ unsigned int max_outputs)
+{
+ int i;
+ unsigned int monitors_count;
+
+ monitors_count = MIN(monitors_config->num_of_monitors, max_outputs);
+
+ if (rom->client_monitors_config.count != monitors_count) {
+ return true;
+ }
+
+ for (i = 0 ; i < rom->client_monitors_config.count ; ++i) {
+ VDAgentMonConfig *monitor = &monitors_config->monitors[i];
+ QXLURect *rect = &rom->client_monitors_config.heads[i];
+ /* monitor->depth ignored */
+ if ((rect->left != monitor->x) ||
+ (rect->top != monitor->y) ||
+ (rect->right != monitor->x + monitor->width) ||
+ (rect->bottom != monitor->y + monitor->height)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
/* called from main context only */
static int interface_client_monitors_config(QXLInstance *sin,
VDAgentMonitorsConfig *monitors_config)
@@ -1000,6 +1028,7 @@ static int interface_client_monitors_config(QXLInstance *sin,
QXLRom *rom = memory_region_get_ram_ptr(&qxl->rom_bar);
int i;
unsigned max_outputs = ARRAY_SIZE(rom->client_monitors_config.heads);
+ bool config_changed = false;
if (qxl->revision < 4) {
trace_qxl_client_monitors_config_unsupported_by_device(qxl->id,
@@ -1030,6 +1059,10 @@ static int interface_client_monitors_config(QXLInstance *sin,
}
#endif
+ config_changed = qxl_rom_monitors_config_changed(rom,
+ monitors_config,
+ max_outputs);
+
memset(&rom->client_monitors_config, 0,
sizeof(rom->client_monitors_config));
rom->client_monitors_config.count = monitors_config->num_of_monitors;
@@ -1059,7 +1092,9 @@ static int interface_client_monitors_config(QXLInstance *sin,
trace_qxl_interrupt_client_monitors_config(qxl->id,
rom->client_monitors_config.count,
rom->client_monitors_config.heads);
- qxl_send_events(qxl, QXL_INTERRUPT_CLIENT_MONITORS_CONFIG);
+ if (config_changed) {
+ qxl_send_events(qxl, QXL_INTERRUPT_CLIENT_MONITORS_CONFIG);
+ }
return 1;
}
diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c
index 6d1faf44af..e182893157 100644
--- a/hw/display/ssd0323.c
+++ b/hw/display/ssd0323.c
@@ -48,18 +48,18 @@ typedef struct {
SSISlave ssidev;
QemuConsole *con;
- int cmd_len;
- int cmd;
- int cmd_data[8];
- int row;
- int row_start;
- int row_end;
- int col;
- int col_start;
- int col_end;
- int redraw;
- int remap;
- enum ssd0323_mode mode;
+ uint32_t cmd_len;
+ int32_t cmd;
+ int32_t cmd_data[8];
+ int32_t row;
+ int32_t row_start;
+ int32_t row_end;
+ int32_t col;
+ int32_t col_start;
+ int32_t col_end;
+ int32_t redraw;
+ int32_t remap;
+ uint32_t mode;
uint8_t framebuffer[128 * 80 / 2];
} ssd0323_state;
@@ -279,83 +279,62 @@ static void ssd0323_cd(void *opaque, int n, int level)
s->mode = level ? SSD0323_DATA : SSD0323_CMD;
}
-static void ssd0323_save(QEMUFile *f, void *opaque)
+static int ssd0323_post_load(void *opaque, int version_id)
{
- SSISlave *ss = SSI_SLAVE(opaque);
ssd0323_state *s = (ssd0323_state *)opaque;
- int i;
-
- qemu_put_be32(f, s->cmd_len);
- qemu_put_be32(f, s->cmd);
- for (i = 0; i < 8; i++)
- qemu_put_be32(f, s->cmd_data[i]);
- qemu_put_be32(f, s->row);
- qemu_put_be32(f, s->row_start);
- qemu_put_be32(f, s->row_end);
- qemu_put_be32(f, s->col);
- qemu_put_be32(f, s->col_start);
- qemu_put_be32(f, s->col_end);
- qemu_put_be32(f, s->redraw);
- qemu_put_be32(f, s->remap);
- qemu_put_be32(f, s->mode);
- qemu_put_buffer(f, s->framebuffer, sizeof(s->framebuffer));
-
- qemu_put_be32(f, ss->cs);
-}
-
-static int ssd0323_load(QEMUFile *f, void *opaque, int version_id)
-{
- SSISlave *ss = SSI_SLAVE(opaque);
- ssd0323_state *s = (ssd0323_state *)opaque;
- int i;
- if (version_id != 1)
- return -EINVAL;
-
- s->cmd_len = qemu_get_be32(f);
- if (s->cmd_len < 0 || s->cmd_len > ARRAY_SIZE(s->cmd_data)) {
+ if (s->cmd_len > ARRAY_SIZE(s->cmd_data)) {
return -EINVAL;
}
- s->cmd = qemu_get_be32(f);
- for (i = 0; i < 8; i++)
- s->cmd_data[i] = qemu_get_be32(f);
- s->row = qemu_get_be32(f);
if (s->row < 0 || s->row >= 80) {
return -EINVAL;
}
- s->row_start = qemu_get_be32(f);
if (s->row_start < 0 || s->row_start >= 80) {
return -EINVAL;
}
- s->row_end = qemu_get_be32(f);
if (s->row_end < 0 || s->row_end >= 80) {
return -EINVAL;
}
- s->col = qemu_get_be32(f);
if (s->col < 0 || s->col >= 64) {
return -EINVAL;
}
- s->col_start = qemu_get_be32(f);
if (s->col_start < 0 || s->col_start >= 64) {
return -EINVAL;
}
- s->col_end = qemu_get_be32(f);
if (s->col_end < 0 || s->col_end >= 64) {
return -EINVAL;
}
- s->redraw = qemu_get_be32(f);
- s->remap = qemu_get_be32(f);
- s->mode = qemu_get_be32(f);
if (s->mode != SSD0323_CMD && s->mode != SSD0323_DATA) {
return -EINVAL;
}
- qemu_get_buffer(f, s->framebuffer, sizeof(s->framebuffer));
-
- ss->cs = qemu_get_be32(f);
return 0;
}
+static const VMStateDescription vmstate_ssd0323 = {
+ .name = "ssd0323_oled",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .post_load = ssd0323_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(cmd_len, ssd0323_state),
+ VMSTATE_INT32(cmd, ssd0323_state),
+ VMSTATE_INT32_ARRAY(cmd_data, ssd0323_state, 8),
+ VMSTATE_INT32(row, ssd0323_state),
+ VMSTATE_INT32(row_start, ssd0323_state),
+ VMSTATE_INT32(row_end, ssd0323_state),
+ VMSTATE_INT32(col, ssd0323_state),
+ VMSTATE_INT32(col_start, ssd0323_state),
+ VMSTATE_INT32(col_end, ssd0323_state),
+ VMSTATE_INT32(redraw, ssd0323_state),
+ VMSTATE_INT32(remap, ssd0323_state),
+ VMSTATE_UINT32(mode, ssd0323_state),
+ VMSTATE_BUFFER(framebuffer, ssd0323_state),
+ VMSTATE_SSI_SLAVE(ssidev, ssd0323_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const GraphicHwOps ssd0323_ops = {
.invalidate = ssd0323_invalidate_display,
.gfx_update = ssd0323_update_display,
@@ -372,18 +351,17 @@ static void ssd0323_realize(SSISlave *d, Error **errp)
qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY);
qdev_init_gpio_in(dev, ssd0323_cd, 1);
-
- register_savevm(dev, "ssd0323_oled", -1, 1,
- ssd0323_save, ssd0323_load, s);
}
static void ssd0323_class_init(ObjectClass *klass, void *data)
{
+ DeviceClass *dc = DEVICE_CLASS(klass);
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
k->realize = ssd0323_realize;
k->transfer = ssd0323_transfer;
k->cs_polarity = SSI_CS_HIGH;
+ dc->vmsd = &vmstate_ssd0323;
}
static const TypeInfo ssd0323_info = {
diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c
index f5aff1cbe0..1af95562f2 100644
--- a/hw/display/vga-isa.c
+++ b/hw/display/vga-isa.c
@@ -39,6 +39,8 @@ typedef struct ISAVGAState {
ISADevice parent_obj;
struct VGACommonState state;
+ PortioList portio_vga;
+ PortioList portio_vbe;
} ISAVGAState;
static void vga_isa_reset(DeviceState *dev)
@@ -60,9 +62,11 @@ static void vga_isa_realizefn(DeviceState *dev, Error **errp)
vga_common_init(s, OBJECT(dev), true);
s->legacy_address_space = isa_address_space(isadev);
vga_io_memory = vga_init_io(s, OBJECT(dev), &vga_ports, &vbe_ports);
- isa_register_portio_list(isadev, 0x3b0, vga_ports, s, "vga");
+ isa_register_portio_list(isadev, &d->portio_vga,
+ 0x3b0, vga_ports, s, "vga");
if (vbe_ports) {
- isa_register_portio_list(isadev, 0x1ce, vbe_ports, s, "vbe");
+ isa_register_portio_list(isadev, &d->portio_vbe,
+ 0x1ce, vbe_ports, s, "vbe");
}
memory_region_add_subregion_overlap(isa_address_space(isadev),
0x000a0000,
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
index 758d33a09d..23f39de94d 100644
--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -347,6 +347,7 @@ static void virgl_cmd_get_capset_info(VirtIOGPU *g,
VIRTIO_GPU_FILL_CMD(info);
+ memset(&resp, 0, sizeof(resp));
if (info.capset_index == 0) {
resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
virgl_renderer_get_cap_set(resp.capset_id,
diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c
index 34a724c754..ef92c4ad6f 100644
--- a/hw/display/virtio-gpu-pci.c
+++ b/hw/display/virtio-gpu-pci.c
@@ -48,6 +48,7 @@ static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
dc->props = virtio_gpu_pci_properties;
+ dc->hotpluggable = false;
k->realize = virtio_gpu_pci_realize;
pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
}
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 7fe6ed8bf0..5f32e1aae9 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -84,6 +84,7 @@ static void update_cursor_data_virgl(VirtIOGPU *g,
if (width != s->current_cursor->width ||
height != s->current_cursor->height) {
+ free(data);
return;
}
@@ -333,6 +334,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
qemu_log_mask(LOG_GUEST_ERROR,
"%s: host couldn't handle guest format %d\n",
__func__, c2d.format);
+ g_free(res);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
@@ -990,12 +992,9 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = {
static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
VirtIOGPU *g = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(g);
struct virtio_gpu_simple_resource *res;
int i;
- virtio_save(vdev, f);
-
/* in 2d mode we should never find unprocessed commands here */
assert(QTAILQ_EMPTY(&g->cmdq));
@@ -1020,16 +1019,10 @@ static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
VirtIOGPU *g = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(g);
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_scanout *scanout;
uint32_t resource_id, pformat;
- int i, ret;
-
- ret = virtio_load(vdev, f, VIRTIO_GPU_VM_VERSION);
- if (ret) {
- return ret;
- }
+ int i;
resource_id = qemu_get_be32(f);
while (resource_id != 0) {
@@ -1219,8 +1212,32 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
#endif
}
-VMSTATE_VIRTIO_DEVICE(gpu, VIRTIO_GPU_VM_VERSION, virtio_gpu_load,
- virtio_gpu_save);
+/*
+ * For historical reasons virtio_gpu does not adhere to the virtio migration
+ * scheme as described in doc/virtio-migration.txt, in the sense that no
+ * save/load callbacks are provided to the core. Instead the device data
+ * is saved/loaded after the core data.
+ *
+ * Because of this we need a special vmsd.
+ */
+static const VMStateDescription vmstate_virtio_gpu = {
+ .name = "virtio-gpu",
+ .minimum_version_id = VIRTIO_GPU_VM_VERSION,
+ .version_id = VIRTIO_GPU_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE /* core */,
+ {
+ .name = "virtio-gpu",
+ .info = &(const VMStateInfo) {
+ .name = "virtio-gpu",
+ .get = virtio_gpu_load,
+ .put = virtio_gpu_save,
+ },
+ .flags = VMS_SINGLE,
+ } /* device */,
+ VMSTATE_END_OF_LIST()
+ },
+};
static Property virtio_gpu_properties[] = {
DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
index 5b510a17fd..f9b017d86b 100644
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -120,8 +120,19 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
* virtio regions are moved to the end of bar #2, to make room for
* the stdvga mmio registers at the start of bar #2.
*/
- vpci_dev->modern_mem_bar = 2;
- vpci_dev->msix_bar = 4;
+ vpci_dev->modern_mem_bar_idx = 2;
+ vpci_dev->msix_bar_idx = 4;
+
+ if (!(vpci_dev->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ)) {
+ /*
+ * with page-per-vq=off there is no padding space we can use
+ * for the stdvga registers. Make the common and isr regions
+ * smaller then.
+ */
+ vpci_dev->common.size /= 2;
+ vpci_dev->isr.size /= 2;
+ }
+
offset = memory_region_size(&vpci_dev->modern_bar);
offset -= vpci_dev->notify.size;
vpci_dev->notify.offset = offset;
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index e51a05ea7e..6599cf078d 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -676,11 +676,13 @@ static void vmsvga_fifo_run(struct vmsvga_state_s *s)
cursor.bpp = vmsvga_fifo_read(s);
args = SVGA_BITMAP_SIZE(x, y) + SVGA_PIXMAP_SIZE(x, y, cursor.bpp);
- if (cursor.width > 256 ||
- cursor.height > 256 ||
- cursor.bpp > 32 ||
- SVGA_BITMAP_SIZE(x, y) > sizeof cursor.mask ||
- SVGA_PIXMAP_SIZE(x, y, cursor.bpp) > sizeof cursor.image) {
+ if (cursor.width > 256
+ || cursor.height > 256
+ || cursor.bpp > 32
+ || SVGA_BITMAP_SIZE(x, y)
+ > sizeof(cursor.mask) / sizeof(cursor.mask[0])
+ || SVGA_PIXMAP_SIZE(x, y, cursor.bpp)
+ > sizeof(cursor.image) / sizeof(cursor.image[0])) {
goto badcmd;
}
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 46b7d5eded..7a8727aa21 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -90,28 +90,29 @@ static int common_bind(struct common *c)
xen_pfn_t mfn;
if (xenstore_read_fe_uint64(&c->xendev, "page-ref", &val) == -1)
- return -1;
+ return -1;
mfn = (xen_pfn_t)val;
assert(val == mfn);
if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1)
- return -1;
+ return -1;
c->page = xenforeignmemory_map(xen_fmem, c->xendev.dom,
PROT_READ | PROT_WRITE, 1, &mfn, NULL);
if (c->page == NULL)
- return -1;
+ return -1;
xen_be_bind_evtchn(&c->xendev);
- xen_be_printf(&c->xendev, 1, "ring mfn %"PRI_xen_pfn", remote-port %d, local-port %d\n",
- mfn, c->xendev.remote_port, c->xendev.local_port);
+ xen_pv_printf(&c->xendev, 1,
+ "ring mfn %"PRI_xen_pfn", remote-port %d, local-port %d\n",
+ mfn, c->xendev.remote_port, c->xendev.local_port);
return 0;
}
static void common_unbind(struct common *c)
{
- xen_be_unbind_evtchn(&c->xendev);
+ xen_pv_unbind_evtchn(&c->xendev);
if (c->page) {
xenforeignmemory_unmap(xen_fmem, c->page, 1);
c->page = NULL;
@@ -214,7 +215,7 @@ static int xenfb_kbd_event(struct XenInput *xenfb,
XENKBD_IN_RING_REF(page, prod) = *event;
xen_wmb(); /* ensure ring contents visible */
page->in_prod = prod + 1;
- return xen_be_send_notify(&xenfb->c.xendev);
+ return xen_pv_send_notify(&xenfb->c.xendev);
}
/* Send a keyboard (or mouse button) event */
@@ -345,7 +346,7 @@ static int input_initialise(struct XenDevice *xendev)
int rc;
if (!in->c.con) {
- xen_be_printf(xendev, 1, "ds not set (yet)\n");
+ xen_pv_printf(xendev, 1, "ds not set (yet)\n");
return -1;
}
@@ -396,7 +397,7 @@ static void input_event(struct XenDevice *xendev)
if (page->out_prod == page->out_cons)
return;
page->out_cons = page->out_prod;
- xen_be_send_notify(&xenfb->c.xendev);
+ xen_pv_send_notify(&xenfb->c.xendev);
}
/* -------------------------------------------------------------------- */
@@ -500,8 +501,8 @@ out:
}
static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
- int width, int height, int depth,
- size_t fb_len, int offset, int row_stride)
+ int width, int height, int depth,
+ size_t fb_len, int offset, int row_stride)
{
size_t mfn_sz = sizeof(*((struct xenfb_page *)0)->pd);
size_t pd_len = sizeof(((struct xenfb_page *)0)->pd) / mfn_sz;
@@ -510,40 +511,47 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
int max_width, max_height;
if (fb_len_lim > fb_len_max) {
- xen_be_printf(&xenfb->c.xendev, 0, "fb size limit %zu exceeds %zu, corrected\n",
- fb_len_lim, fb_len_max);
- fb_len_lim = fb_len_max;
+ xen_pv_printf(&xenfb->c.xendev, 0,
+ "fb size limit %zu exceeds %zu, corrected\n",
+ fb_len_lim, fb_len_max);
+ fb_len_lim = fb_len_max;
}
if (fb_len_lim && fb_len > fb_len_lim) {
- xen_be_printf(&xenfb->c.xendev, 0, "frontend fb size %zu limited to %zu\n",
- fb_len, fb_len_lim);
- fb_len = fb_len_lim;
+ xen_pv_printf(&xenfb->c.xendev, 0,
+ "frontend fb size %zu limited to %zu\n",
+ fb_len, fb_len_lim);
+ fb_len = fb_len_lim;
}
if (depth != 8 && depth != 16 && depth != 24 && depth != 32) {
- xen_be_printf(&xenfb->c.xendev, 0, "can't handle frontend fb depth %d\n",
- depth);
- return -1;
+ xen_pv_printf(&xenfb->c.xendev, 0,
+ "can't handle frontend fb depth %d\n",
+ depth);
+ return -1;
}
if (row_stride <= 0 || row_stride > fb_len) {
- xen_be_printf(&xenfb->c.xendev, 0, "invalid frontend stride %d\n", row_stride);
- return -1;
+ xen_pv_printf(&xenfb->c.xendev, 0, "invalid frontend stride %d\n",
+ row_stride);
+ return -1;
}
max_width = row_stride / (depth / 8);
if (width < 0 || width > max_width) {
- xen_be_printf(&xenfb->c.xendev, 0, "invalid frontend width %d limited to %d\n",
- width, max_width);
- width = max_width;
+ xen_pv_printf(&xenfb->c.xendev, 0,
+ "invalid frontend width %d limited to %d\n",
+ width, max_width);
+ width = max_width;
}
if (offset < 0 || offset >= fb_len) {
- xen_be_printf(&xenfb->c.xendev, 0, "invalid frontend offset %d (max %zu)\n",
- offset, fb_len - 1);
- return -1;
+ xen_pv_printf(&xenfb->c.xendev, 0,
+ "invalid frontend offset %d (max %zu)\n",
+ offset, fb_len - 1);
+ return -1;
}
max_height = (fb_len - offset) / row_stride;
if (height < 0 || height > max_height) {
- xen_be_printf(&xenfb->c.xendev, 0, "invalid frontend height %d limited to %d\n",
- height, max_height);
- height = max_height;
+ xen_pv_printf(&xenfb->c.xendev, 0,
+ "invalid frontend height %d limited to %d\n",
+ height, max_height);
+ height = max_height;
}
xenfb->fb_len = fb_len;
xenfb->row_stride = row_stride;
@@ -553,8 +561,9 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
xenfb->offset = offset;
xenfb->up_fullscreen = 1;
xenfb->do_resize = 1;
- xen_be_printf(&xenfb->c.xendev, 1, "framebuffer %dx%dx%d offset %d stride %d\n",
- width, height, depth, offset, row_stride);
+ xen_pv_printf(&xenfb->c.xendev, 1,
+ "framebuffer %dx%dx%d offset %d stride %d\n",
+ width, height, depth, offset, row_stride);
return 0;
}
@@ -631,7 +640,7 @@ static void xenfb_guest_copy(struct XenFB *xenfb, int x, int y, int w, int h)
}
}
if (oops) /* should not happen */
- xen_be_printf(&xenfb->c.xendev, 0, "%s: oops: convert %d -> %d bpp?\n",
+ xen_pv_printf(&xenfb->c.xendev, 0, "%s: oops: convert %d -> %d bpp?\n",
__FUNCTION__, xenfb->depth, bpp);
dpy_gfx_update(xenfb->c.con, x, y, w, h);
@@ -663,7 +672,7 @@ static void xenfb_send_event(struct XenFB *xenfb, union xenfb_in_event *event)
xen_wmb(); /* ensure ring contents visible */
page->in_prod = prod + 1;
- xen_be_send_notify(&xenfb->c.xendev);
+ xen_pv_send_notify(&xenfb->c.xendev);
}
static void xenfb_send_refresh_period(struct XenFB *xenfb, int period)
@@ -696,9 +705,9 @@ static void xenfb_update(void *opaque)
return;
if (!xenfb->feature_update) {
- /* we don't get update notifications, thus use the
- * sledge hammer approach ... */
- xenfb->up_fullscreen = 1;
+ /* we don't get update notifications, thus use the
+ * sledge hammer approach ... */
+ xenfb->up_fullscreen = 1;
}
/* resize if needed */
@@ -721,7 +730,8 @@ static void xenfb_update(void *opaque)
break;
}
dpy_gfx_replace_surface(xenfb->c.con, surface);
- xen_be_printf(&xenfb->c.xendev, 1, "update: resizing: %dx%d @ %d bpp%s\n",
+ xen_pv_printf(&xenfb->c.xendev, 1,
+ "update: resizing: %dx%d @ %d bpp%s\n",
xenfb->width, xenfb->height, xenfb->depth,
is_buffer_shared(surface) ? " (shared)" : "");
xenfb->up_fullscreen = 1;
@@ -729,18 +739,19 @@ static void xenfb_update(void *opaque)
/* run queued updates */
if (xenfb->up_fullscreen) {
- xen_be_printf(&xenfb->c.xendev, 3, "update: fullscreen\n");
- xenfb_guest_copy(xenfb, 0, 0, xenfb->width, xenfb->height);
+ xen_pv_printf(&xenfb->c.xendev, 3, "update: fullscreen\n");
+ xenfb_guest_copy(xenfb, 0, 0, xenfb->width, xenfb->height);
} else if (xenfb->up_count) {
- xen_be_printf(&xenfb->c.xendev, 3, "update: %d rects\n", xenfb->up_count);
- for (i = 0; i < xenfb->up_count; i++)
- xenfb_guest_copy(xenfb,
- xenfb->up_rects[i].x,
- xenfb->up_rects[i].y,
- xenfb->up_rects[i].w,
- xenfb->up_rects[i].h);
+ xen_pv_printf(&xenfb->c.xendev, 3, "update: %d rects\n",
+ xenfb->up_count);
+ for (i = 0; i < xenfb->up_count; i++)
+ xenfb_guest_copy(xenfb,
+ xenfb->up_rects[i].x,
+ xenfb->up_rects[i].y,
+ xenfb->up_rects[i].w,
+ xenfb->up_rects[i].h);
} else {
- xen_be_printf(&xenfb->c.xendev, 3, "update: nothing\n");
+ xen_pv_printf(&xenfb->c.xendev, 3, "update: nothing\n");
}
xenfb->up_count = 0;
xenfb->up_fullscreen = 0;
@@ -794,14 +805,14 @@ static void xenfb_handle_events(struct XenFB *xenfb)
w = MIN(event->update.width, xenfb->width - x);
h = MIN(event->update.height, xenfb->height - y);
if (w < 0 || h < 0) {
- xen_be_printf(&xenfb->c.xendev, 1, "bogus update ignored\n");
+ xen_pv_printf(&xenfb->c.xendev, 1, "bogus update ignored\n");
break;
}
if (x != event->update.x ||
y != event->update.y ||
w != event->update.width ||
h != event->update.height) {
- xen_be_printf(&xenfb->c.xendev, 1, "bogus update clipped\n");
+ xen_pv_printf(&xenfb->c.xendev, 1, "bogus update clipped\n");
}
if (w == xenfb->width && h > xenfb->height / 2) {
/* scroll detector: updated more than 50% of the lines,
@@ -883,7 +894,7 @@ static int fb_initialise(struct XenDevice *xendev)
if (fb->feature_update)
xenstore_write_be_int(xendev, "request-update", 1);
- xen_be_printf(xendev, 1, "feature-update=%d, videoram=%d\n",
+ xen_pv_printf(xendev, 1, "feature-update=%d, videoram=%d\n",
fb->feature_update, videoram);
return 0;
}
@@ -902,7 +913,7 @@ static void fb_disconnect(struct XenDevice *xendev)
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON,
-1, 0);
if (fb->pixels == MAP_FAILED) {
- xen_be_printf(xendev, 0,
+ xen_pv_printf(xendev, 0,
"Couldn't replace the framebuffer with anonymous memory errno=%d\n",
errno);
}
@@ -923,7 +934,7 @@ static void fb_frontend_changed(struct XenDevice *xendev, const char *node)
if (fb->bug_trigger == 0 && strcmp(node, "state") == 0 &&
xendev->fe_state == XenbusStateConnected &&
xendev->be_state == XenbusStateConnected) {
- xen_be_printf(xendev, 2, "re-trigger connected (frontend bug)\n");
+ xen_pv_printf(xendev, 2, "re-trigger connected (frontend bug)\n");
xen_be_set_state(xendev, XenbusStateConnected);
fb->bug_trigger = 1; /* only once */
}
@@ -934,7 +945,7 @@ static void fb_event(struct XenDevice *xendev)
struct XenFB *xenfb = container_of(xendev, struct XenFB, c.xendev);
xenfb_handle_events(xenfb);
- xen_be_send_notify(&xenfb->c.xendev);
+ xen_pv_send_notify(&xenfb->c.xendev);
}
/* -------------------------------------------------------------------- */
@@ -977,14 +988,14 @@ void xen_init_display(int domid)
wait_more:
i++;
main_loop_wait(true);
- xfb = xen_be_find_xendev("vfb", domid, 0);
- xin = xen_be_find_xendev("vkbd", domid, 0);
+ xfb = xen_pv_find_xendev("vfb", domid, 0);
+ xin = xen_pv_find_xendev("vkbd", domid, 0);
if (!xfb || !xin) {
if (i < 256) {
usleep(10000);
goto wait_more;
}
- xen_be_printf(NULL, 1, "displaystate setup failed\n");
+ xen_pv_printf(NULL, 1, "displaystate setup failed\n");
return;
}
diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c
index f345c54762..8bd82e8bc8 100644
--- a/hw/dma/i8257.c
+++ b/hw/dma/i8257.c
@@ -553,10 +553,12 @@ static void i8257_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(isa_address_space_io(isa),
d->base, &d->channel_io);
- isa_register_portio_list(isa, d->page_base, page_portio_list, d,
+ isa_register_portio_list(isa, &d->portio_page,
+ d->page_base, page_portio_list, d,
"dma-page");
if (d->pageh_base >= 0) {
- isa_register_portio_list(isa, d->pageh_base, pageh_portio_list, d,
+ isa_register_portio_list(isa, &d->portio_pageh,
+ d->pageh_base, pageh_portio_list, d,
"dma-pageh");
}
@@ -598,6 +600,8 @@ static void i8257_class_init(ObjectClass *klass, void *data)
idc->release_DREQ = i8257_dma_release_DREQ;
idc->schedule = i8257_dma_schedule;
idc->register_channel = i8257_dma_register_channel;
+ /* Reason: needs to be wired up by isa_bus_dma() to work */
+ dc->cannot_instantiate_with_device_add_yet = true;
}
static const TypeInfo i8257_info = {
diff --git a/hw/dma/omap_dma.c b/hw/dma/omap_dma.c
index 700cd6b43e..f6f86f9639 100644
--- a/hw/dma/omap_dma.c
+++ b/hw/dma/omap_dma.c
@@ -1975,7 +1975,7 @@ static void omap_dma4_write(void *opaque, hwaddr addr,
ch->endian[1] =(value >> 19) & 1;
ch->endian_lock[1] =(value >> 18) & 1;
if (ch->endian[0] != ch->endian[1])
- fprintf(stderr, "%s: DMA endiannes conversion enable attempt\n",
+ fprintf(stderr, "%s: DMA endianness conversion enable attempt\n",
__FUNCTION__);
ch->write_mode = (value >> 16) & 3;
ch->burst[1] = (value & 0xc000) >> 14;
diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c
index 3bed5c3390..7724c93b8f 100644
--- a/hw/dma/pl080.c
+++ b/hw/dma/pl080.c
@@ -351,7 +351,7 @@ static void pl080_write(void *opaque, hwaddr offset,
break;
case 12: /* Configuration */
s->conf = value;
- if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
+ if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
qemu_log_mask(LOG_UNIMP,
"pl080_write: Big-endian DMA not implemented\n");
}
diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c
index 2f2576fafb..17c8518fea 100644
--- a/hw/dma/rc4030.c
+++ b/hw/dma/rc4030.c
@@ -616,34 +616,9 @@ static void rc4030_reset(DeviceState *dev)
qemu_irq_lower(s->jazz_bus_irq);
}
-static int rc4030_load(QEMUFile *f, void *opaque, int version_id)
+static int rc4030_post_load(void *opaque, int version_id)
{
rc4030State* s = opaque;
- int i, j;
-
- if (version_id != 2)
- return -EINVAL;
-
- s->config = qemu_get_be32(f);
- s->invalid_address_register = qemu_get_be32(f);
- for (i = 0; i < 8; i++)
- for (j = 0; j < 4; j++)
- s->dma_regs[i][j] = qemu_get_be32(f);
- s->dma_tl_base = qemu_get_be32(f);
- s->dma_tl_limit = qemu_get_be32(f);
- s->cache_maint = qemu_get_be32(f);
- s->remote_failed_address = qemu_get_be32(f);
- s->memory_failed_address = qemu_get_be32(f);
- s->cache_ptag = qemu_get_be32(f);
- s->cache_ltag = qemu_get_be32(f);
- s->cache_bmask = qemu_get_be32(f);
- s->memory_refresh_rate = qemu_get_be32(f);
- s->nvram_protect = qemu_get_be32(f);
- for (i = 0; i < 15; i++)
- s->rem_speed[i] = qemu_get_be32(f);
- s->imr_jazz = qemu_get_be32(f);
- s->isr_jazz = qemu_get_be32(f);
- s->itr = qemu_get_be32(f);
set_next_tick(s);
update_jazz_irq(s);
@@ -651,32 +626,31 @@ static int rc4030_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static void rc4030_save(QEMUFile *f, void *opaque)
-{
- rc4030State* s = opaque;
- int i, j;
-
- qemu_put_be32(f, s->config);
- qemu_put_be32(f, s->invalid_address_register);
- for (i = 0; i < 8; i++)
- for (j = 0; j < 4; j++)
- qemu_put_be32(f, s->dma_regs[i][j]);
- qemu_put_be32(f, s->dma_tl_base);
- qemu_put_be32(f, s->dma_tl_limit);
- qemu_put_be32(f, s->cache_maint);
- qemu_put_be32(f, s->remote_failed_address);
- qemu_put_be32(f, s->memory_failed_address);
- qemu_put_be32(f, s->cache_ptag);
- qemu_put_be32(f, s->cache_ltag);
- qemu_put_be32(f, s->cache_bmask);
- qemu_put_be32(f, s->memory_refresh_rate);
- qemu_put_be32(f, s->nvram_protect);
- for (i = 0; i < 15; i++)
- qemu_put_be32(f, s->rem_speed[i]);
- qemu_put_be32(f, s->imr_jazz);
- qemu_put_be32(f, s->isr_jazz);
- qemu_put_be32(f, s->itr);
-}
+static const VMStateDescription vmstate_rc4030 = {
+ .name = "rc4030",
+ .version_id = 3,
+ .post_load = rc4030_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(config, rc4030State),
+ VMSTATE_UINT32(invalid_address_register, rc4030State),
+ VMSTATE_UINT32_2DARRAY(dma_regs, rc4030State, 8, 4),
+ VMSTATE_UINT32(dma_tl_base, rc4030State),
+ VMSTATE_UINT32(dma_tl_limit, rc4030State),
+ VMSTATE_UINT32(cache_maint, rc4030State),
+ VMSTATE_UINT32(remote_failed_address, rc4030State),
+ VMSTATE_UINT32(memory_failed_address, rc4030State),
+ VMSTATE_UINT32(cache_ptag, rc4030State),
+ VMSTATE_UINT32(cache_ltag, rc4030State),
+ VMSTATE_UINT32(cache_bmask, rc4030State),
+ VMSTATE_UINT32(memory_refresh_rate, rc4030State),
+ VMSTATE_UINT32(nvram_protect, rc4030State),
+ VMSTATE_UINT32_ARRAY(rem_speed, rc4030State, 16),
+ VMSTATE_UINT32(imr_jazz, rc4030State),
+ VMSTATE_UINT32(isr_jazz, rc4030State),
+ VMSTATE_UINT32(itr, rc4030State),
+ VMSTATE_END_OF_LIST()
+ }
+};
static void rc4030_do_dma(void *opaque, int n, uint8_t *buf, int len, int is_write)
{
@@ -753,8 +727,6 @@ static void rc4030_initfn(Object *obj)
sysbus_init_irq(sysbus, &s->timer_irq);
sysbus_init_irq(sysbus, &s->jazz_bus_irq);
- register_savevm(NULL, "rc4030", 0, 2, rc4030_save, rc4030_load, s);
-
sysbus_init_mmio(sysbus, &s->iomem_chipset);
sysbus_init_mmio(sysbus, &s->iomem_jazzio);
}
@@ -813,6 +785,7 @@ static void rc4030_class_init(ObjectClass *klass, void *class_data)
dc->realize = rc4030_realize;
dc->unrealize = rc4030_unrealize;
dc->reset = rc4030_reset;
+ dc->vmsd = &vmstate_rc4030;
}
static const TypeInfo rc4030_info = {
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index a4753e55a2..6065689ad1 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -111,6 +111,7 @@ struct Stream {
unsigned int complete_cnt;
uint32_t regs[R_MAX];
uint8_t app[20];
+ unsigned char txbuf[16 * 1024];
};
struct XilinxAXIDMAStreamSlave {
@@ -256,7 +257,6 @@ static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
StreamSlave *tx_control_dev)
{
uint32_t prev_d;
- unsigned char txbuf[16 * 1024];
unsigned int txlen;
if (!stream_running(s) || stream_idle(s)) {
@@ -277,17 +277,17 @@ static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
}
txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
- if ((txlen + s->pos) > sizeof txbuf) {
+ if ((txlen + s->pos) > sizeof s->txbuf) {
hw_error("%s: too small internal txbuf! %d\n", __func__,
txlen + s->pos);
}
cpu_physical_memory_read(s->desc.buffer_address,
- txbuf + s->pos, txlen);
+ s->txbuf + s->pos, txlen);
s->pos += txlen;
if (stream_desc_eof(&s->desc)) {
- stream_push(tx_data_dev, txbuf, s->pos);
+ stream_push(tx_data_dev, s->txbuf, s->pos);
s->pos = 0;
stream_complete(s);
}
@@ -548,7 +548,7 @@ static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
st->nr = i;
st->bh = qemu_bh_new(timer_hit, st);
- st->ptimer = ptimer_init(st->bh);
+ st->ptimer = ptimer_init(st->bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(st->ptimer, s->freqhz);
}
return;
diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c
index f3574aa8f3..c36c394fda 100644
--- a/hw/gpio/imx_gpio.c
+++ b/hw/gpio/imx_gpio.c
@@ -237,7 +237,7 @@ static void imx_gpio_write(void *opaque, hwaddr offset, uint64_t value,
break;
case ISR_ADDR:
- s->isr |= ~value;
+ s->isr &= ~value;
imx_gpio_set_all_int_lines(s);
break;
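The imx_gpio change is a write-one-to-clear fix: a write to the interrupt status register must clear exactly the bits the guest set in the written value, whereas the old |= ~value asserted every other bit instead. A self-contained sketch of the two behaviours with made-up status bits:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t isr = 0x0000000f;        /* four interrupts pending */
        uint32_t write_val = 0x00000005;  /* guest acknowledges bits 0 and 2 */

        /* Buggy pre-patch form: asserts every bit the guest did not write. */
        uint32_t buggy = isr | ~write_val;

        /* Write-one-to-clear: drop only the acknowledged bits. */
        uint32_t fixed = isr & ~write_val;

        printf("buggy=0x%08x fixed=0x%08x\n", buggy, fixed);
        /* prints buggy=0xffffffff fixed=0x0000000a */
        return 0;
    }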
diff --git a/hw/i2c/bitbang_i2c.c b/hw/i2c/bitbang_i2c.c
index d3a29891f8..8be88ee265 100644
--- a/hw/i2c/bitbang_i2c.c
+++ b/hw/i2c/bitbang_i2c.c
@@ -130,14 +130,25 @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
return bitbang_i2c_ret(i2c, 1);
case WAITING_FOR_ACK:
+ {
+ int ret;
+
if (i2c->current_addr < 0) {
i2c->current_addr = i2c->buffer;
DPRINTF("Address 0x%02x\n", i2c->current_addr);
- i2c_start_transfer(i2c->bus, i2c->current_addr >> 1,
- i2c->current_addr & 1);
+ ret = i2c_start_transfer(i2c->bus, i2c->current_addr >> 1,
+ i2c->current_addr & 1);
} else {
DPRINTF("Sent 0x%02x\n", i2c->buffer);
- i2c_send(i2c->bus, i2c->buffer);
+ ret = i2c_send(i2c->bus, i2c->buffer);
+ }
+ if (ret) {
+ /* NACK (either addressing a nonexistent device, or the
+ * device we were sending to decided to NACK us).
+ */
+ DPRINTF("Got NACK\n");
+ bitbang_i2c_enter_stop(i2c);
+ return bitbang_i2c_ret(i2c, 1);
}
if (i2c->current_addr & 1) {
i2c->state = RECEIVING_BIT7;
@@ -145,7 +156,7 @@ int bitbang_i2c_set(bitbang_i2c_interface *i2c, int line, int level)
i2c->state = SENDING_BIT7;
}
return bitbang_i2c_ret(i2c, 0);
-
+ }
case RECEIVING_BIT7:
i2c->buffer = i2c_recv(i2c->bus);
DPRINTF("RX byte 0x%02x\n", i2c->buffer);
diff --git a/hw/i2c/core.c b/hw/i2c/core.c
index 4afbe0bde5..abd4c4cddb 100644
--- a/hw/i2c/core.c
+++ b/hw/i2c/core.c
@@ -88,7 +88,12 @@ int i2c_bus_busy(I2CBus *bus)
return !QLIST_EMPTY(&bus->current_devs);
}
-/* Returns non-zero if the address is not valid. */
+/*
+ * Returns non-zero if the address is not valid. If this is called
+ * again without an intervening i2c_end_transfer(), like in the SMBus
+ * case where the operation is switched from write to read, this
+ * function will not rescan the bus and thus cannot fail.
+ */
/* TODO: Make this handle multiple masters. */
int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv)
{
@@ -104,15 +109,25 @@ int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv)
bus->broadcast = true;
}
- QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) {
- DeviceState *qdev = kid->child;
- I2CSlave *candidate = I2C_SLAVE(qdev);
- if ((candidate->address == address) || (bus->broadcast)) {
- node = g_malloc(sizeof(struct I2CNode));
- node->elt = candidate;
- QLIST_INSERT_HEAD(&bus->current_devs, node, next);
- if (!bus->broadcast) {
- break;
+ /*
+ * If there are already devices in the list, that means we are in
+ * the middle of a transaction and we shouldn't rescan the bus.
+ *
+ * This happens with any SMBus transaction, even on a pure I2C
+ * device. The interface does a transaction start without
+ * terminating the previous transaction.
+ */
+ if (QLIST_EMPTY(&bus->current_devs)) {
+ QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) {
+ DeviceState *qdev = kid->child;
+ I2CSlave *candidate = I2C_SLAVE(qdev);
+ if ((candidate->address == address) || (bus->broadcast)) {
+ node = g_malloc(sizeof(struct I2CNode));
+ node->elt = candidate;
+ QLIST_INSERT_HEAD(&bus->current_devs, node, next);
+ if (!bus->broadcast) {
+ break;
+ }
}
}
}
@@ -137,10 +152,6 @@ void i2c_end_transfer(I2CBus *bus)
I2CSlaveClass *sc;
I2CNode *node, *next;
- if (QLIST_EMPTY(&bus->current_devs)) {
- return;
- }
-
QLIST_FOREACH_SAFE(node, &bus->current_devs, next, next) {
sc = I2C_SLAVE_GET_CLASS(node->elt);
if (sc->event) {
diff --git a/hw/i2c/smbus.c b/hw/i2c/smbus.c
index 3979b3dad7..5b4dd3eba4 100644
--- a/hw/i2c/smbus.c
+++ b/hw/i2c/smbus.c
@@ -248,7 +248,9 @@ int smbus_read_byte(I2CBus *bus, uint8_t addr, uint8_t command)
return -1;
}
i2c_send(bus, command);
- i2c_start_transfer(bus, addr, 1);
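+ /* restarting an in-progress transfer never rescans the bus, so it cannot fail */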
+ if (i2c_start_transfer(bus, addr, 1)) {
+ assert(0);
+ }
data = i2c_recv(bus);
i2c_nack(bus);
i2c_end_transfer(bus);
@@ -273,7 +275,9 @@ int smbus_read_word(I2CBus *bus, uint8_t addr, uint8_t command)
return -1;
}
i2c_send(bus, command);
- i2c_start_transfer(bus, addr, 1);
+ if (i2c_start_transfer(bus, addr, 1)) {
+ assert(0);
+ }
data = i2c_recv(bus);
data |= i2c_recv(bus) << 8;
i2c_nack(bus);
@@ -302,7 +306,9 @@ int smbus_read_block(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t *data)
return -1;
}
i2c_send(bus, command);
- i2c_start_transfer(bus, addr, 1);
+ if (i2c_start_transfer(bus, addr, 1)) {
+ assert(0);
+ }
len = i2c_recv(bus);
if (len > 32) {
len = 0;
diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
index 90e94ffefd..909ead6a77 100644
--- a/hw/i386/Makefile.objs
+++ b/hw/i386/Makefile.objs
@@ -3,6 +3,7 @@ obj-y += multiboot.o
obj-y += pc.o pc_piix.o pc_q35.o
obj-y += pc_sysfw.o
obj-y += x86-iommu.o intel_iommu.o
+obj-y += amd_iommu.o
obj-$(CONFIG_XEN) += ../xenpv/ xen/
obj-y += kvmvapic.o
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index a26a4bb03f..9708cdc463 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -53,13 +53,13 @@
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/q35.h"
#include "hw/i386/x86-iommu.h"
-#include "hw/timer/hpet.h"
#include "hw/acpi/aml-build.h"
#include "qapi/qmp/qint.h"
#include "qom/qom-qobject.h"
-#include "hw/i386/x86-iommu.h"
+#include "hw/i386/amd_iommu.h"
+#include "hw/i386/intel_iommu.h"
#include "hw/acpi/ipmi.h"
@@ -339,24 +339,38 @@ build_fadt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm,
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
CPUArchIdList *apic_ids, GArray *entry)
{
- int apic_id;
- AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof *apic);
+ uint32_t apic_id = apic_ids->cpus[uid].arch_id;
- apic_id = apic_ids->cpus[uid].arch_id;
- apic->type = ACPI_APIC_PROCESSOR;
- apic->length = sizeof(*apic);
- apic->processor_id = uid;
- apic->local_apic_id = apic_id;
- if (apic_ids->cpus[uid].cpu != NULL) {
- apic->flags = cpu_to_le32(1);
+ /* ACPI spec says that the LAPIC entry for a non-present
+ * CPU may be omitted from the MADT or it must be marked
+ * as disabled. However, omitting non-present CPUs from the
+ * MADT breaks hotplug on Linux. So possible CPUs
+ * should be put in the MADT but kept disabled.
+ */
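+ /* The LAPIC MADT entry carries an 8-bit APIC ID (0xff is the broadcast
+ * ID), so CPUs with an APIC ID of 255 or above get X2APIC entries.
+ */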
+ if (apic_id < 255) {
+ AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof *apic);
+
+ apic->type = ACPI_APIC_PROCESSOR;
+ apic->length = sizeof(*apic);
+ apic->processor_id = uid;
+ apic->local_apic_id = apic_id;
+ if (apic_ids->cpus[uid].cpu != NULL) {
+ apic->flags = cpu_to_le32(1);
+ } else {
+ apic->flags = cpu_to_le32(0);
+ }
} else {
- /* ACPI spec says that LAPIC entry for non present
- * CPU may be omitted from MADT or it must be marked
- * as disabled. However omitting non present CPU from
- * MADT breaks hotplug on linux. So possible CPUs
- * should be put in MADT but kept disabled.
- */
- apic->flags = cpu_to_le32(0);
+ AcpiMadtProcessorX2Apic *apic = acpi_data_push(entry, sizeof *apic);
+
+ apic->type = ACPI_APIC_LOCAL_X2APIC;
+ apic->length = sizeof(*apic);
+ apic->uid = cpu_to_le32(uid);
+ apic->x2apic_id = cpu_to_le32(apic_id);
+ if (apic_ids->cpus[uid].cpu != NULL) {
+ apic->flags = cpu_to_le32(1);
+ } else {
+ apic->flags = cpu_to_le32(0);
+ }
}
}
@@ -368,11 +382,11 @@ build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
int madt_start = table_data->len;
AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
AcpiDeviceIf *adev = ACPI_DEVICE_IF(pcms->acpi_dev);
+ bool x2apic_mode = false;
AcpiMultipleApicTable *madt;
AcpiMadtIoApic *io_apic;
AcpiMadtIntsrcovr *intsrcovr;
- AcpiMadtLocalNmi *local_nmi;
int i;
madt = acpi_data_push(table_data, sizeof *madt);
@@ -381,6 +395,9 @@ build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
for (i = 0; i < apic_ids->len; i++) {
adevc->madt_cpu(adev, i, apic_ids, table_data);
+ if (apic_ids->cpus[i].arch_id > 254) {
+ x2apic_mode = true;
+ }
}
g_free(apic_ids);
@@ -413,12 +430,25 @@ build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */
}
- local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
- local_nmi->type = ACPI_APIC_LOCAL_NMI;
- local_nmi->length = sizeof(*local_nmi);
- local_nmi->processor_id = 0xff; /* all processors */
- local_nmi->flags = cpu_to_le16(0);
- local_nmi->lint = 1; /* ACPI_LINT1 */
+ if (x2apic_mode) {
+ AcpiMadtLocalX2ApicNmi *local_nmi;
+
+ local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
+ local_nmi->type = ACPI_APIC_LOCAL_X2APIC_NMI;
+ local_nmi->length = sizeof(*local_nmi);
+ local_nmi->uid = 0xFFFFFFFF; /* all processors */
+ local_nmi->flags = cpu_to_le16(0);
+ local_nmi->lint = 1; /* ACPI_LINT1 */
+ } else {
+ AcpiMadtLocalNmi *local_nmi;
+
+ local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
+ local_nmi->type = ACPI_APIC_LOCAL_NMI;
+ local_nmi->length = sizeof(*local_nmi);
+ local_nmi->processor_id = 0xff; /* all processors */
+ local_nmi->flags = cpu_to_le16(0);
+ local_nmi->lint = 1; /* ACPI_LINT1 */
+ }
build_header(linker, table_data,
(void *)(table_data->data + madt_start), "APIC",
@@ -789,7 +819,7 @@ static gint crs_range_compare(gconstpointer a, gconstpointer b)
static void crs_replace_with_free_ranges(GPtrArray *ranges,
uint64_t start, uint64_t end)
{
- GPtrArray *free_ranges = g_ptr_array_new_with_free_func(crs_range_free);
+ GPtrArray *free_ranges = g_ptr_array_new();
uint64_t free_base = start;
int i;
@@ -813,7 +843,7 @@ static void crs_replace_with_free_ranges(GPtrArray *ranges,
g_ptr_array_add(ranges, g_ptr_array_index(free_ranges, i));
}
- g_ptr_array_free(free_ranges, false);
+ g_ptr_array_free(free_ranges, true);
}
/*
@@ -2038,6 +2068,13 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
method = aml_method("_E03", 0, AML_NOTSERIALIZED);
aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH));
aml_append(scope, method);
+
+ if (pcms->acpi_nvdimm_state.is_enabled) {
+ method = aml_method("_E04", 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
+ aml_int(0x80)));
+ aml_append(scope, method);
+ }
}
aml_append(dsdt, scope);
@@ -2390,7 +2427,6 @@ static void
build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
{
AcpiSystemResourceAffinityTable *srat;
- AcpiSratProcessorAffinity *core;
AcpiSratMemoryAffinity *numamem;
int i;
@@ -2409,22 +2445,34 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
srat->reserved1 = cpu_to_le32(1);
for (i = 0; i < apic_ids->len; i++) {
- int j;
- int apic_id = apic_ids->cpus[i].arch_id;
-
- core = acpi_data_push(table_data, sizeof *core);
- core->type = ACPI_SRAT_PROCESSOR_APIC;
- core->length = sizeof(*core);
- core->local_apic_id = apic_id;
- for (j = 0; j < nb_numa_nodes; j++) {
- if (test_bit(i, numa_info[j].node_cpu)) {
+ int j = numa_get_node_for_cpu(i);
+ uint32_t apic_id = apic_ids->cpus[i].arch_id;
+
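+ /* As in the MADT, APIC IDs of 255 and above need x2APIC affinity structures */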
+ if (apic_id < 255) {
+ AcpiSratProcessorAffinity *core;
+
+ core = acpi_data_push(table_data, sizeof *core);
+ core->type = ACPI_SRAT_PROCESSOR_APIC;
+ core->length = sizeof(*core);
+ core->local_apic_id = apic_id;
+ if (j < nb_numa_nodes) {
core->proximity_lo = j;
- break;
}
+ memset(core->proximity_hi, 0, 3);
+ core->local_sapic_eid = 0;
+ core->flags = cpu_to_le32(1);
+ } else {
+ AcpiSratProcessorX2ApicAffinity *core;
+
+ core = acpi_data_push(table_data, sizeof *core);
+ core->type = ACPI_SRAT_PROCESSOR_x2APIC;
+ core->length = sizeof(*core);
+ core->x2apic_id = cpu_to_le32(apic_id);
+ if (j < nb_numa_nodes) {
+ core->proximity_domain = cpu_to_le32(j);
+ }
+ core->flags = cpu_to_le32(1);
}
- memset(core->proximity_hi, 0, 3);
- core->local_sapic_eid = 0;
- core->flags = cpu_to_le32(1);
}
@@ -2557,11 +2605,68 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker)
scope->length = ioapic_scope_size;
scope->enumeration_id = ACPI_BUILD_IOAPIC_ID;
scope->bus = Q35_PSEUDO_BUS_PLATFORM;
- scope->path[0] = cpu_to_le16(Q35_PSEUDO_DEVFN_IOAPIC);
+ scope->path[0].device = PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC);
+ scope->path[0].function = PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC);
build_header(linker, table_data, (void *)(table_data->data + dmar_start),
"DMAR", table_data->len - dmar_start, 1, NULL, NULL);
}
+/*
+ * IVRS table as specified in AMD IOMMU Specification v2.62, Section 5.2
+ * accessible here http://support.amd.com/TechDocs/48882_IOMMU.pdf
+ */
+static void
+build_amd_iommu(GArray *table_data, BIOSLinker *linker)
+{
+ int iommu_start = table_data->len;
+ AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default());
+
+ /* IVRS header */
+ acpi_data_push(table_data, sizeof(AcpiTableHeader));
+ /* IVinfo - IO virtualization information common to all
+ * IOMMU units in a system
+ */
+ build_append_int_noprefix(table_data, 40UL << 8 /* PASize */, 4);
+ /* reserved */
+ build_append_int_noprefix(table_data, 0, 8);
+
+ /* IVHD definition - type 10h */
+ build_append_int_noprefix(table_data, 0x10, 1);
+ /* virtualization flags */
+ build_append_int_noprefix(table_data,
+ (1UL << 0) | /* HtTunEn */
+ (1UL << 4) | /* iotblSup */
+ (1UL << 6) | /* PrefSup */
+ (1UL << 7), /* PPRSup */
+ 1);
+ /* IVHD length */
+ build_append_int_noprefix(table_data, 0x24, 2);
+ /* DeviceID */
+ build_append_int_noprefix(table_data, s->devid, 2);
+ /* Capability offset */
+ build_append_int_noprefix(table_data, s->capab_offset, 2);
+ /* IOMMU base address */
+ build_append_int_noprefix(table_data, s->mmio.addr, 8);
+ /* PCI Segment Group */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* IOMMU info */
+ build_append_int_noprefix(table_data, 0, 2);
+ /* IOMMU Feature Reporting */
+ build_append_int_noprefix(table_data,
+ (48UL << 30) | /* HATS */
+ (48UL << 28) | /* GATS */
+ (1UL << 2), /* GTSup */
+ 4);
+ /*
+ * Type 1 device entry reporting all devices
+ * These are 4-byte device entries currently reporting the range of all
+ * devices. Refer to Spec - Table 95: IVHD Device Entry Type Codes (4-byte).
+ */
+ build_append_int_noprefix(table_data, 0x0000001, 4);
+
+ build_header(linker, table_data, (void *)(table_data->data + iommu_start),
+ "IVRS", table_data->len - iommu_start, 1, NULL, NULL);
+}
static GArray *
build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned rsdt_tbl_offset)
@@ -2622,11 +2727,6 @@ static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
return true;
}
-static bool acpi_has_iommu(void)
-{
- return !!x86_iommu_get_default();
-}
-
static
void acpi_build(AcpiBuildTables *tables, MachineState *machine)
{
@@ -2706,13 +2806,19 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
acpi_add_table(table_offsets, tables_blob);
build_mcfg_q35(tables_blob, tables->linker, &mcfg);
}
- if (acpi_has_iommu()) {
- acpi_add_table(table_offsets, tables_blob);
- build_dmar_q35(tables_blob, tables->linker);
+ if (x86_iommu_get_default()) {
+ IommuType IOMMUType = x86_iommu_get_type();
+ if (IOMMUType == TYPE_AMD) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_amd_iommu(tables_blob, tables->linker);
+ } else if (IOMMUType == TYPE_INTEL) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_dmar_q35(tables_blob, tables->linker);
+ }
}
if (pcms->acpi_nvdimm_state.is_enabled) {
nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
- pcms->acpi_nvdimm_state.dsm_mem);
+ &pcms->acpi_nvdimm_state, machine->ram_slots);
}
/* Add tables supplied by user (if any) */
@@ -2754,7 +2860,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
*/
int legacy_aml_len =
pcmc->legacy_acpi_table_size +
- ACPI_BUILD_LEGACY_CPU_AML_SIZE * max_cpus;
+ ACPI_BUILD_LEGACY_CPU_AML_SIZE * pcms->apic_id_limit;
int legacy_table_size =
ROUND_UP(tables_blob->len - aml_len + legacy_aml_len,
ACPI_BUILD_ALIGN_SIZE);
@@ -2830,7 +2936,7 @@ static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
uint64_t max_size)
{
return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
- name, acpi_build_update, build_state);
+ name, acpi_build_update, build_state, NULL);
}
static const VMStateDescription vmstate_acpi_build = {
@@ -2855,7 +2961,7 @@ void acpi_setup(void)
return;
}
- if (!pcmc->has_acpi_build) {
+ if (!pcms->acpi_build_enabled) {
ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
return;
}
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
new file mode 100644
index 0000000000..47b79d9112
--- /dev/null
+++ b/hw/i386/amd_iommu.c
@@ -0,0 +1,1212 @@
+/*
+ * QEMU emulation of AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Cache implementation inspired by hw/i386/intel_iommu.c
+ */
+#include "qemu/osdep.h"
+#include "hw/i386/amd_iommu.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+/* used AMD-Vi MMIO registers */
+const char *amdvi_mmio_low[] = {
+ "AMDVI_MMIO_DEVTAB_BASE",
+ "AMDVI_MMIO_CMDBUF_BASE",
+ "AMDVI_MMIO_EVTLOG_BASE",
+ "AMDVI_MMIO_CONTROL",
+ "AMDVI_MMIO_EXCL_BASE",
+ "AMDVI_MMIO_EXCL_LIMIT",
+ "AMDVI_MMIO_EXT_FEATURES",
+ "AMDVI_MMIO_PPR_BASE",
+ "UNHANDLED"
+};
+const char *amdvi_mmio_high[] = {
+ "AMDVI_MMIO_COMMAND_HEAD",
+ "AMDVI_MMIO_COMMAND_TAIL",
+ "AMDVI_MMIO_EVTLOG_HEAD",
+ "AMDVI_MMIO_EVTLOG_TAIL",
+ "AMDVI_MMIO_STATUS",
+ "AMDVI_MMIO_PPR_HEAD",
+ "AMDVI_MMIO_PPR_TAIL",
+ "UNHANDLED"
+};
+
+struct AMDVIAddressSpace {
+ uint8_t bus_num; /* bus number */
+ uint8_t devfn; /* device function */
+ AMDVIState *iommu_state; /* AMDVI - one per machine */
+ MemoryRegion iommu; /* Device's address translation region */
+ MemoryRegion iommu_ir; /* Device's interrupt remapping region */
+ AddressSpace as; /* device's corresponding address space */
+};
+
+/* AMDVI cache entry */
+typedef struct AMDVIIOTLBEntry {
+ uint16_t domid; /* assigned domain id */
+ uint16_t devid; /* device owning entry */
+ uint64_t perms; /* access permissions */
+ uint64_t translated_addr; /* translated address */
+ uint64_t page_mask; /* physical page size */
+} AMDVIIOTLBEntry;
+
+/* configure MMIO registers at startup/reset */
+static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
+ uint64_t romask, uint64_t w1cmask)
+{
+ stq_le_p(&s->mmior[addr], val);
+ stq_le_p(&s->romask[addr], romask);
+ stq_le_p(&s->w1cmask[addr], w1cmask);
+}
+
+static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
+{
+ return lduw_le_p(&s->mmior[addr]);
+}
+
+static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
+{
+ return ldl_le_p(&s->mmior[addr]);
+}
+
+static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
+{
+ return ldq_le_p(&s->mmior[addr]);
+}
+
+/* internal write */
+static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
+{
+ stq_le_p(&s->mmior[addr], val);
+}
+
+/* external write */
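+/*
+ * Guest writes are filtered through two per-register masks set up by
+ * amdvi_set_quad(): bits set in romask keep their old value (read-only),
+ * and bits set in w1cmask are cleared when the guest writes 1 to them.
+ */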
+static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
+{
+ uint16_t romask = lduw_le_p(&s->romask[addr]);
+ uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
+ uint16_t oldval = lduw_le_p(&s->mmior[addr]);
+ stw_le_p(&s->mmior[addr],
+ ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+}
+
+static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
+{
+ uint32_t romask = ldl_le_p(&s->romask[addr]);
+ uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
+ uint32_t oldval = ldl_le_p(&s->mmior[addr]);
+ stl_le_p(&s->mmior[addr],
+ ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+}
+
+static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ uint64_t romask = ldq_le_p(&s->romask[addr]);
+ uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
+ uint64_t oldval = ldq_le_p(&s->mmior[addr]);
+ stq_le_p(&s->mmior[addr],
+ ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+}
+
+/* test whether any of the given mask bits are set in a 64-bit register */
+static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ return amdvi_readq(s, addr) & val;
+}
+
+/* OR a 64-bit register with a 64-bit value storing result in the register */
+static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val);
+}
+
+/* AND a 64-bit register with a 64-bit value storing result in the register */
+static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
+{
+ amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val);
+}
+
+static void amdvi_generate_msi_interrupt(AMDVIState *s)
+{
+ MSIMessage msg = {};
+ MemTxAttrs attrs = {
+ .requester_id = pci_requester_id(&s->pci.dev)
+ };
+
+ if (msi_enabled(&s->pci.dev)) {
+ msg = msi_get_message(&s->pci.dev, 0);
+ address_space_stl_le(&address_space_memory, msg.address, msg.data,
+ attrs, NULL);
+ }
+}
+
+static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
+{
+ /* event logging not enabled */
+ if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
+ AMDVI_MMIO_STATUS_EVT_OVF)) {
+ return;
+ }
+
+ /* event log buffer full */
+ if (s->evtlog_tail >= s->evtlog_len) {
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
+ /* generate interrupt */
+ amdvi_generate_msi_interrupt(s);
+ return;
+ }
+
+ if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
+ evt, AMDVI_EVENT_LEN)) {
+ trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
+ }
+
+ s->evtlog_tail += AMDVI_EVENT_LEN;
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
+ amdvi_generate_msi_interrupt(s);
+}
+
+static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
+ int length)
+{
+ int index = start / 64, bitpos = start % 64;
+ uint64_t mask = MAKE_64BIT_MASK(start, length);
+ buffer[index] &= ~mask;
+ buffer[index] |= (value << bitpos) & mask;
+}
+/*
+ * AMDVi event structure
+ * 0:15 -> DeviceID
+ * 55:63 -> event type + miscellaneous info
+ * 63:127 -> related address
+ */
+static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
+ uint16_t info)
+{
+ amdvi_setevent_bits(evt, devid, 0, 16);
+ amdvi_setevent_bits(evt, info, 55, 8);
+ amdvi_setevent_bits(evt, addr, 63, 64);
+}
+/* log an error encountered during a page walk
+ *
+ * @addr: virtual address in translation request
+ */
+static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
+ hwaddr addr, uint16_t info)
+{
+ uint64_t evt[4];
+
+ info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
+ amdvi_encode_event(evt, devid, addr, info);
+ amdvi_log_event(s, evt);
+ pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+}
+/*
+ * log a master abort accessing device table
+ * @devtab : address of device table entry
+ * @info : error flags
+ */
+static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
+ hwaddr devtab, uint16_t info)
+{
+ uint64_t evt[4];
+
+ info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;
+
+ amdvi_encode_event(evt, devid, devtab, info);
+ amdvi_log_event(s, evt);
+ pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+}
+/* log an event trying to access command buffer
+ * @addr : address that couldn't be accessed
+ */
+static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
+{
+ uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;
+
+ amdvi_encode_event(evt, 0, addr, info);
+ amdvi_log_event(s, evt);
+ pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+}
+/* log an illegal command event
+ * @addr : address of illegal command
+ */
+static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
+ hwaddr addr)
+{
+ uint64_t evt[4];
+
+ info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
+ amdvi_encode_event(evt, 0, addr, info);
+ amdvi_log_event(s, evt);
+}
+/* log an error accessing device table
+ *
+ * @devid : device owning the table entry
+ * @devtab : address of device table entry
+ * @info : error flags
+ */
+static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
+ hwaddr addr, uint16_t info)
+{
+ uint64_t evt[4];
+
+ info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
+ amdvi_encode_event(evt, devid, addr, info);
+ amdvi_log_event(s, evt);
+}
+/* log an error accessing a PTE entry
+ * @addr : address that couldn't be accessed
+ */
+static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
+ hwaddr addr, uint16_t info)
+{
+ uint64_t evt[4];
+
+ info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
+ amdvi_encode_event(evt, devid, addr, info);
+ amdvi_log_event(s, evt);
+ pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
+ PCI_STATUS_SIG_TARGET_ABORT);
+}
+
+static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+ return *((const uint64_t *)v1) == *((const uint64_t *)v2);
+}
+
+static guint amdvi_uint64_hash(gconstpointer v)
+{
+ return (guint)*(const uint64_t *)v;
+}
+
+static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
+ uint64_t devid)
+{
+ uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
+ ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+ return g_hash_table_lookup(s->iotlb, &key);
+}
+
+static void amdvi_iotlb_reset(AMDVIState *s)
+{
+ assert(s->iotlb);
+ trace_amdvi_iotlb_reset();
+ g_hash_table_remove_all(s->iotlb);
+}
+
+static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
+ uint16_t devid = *(uint16_t *)user_data;
+ return entry->devid == devid;
+}
+
+static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
+ uint64_t devid)
+{
+ uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
+ ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+ g_hash_table_remove(s->iotlb, &key);
+}
+
+static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
+ uint64_t gpa, IOMMUTLBEntry to_cache,
+ uint16_t domid)
+{
+ AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
+ uint64_t *key = g_new(uint64_t, 1);
+ uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;
+
+ /* don't cache erroneous translations */
+ if (to_cache.perm != IOMMU_NONE) {
+ trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid), gpa, to_cache.translated_addr);
+
+ if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
+ amdvi_iotlb_reset(s);
+ }
+
+ entry->domid = domid;
+ entry->perms = to_cache.perm;
+ entry->translated_addr = to_cache.translated_addr;
+ entry->page_mask = to_cache.addr_mask;
+ *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
+ g_hash_table_replace(s->iotlb, key, entry);
+ }
+}
+
+static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
+{
+ /* pad the last 3 bits */
+ hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
+ uint64_t data = cpu_to_le64(cmd[1]);
+
+ if (extract64(cmd[0], 51, 8)) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+ if (extract64(cmd[0], 0, 1)) {
+ if (dma_memory_write(&address_space_memory, addr, &data,
+ AMDVI_COMPLETION_DATA_SIZE)) {
+ trace_amdvi_completion_wait_fail(addr);
+ }
+ }
+ /* set completion interrupt */
+ if (extract64(cmd[0], 1, 1)) {
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
+ /* generate interrupt */
+ amdvi_generate_msi_interrupt(s);
+ }
+ trace_amdvi_completion_wait(addr, data);
+}
+
+/* log error without aborting since Linux seems to be using reserved bits */
+static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
+{
+ uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));
+
+ /* This command should invalidate internal caches, of which this model has none */
+ if (extract64(cmd[0], 15, 16) || cmd[1]) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+ trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid));
+}
+
+static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
+{
+ if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
+ extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
+ || extract64(cmd[1], 47, 16)) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+ trace_amdvi_ppr_exec();
+}
+
+static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
+{
+ if (extract64(cmd[0], 0, 60) || cmd[1]) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+
+ amdvi_iotlb_reset(s);
+ trace_amdvi_all_inval();
+}
+
+static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
+ uint16_t domid = *(uint16_t *)user_data;
+ return entry->domid == domid;
+}
+
+/* we don't have devid - we can't remove pages by address */
+static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
+{
+ uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));
+
+ if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
+ extract64(cmd[0], 3, 10)) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+
+ g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
+ &domid);
+ trace_amdvi_pages_inval(domid);
+}
+
+static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
+{
+ if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
+ extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
+ extract64(cmd[1], 5, 7)) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+
+ trace_amdvi_prefetch_pages();
+}
+
+static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
+{
+ if (extract64(cmd[0], 16, 16) || cmd[1]) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ return;
+ }
+
+ trace_amdvi_intr_inval();
+}
+
+/* FIXME: Try to work with the specified size instead of all the pages
+ * when the S bit is on
+ */
+static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
+{
+ uint16_t devid = extract64(cmd[0], 0, 16);
+ if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
+ amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ return;
+ }
+
+ if (extract64(cmd[1], 0, 1)) {
+ g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
+ &devid);
+ } else {
+ amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
+ cpu_to_le16(extract64(cmd[1], 0, 16)));
+ }
+ trace_amdvi_iotlb_inval();
+}
+
+/* not honouring reserved bits is regarded as an illegal command */
+static void amdvi_cmdbuf_exec(AMDVIState *s)
+{
+ uint64_t cmd[2];
+
+ if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
+ cmd, AMDVI_COMMAND_SIZE)) {
+ trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
+ amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
+ return;
+ }
+
+ switch (extract64(cmd[0], 60, 4)) {
+ case AMDVI_CMD_COMPLETION_WAIT:
+ amdvi_completion_wait(s, cmd);
+ break;
+ case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
+ amdvi_inval_devtab_entry(s, cmd);
+ break;
+ case AMDVI_CMD_INVAL_AMDVI_PAGES:
+ amdvi_inval_pages(s, cmd);
+ break;
+ case AMDVI_CMD_INVAL_IOTLB_PAGES:
+ iommu_inval_iotlb(s, cmd);
+ break;
+ case AMDVI_CMD_INVAL_INTR_TABLE:
+ amdvi_inval_inttable(s, cmd);
+ break;
+ case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
+ amdvi_prefetch_pages(s, cmd);
+ break;
+ case AMDVI_CMD_COMPLETE_PPR_REQUEST:
+ amdvi_complete_ppr(s, cmd);
+ break;
+ case AMDVI_CMD_INVAL_AMDVI_ALL:
+ amdvi_inval_all(s, cmd);
+ break;
+ default:
+ trace_amdvi_unhandled_command(extract64(cmd[1], 60, 4));
+ /* log illegal command */
+ amdvi_log_illegalcom_error(s, extract64(cmd[1], 60, 4),
+ s->cmdbuf + s->cmdbuf_head);
+ }
+}
+
+static void amdvi_cmdbuf_run(AMDVIState *s)
+{
+ if (!s->cmdbuf_enabled) {
+ trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
+ return;
+ }
+
+ /* check if there is work to do. */
+ while (s->cmdbuf_head != s->cmdbuf_tail) {
+ trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
+ amdvi_cmdbuf_exec(s);
+ s->cmdbuf_head += AMDVI_COMMAND_SIZE;
+ amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);
+
+ /* wrap head pointer */
+ if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
+ s->cmdbuf_head = 0;
+ }
+ }
+}
+
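+/* registers below offset 0x2000 are traced from the "low" table,
+ * those at 0x2000 and above from the "high" table
+ */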
+static void amdvi_mmio_trace(hwaddr addr, unsigned size)
+{
+ uint8_t index = (addr & ~0x2000) / 8;
+
+ if ((addr & 0x2000)) {
+ /* high table */
+ index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
+ trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
+ } else {
+ index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
+ trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
+ }
+}
+
+static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
+{
+ AMDVIState *s = opaque;
+
+ uint64_t val = -1;
+ if (addr + size > AMDVI_MMIO_SIZE) {
+ trace_amdvi_mmio_read("error: addr outside region: max ",
+ (uint64_t)AMDVI_MMIO_SIZE, addr, size);
+ return (uint64_t)-1;
+ }
+
+ if (size == 2) {
+ val = amdvi_readw(s, addr);
+ } else if (size == 4) {
+ val = amdvi_readl(s, addr);
+ } else if (size == 8) {
+ val = amdvi_readq(s, addr);
+ }
+ amdvi_mmio_trace(addr, size);
+
+ return val;
+}
+
+static void amdvi_handle_control_write(AMDVIState *s)
+{
+ unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
+ s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);
+
+ s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
+ s->evtlog_enabled = s->enabled && !!(control &
+ AMDVI_MMIO_CONTROL_EVENTLOGEN);
+
+ s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
+ s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
+ s->cmdbuf_enabled = s->enabled && !!(control &
+ AMDVI_MMIO_CONTROL_CMDBUFLEN);
+
+ /* update the flags depending on the control register */
+ if (s->cmdbuf_enabled) {
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
+ } else {
+ amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
+ }
+ if (s->evtlog_enabled) {
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
+ } else {
+ amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
+ }
+
+ trace_amdvi_control_status(control);
+ amdvi_cmdbuf_run(s);
+}
+
+static inline void amdvi_handle_devtab_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
+ s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);
+
+ /* set device table length */
+ s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
+ (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
+ AMDVI_MMIO_DEVTAB_ENTRY_SIZE);
+}
+
+static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
+{
+ s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
+ & AMDVI_MMIO_CMDBUF_HEAD_MASK;
+ amdvi_cmdbuf_run(s);
+}
+
+static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
+{
+ s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
+ & AMDVI_MMIO_CMDBUF_BASE_MASK;
+ s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
+ & AMDVI_MMIO_CMDBUF_SIZE_MASK);
+ s->cmdbuf_head = s->cmdbuf_tail = 0;
+}
+
+static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
+{
+ s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
+ & AMDVI_MMIO_CMDBUF_TAIL_MASK;
+ amdvi_cmdbuf_run(s);
+}
+
+static inline void amdvi_handle_excllim_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
+ s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
+ AMDVI_MMIO_EXCL_LIMIT_LOW;
+}
+
+static inline void amdvi_handle_evtbase_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
+ s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
+ s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
+ & AMDVI_MMIO_EVTLOG_SIZE_MASK);
+}
+
+static inline void amdvi_handle_evttail_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
+ s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
+}
+
+static inline void amdvi_handle_evthead_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
+ s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
+}
+
+static inline void amdvi_handle_pprbase_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
+ s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
+ s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
+ & AMDVI_MMIO_PPRLOG_SIZE_MASK);
+}
+
+static inline void amdvi_handle_pprhead_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
+ s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
+}
+
+static inline void amdvi_handle_pprtail_write(AMDVIState *s)
+{
+ uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
+ s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
+}
+
+/* FIXME: something might go wrong if System Software writes in chunks
+ * of one byte. Linux writes in chunks of 4 bytes, so this currently
+ * works correctly with Linux, but it will definitely break if software
+ * reads/writes 8 bytes at a time.
+ */
+static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
+ hwaddr addr)
+{
+ if (size == 2) {
+ amdvi_writew(s, addr, val);
+ } else if (size == 4) {
+ amdvi_writel(s, addr, val);
+ } else if (size == 8) {
+ amdvi_writeq(s, addr, val);
+ }
+}
+
+static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ AMDVIState *s = opaque;
+ unsigned long offset = addr & 0x07;
+
+ if (addr + size > AMDVI_MMIO_SIZE) {
+ trace_amdvi_mmio_write("error: addr outside region: max ",
+ (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
+ return;
+ }
+
+ amdvi_mmio_trace(addr, size);
+ switch (addr & ~0x07) {
+ case AMDVI_MMIO_CONTROL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_control_write(s);
+ break;
+ case AMDVI_MMIO_DEVICE_TABLE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ /* set device table address
+ * This also suffers from the inability to tell whether software
+ * has finished writing
+ */
+ if (offset || (size == 8)) {
+ amdvi_handle_devtab_write(s);
+ }
+ break;
+ case AMDVI_MMIO_COMMAND_HEAD:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_cmdhead_write(s);
+ break;
+ case AMDVI_MMIO_COMMAND_BASE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ /* FIXME - make sure System Software has finished writing, in case
+ * it writes in chunks of less than 8 bytes, in a robust way. For
+ * now, this hack works for the Linux driver.
+ */
+ if (offset || (size == 8)) {
+ amdvi_handle_cmdbase_write(s);
+ }
+ break;
+ case AMDVI_MMIO_COMMAND_TAIL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_cmdtail_write(s);
+ break;
+ case AMDVI_MMIO_EVENT_BASE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_evtbase_write(s);
+ break;
+ case AMDVI_MMIO_EVENT_HEAD:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_evthead_write(s);
+ break;
+ case AMDVI_MMIO_EVENT_TAIL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_evttail_write(s);
+ break;
+ case AMDVI_MMIO_EXCL_LIMIT:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_excllim_write(s);
+ break;
+ /* PPR log base - unused for now */
+ case AMDVI_MMIO_PPR_BASE:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_pprbase_write(s);
+ break;
+ /* PPR log head - also unused for now */
+ case AMDVI_MMIO_PPR_HEAD:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_pprhead_write(s);
+ break;
+ /* PPR log tail - unused for now */
+ case AMDVI_MMIO_PPR_TAIL:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ amdvi_handle_pprtail_write(s);
+ break;
+ }
+}
+
+static inline uint64_t amdvi_get_perms(uint64_t entry)
+{
+ return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
+ AMDVI_DEV_PERM_SHIFT;
+}
+
+/* a valid entry should have V = 1 and reserved bits honoured */
+static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
+ uint64_t *dte)
+{
+ if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
+ || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
+ || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
+ amdvi_log_illegaldevtab_error(s, devid,
+ s->devtab +
+ devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
+ return false;
+ }
+
+ return dte[0] & AMDVI_DEV_VALID;
+}
+
+/* get a device table entry given the devid */
+static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
+{
+ uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;
+
+ if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
+ AMDVI_DEVTAB_ENTRY_SIZE)) {
+ trace_amdvi_dte_get_fail(s->devtab, offset);
+ /* log error accessing dte */
+ amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
+ return false;
+ }
+
+ *entry = le64_to_cpu(*entry);
+ if (!amdvi_validate_dte(s, devid, entry)) {
+ trace_amdvi_invalid_dte(entry[0]);
+ return false;
+ }
+
+ return true;
+}
+
+/* get pte translation mode */
+static inline uint8_t get_pte_translation_mode(uint64_t pte)
+{
+ return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
+}
+
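+/* a "next level" code of 7 means the PTE maps a huge page; its size is
+ * encoded by the position of the lowest zero bit in the address field
+ */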
+static inline uint64_t pte_override_page_mask(uint64_t pte)
+{
+ uint8_t page_mask = 12;
+ uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) ^ AMDVI_DEV_PT_ROOT_MASK;
+ /* find the first zero bit */
+ while (addr & 1) {
+ page_mask++;
+ addr = addr >> 1;
+ }
+
+ return ~((1ULL << page_mask) - 1);
+}
+
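+/* page mask covered by a PTE found at the given level:
+ * level 1 -> 4 KiB, level 2 -> 2 MiB, level 3 -> 1 GiB, and so on
+ */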
+static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
+{
+ return ~((1UL << ((oldlevel * 9) + 3)) - 1);
+}
+
+static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
+ uint16_t devid)
+{
+ uint64_t pte;
+
+ if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
+ trace_amdvi_get_pte_hwerror(pte_addr);
+ amdvi_log_pagetab_error(s, devid, pte_addr, 0);
+ pte = 0;
+ return pte;
+ }
+
+ pte = le64_to_cpu(pte);
+ return pte;
+}
+
+static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
+ IOMMUTLBEntry *ret, unsigned perms,
+ hwaddr addr)
+{
+ unsigned level, present, pte_perms, oldlevel;
+ uint64_t pte = dte[0], pte_addr, page_mask;
+
+ /* make sure the DTE has TV = 1 */
+ if (pte & AMDVI_DEV_TRANSLATION_VALID) {
+ level = get_pte_translation_mode(pte);
+ if (level >= 7) {
+ trace_amdvi_mode_invalid(level, addr);
+ return;
+ }
+ if (level == 0) {
+ goto no_remap;
+ }
+
+ /* we are at the leaf page table or page table encodes a huge page */
+ while (level > 0) {
+ pte_perms = amdvi_get_perms(pte);
+ present = pte & 1;
+ if (!present || perms != (perms & pte_perms)) {
+ amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
+ trace_amdvi_page_fault(addr);
+ return;
+ }
+
+ /* go to the next lower level */
+ pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
+ /* add offset and load pte */
+ pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
+ pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
+ if (!pte) {
+ return;
+ }
+ oldlevel = level;
+ level = get_pte_translation_mode(pte);
+ if (level == 0x7) {
+ break;
+ }
+ }
+
+ if (level == 0x7) {
+ page_mask = pte_override_page_mask(pte);
+ } else {
+ page_mask = pte_get_page_mask(oldlevel);
+ }
+
+ /* get access permissions from pte */
+ ret->iova = addr & page_mask;
+ ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
+ ret->addr_mask = ~page_mask;
+ ret->perm = amdvi_get_perms(pte);
+ return;
+ }
+no_remap:
+ ret->iova = addr & AMDVI_PAGE_MASK_4K;
+ ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
+ ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
+ ret->perm = amdvi_get_perms(pte);
+}
+
+static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
+ bool is_write, IOMMUTLBEntry *ret)
+{
+ AMDVIState *s = as->iommu_state;
+ uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
+ AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
+ uint64_t entry[4];
+
+ if (iotlb_entry) {
+ trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
+ ret->iova = addr & ~iotlb_entry->page_mask;
+ ret->translated_addr = iotlb_entry->translated_addr;
+ ret->addr_mask = iotlb_entry->page_mask;
+ ret->perm = iotlb_entry->perms;
+ return;
+ }
+
+ /* devices with V = 0 are not translated */
+ if (!amdvi_get_dte(s, devid, entry)) {
+ goto out;
+ }
+
+ amdvi_page_walk(as, entry, ret,
+ is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);
+
+ amdvi_update_iotlb(s, devid, addr, *ret,
+ entry[1] & AMDVI_DEV_DOMID_ID_MASK);
+ return;
+
+out:
+ ret->iova = addr & AMDVI_PAGE_MASK_4K;
+ ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
+ ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
+ ret->perm = IOMMU_RW;
+}
+
+static inline bool amdvi_is_interrupt_addr(hwaddr addr)
+{
+ return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
+}
+
+static IOMMUTLBEntry amdvi_translate(MemoryRegion *iommu, hwaddr addr,
+ bool is_write)
+{
+ AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
+ AMDVIState *s = as->iommu_state;
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = 0,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE
+ };
+
+ if (!s->enabled) {
+ /* AMDVI disabled - corresponds to iommu=off, not to a
+ * failure to provide any parameter
+ */
+ ret.iova = addr & AMDVI_PAGE_MASK_4K;
+ ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
+ ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
+ ret.perm = IOMMU_RW;
+ return ret;
+ } else if (amdvi_is_interrupt_addr(addr)) {
+ ret.iova = addr & AMDVI_PAGE_MASK_4K;
+ ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
+ ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
+ ret.perm = IOMMU_WO;
+ return ret;
+ }
+
+ amdvi_do_translate(as, addr, is_write, &ret);
+ trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
+ PCI_FUNC(as->devfn), addr, ret.translated_addr);
+ return ret;
+}
+
+static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+ AMDVIState *s = opaque;
+ AMDVIAddressSpace **iommu_as;
+ int bus_num = pci_bus_num(bus);
+
+ iommu_as = s->address_spaces[bus_num];
+
+ /* allocate memory during the first run */
+ if (!iommu_as) {
+ iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
+ s->address_spaces[bus_num] = iommu_as;
+ }
+
+ /* set up AMD-Vi region */
+ if (!iommu_as[devfn]) {
+ iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
+ iommu_as[devfn]->bus_num = (uint8_t)bus_num;
+ iommu_as[devfn]->devfn = (uint8_t)devfn;
+ iommu_as[devfn]->iommu_state = s;
+
+ memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
+ &s->iommu_ops, "amd-iommu", UINT64_MAX);
+ address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
+ "amd-iommu");
+ }
+ return &iommu_as[devfn]->as;
+}
+
+static const MemoryRegionOps mmio_mem_ops = {
+ .read = amdvi_mmio_read,
+ .write = amdvi_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ }
+};
+
+static void amdvi_iommu_notify_flag_changed(MemoryRegion *iommu,
+ IOMMUNotifierFlag old,
+ IOMMUNotifierFlag new)
+{
+ AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
+
+ if (new & IOMMU_NOTIFIER_MAP) {
+ error_report("device %02x.%02x.%x requires iommu notifier which is not "
+ "currently supported", as->bus_num, PCI_SLOT(as->devfn),
+ PCI_FUNC(as->devfn));
+ exit(1);
+ }
+}
+
+static void amdvi_init(AMDVIState *s)
+{
+ amdvi_iotlb_reset(s);
+
+ s->iommu_ops.translate = amdvi_translate;
+ s->iommu_ops.notify_flag_changed = amdvi_iommu_notify_flag_changed;
+ s->devtab_len = 0;
+ s->cmdbuf_len = 0;
+ s->cmdbuf_head = 0;
+ s->cmdbuf_tail = 0;
+ s->evtlog_head = 0;
+ s->evtlog_tail = 0;
+ s->excl_enabled = false;
+ s->excl_allow = false;
+ s->mmio_enabled = false;
+ s->enabled = false;
+ s->ats_enabled = false;
+ s->cmdbuf_enabled = false;
+
+ /* reset MMIO */
+ memset(s->mmior, 0, AMDVI_MMIO_SIZE);
+ amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
+ 0xffffffffffffffef, 0);
+ amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);
+
+ /* reset device ident */
+ pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
+ pci_config_set_prog_interface(s->pci.dev.config, 00);
+ pci_config_set_device_id(s->pci.dev.config, s->devid);
+ pci_config_set_class(s->pci.dev.config, 0x0806);
+
+ /* reset AMDVI specific capabilities, all r/o */
+ pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
+ pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
+ s->mmio.addr & ~(0xffff0000));
+ pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
+ (s->mmio.addr & ~(0xffff)) >> 16);
+ pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
+ 0xff000000);
+ pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
+ pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
+ AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
+}
+
+static void amdvi_reset(DeviceState *dev)
+{
+ AMDVIState *s = AMD_IOMMU_DEVICE(dev);
+
+ msi_reset(&s->pci.dev);
+ amdvi_init(s);
+}
+
+static void amdvi_realize(DeviceState *dev, Error **err)
+{
+ int ret = 0;
+ AMDVIState *s = AMD_IOMMU_DEVICE(dev);
+ X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
+ PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
+ s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
+ amdvi_uint64_equal, g_free, g_free);
+
+ /* This device should take care of IOMMU PCI properties */
+ x86_iommu->type = TYPE_AMD;
+ qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
+ object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
+ s->capab_offset = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
+ AMDVI_CAPAB_SIZE);
+ assert(s->capab_offset > 0);
+ ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, AMDVI_CAPAB_REG_SIZE);
+ assert(ret > 0);
+ ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, AMDVI_CAPAB_REG_SIZE);
+ assert(ret > 0);
+
+ /* set up MMIO */
+ memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
+ AMDVI_MMIO_SIZE);
+
+ sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
+ sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
+ pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
+ s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
+ msi_init(&s->pci.dev, 0, 1, true, false, err);
+ amdvi_init(s);
+}
+
+static const VMStateDescription vmstate_amdvi = {
+ .name = "amd-iommu",
+ .unmigratable = 1
+};
+
+static void amdvi_instance_init(Object *klass)
+{
+ AMDVIState *s = AMD_IOMMU_DEVICE(klass);
+
+ object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
+}
+
+static void amdvi_class_init(ObjectClass *klass, void* data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);
+
+ dc->reset = amdvi_reset;
+ dc->vmsd = &vmstate_amdvi;
+ dc->hotpluggable = false;
+ dc_class->realize = amdvi_realize;
+}
+
+static const TypeInfo amdvi = {
+ .name = TYPE_AMD_IOMMU_DEVICE,
+ .parent = TYPE_X86_IOMMU_DEVICE,
+ .instance_size = sizeof(AMDVIState),
+ .instance_init = amdvi_instance_init,
+ .class_init = amdvi_class_init
+};
+
+static const TypeInfo amdviPCI = {
+ .name = "AMDVI-PCI",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(AMDVIPCIState),
+};
+
+static void amdviPCI_register_types(void)
+{
+ type_register_static(&amdviPCI);
+ type_register_static(&amdvi);
+}
+
+type_init(amdviPCI_register_types);
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
new file mode 100644
index 0000000000..884926e9e7
--- /dev/null
+++ b/hw/i386/amd_iommu.h
@@ -0,0 +1,289 @@
+/*
+ * QEMU emulation of an AMD IOMMU (AMD-Vi)
+ *
+ * Copyright (C) 2011 Eduard - Gabriel Munteanu
+ * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef AMD_IOMMU_H_
+#define AMD_IOMMU_H_
+
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/sysbus.h"
+#include "sysemu/dma.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/i386/x86-iommu.h"
+
+/* Capability registers */
+#define AMDVI_CAPAB_BAR_LOW 0x04
+#define AMDVI_CAPAB_BAR_HIGH 0x08
+#define AMDVI_CAPAB_RANGE 0x0C
+#define AMDVI_CAPAB_MISC 0x10
+
+#define AMDVI_CAPAB_SIZE 0x18
+#define AMDVI_CAPAB_REG_SIZE 0x04
+
+/* Capability header data */
+#define AMDVI_CAPAB_ID_SEC 0xf
+#define AMDVI_CAPAB_FLAT_EXT (1 << 28)
+#define AMDVI_CAPAB_EFR_SUP (1 << 27)
+#define AMDVI_CAPAB_FLAG_NPCACHE (1 << 26)
+#define AMDVI_CAPAB_FLAG_HTTUNNEL (1 << 25)
+#define AMDVI_CAPAB_FLAG_IOTLBSUP (1 << 24)
+#define AMDVI_CAPAB_INIT_TYPE (3 << 16)
+
+/* No. of used MMIO registers */
+#define AMDVI_MMIO_REGS_HIGH 8
+#define AMDVI_MMIO_REGS_LOW 7
+
+/* MMIO registers */
+#define AMDVI_MMIO_DEVICE_TABLE 0x0000
+#define AMDVI_MMIO_COMMAND_BASE 0x0008
+#define AMDVI_MMIO_EVENT_BASE 0x0010
+#define AMDVI_MMIO_CONTROL 0x0018
+#define AMDVI_MMIO_EXCL_BASE 0x0020
+#define AMDVI_MMIO_EXCL_LIMIT 0x0028
+#define AMDVI_MMIO_EXT_FEATURES 0x0030
+#define AMDVI_MMIO_COMMAND_HEAD 0x2000
+#define AMDVI_MMIO_COMMAND_TAIL 0x2008
+#define AMDVI_MMIO_EVENT_HEAD 0x2010
+#define AMDVI_MMIO_EVENT_TAIL 0x2018
+#define AMDVI_MMIO_STATUS 0x2020
+#define AMDVI_MMIO_PPR_BASE 0x0038
+#define AMDVI_MMIO_PPR_HEAD 0x2030
+#define AMDVI_MMIO_PPR_TAIL 0x2038
+
+#define AMDVI_MMIO_SIZE 0x4000
+
+#define AMDVI_MMIO_DEVTAB_SIZE_MASK ((1ULL << 12) - 1)
+#define AMDVI_MMIO_DEVTAB_BASE_MASK (((1ULL << 52) - 1) & ~ \
+ AMDVI_MMIO_DEVTAB_SIZE_MASK)
+#define AMDVI_MMIO_DEVTAB_ENTRY_SIZE 32
+#define AMDVI_MMIO_DEVTAB_SIZE_UNIT 4096
+
+/* some of these are similar, but kept separate for readability */
+#define AMDVI_MMIO_CMDBUF_SIZE_BYTE (AMDVI_MMIO_COMMAND_BASE + 7)
+#define AMDVI_MMIO_CMDBUF_SIZE_MASK 0x0f
+#define AMDVI_MMIO_CMDBUF_BASE_MASK AMDVI_MMIO_DEVTAB_BASE_MASK
+#define AMDVI_MMIO_CMDBUF_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f)
+#define AMDVI_MMIO_CMDBUF_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+
+#define AMDVI_MMIO_EVTLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7)
+#define AMDVI_MMIO_EVTLOG_SIZE_MASK AMDVI_MMIO_CMDBUF_SIZE_MASK
+#define AMDVI_MMIO_EVTLOG_BASE_MASK AMDVI_MMIO_CMDBUF_BASE_MASK
+#define AMDVI_MMIO_EVTLOG_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f)
+#define AMDVI_MMIO_EVTLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+
+#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7)
+#define AMDVI_MMIO_PPRLOG_HEAD_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+#define AMDVI_MMIO_PPRLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
+#define AMDVI_MMIO_PPRLOG_BASE_MASK AMDVI_MMIO_EVTLOG_BASE_MASK
+#define AMDVI_MMIO_PPRLOG_SIZE_MASK AMDVI_MMIO_EVTLOG_SIZE_MASK
+
+#define AMDVI_MMIO_EXCL_ENABLED_MASK (1ULL << 0)
+#define AMDVI_MMIO_EXCL_ALLOW_MASK (1ULL << 1)
+#define AMDVI_MMIO_EXCL_LIMIT_MASK AMDVI_MMIO_DEVTAB_BASE_MASK
+#define AMDVI_MMIO_EXCL_LIMIT_LOW 0xfff
+
+/* mmio control register flags */
+#define AMDVI_MMIO_CONTROL_AMDVIEN (1ULL << 0)
+#define AMDVI_MMIO_CONTROL_HTTUNEN (1ULL << 1)
+#define AMDVI_MMIO_CONTROL_EVENTLOGEN (1ULL << 2)
+#define AMDVI_MMIO_CONTROL_EVENTINTEN (1ULL << 3)
+#define AMDVI_MMIO_CONTROL_COMWAITINTEN (1ULL << 4)
+#define AMDVI_MMIO_CONTROL_CMDBUFLEN (1ULL << 12)
+
+/* MMIO status register bits */
+#define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4)
+#define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3)
+#define AMDVI_MMIO_STATUS_COMP_INT (1 << 2)
+#define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0)
+
+#define AMDVI_CMDBUF_ID_BYTE 0x07
+#define AMDVI_CMDBUF_ID_RSHIFT 4
+
+#define AMDVI_CMD_COMPLETION_WAIT 0x01
+#define AMDVI_CMD_INVAL_DEVTAB_ENTRY 0x02
+#define AMDVI_CMD_INVAL_AMDVI_PAGES 0x03
+#define AMDVI_CMD_INVAL_IOTLB_PAGES 0x04
+#define AMDVI_CMD_INVAL_INTR_TABLE 0x05
+#define AMDVI_CMD_PREFETCH_AMDVI_PAGES 0x06
+#define AMDVI_CMD_COMPLETE_PPR_REQUEST 0x07
+#define AMDVI_CMD_INVAL_AMDVI_ALL 0x08
+
+#define AMDVI_DEVTAB_ENTRY_SIZE 32
+
+/* Device table entry bits 0:63 */
+#define AMDVI_DEV_VALID (1ULL << 0)
+#define AMDVI_DEV_TRANSLATION_VALID (1ULL << 1)
+#define AMDVI_DEV_MODE_MASK 0x7
+#define AMDVI_DEV_MODE_RSHIFT 9
+#define AMDVI_DEV_PT_ROOT_MASK 0xffffffffff000
+#define AMDVI_DEV_PT_ROOT_RSHIFT 12
+#define AMDVI_DEV_PERM_SHIFT 61
+#define AMDVI_DEV_PERM_READ (1ULL << 61)
+#define AMDVI_DEV_PERM_WRITE (1ULL << 62)
+
+/* Device table entry bits 64:127 */
+#define AMDVI_DEV_DOMID_ID_MASK ((1ULL << 16) - 1)
+
+/* Event codes and flags, as stored in the info field */
+#define AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY (0x1U << 12)
+#define AMDVI_EVENT_IOPF (0x2U << 12)
+#define AMDVI_EVENT_IOPF_I (1U << 3)
+#define AMDVI_EVENT_DEV_TAB_HW_ERROR (0x3U << 12)
+#define AMDVI_EVENT_PAGE_TAB_HW_ERROR (0x4U << 12)
+#define AMDVI_EVENT_ILLEGAL_COMMAND_ERROR (0x5U << 12)
+#define AMDVI_EVENT_COMMAND_HW_ERROR (0x6U << 12)
+
+#define AMDVI_EVENT_LEN 16
+#define AMDVI_PERM_READ (1 << 0)
+#define AMDVI_PERM_WRITE (1 << 1)
+
+#define AMDVI_FEATURE_PREFETCH (1ULL << 0) /* page prefetch */
+#define AMDVI_FEATURE_PPR (1ULL << 1) /* PPR Support */
+#define AMDVI_FEATURE_GT (1ULL << 4) /* Guest Translation */
+#define AMDVI_FEATURE_IA (1ULL << 6) /* inval all support */
+#define AMDVI_FEATURE_GA (1ULL << 7) /* guest VAPIC support */
+#define AMDVI_FEATURE_HE (1ULL << 8) /* hardware error regs */
+#define AMDVI_FEATURE_PC (1ULL << 9) /* Perf counters */
+
+/* reserved DTE bits */
+#define AMDVI_DTE_LOWER_QUAD_RESERVED 0x80300000000000fc
+#define AMDVI_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
+#define AMDVI_DTE_UPPER_QUAD_RESERVED 0x08f0000000000000
+
+/* AMDVI paging mode */
+#define AMDVI_GATS_MODE (6ULL << 12)
+#define AMDVI_HATS_MODE (6ULL << 10)
+
+/* IOTLB */
+#define AMDVI_IOTLB_MAX_SIZE 1024
+#define AMDVI_DEVID_SHIFT 36
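+/* IOTLB keys are (gpa >> AMDVI_PAGE_SHIFT_4K) | ((uint64_t)devid << AMDVI_DEVID_SHIFT) */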
+
+/* extended feature support */
+#define AMDVI_EXT_FEATURES (AMDVI_FEATURE_PREFETCH | AMDVI_FEATURE_PPR | \
+ AMDVI_FEATURE_IA | AMDVI_FEATURE_GT | AMDVI_FEATURE_HE | \
+ AMDVI_GATS_MODE | AMDVI_HATS_MODE)
+
+/* capabilities header */
+#define AMDVI_CAPAB_FEATURES (AMDVI_CAPAB_FLAT_EXT | \
+ AMDVI_CAPAB_FLAG_NPCACHE | AMDVI_CAPAB_FLAG_IOTLBSUP \
+ | AMDVI_CAPAB_ID_SEC | AMDVI_CAPAB_INIT_TYPE | \
+ AMDVI_CAPAB_FLAG_HTTUNNEL | AMDVI_CAPAB_EFR_SUP)
+
+/* AMDVI default address */
+#define AMDVI_BASE_ADDR 0xfed80000
+
+/* page management constants */
+#define AMDVI_PAGE_SHIFT 12
+#define AMDVI_PAGE_SIZE (1ULL << AMDVI_PAGE_SHIFT)
+
+#define AMDVI_PAGE_SHIFT_4K 12
+#define AMDVI_PAGE_MASK_4K (~((1ULL << AMDVI_PAGE_SHIFT_4K) - 1))
+
+#define AMDVI_MAX_VA_ADDR (48UL << 5)
+#define AMDVI_MAX_PH_ADDR (40UL << 8)
+#define AMDVI_MAX_GVA_ADDR (48UL << 15)
+
+/* Completion Wait data size */
+#define AMDVI_COMPLETION_DATA_SIZE 8
+
+#define AMDVI_COMMAND_SIZE 16
+
+#define AMDVI_INT_ADDR_FIRST 0xfee00000
+#define AMDVI_INT_ADDR_LAST 0xfeefffff
+
+#define TYPE_AMD_IOMMU_DEVICE "amd-iommu"
+#define AMD_IOMMU_DEVICE(obj)\
+ OBJECT_CHECK(AMDVIState, (obj), TYPE_AMD_IOMMU_DEVICE)
+
+#define TYPE_AMD_IOMMU_PCI "AMDVI-PCI"
+
+typedef struct AMDVIAddressSpace AMDVIAddressSpace;
+
+/* helper PCI device that provides the IOMMU's PCI config space */
+typedef struct AMDVIPCIState {
+ PCIDevice dev; /* The PCI device itself */
+} AMDVIPCIState;
+
+typedef struct AMDVIState {
+ X86IOMMUState iommu; /* IOMMU bus device */
+ AMDVIPCIState pci; /* IOMMU PCI device */
+
+ uint32_t version;
+ uint32_t capab_offset; /* capability offset pointer */
+
+ uint64_t mmio_addr;
+
+ uint32_t devid; /* auto-assigned devid */
+
+ bool enabled; /* IOMMU enabled */
+ bool ats_enabled; /* address translation enabled */
+ bool cmdbuf_enabled; /* command buffer enabled */
+ bool evtlog_enabled; /* event log enabled */
+ bool excl_enabled;
+
+ hwaddr devtab; /* base address device table */
+ size_t devtab_len; /* device table length */
+
+ hwaddr cmdbuf; /* command buffer base address */
+ uint64_t cmdbuf_len; /* command buffer length */
+ uint32_t cmdbuf_head; /* current IOMMU read position */
+ uint32_t cmdbuf_tail; /* next Software write position */
+ bool completion_wait_intr;
+
+ hwaddr evtlog; /* base address event log */
+ bool evtlog_intr;
+ uint32_t evtlog_len; /* event log length */
+ uint32_t evtlog_head; /* current IOMMU write position */
+ uint32_t evtlog_tail; /* current Software read position */
+
+ /* unused for now */
+ hwaddr excl_base; /* base DVA - IOMMU exclusion range */
+ hwaddr excl_limit; /* limit of IOMMU exclusion range */
+ bool excl_allow; /* translate accesses to the exclusion range */
+ bool excl_enable; /* exclusion range enabled */
+
+ hwaddr ppr_log; /* base address ppr log */
+ uint32_t pprlog_len; /* ppr log len */
+ uint32_t pprlog_head; /* ppr log head */
+ uint32_t pprlog_tail; /* ppr log tail */
+
+ MemoryRegion mmio; /* MMIO region */
+ uint8_t mmior[AMDVI_MMIO_SIZE]; /* read/write MMIO */
+ uint8_t w1cmask[AMDVI_MMIO_SIZE]; /* read/write 1 clear mask */
+ uint8_t romask[AMDVI_MMIO_SIZE]; /* MMIO read/only mask */
+ bool mmio_enabled;
+
+ /* IOMMU function */
+ MemoryRegionIOMMUOps iommu_ops;
+
+ /* for each served device */
+ AMDVIAddressSpace **address_spaces[PCI_BUS_MAX];
+
+ /* IOTLB */
+ GHashTable *iotlb;
+} AMDVIState;
+
+#endif
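
For reference, a minimal sketch (an assumption, not necessarily the exact amd_iommu.c code) of how a per-device IOTLB hash key can be packed from the constants above, i.e. the 4K guest frame number in the low bits and the 16-bit requester id at AMDVI_DEVID_SHIFT:

#include <stdint.h>

/* Sketch only: gfn in the low bits, BDF (requester id) starting at bit 36. */
static inline uint64_t amdvi_iotlb_key_sketch(uint16_t devid, uint64_t gpa)
{
    uint64_t gfn = gpa >> 12;                 /* AMDVI_PAGE_SHIFT_4K */
    return gfn | ((uint64_t)devid << 36);     /* AMDVI_DEVID_SHIFT   */
}
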
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 28c31a2cdf..5f3e35123d 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -21,16 +21,20 @@
#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
+#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
+#include "hw/i386/apic_internal.h"
+#include "kvm_i386.h"
/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
@@ -214,7 +218,7 @@ static void vtd_reset_iotlb(IntelIOMMUState *s)
g_hash_table_remove_all(s->iotlb);
}
-static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint8_t source_id,
+static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
uint32_t level)
{
return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
@@ -279,18 +283,17 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
hwaddr mesg_data_reg)
{
- hwaddr addr;
- uint32_t data;
+ MSIMessage msi;
assert(mesg_data_reg < DMAR_REG_SIZE);
assert(mesg_addr_reg < DMAR_REG_SIZE);
- addr = vtd_get_long_raw(s, mesg_addr_reg);
- data = vtd_get_long_raw(s, mesg_data_reg);
+ msi.address = vtd_get_long_raw(s, mesg_addr_reg);
+ msi.data = vtd_get_long_raw(s, mesg_data_reg);
- VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data);
- address_space_stl_le(&address_space_memory, addr, data,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32,
+ msi.address, msi.data);
+ apic_get_class()->send_msi(&msi);
}
/* Generate a fault event to software via MSI if conditions are met.
@@ -985,6 +988,7 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
mask = 7; /* Mask bit 2:0 in the SID field */
break;
}
+ mask = ~mask;
VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
" mask %"PRIu16, source_id, mask);
vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
@@ -1974,14 +1978,20 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
return ret;
}
-static void vtd_iommu_notify_started(MemoryRegion *iommu)
+static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
+ IOMMUNotifierFlag old,
+ IOMMUNotifierFlag new)
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
- hw_error("Device at bus %s addr %02x.%d requires iommu notifier which "
- "is currently not supported by intel-iommu emulation",
- vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn),
- PCI_FUNC(vtd_as->devfn));
+ if (new & IOMMU_NOTIFIER_MAP) {
+ error_report("Device at bus %s addr %02x.%d requires iommu "
+ "notifier which is currently not supported by "
+ "intel-iommu emulation",
+ vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn),
+ PCI_FUNC(vtd_as->devfn));
+ exit(1);
+ }
}
static const VMStateDescription vtd_vmstate = {
@@ -2005,6 +2015,9 @@ static const MemoryRegionOps vtd_mem_ops = {
static Property vtd_properties[] = {
DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
+ DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
+ ON_OFF_AUTO_AUTO),
+ DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2127,6 +2140,7 @@ static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
msg.dest_mode = irq->dest_mode;
msg.redir_hint = irq->redir_hint;
msg.dest = irq->dest;
+ msg.__addr_hi = irq->dest & 0xffffff00;
msg.__addr_head = cpu_to_le32(0xfee);
/* Keep this from original MSI address bits */
msg.__not_used = irq->msi_addr_last_bits;
@@ -2167,7 +2181,7 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
}
addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
- if (le16_to_cpu(addr.addr.__head) != 0xfee) {
+ if (addr.addr.__head != 0xfee) {
VTD_DPRINTF(GENERAL, "error: MSI addr low 32 bits invalid: "
"0x%"PRIx32, addr.data);
return -VTD_FR_IR_REQ_RSVD;
@@ -2203,6 +2217,8 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
}
} else {
uint8_t vector = origin->data & 0xff;
+ uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
VTD_DPRINTF(IR, "received IOAPIC interrupt");
/* IOAPIC entry vector should be aligned with IRTE vector
* (see vt-d spec 5.1.5.1). */
@@ -2211,6 +2227,15 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
"entry: %d, IRTE: %d, index: %d",
vector, irq.vector, index);
}
+
+ /* The Trigger Mode field must match the Trigger Mode in the IRTE.
+ * (see vt-d spec 5.1.5.1). */
+ if (trigger_mode != irq.trigger_mode) {
+ VTD_DPRINTF(GENERAL, "IOAPIC trigger mode inconsistent: "
+ "entry: %u, IRTE: %u, index: %d",
+ trigger_mode, irq.trigger_mode, index);
+ }
+
}
/*
@@ -2275,11 +2300,7 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
" for device sid 0x%04x",
to.address, to.data, sid);
- if (dma_memory_write(&address_space_memory, to.address,
- &to.data, size)) {
- VTD_DPRINTF(GENERAL, "error: fail to write 0x%"PRIx64
- " value 0x%"PRIx32, to.address, to.data);
- }
+ apic_get_class()->send_msi(&to);
return MEMTX_OK;
}
@@ -2348,7 +2369,7 @@ static void vtd_init(IntelIOMMUState *s)
memset(s->womask, 0, DMAR_REG_SIZE);
s->iommu_ops.translate = vtd_iommu_translate;
- s->iommu_ops.notify_started = vtd_iommu_notify_started;
+ s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
s->root = 0;
s->root_extended = false;
s->dmar_enabled = false;
@@ -2364,7 +2385,11 @@ static void vtd_init(IntelIOMMUState *s)
s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
if (x86_iommu->intr_supported) {
- s->ecap |= VTD_ECAP_IR | VTD_ECAP_EIM | VTD_ECAP_MHMV;
+ s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
+ if (s->intr_eim == ON_OFF_AUTO_ON) {
+ s->ecap |= VTD_ECAP_EIM;
+ }
+ assert(s->intr_eim != ON_OFF_AUTO_AUTO);
}
vtd_reset_context_cache(s);
@@ -2439,12 +2464,48 @@ static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
IntelIOMMUState *s = opaque;
VTDAddressSpace *vtd_as;
- assert(0 <= devfn && devfn <= X86_IOMMU_PCI_DEVFN_MAX);
+ assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);
vtd_as = vtd_find_add_as(s, bus, devfn);
return &vtd_as->as;
}
+static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
+{
+ X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
+
+    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
+ if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
+ !kvm_irqchip_is_split()) {
+ error_setg(errp, "Intel Interrupt Remapping cannot work with "
+ "kernel-irqchip=on, please use 'split|off'.");
+ return false;
+ }
+ if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
+ error_setg(errp, "eim=on cannot be selected without intremap=on");
+ return false;
+ }
+
+ if (s->intr_eim == ON_OFF_AUTO_AUTO) {
+ s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
+ && x86_iommu->intr_supported ?
+ ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
+ }
+ if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
+ if (!kvm_irqchip_in_kernel()) {
+ error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
+ return false;
+ }
+ if (!kvm_enable_x2apic()) {
+ error_setg(errp, "eim=on requires support on the KVM side"
+ "(X2APIC_API, first shipped in v4.7)");
+ return false;
+ }
+ }
+
+ return true;
+}
+
static void vtd_realize(DeviceState *dev, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
@@ -2453,6 +2514,12 @@ static void vtd_realize(DeviceState *dev, Error **errp)
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
VTD_DPRINTF(GENERAL, "");
+ x86_iommu->type = TYPE_INTEL;
+
+ if (!vtd_decide_config(s, errp)) {
+ return;
+ }
+
memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
"intel_iommu", DMAR_REG_SIZE);
@@ -2467,14 +2534,6 @@ static void vtd_realize(DeviceState *dev, Error **errp)
pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
/* Pseudo address space under root PCI bus. */
pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
-
- /* Currently Intel IOMMU IR only support "kernel-irqchip={off|split}" */
- if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
- !kvm_irqchip_is_split()) {
- error_report("Intel Interrupt Remapping cannot work with "
- "kernel-irqchip=on, please use 'split|off'.");
- exit(1);
- }
}
static void vtd_class_init(ObjectClass *klass, void *data)
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 0829a5064f..11abfa2233 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -115,7 +115,7 @@
/* The shift of source_id in the key of IOTLB hash table */
#define VTD_IOTLB_SID_SHIFT 36
-#define VTD_IOTLB_LVL_SHIFT 44
+#define VTD_IOTLB_LVL_SHIFT 52
#define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */
/* IOTLB_REG */
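
The VTD_IOTLB_LVL_SHIFT bump from 44 to 52 matches the key layout used by vtd_get_iotlb_key() earlier in this patch: the now 16-bit source id sits at bit 36, so a level shift of 44 would land inside the source-id field (36 + 16 = 52). A hedged sketch of the resulting packing (field widths are assumptions):

#include <stdint.h>

/* Sketch: gfn low, source id at bit 36, level at bit 52 - no overlap. */
static inline uint64_t vtd_iotlb_key_sketch(uint64_t gfn, uint16_t sid,
                                            uint32_t level)
{
    return gfn | ((uint64_t)sid << 36) | ((uint64_t)level << 52);
}
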
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index 2bd0de82b4..01cbaa88d2 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -15,6 +15,7 @@
#include "hw/i386/apic_internal.h"
#include "hw/pci/msi.h"
#include "sysemu/kvm.h"
+#include "target-i386/kvm_i386.h"
static inline void kvm_apic_set_reg(struct kvm_lapic_state *kapic,
int reg_id, uint32_t val)
@@ -28,13 +29,16 @@ static inline uint32_t kvm_apic_get_reg(struct kvm_lapic_state *kapic,
return *((uint32_t *)(kapic->regs + (reg_id << 4)));
}
-void kvm_put_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic)
+static void kvm_put_apic_state(APICCommonState *s, struct kvm_lapic_state *kapic)
{
- APICCommonState *s = APIC_COMMON(dev);
int i;
memset(kapic, 0, sizeof(*kapic));
- kvm_apic_set_reg(kapic, 0x2, s->id << 24);
+ if (kvm_has_x2apic_api() && s->apicbase & MSR_IA32_APICBASE_EXTD) {
+ kvm_apic_set_reg(kapic, 0x2, s->initial_apic_id);
+ } else {
+ kvm_apic_set_reg(kapic, 0x2, s->id << 24);
+ }
kvm_apic_set_reg(kapic, 0x8, s->tpr);
kvm_apic_set_reg(kapic, 0xd, s->log_dest << 24);
kvm_apic_set_reg(kapic, 0xe, s->dest_mode << 28 | 0x0fffffff);
@@ -59,7 +63,11 @@ void kvm_get_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic)
APICCommonState *s = APIC_COMMON(dev);
int i, v;
- s->id = kvm_apic_get_reg(kapic, 0x2) >> 24;
+ if (kvm_has_x2apic_api() && s->apicbase & MSR_IA32_APICBASE_EXTD) {
+ assert(kvm_apic_get_reg(kapic, 0x2) == s->initial_apic_id);
+ } else {
+ s->id = kvm_apic_get_reg(kapic, 0x2) >> 24;
+ }
s->tpr = kvm_apic_get_reg(kapic, 0x8);
s->arb_id = kvm_apic_get_reg(kapic, 0x9);
s->log_dest = kvm_apic_get_reg(kapic, 0xd) >> 24;
@@ -125,10 +133,30 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
}
}
-static void do_inject_external_nmi(void *data)
+static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
+{
+ APICCommonState *s = data.host_ptr;
+ struct kvm_lapic_state kapic;
+ int ret;
+
+ kvm_put_apicbase(s->cpu, s->apicbase);
+ kvm_put_apic_state(s, &kapic);
+
+ ret = kvm_vcpu_ioctl(CPU(s->cpu), KVM_SET_LAPIC, &kapic);
+ if (ret < 0) {
+ fprintf(stderr, "KVM_SET_LAPIC failed: %s\n", strerror(ret));
+ abort();
+ }
+}
+
+static void kvm_apic_post_load(APICCommonState *s)
+{
+ run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
+}
+
+static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data)
{
- APICCommonState *s = data;
- CPUState *cpu = CPU(s->cpu);
+ APICCommonState *s = data.host_ptr;
uint32_t lvt;
int ret;
@@ -146,7 +174,18 @@ static void do_inject_external_nmi(void *data)
static void kvm_apic_external_nmi(APICCommonState *s)
{
- run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
+ run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s));
+}
+
+static void kvm_send_msi(MSIMessage *msg)
+{
+ int ret;
+
+ ret = kvm_irqchip_send_msi(kvm_state, *msg);
+ if (ret < 0) {
+ fprintf(stderr, "KVM: injection failed, MSI lost (%s)\n",
+ strerror(-ret));
+ }
}
static uint64_t kvm_apic_mem_read(void *opaque, hwaddr addr,
@@ -159,13 +198,8 @@ static void kvm_apic_mem_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
MSIMessage msg = { .address = addr, .data = data };
- int ret;
- ret = kvm_irqchip_send_msi(kvm_state, msg);
- if (ret < 0) {
- fprintf(stderr, "KVM: injection failed, MSI lost (%s)\n",
- strerror(-ret));
- }
+ kvm_send_msi(&msg);
}
static const MemoryRegionOps kvm_apic_io_ops = {
@@ -178,6 +212,8 @@ static void kvm_apic_reset(APICCommonState *s)
{
/* Not used by KVM, which uses the CPU mp_state instead. */
s->wait_for_sipi = 0;
+
+ run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
}
static void kvm_apic_realize(DeviceState *dev, Error **errp)
@@ -206,9 +242,11 @@ static void kvm_apic_class_init(ObjectClass *klass, void *data)
k->set_base = kvm_apic_set_base;
k->set_tpr = kvm_apic_set_tpr;
k->get_tpr = kvm_apic_get_tpr;
+ k->post_load = kvm_apic_post_load;
k->enable_tpr_reporting = kvm_apic_enable_tpr_reporting;
k->vapic_base_update = kvm_apic_vapic_base_update;
k->external_nmi = kvm_apic_external_nmi;
+ k->send_msi = kvm_send_msi;
}
static const TypeInfo kvm_apic_info = {
diff --git a/hw/i386/kvm/i8259.c b/hw/i386/kvm/i8259.c
index 2b207de01b..11d1b726b6 100644
--- a/hw/i386/kvm/i8259.c
+++ b/hw/i386/kvm/i8259.c
@@ -92,7 +92,7 @@ static void kvm_pic_put(PICCommonState *s)
ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip);
if (ret < 0) {
- fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(ret));
+ fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(ret));
abort();
}
}
diff --git a/hw/i386/kvm/pci-assign.c b/hw/i386/kvm/pci-assign.c
index 8238fbc630..87dcbdd51a 100644
--- a/hw/i386/kvm/pci-assign.c
+++ b/hw/i386/kvm/pci-assign.c
@@ -1251,6 +1251,7 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev, Error **errp)
error_propagate(errp, local_err);
return -ENOTSUP;
}
+ dev->dev.cap_present |= QEMU_PCI_CAP_MSI;
dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI;
/* Only 32-bit/no-mask currently supported */
ret = pci_add_capability2(pci_dev, PCI_CAP_ID_MSI, pos, 10,
@@ -1285,6 +1286,7 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev, Error **errp)
error_propagate(errp, local_err);
return -ENOTSUP;
}
+ dev->dev.cap_present |= QEMU_PCI_CAP_MSIX;
dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX;
ret = pci_add_capability2(pci_dev, PCI_CAP_ID_MSIX, pos, 12,
&local_err);
@@ -1648,6 +1650,7 @@ static void assigned_dev_register_msix_mmio(AssignedDevice *dev, Error **errp)
dev->msix_table = NULL;
return;
}
+ dev->dev.msix_table = (uint8_t *)dev->msix_table;
assigned_dev_msix_reset(dev);
@@ -1665,6 +1668,7 @@ static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
error_report("error unmapping msix_table! %s", strerror(errno));
}
dev->msix_table = NULL;
+ dev->dev.msix_table = NULL;
}
static const VMStateDescription vmstate_assigned_device = {
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index 3bf1ddd976..b30d1b90c6 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -17,6 +17,7 @@
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "hw/sysbus.h"
+#include "tcg/tcg.h"
#define VAPIC_IO_PORT 0x7e
@@ -449,6 +450,9 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
resume_all_vcpus();
if (!kvm_enabled()) {
+ /* tb_lock will be reset when cpu_loop_exit_noexc longjmps
+ * back into the cpu_exec loop. */
+ tb_lock();
tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
cpu_loop_exit_noexc(cs);
}
@@ -483,10 +487,9 @@ typedef struct VAPICEnableTPRReporting {
bool enable;
} VAPICEnableTPRReporting;
-static void vapic_do_enable_tpr_reporting(void *data)
+static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data)
{
- VAPICEnableTPRReporting *info = data;
-
+ VAPICEnableTPRReporting *info = data.host_ptr;
apic_enable_tpr_access_reporting(info->apic, info->enable);
}
@@ -501,7 +504,7 @@ static void vapic_enable_tpr_reporting(bool enable)
CPU_FOREACH(cs) {
cpu = X86_CPU(cs);
info.apic = cpu->apic_state;
- run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
+ run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info));
}
}
@@ -734,10 +737,10 @@ static void vapic_realize(DeviceState *dev, Error **errp)
nb_option_roms++;
}
-static void do_vapic_enable(void *data)
+static void do_vapic_enable(CPUState *cs, run_on_cpu_data data)
{
- VAPICROMState *s = data;
- X86CPU *cpu = X86_CPU(first_cpu);
+ VAPICROMState *s = data.host_ptr;
+ X86CPU *cpu = X86_CPU(cs);
static const uint8_t enabled = 1;
cpu_physical_memory_write(s->vapic_paddr + offsetof(VAPICState, enabled),
@@ -758,7 +761,7 @@ static void kvmvapic_vm_state_change(void *opaque, int running,
if (s->state == VAPIC_ACTIVE) {
if (smp_cpus == 1) {
- run_on_cpu(first_cpu, do_vapic_enable, s);
+ run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s));
} else {
zero = g_malloc0(s->rom_state.vapic_size);
cpu_physical_memory_write(s->vapic_paddr, zero,
@@ -768,6 +771,7 @@ static void kvmvapic_vm_state_change(void *opaque, int running,
}
qemu_del_vm_change_state_handler(s->vmsentry);
+ s->vmsentry = NULL;
}
static int vapic_post_load(void *opaque, int version_id)
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 0daa4d1f7f..0779fa2639 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -68,6 +68,7 @@
#include "qapi-visit.h"
#include "qom/cpu.h"
#include "hw/nmi.h"
+#include "hw/i386/intel_iommu.h"
#include "sysemu/hax.h"
@@ -163,13 +164,15 @@ int cpu_get_pic_interrupt(CPUX86State *env)
X86CPU *cpu = x86_env_get_cpu(env);
int intno;
- intno = apic_get_interrupt(cpu->apic_state);
- if (intno >= 0) {
- return intno;
- }
- /* read the irq from the PIC */
- if (!apic_accept_pic_intr(cpu->apic_state)) {
- return -1;
+ if (!kvm_irqchip_in_kernel()) {
+ intno = apic_get_interrupt(cpu->apic_state);
+ if (intno >= 0) {
+ return intno;
+ }
+ /* read the irq from the PIC */
+ if (!apic_accept_pic_intr(cpu->apic_state)) {
+ return -1;
+ }
}
intno = pic_read_irq(isa_pic);
@@ -182,7 +185,7 @@ static void pic_irq_request(void *opaque, int irq, int level)
X86CPU *cpu = X86_CPU(cs);
DPRINTF("pic_irqs: %s irq %d\n", level? "raise" : "lower", irq);
- if (cpu->apic_state) {
+ if (cpu->apic_state && !kvm_irqchip_in_kernel()) {
CPU_FOREACH(cs) {
cpu = X86_CPU(cs);
if (apic_accept_pic_intr(cpu->apic_state)) {
@@ -532,9 +535,9 @@ static uint64_t port92_read(void *opaque, hwaddr addr,
return ret;
}
-static void port92_init(ISADevice *dev, qemu_irq *a20_out)
+static void port92_init(ISADevice *dev, qemu_irq a20_out)
{
- qdev_connect_gpio_out_named(DEVICE(dev), PORT92_A20_LINE, 0, *a20_out);
+ qdev_connect_gpio_out_named(DEVICE(dev), PORT92_A20_LINE, 0, a20_out);
}
static const VMStateDescription vmstate_port92_isa = {
@@ -743,20 +746,19 @@ static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms)
int i, j;
fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, as);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
/* FW_CFG_MAX_CPUS is a bit confusing/problematic on x86:
*
- * SeaBIOS needs FW_CFG_MAX_CPUS for CPU hotplug, but the CPU hotplug
- * QEMU<->SeaBIOS interface is not based on the "CPU index", but on the APIC
- * ID of hotplugged CPUs[1]. This means that FW_CFG_MAX_CPUS is not the
- * "maximum number of CPUs", but the "limit to the APIC ID values SeaBIOS
- * may see".
+     * For machine types prior to 1.8, SeaBIOS needs FW_CFG_MAX_CPUS for
+     * building the MPTable, ACPI MADT, ACPI CPU hotplug and ACPI SRAT tables;
+     * these tables are based on the xAPIC ID, and the QEMU<->SeaBIOS CPU
+     * hotplug interface also uses APIC IDs rather than the "CPU index".
+ * This means that FW_CFG_MAX_CPUS is not the "maximum number of CPUs",
+ * but the "limit to the APIC ID values SeaBIOS may see".
*
- * So, this means we must not use max_cpus, here, but the maximum possible
- * APIC ID value, plus one.
- *
- * [1] The only kind of "CPU identifier" used between SeaBIOS and QEMU is
- * the APIC ID, not the "CPU index"
+     * So, for compatibility with old BIOSes, we are stuck with "etc/max-cpus"
+     * actually being the apic_id_limit.
*/
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)pcms->apic_id_limit);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
@@ -779,11 +781,9 @@ static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms)
for (i = 0; i < max_cpus; i++) {
unsigned int apic_id = x86_cpu_apic_id_from_index(i);
assert(apic_id < pcms->apic_id_limit);
- for (j = 0; j < nb_numa_nodes; j++) {
- if (test_bit(i, numa_info[j].node_cpu)) {
- numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
- break;
- }
+ j = numa_get_node_for_cpu(i);
+ if (j < nb_numa_nodes) {
+ numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
}
}
for (i = 0; i < nb_numa_nodes; i++) {
@@ -1093,17 +1093,6 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level)
}
}
-static int pc_present_cpus_count(PCMachineState *pcms)
-{
- int i, boot_cpus = 0;
- for (i = 0; i < pcms->possible_cpus->len; i++) {
- if (pcms->possible_cpus->cpus[i].cpu) {
- boot_cpus++;
- }
- }
- return boot_cpus;
-}
-
static X86CPU *pc_new_cpu(const char *typename, int64_t apic_id,
Error **errp)
{
@@ -1196,12 +1185,6 @@ void pc_cpus_init(PCMachineState *pcms)
* This is used for FW_CFG_MAX_CPUS. See comments on bochs_bios_init().
*/
pcms->apic_id_limit = x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
- if (pcms->apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
- error_report("max_cpus is too large. APIC ID of last CPU is %u",
- pcms->apic_id_limit - 1);
- exit(1);
- }
-
pcms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
sizeof(CPUArchId) * max_cpus);
for (i = 0; i < max_cpus; i++) {
@@ -1246,6 +1229,19 @@ static void pc_build_feature_control_file(PCMachineState *pcms)
fw_cfg_add_file(pcms->fw_cfg, "etc/msr_feature_control", val, sizeof(*val));
}
+static void rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count)
+{
+ if (cpus_count > 0xff) {
+ /* If the number of CPUs can't be represented in 8 bits, the
+ * BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just
+ * to make old BIOSes fail more predictably.
+ */
+ rtc_set_memory(rtc, 0x5f, 0);
+ } else {
+ rtc_set_memory(rtc, 0x5f, cpus_count - 1);
+ }
+}
+
static
void pc_machine_done(Notifier *notifier, void *data)
{
@@ -1254,7 +1250,7 @@ void pc_machine_done(Notifier *notifier, void *data)
PCIBus *bus = pcms->bus;
/* set the number of CPUs */
- rtc_set_memory(pcms->rtc, 0x5f, pc_present_cpus_count(pcms) - 1);
+ rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus);
if (bus) {
int extra_hosts = 0;
@@ -1277,6 +1273,21 @@ void pc_machine_done(Notifier *notifier, void *data)
if (pcms->fw_cfg) {
pc_build_smbios(pcms->fw_cfg);
pc_build_feature_control_file(pcms);
+ /* update FW_CFG_NB_CPUS to account for -device added CPUs */
+ fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
+ }
+
+ if (pcms->apic_id_limit > 255) {
+ IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default());
+
+ if (!iommu || !iommu->x86_iommu.intr_supported ||
+ iommu->intr_eim != ON_OFF_AUTO_ON) {
+ error_report("current -smp configuration requires "
+ "Extended Interrupt Mode enabled. "
+ "You can add an IOMMU using: "
+ "-device intel-iommu,intremap=on,eim=on");
+ exit(EXIT_FAILURE);
+ }
}
}
@@ -1341,6 +1352,7 @@ void xen_load_linux(PCMachineState *pcms)
assert(MACHINE(pcms)->kernel_filename != NULL);
fw_cfg = fw_cfg_init_io(FW_CFG_IO_BASE);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
rom_set_fw(fw_cfg);
load_linux(pcms, fw_cfg);
@@ -1595,12 +1607,12 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
pcspk_init(isa_bus, pit);
}
- serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS);
+ serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);
parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS);
a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2);
i8042 = isa_create_simple(isa_bus, "i8042");
- i8042_setup_a20_line(i8042, &a20_line[0]);
+ i8042_setup_a20_line(i8042, a20_line[0]);
if (!no_vmport) {
vmport_init(isa_bus);
vmmouse = isa_try_create(isa_bus, "vmmouse");
@@ -1613,7 +1625,8 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
qdev_init_nofail(dev);
}
port92 = isa_create_simple(isa_bus, "port92");
- port92_init(port92, &a20_line[1]);
+ port92_init(port92, a20_line[1]);
+ g_free(a20_line);
DMA_init(isa_bus, 0);
@@ -1705,6 +1718,10 @@ static void pc_dimm_plug(HotplugHandler *hotplug_dev,
goto out;
}
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ nvdimm_plug(&pcms->acpi_nvdimm_state);
+ }
+
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &error_abort);
out:
@@ -1724,6 +1741,12 @@ static void pc_dimm_unplug_request(HotplugHandler *hotplug_dev,
goto out;
}
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ error_setg(&local_err,
+ "nvdimm device hot unplug is not supported yet.");
+ goto out;
+ }
+
hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
hhc->unplug_request(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
@@ -1799,9 +1822,11 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
}
}
+ /* increment the number of CPUs */
+ pcms->boot_cpus++;
if (dev->hotplugged) {
- /* increment the number of CPUs */
- rtc_set_memory(pcms->rtc, 0x5f, rtc_get_memory(pcms->rtc, 0x5f) + 1);
+ rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus);
+ fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
}
found_cpu = pc_find_cpu_slot(pcms, CPU(dev), NULL);
@@ -1855,7 +1880,11 @@ static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev,
found_cpu->cpu = NULL;
object_unparent(OBJECT(dev));
- rtc_set_memory(pcms->rtc, 0x5f, rtc_get_memory(pcms->rtc, 0x5f) - 1);
+ /* decrement the number of CPUs */
+ pcms->boot_cpus--;
+ /* Update the number of CPUs in CMOS */
+ rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus);
+ fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
out:
error_propagate(errp, local_err);
}
@@ -2141,41 +2170,13 @@ static void pc_machine_initfn(Object *obj)
{
PCMachineState *pcms = PC_MACHINE(obj);
- object_property_add(obj, PC_MACHINE_MEMHP_REGION_SIZE, "int",
- pc_machine_get_hotplug_memory_region_size,
- NULL, NULL, NULL, &error_abort);
-
pcms->max_ram_below_4g = 0; /* use default */
- object_property_add(obj, PC_MACHINE_MAX_RAM_BELOW_4G, "size",
- pc_machine_get_max_ram_below_4g,
- pc_machine_set_max_ram_below_4g,
- NULL, NULL, &error_abort);
- object_property_set_description(obj, PC_MACHINE_MAX_RAM_BELOW_4G,
- "Maximum ram below the 4G boundary (32bit boundary)",
- &error_abort);
-
pcms->smm = ON_OFF_AUTO_AUTO;
- object_property_add(obj, PC_MACHINE_SMM, "OnOffAuto",
- pc_machine_get_smm,
- pc_machine_set_smm,
- NULL, NULL, &error_abort);
- object_property_set_description(obj, PC_MACHINE_SMM,
- "Enable SMM (pc & q35)",
- &error_abort);
-
pcms->vmport = ON_OFF_AUTO_AUTO;
- object_property_add(obj, PC_MACHINE_VMPORT, "OnOffAuto",
- pc_machine_get_vmport,
- pc_machine_set_vmport,
- NULL, NULL, &error_abort);
- object_property_set_description(obj, PC_MACHINE_VMPORT,
- "Enable vmport (pc & q35)",
- &error_abort);
-
/* nvdimm is disabled on default. */
pcms->acpi_nvdimm_state.is_enabled = false;
- object_property_add_bool(obj, PC_MACHINE_NVDIMM, pc_machine_get_nvdimm,
- pc_machine_set_nvdimm, &error_abort);
+ /* acpi build is enabled by default if machine supports it */
+ pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build;
}
static void pc_machine_reset(void)
@@ -2310,6 +2311,32 @@ static void pc_machine_class_init(ObjectClass *oc, void *data)
hc->unplug_request = pc_machine_device_unplug_request_cb;
hc->unplug = pc_machine_device_unplug_cb;
nc->nmi_monitor_handler = x86_nmi;
+
+ object_class_property_add(oc, PC_MACHINE_MEMHP_REGION_SIZE, "int",
+ pc_machine_get_hotplug_memory_region_size, NULL,
+ NULL, NULL, &error_abort);
+
+ object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size",
+ pc_machine_get_max_ram_below_4g, pc_machine_set_max_ram_below_4g,
+ NULL, NULL, &error_abort);
+
+ object_class_property_set_description(oc, PC_MACHINE_MAX_RAM_BELOW_4G,
+ "Maximum ram below the 4G boundary (32bit boundary)", &error_abort);
+
+ object_class_property_add(oc, PC_MACHINE_SMM, "OnOffAuto",
+ pc_machine_get_smm, pc_machine_set_smm,
+ NULL, NULL, &error_abort);
+ object_class_property_set_description(oc, PC_MACHINE_SMM,
+ "Enable SMM (pc & q35)", &error_abort);
+
+ object_class_property_add(oc, PC_MACHINE_VMPORT, "OnOffAuto",
+ pc_machine_get_vmport, pc_machine_set_vmport,
+ NULL, NULL, &error_abort);
+ object_class_property_set_description(oc, PC_MACHINE_VMPORT,
+ "Enable vmport (pc & q35)", &error_abort);
+
+ object_class_property_add_bool(oc, PC_MACHINE_NVDIMM,
+ pc_machine_get_nvdimm, pc_machine_set_nvdimm, &error_abort);
}
static const TypeInfo pc_machine_info = {
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 84241296a8..68835dde0b 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -74,7 +74,6 @@ static void pc_init1(MachineState *machine,
ISABus *isa_bus;
PCII440FXState *i440fx_state;
int piix3_devfn = -1;
- qemu_irq *gsi;
qemu_irq *i8259;
qemu_irq smi_irq;
GSIState *gsi_state;
@@ -185,16 +184,16 @@ static void pc_init1(MachineState *machine,
gsi_state = g_malloc0(sizeof(*gsi_state));
if (kvm_ioapic_in_kernel()) {
kvm_pc_setup_irq_routing(pcmc->pci_enabled);
- gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state,
- GSI_NUM_PINS);
+ pcms->gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state,
+ GSI_NUM_PINS);
} else {
- gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS);
+ pcms->gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS);
}
if (pcmc->pci_enabled) {
pci_bus = i440fx_init(host_type,
pci_type,
- &i440fx_state, &piix3_devfn, &isa_bus, gsi,
+ &i440fx_state, &piix3_devfn, &isa_bus, pcms->gsi,
system_memory, system_io, machine->ram_size,
pcms->below_4g_mem_size,
pcms->above_4g_mem_size,
@@ -207,7 +206,7 @@ static void pc_init1(MachineState *machine,
&error_abort);
no_hpet = 1;
}
- isa_bus_irqs(isa_bus, gsi);
+ isa_bus_irqs(isa_bus, pcms->gsi);
if (kvm_pic_in_kernel()) {
i8259 = kvm_i8259_init(isa_bus);
@@ -225,7 +224,7 @@ static void pc_init1(MachineState *machine,
ioapic_init_gsi(gsi_state, "i440fx");
}
- pc_register_ferr_irq(gsi[13]);
+ pc_register_ferr_irq(pcms->gsi[13]);
pc_vga_init(isa_bus, pcmc->pci_enabled ? pci_bus : NULL);
@@ -235,7 +234,7 @@ static void pc_init1(MachineState *machine,
}
/* init basic PC hardware */
- pc_basic_device_init(isa_bus, gsi, &rtc_state, true,
+ pc_basic_device_init(isa_bus, pcms->gsi, &rtc_state, true,
(pcms->vmport != ON_OFF_AUTO_ON), 0x4);
pc_nic_init(isa_bus, pci_bus);
@@ -279,7 +278,7 @@ static void pc_init1(MachineState *machine,
smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0);
/* TODO: Populate SPD eeprom data. */
smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100,
- gsi[9], smi_irq,
+ pcms->gsi[9], smi_irq,
pc_machine_is_smm_enabled(pcms),
&piix4_pm);
smbus_eeprom_init(smbus, 8, NULL, 0);
@@ -447,13 +446,25 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->default_display = "std";
}
-static void pc_i440fx_2_7_machine_options(MachineClass *m)
+static void pc_i440fx_2_8_machine_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
m->alias = "pc";
m->is_default = 1;
}
+DEFINE_I440FX_MACHINE(v2_8, "pc-i440fx-2.8", NULL,
+ pc_i440fx_2_8_machine_options);
+
+
+static void pc_i440fx_2_7_machine_options(MachineClass *m)
+{
+ pc_i440fx_2_8_machine_options(m);
+ m->is_default = 0;
+ m->alias = NULL;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_7);
+}
+
DEFINE_I440FX_MACHINE(v2_7, "pc-i440fx-2.7", NULL,
pc_i440fx_2_7_machine_options);
@@ -462,8 +473,6 @@ static void pc_i440fx_2_6_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_7_machine_options(m);
- m->is_default = 0;
- m->alias = NULL;
pcmc->legacy_cpu_hotplug = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_6);
}
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index c0b9961928..b40d19ee00 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -69,7 +69,6 @@ static void pc_q35_init(MachineState *machine)
MemoryRegion *ram_memory;
GSIState *gsi_state;
ISABus *isa_bus;
- qemu_irq *gsi;
qemu_irq *i8259;
int i;
ICH9LPCState *ich9_lpc;
@@ -153,10 +152,10 @@ static void pc_q35_init(MachineState *machine)
gsi_state = g_malloc0(sizeof(*gsi_state));
if (kvm_ioapic_in_kernel()) {
kvm_pc_setup_irq_routing(pcmc->pci_enabled);
- gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state,
- GSI_NUM_PINS);
+ pcms->gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state,
+ GSI_NUM_PINS);
} else {
- gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS);
+ pcms->gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS);
}
/* create pci host bus */
@@ -195,7 +194,7 @@ static void pc_q35_init(MachineState *machine)
ich9_lpc = ICH9_LPC_DEVICE(lpc);
lpc_dev = DEVICE(lpc);
for (i = 0; i < GSI_NUM_PINS; i++) {
- qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, gsi[i]);
+ qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, pcms->gsi[i]);
}
pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc_map_irq, ich9_lpc,
ICH9_LPC_NB_PIRQS);
@@ -213,11 +212,13 @@ static void pc_q35_init(MachineState *machine)
for (i = 0; i < ISA_NUM_IRQS; i++) {
gsi_state->i8259_irq[i] = i8259[i];
}
+ g_free(i8259);
+
if (pcmc->pci_enabled) {
ioapic_init_gsi(gsi_state, "q35");
}
- pc_register_ferr_irq(gsi[13]);
+ pc_register_ferr_irq(pcms->gsi[13]);
assert(pcms->vmport != ON_OFF_AUTO__MAX);
if (pcms->vmport == ON_OFF_AUTO_AUTO) {
@@ -225,7 +226,7 @@ static void pc_q35_init(MachineState *machine)
}
/* init basic PC hardware */
- pc_basic_device_init(isa_bus, gsi, &rtc_state, !mc->no_floppy,
+ pc_basic_device_init(isa_bus, pcms->gsi, &rtc_state, !mc->no_floppy,
(pcms->vmport != ON_OFF_AUTO_ON), 0xff0104);
/* connect pm stuff to lpc */
@@ -290,14 +291,26 @@ static void pc_q35_machine_options(MachineClass *m)
m->default_display = "std";
m->no_floppy = 1;
m->has_dynamic_sysbus = true;
+ m->max_cpus = 288;
}
-static void pc_q35_2_7_machine_options(MachineClass *m)
+static void pc_q35_2_8_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
m->alias = "q35";
}
+DEFINE_Q35_MACHINE(v2_8, "pc-q35-2.8", NULL,
+ pc_q35_2_8_machine_options);
+
+static void pc_q35_2_7_machine_options(MachineClass *m)
+{
+ pc_q35_2_8_machine_options(m);
+ m->alias = NULL;
+ m->max_cpus = 255;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_7);
+}
+
DEFINE_Q35_MACHINE(v2_7, "pc-q35-2.7", NULL,
pc_q35_2_7_machine_options);
@@ -305,7 +318,6 @@ static void pc_q35_2_6_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_7_machine_options(m);
- m->alias = NULL;
pcmc->legacy_cpu_hotplug = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_6);
}
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index 7735e46eaf..d2b497327e 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -7,9 +7,34 @@ xen_platform_log(char *s) "xen platform: %s"
xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address %"PRIx64")"
xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address %"PRIx64")"
-# hw/i386/pc.c
-mhp_pc_dimm_assigned_slot(int slot) "0x%d"
-mhp_pc_dimm_assigned_address(uint64_t addr) "0x%"PRIx64
-
# hw/i386/x86-iommu.c
x86_iommu_iec_notify(bool global, uint32_t index, uint32_t mask) "Notify IEC invalidation: global=%d index=%" PRIu32 " mask=%" PRIu32
+
+# hw/i386/amd_iommu.c
+amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32
+amdvi_cache_update(uint16_t domid, uint8_t bus, uint8_t slot, uint8_t func, uint64_t gpa, uint64_t txaddr) " update iotlb domid 0x%"PRIx16" devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64
+amdvi_completion_wait_fail(uint64_t addr) "error: fail to write at address 0x%"PRIx64
+amdvi_mmio_write(const char *reg, uint64_t addr, unsigned size, uint64_t val, uint64_t offset) "%s write addr 0x%"PRIx64", size %u, val 0x%"PRIx64", offset 0x%"PRIx64
+amdvi_mmio_read(const char *reg, uint64_t addr, unsigned size, uint64_t offset) "%s read addr 0x%"PRIx64", size %u offset 0x%"PRIx64
+amdvi_command_error(uint64_t status) "error: Executing commands with command buffer disabled 0x%"PRIx64
+amdvi_command_read_fail(uint64_t addr, uint32_t head) "error: fail to access memory at 0x%"PRIx64" + 0x%"PRIx32
+amdvi_command_exec(uint32_t head, uint32_t tail, uint64_t buf) "command buffer head at 0x%"PRIx32" command buffer tail at 0x%"PRIx32" command buffer base at 0x%"PRIx64
+amdvi_unhandled_command(uint8_t type) "unhandled command 0x%"PRIx8
+amdvi_intr_inval(void) "Interrupt table invalidated"
+amdvi_iotlb_inval(void) "IOTLB pages invalidated"
+amdvi_prefetch_pages(void) "Pre-fetch of AMD-Vi pages requested"
+amdvi_pages_inval(uint16_t domid) "AMD-Vi pages for domain 0x%"PRIx16 " invalidated"
+amdvi_all_inval(void) "Invalidation of all AMD-Vi cache requested "
+amdvi_ppr_exec(void) "Execution of PPR queue requested "
+amdvi_devtab_inval(uint8_t bus, uint8_t slot, uint8_t func) "device table entry for devid: %02x:%02x.%x invalidated"
+amdvi_completion_wait(uint64_t addr, uint64_t data) "completion wait requested with store address 0x%"PRIx64" and store data 0x%"PRIx64
+amdvi_control_status(uint64_t val) "MMIO_STATUS state 0x%"PRIx64
+amdvi_iotlb_reset(void) "IOTLB exceeded size limit - reset"
+amdvi_completion_wait_exec(uint64_t addr, uint64_t data) "completion wait requested with store address 0x%"PRIx64" and store data 0x%"PRIx64
+amdvi_dte_get_fail(uint64_t addr, uint32_t offset) "error: failed to access Device Entry devtab 0x%"PRIx64" offset 0x%"PRIx32
+amdvi_invalid_dte(uint64_t addr) "PTE entry at 0x%"PRIx64" is invalid "
+amdvi_get_pte_hwerror(uint64_t addr) "hardware error accessing PTE at addr 0x%"PRIx64
+amdvi_mode_invalid(uint8_t level, uint64_t addr) "error: translation level 0x%"PRIx8" translating addr 0x%"PRIx64
+amdvi_page_fault(uint64_t addr) "error: page fault accessing guest physical address 0x%"PRIx64
+amdvi_iotlb_hit(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "hit iotlb devid %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64
+amdvi_translation_result(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index ce26b2a71d..2278af7c32 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -71,6 +71,11 @@ X86IOMMUState *x86_iommu_get_default(void)
return x86_iommu_default;
}
+IommuType x86_iommu_get_type(void)
+{
+ return x86_iommu_default->type;
+}
+
static void x86_iommu_realize(DeviceState *dev, Error **errp)
{
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
@@ -79,6 +84,7 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp)
if (x86_class->realize) {
x86_class->realize(dev, errp);
}
+
x86_iommu_set_default(X86_IOMMU_DEVICE(dev));
}
diff --git a/hw/i386/xen/xen_apic.c b/hw/i386/xen/xen_apic.c
index 21d68ee04b..55769eba7e 100644
--- a/hw/i386/xen/xen_apic.c
+++ b/hw/i386/xen/xen_apic.c
@@ -68,6 +68,11 @@ static void xen_apic_external_nmi(APICCommonState *s)
{
}
+static void xen_send_msi(MSIMessage *msi)
+{
+ xen_hvm_inject_msi(msi->address, msi->data);
+}
+
static void xen_apic_class_init(ObjectClass *klass, void *data)
{
APICCommonClass *k = APIC_COMMON_CLASS(klass);
@@ -78,6 +83,7 @@ static void xen_apic_class_init(ObjectClass *klass, void *data)
k->get_tpr = xen_apic_get_tpr;
k->vapic_base_update = xen_apic_vapic_base_update;
k->external_nmi = xen_apic_external_nmi;
+ k->send_msi = xen_send_msi;
}
static const TypeInfo xen_apic_info = {
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index aa7839324c..2e1e543881 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -114,6 +114,10 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *o)
PCI_CLASS_STORAGE_IDE
&& strcmp(d->name, "xen-pci-passthrough") != 0) {
pci_piix3_xen_ide_unplug(DEVICE(d));
+ } else if (pci_get_word(d->config + PCI_CLASS_DEVICE) ==
+ PCI_CLASS_STORAGE_SCSI
+ && strcmp(d->name, "xen-pci-passthrough") != 0) {
+ object_unparent(OBJECT(d));
}
}
@@ -134,8 +138,6 @@ static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t v
devices, and bit 2 the non-primary-master IDE devices. */
if (val & UNPLUG_ALL_IDE_DISKS) {
DPRINTF("unplug disks\n");
- blk_drain_all();
- blk_flush_all();
pci_unplug_disks(pci_dev->bus);
}
if (val & UNPLUG_ALL_NICS) {
@@ -309,13 +311,38 @@ static void xen_platform_ioport_writeb(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
PCIXenPlatformState *s = opaque;
+ PCIDevice *pci_dev = PCI_DEVICE(s);
switch (addr) {
case 0: /* Platform flags */
platform_fixed_ioport_writeb(opaque, 0, (uint32_t)val);
break;
+ case 4:
+ if (val == 1) {
+ /*
+ * SUSE unplug for Xenlinux
+         * xen-kmp used this since xen-3.0.4, instead of the official protocol
+         * from xen-3.3+. It did an unconditional "outl(1, (ioaddr + 4));"
+ * Pre VMDP 1.7 used 4 and 8 depending on how VMDP was configured.
+ * If VMDP was to control both disk and LAN it would use 4.
+ * If it controlled just disk or just LAN, it would use 8 below.
+ */
+ pci_unplug_disks(pci_dev->bus);
+ pci_unplug_nics(pci_dev->bus);
+ }
+ break;
case 8:
- log_writeb(s, (uint32_t)val);
+ switch (val) {
+ case 1:
+ pci_unplug_disks(pci_dev->bus);
+ break;
+ case 2:
+ pci_unplug_nics(pci_dev->bus);
+ break;
+ default:
+ log_writeb(s, (uint32_t)val);
+ break;
+ }
break;
default:
break;
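
For context, a guest-side sketch of the legacy unplug writes handled above (an assumption based on the comment's "outl(1, (ioaddr + 4))"; ioaddr stands for the platform device's I/O region base, and outb/outl are the usual Linux x86 port-I/O helpers):

#include <sys/io.h>   /* outb/outl; assumed available on the guest side */

static void xenlinux_legacy_unplug(unsigned short ioaddr)
{
    outl(1, ioaddr + 4);    /* pre-3.3 SUSE xen-kmp: unplug disks and NICs */
    outb(1, ioaddr + 8);    /* VMDP-style: unplug emulated disks only */
    outb(2, ioaddr + 8);    /* VMDP-style: unplug emulated NICs only */
}
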
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index f3438ad78a..3c19bdadc5 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -948,6 +948,7 @@ static void ncq_cb(void *opaque, int ret)
NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
+ ncq_tfs->aiocb = NULL;
if (ret == -ECANCELED) {
return;
}
@@ -1008,6 +1009,7 @@ static void execute_ncq_command(NCQTransferState *ncq_tfs)
&ncq_tfs->sglist, BLOCK_ACCT_READ);
ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
ncq_tfs->lba << BDRV_SECTOR_BITS,
+ BDRV_SECTOR_SIZE,
ncq_cb, ncq_tfs);
break;
case WRITE_FPDMA_QUEUED:
@@ -1021,6 +1023,7 @@ static void execute_ncq_command(NCQTransferState *ncq_tfs)
&ncq_tfs->sglist, BLOCK_ACCT_WRITE);
ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
ncq_tfs->lba << BDRV_SECTOR_BITS,
+ BDRV_SECTOR_SIZE,
ncq_cb, ncq_tfs);
break;
default:
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index 6189675036..fc1d19c6d4 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -637,6 +637,23 @@ static unsigned int event_status_media(IDEState *s,
return 8; /* We wrote to 4 extra bytes from the header */
}
+/*
+ * Before transferring data or otherwise signalling acceptance of a command
+ * marked CONDDATA, we must check the validity of the byte_count_limit.
+ */
+static bool validate_bcl(IDEState *s)
+{
+    /* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */
+ if (s->atapi_dma || atapi_byte_count_limit(s)) {
+ return true;
+ }
+
+ /* TODO: Move abort back into core.c and introduce proper error flow between
+ * ATAPI layer and IDE core layer */
+ ide_abort_command(s);
+ return false;
+}
+
static void cmd_get_event_status_notification(IDEState *s,
uint8_t *buf)
{
@@ -1028,12 +1045,19 @@ static void cmd_read_cd(IDEState *s, uint8_t* buf)
return;
}
- transfer_request = buf[9];
- switch(transfer_request & 0xf8) {
- case 0x00:
+ transfer_request = buf[9] & 0xf8;
+ if (transfer_request == 0x00) {
/* nothing */
ide_atapi_cmd_ok(s);
- break;
+ return;
+ }
+
+ /* Check validity of BCL before transferring data */
+ if (!validate_bcl(s)) {
+ return;
+ }
+
+ switch (transfer_request) {
case 0x10:
/* normal read */
ide_atapi_cmd_read(s, lba, nb_sectors, 2048);
@@ -1266,6 +1290,14 @@ enum {
* See ATA8-ACS3 "7.21.5 Byte Count Limit"
*/
NONDATA = 0x04,
+
+ /*
+ * CONDDATA implies a command that transfers data only conditionally based
+ * on the presence of suboptions. It should be exempt from the BCL check at
+ * command validation time, but it needs to be checked at the command
+ * handler level instead.
+ */
+ CONDDATA = 0x08,
};
static const struct AtapiCmd {
@@ -1289,7 +1321,7 @@ static const struct AtapiCmd {
[ 0xad ] = { cmd_read_dvd_structure, CHECK_READY },
[ 0xbb ] = { cmd_set_speed, NONDATA },
[ 0xbd ] = { cmd_mechanism_status, 0 },
- [ 0xbe ] = { cmd_read_cd, CHECK_READY },
+ [ 0xbe ] = { cmd_read_cd, CHECK_READY | CONDDATA },
/* [1] handler detects and reports not ready condition itself */
};
@@ -1348,15 +1380,12 @@ void ide_atapi_cmd(IDEState *s)
return;
}
- /* Nondata commands permit the byte_count_limit to be 0.
+ /* Commands that don't transfer DATA permit the byte_count_limit to be 0.
* If this is a data-transferring PIO command and BCL is 0,
* we abort at the /ATA/ level, not the ATAPI level.
* See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */
- if (cmd->handler && !(cmd->flags & NONDATA)) {
- /* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */
- if (!(atapi_byte_count_limit(s) || s->atapi_dma)) {
- /* TODO: Move abort back into core.c and make static inline again */
- ide_abort_command(s);
+ if (cmd->handler && !(cmd->flags & (NONDATA | CONDDATA))) {
+ if (!validate_bcl(s)) {
return;
}
}
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 45b6df132c..43709e545f 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -882,15 +882,15 @@ static void ide_dma_cb(void *opaque, int ret)
switch (s->dma_cmd) {
case IDE_DMA_READ:
s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
- ide_dma_cb, s);
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_WRITE:
s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
- ide_dma_cb, s);
+ BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_TRIM:
s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
- &s->sg, offset,
+ &s->sg, offset, BDRV_SECTOR_SIZE,
ide_issue_trim, s->blk, ide_dma_cb, s,
DMA_DIRECTION_TO_DEVICE);
break;
@@ -908,7 +908,7 @@ eot:
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
- s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
+ s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
s->io_buffer_size = 0;
s->dma_cmd = dma_cmd;
@@ -2582,7 +2582,7 @@ static void ide_restart_cb(void *opaque, int running, RunState state)
void ide_register_restart_cb(IDEBus *bus)
{
if (bus->dma->ops->restart_dma) {
- qemu_add_vm_change_state_handler(ide_restart_cb, bus);
+ bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
}
}
@@ -2619,10 +2619,12 @@ void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
/* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
bridge has been setup properly to always register with ISA. */
- isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
+ isa_register_portio_list(dev, &bus->portio_list,
+ iobase, ide_portio_list, bus, "ide");
if (iobase2) {
- isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
+ isa_register_portio_list(dev, &bus->portio2_list,
+ iobase2, ide_portio2_list, bus, "ide");
}
}
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 76f97c2539..9742c005d1 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -52,187 +52,6 @@ static const int debug_macio = 0;
#define MACIO_PAGE_SIZE 4096
-/*
- * Unaligned DMA read/write access functions required for OS X/Darwin which
- * don't perform DMA transactions on sector boundaries. These functions are
- * modelled on bdrv_co_preadv()/bdrv_co_pwritev() and so should be easy to
- * remove if the unaligned block APIs are ever exposed.
- */
-
-static void pmac_dma_read(BlockBackend *blk,
- int64_t offset, unsigned int bytes,
- void (*cb)(void *opaque, int ret), void *opaque)
-{
- DBDMA_io *io = opaque;
- MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
- dma_addr_t dma_addr;
- int64_t sector_num;
- int nsector;
- uint64_t align = BDRV_SECTOR_SIZE;
- size_t head_bytes, tail_bytes;
-
- qemu_iovec_destroy(&io->iov);
- qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
-
- sector_num = (offset >> 9);
- nsector = (io->len >> 9);
-
- MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
- "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
- sector_num, nsector);
-
- dma_addr = io->addr;
- io->dir = DMA_DIRECTION_FROM_DEVICE;
- io->dma_len = io->len;
- io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
- io->dir);
-
- if (offset & (align - 1)) {
- head_bytes = offset & (align - 1);
-
- MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
- "discarding %zu bytes\n", sector_num, head_bytes);
-
- qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
-
- bytes += offset & (align - 1);
- offset = offset & ~(align - 1);
- }
-
- qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-
- if ((offset + bytes) & (align - 1)) {
- tail_bytes = (offset + bytes) & (align - 1);
-
- MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
- "discarding bytes %zu\n", sector_num, tail_bytes);
-
- qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
- bytes = ROUND_UP(bytes, align);
- }
-
- s->io_buffer_size -= io->len;
- s->io_buffer_index += io->len;
-
- io->len = 0;
-
- MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
- "nsector: %x\n", (offset >> 9), (bytes >> 9));
-
- s->bus->dma->aiocb = blk_aio_preadv(blk, offset, &io->iov, 0, cb, io);
-}
-
-static void pmac_dma_write(BlockBackend *blk,
- int64_t offset, int bytes,
- void (*cb)(void *opaque, int ret), void *opaque)
-{
- DBDMA_io *io = opaque;
- MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
- dma_addr_t dma_addr;
- int64_t sector_num;
- int nsector;
- uint64_t align = BDRV_SECTOR_SIZE;
- size_t head_bytes, tail_bytes;
- bool unaligned_head = false, unaligned_tail = false;
-
- qemu_iovec_destroy(&io->iov);
- qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
-
- sector_num = (offset >> 9);
- nsector = (io->len >> 9);
-
- MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
- "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
- sector_num, nsector);
-
- dma_addr = io->addr;
- io->dir = DMA_DIRECTION_TO_DEVICE;
- io->dma_len = io->len;
- io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
- io->dir);
-
- if (offset & (align - 1)) {
- head_bytes = offset & (align - 1);
- sector_num = ((offset & ~(align - 1)) >> 9);
-
- MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
- PRId64 "\n", sector_num);
-
- blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
-
- qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
- qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-
- bytes += offset & (align - 1);
- offset = offset & ~(align - 1);
-
- unaligned_head = true;
- }
-
- if ((offset + bytes) & (align - 1)) {
- tail_bytes = (offset + bytes) & (align - 1);
- sector_num = (((offset + bytes) & ~(align - 1)) >> 9);
-
- MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
- PRId64 "\n", sector_num);
-
- blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
-
- if (!unaligned_head) {
- qemu_iovec_add(&io->iov, io->dma_mem, io->len);
- }
-
- qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
- align - tail_bytes);
-
- bytes = ROUND_UP(bytes, align);
-
- unaligned_tail = true;
- }
-
- if (!unaligned_head && !unaligned_tail) {
- qemu_iovec_add(&io->iov, io->dma_mem, io->len);
- }
-
- s->io_buffer_size -= io->len;
- s->io_buffer_index += io->len;
-
- io->len = 0;
-
- MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
- "nsector: %x\n", (offset >> 9), (bytes >> 9));
-
- s->bus->dma->aiocb = blk_aio_pwritev(blk, offset, &io->iov, 0, cb, io);
-}
-
-static void pmac_dma_trim(BlockBackend *blk,
- int64_t offset, int bytes,
- void (*cb)(void *opaque, int ret), void *opaque)
-{
- DBDMA_io *io = opaque;
- MACIOIDEState *m = io->opaque;
- IDEState *s = idebus_active_if(&m->bus);
- dma_addr_t dma_addr;
-
- qemu_iovec_destroy(&io->iov);
- qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
-
- dma_addr = io->addr;
- io->dir = DMA_DIRECTION_TO_DEVICE;
- io->dma_len = io->len;
- io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
- io->dir);
-
- qemu_iovec_add(&io->iov, io->dma_mem, io->len);
- s->io_buffer_size -= io->len;
- s->io_buffer_index += io->len;
- io->len = 0;
-
- s->bus->dma->aiocb = ide_issue_trim(offset, &io->iov, cb, io, blk);
-}
-
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
DBDMA_io *io = opaque;
@@ -244,6 +63,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
if (ret < 0) {
MACIO_DPRINTF("DMA error: %d\n", ret);
+ qemu_sglist_destroy(&s->sg);
ide_atapi_io_error(s, ret);
goto done;
}
@@ -258,6 +78,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
if (s->io_buffer_size <= 0) {
MACIO_DPRINTF("End of IDE transfer\n");
+ qemu_sglist_destroy(&s->sg);
ide_atapi_cmd_ok(s);
m->dma_active = false;
goto done;
@@ -282,7 +103,15 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
/* Calculate current offset */
offset = ((int64_t)s->lba << 11) + s->io_buffer_index;
- pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
+ qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+ &address_space_memory);
+ qemu_sglist_add(&s->sg, io->addr, io->len);
+ s->io_buffer_size -= io->len;
+ s->io_buffer_index += io->len;
+ io->len = 0;
+
+ s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+ pmac_ide_atapi_transfer_cb, io);
return;
done:
@@ -310,6 +139,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
if (ret < 0) {
MACIO_DPRINTF("DMA error: %d\n", ret);
+ qemu_sglist_destroy(&s->sg);
ide_dma_error(s);
goto done;
}
@@ -324,6 +154,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
if (s->io_buffer_size <= 0) {
MACIO_DPRINTF("End of IDE transfer\n");
+ qemu_sglist_destroy(&s->sg);
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
m->dma_active = false;
@@ -338,15 +169,27 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
/* Calculate number of sectors */
offset = (ide_get_sector(s) << 9) + s->io_buffer_index;
+ qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+ &address_space_memory);
+ qemu_sglist_add(&s->sg, io->addr, io->len);
+ s->io_buffer_size -= io->len;
+ s->io_buffer_index += io->len;
+ io->len = 0;
+
switch (s->dma_cmd) {
case IDE_DMA_READ:
- pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
+ s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+ pmac_ide_transfer_cb, io);
break;
case IDE_DMA_WRITE:
- pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
+ s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset, 0x1,
+ pmac_ide_transfer_cb, io);
break;
case IDE_DMA_TRIM:
- pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
+ s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
+ offset, 0x1, ide_issue_trim, s->blk,
+ pmac_ide_transfer_cb, io,
+ DMA_DIRECTION_TO_DEVICE);
break;
default:
abort();
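
The hunks above replace the hand-rolled pmac_dma_read/write/trim bounce buffering with QEMU's scatter-gather DMA helpers: the callbacks now build a qemu_sglist and let dma_blk_read/dma_blk_write/dma_blk_io deal with sub-sector alignment, instead of pre-reading head and tail sectors into remainder buffers. The standalone sketch below only illustrates the alignment arithmetic the deleted code performed by hand; the offset, length and sector size are made-up values, not taken from this tree.

#include <inttypes.h>
#include <stdio.h>

/* Round x down/up to a multiple of align (align must be a power of two). */
#define ALIGN_DOWN(x, align) ((x) & ~((uint64_t)(align) - 1))
#define ALIGN_UP(x, align)   ALIGN_DOWN((x) + (align) - 1, align)

int main(void)
{
    uint64_t offset = 0x10345;  /* illustrative unaligned byte offset */
    uint64_t bytes  = 0x1c00;   /* illustrative transfer length */
    uint64_t align  = 512;      /* sector size the old code assumed */

    /* What pmac_dma_write used to do before issuing a sector-aligned request:
     * pad the front by head_bytes, pad the back up to the next sector. */
    uint64_t head_bytes  = offset & (align - 1);
    uint64_t aligned_off = ALIGN_DOWN(offset, align);
    uint64_t aligned_len = ALIGN_UP(head_bytes + bytes, align);
    uint64_t tail_pad    = aligned_len - (head_bytes + bytes);

    printf("head padding %" PRIu64 ", tail padding %" PRIu64 "\n",
           head_bytes, tail_pad);
    printf("aligned request: offset 0x%" PRIx64 ", length 0x%" PRIx64 "\n",
           aligned_off, aligned_len);
    return 0;
}
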
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
index c190fcaa3c..d5777fd0b3 100644
--- a/hw/ide/piix.c
+++ b/hw/ide/piix.c
@@ -179,6 +179,10 @@ int pci_piix3_xen_ide_unplug(DeviceState *dev)
if (di != NULL && !di->media_cd) {
BlockBackend *blk = blk_by_legacy_dinfo(di);
DeviceState *ds = blk_get_attached_dev(blk);
+
+ blk_drain(blk);
+ blk_flush(blk);
+
if (ds) {
blk_detach_dev(blk, ds);
}
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
index 67c76bfcd6..dbaa75cf59 100644
--- a/hw/ide/qdev.c
+++ b/hw/ide/qdev.c
@@ -31,6 +31,7 @@
/* --------------------------------- */
static char *idebus_get_fw_dev_path(DeviceState *dev);
+static void idebus_unrealize(DeviceState *qdev, Error **errp);
static Property ide_props[] = {
DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1),
@@ -44,6 +45,15 @@ static void ide_bus_class_init(ObjectClass *klass, void *data)
k->get_fw_dev_path = idebus_get_fw_dev_path;
}
+static void idebus_unrealize(DeviceState *qdev, Error **errp)
+{
+ IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus);
+
+ if (bus->vmstate) {
+ qemu_del_vm_change_state_handler(bus->vmstate);
+ }
+}
+
static const TypeInfo ide_bus_info = {
.name = TYPE_IDE_BUS,
.parent = TYPE_BUS,
@@ -75,10 +85,6 @@ static int ide_qdev_init(DeviceState *qdev)
IDEDeviceClass *dc = IDE_DEVICE_GET_CLASS(dev);
IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus);
- if (!dev->conf.blk) {
- error_report("No drive specified");
- goto err;
- }
if (dev->unit == -1) {
dev->unit = bus->master ? 1 : 0;
}
@@ -158,6 +164,16 @@ static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind)
IDEState *s = bus->ifs + dev->unit;
Error *err = NULL;
+ if (!dev->conf.blk) {
+ if (kind != IDE_CD) {
+ error_report("No drive specified");
+ return -1;
+ } else {
+ /* Anonymous BlockBackend for an empty drive */
+ dev->conf.blk = blk_new();
+ }
+ }
+
if (dev->conf.discard_granularity == -1) {
dev->conf.discard_granularity = 512;
} else if (dev->conf.discard_granularity &&
@@ -257,7 +273,11 @@ static int ide_cd_initfn(IDEDevice *dev)
static int ide_drive_initfn(IDEDevice *dev)
{
- DriveInfo *dinfo = blk_legacy_dinfo(dev->conf.blk);
+ DriveInfo *dinfo = NULL;
+
+ if (dev->conf.blk) {
+ dinfo = blk_legacy_dinfo(dev->conf.blk);
+ }
return ide_dev_initfn(dev, dinfo && dinfo->media_cd ? IDE_CD : IDE_HD);
}
@@ -345,6 +365,7 @@ static void ide_device_class_init(ObjectClass *klass, void *data)
k->init = ide_qdev_init;
set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
k->bus_type = TYPE_IDE_BUS;
+ k->unrealize = idebus_unrealize;
k->props = ide_props;
}
diff --git a/hw/input/adb.c b/hw/input/adb.c
index f0ad0d4471..43d3205472 100644
--- a/hw/input/adb.c
+++ b/hw/input/adb.c
@@ -25,6 +25,9 @@
#include "hw/hw.h"
#include "hw/input/adb.h"
#include "ui/console.h"
+#include "include/hw/input/adb-keys.h"
+#include "ui/input.h"
+#include "sysemu/sysemu.h"
/* debug ADB */
//#define DEBUG_ADB
@@ -59,6 +62,9 @@ do { printf("ADB: " fmt , ## __VA_ARGS__); } while (0)
/* error codes */
#define ADB_RET_NOTPRESENT (-2)
+/* The adb keyboard doesn't have every key imaginable */
+#define NO_KEY 0xff
+
static void adb_device_reset(ADBDevice *d)
{
qdev_reset_all(DEVICE(d));
@@ -187,23 +193,125 @@ typedef struct ADBKeyboardClass {
DeviceRealize parent_realize;
} ADBKeyboardClass;
-static const uint8_t pc_to_adb_keycode[256] = {
- 0, 53, 18, 19, 20, 21, 23, 22, 26, 28, 25, 29, 27, 24, 51, 48,
- 12, 13, 14, 15, 17, 16, 32, 34, 31, 35, 33, 30, 36, 54, 0, 1,
- 2, 3, 5, 4, 38, 40, 37, 41, 39, 50, 56, 42, 6, 7, 8, 9,
- 11, 45, 46, 43, 47, 44,123, 67, 58, 49, 57,122,120, 99,118, 96,
- 97, 98,100,101,109, 71,107, 89, 91, 92, 78, 86, 87, 88, 69, 83,
- 84, 85, 82, 65, 0, 0, 10,103,111, 0, 0,110, 81, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 94, 0, 93, 0, 0, 0, 0, 0, 0,104,102, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 76,125, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,105, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 75, 0, 0,124, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0,115, 62,116, 0, 59, 0, 60, 0,119,
- 61,121,114,117, 0, 0, 0, 0, 0, 0, 0, 55,126, 0,127, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+int qcode_to_adb_keycode[] = {
+ /* Make sure future additions are automatically set to NO_KEY */
+ [0 ... 0xff] = NO_KEY,
+
+ [Q_KEY_CODE_SHIFT] = ADB_KEY_LEFT_SHIFT,
+ [Q_KEY_CODE_SHIFT_R] = ADB_KEY_RIGHT_SHIFT,
+ [Q_KEY_CODE_ALT] = ADB_KEY_LEFT_OPTION,
+ [Q_KEY_CODE_ALT_R] = ADB_KEY_RIGHT_OPTION,
+ [Q_KEY_CODE_ALTGR] = ADB_KEY_RIGHT_OPTION,
+ [Q_KEY_CODE_CTRL] = ADB_KEY_LEFT_CONTROL,
+ [Q_KEY_CODE_CTRL_R] = ADB_KEY_RIGHT_CONTROL,
+ [Q_KEY_CODE_META_L] = ADB_KEY_COMMAND,
+ [Q_KEY_CODE_META_R] = ADB_KEY_COMMAND,
+ [Q_KEY_CODE_SPC] = ADB_KEY_SPACEBAR,
+
+ [Q_KEY_CODE_ESC] = ADB_KEY_ESC,
+ [Q_KEY_CODE_1] = ADB_KEY_1,
+ [Q_KEY_CODE_2] = ADB_KEY_2,
+ [Q_KEY_CODE_3] = ADB_KEY_3,
+ [Q_KEY_CODE_4] = ADB_KEY_4,
+ [Q_KEY_CODE_5] = ADB_KEY_5,
+ [Q_KEY_CODE_6] = ADB_KEY_6,
+ [Q_KEY_CODE_7] = ADB_KEY_7,
+ [Q_KEY_CODE_8] = ADB_KEY_8,
+ [Q_KEY_CODE_9] = ADB_KEY_9,
+ [Q_KEY_CODE_0] = ADB_KEY_0,
+ [Q_KEY_CODE_MINUS] = ADB_KEY_MINUS,
+ [Q_KEY_CODE_EQUAL] = ADB_KEY_EQUAL,
+ [Q_KEY_CODE_BACKSPACE] = ADB_KEY_DELETE,
+ [Q_KEY_CODE_TAB] = ADB_KEY_TAB,
+ [Q_KEY_CODE_Q] = ADB_KEY_Q,
+ [Q_KEY_CODE_W] = ADB_KEY_W,
+ [Q_KEY_CODE_E] = ADB_KEY_E,
+ [Q_KEY_CODE_R] = ADB_KEY_R,
+ [Q_KEY_CODE_T] = ADB_KEY_T,
+ [Q_KEY_CODE_Y] = ADB_KEY_Y,
+ [Q_KEY_CODE_U] = ADB_KEY_U,
+ [Q_KEY_CODE_I] = ADB_KEY_I,
+ [Q_KEY_CODE_O] = ADB_KEY_O,
+ [Q_KEY_CODE_P] = ADB_KEY_P,
+ [Q_KEY_CODE_BRACKET_LEFT] = ADB_KEY_LEFT_BRACKET,
+ [Q_KEY_CODE_BRACKET_RIGHT] = ADB_KEY_RIGHT_BRACKET,
+ [Q_KEY_CODE_RET] = ADB_KEY_RETURN,
+ [Q_KEY_CODE_A] = ADB_KEY_A,
+ [Q_KEY_CODE_S] = ADB_KEY_S,
+ [Q_KEY_CODE_D] = ADB_KEY_D,
+ [Q_KEY_CODE_F] = ADB_KEY_F,
+ [Q_KEY_CODE_G] = ADB_KEY_G,
+ [Q_KEY_CODE_H] = ADB_KEY_H,
+ [Q_KEY_CODE_J] = ADB_KEY_J,
+ [Q_KEY_CODE_K] = ADB_KEY_K,
+ [Q_KEY_CODE_L] = ADB_KEY_L,
+ [Q_KEY_CODE_SEMICOLON] = ADB_KEY_SEMICOLON,
+ [Q_KEY_CODE_APOSTROPHE] = ADB_KEY_APOSTROPHE,
+ [Q_KEY_CODE_GRAVE_ACCENT] = ADB_KEY_GRAVE_ACCENT,
+ [Q_KEY_CODE_BACKSLASH] = ADB_KEY_BACKSLASH,
+ [Q_KEY_CODE_Z] = ADB_KEY_Z,
+ [Q_KEY_CODE_X] = ADB_KEY_X,
+ [Q_KEY_CODE_C] = ADB_KEY_C,
+ [Q_KEY_CODE_V] = ADB_KEY_V,
+ [Q_KEY_CODE_B] = ADB_KEY_B,
+ [Q_KEY_CODE_N] = ADB_KEY_N,
+ [Q_KEY_CODE_M] = ADB_KEY_M,
+ [Q_KEY_CODE_COMMA] = ADB_KEY_COMMA,
+ [Q_KEY_CODE_DOT] = ADB_KEY_PERIOD,
+ [Q_KEY_CODE_SLASH] = ADB_KEY_FORWARD_SLASH,
+ [Q_KEY_CODE_ASTERISK] = ADB_KEY_KP_MULTIPLY,
+ [Q_KEY_CODE_CAPS_LOCK] = ADB_KEY_CAPS_LOCK,
+
+ [Q_KEY_CODE_F1] = ADB_KEY_F1,
+ [Q_KEY_CODE_F2] = ADB_KEY_F2,
+ [Q_KEY_CODE_F3] = ADB_KEY_F3,
+ [Q_KEY_CODE_F4] = ADB_KEY_F4,
+ [Q_KEY_CODE_F5] = ADB_KEY_F5,
+ [Q_KEY_CODE_F6] = ADB_KEY_F6,
+ [Q_KEY_CODE_F7] = ADB_KEY_F7,
+ [Q_KEY_CODE_F8] = ADB_KEY_F8,
+ [Q_KEY_CODE_F9] = ADB_KEY_F9,
+ [Q_KEY_CODE_F10] = ADB_KEY_F10,
+ [Q_KEY_CODE_F11] = ADB_KEY_F11,
+ [Q_KEY_CODE_F12] = ADB_KEY_F12,
+ [Q_KEY_CODE_PRINT] = ADB_KEY_F13,
+ [Q_KEY_CODE_SYSRQ] = ADB_KEY_F13,
+ [Q_KEY_CODE_SCROLL_LOCK] = ADB_KEY_F14,
+ [Q_KEY_CODE_PAUSE] = ADB_KEY_F15,
+
+ [Q_KEY_CODE_NUM_LOCK] = ADB_KEY_KP_CLEAR,
+ [Q_KEY_CODE_KP_EQUALS] = ADB_KEY_KP_EQUAL,
+ [Q_KEY_CODE_KP_DIVIDE] = ADB_KEY_KP_DIVIDE,
+ [Q_KEY_CODE_KP_MULTIPLY] = ADB_KEY_KP_MULTIPLY,
+ [Q_KEY_CODE_KP_SUBTRACT] = ADB_KEY_KP_SUBTRACT,
+ [Q_KEY_CODE_KP_ADD] = ADB_KEY_KP_PLUS,
+ [Q_KEY_CODE_KP_ENTER] = ADB_KEY_KP_ENTER,
+ [Q_KEY_CODE_KP_DECIMAL] = ADB_KEY_KP_PERIOD,
+ [Q_KEY_CODE_KP_0] = ADB_KEY_KP_0,
+ [Q_KEY_CODE_KP_1] = ADB_KEY_KP_1,
+ [Q_KEY_CODE_KP_2] = ADB_KEY_KP_2,
+ [Q_KEY_CODE_KP_3] = ADB_KEY_KP_3,
+ [Q_KEY_CODE_KP_4] = ADB_KEY_KP_4,
+ [Q_KEY_CODE_KP_5] = ADB_KEY_KP_5,
+ [Q_KEY_CODE_KP_6] = ADB_KEY_KP_6,
+ [Q_KEY_CODE_KP_7] = ADB_KEY_KP_7,
+ [Q_KEY_CODE_KP_8] = ADB_KEY_KP_8,
+ [Q_KEY_CODE_KP_9] = ADB_KEY_KP_9,
+
+ [Q_KEY_CODE_UP] = ADB_KEY_UP,
+ [Q_KEY_CODE_DOWN] = ADB_KEY_DOWN,
+ [Q_KEY_CODE_LEFT] = ADB_KEY_LEFT,
+ [Q_KEY_CODE_RIGHT] = ADB_KEY_RIGHT,
+
+ [Q_KEY_CODE_HELP] = ADB_KEY_HELP,
+ [Q_KEY_CODE_INSERT] = ADB_KEY_HELP,
+ [Q_KEY_CODE_DELETE] = ADB_KEY_FORWARD_DELETE,
+ [Q_KEY_CODE_HOME] = ADB_KEY_HOME,
+ [Q_KEY_CODE_END] = ADB_KEY_END,
+ [Q_KEY_CODE_PGUP] = ADB_KEY_PAGE_UP,
+ [Q_KEY_CODE_PGDN] = ADB_KEY_PAGE_DOWN,
+
+ [Q_KEY_CODE_POWER] = ADB_KEY_POWER
};
static void adb_kbd_put_keycode(void *opaque, int keycode)
@@ -220,35 +328,40 @@ static void adb_kbd_put_keycode(void *opaque, int keycode)
static int adb_kbd_poll(ADBDevice *d, uint8_t *obuf)
{
- static int ext_keycode;
KBDState *s = ADB_KEYBOARD(d);
- int adb_keycode, keycode;
+ int keycode;
int olen;
olen = 0;
- for(;;) {
- if (s->count == 0)
- break;
- keycode = s->data[s->rptr];
- if (++s->rptr == sizeof(s->data))
- s->rptr = 0;
- s->count--;
-
- if (keycode == 0xe0) {
- ext_keycode = 1;
- } else {
- if (ext_keycode)
- adb_keycode = pc_to_adb_keycode[keycode | 0x80];
- else
- adb_keycode = pc_to_adb_keycode[keycode & 0x7f];
- obuf[0] = adb_keycode | (keycode & 0x80);
- /* NOTE: could put a second keycode if needed */
- obuf[1] = 0xff;
- olen = 2;
- ext_keycode = 0;
- break;
- }
+ if (s->count == 0) {
+ return 0;
}
+ keycode = s->data[s->rptr];
+ s->rptr++;
+ if (s->rptr == sizeof(s->data)) {
+ s->rptr = 0;
+ }
+ s->count--;
+ /*
+ * The power key is the only key that sends a two-byte value, so it is a
+ * special case. Since 0x7f is not a used ADB keycode, we overload it to
+ * indicate the power button when storing keycodes in our internal buffer,
+ * and expand it back out to two bytes when we send it to the guest.
+ */
+ if (keycode == 0x7f) {
+ obuf[0] = 0x7f;
+ obuf[1] = 0x7f;
+ olen = 2;
+ } else {
+ obuf[0] = keycode;
+ /* NOTE: the power key key-up is the two byte sequence 0xff 0xff;
+ * otherwise we could in theory send a second keycode in the second
+ * byte, but choose not to bother.
+ */
+ obuf[1] = 0xff;
+ olen = 2;
+ }
+
return olen;
}
@@ -283,9 +396,15 @@ static int adb_kbd_request(ADBDevice *d, uint8_t *obuf,
d->devaddr = buf[1] & 0xf;
break;
default:
- /* XXX: check this */
d->devaddr = buf[1] & 0xf;
- d->handler = buf[2];
+ /* we support handlers:
+ * 1: Apple Standard Keyboard
+ * 2: Apple Extended Keyboard (LShift = RShift)
+ * 3: Apple Extended Keyboard (LShift != RShift)
+ */
+ if (buf[2] == 1 || buf[2] == 2 || buf[2] == 3) {
+ d->handler = buf[2];
+ }
break;
}
}
@@ -313,6 +432,30 @@ static int adb_kbd_request(ADBDevice *d, uint8_t *obuf,
return olen;
}
+/* This is where keyboard events enter this file */
+static void adb_keyboard_event(DeviceState *dev, QemuConsole *src,
+ InputEvent *evt)
+{
+ KBDState *s = (KBDState *)dev;
+ int qcode, keycode;
+
+ qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key);
+ if (qcode >= ARRAY_SIZE(qcode_to_adb_keycode)) {
+ return;
+ }
+ /* FIXME: take handler into account when translating qcode */
+ keycode = qcode_to_adb_keycode[qcode];
+ if (keycode == NO_KEY) { /* We don't want to send this to the guest */
+ ADB_DPRINTF("Ignoring NO_KEY\n");
+ return;
+ }
+ if (evt->u.key.data->down == false) { /* if key release event */
+ keycode = keycode | 0x80; /* create keyboard break code */
+ }
+
+ adb_kbd_put_keycode(s, keycode);
+}
+
static const VMStateDescription vmstate_adb_kbd = {
.name = "adb_kbd",
.version_id = 2,
@@ -340,14 +483,17 @@ static void adb_kbd_reset(DeviceState *dev)
s->count = 0;
}
+static QemuInputHandler adb_keyboard_handler = {
+ .name = "QEMU ADB Keyboard",
+ .mask = INPUT_EVENT_MASK_KEY,
+ .event = adb_keyboard_event,
+};
+
static void adb_kbd_realizefn(DeviceState *dev, Error **errp)
{
- ADBDevice *d = ADB_DEVICE(dev);
ADBKeyboardClass *akc = ADB_KEYBOARD_GET_CLASS(dev);
-
akc->parent_realize(dev, errp);
-
- qemu_add_kbd_event_handler(adb_kbd_put_keycode, d);
+ qemu_input_handler_register(dev, &adb_keyboard_handler);
}
static void adb_kbd_initfn(Object *obj)
@@ -492,8 +638,21 @@ static int adb_mouse_request(ADBDevice *d, uint8_t *obuf,
d->devaddr = buf[1] & 0xf;
break;
default:
- /* XXX: check this */
d->devaddr = buf[1] & 0xf;
+ /* we support handlers:
+ * 0x01: Classic Apple Mouse Protocol / 100 cpi operations
+ * 0x02: Classic Apple Mouse Protocol / 200 cpi operations
+ * we don't support handlers (at least):
+ * 0x03: Mouse systems A3 trackball
+ * 0x04: Extended Apple Mouse Protocol
+ * 0x2f: Microspeed mouse
+ * 0x42: Macally
+ * 0x5f: Microspeed mouse
+ * 0x66: Microspeed mouse
+ */
+ if (buf[2] == 1 || buf[2] == 2) {
+ d->handler = buf[2];
+ }
break;
}
}
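
The rewritten keyboard path above maps QKeyCodes straight to ADB keycodes: entries that resolve to NO_KEY are dropped, and a key release sets bit 7 of the keycode to form the ADB break code. A standalone sketch of that lookup-and-break logic, with a made-up three-entry table standing in for qcode_to_adb_keycode (the enum values and keycodes here are illustrative, not the real Q_KEY_CODE_ or ADB_KEY_ constants):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_KEY 0xff

/* Illustrative stand-in for qcode_to_adb_keycode[]. */
enum { KEY_A, KEY_RETURN, KEY_VOLUME_UP, KEY_MAX };
static const uint8_t qcode_to_adb[KEY_MAX] = {
    [KEY_A]         = 0x00,
    [KEY_RETURN]    = 0x24,
    [KEY_VOLUME_UP] = NO_KEY,  /* no ADB equivalent: never sent to the guest */
};

/* Mirrors adb_keyboard_event: look the key up, drop unmapped keys and set
 * bit 7 on release to produce the break code. Returns -1 when dropped. */
static int adb_translate(int qcode, bool down)
{
    uint8_t keycode = qcode_to_adb[qcode];
    if (keycode == NO_KEY) {
        return -1;
    }
    return down ? keycode : (keycode | 0x80);
}

int main(void)
{
    printf("RETURN press:   0x%02x\n", adb_translate(KEY_RETURN, true));
    printf("RETURN release: 0x%02x\n", adb_translate(KEY_RETURN, false));
    printf("VOLUME_UP:      %d (dropped)\n", adb_translate(KEY_VOLUME_UP, true));
    return 0;
}
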
diff --git a/hw/input/hid.c b/hw/input/hid.c
index 5e2850e655..fa9cc4c616 100644
--- a/hw/input/hid.c
+++ b/hw/input/hid.c
@@ -46,7 +46,7 @@ static const uint8_t hid_usage_keys[0x100] = {
0xe2, 0x2c, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e,
0x3f, 0x40, 0x41, 0x42, 0x43, 0x53, 0x47, 0x5f,
0x60, 0x61, 0x56, 0x5c, 0x5d, 0x5e, 0x57, 0x59,
- 0x5a, 0x5b, 0x62, 0x63, 0x00, 0x00, 0x64, 0x44,
+ 0x5a, 0x5b, 0x62, 0x63, 0x46, 0x00, 0x64, 0x44,
0x45, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e,
0xe8, 0xe9, 0x71, 0x72, 0x73, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x85, 0x00, 0x00, 0x00, 0x00,
@@ -61,7 +61,7 @@ static const uint8_t hid_usage_keys[0x100] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x00, 0x46,
0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x48, 0x4a,
0x52, 0x4b, 0x00, 0x50, 0x00, 0x4f, 0x00, 0x4d,
0x51, 0x4e, 0x49, 0x4c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe3, 0xe7, 0x65, 0x00, 0x00,
diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c
index dc57e2c762..d414288839 100644
--- a/hw/input/pckbd.c
+++ b/hw/input/pckbd.c
@@ -499,9 +499,9 @@ void i8042_isa_mouse_fake_event(void *opaque)
ps2_mouse_fake_event(s->mouse);
}
-void i8042_setup_a20_line(ISADevice *dev, qemu_irq *a20_out)
+void i8042_setup_a20_line(ISADevice *dev, qemu_irq a20_out)
{
- qdev_connect_gpio_out_named(DEVICE(dev), I8042_A20_LINE, 0, *a20_out);
+ qdev_connect_gpio_out_named(DEVICE(dev), I8042_A20_LINE, 0, a20_out);
}
static const VMStateDescription vmstate_kbd_isa = {
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index 984a2638bf..d50a27eea5 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -22,6 +22,7 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "hw/hw.h"
#include "hw/input/ps2.h"
#include "ui/console.h"
@@ -99,12 +100,10 @@ typedef struct {
typedef struct {
PS2State common;
int scan_enabled;
- /* QEMU uses translated PC scancodes internally. To avoid multiple
- conversions we do the translation (if any) in the PS/2 emulation
- not the keyboard controller. */
int translate;
int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */
int ledstate;
+ bool need_high_bit;
} PS2KbdState;
typedef struct {
@@ -121,26 +120,430 @@ typedef struct {
uint8_t mouse_buttons;
} PS2MouseState;
-/* Table to convert from PC scancodes to raw scancodes. */
-static const unsigned char ps2_raw_keycode[128] = {
- 0, 118, 22, 30, 38, 37, 46, 54, 61, 62, 70, 69, 78, 85, 102, 13,
- 21, 29, 36, 45, 44, 53, 60, 67, 68, 77, 84, 91, 90, 20, 28, 27,
- 35, 43, 52, 51, 59, 66, 75, 76, 82, 14, 18, 93, 26, 34, 33, 42,
- 50, 49, 58, 65, 73, 74, 89, 124, 17, 41, 88, 5, 6, 4, 12, 3,
- 11, 2, 10, 1, 9, 119, 126, 108, 117, 125, 123, 107, 115, 116, 121, 105,
-114, 122, 112, 113, 127, 96, 97, 120, 7, 15, 23, 31, 39, 47, 55, 63,
- 71, 79, 86, 94, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 87, 111,
- 19, 25, 57, 81, 83, 92, 95, 98, 99, 100, 101, 103, 104, 106, 109, 110
+/* Table to convert from QEMU codes to scancodes. */
+static const uint16_t qcode_to_keycode_set1[Q_KEY_CODE__MAX] = {
+ [0 ... Q_KEY_CODE__MAX - 1] = 0,
+
+ [Q_KEY_CODE_A] = 0x1e,
+ [Q_KEY_CODE_B] = 0x30,
+ [Q_KEY_CODE_C] = 0x2e,
+ [Q_KEY_CODE_D] = 0x20,
+ [Q_KEY_CODE_E] = 0x12,
+ [Q_KEY_CODE_F] = 0x21,
+ [Q_KEY_CODE_G] = 0x22,
+ [Q_KEY_CODE_H] = 0x23,
+ [Q_KEY_CODE_I] = 0x17,
+ [Q_KEY_CODE_J] = 0x24,
+ [Q_KEY_CODE_K] = 0x25,
+ [Q_KEY_CODE_L] = 0x26,
+ [Q_KEY_CODE_M] = 0x32,
+ [Q_KEY_CODE_N] = 0x31,
+ [Q_KEY_CODE_O] = 0x18,
+ [Q_KEY_CODE_P] = 0x19,
+ [Q_KEY_CODE_Q] = 0x10,
+ [Q_KEY_CODE_R] = 0x13,
+ [Q_KEY_CODE_S] = 0x1f,
+ [Q_KEY_CODE_T] = 0x14,
+ [Q_KEY_CODE_U] = 0x16,
+ [Q_KEY_CODE_V] = 0x2f,
+ [Q_KEY_CODE_W] = 0x11,
+ [Q_KEY_CODE_X] = 0x2d,
+ [Q_KEY_CODE_Y] = 0x15,
+ [Q_KEY_CODE_Z] = 0x2c,
+ [Q_KEY_CODE_0] = 0x0b,
+ [Q_KEY_CODE_1] = 0x02,
+ [Q_KEY_CODE_2] = 0x03,
+ [Q_KEY_CODE_3] = 0x04,
+ [Q_KEY_CODE_4] = 0x05,
+ [Q_KEY_CODE_5] = 0x06,
+ [Q_KEY_CODE_6] = 0x07,
+ [Q_KEY_CODE_7] = 0x08,
+ [Q_KEY_CODE_8] = 0x09,
+ [Q_KEY_CODE_9] = 0x0a,
+ [Q_KEY_CODE_GRAVE_ACCENT] = 0x29,
+ [Q_KEY_CODE_MINUS] = 0x0c,
+ [Q_KEY_CODE_EQUAL] = 0x0d,
+ [Q_KEY_CODE_BACKSLASH] = 0x2b,
+ [Q_KEY_CODE_BACKSPACE] = 0x0e,
+ [Q_KEY_CODE_SPC] = 0x39,
+ [Q_KEY_CODE_TAB] = 0x0f,
+ [Q_KEY_CODE_CAPS_LOCK] = 0x3a,
+ [Q_KEY_CODE_SHIFT] = 0x2a,
+ [Q_KEY_CODE_CTRL] = 0x1d,
+ [Q_KEY_CODE_META_L] = 0xe05b,
+ [Q_KEY_CODE_ALT] = 0x38,
+ [Q_KEY_CODE_SHIFT_R] = 0x36,
+ [Q_KEY_CODE_CTRL_R] = 0xe01d,
+ [Q_KEY_CODE_META_R] = 0xe05c,
+ [Q_KEY_CODE_ALT_R] = 0xe038,
+ [Q_KEY_CODE_MENU] = 0xe05d,
+ [Q_KEY_CODE_RET] = 0x1c,
+ [Q_KEY_CODE_ESC] = 0x01,
+ [Q_KEY_CODE_F1] = 0x3b,
+ [Q_KEY_CODE_F2] = 0x3c,
+ [Q_KEY_CODE_F3] = 0x3d,
+ [Q_KEY_CODE_F4] = 0x3e,
+ [Q_KEY_CODE_F5] = 0x3f,
+ [Q_KEY_CODE_F6] = 0x40,
+ [Q_KEY_CODE_F7] = 0x41,
+ [Q_KEY_CODE_F8] = 0x42,
+ [Q_KEY_CODE_F9] = 0x43,
+ [Q_KEY_CODE_F10] = 0x44,
+ [Q_KEY_CODE_F11] = 0x57,
+ [Q_KEY_CODE_F12] = 0x58,
+ /* special handling for Q_KEY_CODE_PRINT */
+ [Q_KEY_CODE_SCROLL_LOCK] = 0x46,
+ /* special handling for Q_KEY_CODE_PAUSE */
+ [Q_KEY_CODE_BRACKET_LEFT] = 0x1a,
+ [Q_KEY_CODE_INSERT] = 0xe052,
+ [Q_KEY_CODE_HOME] = 0xe047,
+ [Q_KEY_CODE_PGUP] = 0xe049,
+ [Q_KEY_CODE_DELETE] = 0xe053,
+ [Q_KEY_CODE_END] = 0xe04f,
+ [Q_KEY_CODE_PGDN] = 0xe051,
+ [Q_KEY_CODE_UP] = 0xe048,
+ [Q_KEY_CODE_LEFT] = 0xe04b,
+ [Q_KEY_CODE_DOWN] = 0xe050,
+ [Q_KEY_CODE_RIGHT] = 0xe04d,
+ [Q_KEY_CODE_NUM_LOCK] = 0x45,
+ [Q_KEY_CODE_KP_DIVIDE] = 0xe035,
+ [Q_KEY_CODE_KP_MULTIPLY] = 0x37,
+ [Q_KEY_CODE_KP_SUBTRACT] = 0x4a,
+ [Q_KEY_CODE_KP_ADD] = 0x4e,
+ [Q_KEY_CODE_KP_ENTER] = 0xe01c,
+ [Q_KEY_CODE_KP_DECIMAL] = 0x53,
+ [Q_KEY_CODE_KP_0] = 0x52,
+ [Q_KEY_CODE_KP_1] = 0x4f,
+ [Q_KEY_CODE_KP_2] = 0x50,
+ [Q_KEY_CODE_KP_3] = 0x51,
+ [Q_KEY_CODE_KP_4] = 0x4b,
+ [Q_KEY_CODE_KP_5] = 0x4c,
+ [Q_KEY_CODE_KP_6] = 0x4d,
+ [Q_KEY_CODE_KP_7] = 0x47,
+ [Q_KEY_CODE_KP_8] = 0x48,
+ [Q_KEY_CODE_KP_9] = 0x49,
+ [Q_KEY_CODE_BRACKET_RIGHT] = 0x1b,
+ [Q_KEY_CODE_SEMICOLON] = 0x27,
+ [Q_KEY_CODE_APOSTROPHE] = 0x28,
+ [Q_KEY_CODE_COMMA] = 0x33,
+ [Q_KEY_CODE_DOT] = 0x34,
+ [Q_KEY_CODE_SLASH] = 0x35,
+
+#if 0
+ [Q_KEY_CODE_POWER] = 0x0e5e,
+ [Q_KEY_CODE_SLEEP] = 0x0e5f,
+ [Q_KEY_CODE_WAKE] = 0x0e63,
+
+ [Q_KEY_CODE_AUDIONEXT] = 0xe019,
+ [Q_KEY_CODE_AUDIOPREV] = 0xe010,
+ [Q_KEY_CODE_AUDIOSTOP] = 0xe024,
+ [Q_KEY_CODE_AUDIOPLAY] = 0xe022,
+ [Q_KEY_CODE_AUDIOMUTE] = 0xe020,
+ [Q_KEY_CODE_VOLUMEUP] = 0xe030,
+ [Q_KEY_CODE_VOLUMEDOWN] = 0xe02e,
+ [Q_KEY_CODE_MEDIASELECT] = 0xe06d,
+ [Q_KEY_CODE_MAIL] = 0xe06c,
+ [Q_KEY_CODE_CALCULATOR] = 0xe021,
+ [Q_KEY_CODE_COMPUTER] = 0xe06b,
+ [Q_KEY_CODE_AC_SEARCH] = 0xe065,
+ [Q_KEY_CODE_AC_HOME] = 0xe032,
+ [Q_KEY_CODE_AC_BACK] = 0xe06a,
+ [Q_KEY_CODE_AC_FORWARD] = 0xe069,
+ [Q_KEY_CODE_AC_STOP] = 0xe068,
+ [Q_KEY_CODE_AC_REFRESH] = 0xe067,
+ [Q_KEY_CODE_AC_BOOKMARKS] = 0xe066,
+#endif
+
+ [Q_KEY_CODE_ASTERISK] = 0x37,
+ [Q_KEY_CODE_LESS] = 0x56,
+ [Q_KEY_CODE_RO] = 0x73,
+ [Q_KEY_CODE_KP_COMMA] = 0x7e,
};
-static const unsigned char ps2_raw_keycode_set3[128] = {
- 0, 8, 22, 30, 38, 37, 46, 54, 61, 62, 70, 69, 78, 85, 102, 13,
- 21, 29, 36, 45, 44, 53, 60, 67, 68, 77, 84, 91, 90, 17, 28, 27,
- 35, 43, 52, 51, 59, 66, 75, 76, 82, 14, 18, 92, 26, 34, 33, 42,
- 50, 49, 58, 65, 73, 74, 89, 126, 25, 41, 20, 7, 15, 23, 31, 39,
- 47, 2, 63, 71, 79, 118, 95, 108, 117, 125, 132, 107, 115, 116, 124, 105,
-114, 122, 112, 113, 127, 96, 97, 86, 94, 15, 23, 31, 39, 47, 55, 63,
- 71, 79, 86, 94, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 87, 111,
- 19, 25, 57, 81, 83, 92, 95, 98, 99, 100, 101, 103, 104, 106, 109, 110
+
+static const uint16_t qcode_to_keycode_set2[Q_KEY_CODE__MAX] = {
+ [0 ... Q_KEY_CODE__MAX - 1] = 0,
+
+ [Q_KEY_CODE_A] = 0x1c,
+ [Q_KEY_CODE_B] = 0x32,
+ [Q_KEY_CODE_C] = 0x21,
+ [Q_KEY_CODE_D] = 0x23,
+ [Q_KEY_CODE_E] = 0x24,
+ [Q_KEY_CODE_F] = 0x2b,
+ [Q_KEY_CODE_G] = 0x34,
+ [Q_KEY_CODE_H] = 0x33,
+ [Q_KEY_CODE_I] = 0x43,
+ [Q_KEY_CODE_J] = 0x3b,
+ [Q_KEY_CODE_K] = 0x42,
+ [Q_KEY_CODE_L] = 0x4b,
+ [Q_KEY_CODE_M] = 0x3a,
+ [Q_KEY_CODE_N] = 0x31,
+ [Q_KEY_CODE_O] = 0x44,
+ [Q_KEY_CODE_P] = 0x4d,
+ [Q_KEY_CODE_Q] = 0x15,
+ [Q_KEY_CODE_R] = 0x2d,
+ [Q_KEY_CODE_S] = 0x1b,
+ [Q_KEY_CODE_T] = 0x2c,
+ [Q_KEY_CODE_U] = 0x3c,
+ [Q_KEY_CODE_V] = 0x2a,
+ [Q_KEY_CODE_W] = 0x1d,
+ [Q_KEY_CODE_X] = 0x22,
+ [Q_KEY_CODE_Y] = 0x35,
+ [Q_KEY_CODE_Z] = 0x1a,
+ [Q_KEY_CODE_0] = 0x45,
+ [Q_KEY_CODE_1] = 0x16,
+ [Q_KEY_CODE_2] = 0x1e,
+ [Q_KEY_CODE_3] = 0x26,
+ [Q_KEY_CODE_4] = 0x25,
+ [Q_KEY_CODE_5] = 0x2e,
+ [Q_KEY_CODE_6] = 0x36,
+ [Q_KEY_CODE_7] = 0x3d,
+ [Q_KEY_CODE_8] = 0x3e,
+ [Q_KEY_CODE_9] = 0x46,
+ [Q_KEY_CODE_GRAVE_ACCENT] = 0x0e,
+ [Q_KEY_CODE_MINUS] = 0x4e,
+ [Q_KEY_CODE_EQUAL] = 0x55,
+ [Q_KEY_CODE_BACKSLASH] = 0x5d,
+ [Q_KEY_CODE_BACKSPACE] = 0x66,
+ [Q_KEY_CODE_SPC] = 0x29,
+ [Q_KEY_CODE_TAB] = 0x0d,
+ [Q_KEY_CODE_CAPS_LOCK] = 0x58,
+ [Q_KEY_CODE_SHIFT] = 0x12,
+ [Q_KEY_CODE_CTRL] = 0x14,
+ [Q_KEY_CODE_META_L] = 0xe01f,
+ [Q_KEY_CODE_ALT] = 0x11,
+ [Q_KEY_CODE_SHIFT_R] = 0x59,
+ [Q_KEY_CODE_CTRL_R] = 0xe014,
+ [Q_KEY_CODE_META_R] = 0xe027,
+ [Q_KEY_CODE_ALT_R] = 0xe011,
+ [Q_KEY_CODE_MENU] = 0xe02f,
+ [Q_KEY_CODE_RET] = 0x5a,
+ [Q_KEY_CODE_ESC] = 0x76,
+ [Q_KEY_CODE_F1] = 0x05,
+ [Q_KEY_CODE_F2] = 0x06,
+ [Q_KEY_CODE_F3] = 0x04,
+ [Q_KEY_CODE_F4] = 0x0c,
+ [Q_KEY_CODE_F5] = 0x03,
+ [Q_KEY_CODE_F6] = 0x0b,
+ [Q_KEY_CODE_F7] = 0x83,
+ [Q_KEY_CODE_F8] = 0x0a,
+ [Q_KEY_CODE_F9] = 0x01,
+ [Q_KEY_CODE_F10] = 0x09,
+ [Q_KEY_CODE_F11] = 0x78,
+ [Q_KEY_CODE_F12] = 0x07,
+ /* special handling for Q_KEY_CODE_PRINT */
+ [Q_KEY_CODE_SCROLL_LOCK] = 0x7e,
+ /* special handling for Q_KEY_CODE_PAUSE */
+ [Q_KEY_CODE_BRACKET_LEFT] = 0x54,
+ [Q_KEY_CODE_INSERT] = 0xe070,
+ [Q_KEY_CODE_HOME] = 0xe06c,
+ [Q_KEY_CODE_PGUP] = 0xe07d,
+ [Q_KEY_CODE_DELETE] = 0xe071,
+ [Q_KEY_CODE_END] = 0xe069,
+ [Q_KEY_CODE_PGDN] = 0xe07a,
+ [Q_KEY_CODE_UP] = 0xe075,
+ [Q_KEY_CODE_LEFT] = 0xe06b,
+ [Q_KEY_CODE_DOWN] = 0xe072,
+ [Q_KEY_CODE_RIGHT] = 0xe074,
+ [Q_KEY_CODE_NUM_LOCK] = 0x77,
+ [Q_KEY_CODE_KP_DIVIDE] = 0xe04a,
+ [Q_KEY_CODE_KP_MULTIPLY] = 0x7c,
+ [Q_KEY_CODE_KP_SUBTRACT] = 0x7b,
+ [Q_KEY_CODE_KP_ADD] = 0x79,
+ [Q_KEY_CODE_KP_ENTER] = 0xe05a,
+ [Q_KEY_CODE_KP_DECIMAL] = 0x71,
+ [Q_KEY_CODE_KP_0] = 0x70,
+ [Q_KEY_CODE_KP_1] = 0x69,
+ [Q_KEY_CODE_KP_2] = 0x72,
+ [Q_KEY_CODE_KP_3] = 0x7a,
+ [Q_KEY_CODE_KP_4] = 0x6b,
+ [Q_KEY_CODE_KP_5] = 0x73,
+ [Q_KEY_CODE_KP_6] = 0x74,
+ [Q_KEY_CODE_KP_7] = 0x6c,
+ [Q_KEY_CODE_KP_8] = 0x75,
+ [Q_KEY_CODE_KP_9] = 0x7d,
+ [Q_KEY_CODE_BRACKET_RIGHT] = 0x5b,
+ [Q_KEY_CODE_SEMICOLON] = 0x4c,
+ [Q_KEY_CODE_APOSTROPHE] = 0x52,
+ [Q_KEY_CODE_COMMA] = 0x41,
+ [Q_KEY_CODE_DOT] = 0x49,
+ [Q_KEY_CODE_SLASH] = 0x4a,
+
+#if 0
+ [Q_KEY_CODE_POWER] = 0x0e37,
+ [Q_KEY_CODE_SLEEP] = 0x0e3f,
+ [Q_KEY_CODE_WAKE] = 0x0e5e,
+
+ [Q_KEY_CODE_AUDIONEXT] = 0xe04d,
+ [Q_KEY_CODE_AUDIOPREV] = 0xe015,
+ [Q_KEY_CODE_AUDIOSTOP] = 0xe03b,
+ [Q_KEY_CODE_AUDIOPLAY] = 0xe034,
+ [Q_KEY_CODE_AUDIOMUTE] = 0xe023,
+ [Q_KEY_CODE_VOLUMEUP] = 0xe032,
+ [Q_KEY_CODE_VOLUMEDOWN] = 0xe021,
+ [Q_KEY_CODE_MEDIASELECT] = 0xe050,
+ [Q_KEY_CODE_MAIL] = 0xe048,
+ [Q_KEY_CODE_CALCULATOR] = 0xe02b,
+ [Q_KEY_CODE_COMPUTER] = 0xe040,
+ [Q_KEY_CODE_AC_SEARCH] = 0xe010,
+ [Q_KEY_CODE_AC_HOME] = 0xe03a,
+ [Q_KEY_CODE_AC_BACK] = 0xe038,
+ [Q_KEY_CODE_AC_FORWARD] = 0xe030,
+ [Q_KEY_CODE_AC_STOP] = 0xe028,
+ [Q_KEY_CODE_AC_REFRESH] = 0xe020,
+ [Q_KEY_CODE_AC_BOOKMARKS] = 0xe018,
+#endif
+
+ [Q_KEY_CODE_ALTGR] = 0x08,
+ [Q_KEY_CODE_ALTGR_R] = 0xe008,
+ [Q_KEY_CODE_ASTERISK] = 0x7c,
+ [Q_KEY_CODE_LESS] = 0x61,
+ [Q_KEY_CODE_SYSRQ] = 0x7f,
+ [Q_KEY_CODE_RO] = 0x51,
+ [Q_KEY_CODE_KP_COMMA] = 0x6d,
+};
+
+static const uint16_t qcode_to_keycode_set3[Q_KEY_CODE__MAX] = {
+ [0 ... Q_KEY_CODE__MAX - 1] = 0,
+
+ [Q_KEY_CODE_A] = 0x1c,
+ [Q_KEY_CODE_B] = 0x32,
+ [Q_KEY_CODE_C] = 0x21,
+ [Q_KEY_CODE_D] = 0x23,
+ [Q_KEY_CODE_E] = 0x24,
+ [Q_KEY_CODE_F] = 0x2b,
+ [Q_KEY_CODE_G] = 0x34,
+ [Q_KEY_CODE_H] = 0x33,
+ [Q_KEY_CODE_I] = 0x43,
+ [Q_KEY_CODE_J] = 0x3b,
+ [Q_KEY_CODE_K] = 0x42,
+ [Q_KEY_CODE_L] = 0x4b,
+ [Q_KEY_CODE_M] = 0x3a,
+ [Q_KEY_CODE_N] = 0x31,
+ [Q_KEY_CODE_O] = 0x44,
+ [Q_KEY_CODE_P] = 0x4d,
+ [Q_KEY_CODE_Q] = 0x15,
+ [Q_KEY_CODE_R] = 0x2d,
+ [Q_KEY_CODE_S] = 0x1b,
+ [Q_KEY_CODE_T] = 0x2c,
+ [Q_KEY_CODE_U] = 0x3c,
+ [Q_KEY_CODE_V] = 0x2a,
+ [Q_KEY_CODE_W] = 0x1d,
+ [Q_KEY_CODE_X] = 0x22,
+ [Q_KEY_CODE_Y] = 0x35,
+ [Q_KEY_CODE_Z] = 0x1a,
+ [Q_KEY_CODE_0] = 0x45,
+ [Q_KEY_CODE_1] = 0x16,
+ [Q_KEY_CODE_2] = 0x1e,
+ [Q_KEY_CODE_3] = 0x26,
+ [Q_KEY_CODE_4] = 0x25,
+ [Q_KEY_CODE_5] = 0x2e,
+ [Q_KEY_CODE_6] = 0x36,
+ [Q_KEY_CODE_7] = 0x3d,
+ [Q_KEY_CODE_8] = 0x3e,
+ [Q_KEY_CODE_9] = 0x46,
+ [Q_KEY_CODE_GRAVE_ACCENT] = 0x0e,
+ [Q_KEY_CODE_MINUS] = 0x4e,
+ [Q_KEY_CODE_EQUAL] = 0x55,
+ [Q_KEY_CODE_BACKSLASH] = 0x5c,
+ [Q_KEY_CODE_BACKSPACE] = 0x66,
+ [Q_KEY_CODE_SPC] = 0x29,
+ [Q_KEY_CODE_TAB] = 0x0d,
+ [Q_KEY_CODE_CAPS_LOCK] = 0x14,
+ [Q_KEY_CODE_SHIFT] = 0x12,
+ [Q_KEY_CODE_CTRL] = 0x11,
+ [Q_KEY_CODE_META_L] = 0x8b,
+ [Q_KEY_CODE_ALT] = 0x19,
+ [Q_KEY_CODE_SHIFT_R] = 0x59,
+ [Q_KEY_CODE_CTRL_R] = 0x58,
+ [Q_KEY_CODE_META_R] = 0x8c,
+ [Q_KEY_CODE_ALT_R] = 0x39,
+ [Q_KEY_CODE_MENU] = 0x8d,
+ [Q_KEY_CODE_RET] = 0x5a,
+ [Q_KEY_CODE_ESC] = 0x08,
+ [Q_KEY_CODE_F1] = 0x07,
+ [Q_KEY_CODE_F2] = 0x0f,
+ [Q_KEY_CODE_F3] = 0x17,
+ [Q_KEY_CODE_F4] = 0x1f,
+ [Q_KEY_CODE_F5] = 0x27,
+ [Q_KEY_CODE_F6] = 0x2f,
+ [Q_KEY_CODE_F7] = 0x37,
+ [Q_KEY_CODE_F8] = 0x3f,
+ [Q_KEY_CODE_F9] = 0x47,
+ [Q_KEY_CODE_F10] = 0x4f,
+ [Q_KEY_CODE_F11] = 0x56,
+ [Q_KEY_CODE_F12] = 0x5e,
+ [Q_KEY_CODE_PRINT] = 0x57,
+ [Q_KEY_CODE_SCROLL_LOCK] = 0x5f,
+ [Q_KEY_CODE_PAUSE] = 0x62,
+ [Q_KEY_CODE_BRACKET_LEFT] = 0x54,
+ [Q_KEY_CODE_INSERT] = 0x67,
+ [Q_KEY_CODE_HOME] = 0x6e,
+ [Q_KEY_CODE_PGUP] = 0x6f,
+ [Q_KEY_CODE_DELETE] = 0x64,
+ [Q_KEY_CODE_END] = 0x65,
+ [Q_KEY_CODE_PGDN] = 0x6d,
+ [Q_KEY_CODE_UP] = 0x63,
+ [Q_KEY_CODE_LEFT] = 0x61,
+ [Q_KEY_CODE_DOWN] = 0x60,
+ [Q_KEY_CODE_RIGHT] = 0x6a,
+ [Q_KEY_CODE_NUM_LOCK] = 0x76,
+ [Q_KEY_CODE_KP_DIVIDE] = 0x4a,
+ [Q_KEY_CODE_KP_MULTIPLY] = 0x7e,
+ [Q_KEY_CODE_KP_SUBTRACT] = 0x4e,
+ [Q_KEY_CODE_KP_ADD] = 0x7c,
+ [Q_KEY_CODE_KP_ENTER] = 0x79,
+ [Q_KEY_CODE_KP_DECIMAL] = 0x71,
+ [Q_KEY_CODE_KP_0] = 0x70,
+ [Q_KEY_CODE_KP_1] = 0x69,
+ [Q_KEY_CODE_KP_2] = 0x72,
+ [Q_KEY_CODE_KP_3] = 0x7a,
+ [Q_KEY_CODE_KP_4] = 0x6b,
+ [Q_KEY_CODE_KP_5] = 0x73,
+ [Q_KEY_CODE_KP_6] = 0x74,
+ [Q_KEY_CODE_KP_7] = 0x6c,
+ [Q_KEY_CODE_KP_8] = 0x75,
+ [Q_KEY_CODE_KP_9] = 0x7d,
+ [Q_KEY_CODE_BRACKET_RIGHT] = 0x5b,
+ [Q_KEY_CODE_SEMICOLON] = 0x4c,
+ [Q_KEY_CODE_APOSTROPHE] = 0x52,
+ [Q_KEY_CODE_COMMA] = 0x41,
+ [Q_KEY_CODE_DOT] = 0x49,
+ [Q_KEY_CODE_SLASH] = 0x4a,
+};
+
+static uint8_t translate_table[256] = {
+ 0xff, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x3c, 0x58,
+ 0x64, 0x44, 0x42, 0x40, 0x3e, 0x0f, 0x29, 0x59,
+ 0x65, 0x38, 0x2a, 0x70, 0x1d, 0x10, 0x02, 0x5a,
+ 0x66, 0x71, 0x2c, 0x1f, 0x1e, 0x11, 0x03, 0x5b,
+ 0x67, 0x2e, 0x2d, 0x20, 0x12, 0x05, 0x04, 0x5c,
+ 0x68, 0x39, 0x2f, 0x21, 0x14, 0x13, 0x06, 0x5d,
+ 0x69, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x5e,
+ 0x6a, 0x72, 0x32, 0x24, 0x16, 0x08, 0x09, 0x5f,
+ 0x6b, 0x33, 0x25, 0x17, 0x18, 0x0b, 0x0a, 0x60,
+ 0x6c, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0c, 0x61,
+ 0x6d, 0x73, 0x28, 0x74, 0x1a, 0x0d, 0x62, 0x6e,
+ 0x3a, 0x36, 0x1c, 0x1b, 0x75, 0x2b, 0x63, 0x76,
+ 0x55, 0x56, 0x77, 0x78, 0x79, 0x7a, 0x0e, 0x7b,
+ 0x7c, 0x4f, 0x7d, 0x4b, 0x47, 0x7e, 0x7f, 0x6f,
+ 0x52, 0x53, 0x50, 0x4c, 0x4d, 0x48, 0x01, 0x45,
+ 0x57, 0x4e, 0x51, 0x4a, 0x37, 0x49, 0x46, 0x54,
+ 0x80, 0x81, 0x82, 0x41, 0x54, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
};
void ps2_queue(void *opaque, int b)
@@ -157,44 +560,130 @@ void ps2_queue(void *opaque, int b)
s->update_irq(s->update_arg, 1);
}
-/*
- keycode is expressed as follow:
- bit 7 - 0 key pressed, 1 = key released
- bits 6-0 - translated scancode set 2
- */
+/* keycode is the untranslated scancode in the current scancode set. */
static void ps2_put_keycode(void *opaque, int keycode)
{
PS2KbdState *s = opaque;
trace_ps2_put_keycode(opaque, keycode);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
- /* XXX: add support for scancode set 1 */
- if (!s->translate && keycode < 0xe0 && s->scancode_set > 1) {
- if (keycode & 0x80) {
- ps2_queue(&s->common, 0xf0);
- }
- if (s->scancode_set == 2) {
- keycode = ps2_raw_keycode[keycode & 0x7f];
- } else if (s->scancode_set == 3) {
- keycode = ps2_raw_keycode_set3[keycode & 0x7f];
+
+ if (s->translate) {
+ if (keycode == 0xf0) {
+ s->need_high_bit = true;
+ } else if (s->need_high_bit) {
+ ps2_queue(&s->common, translate_table[keycode] | 0x80);
+ s->need_high_bit = false;
+ } else {
+ ps2_queue(&s->common, translate_table[keycode]);
}
- }
- ps2_queue(&s->common, keycode);
+ } else {
+ ps2_queue(&s->common, keycode);
+ }
}
static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src,
InputEvent *evt)
{
PS2KbdState *s = (PS2KbdState *)dev;
- int scancodes[3], i, count;
InputKeyEvent *key = evt->u.key.data;
+ int qcode;
+ uint16_t keycode;
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
- count = qemu_input_key_value_to_scancode(key->key,
- key->down,
- scancodes);
- for (i = 0; i < count; i++) {
- ps2_put_keycode(s, scancodes[i]);
+ assert(evt->type == INPUT_EVENT_KIND_KEY);
+ qcode = qemu_input_key_value_to_qcode(key->key);
+
+ if (s->scancode_set == 1) {
+ if (qcode == Q_KEY_CODE_PAUSE) {
+ if (key->down) {
+ ps2_put_keycode(s, 0xe1);
+ ps2_put_keycode(s, 0x1d);
+ ps2_put_keycode(s, 0x45);
+ ps2_put_keycode(s, 0x91);
+ ps2_put_keycode(s, 0x9d);
+ ps2_put_keycode(s, 0xc5);
+ }
+ } else if (qcode == Q_KEY_CODE_PRINT) {
+ if (key->down) {
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0x2a);
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0x37);
+ } else {
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0xb7);
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0xaa);
+ }
+ } else {
+ keycode = qcode_to_keycode_set1[qcode];
+ if (keycode) {
+ if (keycode & 0xff00) {
+ ps2_put_keycode(s, keycode >> 8);
+ }
+ if (!key->down) {
+ keycode |= 0x80;
+ }
+ ps2_put_keycode(s, keycode & 0xff);
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "ps2: ignoring key with qcode %d\n", qcode);
+ }
+ }
+ } else if (s->scancode_set == 2) {
+ if (qcode == Q_KEY_CODE_PAUSE) {
+ if (key->down) {
+ ps2_put_keycode(s, 0xe1);
+ ps2_put_keycode(s, 0x14);
+ ps2_put_keycode(s, 0x77);
+ ps2_put_keycode(s, 0xe1);
+ ps2_put_keycode(s, 0xf0);
+ ps2_put_keycode(s, 0x14);
+ ps2_put_keycode(s, 0xf0);
+ ps2_put_keycode(s, 0x77);
+ }
+ } else if (qcode == Q_KEY_CODE_PRINT) {
+ if (key->down) {
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0x12);
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0x7c);
+ } else {
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0xf0);
+ ps2_put_keycode(s, 0x7c);
+ ps2_put_keycode(s, 0xe0);
+ ps2_put_keycode(s, 0xf0);
+ ps2_put_keycode(s, 0x12);
+ }
+ } else {
+ keycode = qcode_to_keycode_set2[qcode];
+ if (keycode) {
+ if (keycode & 0xff00) {
+ ps2_put_keycode(s, keycode >> 8);
+ }
+ if (!key->down) {
+ ps2_put_keycode(s, 0xf0);
+ }
+ ps2_put_keycode(s, keycode & 0xff);
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "ps2: ignoring key with qcode %d\n", qcode);
+ }
+ }
+ } else if (s->scancode_set == 3) {
+ keycode = qcode_to_keycode_set3[qcode];
+ if (keycode) {
+ /* FIXME: break code should be configured on a key by key basis */
+ if (!key->down) {
+ ps2_put_keycode(s, 0xf0);
+ }
+ ps2_put_keycode(s, keycode);
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "ps2: ignoring key with qcode %d\n", qcode);
+ }
}
}
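
With the qcode tables above, ps2_put_keycode now receives raw bytes in the keyboard's current scancode set. In translated mode it remembers an 0xf0 break prefix in need_high_bit and folds it into bit 7 of the translated set-1 code. A minimal standalone sketch of that folding, using just the Enter entry of translate_table (set-2 make code 0x5a, set-1 make code 0x1c); the input stream is a made-up press/release pair:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One entry lifted from translate_table above; the real table has 256. */
static uint8_t translate(uint8_t set2)
{
    return set2 == 0x5a ? 0x1c : 0x00;  /* everything else out of scope here */
}

int main(void)
{
    /* Set-2 bytes for Enter: make 0x5a, break 0xf0 0x5a.  The guest in
     * translated mode should see set-1 codes 0x1c and then 0x9c. */
    const uint8_t stream[] = { 0x5a, 0xf0, 0x5a };
    bool need_high_bit = false;

    for (unsigned i = 0; i < sizeof(stream); i++) {
        if (stream[i] == 0xf0) {
            need_high_bit = true;       /* remember the break prefix */
        } else {
            printf("queue 0x%02x\n",
                   translate(stream[i]) | (need_high_bit ? 0x80 : 0x00));
            need_high_bit = false;
        }
    }
    return 0;
}
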
@@ -295,22 +784,19 @@ void ps2_write_keyboard(void *opaque, int val)
ps2_queue(&s->common, KBD_REPLY_POR);
break;
default:
- ps2_queue(&s->common, KBD_REPLY_ACK);
+ ps2_queue(&s->common, KBD_REPLY_RESEND);
break;
}
break;
case KBD_CMD_SCANCODE:
if (val == 0) {
- if (s->scancode_set == 1)
- ps2_put_keycode(s, 0x43);
- else if (s->scancode_set == 2)
- ps2_put_keycode(s, 0x41);
- else if (s->scancode_set == 3)
- ps2_put_keycode(s, 0x3f);
- } else {
- if (val >= 1 && val <= 3)
- s->scancode_set = val;
ps2_queue(&s->common, KBD_REPLY_ACK);
+ ps2_put_keycode(s, s->scancode_set);
+ } else if (val >= 1 && val <= 3) {
+ s->scancode_set = val;
+ ps2_queue(&s->common, KBD_REPLY_ACK);
+ } else {
+ ps2_queue(&s->common, KBD_REPLY_RESEND);
}
s->common.write_cmd = -1;
break;
@@ -702,6 +1188,23 @@ static const VMStateDescription vmstate_ps2_keyboard_ledstate = {
}
};
+static bool ps2_keyboard_need_high_bit_needed(void *opaque)
+{
+ PS2KbdState *s = opaque;
+ return s->need_high_bit != 0; /* 0 is the usual state */
+}
+
+static const VMStateDescription vmstate_ps2_keyboard_need_high_bit = {
+ .name = "ps2kbd/need_high_bit",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = ps2_keyboard_need_high_bit_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(need_high_bit, PS2KbdState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static int ps2_kbd_post_load(void* opaque, int version_id)
{
PS2KbdState *s = (PS2KbdState*)opaque;
@@ -738,6 +1241,7 @@ static const VMStateDescription vmstate_ps2_keyboard = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_ps2_keyboard_ledstate,
+ &vmstate_ps2_keyboard_need_high_bit,
NULL
}
};
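
need_high_bit is transient state that can be live across migration (an 0xf0 prefix seen but not yet folded), so it travels in a vmstate subsection guarded by a .needed callback: when the flag is in its usual false state the subsection is omitted from the stream and older destinations keep working. Below is a sketch of the same idiom for a hypothetical device flag, assuming QEMU's migration/vmstate.h API; only the field and type names are invented, and it is not a complete device.

#include "qemu/osdep.h"
#include "migration/vmstate.h"

/* Hypothetical device state with a flag that is false in the common case. */
typedef struct DemoState {
    bool mid_sequence;
} DemoState;

static bool demo_mid_sequence_needed(void *opaque)
{
    DemoState *s = opaque;
    return s->mid_sequence;   /* subsection only sent when the flag is set */
}

static const VMStateDescription vmstate_demo_mid_sequence = {
    .name = "demo/mid_sequence",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = demo_mid_sequence_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(mid_sequence, DemoState),
        VMSTATE_END_OF_LIST()
    }
};
/* The description is then listed in the parent's .subsections array, just as
 * vmstate_ps2_keyboard_need_high_bit is added to vmstate_ps2_keyboard above. */
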
diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c
index 9b359aaec0..eb5320af40 100644
--- a/hw/input/tsc2005.c
+++ b/hw/input/tsc2005.c
@@ -31,30 +31,31 @@ typedef struct {
QEMUTimer *timer;
uint16_t model;
- int x, y;
- int pressure;
+ int32_t x, y;
+ bool pressure;
- int state, reg, irq, command;
+ uint8_t reg, state;
+ bool irq, command;
uint16_t data, dav;
- int busy;
- int enabled;
- int host_mode;
- int function;
- int nextfunction;
- int precision;
- int nextprecision;
- int filter;
- int pin_func;
- int timing[2];
- int noise;
- int reset;
- int pdst;
- int pnd0;
+ bool busy;
+ bool enabled;
+ bool host_mode;
+ int8_t function;
+ int8_t nextfunction;
+ bool precision;
+ bool nextprecision;
+ uint16_t filter;
+ uint8_t pin_func;
+ uint16_t timing[2];
+ uint8_t noise;
+ bool reset;
+ bool pdst;
+ bool pnd0;
uint16_t temp_thr[2];
uint16_t aux_thr[2];
- int tr[8];
+ int32_t tr[8];
} TSC2005State;
enum {
@@ -149,7 +150,7 @@ static uint16_t tsc2005_read(TSC2005State *s, int reg)
ret = s->dav | (s->reset << 7) | (s->pdst << 2) | 0x0;
s->dav &= ~(mode_regs[TSC_MODE_X_TEST] | mode_regs[TSC_MODE_Y_TEST] |
mode_regs[TSC_MODE_TS_TEST]);
- s->reset = 1;
+ s->reset = true;
return ret;
case 0x8: /* AUX high treshold */
@@ -196,14 +197,14 @@ static void tsc2005_write(TSC2005State *s, int reg, uint16_t data)
break;
case 0xc: /* CFR0 */
- s->host_mode = data >> 15;
+ s->host_mode = (data >> 15) != 0;
if (s->enabled != !(data & 0x4000)) {
s->enabled = !(data & 0x4000);
fprintf(stderr, "%s: touchscreen sense %sabled\n",
__FUNCTION__, s->enabled ? "en" : "dis");
if (s->busy && !s->enabled)
timer_del(s->timer);
- s->busy &= s->enabled;
+ s->busy = s->busy && s->enabled;
}
s->nextprecision = (data >> 13) & 1;
s->timing[0] = data & 0x1fff;
@@ -229,7 +230,7 @@ static void tsc2005_write(TSC2005State *s, int reg, uint16_t data)
static void tsc2005_pin_update(TSC2005State *s)
{
int64_t expires;
- int pin_state;
+ bool pin_state;
switch (s->pin_func) {
case 0:
@@ -253,7 +254,7 @@ static void tsc2005_pin_update(TSC2005State *s)
case TSC_MODE_XYZ_SCAN:
case TSC_MODE_XY_SCAN:
if (!s->host_mode && s->dav)
- s->enabled = 0;
+ s->enabled = false;
if (!s->pressure)
return;
/* Fall through */
@@ -273,7 +274,7 @@ static void tsc2005_pin_update(TSC2005State *s)
case TSC_MODE_Y_TEST:
case TSC_MODE_TS_TEST:
if (s->dav)
- s->enabled = 0;
+ s->enabled = false;
break;
case TSC_MODE_RESERVED:
@@ -287,7 +288,7 @@ static void tsc2005_pin_update(TSC2005State *s)
if (!s->enabled || s->busy)
return;
- s->busy = 1;
+ s->busy = true;
s->precision = s->nextprecision;
s->function = s->nextfunction;
s->pdst = !s->pnd0; /* Synchronised on internal clock */
@@ -300,17 +301,17 @@ static void tsc2005_reset(TSC2005State *s)
{
s->state = 0;
s->pin_func = 0;
- s->enabled = 0;
- s->busy = 0;
- s->nextprecision = 0;
+ s->enabled = false;
+ s->busy = false;
+ s->nextprecision = false;
s->nextfunction = 0;
s->timing[0] = 0;
s->timing[1] = 0;
- s->irq = 0;
+ s->irq = false;
s->dav = 0;
- s->reset = 0;
- s->pdst = 1;
- s->pnd0 = 0;
+ s->reset = false;
+ s->pdst = true;
+ s->pnd0 = false;
s->function = -1;
s->temp_thr[0] = 0x000;
s->temp_thr[1] = 0xfff;
@@ -340,7 +341,7 @@ static uint8_t tsc2005_txrx_word(void *opaque, uint8_t value)
__FUNCTION__, s->enabled ? "en" : "dis");
if (s->busy && !s->enabled)
timer_del(s->timer);
- s->busy &= s->enabled;
+ s->busy = s->busy && s->enabled;
}
tsc2005_pin_update(s);
}
@@ -407,7 +408,7 @@ static void tsc2005_timer_tick(void *opaque)
if (!s->busy)
return;
- s->busy = 0;
+ s->busy = false;
s->dav |= mode_regs[s->function];
s->function = -1;
tsc2005_pin_update(s);
@@ -434,86 +435,9 @@ static void tsc2005_touchscreen_event(void *opaque,
tsc2005_pin_update(s);
}
-static void tsc2005_save(QEMUFile *f, void *opaque)
+static int tsc2005_post_load(void *opaque, int version_id)
{
TSC2005State *s = (TSC2005State *) opaque;
- int i;
-
- qemu_put_be16(f, s->x);
- qemu_put_be16(f, s->y);
- qemu_put_byte(f, s->pressure);
-
- qemu_put_byte(f, s->state);
- qemu_put_byte(f, s->reg);
- qemu_put_byte(f, s->command);
-
- qemu_put_byte(f, s->irq);
- qemu_put_be16s(f, &s->dav);
- qemu_put_be16s(f, &s->data);
-
- timer_put(f, s->timer);
- qemu_put_byte(f, s->enabled);
- qemu_put_byte(f, s->host_mode);
- qemu_put_byte(f, s->function);
- qemu_put_byte(f, s->nextfunction);
- qemu_put_byte(f, s->precision);
- qemu_put_byte(f, s->nextprecision);
- qemu_put_be16(f, s->filter);
- qemu_put_byte(f, s->pin_func);
- qemu_put_be16(f, s->timing[0]);
- qemu_put_be16(f, s->timing[1]);
- qemu_put_be16s(f, &s->temp_thr[0]);
- qemu_put_be16s(f, &s->temp_thr[1]);
- qemu_put_be16s(f, &s->aux_thr[0]);
- qemu_put_be16s(f, &s->aux_thr[1]);
- qemu_put_be32(f, s->noise);
- qemu_put_byte(f, s->reset);
- qemu_put_byte(f, s->pdst);
- qemu_put_byte(f, s->pnd0);
-
- for (i = 0; i < 8; i ++)
- qemu_put_be32(f, s->tr[i]);
-}
-
-static int tsc2005_load(QEMUFile *f, void *opaque, int version_id)
-{
- TSC2005State *s = (TSC2005State *) opaque;
- int i;
-
- s->x = qemu_get_be16(f);
- s->y = qemu_get_be16(f);
- s->pressure = qemu_get_byte(f);
-
- s->state = qemu_get_byte(f);
- s->reg = qemu_get_byte(f);
- s->command = qemu_get_byte(f);
-
- s->irq = qemu_get_byte(f);
- qemu_get_be16s(f, &s->dav);
- qemu_get_be16s(f, &s->data);
-
- timer_get(f, s->timer);
- s->enabled = qemu_get_byte(f);
- s->host_mode = qemu_get_byte(f);
- s->function = qemu_get_byte(f);
- s->nextfunction = qemu_get_byte(f);
- s->precision = qemu_get_byte(f);
- s->nextprecision = qemu_get_byte(f);
- s->filter = qemu_get_be16(f);
- s->pin_func = qemu_get_byte(f);
- s->timing[0] = qemu_get_be16(f);
- s->timing[1] = qemu_get_be16(f);
- qemu_get_be16s(f, &s->temp_thr[0]);
- qemu_get_be16s(f, &s->temp_thr[1]);
- qemu_get_be16s(f, &s->aux_thr[0]);
- qemu_get_be16s(f, &s->aux_thr[1]);
- s->noise = qemu_get_be32(f);
- s->reset = qemu_get_byte(f);
- s->pdst = qemu_get_byte(f);
- s->pnd0 = qemu_get_byte(f);
-
- for (i = 0; i < 8; i ++)
- s->tr[i] = qemu_get_be32(f);
s->busy = timer_pending(s->timer);
tsc2005_pin_update(s);
@@ -521,6 +445,42 @@ static int tsc2005_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
+static const VMStateDescription vmstate_tsc2005 = {
+ .name = "tsc2005",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .post_load = tsc2005_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_BOOL(pressure, TSC2005State),
+ VMSTATE_BOOL(irq, TSC2005State),
+ VMSTATE_BOOL(command, TSC2005State),
+ VMSTATE_BOOL(enabled, TSC2005State),
+ VMSTATE_BOOL(host_mode, TSC2005State),
+ VMSTATE_BOOL(reset, TSC2005State),
+ VMSTATE_BOOL(pdst, TSC2005State),
+ VMSTATE_BOOL(pnd0, TSC2005State),
+ VMSTATE_BOOL(precision, TSC2005State),
+ VMSTATE_BOOL(nextprecision, TSC2005State),
+ VMSTATE_UINT8(reg, TSC2005State),
+ VMSTATE_UINT8(state, TSC2005State),
+ VMSTATE_UINT16(data, TSC2005State),
+ VMSTATE_UINT16(dav, TSC2005State),
+ VMSTATE_UINT16(filter, TSC2005State),
+ VMSTATE_INT8(nextfunction, TSC2005State),
+ VMSTATE_INT8(function, TSC2005State),
+ VMSTATE_INT32(x, TSC2005State),
+ VMSTATE_INT32(y, TSC2005State),
+ VMSTATE_TIMER_PTR(timer, TSC2005State),
+ VMSTATE_UINT8(pin_func, TSC2005State),
+ VMSTATE_UINT16_ARRAY(timing, TSC2005State, 2),
+ VMSTATE_UINT8(noise, TSC2005State),
+ VMSTATE_UINT16_ARRAY(temp_thr, TSC2005State, 2),
+ VMSTATE_UINT16_ARRAY(aux_thr, TSC2005State, 2),
+ VMSTATE_INT32_ARRAY(tr, TSC2005State, 8),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
void *tsc2005_init(qemu_irq pintdav)
{
TSC2005State *s;
@@ -529,8 +489,8 @@ void *tsc2005_init(qemu_irq pintdav)
g_malloc0(sizeof(TSC2005State));
s->x = 400;
s->y = 240;
- s->pressure = 0;
- s->precision = s->nextprecision = 0;
+ s->pressure = false;
+ s->precision = s->nextprecision = false;
s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, tsc2005_timer_tick, s);
s->pint = pintdav;
s->model = 0x2005;
@@ -550,7 +510,7 @@ void *tsc2005_init(qemu_irq pintdav)
"QEMU TSC2005-driven Touchscreen");
qemu_register_reset((void *) tsc2005_reset, s);
- register_savevm(NULL, "tsc2005", -1, 0, tsc2005_save, tsc2005_load, s);
+ vmstate_register(NULL, 0, &vmstate_tsc2005, s);
return s;
}
diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c
index 93ca374fcd..b068343771 100644
--- a/hw/input/tsc210x.c
+++ b/hw/input/tsc210x.c
@@ -47,24 +47,25 @@ typedef struct {
uint8_t out_fifo[16384];
uint16_t model;
- int x, y;
- int pressure;
-
- int state, page, offset, irq;
- uint16_t command, dav;
-
- int busy;
- int enabled;
- int host_mode;
- int function;
- int nextfunction;
- int precision;
- int nextprecision;
- int filter;
- int pin_func;
- int ref;
- int timing;
- int noise;
+ int32_t x, y;
+ bool pressure;
+
+ uint8_t page, offset;
+ uint16_t dav;
+
+ bool state;
+ bool irq;
+ bool command;
+ bool busy;
+ bool enabled;
+ bool host_mode;
+ uint8_t function, nextfunction;
+ uint8_t precision, nextprecision;
+ uint8_t filter;
+ uint8_t pin_func;
+ uint8_t ref;
+ uint8_t timing;
+ uint8_t noise;
uint16_t audio_ctrl1;
uint16_t audio_ctrl2;
@@ -72,7 +73,7 @@ typedef struct {
uint16_t pll[3];
uint16_t volume;
int64_t volume_change;
- int softstep;
+ bool softstep;
uint16_t dac_power;
int64_t powerdown;
uint16_t filter_data[0x14];
@@ -93,6 +94,7 @@ typedef struct {
int mode;
int intr;
} kb;
+ int64_t now; /* Time at migration */
} TSC210xState;
static const int resolution[4] = { 12, 8, 10, 12 };
@@ -154,14 +156,14 @@ static const uint16_t mode_regs[16] = {
static void tsc210x_reset(TSC210xState *s)
{
- s->state = 0;
+ s->state = false;
s->pin_func = 2;
- s->enabled = 0;
- s->busy = 0;
+ s->enabled = false;
+ s->busy = false;
s->nextfunction = 0;
s->ref = 0;
s->timing = 0;
- s->irq = 0;
+ s->irq = false;
s->dav = 0;
s->audio_ctrl1 = 0x0000;
@@ -172,7 +174,7 @@ static void tsc210x_reset(TSC210xState *s)
s->pll[2] = 0x1fff;
s->volume = 0xffff;
s->dac_power = 0x8540;
- s->softstep = 1;
+ s->softstep = true;
s->volume_change = 0;
s->powerdown = 0;
s->filter_data[0x00] = 0x6be3;
@@ -566,7 +568,7 @@ static void tsc2102_control_register_write(
s->enabled = !(value & 0x4000);
if (s->busy && !s->enabled)
timer_del(s->timer);
- s->busy &= s->enabled;
+ s->busy = s->busy && s->enabled;
s->nextfunction = (value >> 10) & 0xf;
s->nextprecision = (value >> 8) & 3;
s->filter = value & 0xff;
@@ -773,7 +775,7 @@ static void tsc2102_audio_register_write(
static void tsc210x_pin_update(TSC210xState *s)
{
int64_t expires;
- int pin_state;
+ bool pin_state;
switch (s->pin_func) {
case 0:
@@ -788,7 +790,7 @@ static void tsc210x_pin_update(TSC210xState *s)
}
if (!s->enabled)
- pin_state = 0;
+ pin_state = false;
if (pin_state != s->irq) {
s->irq = pin_state;
@@ -814,7 +816,7 @@ static void tsc210x_pin_update(TSC210xState *s)
case TSC_MODE_TEMP1:
case TSC_MODE_TEMP2:
if (s->dav)
- s->enabled = 0;
+ s->enabled = false;
break;
case TSC_MODE_AUX_SCAN:
@@ -832,7 +834,7 @@ static void tsc210x_pin_update(TSC210xState *s)
if (!s->enabled || s->busy || s->dav)
return;
- s->busy = 1;
+ s->busy = true;
s->precision = s->nextprecision;
s->function = s->nextfunction;
expires = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
@@ -867,7 +869,7 @@ static uint16_t tsc210x_read(TSC210xState *s)
/* Allow sequential reads. */
s->offset ++;
- s->state = 0;
+ s->state = false;
return ret;
}
@@ -878,10 +880,10 @@ static void tsc210x_write(TSC210xState *s, uint16_t value)
* command and data every second time.
*/
if (!s->state) {
- s->command = value >> 15;
+ s->command = (value >> 15) != 0;
s->page = (value >> 11) & 0x0f;
s->offset = (value >> 5) & 0x3f;
- s->state = 1;
+ s->state = true;
} else {
if (s->command)
fprintf(stderr, "tsc210x_write: SPI overrun!\n");
@@ -901,7 +903,7 @@ static void tsc210x_write(TSC210xState *s, uint16_t value)
}
tsc210x_pin_update(s);
- s->state = 0;
+ s->state = false;
}
}
@@ -933,7 +935,7 @@ static void tsc210x_timer_tick(void *opaque)
if (!s->busy)
return;
- s->busy = 0;
+ s->busy = false;
s->dav |= mode_regs[s->function];
tsc210x_pin_update(s);
qemu_irq_lower(s->davint);
@@ -974,108 +976,34 @@ static void tsc210x_i2s_set_rate(TSC210xState *s, int in, int out)
s->i2s_rx_rate = in;
}
-static void tsc210x_save(QEMUFile *f, void *opaque)
+static void tsc210x_pre_save(void *opaque)
{
TSC210xState *s = (TSC210xState *) opaque;
- int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- int i;
-
- qemu_put_be16(f, s->x);
- qemu_put_be16(f, s->y);
- qemu_put_byte(f, s->pressure);
-
- qemu_put_byte(f, s->state);
- qemu_put_byte(f, s->page);
- qemu_put_byte(f, s->offset);
- qemu_put_byte(f, s->command);
-
- qemu_put_byte(f, s->irq);
- qemu_put_be16s(f, &s->dav);
-
- timer_put(f, s->timer);
- qemu_put_byte(f, s->enabled);
- qemu_put_byte(f, s->host_mode);
- qemu_put_byte(f, s->function);
- qemu_put_byte(f, s->nextfunction);
- qemu_put_byte(f, s->precision);
- qemu_put_byte(f, s->nextprecision);
- qemu_put_byte(f, s->filter);
- qemu_put_byte(f, s->pin_func);
- qemu_put_byte(f, s->ref);
- qemu_put_byte(f, s->timing);
- qemu_put_be32(f, s->noise);
-
- qemu_put_be16s(f, &s->audio_ctrl1);
- qemu_put_be16s(f, &s->audio_ctrl2);
- qemu_put_be16s(f, &s->audio_ctrl3);
- qemu_put_be16s(f, &s->pll[0]);
- qemu_put_be16s(f, &s->pll[1]);
- qemu_put_be16s(f, &s->volume);
- qemu_put_sbe64(f, (s->volume_change - now));
- qemu_put_sbe64(f, (s->powerdown - now));
- qemu_put_byte(f, s->softstep);
- qemu_put_be16s(f, &s->dac_power);
-
- for (i = 0; i < 0x14; i ++)
- qemu_put_be16s(f, &s->filter_data[i]);
+ s->now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
-static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
+static int tsc210x_post_load(void *opaque, int version_id)
{
TSC210xState *s = (TSC210xState *) opaque;
int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- int i;
-
- s->x = qemu_get_be16(f);
- s->y = qemu_get_be16(f);
- s->pressure = qemu_get_byte(f);
-
- s->state = qemu_get_byte(f);
- s->page = qemu_get_byte(f);
- s->offset = qemu_get_byte(f);
- s->command = qemu_get_byte(f);
- s->irq = qemu_get_byte(f);
- qemu_get_be16s(f, &s->dav);
-
- timer_get(f, s->timer);
- s->enabled = qemu_get_byte(f);
- s->host_mode = qemu_get_byte(f);
- s->function = qemu_get_byte(f);
- if (s->function < 0 || s->function >= ARRAY_SIZE(mode_regs)) {
+ if (s->function >= ARRAY_SIZE(mode_regs)) {
return -EINVAL;
}
- s->nextfunction = qemu_get_byte(f);
- if (s->nextfunction < 0 || s->nextfunction >= ARRAY_SIZE(mode_regs)) {
+ if (s->nextfunction >= ARRAY_SIZE(mode_regs)) {
return -EINVAL;
}
- s->precision = qemu_get_byte(f);
- if (s->precision < 0 || s->precision >= ARRAY_SIZE(resolution)) {
+ if (s->precision >= ARRAY_SIZE(resolution)) {
return -EINVAL;
}
- s->nextprecision = qemu_get_byte(f);
- if (s->nextprecision < 0 || s->nextprecision >= ARRAY_SIZE(resolution)) {
+ if (s->nextprecision >= ARRAY_SIZE(resolution)) {
return -EINVAL;
}
- s->filter = qemu_get_byte(f);
- s->pin_func = qemu_get_byte(f);
- s->ref = qemu_get_byte(f);
- s->timing = qemu_get_byte(f);
- s->noise = qemu_get_be32(f);
-
- qemu_get_be16s(f, &s->audio_ctrl1);
- qemu_get_be16s(f, &s->audio_ctrl2);
- qemu_get_be16s(f, &s->audio_ctrl3);
- qemu_get_be16s(f, &s->pll[0]);
- qemu_get_be16s(f, &s->pll[1]);
- qemu_get_be16s(f, &s->volume);
- s->volume_change = qemu_get_sbe64(f) + now;
- s->powerdown = qemu_get_sbe64(f) + now;
- s->softstep = qemu_get_byte(f);
- qemu_get_be16s(f, &s->dac_power);
-
- for (i = 0; i < 0x14; i ++)
- qemu_get_be16s(f, &s->filter_data[i]);
+
+ s->volume_change -= s->now;
+ s->volume_change += now;
+ s->powerdown -= s->now;
+ s->powerdown += now;
s->busy = timer_pending(s->timer);
qemu_set_irq(s->pint, !s->irq);
@@ -1084,6 +1012,60 @@ static int tsc210x_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
+static VMStateField vmstatefields_tsc210x[] = {
+ VMSTATE_BOOL(enabled, TSC210xState),
+ VMSTATE_BOOL(host_mode, TSC210xState),
+ VMSTATE_BOOL(irq, TSC210xState),
+ VMSTATE_BOOL(command, TSC210xState),
+ VMSTATE_BOOL(pressure, TSC210xState),
+ VMSTATE_BOOL(softstep, TSC210xState),
+ VMSTATE_BOOL(state, TSC210xState),
+ VMSTATE_UINT16(dav, TSC210xState),
+ VMSTATE_INT32(x, TSC210xState),
+ VMSTATE_INT32(y, TSC210xState),
+ VMSTATE_UINT8(offset, TSC210xState),
+ VMSTATE_UINT8(page, TSC210xState),
+ VMSTATE_UINT8(filter, TSC210xState),
+ VMSTATE_UINT8(pin_func, TSC210xState),
+ VMSTATE_UINT8(ref, TSC210xState),
+ VMSTATE_UINT8(timing, TSC210xState),
+ VMSTATE_UINT8(noise, TSC210xState),
+ VMSTATE_UINT8(function, TSC210xState),
+ VMSTATE_UINT8(nextfunction, TSC210xState),
+ VMSTATE_UINT8(precision, TSC210xState),
+ VMSTATE_UINT8(nextprecision, TSC210xState),
+ VMSTATE_UINT16(audio_ctrl1, TSC210xState),
+ VMSTATE_UINT16(audio_ctrl2, TSC210xState),
+ VMSTATE_UINT16(audio_ctrl3, TSC210xState),
+ VMSTATE_UINT16_ARRAY(pll, TSC210xState, 3),
+ VMSTATE_UINT16(volume, TSC210xState),
+ VMSTATE_UINT16(dac_power, TSC210xState),
+ VMSTATE_INT64(volume_change, TSC210xState),
+ VMSTATE_INT64(powerdown, TSC210xState),
+ VMSTATE_INT64(now, TSC210xState),
+ VMSTATE_UINT16_ARRAY(filter_data, TSC210xState, 0x14),
+ VMSTATE_TIMER_PTR(timer, TSC210xState),
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_tsc2102 = {
+ .name = "tsc2102",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = tsc210x_pre_save,
+ .post_load = tsc210x_post_load,
+ .fields = vmstatefields_tsc210x,
+};
+
+static const VMStateDescription vmstate_tsc2301 = {
+ .name = "tsc2301",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = tsc210x_pre_save,
+ .post_load = tsc210x_post_load,
+ .fields = vmstatefields_tsc210x,
+};
+
uWireSlave *tsc2102_init(qemu_irq pint)
{
TSC210xState *s;
@@ -1125,8 +1107,7 @@ uWireSlave *tsc2102_init(qemu_irq pint)
AUD_register_card(s->name, &s->card);
qemu_register_reset((void *) tsc210x_reset, s);
- register_savevm(NULL, s->name, -1, 0,
- tsc210x_save, tsc210x_load, s);
+ vmstate_register(NULL, 0, &vmstate_tsc2102, s);
return &s->chip;
}
@@ -1174,7 +1155,7 @@ uWireSlave *tsc2301_init(qemu_irq penirq, qemu_irq kbirq, qemu_irq dav)
AUD_register_card(s->name, &s->card);
qemu_register_reset((void *) tsc210x_reset, s);
- register_savevm(NULL, s->name, -1, 0, tsc210x_save, tsc210x_load, s);
+ vmstate_register(NULL, 0, &vmstate_tsc2301, s);
return &s->chip;
}
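
The old tsc210x_save/load pair streamed volume_change and powerdown as deltas relative to the virtual clock at save time; the vmstate conversion instead records the clock reading itself in the new s->now field (tsc210x_pre_save) and has tsc210x_post_load rebase both deadlines onto the destination's clock. A standalone sketch of that rebasing arithmetic with made-up timestamps:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Source side at pre_save time: a deadline 5 ms in the future. */
    int64_t src_now   = 1000000000;           /* ns, illustrative */
    int64_t deadline  = src_now + 5000000;    /* e.g. volume_change */
    int64_t saved_now = src_now;              /* migrated as s->now */

    /* Destination side: its virtual clock reads something unrelated. */
    int64_t dst_now = 42;

    /* post_load keeps the remaining time constant, not the absolute value. */
    deadline = deadline - saved_now + dst_now;

    printf("remaining after load: %" PRId64 " ns\n", deadline - dst_now);
    return 0;
}
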
diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c
index ccdf7308a5..b678ee9f20 100644
--- a/hw/input/virtio-input.c
+++ b/hw/input/virtio-input.c
@@ -217,19 +217,12 @@ static void virtio_input_reset(VirtIODevice *vdev)
}
}
-static int virtio_input_load(QEMUFile *f, void *opaque, size_t size)
+static int virtio_input_post_load(void *opaque, int version_id)
{
VirtIOInput *vinput = opaque;
VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vinput);
VirtIODevice *vdev = VIRTIO_DEVICE(vinput);
- int ret;
- ret = virtio_load(vdev, f, VIRTIO_INPUT_VM_VERSION);
- if (ret) {
- return ret;
- }
-
- /* post_load() */
vinput->active = vdev->status & VIRTIO_CONFIG_S_DRIVER_OK;
if (vic->change_active) {
vic->change_active(vinput);
@@ -296,8 +289,16 @@ static void virtio_input_device_unrealize(DeviceState *dev, Error **errp)
virtio_cleanup(vdev);
}
-VMSTATE_VIRTIO_DEVICE(input, VIRTIO_INPUT_VM_VERSION, virtio_input_load,
- virtio_vmstate_save);
+static const VMStateDescription vmstate_virtio_input = {
+ .name = "virtio-input",
+ .minimum_version_id = VIRTIO_INPUT_VM_VERSION,
+ .version_id = VIRTIO_INPUT_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .post_load = virtio_input_post_load,
+};
static Property virtio_input_properties[] = {
DEFINE_PROP_STRING("serial", VirtIOInput, serial),
diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs
index 05ec21b21e..2f44a2da26 100644
--- a/hw/intc/Makefile.objs
+++ b/hw/intc/Makefile.objs
@@ -16,11 +16,14 @@ common-obj-$(CONFIG_ARM_GIC) += arm_gicv3_common.o
common-obj-$(CONFIG_ARM_GIC) += arm_gicv3.o
common-obj-$(CONFIG_ARM_GIC) += arm_gicv3_dist.o
common-obj-$(CONFIG_ARM_GIC) += arm_gicv3_redist.o
+common-obj-$(CONFIG_ARM_GIC) += arm_gicv3_its_common.o
common-obj-$(CONFIG_OPENPIC) += openpic.o
+common-obj-y += intc.o
obj-$(CONFIG_APIC) += apic.o apic_common.o
obj-$(CONFIG_ARM_GIC_KVM) += arm_gic_kvm.o
obj-$(call land,$(CONFIG_ARM_GIC_KVM),$(TARGET_AARCH64)) += arm_gicv3_kvm.o
+obj-$(call land,$(CONFIG_ARM_GIC_KVM),$(TARGET_AARCH64)) += arm_gicv3_its_kvm.o
obj-$(CONFIG_STELLARIS) += armv7m_nvic.o
obj-$(CONFIG_EXYNOS4) += exynos4210_gic.o exynos4210_combiner.o
obj-$(CONFIG_GRLIB) += grlib_irqmp.o
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 45887d99c0..fe15fb6024 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -39,6 +39,10 @@
static APICCommonState *local_apics[MAX_APICS + 1];
+#define TYPE_APIC "apic"
+#define APIC(obj) \
+ OBJECT_CHECK(APICCommonState, (obj), TYPE_APIC)
+
static void apic_set_irq(APICCommonState *s, int vector_num, int trigger_mode);
static void apic_update_irq(APICCommonState *s);
static void apic_get_delivery_bitmask(uint32_t *deliver_bitmask,
@@ -163,7 +167,7 @@ static void apic_local_deliver(APICCommonState *s, int vector)
void apic_deliver_pic_intr(DeviceState *dev, int level)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
if (level) {
apic_local_deliver(s, APIC_LVT_LINT0);
@@ -373,7 +377,7 @@ static void apic_update_irq(APICCommonState *s)
void apic_poll_irq(DeviceState *dev)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
apic_sync_vapic(s, SYNC_FROM_VAPIC);
apic_update_irq(s);
@@ -479,7 +483,7 @@ static void apic_startup(APICCommonState *s, int vector_num)
void apic_sipi(DeviceState *dev)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI);
@@ -493,7 +497,7 @@ static void apic_deliver(DeviceState *dev, uint8_t dest, uint8_t dest_mode,
uint8_t delivery_mode, uint8_t vector_num,
uint8_t trigger_mode)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
uint32_t deliver_bitmask[MAX_APIC_WORDS];
int dest_shorthand = (s->icr[0] >> 18) & 3;
APICCommonState *apic_iter;
@@ -550,7 +554,7 @@ static bool apic_check_pic(APICCommonState *s)
int apic_get_interrupt(DeviceState *dev)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
int intno;
/* if the APIC is installed or enabled, we let the 8259 handle the
@@ -584,7 +588,7 @@ int apic_get_interrupt(DeviceState *dev)
int apic_accept_pic_intr(DeviceState *dev)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
uint32_t lvt0;
if (!s)
@@ -663,7 +667,7 @@ static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
if (!dev) {
return 0;
}
- s = APIC_COMMON(dev);
+ s = APIC(dev);
index = (addr >> 4) & 0xff;
switch(index) {
@@ -736,8 +740,10 @@ static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
return val;
}
-static void apic_send_msi(hwaddr addr, uint32_t data)
+static void apic_send_msi(MSIMessage *msi)
{
+ uint64_t addr = msi->address;
+ uint32_t data = msi->data;
uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
@@ -758,7 +764,8 @@ static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
* APIC is connected directly to the CPU.
* Mapping them on the global bus happens to work because
* MSI registers are reserved in APIC MMIO and vice versa. */
- apic_send_msi(addr, val);
+ MSIMessage msi = { .address = addr, .data = val };
+ apic_send_msi(&msi);
return;
}
@@ -766,7 +773,7 @@ static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
if (!dev) {
return;
}
- s = APIC_COMMON(dev);
+ s = APIC(dev);
trace_apic_mem_writel(addr, val);
@@ -870,7 +877,7 @@ static const MemoryRegionOps apic_io_ops = {
static void apic_realize(DeviceState *dev, Error **errp)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
if (s->id >= MAX_APICS) {
error_setg(errp, "%s initialization failed. APIC ID %d is invalid",
@@ -889,7 +896,7 @@ static void apic_realize(DeviceState *dev, Error **errp)
static void apic_unrealize(DeviceState *dev, Error **errp)
{
- APICCommonState *s = APIC_COMMON(dev);
+ APICCommonState *s = APIC(dev);
timer_del(s->timer);
timer_free(s->timer);
@@ -909,10 +916,11 @@ static void apic_class_init(ObjectClass *klass, void *data)
k->external_nmi = apic_external_nmi;
k->pre_save = apic_pre_save;
k->post_load = apic_post_load;
+ k->send_msi = apic_send_msi;
}
static const TypeInfo apic_info = {
- .name = "apic",
+ .name = TYPE_APIC,
.instance_size = sizeof(APICCommonState),
.parent = TYPE_APIC_COMMON,
.class_init = apic_class_init,
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index a03508be81..0a0d29e079 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -18,9 +18,11 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
+#include "qapi/visitor.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "trace.h"
@@ -38,6 +40,11 @@ void cpu_set_apic_base(DeviceState *dev, uint64_t val)
if (dev) {
APICCommonState *s = APIC_COMMON(dev);
APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
+ /* switching to x2APIC, reset possibly modified xAPIC ID */
+ if (!(s->apicbase & MSR_IA32_APICBASE_EXTD) &&
+ (val & MSR_IA32_APICBASE_EXTD)) {
+ s->id = s->initial_apic_id;
+ }
info->set_base(s, val);
}
}
@@ -241,6 +248,7 @@ static void apic_reset_common(DeviceState *dev)
bsp = s->apicbase & MSR_IA32_APICBASE_BSP;
s->apicbase = APIC_DEFAULT_ADDRESS | bsp | MSR_IA32_APICBASE_ENABLE;
+ s->id = s->initial_apic_id;
s->vapic_paddr = 0;
info->vapic_base_update(s);
@@ -429,7 +437,6 @@ static const VMStateDescription vmstate_apic_common = {
};
static Property apic_properties_common[] = {
- DEFINE_PROP_UINT8("id", APICCommonState, id, -1),
DEFINE_PROP_UINT8("version", APICCommonState, version, 0x14),
DEFINE_PROP_BIT("vapic", APICCommonState, vapic_control, VAPIC_ENABLE_BIT,
true),
@@ -438,6 +445,49 @@ static Property apic_properties_common[] = {
DEFINE_PROP_END_OF_LIST(),
};
+static void apic_common_get_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ APICCommonState *s = APIC_COMMON(obj);
+ int64_t value;
+
+ value = s->apicbase & MSR_IA32_APICBASE_EXTD ? s->initial_apic_id : s->id;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void apic_common_set_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ APICCommonState *s = APIC_COMMON(obj);
+ DeviceState *dev = DEVICE(obj);
+ Error *local_err = NULL;
+ int64_t value;
+
+ if (dev->realized) {
+ qdev_prop_set_after_realize(dev, name, errp);
+ return;
+ }
+
+ visit_type_int(v, name, &value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ s->initial_apic_id = value;
+ s->id = (uint8_t)value;
+}
+
+static void apic_common_initfn(Object *obj)
+{
+ APICCommonState *s = APIC_COMMON(obj);
+
+ s->id = s->initial_apic_id = -1;
+ object_property_add(obj, "id", "int",
+ apic_common_get_id,
+ apic_common_set_id, NULL, NULL, NULL);
+}
+
static void apic_common_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -457,6 +507,7 @@ static const TypeInfo apic_common_type = {
.name = TYPE_APIC_COMMON,
.parent = TYPE_DEVICE,
.instance_size = sizeof(APICCommonState),
+ .instance_init = apic_common_initfn,
.class_size = sizeof(APICCommonClass),
.class_init = apic_common_class_init,
.abstract = true,
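
Replacing the static "id" qdev property with a getter/setter pair registered from instance_init is the standard QOM way to make a property's behaviour depend on runtime state (here, whether the APIC has switched to x2APIC mode). A hedged sketch of the same mechanism for a hypothetical device property called "level" (MyDevState/MYDEV are placeholder names):

    static void mydev_get_level(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
    {
        MyDevState *s = MYDEV(obj);
        int64_t value = s->level;

        visit_type_int(v, name, &value, errp);
    }

    static void mydev_set_level(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
    {
        MyDevState *s = MYDEV(obj);
        Error *local_err = NULL;
        int64_t value;

        if (DEVICE(obj)->realized) {
            qdev_prop_set_after_realize(DEVICE(obj), name, errp);
            return;
        }

        visit_type_int(v, name, &value, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        s->level = value;
    }

    static void mydev_initfn(Object *obj)
    {
        object_property_add(obj, "level", "int",
                            mydev_get_level, mydev_set_level,
                            NULL, NULL, NULL);
    }
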
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index b30cc91745..521aac3cc6 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -156,6 +156,17 @@ static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
}
}
+static void gic_set_irq_nvic(GICState *s, int irq, int level,
+ int cm, int target)
+{
+ if (level) {
+ GIC_SET_LEVEL(irq, cm);
+ GIC_SET_PENDING(irq, target);
+ } else {
+ GIC_CLEAR_LEVEL(irq, cm);
+ }
+}
+
static void gic_set_irq_generic(GICState *s, int irq, int level,
int cm, int target)
{
@@ -201,8 +212,10 @@ static void gic_set_irq(void *opaque, int irq, int level)
return;
}
- if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ if (s->revision == REV_11MPCORE) {
gic_set_irq_11mpcore(s, irq, level, cm, target);
+ } else if (s->revision == REV_NVIC) {
+ gic_set_irq_nvic(s, irq, level, cm, target);
} else {
gic_set_irq_generic(s, irq, level, cm, target);
}
@@ -568,7 +581,7 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return; /* No active IRQ. */
}
- if (s->revision == REV_11MPCORE || s->revision == REV_NVIC) {
+ if (s->revision == REV_11MPCORE) {
/* Mark level triggered interrupts as pending if they are still
raised. */
if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
@@ -576,6 +589,11 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
DPRINTF("Set %d pending mask %x\n", irq, cm);
GIC_SET_PENDING(irq, cm);
}
+ } else if (s->revision == REV_NVIC) {
+ if (GIC_TEST_LEVEL(irq, cm)) {
+ DPRINTF("Set nvic %d pending mask %x\n", irq, cm);
+ GIC_SET_PENDING(irq, cm);
+ }
}
group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 5593cdb3e4..11729ee902 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -30,20 +30,6 @@
#include "gic_internal.h"
#include "vgic_common.h"
-//#define DEBUG_GIC_KVM
-
-#ifdef DEBUG_GIC_KVM
-static const int debug_gic_kvm = 1;
-#else
-static const int debug_gic_kvm = 0;
-#endif
-
-#define DPRINTF(fmt, ...) do { \
- if (debug_gic_kvm) { \
- printf("arm_gic: " fmt , ## __VA_ARGS__); \
- } \
- } while (0)
-
#define TYPE_KVM_ARM_GIC "kvm-arm-gic"
#define KVM_ARM_GIC(obj) \
OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC)
@@ -577,6 +563,18 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
"not support vGICv2 migration");
migrate_add_blocker(s->migration_blocker);
}
+
+ if (kvm_has_gsi_routing()) {
+ /* set up irq routing */
+ kvm_init_irq_routing(kvm_state);
+ for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_gsi_routing_allowed = true;
+
+ kvm_irqchip_commit_routes(kvm_state);
+ }
}
static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 4633172bec..bca30c49da 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -454,7 +454,8 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
int irq = value & 0xffffff;
int grp;
- trace_gicv3_icc_eoir_write(gicv3_redist_affid(cs), value);
+ trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
if (ri->crm == 8) {
/* EOIR0 */
@@ -542,7 +543,7 @@ static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
bpr = MIN(bpr, 7);
}
- trace_gicv3_icc_bpr_read(gicv3_redist_affid(cs), bpr);
+ trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
return bpr;
}
@@ -553,7 +554,8 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
- trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
+ trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
@@ -591,7 +593,7 @@ static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
value = cs->icc_apr[grp][regno];
- trace_gicv3_icc_ap_read(regno, gicv3_redist_affid(cs), value);
+ trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
return value;
}
@@ -603,7 +605,7 @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
int regno = ri->opc2 & 3;
int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
- trace_gicv3_icc_ap_write(regno, gicv3_redist_affid(cs), value);
+ trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
@@ -820,7 +822,8 @@ static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
value = cs->icc_igrpen[grp];
- trace_gicv3_icc_igrpen_read(gicv3_redist_affid(cs), value);
+ trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
return value;
}
@@ -830,7 +833,8 @@ static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
- trace_gicv3_icc_igrpen_write(gicv3_redist_affid(cs), value);
+ trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
@@ -843,9 +847,12 @@ static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
/* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
- return cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
+ value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
+ trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
+ return value;
}
static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
new file mode 100644
index 0000000000..9d67c5c1ee
--- /dev/null
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -0,0 +1,148 @@
+/*
+ * ITS base class for a GICv3-based system
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Pavel Fedin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/pci/msi.h"
+#include "hw/intc/arm_gicv3_its_common.h"
+#include "qemu/log.h"
+
+static void gicv3_its_pre_save(void *opaque)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s);
+
+ if (c->pre_save) {
+ c->pre_save(s);
+ }
+}
+
+static int gicv3_its_post_load(void *opaque, int version_id)
+{
+ GICv3ITSState *s = (GICv3ITSState *)opaque;
+ GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s);
+
+ if (c->post_load) {
+ c->post_load(s);
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_its = {
+ .name = "arm_gicv3_its",
+ .pre_save = gicv3_its_pre_save,
+ .post_load = gicv3_its_post_load,
+ .unmigratable = true,
+};
+
+static MemTxResult gicv3_its_trans_read(void *opaque, hwaddr offset,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "ITS read at offset 0x%"PRIx64"\n", offset);
+ return MEMTX_ERROR;
+}
+
+static MemTxResult gicv3_its_trans_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ if (offset == 0x0040 && ((size == 2) || (size == 4))) {
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(opaque);
+ GICv3ITSCommonClass *c = ARM_GICV3_ITS_COMMON_GET_CLASS(s);
+ int ret = c->send_msi(s, le64_to_cpu(value), attrs.requester_id);
+
+ if (ret <= 0) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS: Error sending MSI: %s\n", strerror(-ret));
+ return MEMTX_DECODE_ERROR;
+ }
+
+ return MEMTX_OK;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS write at bad offset 0x%"PRIx64"\n", offset);
+ return MEMTX_DECODE_ERROR;
+ }
+}
+
+static const MemoryRegionOps gicv3_its_trans_ops = {
+ .read_with_attrs = gicv3_its_trans_read,
+ .write_with_attrs = gicv3_its_trans_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(s);
+
+ memory_region_init_io(&s->iomem_its_cntrl, OBJECT(s), ops, s,
+ "control", ITS_CONTROL_SIZE);
+ memory_region_init_io(&s->iomem_its_translation, OBJECT(s),
+ &gicv3_its_trans_ops, s,
+ "translation", ITS_TRANS_SIZE);
+
+ /* Our two regions are always adjacent, therefore we now combine them
+ * into a single one in order to make our users' life easier.
+ */
+ memory_region_init(&s->iomem_main, OBJECT(s), "gicv3_its", ITS_SIZE);
+ memory_region_add_subregion(&s->iomem_main, 0, &s->iomem_its_cntrl);
+ memory_region_add_subregion(&s->iomem_main, ITS_CONTROL_SIZE,
+ &s->iomem_its_translation);
+ sysbus_init_mmio(sbd, &s->iomem_main);
+
+ msi_nonbroken = true;
+}
+
+static void gicv3_its_common_reset(DeviceState *dev)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+
+ s->ctlr = 0;
+ s->cbaser = 0;
+ s->cwriter = 0;
+ s->creadr = 0;
+ memset(&s->baser, 0, sizeof(s->baser));
+
+ gicv3_its_post_load(s, 0);
+}
+
+static void gicv3_its_common_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = gicv3_its_common_reset;
+ dc->vmsd = &vmstate_its;
+}
+
+static const TypeInfo gicv3_its_common_info = {
+ .name = TYPE_ARM_GICV3_ITS_COMMON,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(GICv3ITSState),
+ .class_size = sizeof(GICv3ITSCommonClass),
+ .class_init = gicv3_its_common_class_init,
+ .abstract = true,
+};
+
+static void gicv3_its_common_register_types(void)
+{
+ type_register_static(&gicv3_its_common_info);
+}
+
+type_init(gicv3_its_common_register_types)
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
new file mode 100644
index 0000000000..fc246e0cb5
--- /dev/null
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -0,0 +1,121 @@
+/*
+ * KVM-based ITS implementation for a GICv3-based system
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Written by Pavel Fedin <p.fedin@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/intc/arm_gicv3_its_common.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "migration/migration.h"
+
+#define TYPE_KVM_ARM_ITS "arm-its-kvm"
+#define KVM_ARM_ITS(obj) OBJECT_CHECK(GICv3ITSState, (obj), TYPE_KVM_ARM_ITS)
+
+static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid)
+{
+ struct kvm_msi msi;
+
+ if (unlikely(!s->translater_gpa_known)) {
+ MemoryRegion *mr = &s->iomem_its_translation;
+ MemoryRegionSection mrs;
+
+ mrs = memory_region_find(mr, 0, 1);
+ memory_region_unref(mrs.mr);
+ s->gits_translater_gpa = mrs.offset_within_address_space + 0x40;
+ s->translater_gpa_known = true;
+ }
+
+ msi.address_lo = extract64(s->gits_translater_gpa, 0, 32);
+ msi.address_hi = extract64(s->gits_translater_gpa, 32, 32);
+ msi.data = le32_to_cpu(value);
+ msi.flags = KVM_MSI_VALID_DEVID;
+ msi.devid = devid;
+ memset(msi.pad, 0, sizeof(msi.pad));
+
+ return kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
+}
+
+static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+
+ s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_ITS, false);
+ if (s->dev_fd < 0) {
+ error_setg_errno(errp, -s->dev_fd, "error creating in-kernel ITS");
+ return;
+ }
+
+ /* explicit init of the ITS */
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ /* register the base address */
+ kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd);
+
+ gicv3_its_init_mmio(s, NULL);
+
+ /*
+ * Block migration of a KVM GICv3 ITS device: the API for saving and
+ * restoring the state in the kernel is not yet available
+ */
+ error_setg(&s->migration_blocker, "vITS migration is not implemented");
+ migrate_add_blocker(s->migration_blocker);
+
+ kvm_msi_use_devid = true;
+ kvm_gsi_direct_mapping = false;
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+}
+
+static void kvm_arm_its_init(Object *obj)
+{
+ GICv3ITSState *s = KVM_ARM_ITS(obj);
+
+ object_property_add_link(obj, "parent-gicv3",
+ "kvm-arm-gicv3", (Object **)&s->gicv3,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
+}
+
+static void kvm_arm_its_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
+
+ dc->realize = kvm_arm_its_realize;
+ icc->send_msi = kvm_its_send_msi;
+}
+
+static const TypeInfo kvm_arm_its_info = {
+ .name = TYPE_KVM_ARM_ITS,
+ .parent = TYPE_ARM_GICV3_ITS_COMMON,
+ .instance_size = sizeof(GICv3ITSState),
+ .instance_init = kvm_arm_its_init,
+ .class_init = kvm_arm_its_class_init,
+};
+
+static void kvm_arm_its_register_types(void)
+{
+ type_register_static(&kvm_arm_its_info);
+}
+
+type_init(kvm_arm_its_register_types)
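
The new "arm-its-kvm" device is expected to be created by board code and tied to its GICv3 through the "parent-gicv3" link set up in kvm_arm_its_init(). The board-side wiring is not part of this diff; purely as a sketch, instantiation would look roughly like the following, where gicv3_dev and its_base are assumed to come from the machine:

    DeviceState *its = qdev_create(NULL, "arm-its-kvm");

    /* link the ITS to the GICv3 it translates MSIs for */
    object_property_set_link(OBJECT(its), OBJECT(gicv3_dev),
                             "parent-gicv3", &error_abort);
    qdev_init_nofail(its);
    sysbus_mmio_map(SYS_BUS_DEVICE(its), 0, its_base);
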
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 711fde38f3..199a439ccf 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -85,6 +85,7 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
GICv3State *s = KVM_ARM_GICV3(dev);
KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
Error *local_err = NULL;
+ int i;
DPRINTF("kvm_arm_gicv3_realize\n");
@@ -127,6 +128,18 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
*/
error_setg(&s->migration_blocker, "vGICv3 migration is not implemented");
migrate_add_blocker(s->migration_blocker);
+
+ if (kvm_has_gsi_routing()) {
+ /* set up irq routing */
+ kvm_init_irq_routing(kvm_state);
+ for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+
+ kvm_gsi_routing_allowed = true;
+
+ kvm_irqchip_commit_routes(kvm_state);
+ }
}
static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index c2607a5868..fe9ecd6bd4 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -29,6 +29,7 @@
#include "qemu/timer.h"
#include "qemu/log.h"
#include "hw/isa/i8259_internal.h"
+#include "hw/intc/intc.h"
/* debug PIC */
//#define DEBUG_PIC
@@ -251,6 +252,35 @@ static void pic_reset(DeviceState *dev)
pic_init_reset(s);
}
+static bool pic_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ PICCommonState *s = PIC_COMMON(obj);
+
+ if (s->master) {
+#ifdef DEBUG_IRQ_COUNT
+ *irq_counts = irq_count;
+ *nb_irqs = ARRAY_SIZE(irq_count);
+#else
+ return false;
+#endif
+ } else {
+ *irq_counts = NULL;
+ *nb_irqs = 0;
+ }
+ return true;
+}
+
+static void pic_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ PICCommonState *s = PIC_COMMON(obj);
+ monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d "
+ "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n",
+ s->master ? 0 : 1, s->irr, s->imr, s->isr, s->priority_add,
+ s->irq_base, s->read_reg_select, s->elcr,
+ s->special_fully_nested_mode);
+}
+
static void pic_ioport_write(void *opaque, hwaddr addr64,
uint64_t val64, unsigned size)
{
@@ -431,42 +461,6 @@ static void pic_realize(DeviceState *dev, Error **errp)
pc->parent_realize(dev, errp);
}
-void hmp_info_pic(Monitor *mon, const QDict *qdict)
-{
- int i;
- PICCommonState *s;
-
- if (!isa_pic) {
- return;
- }
- for (i = 0; i < 2; i++) {
- s = i == 0 ? PIC_COMMON(isa_pic) : slave_pic;
- monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d "
- "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n",
- i, s->irr, s->imr, s->isr, s->priority_add,
- s->irq_base, s->read_reg_select, s->elcr,
- s->special_fully_nested_mode);
- }
-}
-
-void hmp_info_irq(Monitor *mon, const QDict *qdict)
-{
-#ifndef DEBUG_IRQ_COUNT
- monitor_printf(mon, "irq statistic code not compiled.\n");
-#else
- int i;
- int64_t count;
-
- monitor_printf(mon, "IRQ statistics:\n");
- for (i = 0; i < 16; i++) {
- count = irq_count[i];
- if (count > 0) {
- monitor_printf(mon, "%2d: %" PRId64 "\n", i, count);
- }
- }
-#endif
-}
-
qemu_irq *i8259_init(ISABus *bus, qemu_irq parent_irq)
{
qemu_irq *irq_set;
@@ -503,10 +497,13 @@ static void i8259_class_init(ObjectClass *klass, void *data)
{
PICClass *k = PIC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
k->parent_realize = dc->realize;
dc->realize = pic_realize;
dc->reset = pic_reset;
+ ic->get_statistics = pic_get_statistics;
+ ic->print_info = pic_print_info;
}
static const TypeInfo i8259_info = {
@@ -515,6 +512,10 @@ static const TypeInfo i8259_info = {
.parent = TYPE_PIC_COMMON,
.class_init = i8259_class_init,
.class_size = sizeof(PICClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
};
static void pic_register_types(void)
diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c
index 3a850b0c66..d9a5e8b217 100644
--- a/hw/intc/i8259_common.c
+++ b/hw/intc/i8259_common.c
@@ -70,10 +70,11 @@ static int pic_dispatch_post_load(void *opaque, int version_id)
static void pic_common_realize(DeviceState *dev, Error **errp)
{
PICCommonState *s = PIC_COMMON(dev);
+ ISADevice *isa = ISA_DEVICE(dev);
- isa_register_ioport(NULL, &s->base_io, s->iobase);
+ isa_register_ioport(isa, &s->base_io, s->iobase);
if (s->elcr_addr != -1) {
- isa_register_ioport(NULL, &s->elcr_io, s->elcr_addr);
+ isa_register_ioport(isa, &s->elcr_io, s->elcr_addr);
}
qdev_set_legacy_instance_id(dev, s->iobase, 1);
diff --git a/hw/intc/intc.c b/hw/intc/intc.c
new file mode 100644
index 0000000000..2e1e29e753
--- /dev/null
+++ b/hw/intc/intc.c
@@ -0,0 +1,41 @@
+/*
+ * QEMU Generic Interrupt Controller
+ *
+ * Copyright (c) 2016 Hervé Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/intc/intc.h"
+#include "qemu/module.h"
+
+static const TypeInfo intctrl_info = {
+ .name = TYPE_INTERRUPT_STATS_PROVIDER,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(InterruptStatsProviderClass),
+};
+
+static void intc_register_types(void)
+{
+ type_register_static(&intctrl_info);
+}
+
+type_init(intc_register_types)
+
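
Every controller converted later in this patch follows the same recipe for plugging into the new interface: implement get_statistics and/or print_info, hook them up in class_init, and list TYPE_INTERRUPT_STATS_PROVIDER in the TypeInfo interfaces array. A condensed sketch for a hypothetical controller (MyPICState/MY_PIC and its fields are placeholders):

    static bool my_pic_get_statistics(InterruptStatsProvider *obj,
                                      uint64_t **irq_counts,
                                      unsigned int *nb_irqs)
    {
        MyPICState *s = MY_PIC(obj);

        *irq_counts = s->irq_count;          /* uint64_t array in the device state */
        *nb_irqs = ARRAY_SIZE(s->irq_count);
        return true;
    }

    static void my_pic_print_info(InterruptStatsProvider *obj, Monitor *mon)
    {
        MyPICState *s = MY_PIC(obj);

        monitor_printf(mon, "my-pic: pending=%08x\n", s->pending);
    }

    static void my_pic_class_init(ObjectClass *klass, void *data)
    {
        InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);

        ic->get_statistics = my_pic_get_statistics;
        ic->print_info = my_pic_print_info;
    }

    static const TypeInfo my_pic_info = {
        .name = "my-pic",
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(MyPICState),
        .class_init = my_pic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { TYPE_INTERRUPT_STATS_PROVIDER },
            { }
        },
    };
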
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index 31791b0986..fd9208fde0 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -416,7 +416,7 @@ static void ioapic_realize(DeviceState *dev, Error **errp)
}
static Property ioapic_properties[] = {
- DEFINE_PROP_UINT8("version", IOAPICCommonState, version, 0x11),
+ DEFINE_PROP_UINT8("version", IOAPICCommonState, version, 0x20),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/intc/lm32_pic.c b/hw/intc/lm32_pic.c
index 3dad01c5ba..09e15115fb 100644
--- a/hw/intc/lm32_pic.c
+++ b/hw/intc/lm32_pic.c
@@ -25,6 +25,7 @@
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/lm32/lm32_pic.h"
+#include "hw/intc/intc.h"
#define TYPE_LM32_PIC "lm32-pic"
#define LM32_PIC(obj) OBJECT_CHECK(LM32PicState, (obj), TYPE_LM32_PIC)
@@ -38,39 +39,10 @@ struct LM32PicState {
uint32_t irq_state;
/* statistics */
- uint32_t stats_irq_count[32];
+ uint64_t stats_irq_count[32];
};
typedef struct LM32PicState LM32PicState;
-static LM32PicState *pic;
-void lm32_hmp_info_pic(Monitor *mon, const QDict *qdict)
-{
- if (pic == NULL) {
- return;
- }
-
- monitor_printf(mon, "lm32-pic: im=%08x ip=%08x irq_state=%08x\n",
- pic->im, pic->ip, pic->irq_state);
-}
-
-void lm32_hmp_info_irq(Monitor *mon, const QDict *qdict)
-{
- int i;
- uint32_t count;
-
- if (pic == NULL) {
- return;
- }
-
- monitor_printf(mon, "IRQ statistics:\n");
- for (i = 0; i < 32; i++) {
- count = pic->stats_irq_count[i];
- if (count > 0) {
- monitor_printf(mon, "%2d: %u\n", i, count);
- }
- }
-}
-
static void update_irq(LM32PicState *s)
{
s->ip |= s->irq_state;
@@ -152,6 +124,22 @@ static void pic_reset(DeviceState *d)
}
}
+static bool lm32_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ LM32PicState *s = LM32_PIC(obj);
+ *irq_counts = s->stats_irq_count;
+ *nb_irqs = ARRAY_SIZE(s->stats_irq_count);
+ return true;
+}
+
+static void lm32_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ LM32PicState *s = LM32_PIC(obj);
+ monitor_printf(mon, "lm32-pic: im=%08x ip=%08x irq_state=%08x\n",
+ s->im, s->ip, s->irq_state);
+}
+
static void lm32_pic_init(Object *obj)
{
DeviceState *dev = DEVICE(obj);
@@ -160,19 +148,17 @@ static void lm32_pic_init(Object *obj)
qdev_init_gpio_in(dev, irq_handler, 32);
sysbus_init_irq(sbd, &s->parent_irq);
-
- pic = s;
}
static const VMStateDescription vmstate_lm32_pic = {
.name = "lm32-pic",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (VMStateField[]) {
VMSTATE_UINT32(im, LM32PicState),
VMSTATE_UINT32(ip, LM32PicState),
VMSTATE_UINT32(irq_state, LM32PicState),
- VMSTATE_UINT32_ARRAY(stats_irq_count, LM32PicState, 32),
+ VMSTATE_UINT64_ARRAY(stats_irq_count, LM32PicState, 32),
VMSTATE_END_OF_LIST()
}
};
@@ -180,9 +166,12 @@ static const VMStateDescription vmstate_lm32_pic = {
static void lm32_pic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
dc->reset = pic_reset;
dc->vmsd = &vmstate_lm32_pic;
+ ic->get_statistics = lm32_get_statistics;
+ ic->print_info = lm32_print_info;
}
static const TypeInfo lm32_pic_info = {
@@ -191,6 +180,10 @@ static const TypeInfo lm32_pic_info = {
.instance_size = sizeof(LM32PicState),
.instance_init = lm32_pic_init,
.class_init = lm32_pic_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
};
static void lm32_pic_register_types(void)
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
index fef808011f..21ac2e2dcd 100644
--- a/hw/intc/s390_flic_kvm.c
+++ b/hw/intc/s390_flic_kvm.c
@@ -280,12 +280,13 @@ static void kvm_s390_release_adapter_routes(S390FLICState *fs,
* kvm_flic_save - Save pending floating interrupts
* @f: QEMUFile containing migration state
* @opaque: pointer to flic device state
+ * @size: ignored
*
* Note: Pass buf and len to kernel. Start with one page and
* increase until buffer is sufficient or maximum size is
* reached
*/
-static void kvm_flic_save(QEMUFile *f, void *opaque)
+static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size)
{
KVMS390FLICState *flic = opaque;
int len = FLIC_SAVE_INITIAL_SIZE;
@@ -324,24 +325,19 @@ static void kvm_flic_save(QEMUFile *f, void *opaque)
* kvm_flic_load - Load pending floating interrupts
* @f: QEMUFile containing migration state
* @opaque: pointer to flic device state
- * @version_id: version id for migration
+ * @size: ignored
*
* Returns: value of flic_enqueue_irqs, -EINVAL on error
* Note: Do nothing when no interrupts were stored
* in QEMUFile
*/
-static int kvm_flic_load(QEMUFile *f, void *opaque, int version_id)
+static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size)
{
uint64_t len = 0;
uint64_t count = 0;
void *buf = NULL;
int r = 0;
- if (version_id != FLIC_SAVEVM_VERSION) {
- r = -EINVAL;
- goto out;
- }
-
flic_enable_pfault((struct KVMS390FLICState *) opaque);
count = qemu_get_be64(f);
@@ -372,6 +368,24 @@ out:
return r;
}
+static const VMStateDescription kvm_s390_flic_vmstate = {
+ .name = "s390-flic",
+ .version_id = FLIC_SAVEVM_VERSION,
+ .minimum_version_id = FLIC_SAVEVM_VERSION,
+ .fields = (VMStateField[]) {
+ {
+ .name = "irqs",
+ .info = &(const VMStateInfo) {
+ .name = "irqs",
+ .get = kvm_flic_load,
+ .put = kvm_flic_save,
+ },
+ .flags = VMS_SINGLE,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static void kvm_s390_flic_realize(DeviceState *dev, Error **errp)
{
KVMS390FLICState *flic_state = KVM_S390_FLIC(dev);
@@ -398,16 +412,6 @@ static void kvm_s390_flic_realize(DeviceState *dev, Error **errp)
flic_state->clear_io_supported = !ioctl(flic_state->fd,
KVM_HAS_DEVICE_ATTR, test_attr);
- /* Register savevm handler for floating interrupts */
- register_savevm(NULL, "s390-flic", 0, 1, kvm_flic_save,
- kvm_flic_load, (void *) flic_state);
-}
-
-static void kvm_s390_flic_unrealize(DeviceState *dev, Error **errp)
-{
- KVMS390FLICState *flic_state = KVM_S390_FLIC(dev);
-
- unregister_savevm(DEVICE(flic_state), "s390-flic", flic_state);
}
static void kvm_s390_flic_reset(DeviceState *dev)
@@ -438,7 +442,7 @@ static void kvm_s390_flic_class_init(ObjectClass *oc, void *data)
S390FLICStateClass *fsc = S390_FLIC_COMMON_CLASS(oc);
dc->realize = kvm_s390_flic_realize;
- dc->unrealize = kvm_s390_flic_unrealize;
+ dc->vmsd = &kvm_s390_flic_vmstate;
dc->reset = kvm_s390_flic_reset;
fsc->register_io_adapter = kvm_s390_register_io_adapter;
fsc->io_adapter_map = kvm_s390_io_adapter_map;
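
The FLIC conversion above shows how a device that previously used register_savevm() can keep its streaming save/load code: the handlers are wrapped in a custom VMStateInfo and exposed as a single opaque field, so the device only needs to set dc->vmsd and the explicit unrealize-time unregistration disappears. A sketch of the same wrapper for a hypothetical device (names illustrative):

    static void my_dev_save_state(QEMUFile *f, void *opaque, size_t size)
    {
        /* stream out the device state, exactly as the old savevm handler did */
    }

    static int my_dev_load_state(QEMUFile *f, void *opaque, size_t size)
    {
        /* stream the state back in; return a negative errno on failure */
        return 0;
    }

    static const VMStateDescription my_dev_vmstate = {
        .name = "my-dev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            {
                .name = "state",
                .info = &(const VMStateInfo) {
                    .name = "state",
                    .get = my_dev_load_state,
                    .put = my_dev_save_state,
                },
                .flags = VMS_SINGLE,
            },
            VMSTATE_END_OF_LIST()
        }
    };
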
diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c
index e82e893628..84e0bee4a9 100644
--- a/hw/intc/slavio_intctl.c
+++ b/hw/intc/slavio_intctl.c
@@ -26,6 +26,7 @@
#include "hw/sparc/sun4m.h"
#include "monitor/monitor.h"
#include "hw/sysbus.h"
+#include "hw/intc/intc.h"
#include "trace.h"
//#define DEBUG_IRQ_COUNT
@@ -210,38 +211,6 @@ static const MemoryRegionOps slavio_intctlm_mem_ops = {
},
};
-void slavio_pic_info(Monitor *mon, DeviceState *dev)
-{
- SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev);
- int i;
-
- for (i = 0; i < MAX_CPUS; i++) {
- monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i,
- s->slaves[i].intreg_pending);
- }
- monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n",
- s->intregm_pending, s->intregm_disabled);
-}
-
-void slavio_irq_info(Monitor *mon, DeviceState *dev)
-{
-#ifndef DEBUG_IRQ_COUNT
- monitor_printf(mon, "irq statistic code not compiled.\n");
-#else
- SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev);
- int i;
- int64_t count;
-
- s = SLAVIO_INTCTL(dev);
- monitor_printf(mon, "IRQ statistics:\n");
- for (i = 0; i < 32; i++) {
- count = s->irq_count[i];
- if (count > 0)
- monitor_printf(mon, "%2d: %" PRId64 "\n", i, count);
- }
-#endif
-}
-
static const uint32_t intbit_to_level[] = {
2, 3, 5, 7, 9, 11, 13, 2, 3, 5, 7, 9, 11, 13, 12, 12,
6, 13, 4, 10, 8, 9, 11, 0, 0, 0, 0, 15, 15, 15, 15, 0,
@@ -418,6 +387,31 @@ static void slavio_intctl_reset(DeviceState *d)
slavio_check_interrupts(s, 0);
}
+#ifdef DEBUG_IRQ_COUNT
+static bool slavio_intctl_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts,
+ unsigned int *nb_irqs)
+{
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj);
+ *irq_counts = s->irq_count;
+ *nb_irqs = ARRAY_SIZE(s->irq_count);
+ return true;
+}
+#endif
+
+static void slavio_intctl_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj);
+ int i;
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i,
+ s->slaves[i].intreg_pending);
+ }
+ monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n",
+ s->intregm_pending, s->intregm_disabled);
+}
+
static void slavio_intctl_init(Object *obj)
{
DeviceState *dev = DEVICE(obj);
@@ -449,9 +443,14 @@ static void slavio_intctl_init(Object *obj)
static void slavio_intctl_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
dc->reset = slavio_intctl_reset;
dc->vmsd = &vmstate_intctl;
+#ifdef DEBUG_IRQ_COUNT
+ ic->get_statistics = slavio_intctl_get_statistics;
+#endif
+ ic->print_info = slavio_intctl_print_info;
}
static const TypeInfo slavio_intctl_info = {
@@ -460,6 +459,10 @@ static const TypeInfo slavio_intctl_info = {
.instance_size = sizeof(SLAVIO_INTCTLState),
.instance_init = slavio_intctl_init,
.class_init = slavio_intctl_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
};
static void slavio_intctl_register_types(void)
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index f12192c082..340f617761 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -50,16 +50,17 @@ xics_icp_accept(uint32_t old_xirr, uint32_t new_xirr) "icp_accept: XIRR %#"PRIx3
xics_icp_eoi(int server, uint32_t xirr, uint32_t new_xirr) "icp_eoi: server %d given XIRR %#"PRIx32" new XIRR %#"PRIx32
xics_icp_irq(int server, int nr, uint8_t priority) "cpu %d trying to deliver irq %#"PRIx32" priority %#x"
xics_icp_raise(uint32_t xirr, uint8_t pending_priority) "raising IRQ new XIRR=%#x new pending priority=%#x"
-xics_set_irq_msi(int srcno, int nr) "set_irq_msi: srcno %d [irq %#x]"
+xics_ics_simple_set_irq_msi(int srcno, int nr) "set_irq_msi: srcno %d [irq %#x]"
xics_masked_pending(void) "set_irq_msi: masked pending"
-xics_set_irq_lsi(int srcno, int nr) "set_irq_lsi: srcno %d [irq %#x]"
-xics_ics_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_xive: irq %#x [src %d] server %#x prio %#x"
-xics_ics_reject(int nr, int srcno) "reject irq %#x [src %d]"
-xics_ics_eoi(int nr) "ics_eoi: irq %#x"
-xics_alloc(int src, int irq) "source#%d, irq %d"
-xics_alloc_block(int src, int first, int num, bool lsi, int align) "source#%d, first irq %d, %d irqs, lsi=%d, alignnum %d"
+xics_ics_simple_set_irq_lsi(int srcno, int nr) "set_irq_lsi: srcno %d [irq %#x]"
+xics_ics_simple_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_xive: irq %#x [src %d] server %#x prio %#x"
+xics_ics_simple_reject(int nr, int srcno) "reject irq %#x [src %d]"
+xics_ics_simple_eoi(int nr) "ics_eoi: irq %#x"
+xics_alloc(int irq) "irq %d"
+xics_alloc_block(int first, int num, bool lsi, int align) "first irq %d, %d irqs, lsi=%d, alignnum %d"
xics_ics_free(int src, int irq, int num) "Source#%d, first irq %d, %d irqs"
xics_ics_free_warn(int src, int irq) "Source#%d, irq %d is already free"
+xics_icp_post_load(uint32_t server_no, uint32_t xirr, uint64_t addr, uint8_t pend) "server_no %d, xirr %#x, xirr_owner 0x%" PRIx64 ", pending %d"
# hw/intc/s390_flic_kvm.c
flic_create_device(int err) "flic: create device failed %d"
@@ -84,12 +85,12 @@ gic_acknowledge_irq(int cpu, int irq) "cpu %d acknowledged irq %d"
# hw/intc/arm_gicv3_cpuif.c
gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu %x value 0x%" PRIx64
gicv3_icc_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR write cpu %x value 0x%" PRIx64
-gicv3_icc_bpr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_BPR read cpu %x value 0x%" PRIx64
-gicv3_icc_bpr_write(uint32_t cpu, uint64_t val) "GICv3 ICC_BPR write cpu %x value 0x%" PRIx64
-gicv3_icc_ap_read(int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR read cpu %x value 0x%" PRIx64
-gicv3_icc_ap_write(int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR write cpu %x value 0x%" PRIx64
-gicv3_icc_igrpen_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN read cpu %x value 0x%" PRIx64
-gicv3_icc_igrpen_write(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN write cpu %x value 0x%" PRIx64
+gicv3_icc_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d read cpu %x value 0x%" PRIx64
+gicv3_icc_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_BPR%d write cpu %x value 0x%" PRIx64
+gicv3_icc_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR%d read cpu %x value 0x%" PRIx64
+gicv3_icc_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICC_AP%dR%d write cpu %x value 0x%" PRIx64
+gicv3_icc_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN%d read cpu %x value 0x%" PRIx64
+gicv3_icc_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN%d write cpu %x value 0x%" PRIx64
gicv3_icc_igrpen1_el3_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN1_EL3 read cpu %x value 0x%" PRIx64
gicv3_icc_igrpen1_el3_write(uint32_t cpu, uint64_t val) "GICv3 ICC_IGRPEN1_EL3 write cpu %x value 0x%" PRIx64
gicv3_icc_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_CTLR read cpu %x value 0x%" PRIx64
@@ -101,7 +102,7 @@ gicv3_cpuif_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f
gicv3_icc_generate_sgi(uint32_t cpuid, int irq, int irm, uint32_t aff, uint32_t targetlist) "GICv3 CPU i/f %x generating SGI %d IRM %d target affinity 0x%xxx targetlist 0x%x"
gicv3_icc_iar0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR0 read cpu %x value 0x%" PRIx64
gicv3_icc_iar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR1 read cpu %x value 0x%" PRIx64
-gicv3_icc_eoir_write(uint32_t cpu, uint64_t val) "GICv3 ICC_EOIR write cpu %x value 0x%" PRIx64
+gicv3_icc_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_EOIR%d write cpu %x value 0x%" PRIx64
gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu %x value 0x%" PRIx64
gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu %x value 0x%" PRIx64
gicv3_icc_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICC_DIR write cpu %x value 0x%" PRIx64
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index cd48f42046..095c16a300 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -35,6 +35,8 @@
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
+#include "monitor/monitor.h"
+#include "hw/intc/intc.h"
int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
{
@@ -90,19 +92,63 @@ void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
}
}
+static void xics_common_pic_print_info(InterruptStatsProvider *obj,
+ Monitor *mon)
+{
+ XICSState *xics = XICS_COMMON(obj);
+ ICSState *ics;
+ uint32_t i;
+
+ for (i = 0; i < xics->nr_servers; i++) {
+ ICPState *icp = &xics->ss[i];
+
+ if (!icp->output) {
+ continue;
+ }
+ monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
+ i, icp->xirr, icp->xirr_owner,
+ icp->pending_priority, icp->mfrr);
+ }
+
+ QLIST_FOREACH(ics, &xics->ics, list) {
+ monitor_printf(mon, "ICS %4x..%4x %p\n",
+ ics->offset, ics->offset + ics->nr_irqs - 1, ics);
+
+ if (!ics->irqs) {
+ continue;
+ }
+
+ for (i = 0; i < ics->nr_irqs; i++) {
+ ICSIRQState *irq = ics->irqs + i;
+
+ if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
+ continue;
+ }
+ monitor_printf(mon, " %4x %s %02x %02x\n",
+ ics->offset + i,
+ (irq->flags & XICS_FLAGS_IRQ_LSI) ?
+ "LSI" : "MSI",
+ irq->priority, irq->status);
+ }
+ }
+}
+
/*
* XICS Common class - parent for emulated XICS and KVM-XICS
*/
static void xics_common_reset(DeviceState *d)
{
XICSState *xics = XICS_COMMON(d);
+ ICSState *ics;
int i;
for (i = 0; i < xics->nr_servers; i++) {
device_reset(DEVICE(&xics->ss[i]));
}
- device_reset(DEVICE(xics->ics));
+ QLIST_FOREACH(ics, &xics->ics, list) {
+ device_reset(DEVICE(ics));
+ }
}
static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
@@ -134,10 +180,28 @@ static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
}
assert(info->set_nr_irqs);
- assert(xics->ics);
info->set_nr_irqs(xics, value, errp);
}
+void xics_set_nr_servers(XICSState *xics, uint32_t nr_servers,
+ const char *typename, Error **errp)
+{
+ int i;
+
+ xics->nr_servers = nr_servers;
+
+ xics->ss = g_malloc0(xics->nr_servers * sizeof(ICPState));
+ for (i = 0; i < xics->nr_servers; i++) {
+ char name[32];
+ ICPState *icp = &xics->ss[i];
+
+ object_initialize(icp, sizeof(*icp), typename);
+ snprintf(name, sizeof(name), "icp[%d]", i);
+ object_property_add_child(OBJECT(xics), name, OBJECT(icp), errp);
+ icp->xics = xics;
+ }
+}
+
static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
@@ -153,7 +217,7 @@ static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
Error **errp)
{
XICSState *xics = XICS_COMMON(obj);
- XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
+ XICSStateClass *xsc = XICS_COMMON_GET_CLASS(xics);
Error *error = NULL;
int64_t value;
@@ -168,12 +232,15 @@ static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
return;
}
- assert(info->set_nr_servers);
- info->set_nr_servers(xics, value, errp);
+ assert(xsc->set_nr_servers);
+ xsc->set_nr_servers(xics, value, errp);
}
static void xics_common_initfn(Object *obj)
{
+ XICSState *xics = XICS_COMMON(obj);
+
+ QLIST_INIT(&xics->ics);
object_property_add(obj, "nr_irqs", "int",
xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
NULL, NULL, NULL);
@@ -185,8 +252,10 @@ static void xics_common_initfn(Object *obj)
static void xics_common_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc);
dc->reset = xics_common_reset;
+ ic->print_info = xics_common_pic_print_info;
}
static const TypeInfo xics_common_info = {
@@ -196,6 +265,10 @@ static const TypeInfo xics_common_info = {
.class_size = sizeof(XICSStateClass),
.instance_init = xics_common_initfn,
.class_init = xics_common_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
};
/*
@@ -208,42 +281,65 @@ static const TypeInfo xics_common_info = {
#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)
-static void ics_reject(ICSState *ics, int nr);
-static void ics_resend(ICSState *ics);
-static void ics_eoi(ICSState *ics, int nr);
+static void ics_reject(ICSState *ics, uint32_t nr)
+{
+ ICSStateClass *k = ICS_BASE_GET_CLASS(ics);
+
+ if (k->reject) {
+ k->reject(ics, nr);
+ }
+}
+
+static void ics_resend(ICSState *ics)
+{
+ ICSStateClass *k = ICS_BASE_GET_CLASS(ics);
-static void icp_check_ipi(XICSState *xics, int server)
+ if (k->resend) {
+ k->resend(ics);
+ }
+}
+
+static void ics_eoi(ICSState *ics, int nr)
{
- ICPState *ss = xics->ss + server;
+ ICSStateClass *k = ICS_BASE_GET_CLASS(ics);
+ if (k->eoi) {
+ k->eoi(ics, nr);
+ }
+}
+
+static void icp_check_ipi(ICPState *ss)
+{
if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
return;
}
- trace_xics_icp_check_ipi(server, ss->mfrr);
+ trace_xics_icp_check_ipi(ss->cs->cpu_index, ss->mfrr);
- if (XISR(ss)) {
- ics_reject(xics->ics, XISR(ss));
+ if (XISR(ss) && ss->xirr_owner) {
+ ics_reject(ss->xirr_owner, XISR(ss));
}
ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
ss->pending_priority = ss->mfrr;
+ ss->xirr_owner = NULL;
qemu_irq_raise(ss->output);
}
-static void icp_resend(XICSState *xics, int server)
+static void icp_resend(ICPState *ss)
{
- ICPState *ss = xics->ss + server;
+ ICSState *ics;
if (ss->mfrr < CPPR(ss)) {
- icp_check_ipi(xics, server);
+ icp_check_ipi(ss);
+ }
+ QLIST_FOREACH(ics, &ss->xics->ics, list) {
+ ics_resend(ics);
}
- ics_resend(xics->ics);
}
-void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
+void icp_set_cppr(ICPState *ss, uint8_t cppr)
{
- ICPState *ss = xics->ss + server;
uint8_t old_cppr;
uint32_t old_xisr;
@@ -256,22 +352,23 @@ void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
ss->xirr &= ~XISR_MASK; /* Clear XISR */
ss->pending_priority = 0xff;
qemu_irq_lower(ss->output);
- ics_reject(xics->ics, old_xisr);
+ if (ss->xirr_owner) {
+ ics_reject(ss->xirr_owner, old_xisr);
+ ss->xirr_owner = NULL;
+ }
}
} else {
if (!XISR(ss)) {
- icp_resend(xics, server);
+ icp_resend(ss);
}
}
}
-void icp_set_mfrr(XICSState *xics, int server, uint8_t mfrr)
+void icp_set_mfrr(ICPState *ss, uint8_t mfrr)
{
- ICPState *ss = xics->ss + server;
-
ss->mfrr = mfrr;
if (mfrr < CPPR(ss)) {
- icp_check_ipi(xics, server);
+ icp_check_ipi(ss);
}
}
@@ -282,6 +379,7 @@ uint32_t icp_accept(ICPState *ss)
qemu_irq_lower(ss->output);
ss->xirr = ss->pending_priority << 24;
ss->pending_priority = 0xff;
+ ss->xirr_owner = NULL;
trace_xics_icp_accept(xirr, ss->xirr);
@@ -296,33 +394,42 @@ uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr)
return ss->xirr;
}
-void icp_eoi(XICSState *xics, int server, uint32_t xirr)
+void icp_eoi(ICPState *ss, uint32_t xirr)
{
- ICPState *ss = xics->ss + server;
+ ICSState *ics;
+ uint32_t irq;
/* Send EOI -> ICS */
ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
- trace_xics_icp_eoi(server, xirr, ss->xirr);
- ics_eoi(xics->ics, xirr & XISR_MASK);
+ trace_xics_icp_eoi(ss->cs->cpu_index, xirr, ss->xirr);
+ irq = xirr & XISR_MASK;
+ QLIST_FOREACH(ics, &ss->xics->ics, list) {
+ if (ics_valid_irq(ics, irq)) {
+ ics_eoi(ics, irq);
+ }
+ }
if (!XISR(ss)) {
- icp_resend(xics, server);
+ icp_resend(ss);
}
}
-static void icp_irq(XICSState *xics, int server, int nr, uint8_t priority)
+static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
{
+ XICSState *xics = ics->xics;
ICPState *ss = xics->ss + server;
trace_xics_icp_irq(server, nr, priority);
if ((priority >= CPPR(ss))
|| (XISR(ss) && (ss->pending_priority <= priority))) {
- ics_reject(xics->ics, nr);
+ ics_reject(ics, nr);
} else {
- if (XISR(ss)) {
- ics_reject(xics->ics, XISR(ss));
+ if (XISR(ss) && ss->xirr_owner) {
+ ics_reject(ss->xirr_owner, XISR(ss));
+ ss->xirr_owner = NULL;
}
ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
+ ss->xirr_owner = ics;
ss->pending_priority = priority;
trace_xics_icp_raise(ss->xirr, ss->pending_priority);
qemu_irq_raise(ss->output);
@@ -397,7 +504,7 @@ static const TypeInfo icp_info = {
/*
* ICS: Source layer
*/
-static void resend_msi(ICSState *ics, int srcno)
+static void ics_simple_resend_msi(ICSState *ics, int srcno)
{
ICSIRQState *irq = ics->irqs + srcno;
@@ -405,13 +512,12 @@ static void resend_msi(ICSState *ics, int srcno)
if (irq->status & XICS_STATUS_REJECTED) {
irq->status &= ~XICS_STATUS_REJECTED;
if (irq->priority != 0xff) {
- icp_irq(ics->xics, irq->server, srcno + ics->offset,
- irq->priority);
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}
}
}
-static void resend_lsi(ICSState *ics, int srcno)
+static void ics_simple_resend_lsi(ICSState *ics, int srcno)
{
ICSIRQState *irq = ics->irqs + srcno;
@@ -419,51 +525,51 @@ static void resend_lsi(ICSState *ics, int srcno)
&& (irq->status & XICS_STATUS_ASSERTED)
&& !(irq->status & XICS_STATUS_SENT)) {
irq->status |= XICS_STATUS_SENT;
- icp_irq(ics->xics, irq->server, srcno + ics->offset, irq->priority);
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}
}
-static void set_irq_msi(ICSState *ics, int srcno, int val)
+static void ics_simple_set_irq_msi(ICSState *ics, int srcno, int val)
{
ICSIRQState *irq = ics->irqs + srcno;
- trace_xics_set_irq_msi(srcno, srcno + ics->offset);
+ trace_xics_ics_simple_set_irq_msi(srcno, srcno + ics->offset);
if (val) {
if (irq->priority == 0xff) {
irq->status |= XICS_STATUS_MASKED_PENDING;
trace_xics_masked_pending();
} else {
- icp_irq(ics->xics, irq->server, srcno + ics->offset, irq->priority);
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}
}
}
-static void set_irq_lsi(ICSState *ics, int srcno, int val)
+static void ics_simple_set_irq_lsi(ICSState *ics, int srcno, int val)
{
ICSIRQState *irq = ics->irqs + srcno;
- trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
+ trace_xics_ics_simple_set_irq_lsi(srcno, srcno + ics->offset);
if (val) {
irq->status |= XICS_STATUS_ASSERTED;
} else {
irq->status &= ~XICS_STATUS_ASSERTED;
}
- resend_lsi(ics, srcno);
+ ics_simple_resend_lsi(ics, srcno);
}
-static void ics_set_irq(void *opaque, int srcno, int val)
+static void ics_simple_set_irq(void *opaque, int srcno, int val)
{
ICSState *ics = (ICSState *)opaque;
if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
- set_irq_lsi(ics, srcno, val);
+ ics_simple_set_irq_lsi(ics, srcno, val);
} else {
- set_irq_msi(ics, srcno, val);
+ ics_simple_set_irq_msi(ics, srcno, val);
}
}
-static void write_xive_msi(ICSState *ics, int srcno)
+static void ics_simple_write_xive_msi(ICSState *ics, int srcno)
{
ICSIRQState *irq = ics->irqs + srcno;
@@ -473,71 +579,74 @@ static void write_xive_msi(ICSState *ics, int srcno)
}
irq->status &= ~XICS_STATUS_MASKED_PENDING;
- icp_irq(ics->xics, irq->server, srcno + ics->offset, irq->priority);
+ icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}
-static void write_xive_lsi(ICSState *ics, int srcno)
+static void ics_simple_write_xive_lsi(ICSState *ics, int srcno)
{
- resend_lsi(ics, srcno);
+ ics_simple_resend_lsi(ics, srcno);
}
-void ics_write_xive(ICSState *ics, int nr, int server,
- uint8_t priority, uint8_t saved_priority)
+void ics_simple_write_xive(ICSState *ics, int srcno, int server,
+ uint8_t priority, uint8_t saved_priority)
{
- int srcno = nr - ics->offset;
ICSIRQState *irq = ics->irqs + srcno;
irq->server = server;
irq->priority = priority;
irq->saved_priority = saved_priority;
- trace_xics_ics_write_xive(nr, srcno, server, priority);
+ trace_xics_ics_simple_write_xive(ics->offset + srcno, srcno, server,
+ priority);
if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
- write_xive_lsi(ics, srcno);
+ ics_simple_write_xive_lsi(ics, srcno);
} else {
- write_xive_msi(ics, srcno);
+ ics_simple_write_xive_msi(ics, srcno);
}
}
-static void ics_reject(ICSState *ics, int nr)
+static void ics_simple_reject(ICSState *ics, uint32_t nr)
{
ICSIRQState *irq = ics->irqs + nr - ics->offset;
- trace_xics_ics_reject(nr, nr - ics->offset);
- irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
- irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
+ trace_xics_ics_simple_reject(nr, nr - ics->offset);
+ if (irq->flags & XICS_FLAGS_IRQ_MSI) {
+ irq->status |= XICS_STATUS_REJECTED;
+ } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
+ irq->status &= ~XICS_STATUS_SENT;
+ }
}
-static void ics_resend(ICSState *ics)
+static void ics_simple_resend(ICSState *ics)
{
int i;
for (i = 0; i < ics->nr_irqs; i++) {
/* FIXME: filter by server#? */
if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
- resend_lsi(ics, i);
+ ics_simple_resend_lsi(ics, i);
} else {
- resend_msi(ics, i);
+ ics_simple_resend_msi(ics, i);
}
}
}
-static void ics_eoi(ICSState *ics, int nr)
+static void ics_simple_eoi(ICSState *ics, uint32_t nr)
{
int srcno = nr - ics->offset;
ICSIRQState *irq = ics->irqs + srcno;
- trace_xics_ics_eoi(nr);
+ trace_xics_ics_simple_eoi(nr);
if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
irq->status &= ~XICS_STATUS_SENT;
}
}
-static void ics_reset(DeviceState *dev)
+static void ics_simple_reset(DeviceState *dev)
{
- ICSState *ics = ICS(dev);
+ ICSState *ics = ICS_SIMPLE(dev);
int i;
uint8_t flags[ics->nr_irqs];
@@ -554,31 +663,31 @@ static void ics_reset(DeviceState *dev)
}
}
-static int ics_post_load(ICSState *ics, int version_id)
+static int ics_simple_post_load(ICSState *ics, int version_id)
{
int i;
for (i = 0; i < ics->xics->nr_servers; i++) {
- icp_resend(ics->xics, i);
+ icp_resend(&ics->xics->ss[i]);
}
return 0;
}
-static void ics_dispatch_pre_save(void *opaque)
+static void ics_simple_dispatch_pre_save(void *opaque)
{
ICSState *ics = opaque;
- ICSStateClass *info = ICS_GET_CLASS(ics);
+ ICSStateClass *info = ICS_BASE_GET_CLASS(ics);
if (info->pre_save) {
info->pre_save(ics);
}
}
-static int ics_dispatch_post_load(void *opaque, int version_id)
+static int ics_simple_dispatch_post_load(void *opaque, int version_id)
{
ICSState *ics = opaque;
- ICSStateClass *info = ICS_GET_CLASS(ics);
+ ICSStateClass *info = ICS_BASE_GET_CLASS(ics);
if (info->post_load) {
return info->post_load(ics, version_id);
@@ -587,7 +696,7 @@ static int ics_dispatch_post_load(void *opaque, int version_id)
return 0;
}
-static const VMStateDescription vmstate_ics_irq = {
+static const VMStateDescription vmstate_ics_simple_irq = {
.name = "ics/irq",
.version_id = 2,
.minimum_version_id = 1,
@@ -601,86 +710,93 @@ static const VMStateDescription vmstate_ics_irq = {
},
};
-static const VMStateDescription vmstate_ics = {
+static const VMStateDescription vmstate_ics_simple = {
.name = "ics",
.version_id = 1,
.minimum_version_id = 1,
- .pre_save = ics_dispatch_pre_save,
- .post_load = ics_dispatch_post_load,
+ .pre_save = ics_simple_dispatch_pre_save,
+ .post_load = ics_simple_dispatch_post_load,
.fields = (VMStateField[]) {
/* Sanity check */
VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),
VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
- vmstate_ics_irq, ICSIRQState),
+ vmstate_ics_simple_irq,
+ ICSIRQState),
VMSTATE_END_OF_LIST()
},
};
-static void ics_initfn(Object *obj)
+static void ics_simple_initfn(Object *obj)
{
- ICSState *ics = ICS(obj);
+ ICSState *ics = ICS_SIMPLE(obj);
ics->offset = XICS_IRQ_BASE;
}
-static void ics_realize(DeviceState *dev, Error **errp)
+static void ics_simple_realize(DeviceState *dev, Error **errp)
{
- ICSState *ics = ICS(dev);
+ ICSState *ics = ICS_SIMPLE(dev);
if (!ics->nr_irqs) {
error_setg(errp, "Number of interrupts needs to be greater 0");
return;
}
ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
- ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
+ ics->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
}
-static void ics_class_init(ObjectClass *klass, void *data)
+static void ics_simple_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- ICSStateClass *isc = ICS_CLASS(klass);
+ ICSStateClass *isc = ICS_BASE_CLASS(klass);
- dc->realize = ics_realize;
- dc->vmsd = &vmstate_ics;
- dc->reset = ics_reset;
- isc->post_load = ics_post_load;
+ dc->realize = ics_simple_realize;
+ dc->vmsd = &vmstate_ics_simple;
+ dc->reset = ics_simple_reset;
+ isc->post_load = ics_simple_post_load;
+ isc->reject = ics_simple_reject;
+ isc->resend = ics_simple_resend;
+ isc->eoi = ics_simple_eoi;
}
-static const TypeInfo ics_info = {
- .name = TYPE_ICS,
+static const TypeInfo ics_simple_info = {
+ .name = TYPE_ICS_SIMPLE,
+ .parent = TYPE_ICS_BASE,
+ .instance_size = sizeof(ICSState),
+ .class_init = ics_simple_class_init,
+ .class_size = sizeof(ICSStateClass),
+ .instance_init = ics_simple_initfn,
+};
+
+static const TypeInfo ics_base_info = {
+ .name = TYPE_ICS_BASE,
.parent = TYPE_DEVICE,
+ .abstract = true,
.instance_size = sizeof(ICSState),
- .class_init = ics_class_init,
.class_size = sizeof(ICSStateClass),
- .instance_init = ics_initfn,
};
/*
* Exported functions
*/
-int xics_find_source(XICSState *xics, int irq)
+ICSState *xics_find_source(XICSState *xics, int irq)
{
- int sources = 1;
- int src;
+ ICSState *ics;
- /* FIXME: implement multiple sources */
- for (src = 0; src < sources; ++src) {
- ICSState *ics = &xics->ics[src];
+ QLIST_FOREACH(ics, &xics->ics, list) {
if (ics_valid_irq(ics, irq)) {
- return src;
+ return ics;
}
}
-
- return -1;
+ return NULL;
}
qemu_irq xics_get_qirq(XICSState *xics, int irq)
{
- int src = xics_find_source(xics, irq);
+ ICSState *ics = xics_find_source(xics, irq);
- if (src >= 0) {
- ICSState *ics = &xics->ics[src];
+ if (ics) {
return ics->qirqs[irq - ics->offset];
}
@@ -698,7 +814,8 @@ void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
static void xics_register_types(void)
{
type_register_static(&xics_common_info);
- type_register_static(&ics_info);
+ type_register_static(&ics_simple_info);
+ type_register_static(&ics_base_info);
type_register_static(&icp_info);
}
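
The xics.c changes above replace the single ICS array with a list of ICS objects, so callers now resolve an IRQ number to its source object first. A minimal sketch of a caller under the new scheme, assuming a realized XICSState whose list already covers the IRQ (the helper name is illustrative, not part of the patch):

/* Illustrative only: claim a global IRQ number and fetch its qemu_irq line. */
static qemu_irq example_claim_irq(XICSState *xics, int irq, bool lsi)
{
    ICSState *ics = xics_find_source(xics, irq);   /* now returns ICSState * */

    if (!ics) {
        return NULL;                               /* no source covers this IRQ */
    }
    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return xics_get_qirq(xics, irq);
}
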
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index edbd62fd1b..17694eaa87 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -272,7 +272,7 @@ static void ics_kvm_set_irq(void *opaque, int srcno, int val)
static void ics_kvm_reset(DeviceState *dev)
{
- ICSState *ics = ICS(dev);
+ ICSState *ics = ICS_SIMPLE(dev);
int i;
uint8_t flags[ics->nr_irqs];
@@ -293,7 +293,7 @@ static void ics_kvm_reset(DeviceState *dev)
static void ics_kvm_realize(DeviceState *dev, Error **errp)
{
- ICSState *ics = ICS(dev);
+ ICSState *ics = ICS_SIMPLE(dev);
if (!ics->nr_irqs) {
error_setg(errp, "Number of interrupts needs to be greater 0");
@@ -306,7 +306,7 @@ static void ics_kvm_realize(DeviceState *dev, Error **errp)
static void ics_kvm_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- ICSStateClass *icsc = ICS_CLASS(klass);
+ ICSStateClass *icsc = ICS_BASE_CLASS(klass);
dc->realize = ics_kvm_realize;
dc->reset = ics_kvm_reset;
@@ -315,8 +315,8 @@ static void ics_kvm_class_init(ObjectClass *klass, void *data)
}
static const TypeInfo ics_kvm_info = {
- .name = TYPE_KVM_ICS,
- .parent = TYPE_ICS,
+ .name = TYPE_ICS_KVM,
+ .parent = TYPE_ICS_SIMPLE,
.instance_size = sizeof(ICSState),
.class_init = ics_kvm_class_init,
};
@@ -329,6 +329,7 @@ static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
CPUState *cs;
ICPState *ss;
KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
+ int ret;
cs = CPU(cpu);
ss = &xics->ss[cs->cpu_index];
@@ -347,42 +348,32 @@ static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
return;
}
- if (xicskvm->kernel_xics_fd != -1) {
- int ret;
-
- ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0,
- xicskvm->kernel_xics_fd,
- kvm_arch_vcpu_id(cs));
- if (ret < 0) {
- error_report("Unable to connect CPU%ld to kernel XICS: %s",
- kvm_arch_vcpu_id(cs), strerror(errno));
- exit(1);
- }
- ss->cap_irq_xics_enabled = true;
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0, xicskvm->kernel_xics_fd,
+ kvm_arch_vcpu_id(cs));
+ if (ret < 0) {
+ error_report("Unable to connect CPU%ld to kernel XICS: %s",
+ kvm_arch_vcpu_id(cs), strerror(errno));
+ exit(1);
}
+ ss->cap_irq_xics_enabled = true;
}
static void xics_kvm_set_nr_irqs(XICSState *xics, uint32_t nr_irqs,
Error **errp)
{
- xics->nr_irqs = xics->ics->nr_irqs = nr_irqs;
+ ICSState *ics = QLIST_FIRST(&xics->ics);
+
+ /* This needs to be deprecated ... */
+ xics->nr_irqs = nr_irqs;
+ if (ics) {
+ ics->nr_irqs = nr_irqs;
+ }
}
static void xics_kvm_set_nr_servers(XICSState *xics, uint32_t nr_servers,
Error **errp)
{
- int i;
-
- xics->nr_servers = nr_servers;
-
- xics->ss = g_malloc0(xics->nr_servers * sizeof(ICPState));
- for (i = 0; i < xics->nr_servers; i++) {
- char buffer[32];
- object_initialize(&xics->ss[i], sizeof(xics->ss[i]), TYPE_KVM_ICP);
- snprintf(buffer, sizeof(buffer), "icp[%d]", i);
- object_property_add_child(OBJECT(xics), buffer, OBJECT(&xics->ss[i]),
- errp);
- }
+ xics_set_nr_servers(xics, nr_servers, TYPE_KVM_ICP, errp);
}
static void rtas_dummy(PowerPCCPU *cpu, sPAPRMachineState *spapr,
@@ -398,6 +389,7 @@ static void xics_kvm_realize(DeviceState *dev, Error **errp)
{
KVMXICSState *xicskvm = XICS_SPAPR_KVM(dev);
XICSState *xics = XICS_COMMON(dev);
+ ICSState *ics;
int i, rc;
Error *error = NULL;
struct kvm_create_device xics_create_device = {
@@ -449,10 +441,12 @@ static void xics_kvm_realize(DeviceState *dev, Error **errp)
xicskvm->kernel_xics_fd = xics_create_device.fd;
- object_property_set_bool(OBJECT(xics->ics), true, "realized", &error);
- if (error) {
- error_propagate(errp, error);
- goto fail;
+ QLIST_FOREACH(ics, &xics->ics, list) {
+ object_property_set_bool(OBJECT(ics), true, "realized", &error);
+ if (error) {
+ error_propagate(errp, error);
+ goto fail;
+ }
}
assert(xics->nr_servers);
@@ -481,10 +475,12 @@ fail:
static void xics_kvm_initfn(Object *obj)
{
XICSState *xics = XICS_COMMON(obj);
+ ICSState *ics;
- xics->ics = ICS(object_new(TYPE_KVM_ICS));
- object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
- xics->ics->xics = xics;
+ ics = ICS_SIMPLE(object_new(TYPE_ICS_KVM));
+ object_property_add_child(obj, "ics", OBJECT(ics), NULL);
+ ics->xics = xics;
+ QLIST_INSERT_HEAD(&xics->ics, ics, list);
}
static void xics_kvm_class_init(ObjectClass *oc, void *data)
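
Both the KVM and the emulated XICS back ends now defer ICP allocation to a common xics_set_nr_servers() helper whose body is outside this hunk. Reconstructed from the per-backend loops deleted above, it presumably looks roughly like the sketch below; the exact body and error handling of the real helper may differ:

/* Sketch, mirroring the loops removed from xics_kvm.c and xics_spapr.c. */
void xics_set_nr_servers(XICSState *xics, uint32_t nr_servers,
                         const char *typename, Error **errp)
{
    int i;

    xics->nr_servers = nr_servers;
    xics->ss = g_malloc0(nr_servers * sizeof(ICPState));
    for (i = 0; i < nr_servers; i++) {
        char name[32];

        object_initialize(&xics->ss[i], sizeof(xics->ss[i]), typename);
        snprintf(name, sizeof(name), "icp[%d]", i);
        object_property_add_child(OBJECT(xics), name,
                                  OBJECT(&xics->ss[i]), errp);
    }
}
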
diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
index 618826dacf..2e3f1c5e95 100644
--- a/hw/intc/xics_spapr.c
+++ b/hw/intc/xics_spapr.c
@@ -32,6 +32,7 @@
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
+#include "hw/ppc/fdt.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
@@ -43,9 +44,10 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
+ ICPState *icp = &spapr->xics->ss[cs->cpu_index];
target_ulong cppr = args[0];
- icp_set_cppr(spapr->xics, cs->cpu_index, cppr);
+ icp_set_cppr(icp, cppr);
return H_SUCCESS;
}
@@ -59,7 +61,7 @@ static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return H_PARAMETER;
}
- icp_set_mfrr(spapr->xics, server, mfrr);
+ icp_set_mfrr(spapr->xics->ss + server, mfrr);
return H_SUCCESS;
}
@@ -67,7 +69,8 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
- uint32_t xirr = icp_accept(spapr->xics->ss + cs->cpu_index);
+ ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+ uint32_t xirr = icp_accept(icp);
args[0] = xirr;
return H_SUCCESS;
@@ -77,8 +80,8 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
- ICPState *ss = &spapr->xics->ss[cs->cpu_index];
- uint32_t xirr = icp_accept(ss);
+ ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+ uint32_t xirr = icp_accept(icp);
args[0] = xirr;
args[1] = cpu_get_host_ticks();
@@ -89,9 +92,10 @@ static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
+ ICPState *icp = &spapr->xics->ss[cs->cpu_index];
target_ulong xirr = args[0];
- icp_eoi(spapr->xics, cs->cpu_index, xirr);
+ icp_eoi(icp, xirr);
return H_SUCCESS;
}
@@ -99,8 +103,9 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
CPUState *cs = CPU(cpu);
+ ICPState *icp = &spapr->xics->ss[cs->cpu_index];
uint32_t mfrr;
- uint32_t xirr = icp_ipoll(spapr->xics->ss + cs->cpu_index, &mfrr);
+ uint32_t xirr = icp_ipoll(icp, &mfrr);
args[0] = xirr;
args[1] = mfrr;
@@ -113,13 +118,17 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- ICSState *ics = spapr->xics->ics;
- uint32_t nr, server, priority;
+ ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+ uint32_t nr, srcno, server, priority;
if ((nargs != 3) || (nret != 1)) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
nr = rtas_ld(args, 0);
server = xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
@@ -131,7 +140,8 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return;
}
- ics_write_xive(ics, nr, server, priority, priority);
+ srcno = nr - ics->offset;
+ ics_simple_write_xive(ics, srcno, server, priority, priority);
rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
@@ -141,13 +151,17 @@ static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- ICSState *ics = spapr->xics->ics;
- uint32_t nr;
+ ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+ uint32_t nr, srcno;
if ((nargs != 1) || (nret != 3)) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
nr = rtas_ld(args, 0);
@@ -157,8 +171,9 @@ static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
rtas_st(rets, 0, RTAS_OUT_SUCCESS);
- rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
- rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
+ srcno = nr - ics->offset;
+ rtas_st(rets, 1, ics->irqs[srcno].server);
+ rtas_st(rets, 2, ics->irqs[srcno].priority);
}
static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
@@ -166,13 +181,17 @@ static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- ICSState *ics = spapr->xics->ics;
- uint32_t nr;
+ ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+ uint32_t nr, srcno;
if ((nargs != 1) || (nret != 1)) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
nr = rtas_ld(args, 0);
@@ -181,8 +200,9 @@ static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return;
}
- ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
- ics->irqs[nr - ics->offset].priority);
+ srcno = nr - ics->offset;
+ ics_simple_write_xive(ics, srcno, ics->irqs[srcno].server, 0xff,
+ ics->irqs[srcno].priority);
rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
@@ -192,13 +212,17 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- ICSState *ics = spapr->xics->ics;
- uint32_t nr;
+ ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+ uint32_t nr, srcno;
if ((nargs != 1) || (nret != 1)) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
+ if (!ics) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
nr = rtas_ld(args, 0);
@@ -207,9 +231,10 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return;
}
- ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
- ics->irqs[nr - ics->offset].saved_priority,
- ics->irqs[nr - ics->offset].saved_priority);
+ srcno = nr - ics->offset;
+ ics_simple_write_xive(ics, srcno, ics->irqs[srcno].server,
+ ics->irqs[srcno].saved_priority,
+ ics->irqs[srcno].saved_priority);
rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
@@ -217,29 +242,25 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
static void xics_spapr_set_nr_irqs(XICSState *xics, uint32_t nr_irqs,
Error **errp)
{
- xics->nr_irqs = xics->ics->nr_irqs = nr_irqs;
+ ICSState *ics = QLIST_FIRST(&xics->ics);
+
+ /* This needs to be deprecated ... */
+ xics->nr_irqs = nr_irqs;
+ if (ics) {
+ ics->nr_irqs = nr_irqs;
+ }
}
static void xics_spapr_set_nr_servers(XICSState *xics, uint32_t nr_servers,
Error **errp)
{
- int i;
-
- xics->nr_servers = nr_servers;
-
- xics->ss = g_malloc0(xics->nr_servers * sizeof(ICPState));
- for (i = 0; i < xics->nr_servers; i++) {
- char buffer[32];
- object_initialize(&xics->ss[i], sizeof(xics->ss[i]), TYPE_ICP);
- snprintf(buffer, sizeof(buffer), "icp[%d]", i);
- object_property_add_child(OBJECT(xics), buffer, OBJECT(&xics->ss[i]),
- errp);
- }
+ xics_set_nr_servers(xics, nr_servers, TYPE_ICP, errp);
}
static void xics_spapr_realize(DeviceState *dev, Error **errp)
{
XICSState *xics = XICS_SPAPR(dev);
+ ICSState *ics;
Error *error = NULL;
int i;
@@ -261,10 +282,12 @@ static void xics_spapr_realize(DeviceState *dev, Error **errp)
spapr_register_hypercall(H_EOI, h_eoi);
spapr_register_hypercall(H_IPOLL, h_ipoll);
- object_property_set_bool(OBJECT(xics->ics), true, "realized", &error);
- if (error) {
- error_propagate(errp, error);
- return;
+ QLIST_FOREACH(ics, &xics->ics, list) {
+ object_property_set_bool(OBJECT(ics), true, "realized", &error);
+ if (error) {
+ error_propagate(errp, error);
+ return;
+ }
}
for (i = 0; i < xics->nr_servers; i++) {
@@ -280,10 +303,12 @@ static void xics_spapr_realize(DeviceState *dev, Error **errp)
static void xics_spapr_initfn(Object *obj)
{
XICSState *xics = XICS_SPAPR(obj);
+ ICSState *ics;
- xics->ics = ICS(object_new(TYPE_ICS));
- object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
- xics->ics->xics = xics;
+ ics = ICS_SIMPLE(object_new(TYPE_ICS_SIMPLE));
+ object_property_add_child(obj, "ics", OBJECT(ics), NULL);
+ ics->xics = xics;
+ QLIST_INSERT_HEAD(&xics->ics, ics, list);
}
static void xics_spapr_class_init(ObjectClass *oc, void *data)
@@ -329,14 +354,15 @@ static int ics_find_free_block(ICSState *ics, int num, int alignnum)
return -1;
}
-int xics_spapr_alloc(XICSState *xics, int src, int irq_hint, bool lsi,
- Error **errp)
+int xics_spapr_alloc(XICSState *xics, int irq_hint, bool lsi, Error **errp)
{
- ICSState *ics = &xics->ics[src];
+ ICSState *ics = QLIST_FIRST(&xics->ics);
int irq;
+ if (!ics) {
+ return -1;
+ }
if (irq_hint) {
- assert(src == xics_find_source(xics, irq_hint));
if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
return -1;
@@ -352,7 +378,7 @@ int xics_spapr_alloc(XICSState *xics, int src, int irq_hint, bool lsi,
}
ics_set_irq_type(ics, irq - ics->offset, lsi);
- trace_xics_alloc(src, irq);
+ trace_xics_alloc(irq);
return irq;
}
@@ -361,13 +387,16 @@ int xics_spapr_alloc(XICSState *xics, int src, int irq_hint, bool lsi,
* Allocate block of consecutive IRQs, and return the number of the first IRQ in
* the block. If align==true, aligns the first IRQ number to num.
*/
-int xics_spapr_alloc_block(XICSState *xics, int src, int num, bool lsi,
- bool align, Error **errp)
+int xics_spapr_alloc_block(XICSState *xics, int num, bool lsi, bool align,
+ Error **errp)
{
+ ICSState *ics = QLIST_FIRST(&xics->ics);
int i, first = -1;
- ICSState *ics = &xics->ics[src];
- assert(src == 0);
+ if (!ics) {
+ return -1;
+ }
+
/*
* MSIMessage::data is used for storing VIRQ so
* it has to be aligned to num to support multiple
@@ -394,7 +423,7 @@ int xics_spapr_alloc_block(XICSState *xics, int src, int num, bool lsi,
}
first += ics->offset;
- trace_xics_alloc_block(src, first, num, lsi, align);
+ trace_xics_alloc_block(first, num, lsi, align);
return first;
}
@@ -405,7 +434,7 @@ static void ics_free(ICSState *ics, int srcno, int num)
for (i = srcno; i < srcno + num; ++i) {
if (ICS_IRQ_FREE(ics, i)) {
- trace_xics_ics_free_warn(ics - ics->xics->ics, i + ics->offset);
+ trace_xics_ics_free_warn(0, i + ics->offset);
}
memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
}
@@ -413,19 +442,35 @@ static void ics_free(ICSState *ics, int srcno, int num)
void xics_spapr_free(XICSState *xics, int irq, int num)
{
- int src = xics_find_source(xics, irq);
-
- if (src >= 0) {
- ICSState *ics = &xics->ics[src];
-
- /* FIXME: implement multiple sources */
- assert(src == 0);
+ ICSState *ics = xics_find_source(xics, irq);
- trace_xics_ics_free(ics - xics->ics, irq, num);
+ if (ics) {
+ trace_xics_ics_free(0, irq, num);
ics_free(ics, irq - ics->offset, num);
}
}
+void spapr_dt_xics(XICSState *xics, void *fdt, uint32_t phandle)
+{
+ uint32_t interrupt_server_ranges_prop[] = {
+ 0, cpu_to_be32(xics->nr_servers),
+ };
+ int node;
+
+ _FDT(node = fdt_add_subnode(fdt, 0, "interrupt-controller"));
+
+ _FDT(fdt_setprop_string(fdt, node, "device_type",
+ "PowerPC-External-Interrupt-Presentation"));
+ _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,ppc-xicp"));
+ _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
+ _FDT(fdt_setprop(fdt, node, "ibm,interrupt-server-ranges",
+ interrupt_server_ranges_prop,
+ sizeof(interrupt_server_ranges_prop)));
+ _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
+ _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
+ _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
+}
+
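
The new spapr_dt_xics() moves generation of the interrupt-controller node into the XICS code. A caller-side sketch, assuming it is invoked from the machine's FDT build path once the XICS device is realized (the surrounding function and the phandle value are examples only):

/* Sketch: emit the XICS node while assembling the guest device tree. */
#define EXAMPLE_PHANDLE_XICP 0x00001111   /* arbitrary example phandle */

static void example_build_interrupt_node(sPAPRMachineState *spapr, void *fdt)
{
    /* Produces "interrupt-controller" with the "IBM,ppc-xicp" compatible,
     * the [0, nr_servers) server range and both phandle properties. */
    spapr_dt_xics(spapr->xics, fdt, EXAMPLE_PHANDLE_XICP);
}
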
static void xics_spapr_register_types(void)
{
type_register_static(&xics_spapr_info);
diff --git a/hw/ipmi/Makefile.objs b/hw/ipmi/Makefile.objs
index a90318d5ba..1b422bbee0 100644
--- a/hw/ipmi/Makefile.objs
+++ b/hw/ipmi/Makefile.objs
@@ -1,5 +1,5 @@
common-obj-$(CONFIG_IPMI) += ipmi.o
common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_sim.o
-common-obj-$(CONFIG_IPMI_LOCAL) += ipmi_bmc_extern.o
+common-obj-$(CONFIG_IPMI_EXTERN) += ipmi_bmc_extern.o
common-obj-$(CONFIG_ISA_IPMI_KCS) += isa_ipmi_kcs.o
common-obj-$(CONFIG_ISA_IPMI_BT) += isa_ipmi_bt.o
diff --git a/hw/ipmi/ipmi.c b/hw/ipmi/ipmi.c
index f09f217e78..5cf1caa88a 100644
--- a/hw/ipmi/ipmi.c
+++ b/hw/ipmi/ipmi.c
@@ -51,7 +51,7 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
if (checkonly) {
return 0;
}
- qemu_system_powerdown_request();
+ qemu_system_shutdown_request();
return 0;
case IPMI_SEND_NMI:
@@ -61,9 +61,15 @@ static int ipmi_do_hw_op(IPMIInterface *s, enum ipmi_op op, int checkonly)
qmp_inject_nmi(NULL);
return 0;
+ case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
+ if (checkonly) {
+ return 0;
+ }
+ qemu_system_powerdown_request();
+ return 0;
+
case IPMI_POWERCYCLE_CHASSIS:
case IPMI_PULSE_DIAG_IRQ:
- case IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP:
case IPMI_POWERON_CHASSIS:
default:
return IPMI_CC_COMMAND_NOT_SUPPORTED;
diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c
index 157879e177..e8e3d250b6 100644
--- a/hw/ipmi/ipmi_bmc_extern.c
+++ b/hw/ipmi/ipmi_bmc_extern.c
@@ -54,7 +54,8 @@
#define VM_CAPABILITIES_IRQ 0x04
#define VM_CAPABILITIES_NMI 0x08
#define VM_CAPABILITIES_ATTN 0x10
-#define VM_CMD_FORCEOFF 0x09
+#define VM_CAPABILITIES_GRACEFUL_SHUTDOWN 0x20
+#define VM_CMD_GRACEFUL_SHUTDOWN 0x09
#define TYPE_IPMI_BMC_EXTERN "ipmi-bmc-extern"
#define IPMI_BMC_EXTERN(obj) OBJECT_CHECK(IPMIBmcExtern, (obj), \
@@ -62,7 +63,7 @@
typedef struct IPMIBmcExtern {
IPMIBmc parent;
- CharDriverState *chr;
+ CharBackend chr;
bool connected;
@@ -100,12 +101,16 @@ ipmb_checksum(const unsigned char *data, int size, unsigned char start)
static void continue_send(IPMIBmcExtern *ibe)
{
+ int ret;
if (ibe->outlen == 0) {
goto check_reset;
}
send:
- ibe->outpos += qemu_chr_fe_write(ibe->chr, ibe->outbuf + ibe->outpos,
- ibe->outlen - ibe->outpos);
+ ret = qemu_chr_fe_write(&ibe->chr, ibe->outbuf + ibe->outpos,
+ ibe->outlen - ibe->outpos);
+ if (ret > 0) {
+ ibe->outpos += ret;
+ }
if (ibe->outpos < ibe->outlen) {
/* Not fully transmitted, try again in a 10ms */
timer_mod_ns(ibe->extern_timer,
@@ -272,8 +277,8 @@ static void handle_hw_op(IPMIBmcExtern *ibe, unsigned char hw_op)
k->do_hw_op(s, IPMI_SEND_NMI, 0);
break;
- case VM_CMD_FORCEOFF:
- qemu_system_shutdown_request();
+ case VM_CMD_GRACEFUL_SHUTDOWN:
+ k->do_hw_op(s, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 0);
break;
}
}
@@ -397,6 +402,10 @@ static void chr_event(void *opaque, int event)
if (k->do_hw_op(ibe->parent.intf, IPMI_POWEROFF_CHASSIS, 1) == 0) {
v |= VM_CAPABILITIES_POWER;
}
+ if (k->do_hw_op(ibe->parent.intf, IPMI_SHUTDOWN_VIA_ACPI_OVERTEMP, 1)
+ == 0) {
+ v |= VM_CAPABILITIES_GRACEFUL_SHUTDOWN;
+ }
if (k->do_hw_op(ibe->parent.intf, IPMI_RESET_CHASSIS, 1) == 0) {
v |= VM_CAPABILITIES_RESET;
}
@@ -438,12 +447,13 @@ static void ipmi_bmc_extern_realize(DeviceState *dev, Error **errp)
{
IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(dev);
- if (!ibe->chr) {
+ if (!qemu_chr_fe_get_driver(&ibe->chr)) {
error_setg(errp, "IPMI external bmc requires chardev attribute");
return;
}
- qemu_chr_add_handlers(ibe->chr, can_receive, receive, chr_event, ibe);
+ qemu_chr_fe_set_handlers(&ibe->chr, can_receive, receive,
+ chr_event, ibe, NULL, true);
}
static int ipmi_bmc_extern_post_migrate(void *opaque, int version_id)
@@ -487,6 +497,14 @@ static void ipmi_bmc_extern_init(Object *obj)
vmstate_register(NULL, 0, &vmstate_ipmi_bmc_extern, ibe);
}
+static void ipmi_bmc_extern_finalize(Object *obj)
+{
+ IPMIBmcExtern *ibe = IPMI_BMC_EXTERN(obj);
+
+ timer_del(ibe->extern_timer);
+ timer_free(ibe->extern_timer);
+}
+
static Property ipmi_bmc_extern_properties[] = {
DEFINE_PROP_CHR("chardev", IPMIBmcExtern, chr),
DEFINE_PROP_END_OF_LIST(),
@@ -499,6 +517,7 @@ static void ipmi_bmc_extern_class_init(ObjectClass *oc, void *data)
bk->handle_command = ipmi_bmc_extern_handle_command;
bk->handle_reset = ipmi_bmc_extern_handle_reset;
+ dc->hotpluggable = false;
dc->realize = ipmi_bmc_extern_realize;
dc->props = ipmi_bmc_extern_properties;
}
@@ -508,6 +527,7 @@ static const TypeInfo ipmi_bmc_extern_type = {
.parent = TYPE_IPMI_BMC,
.instance_size = sizeof(IPMIBmcExtern),
.instance_init = ipmi_bmc_extern_init,
+ .instance_finalize = ipmi_bmc_extern_finalize,
.class_init = ipmi_bmc_extern_class_init,
};
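
ipmi_bmc_extern now embeds a CharBackend instead of holding a raw CharDriverState pointer, so the presence check and handler registration go through the qemu_chr_fe_* front-end helpers, as in the hunk above. The same pattern condensed for a generic device; the struct, cast macro and callbacks are placeholders, not part of the patch:

typedef struct ExampleDev {
    DeviceState parent_obj;
    CharBackend chr;              /* filled in by DEFINE_PROP_CHR("chardev", ...) */
} ExampleDev;

static void example_realize(DeviceState *dev, Error **errp)
{
    ExampleDev *s = EXAMPLE_DEV(dev);                  /* placeholder cast macro */

    if (!qemu_chr_fe_get_driver(&s->chr)) {            /* was: if (!s->chr) */
        error_setg(errp, "chardev property is required");
        return;
    }
    /* can_receive/receive/event keep their old signatures; the trailing
     * NULL/true are the new GMainContext and set_open arguments. */
    qemu_chr_fe_set_handlers(&s->chr, example_can_receive, example_receive,
                             example_event, s, NULL, true);
}
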
diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c
index dc9c14cd29..c7883d6f5e 100644
--- a/hw/ipmi/ipmi_bmc_sim.c
+++ b/hw/ipmi/ipmi_bmc_sim.c
@@ -217,7 +217,6 @@ struct IPMIBmcSim {
/* Odd netfns are for responses, so we only need the even ones. */
const IPMINetfn *netfns[MAX_NETFNS / 2];
- QemuMutex lock;
/* We allow one event in the buffer */
uint8_t evtbuf[16];
@@ -940,7 +939,6 @@ static void get_msg(IPMIBmcSim *ibs,
{
IPMIRcvBufEntry *msg;
- qemu_mutex_lock(&ibs->lock);
if (QTAILQ_EMPTY(&ibs->rcvbufs)) {
rsp_buffer_set_error(rsp, 0x80); /* Queue empty */
goto out;
@@ -960,7 +958,6 @@ static void get_msg(IPMIBmcSim *ibs,
}
out:
- qemu_mutex_unlock(&ibs->lock);
return;
}
@@ -1055,11 +1052,9 @@ static void send_msg(IPMIBmcSim *ibs,
end_msg:
msg->buf[msg->len] = ipmb_checksum(msg->buf, msg->len, 0);
msg->len++;
- qemu_mutex_lock(&ibs->lock);
QTAILQ_INSERT_TAIL(&ibs->rcvbufs, msg, entry);
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
k->set_atn(s, 1, attn_irq_enabled(ibs));
- qemu_mutex_unlock(&ibs->lock);
}
static void do_watchdog_reset(IPMIBmcSim *ibs)
@@ -1753,7 +1748,6 @@ static void ipmi_sim_realize(DeviceState *dev, Error **errp)
unsigned int i;
IPMIBmcSim *ibs = IPMI_BMC_SIMULATOR(b);
- qemu_mutex_init(&ibs->lock);
QTAILQ_INIT(&ibs->rcvbufs);
ibs->bmc_global_enables = (1 << IPMI_BMC_EVENT_LOG_BIT);
@@ -1773,7 +1767,7 @@ static void ipmi_sim_realize(DeviceState *dev, Error **errp)
ibs->acpi_power_state[1] = 0;
if (qemu_uuid_set) {
- memcpy(&ibs->uuid, qemu_uuid, 16);
+ memcpy(&ibs->uuid, &qemu_uuid, 16);
} else {
memset(&ibs->uuid, 0, 16);
}
@@ -1791,6 +1785,7 @@ static void ipmi_sim_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
IPMIBmcClass *bk = IPMI_BMC_CLASS(oc);
+ dc->hotpluggable = false;
dc->realize = ipmi_sim_realize;
bk->handle_command = ipmi_sim_handle_command;
}
diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c
index 9a38f8a28a..80444977a0 100644
--- a/hw/ipmi/isa_ipmi_kcs.c
+++ b/hw/ipmi/isa_ipmi_kcs.c
@@ -433,10 +433,8 @@ const VMStateDescription vmstate_ISAIPMIKCSDevice = {
VMSTATE_BOOL(kcs.use_irq, ISAIPMIKCSDevice),
VMSTATE_BOOL(kcs.irqs_enabled, ISAIPMIKCSDevice),
VMSTATE_UINT32(kcs.outpos, ISAIPMIKCSDevice),
- VMSTATE_VBUFFER_UINT32(kcs.outmsg, ISAIPMIKCSDevice, 1, NULL, 0,
- kcs.outlen),
- VMSTATE_VBUFFER_UINT32(kcs.inmsg, ISAIPMIKCSDevice, 1, NULL, 0,
- kcs.inlen),
+ VMSTATE_UINT8_ARRAY(kcs.outmsg, ISAIPMIKCSDevice, MAX_IPMI_MSG_SIZE),
+ VMSTATE_UINT8_ARRAY(kcs.inmsg, ISAIPMIKCSDevice, MAX_IPMI_MSG_SIZE),
VMSTATE_BOOL(kcs.write_end, ISAIPMIKCSDevice),
VMSTATE_UINT8(kcs.status_reg, ISAIPMIKCSDevice),
VMSTATE_UINT8(kcs.data_out_reg, ISAIPMIKCSDevice),
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index ce74db232a..9d07b118c0 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -131,24 +131,20 @@ void isa_register_ioport(ISADevice *dev, MemoryRegion *io, uint16_t start)
isa_init_ioport(dev, start);
}
-void isa_register_portio_list(ISADevice *dev, uint16_t start,
+void isa_register_portio_list(ISADevice *dev,
+ PortioList *piolist, uint16_t start,
const MemoryRegionPortio *pio_start,
void *opaque, const char *name)
{
- PortioList piolist;
+ assert(piolist && !piolist->owner);
/* START is how we should treat DEV, regardless of the actual
contents of the portio array. This is how the old code
actually handled e.g. the FDC device. */
isa_init_ioport(dev, start);
- /* FIXME: the device should store created PortioList in its state. Note
- that DEV can be NULL here and that single device can register several
- portio lists. Current implementation is leaking memory allocated
- in portio_list_init. The leak is not critical because it happens only
- at initialization time. */
- portio_list_init(&piolist, OBJECT(dev), pio_start, opaque, name);
- portio_list_add(&piolist, isabus->address_space_io, start);
+ portio_list_init(piolist, OBJECT(dev), pio_start, opaque, name);
+ portio_list_add(piolist, isabus->address_space_io, start);
}
static void isa_device_init(Object *obj)
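
isa_register_portio_list() no longer builds a throwaway PortioList on the stack; the caller now owns it, which is what the added assert enforces and why the old FIXME about the leak could be dropped. A sketch of the updated calling convention (device type, I/O base and handlers are illustrative):

typedef struct ExampleISAState {
    ISADevice parent_obj;
    PortioList portio_list;            /* lives in the device state now */
} ExampleISAState;

static const MemoryRegionPortio example_portio[] = {
    { 0, 8, 1, .read = example_ioport_read, .write = example_ioport_write },
    PORTIO_END_OF_LIST(),
};

static void example_isa_realizefn(DeviceState *dev, Error **errp)
{
    ExampleISAState *s = EXAMPLE_ISA_STATE(dev);   /* placeholder cast macro */

    /* The list must still be zero-initialized (owner == NULL) at this point. */
    isa_register_portio_list(ISA_DEVICE(dev), &s->portio_list, 0x378,
                             example_portio, s, "example");
}
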
diff --git a/hw/isa/pc87312.c b/hw/isa/pc87312.c
index c3ebf3e7a0..b1c1a0acb1 100644
--- a/hw/isa/pc87312.c
+++ b/hw/isa/pc87312.c
@@ -283,7 +283,7 @@ static void pc87312_realize(DeviceState *dev, Error **errp)
/* FIXME use a qdev chardev prop instead of parallel_hds[] */
chr = parallel_hds[0];
if (chr == NULL) {
- chr = qemu_chr_new("par0", "null", NULL);
+ chr = qemu_chr_new("par0", "null");
}
isa = isa_create(bus, "isa-parallel");
d = DEVICE(isa);
@@ -303,7 +303,7 @@ static void pc87312_realize(DeviceState *dev, Error **errp)
chr = serial_hds[i];
if (chr == NULL) {
snprintf(name, sizeof(name), "ser%d", i);
- chr = qemu_chr_new(name, "null", NULL);
+ chr = qemu_chr_new(name, "null");
}
isa = isa_create(bus, "isa-serial");
d = DEVICE(isa);
diff --git a/hw/lm32/lm32_hwsetup.h b/hw/lm32/lm32_hwsetup.h
index b71e6eafba..23e18784df 100644
--- a/hw/lm32/lm32_hwsetup.h
+++ b/hw/lm32/lm32_hwsetup.h
@@ -75,7 +75,7 @@ static inline void hwsetup_create_rom(HWSetup *hw,
hwaddr base)
{
rom_add_blob("hwsetup", hw->data, TARGET_PAGE_SIZE,
- TARGET_PAGE_SIZE, base, NULL, NULL, NULL);
+ TARGET_PAGE_SIZE, base, NULL, NULL, NULL, NULL);
}
static inline void hwsetup_add_u8(HWSetup *hw, uint8_t u)
diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c
index e14896e529..b81901fdfd 100644
--- a/hw/m68k/mcf5206.c
+++ b/hw/m68k/mcf5206.c
@@ -139,7 +139,7 @@ static m5206_timer_state *m5206_timer_init(qemu_irq irq)
s = (m5206_timer_state *)g_malloc0(sizeof(m5206_timer_state));
bh = qemu_bh_new(m5206_timer_trigger, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
s->irq = irq;
m5206_timer_reset(s);
return s;
diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c
index 24155574f2..3438314c35 100644
--- a/hw/m68k/mcf5208.c
+++ b/hw/m68k/mcf5208.c
@@ -21,7 +21,7 @@
#include "elf.h"
#include "exec/address-spaces.h"
-#define SYS_FREQ 66000000
+#define SYS_FREQ 166666666
#define PCSR_EN 0x0001
#define PCSR_RLD 0x0002
@@ -183,7 +183,7 @@ static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic)
for (i = 0; i < 2; i++) {
s = (m5208_timer_state *)g_malloc0(sizeof(m5208_timer_state));
bh = qemu_bh_new(m5208_timer_trigger, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
memory_region_init_io(&s->iomem, NULL, &m5208_timer_ops, s,
"m5208-timer", 0x00004000);
memory_region_add_subregion(address_space, 0xfc080000 + 0x4000 * i,
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 7895805a23..db896b0bb6 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -148,13 +148,9 @@ static MemoryRegion *nvdimm_get_vmstate_memory_region(PCDIMMDevice *dimm)
static void nvdimm_class_init(ObjectClass *oc, void *data)
{
- DeviceClass *dc = DEVICE_CLASS(oc);
PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
NVDIMMClass *nvc = NVDIMM_CLASS(oc);
- /* nvdimm hotplug has not been supported yet. */
- dc->hotpluggable = false;
-
ddc->realize = nvdimm_realize;
ddc->get_memory_region = nvdimm_get_memory_region;
ddc->get_vmstate_memory_region = nvdimm_get_vmstate_memory_region;
diff --git a/hw/mem/trace-events b/hw/mem/trace-events
new file mode 100644
index 0000000000..323c3c10d5
--- /dev/null
+++ b/hw/mem/trace-events
@@ -0,0 +1,5 @@
+# See docs/trace-events.txt for syntax documentation.
+
+# hw/mem/pc-dimm.c
+mhp_pc_dimm_assigned_slot(int slot) "%d"
+mhp_pc_dimm_assigned_address(uint64_t addr) "0x%"PRIx64
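
The new hw/mem/trace-events entries translate into generated trace_<name>() calls in pc-dimm.c; roughly, the hotplug path would emit them as below (a sketch of the call sites, not the exact code):

/* In hw/mem/pc-dimm.c, once a slot and a base address have been picked: */
trace_mhp_pc_dimm_assigned_slot(slot);
trace_mhp_pc_dimm_assigned_address(addr);
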
diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c
index 9eebb1a521..1834d22a61 100644
--- a/hw/microblaze/boot.c
+++ b/hw/microblaze/boot.c
@@ -30,7 +30,6 @@
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
-#include "qemu-common.h"
#include "sysemu/device_tree.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
diff --git a/hw/mips/mips_fulong2e.c b/hw/mips/mips_fulong2e.c
index 889cdc7ca7..9a4dae42d9 100644
--- a/hw/mips/mips_fulong2e.c
+++ b/hw/mips/mips_fulong2e.c
@@ -374,7 +374,7 @@ static void mips_fulong2e_init(MachineState *machine)
rtc_init(isa_bus, 2000, NULL);
- serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS);
+ serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);
parallel_hds_isa_init(isa_bus, 1);
/* Sound card */
diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c
index e90857ee0b..cf48f420cc 100644
--- a/hw/mips/mips_malta.c
+++ b/hw/mips/mips_malta.c
@@ -47,7 +47,6 @@
#include "elf.h"
#include "hw/timer/mc146818rtc.h"
#include "hw/timer/i8254.h"
-#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "exec/address-spaces.h"
#include "hw/sysbus.h" /* SysBusDevice */
@@ -85,9 +84,10 @@ typedef struct {
uint32_t i2coe;
uint32_t i2cout;
uint32_t i2csel;
- CharDriverState *display;
+ CharBackend display;
char display_text[9];
SerialState *uart;
+ bool display_inited;
} MaltaFPGAState;
#define TYPE_MIPS_MALTA "mips-malta"
@@ -124,8 +124,10 @@ static void malta_fpga_update_display(void *opaque)
}
leds_text[8] = '\0';
- qemu_chr_fe_printf(s->display, "\e[H\n\n|\e[32m%-8.8s\e[00m|\r\n", leds_text);
- qemu_chr_fe_printf(s->display, "\n\n\n\n|\e[31m%-8.8s\e[00m|", s->display_text);
+ qemu_chr_fe_printf(&s->display, "\e[H\n\n|\e[32m%-8.8s\e[00m|\r\n",
+ leds_text);
+ qemu_chr_fe_printf(&s->display, "\n\n\n\n|\e[31m%-8.8s\e[00m|",
+ s->display_text);
}
/*
@@ -530,23 +532,29 @@ static void malta_fpga_reset(void *opaque)
snprintf(s->display_text, 9, " ");
}
-static void malta_fpga_led_init(CharDriverState *chr)
+static void malta_fgpa_display_event(void *opaque, int event)
{
- qemu_chr_fe_printf(chr, "\e[HMalta LEDBAR\r\n");
- qemu_chr_fe_printf(chr, "+--------+\r\n");
- qemu_chr_fe_printf(chr, "+ +\r\n");
- qemu_chr_fe_printf(chr, "+--------+\r\n");
- qemu_chr_fe_printf(chr, "\n");
- qemu_chr_fe_printf(chr, "Malta ASCII\r\n");
- qemu_chr_fe_printf(chr, "+--------+\r\n");
- qemu_chr_fe_printf(chr, "+ +\r\n");
- qemu_chr_fe_printf(chr, "+--------+\r\n");
+ MaltaFPGAState *s = opaque;
+
+ if (event == CHR_EVENT_OPENED && !s->display_inited) {
+ qemu_chr_fe_printf(&s->display, "\e[HMalta LEDBAR\r\n");
+ qemu_chr_fe_printf(&s->display, "+--------+\r\n");
+ qemu_chr_fe_printf(&s->display, "+ +\r\n");
+ qemu_chr_fe_printf(&s->display, "+--------+\r\n");
+ qemu_chr_fe_printf(&s->display, "\n");
+ qemu_chr_fe_printf(&s->display, "Malta ASCII\r\n");
+ qemu_chr_fe_printf(&s->display, "+--------+\r\n");
+ qemu_chr_fe_printf(&s->display, "+ +\r\n");
+ qemu_chr_fe_printf(&s->display, "+--------+\r\n");
+ s->display_inited = true;
+ }
}
static MaltaFPGAState *malta_fpga_init(MemoryRegion *address_space,
hwaddr base, qemu_irq uart_irq, CharDriverState *uart_chr)
{
MaltaFPGAState *s;
+ CharDriverState *chr;
s = (MaltaFPGAState *)g_malloc0(sizeof(MaltaFPGAState));
@@ -560,7 +568,10 @@ static MaltaFPGAState *malta_fpga_init(MemoryRegion *address_space,
memory_region_add_subregion(address_space, base, &s->iomem_lo);
memory_region_add_subregion(address_space, base + 0xa00, &s->iomem_hi);
- s->display = qemu_chr_new("fpga", "vc:320x200", malta_fpga_led_init);
+ chr = qemu_chr_new("fpga", "vc:320x200");
+ qemu_chr_fe_init(&s->display, chr, NULL);
+ qemu_chr_fe_set_handlers(&s->display, NULL, NULL,
+ malta_fgpa_display_event, s, NULL, true);
s->uart = serial_mm_init(address_space, base + 0x900, 3, uart_irq,
230400, uart_chr, DEVICE_NATIVE_ENDIAN);
@@ -1025,7 +1036,7 @@ void mips_malta_init(MachineState *machine)
if (!serial_hds[i]) {
char label[32];
snprintf(label, sizeof(label), "serial%d", i);
- serial_hds[i] = qemu_chr_new(label, "null", NULL);
+ serial_hds[i] = qemu_chr_new(label, "null");
}
}
@@ -1215,7 +1226,7 @@ void mips_malta_init(MachineState *machine)
isa_create_simple(isa_bus, "i8042");
rtc_init(isa_bus, 2000, NULL);
- serial_hds_isa_init(isa_bus, 2);
+ serial_hds_isa_init(isa_bus, 0, 2);
parallel_hds_isa_init(isa_bus, 1);
for(i = 0; i < MAX_FD; i++) {
diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c
index 16a59c779c..27548c43b6 100644
--- a/hw/mips/mips_r4k.c
+++ b/hw/mips/mips_r4k.c
@@ -286,7 +286,7 @@ void mips_r4k_init(MachineState *machine)
pit = pit_init(isa_bus, 0x40, 0, NULL);
- serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS);
+ serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);
isa_vga_init(isa_bus);
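
serial_hds_isa_init() grows a starting index, so the MIPS boards above pass 0 to keep their old behaviour. Presumably the new argument lets a board skip serial_hds[] entries that another device model already claims; a hedged example of that use, assuming the signature is (bus, from, to) with 'to' exclusive:

/* Bind only serial2..MAX_SERIAL_PORTS-1, leaving entries 0 and 1
 * to a SuperIO chip that registers them itself. */
serial_hds_isa_init(isa_bus, 2, MAX_SERIAL_PORTS);
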
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index 4cfbd1024a..1a89615a62 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -52,4 +52,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_EDU) += edu.o
obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
obj-$(CONFIG_AUX) += auxbus.o
-obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o
+obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o
diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
index c7e2c8263f..b1f3e6f6b8 100644
--- a/hw/misc/aspeed_scu.c
+++ b/hw/misc/aspeed_scu.c
@@ -120,6 +120,41 @@ static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = {
[BMC_DEV_ID] = 0x00002402U
};
+/* SCU70 bit 23: 0 = 24 MHz. bits 11:9: 0b001 = AXI/AHB ratio 2:1 */
+/* AST2500 revision A1 */
+
+static const uint32_t ast2500_a1_resets[ASPEED_SCU_NR_REGS] = {
+ [SYS_RST_CTRL] = 0xFFCFFEDCU,
+ [CLK_SEL] = 0xF3F40000U,
+ [CLK_STOP_CTRL] = 0x19FC3E8BU,
+ [D2PLL_PARAM] = 0x00026108U,
+ [MPLL_PARAM] = 0x00030291U,
+ [HPLL_PARAM] = 0x93000400U,
+ [MISC_CTRL1] = 0x00000010U,
+ [PCI_CTRL1] = 0x20001A03U,
+ [PCI_CTRL2] = 0x20001A03U,
+ [PCI_CTRL3] = 0x04000030U,
+ [SYS_RST_STATUS] = 0x00000001U,
+ [SOC_SCRATCH1] = 0x000000C0U, /* SoC completed DRAM init */
+ [MISC_CTRL2] = 0x00000023U,
+ [RNG_CTRL] = 0x0000000EU,
+ [PINMUX_CTRL2] = 0x0000F000U,
+ [PINMUX_CTRL3] = 0x03000000U,
+ [PINMUX_CTRL4] = 0x00000000U,
+ [PINMUX_CTRL5] = 0x0000A000U,
+ [WDT_RST_CTRL] = 0x023FFFF3U,
+ [PINMUX_CTRL8] = 0xFFFF0000U,
+ [PINMUX_CTRL9] = 0x000FFFFFU,
+ [FREE_CNTR4] = 0x000000FFU,
+ [FREE_CNTR4_EXT] = 0x000000FFU,
+ [CPU2_BASE_SEG1] = 0x80000000U,
+ [CPU2_BASE_SEG4] = 0x1E600000U,
+ [CPU2_BASE_SEG5] = 0xC0000000U,
+ [UART_HPLL_CLK] = 0x00001903U,
+ [PCIE_CTRL] = 0x0000007BU,
+ [BMC_DEV_ID] = 0x00002402U
+};
+
static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
{
AspeedSCUState *s = ASPEED_SCU(opaque);
@@ -198,6 +233,10 @@ static void aspeed_scu_reset(DeviceState *dev)
case AST2400_A0_SILICON_REV:
reset = ast2400_a0_resets;
break;
+ case AST2500_A0_SILICON_REV:
+ case AST2500_A1_SILICON_REV:
+ reset = ast2500_a1_resets;
+ break;
default:
g_assert_not_reached();
}
@@ -208,7 +247,11 @@ static void aspeed_scu_reset(DeviceState *dev)
s->regs[HW_STRAP2] = s->hw_strap2;
}
-static uint32_t aspeed_silicon_revs[] = { AST2400_A0_SILICON_REV, };
+static uint32_t aspeed_silicon_revs[] = {
+ AST2400_A0_SILICON_REV,
+ AST2500_A0_SILICON_REV,
+ AST2500_A1_SILICON_REV,
+};
bool is_supported_silicon_rev(uint32_t silicon_rev)
{
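
With the AST2500 reset values added above, aspeed_scu_reset() picks the table from the silicon_rev field, which the SoC code sets through a QOM property before realize. A sketch of selecting the new revision; the "silicon-rev" property name mirrors the one the SDMC model below defines, and the soc->scu field is illustrative:

/* Sketch: choose the AST2500 A1 reset table before realizing the SCU. */
object_property_set_int(OBJECT(&soc->scu), AST2500_A1_SILICON_REV,
                        "silicon-rev", &error_abort);
object_property_set_bool(OBJECT(&soc->scu), true, "realized", &error_abort);
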
diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c
new file mode 100644
index 0000000000..8830dc084c
--- /dev/null
+++ b/hw/misc/aspeed_sdmc.c
@@ -0,0 +1,280 @@
+/*
+ * ASPEED SDRAM Memory Controller
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/misc/aspeed_sdmc.h"
+#include "hw/misc/aspeed_scu.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "trace.h"
+
+/* Protection Key Register */
+#define R_PROT (0x00 / 4)
+#define PROT_KEY_UNLOCK 0xFC600309
+
+/* Configuration Register */
+#define R_CONF (0x04 / 4)
+
+/*
+ * Configuration register 0x04 (for Aspeed AST2400 SOC)
+ *
+ * These are for the record and future use. ASPEED_SDMC_DRAM_SIZE is
+ * what we care about right now as it is checked by U-Boot to
+ * determine the RAM size.
+ */
+
+#define ASPEED_SDMC_RESERVED 0xFFFFF800 /* 31:11 reserved */
+#define ASPEED_SDMC_AST2300_COMPAT (1 << 10)
+#define ASPEED_SDMC_SCRAMBLE_PATTERN (1 << 9)
+#define ASPEED_SDMC_DATA_SCRAMBLE (1 << 8)
+#define ASPEED_SDMC_ECC_ENABLE (1 << 7)
+#define ASPEED_SDMC_VGA_COMPAT (1 << 6) /* readonly */
+#define ASPEED_SDMC_DRAM_BANK (1 << 5)
+#define ASPEED_SDMC_DRAM_BURST (1 << 4)
+#define ASPEED_SDMC_VGA_APERTURE(x) ((x & 0x3) << 2) /* readonly */
+#define ASPEED_SDMC_VGA_8MB 0x0
+#define ASPEED_SDMC_VGA_16MB 0x1
+#define ASPEED_SDMC_VGA_32MB 0x2
+#define ASPEED_SDMC_VGA_64MB 0x3
+#define ASPEED_SDMC_DRAM_SIZE(x) (x & 0x3)
+#define ASPEED_SDMC_DRAM_64MB 0x0
+#define ASPEED_SDMC_DRAM_128MB 0x1
+#define ASPEED_SDMC_DRAM_256MB 0x2
+#define ASPEED_SDMC_DRAM_512MB 0x3
+
+#define ASPEED_SDMC_READONLY_MASK \
+ (ASPEED_SDMC_RESERVED | ASPEED_SDMC_VGA_COMPAT | \
+ ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB))
+/*
+ * Configuration register 0x04 (for Aspeed AST2500 SOC and higher)
+ *
+ * Incompatibilities are annotated in the list. ASPEED_SDMC_HW_VERSION
+ * should be set to 1 for the AST2500 SOC.
+ */
+#define ASPEED_SDMC_HW_VERSION(x) ((x & 0xf) << 28) /* readonly */
+#define ASPEED_SDMC_SW_VERSION(x) ((x & 0xff) << 20)
+#define ASPEED_SDMC_CACHE_INITIAL_DONE (1 << 19) /* readonly */
+#define ASPEED_SDMC_AST2500_RESERVED 0x7C000 /* 18:14 reserved */
+#define ASPEED_SDMC_CACHE_DDR4_CONF (1 << 13)
+#define ASPEED_SDMC_CACHE_INITIAL (1 << 12)
+#define ASPEED_SDMC_CACHE_RANGE_CTRL (1 << 11)
+#define ASPEED_SDMC_CACHE_ENABLE (1 << 10) /* differs from AST2400 */
+#define ASPEED_SDMC_DRAM_TYPE (1 << 4) /* differs from AST2400 */
+
+/* DRAM size definitions differ from the AST2400 */
+#define ASPEED_SDMC_AST2500_128MB 0x0
+#define ASPEED_SDMC_AST2500_256MB 0x1
+#define ASPEED_SDMC_AST2500_512MB 0x2
+#define ASPEED_SDMC_AST2500_1024MB 0x3
+
+#define ASPEED_SDMC_AST2500_READONLY_MASK \
+ (ASPEED_SDMC_HW_VERSION(0xf) | ASPEED_SDMC_CACHE_INITIAL_DONE | \
+ ASPEED_SDMC_AST2500_RESERVED | ASPEED_SDMC_VGA_COMPAT | \
+ ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB))
+
+static uint64_t aspeed_sdmc_read(void *opaque, hwaddr addr, unsigned size)
+{
+ AspeedSDMCState *s = ASPEED_SDMC(opaque);
+
+ addr >>= 2;
+
+ if (addr >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return 0;
+ }
+
+ return s->regs[addr];
+}
+
+static void aspeed_sdmc_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
+{
+ AspeedSDMCState *s = ASPEED_SDMC(opaque);
+
+ addr >>= 2;
+
+ if (addr >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return;
+ }
+
+ if (addr != R_PROT && s->regs[R_PROT] != PROT_KEY_UNLOCK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: SDMC is locked!\n", __func__);
+ return;
+ }
+
+ if (addr == R_CONF) {
+ /* Make sure readonly bits are kept */
+ switch (s->silicon_rev) {
+ case AST2400_A0_SILICON_REV:
+ data &= ~ASPEED_SDMC_READONLY_MASK;
+ break;
+ case AST2500_A0_SILICON_REV:
+ case AST2500_A1_SILICON_REV:
+ data &= ~ASPEED_SDMC_AST2500_READONLY_MASK;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ s->regs[addr] = data;
+}
+
+static const MemoryRegionOps aspeed_sdmc_ops = {
+ .read = aspeed_sdmc_read,
+ .write = aspeed_sdmc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+};
+
+static int ast2400_rambits(AspeedSDMCState *s)
+{
+ switch (s->ram_size >> 20) {
+ case 64:
+ return ASPEED_SDMC_DRAM_64MB;
+ case 128:
+ return ASPEED_SDMC_DRAM_128MB;
+ case 256:
+ return ASPEED_SDMC_DRAM_256MB;
+ case 512:
+ return ASPEED_SDMC_DRAM_512MB;
+ default:
+ break;
+ }
+
+ /* use a common default */
+ error_report("warning: Invalid RAM size 0x%" PRIx64
+ ". Using default 256M", s->ram_size);
+ s->ram_size = 256 << 20;
+ return ASPEED_SDMC_DRAM_256MB;
+}
+
+static int ast2500_rambits(AspeedSDMCState *s)
+{
+ switch (s->ram_size >> 20) {
+ case 128:
+ return ASPEED_SDMC_AST2500_128MB;
+ case 256:
+ return ASPEED_SDMC_AST2500_256MB;
+ case 512:
+ return ASPEED_SDMC_AST2500_512MB;
+ case 1024:
+ return ASPEED_SDMC_AST2500_1024MB;
+ default:
+ break;
+ }
+
+ /* use a common default */
+ error_report("warning: Invalid RAM size 0x%" PRIx64
+ ". Using default 512M", s->ram_size);
+ s->ram_size = 512 << 20;
+ return ASPEED_SDMC_AST2500_512MB;
+}
+
+static void aspeed_sdmc_reset(DeviceState *dev)
+{
+ AspeedSDMCState *s = ASPEED_SDMC(dev);
+
+ memset(s->regs, 0, sizeof(s->regs));
+
+ /* Set the RAM size bits and the default values */
+ switch (s->silicon_rev) {
+ case AST2400_A0_SILICON_REV:
+ s->regs[R_CONF] |=
+ ASPEED_SDMC_VGA_COMPAT |
+ ASPEED_SDMC_DRAM_SIZE(s->ram_bits);
+ break;
+
+ case AST2500_A0_SILICON_REV:
+ case AST2500_A1_SILICON_REV:
+ s->regs[R_CONF] |=
+ ASPEED_SDMC_HW_VERSION(1) |
+ ASPEED_SDMC_VGA_APERTURE(ASPEED_SDMC_VGA_64MB) |
+ ASPEED_SDMC_DRAM_SIZE(s->ram_bits);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void aspeed_sdmc_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AspeedSDMCState *s = ASPEED_SDMC(dev);
+
+ if (!is_supported_silicon_rev(s->silicon_rev)) {
+ error_setg(errp, "Unknown silicon revision: 0x%" PRIx32,
+ s->silicon_rev);
+ return;
+ }
+
+ switch (s->silicon_rev) {
+ case AST2400_A0_SILICON_REV:
+ s->ram_bits = ast2400_rambits(s);
+ break;
+ case AST2500_A0_SILICON_REV:
+ case AST2500_A1_SILICON_REV:
+ s->ram_bits = ast2500_rambits(s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_sdmc_ops, s,
+ TYPE_ASPEED_SDMC, 0x1000);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static const VMStateDescription vmstate_aspeed_sdmc = {
+ .name = "aspeed.sdmc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AspeedSDMCState, ASPEED_SDMC_NR_REGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property aspeed_sdmc_properties[] = {
+ DEFINE_PROP_UINT32("silicon-rev", AspeedSDMCState, silicon_rev, 0),
+ DEFINE_PROP_UINT64("ram-size", AspeedSDMCState, ram_size, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_sdmc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->realize = aspeed_sdmc_realize;
+ dc->reset = aspeed_sdmc_reset;
+ dc->desc = "ASPEED SDRAM Memory Controller";
+ dc->vmsd = &vmstate_aspeed_sdmc;
+ dc->props = aspeed_sdmc_properties;
+}
+
+static const TypeInfo aspeed_sdmc_info = {
+ .name = TYPE_ASPEED_SDMC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AspeedSDMCState),
+ .class_init = aspeed_sdmc_class_init,
+};
+
+static void aspeed_sdmc_register_types(void)
+{
+ type_register_static(&aspeed_sdmc_info);
+}
+
+type_init(aspeed_sdmc_register_types);
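
The SDMC model above drops writes (other than to the protection key register) until the magic key is written, and pins the read-only bits of the configuration register. A guest-side sketch of the resulting programming model, using the offsets and field definitions from the new file; the MMIO base address and the writel/readl helpers are assumptions for illustration, not QEMU code:

/* Guest-side sketch against the model above. */
#define SDMC_BASE   0x1e6e0000u          /* usual ASPEED SDMC base, assumed */
#define SDMC_PROT   (SDMC_BASE + 0x00)   /* R_PROT */
#define SDMC_CONF   (SDMC_BASE + 0x04)   /* R_CONF */

uint32_t conf, ram_code;

writel(SDMC_PROT, 0xFC600309);           /* PROT_KEY_UNLOCK; earlier writes are dropped */
conf = readl(SDMC_CONF);
ram_code = conf & 0x3;                   /* ASPEED_SDMC_DRAM_SIZE() field */
/* AST2400 encoding: 0 = 64MB, 1 = 128MB, 2 = 256MB, 3 = 512MB (see defines above);
 * on the AST2500 the same field means 128/256/512/1024MB. */
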
diff --git a/hw/misc/edu.c b/hw/misc/edu.c
index 888ba49a0e..401039c100 100644
--- a/hw/misc/edu.c
+++ b/hw/misc/edu.c
@@ -24,6 +24,7 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h" /* iothread mutex */
#include "qapi/visitor.h"
@@ -69,11 +70,20 @@ typedef struct {
uint64_t dma_mask;
} EduState;
+static bool edu_msi_enabled(EduState *edu)
+{
+ return msi_enabled(&edu->pdev);
+}
+
static void edu_raise_irq(EduState *edu, uint32_t val)
{
edu->irq_status |= val;
if (edu->irq_status) {
- pci_set_irq(&edu->pdev, 1);
+ if (edu_msi_enabled(edu)) {
+ msi_notify(&edu->pdev, 0);
+ } else {
+ pci_set_irq(&edu->pdev, 1);
+ }
}
}
@@ -81,7 +91,7 @@ static void edu_lower_irq(EduState *edu, uint32_t val)
{
edu->irq_status &= ~val;
- if (!edu->irq_status) {
+ if (!edu->irq_status && !edu_msi_enabled(edu)) {
pci_set_irq(&edu->pdev, 0);
}
}
@@ -342,6 +352,10 @@ static void pci_edu_realize(PCIDevice *pdev, Error **errp)
pci_config_set_interrupt_pin(pci_conf, 1);
+ if (msi_init(pdev, 0, 1, true, false, errp)) {
+ return;
+ }
+
memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
"edu-mmio", 1 << 20);
pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);
diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c
index 5cd8c0a9a7..19e948a52d 100644
--- a/hw/misc/imx25_ccm.c
+++ b/hw/misc/imx25_ccm.c
@@ -27,7 +27,7 @@
} \
} while (0)
-static char const *imx25_ccm_reg_name(uint32_t reg)
+static const char *imx25_ccm_reg_name(uint32_t reg)
{
static char unknown[20];
diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c
index 1c03e52c40..b890c383be 100644
--- a/hw/misc/imx31_ccm.c
+++ b/hw/misc/imx31_ccm.c
@@ -29,7 +29,7 @@
} \
} while (0)
-static char const *imx31_ccm_reg_name(uint32_t reg)
+static const char *imx31_ccm_reg_name(uint32_t reg)
{
static char unknown[20];
diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c
index 17e15d4c92..1b421013a3 100644
--- a/hw/misc/imx6_ccm.c
+++ b/hw/misc/imx6_ccm.c
@@ -26,7 +26,7 @@
} \
} while (0)
-static char const *imx6_ccm_reg_name(uint32_t reg)
+static const char *imx6_ccm_reg_name(uint32_t reg)
{
static char unknown[20];
@@ -99,7 +99,7 @@ static char const *imx6_ccm_reg_name(uint32_t reg)
}
}
-static char const *imx6_analog_reg_name(uint32_t reg)
+static const char *imx6_analog_reg_name(uint32_t reg)
{
static char unknown[20];
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
index 8bb6829575..55b817b8d7 100644
--- a/hw/misc/imx6_src.c
+++ b/hw/misc/imx6_src.c
@@ -27,7 +27,7 @@
} \
} while (0)
-static char const *imx6_src_reg_name(uint32_t reg)
+static const char *imx6_src_reg_name(uint32_t reg)
{
static char unknown[20];
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 40a2ebca20..abeaf3da08 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -88,7 +88,7 @@ typedef struct IVShmemState {
/* exactly one of these two may be set */
HostMemoryBackend *hostmem; /* with interrupts */
- CharDriverState *server_chr; /* without interrupts */
+ CharBackend server_chr; /* without interrupts */
/* registers */
uint32_t intrmask;
@@ -627,8 +627,7 @@ static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
msg = le64_to_cpu(s->msg_buf);
s->msg_buffered_bytes = 0;
- fd = qemu_chr_fe_get_msgfd(s->server_chr);
- IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd);
+ fd = qemu_chr_fe_get_msgfd(&s->server_chr);
process_msg(s, msg, fd, &err);
if (err) {
@@ -643,8 +642,8 @@ static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp)
n = 0;
do {
- ret = qemu_chr_fe_read_all(s->server_chr, (uint8_t *)&msg + n,
- sizeof(msg) - n);
+ ret = qemu_chr_fe_read_all(&s->server_chr, (uint8_t *)&msg + n,
+ sizeof(msg) - n);
if (ret < 0 && ret != -EINTR) {
error_setg_errno(errp, -ret, "read from server failed");
return INT64_MIN;
@@ -652,7 +651,7 @@ static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp)
n += ret;
} while (n < sizeof(msg));
- *pfd = qemu_chr_fe_get_msgfd(s->server_chr);
+ *pfd = qemu_chr_fe_get_msgfd(&s->server_chr);
return msg;
}
@@ -859,7 +858,7 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
&s->ivshmem_mmio);
- if (!s->not_legacy_32bit) {
+ if (s->not_legacy_32bit) {
attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
}
@@ -869,10 +868,11 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
s->ivshmem_bar2 = host_memory_backend_get_memory(s->hostmem,
&error_abort);
} else {
- assert(s->server_chr);
+ CharDriverState *chr = qemu_chr_fe_get_driver(&s->server_chr);
+ assert(chr);
IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
- s->server_chr->filename);
+ chr->filename);
/* we allocate enough space for 16 peers and grow as needed */
resize_peers(s, 16);
@@ -894,8 +894,8 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
return;
}
- qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive,
- ivshmem_read, NULL, s);
+ qemu_chr_fe_set_handlers(&s->server_chr, ivshmem_can_receive,
+ ivshmem_read, NULL, s, NULL, true);
if (ivshmem_setup_interrupts(s) < 0) {
error_setg(errp, "failed to initialize interrupts");
@@ -1045,6 +1045,7 @@ static void ivshmem_plain_init(Object *obj)
ivshmem_check_memdev_is_busy,
OBJ_PROP_LINK_UNREF_ON_RELEASE,
&error_abort);
+ s->not_legacy_32bit = 1;
}
static void ivshmem_plain_realize(PCIDevice *dev, Error **errp)
@@ -1116,13 +1117,14 @@ static void ivshmem_doorbell_init(Object *obj)
s->features |= (1 << IVSHMEM_MSI);
s->legacy_size = SIZE_MAX; /* whatever the server sends */
+ s->not_legacy_32bit = 1;
}
static void ivshmem_doorbell_realize(PCIDevice *dev, Error **errp)
{
IVShmemState *s = IVSHMEM_COMMON(dev);
- if (!s->server_chr) {
+ if (!qemu_chr_fe_get_driver(&s->server_chr)) {
error_setg(errp, "You must specify a 'chardev'");
return;
}
@@ -1251,7 +1253,7 @@ static void ivshmem_realize(PCIDevice *dev, Error **errp)
" or ivshmem-doorbell instead");
}
- if (!!s->server_chr + !!s->shmobj != 1) {
+ if (!!qemu_chr_fe_get_driver(&s->server_chr) + !!s->shmobj != 1) {
error_setg(errp, "You must specify either 'shm' or 'chardev'");
return;
}
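
The ivshmem hunks flip the sense of not_legacy_32bit and set it in the modern device variants, so ivshmem-plain and ivshmem-doorbell now register BAR2 as a 64-bit prefetchable BAR while the legacy "ivshmem" device keeps the 32-bit layout. In effect (a condensed restatement for clarity, not a literal quote of the surrounding code):

uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_PREFETCH;

if (s->not_legacy_32bit) {                 /* set by the -plain/-doorbell initfns */
    attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;  /* BAR2 becomes a 64-bit BAR */
}
pci_register_bar(dev, 2, attr, s->ivshmem_bar2);
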
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
index be03926b96..5d57f45dc6 100644
--- a/hw/misc/macio/macio.c
+++ b/hw/misc/macio/macio.c
@@ -89,22 +89,16 @@ static void macio_escc_legacy_setup(MacIOState *macio_state)
MemoryRegion *bar = &macio_state->bar;
int i;
static const int maps[] = {
- 0x00, 0x00,
- 0x02, 0x20,
- 0x04, 0x10,
- 0x06, 0x30,
- 0x08, 0x40,
- 0x0A, 0x50,
- 0x60, 0x60,
- 0x70, 0x70,
- 0x80, 0x70,
- 0x90, 0x80,
- 0xA0, 0x90,
- 0xB0, 0xA0,
- 0xC0, 0xB0,
- 0xD0, 0xC0,
- 0xE0, 0xD0,
- 0xF0, 0xE0,
+ 0x00, 0x00, /* Command B */
+ 0x02, 0x20, /* Command A */
+ 0x04, 0x10, /* Data B */
+ 0x06, 0x30, /* Data A */
+ 0x08, 0x40, /* Enhancement B */
+ 0x0A, 0x50, /* Enhancement A */
+ 0x80, 0x80, /* Recovery count */
+ 0x90, 0x90, /* Start A */
+ 0xa0, 0xa0, /* Start B */
+ 0xb0, 0xb0, /* Detect AB */
};
memory_region_init(escc_legacy, OBJECT(macio_state), "escc-legacy", 256);
diff --git a/hw/misc/milkymist-pfpu.c b/hw/misc/milkymist-pfpu.c
index 1da21a643e..3ca25894f1 100644
--- a/hw/misc/milkymist-pfpu.c
+++ b/hw/misc/milkymist-pfpu.c
@@ -137,7 +137,7 @@ struct MilkymistPFPUState {
};
typedef struct MilkymistPFPUState MilkymistPFPUState;
-static inline hwaddr
+static inline uint32_t
get_dma_address(uint32_t base, uint32_t x, uint32_t y)
{
return base + 8 * (128 * y + x);
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index db1b301e7f..7915732f74 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -26,6 +26,8 @@
#include <zlib.h> /* For crc32 */
#include "hw/net/cadence_gem.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
#include "net/checksum.h"
#ifdef CADENCE_GEM_ERR_DEBUG
@@ -141,6 +143,55 @@
#define GEM_DESCONF6 (0x00000294/4)
#define GEM_DESCONF7 (0x00000298/4)
+#define GEM_INT_Q1_STATUS (0x00000400 / 4)
+#define GEM_INT_Q1_MASK (0x00000640 / 4)
+
+#define GEM_TRANSMIT_Q1_PTR (0x00000440 / 4)
+#define GEM_TRANSMIT_Q7_PTR (GEM_TRANSMIT_Q1_PTR + 6)
+
+#define GEM_RECEIVE_Q1_PTR (0x00000480 / 4)
+#define GEM_RECEIVE_Q7_PTR (GEM_RECEIVE_Q1_PTR + 6)
+
+#define GEM_INT_Q1_ENABLE (0x00000600 / 4)
+#define GEM_INT_Q7_ENABLE (GEM_INT_Q1_ENABLE + 6)
+
+#define GEM_INT_Q1_DISABLE (0x00000620 / 4)
+#define GEM_INT_Q7_DISABLE (GEM_INT_Q1_DISABLE + 6)
+
+#define GEM_INT_Q1_MASK (0x00000640 / 4)
+#define GEM_INT_Q7_MASK (GEM_INT_Q1_MASK + 6)
+
+#define GEM_SCREENING_TYPE1_REGISTER_0 (0x00000500 / 4)
+
+#define GEM_ST1R_UDP_PORT_MATCH_ENABLE (1 << 29)
+#define GEM_ST1R_DSTC_ENABLE (1 << 28)
+#define GEM_ST1R_UDP_PORT_MATCH_SHIFT (12)
+#define GEM_ST1R_UDP_PORT_MATCH_WIDTH (27 - GEM_ST1R_UDP_PORT_MATCH_SHIFT + 1)
+#define GEM_ST1R_DSTC_MATCH_SHIFT (4)
+#define GEM_ST1R_DSTC_MATCH_WIDTH (11 - GEM_ST1R_DSTC_MATCH_SHIFT + 1)
+#define GEM_ST1R_QUEUE_SHIFT (0)
+#define GEM_ST1R_QUEUE_WIDTH (3 - GEM_ST1R_QUEUE_SHIFT + 1)
+
+#define GEM_SCREENING_TYPE2_REGISTER_0 (0x00000540 / 4)
+
+#define GEM_ST2R_COMPARE_A_ENABLE (1 << 18)
+#define GEM_ST2R_COMPARE_A_SHIFT (13)
+#define GEM_ST2R_COMPARE_WIDTH (17 - GEM_ST2R_COMPARE_A_SHIFT + 1)
+#define GEM_ST2R_ETHERTYPE_ENABLE (1 << 12)
+#define GEM_ST2R_ETHERTYPE_INDEX_SHIFT (9)
+#define GEM_ST2R_ETHERTYPE_INDEX_WIDTH (11 - GEM_ST2R_ETHERTYPE_INDEX_SHIFT \
+ + 1)
+#define GEM_ST2R_QUEUE_SHIFT (0)
+#define GEM_ST2R_QUEUE_WIDTH (3 - GEM_ST2R_QUEUE_SHIFT + 1)
+
+#define GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 (0x000006e0 / 4)
+#define GEM_TYPE2_COMPARE_0_WORD_0 (0x00000700 / 4)
+
+#define GEM_T2CW1_COMPARE_OFFSET_SHIFT (7)
+#define GEM_T2CW1_COMPARE_OFFSET_WIDTH (8 - GEM_T2CW1_COMPARE_OFFSET_SHIFT + 1)
+#define GEM_T2CW1_OFFSET_VALUE_SHIFT (0)
+#define GEM_T2CW1_OFFSET_VALUE_WIDTH (6 - GEM_T2CW1_OFFSET_VALUE_SHIFT + 1)
+
/*****************************************/
#define GEM_NWCTRL_TXSTART 0x00000200 /* Transmit Enable */
#define GEM_NWCTRL_TXENA 0x00000008 /* Transmit Enable */
@@ -284,9 +335,9 @@ static inline unsigned tx_desc_get_length(unsigned *desc)
return desc[1] & DESC_1_LENGTH;
}
-static inline void print_gem_tx_desc(unsigned *desc)
+static inline void print_gem_tx_desc(unsigned *desc, uint8_t queue)
{
- DB_PRINT("TXDESC:\n");
+ DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
DB_PRINT("bufaddr: 0x%08x\n", *desc);
DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc));
@@ -416,6 +467,7 @@ static void phy_update_link(CadenceGEMState *s)
static int gem_can_receive(NetClientState *nc)
{
CadenceGEMState *s;
+ int i;
s = qemu_get_nic_opaque(nc);
@@ -428,18 +480,20 @@ static int gem_can_receive(NetClientState *nc)
return 0;
}
- if (rx_desc_get_ownership(s->rx_desc) == 1) {
- if (s->can_rx_state != 2) {
- s->can_rx_state = 2;
- DB_PRINT("can't receive - busy buffer descriptor 0x%x\n",
- s->rx_desc_addr);
+ for (i = 0; i < s->num_priority_queues; i++) {
+ if (rx_desc_get_ownership(s->rx_desc[i]) == 1) {
+ if (s->can_rx_state != 2) {
+ s->can_rx_state = 2;
+ DB_PRINT("can't receive - busy buffer descriptor (q%d) 0x%x\n",
+ i, s->rx_desc_addr[i]);
+ }
+ return 0;
}
- return 0;
}
if (s->can_rx_state != 0) {
s->can_rx_state = 0;
- DB_PRINT("can receive 0x%x\n", s->rx_desc_addr);
+ DB_PRINT("can receive\n");
}
return 1;
}
@@ -450,9 +504,20 @@ static int gem_can_receive(NetClientState *nc)
*/
static void gem_update_int_status(CadenceGEMState *s)
{
- if (s->regs[GEM_ISR]) {
- DB_PRINT("asserting int. (0x%08x)\n", s->regs[GEM_ISR]);
- qemu_set_irq(s->irq, 1);
+ int i;
+
+ if ((s->num_priority_queues == 1) && s->regs[GEM_ISR]) {
+ /* No priority queues, just trigger the interrupt */
+ DB_PRINT("asserting int.\n", i);
+ qemu_set_irq(s->irq[0], 1);
+ return;
+ }
+
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ if (s->regs[GEM_INT_Q1_STATUS + i]) {
+ DB_PRINT("asserting int. (q=%d)\n", i);
+ qemu_set_irq(s->irq[i], 1);
+ }
}
}
@@ -601,17 +666,137 @@ static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
return GEM_RX_REJECT;
}
-static void gem_get_rx_desc(CadenceGEMState *s)
+/* Figure out which queue the received data should be sent to */
+static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
+ unsigned rxbufsize)
{
- DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr);
+ uint32_t reg;
+ bool matched, mismatched;
+ int i, j;
+
+ for (i = 0; i < s->num_type1_screeners; i++) {
+ reg = s->regs[GEM_SCREENING_TYPE1_REGISTER_0 + i];
+ matched = false;
+ mismatched = false;
+
+ /* Screening is based on UDP Port */
+ if (reg & GEM_ST1R_UDP_PORT_MATCH_ENABLE) {
+ uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
+ if (udp_port == extract32(reg, GEM_ST1R_UDP_PORT_MATCH_SHIFT,
+ GEM_ST1R_UDP_PORT_MATCH_WIDTH)) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ /* Screening is based on DS/TC */
+ if (reg & GEM_ST1R_DSTC_ENABLE) {
+ uint8_t dscp = rxbuf_ptr[14 + 1];
+ if (dscp == extract32(reg, GEM_ST1R_DSTC_MATCH_SHIFT,
+ GEM_ST1R_DSTC_MATCH_WIDTH)) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ if (matched && !mismatched) {
+ return extract32(reg, GEM_ST1R_QUEUE_SHIFT, GEM_ST1R_QUEUE_WIDTH);
+ }
+ }
+
+ for (i = 0; i < s->num_type2_screeners; i++) {
+ reg = s->regs[GEM_SCREENING_TYPE2_REGISTER_0 + i];
+ matched = false;
+ mismatched = false;
+
+ if (reg & GEM_ST2R_ETHERTYPE_ENABLE) {
+ uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
+ int et_idx = extract32(reg, GEM_ST2R_ETHERTYPE_INDEX_SHIFT,
+ GEM_ST2R_ETHERTYPE_INDEX_WIDTH);
+
+ if (et_idx > s->num_type2_screeners) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
+ "register index: %d\n", et_idx);
+ }
+ if (type == s->regs[GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 +
+ et_idx]) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ /* Compare A, B, C */
+ for (j = 0; j < 3; j++) {
+ uint32_t cr0, cr1, mask;
+ uint16_t rx_cmp;
+ int offset;
+ int cr_idx = extract32(reg, GEM_ST2R_COMPARE_A_SHIFT + j * 6,
+ GEM_ST2R_COMPARE_WIDTH);
+
+ if (!(reg & (GEM_ST2R_COMPARE_A_ENABLE << (j * 6)))) {
+ continue;
+ }
+ if (cr_idx > s->num_type2_screeners) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
+ "register index: %d\n", cr_idx);
+ }
+
+ cr0 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
+ cr1 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2 + 1];
+ offset = extract32(cr1, GEM_T2CW1_OFFSET_VALUE_SHIFT,
+ GEM_T2CW1_OFFSET_VALUE_WIDTH);
+
+ switch (extract32(cr1, GEM_T2CW1_COMPARE_OFFSET_SHIFT,
+ GEM_T2CW1_COMPARE_OFFSET_WIDTH)) {
+ case 3: /* Skip UDP header */
+ qemu_log_mask(LOG_UNIMP, "TCP compare offsets "
+ "unimplemented - assuming UDP\n");
+ offset += 8;
+ /* Fallthrough */
+ case 2: /* skip the IP header */
+ offset += 20;
+ /* Fallthrough */
+ case 1: /* Count from after the ethertype */
+ offset += 14;
+ break;
+ case 0:
+ /* Offset from start of frame */
+ break;
+ }
+
+ rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset + 1];
+ mask = extract32(cr0, 0, 16);
+
+ if ((rx_cmp & mask) == (extract32(cr0, 16, 16) & mask)) {
+ matched = true;
+ } else {
+ mismatched = true;
+ }
+ }
+
+ if (matched && !mismatched) {
+ return extract32(reg, GEM_ST2R_QUEUE_SHIFT, GEM_ST2R_QUEUE_WIDTH);
+ }
+ }
+
+ /* We made it here, assume it's queue 0 */
+ return 0;
+}
+
+static void gem_get_rx_desc(CadenceGEMState *s, int q)
+{
+ DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr[q]);
/* read current descriptor */
- cpu_physical_memory_read(s->rx_desc_addr,
- (uint8_t *)s->rx_desc, sizeof(s->rx_desc));
+ cpu_physical_memory_read(s->rx_desc_addr[q],
+ (uint8_t *)s->rx_desc[q], sizeof(s->rx_desc[q]));
/* Descriptor owned by software ? */
- if (rx_desc_get_ownership(s->rx_desc) == 1) {
+ if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
DB_PRINT("descriptor 0x%x owned by sw.\n",
- (unsigned)s->rx_desc_addr);
+ (unsigned)s->rx_desc_addr[q]);
s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
s->regs[GEM_ISR] |= GEM_INT_RXUSED & ~(s->regs[GEM_IMR]);
/* Handle interrupt consequences */
@@ -632,6 +817,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
uint8_t *rxbuf_ptr;
bool first_desc = true;
int maf;
+ int q = 0;
s = qemu_get_nic_opaque(nc);
@@ -710,6 +896,9 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size);
+ /* Find which queue we are targeting */
+ q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);
+
while (bytes_to_copy) {
/* Do nothing if receive is not enabled. */
if (!gem_can_receive(nc)) {
@@ -718,56 +907,59 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
}
DB_PRINT("copy %d bytes to 0x%x\n", MIN(bytes_to_copy, rxbufsize),
- rx_desc_get_buffer(s->rx_desc));
+ rx_desc_get_buffer(s->rx_desc[q]));
/* Copy packet data to emulated DMA buffer */
- cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc) + rxbuf_offset,
+ cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc[q]) +
+ rxbuf_offset,
rxbuf_ptr, MIN(bytes_to_copy, rxbufsize));
rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);
/* Update the descriptor. */
if (first_desc) {
- rx_desc_set_sof(s->rx_desc);
+ rx_desc_set_sof(s->rx_desc[q]);
first_desc = false;
}
if (bytes_to_copy == 0) {
- rx_desc_set_eof(s->rx_desc);
- rx_desc_set_length(s->rx_desc, size);
+ rx_desc_set_eof(s->rx_desc[q]);
+ rx_desc_set_length(s->rx_desc[q], size);
}
- rx_desc_set_ownership(s->rx_desc);
+ rx_desc_set_ownership(s->rx_desc[q]);
switch (maf) {
case GEM_RX_PROMISCUOUS_ACCEPT:
break;
case GEM_RX_BROADCAST_ACCEPT:
- rx_desc_set_broadcast(s->rx_desc);
+ rx_desc_set_broadcast(s->rx_desc[q]);
break;
case GEM_RX_UNICAST_HASH_ACCEPT:
- rx_desc_set_unicast_hash(s->rx_desc);
+ rx_desc_set_unicast_hash(s->rx_desc[q]);
break;
case GEM_RX_MULTICAST_HASH_ACCEPT:
- rx_desc_set_multicast_hash(s->rx_desc);
+ rx_desc_set_multicast_hash(s->rx_desc[q]);
break;
case GEM_RX_REJECT:
abort();
default: /* SAR */
- rx_desc_set_sar(s->rx_desc, maf);
+ rx_desc_set_sar(s->rx_desc[q], maf);
}
/* Descriptor write-back. */
- cpu_physical_memory_write(s->rx_desc_addr,
- (uint8_t *)s->rx_desc, sizeof(s->rx_desc));
+ cpu_physical_memory_write(s->rx_desc_addr[q],
+ (uint8_t *)s->rx_desc[q],
+ sizeof(s->rx_desc[q]));
/* Next descriptor */
- if (rx_desc_get_wrap(s->rx_desc)) {
+ if (rx_desc_get_wrap(s->rx_desc[q])) {
DB_PRINT("wrapping RX descriptor list\n");
- s->rx_desc_addr = s->regs[GEM_RXQBASE];
+ s->rx_desc_addr[q] = s->regs[GEM_RXQBASE];
} else {
DB_PRINT("incrementing RX descriptor list\n");
- s->rx_desc_addr += 8;
+ s->rx_desc_addr[q] += 8;
}
- gem_get_rx_desc(s);
+
+ gem_get_rx_desc(s, q);
}
/* Count it */
@@ -839,6 +1031,7 @@ static void gem_transmit(CadenceGEMState *s)
uint8_t tx_packet[2048];
uint8_t *p;
unsigned total_bytes;
+ int q = 0;
/* Do nothing if transmit is not enabled. */
if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
@@ -854,110 +1047,123 @@ static void gem_transmit(CadenceGEMState *s)
p = tx_packet;
total_bytes = 0;
- /* read current descriptor */
- packet_desc_addr = s->tx_desc_addr;
-
- DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
- cpu_physical_memory_read(packet_desc_addr,
- (uint8_t *)desc, sizeof(desc));
- /* Handle all descriptors owned by hardware */
- while (tx_desc_get_used(desc) == 0) {
-
- /* Do nothing if transmit is not enabled. */
- if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
- return;
- }
- print_gem_tx_desc(desc);
-
- /* The real hardware would eat this (and possibly crash).
- * For QEMU let's lend a helping hand.
- */
- if ((tx_desc_get_buffer(desc) == 0) ||
- (tx_desc_get_length(desc) == 0)) {
- DB_PRINT("Invalid TX descriptor @ 0x%x\n",
- (unsigned)packet_desc_addr);
- break;
- }
-
- if (tx_desc_get_length(desc) > sizeof(tx_packet) - (p - tx_packet)) {
- DB_PRINT("TX descriptor @ 0x%x too large: size 0x%x space 0x%x\n",
- (unsigned)packet_desc_addr,
- (unsigned)tx_desc_get_length(desc),
- sizeof(tx_packet) - (p - tx_packet));
- break;
- }
+ for (q = s->num_priority_queues - 1; q >= 0; q--) {
+ /* read current descriptor */
+ packet_desc_addr = s->tx_desc_addr[q];
- /* Gather this fragment of the packet from "dma memory" to our contig.
- * buffer.
- */
- cpu_physical_memory_read(tx_desc_get_buffer(desc), p,
- tx_desc_get_length(desc));
- p += tx_desc_get_length(desc);
- total_bytes += tx_desc_get_length(desc);
+ DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
+ cpu_physical_memory_read(packet_desc_addr,
+ (uint8_t *)desc, sizeof(desc));
+ /* Handle all descriptors owned by hardware */
+ while (tx_desc_get_used(desc) == 0) {
- /* Last descriptor for this packet; hand the whole thing off */
- if (tx_desc_get_last(desc)) {
- unsigned desc_first[2];
+ /* Do nothing if transmit is not enabled. */
+ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
+ return;
+ }
+ print_gem_tx_desc(desc, q);
- /* Modify the 1st descriptor of this packet to be owned by
- * the processor.
+ /* The real hardware would eat this (and possibly crash).
+ * For QEMU let's lend a helping hand.
*/
- cpu_physical_memory_read(s->tx_desc_addr, (uint8_t *)desc_first,
- sizeof(desc_first));
- tx_desc_set_used(desc_first);
- cpu_physical_memory_write(s->tx_desc_addr, (uint8_t *)desc_first,
- sizeof(desc_first));
- /* Advance the hardware current descriptor past this packet */
- if (tx_desc_get_wrap(desc)) {
- s->tx_desc_addr = s->regs[GEM_TXQBASE];
- } else {
- s->tx_desc_addr = packet_desc_addr + 8;
+ if ((tx_desc_get_buffer(desc) == 0) ||
+ (tx_desc_get_length(desc) == 0)) {
+ DB_PRINT("Invalid TX descriptor @ 0x%x\n",
+ (unsigned)packet_desc_addr);
+ break;
}
- DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr);
-
- s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
- s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]);
-
- /* Handle interrupt consequences */
- gem_update_int_status(s);
- /* Is checksum offload enabled? */
- if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
- net_checksum_calculate(tx_packet, total_bytes);
+ if (tx_desc_get_length(desc) > sizeof(tx_packet) -
+ (p - tx_packet)) {
+ DB_PRINT("TX descriptor @ 0x%x too large: size 0x%x space " \
+ "0x%x\n", (unsigned)packet_desc_addr,
+ (unsigned)tx_desc_get_length(desc),
+ sizeof(tx_packet) - (p - tx_packet));
+ break;
}
- /* Update MAC statistics */
- gem_transmit_updatestats(s, tx_packet, total_bytes);
+ /* Gather this fragment of the packet from "dma memory" to our
+ * contig buffer.
+ */
+ cpu_physical_memory_read(tx_desc_get_buffer(desc), p,
+ tx_desc_get_length(desc));
+ p += tx_desc_get_length(desc);
+ total_bytes += tx_desc_get_length(desc);
+
+ /* Last descriptor for this packet; hand the whole thing off */
+ if (tx_desc_get_last(desc)) {
+ unsigned desc_first[2];
+
+ /* Modify the 1st descriptor of this packet to be owned by
+ * the processor.
+ */
+ cpu_physical_memory_read(s->tx_desc_addr[q],
+ (uint8_t *)desc_first,
+ sizeof(desc_first));
+ tx_desc_set_used(desc_first);
+ cpu_physical_memory_write(s->tx_desc_addr[q],
+ (uint8_t *)desc_first,
+ sizeof(desc_first));
+ /* Advance the hardware current descriptor past this packet */
+ if (tx_desc_get_wrap(desc)) {
+ s->tx_desc_addr[q] = s->regs[GEM_TXQBASE];
+ } else {
+ s->tx_desc_addr[q] = packet_desc_addr + 8;
+ }
+ DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
+
+ s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
+ s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]);
+
+ /* Update queue interrupt status */
+ if (s->num_priority_queues > 1) {
+ s->regs[GEM_INT_Q1_STATUS + q] |=
+ GEM_INT_TXCMPL & ~(s->regs[GEM_INT_Q1_MASK + q]);
+ }
+
+ /* Handle interrupt consequences */
+ gem_update_int_status(s);
+
+ /* Is checksum offload enabled? */
+ if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
+ net_checksum_calculate(tx_packet, total_bytes);
+ }
+
+ /* Update MAC statistics */
+ gem_transmit_updatestats(s, tx_packet, total_bytes);
+
+ /* Send the packet somewhere */
+ if (s->phy_loop || (s->regs[GEM_NWCTRL] &
+ GEM_NWCTRL_LOCALLOOP)) {
+ gem_receive(qemu_get_queue(s->nic), tx_packet,
+ total_bytes);
+ } else {
+ qemu_send_packet(qemu_get_queue(s->nic), tx_packet,
+ total_bytes);
+ }
+
+ /* Prepare for next packet */
+ p = tx_packet;
+ total_bytes = 0;
+ }
- /* Send the packet somewhere */
- if (s->phy_loop || (s->regs[GEM_NWCTRL] & GEM_NWCTRL_LOCALLOOP)) {
- gem_receive(qemu_get_queue(s->nic), tx_packet, total_bytes);
+ /* read next descriptor */
+ if (tx_desc_get_wrap(desc)) {
+ tx_desc_set_last(desc);
+ packet_desc_addr = s->regs[GEM_TXQBASE];
} else {
- qemu_send_packet(qemu_get_queue(s->nic), tx_packet,
- total_bytes);
+ packet_desc_addr += 8;
}
-
- /* Prepare for next packet */
- p = tx_packet;
- total_bytes = 0;
+ DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
+ cpu_physical_memory_read(packet_desc_addr,
+ (uint8_t *)desc, sizeof(desc));
}
- /* read next descriptor */
- if (tx_desc_get_wrap(desc)) {
- tx_desc_set_last(desc);
- packet_desc_addr = s->regs[GEM_TXQBASE];
- } else {
- packet_desc_addr += 8;
+ if (tx_desc_get_used(desc)) {
+ s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
+ s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]);
+ gem_update_int_status(s);
}
- DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
- cpu_physical_memory_read(packet_desc_addr,
- (uint8_t *)desc, sizeof(desc));
- }
-
- if (tx_desc_get_used(desc)) {
- s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
- s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]);
- gem_update_int_status(s);
}
}
@@ -1065,7 +1271,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
{
CadenceGEMState *s;
uint32_t retval;
-
+ int i;
s = (CadenceGEMState *)opaque;
offset >>= 2;
@@ -1075,8 +1281,10 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
switch (offset) {
case GEM_ISR:
- DB_PRINT("lowering irq on ISR read\n");
- qemu_set_irq(s->irq, 0);
+ DB_PRINT("lowering irqs on ISR read\n");
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ qemu_set_irq(s->irq[i], 0);
+ }
break;
case GEM_PHYMNTNC:
if (retval & GEM_PHYMNTNC_OP_R) {
@@ -1101,6 +1309,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
retval &= ~(s->regs_wo[offset]);
DB_PRINT("0x%08x\n", retval);
+ gem_update_int_status(s);
return retval;
}
@@ -1113,6 +1322,7 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
{
CadenceGEMState *s = (CadenceGEMState *)opaque;
uint32_t readonly;
+ int i;
DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
offset >>= 2;
@@ -1132,14 +1342,18 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
switch (offset) {
case GEM_NWCTRL:
if (val & GEM_NWCTRL_RXENA) {
- gem_get_rx_desc(s);
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ gem_get_rx_desc(s, i);
+ }
}
if (val & GEM_NWCTRL_TXSTART) {
gem_transmit(s);
}
if (!(val & GEM_NWCTRL_TXENA)) {
/* Reset to start of Q when transmit disabled. */
- s->tx_desc_addr = s->regs[GEM_TXQBASE];
+ for (i = 0; i < s->num_priority_queues; i++) {
+ s->tx_desc_addr[i] = s->regs[GEM_TXQBASE];
+ }
}
if (gem_can_receive(qemu_get_queue(s->nic))) {
qemu_flush_queued_packets(qemu_get_queue(s->nic));
@@ -1150,10 +1364,16 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
gem_update_int_status(s);
break;
case GEM_RXQBASE:
- s->rx_desc_addr = val;
+ s->rx_desc_addr[0] = val;
+ break;
+ case GEM_RECEIVE_Q1_PTR ... GEM_RECEIVE_Q7_PTR:
+ s->rx_desc_addr[offset - GEM_RECEIVE_Q1_PTR + 1] = val;
break;
case GEM_TXQBASE:
- s->tx_desc_addr = val;
+ s->tx_desc_addr[0] = val;
+ break;
+ case GEM_TRANSMIT_Q1_PTR ... GEM_TRANSMIT_Q7_PTR:
+ s->tx_desc_addr[offset - GEM_TRANSMIT_Q1_PTR + 1] = val;
break;
case GEM_RXSTATUS:
gem_update_int_status(s);
@@ -1162,10 +1382,18 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
s->regs[GEM_IMR] &= ~val;
gem_update_int_status(s);
break;
+ case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE:
+ s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val;
+ gem_update_int_status(s);
+ break;
case GEM_IDR:
s->regs[GEM_IMR] |= val;
gem_update_int_status(s);
break;
+ case GEM_INT_Q1_DISABLE ... GEM_INT_Q7_DISABLE:
+ s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_DISABLE] |= val;
+ gem_update_int_status(s);
+ break;
case GEM_SPADDR1LO:
case GEM_SPADDR2LO:
case GEM_SPADDR3LO:
@@ -1202,8 +1430,11 @@ static const MemoryRegionOps gem_ops = {
static void gem_set_link(NetClientState *nc)
{
+ CadenceGEMState *s = qemu_get_nic_opaque(nc);
+
DB_PRINT("\n");
- phy_update_link(qemu_get_nic_opaque(nc));
+ phy_update_link(s);
+ gem_update_int_status(s);
}
static NetClientInfo net_gem_info = {
@@ -1214,36 +1445,62 @@ static NetClientInfo net_gem_info = {
.link_status_changed = gem_set_link,
};
-static int gem_init(SysBusDevice *sbd)
+static void gem_realize(DeviceState *dev, Error **errp)
{
- DeviceState *dev = DEVICE(sbd);
CadenceGEMState *s = CADENCE_GEM(dev);
+ int i;
+
+ if (s->num_priority_queues == 0 ||
+ s->num_priority_queues > MAX_PRIORITY_QUEUES) {
+ error_setg(errp, "Invalid num-priority-queues value: %" PRIx8,
+ s->num_priority_queues);
+ return;
+ } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) {
+ error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8,
+ s->num_type1_screeners);
+ return;
+ } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) {
+ error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8,
+ s->num_type2_screeners);
+ return;
+ }
+
+ for (i = 0; i < s->num_priority_queues; ++i) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
+ }
+
+ qemu_macaddr_default_if_unset(&s->conf.macaddr);
+
+ s->nic = qemu_new_nic(&net_gem_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
+}
+
+static void gem_init(Object *obj)
+{
+ CadenceGEMState *s = CADENCE_GEM(obj);
+ DeviceState *dev = DEVICE(obj);
DB_PRINT("\n");
gem_init_register_masks(s);
memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s,
"enet", sizeof(s->regs));
- sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->irq);
- qemu_macaddr_default_if_unset(&s->conf.macaddr);
- s->nic = qemu_new_nic(&net_gem_info, &s->conf,
- object_get_typename(OBJECT(dev)), dev->id, s);
-
- return 0;
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}
static const VMStateDescription vmstate_cadence_gem = {
.name = "cadence_gem",
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 4,
+ .minimum_version_id = 4,
.fields = (VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
VMSTATE_UINT8(phy_loop, CadenceGEMState),
- VMSTATE_UINT32(rx_desc_addr, CadenceGEMState),
- VMSTATE_UINT32(tx_desc_addr, CadenceGEMState),
+ VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState,
+ MAX_PRIORITY_QUEUES),
+ VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState,
+ MAX_PRIORITY_QUEUES),
VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4),
VMSTATE_END_OF_LIST(),
}
@@ -1251,15 +1508,20 @@ static const VMStateDescription vmstate_cadence_gem = {
static Property gem_properties[] = {
DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
+ DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
+ num_priority_queues, 1),
+ DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
+ num_type1_screeners, 4),
+ DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
+ num_type2_screeners, 4),
DEFINE_PROP_END_OF_LIST(),
};
static void gem_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
- sdc->init = gem_init;
+ dc->realize = gem_realize;
dc->props = gem_properties;
dc->vmsd = &vmstate_cadence_gem;
dc->reset = gem_reset;
@@ -1269,6 +1531,7 @@ static const TypeInfo gem_info = {
.name = TYPE_CADENCE_GEM,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(CadenceGEMState),
+ .instance_init = gem_init,
.class_init = gem_class_init,
};
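
The queue-steering logic in get_queue_from_screen() is driven entirely by bit-field extraction from the screening registers. As a quick illustration (a standalone sketch, not part of the patch), the snippet below decodes a hypothetical type 2 screening register value using the GEM_ST2R_* shift/width values defined in the hunk above; extract_bits() is a simplified local stand-in for QEMU's extract32().

#include <stdint.h>
#include <stdio.h>

/* Simplified local stand-in for QEMU's extract32(). */
static uint32_t extract_bits(uint32_t value, int start, int length)
{
    return (value >> start) & ((1u << length) - 1);
}

/* Shift/width values copied from the GEM_ST2R_* definitions above. */
#define ST2R_ETHERTYPE_ENABLE    (1u << 12)
#define ST2R_ETHERTYPE_IDX_SHIFT 9
#define ST2R_ETHERTYPE_IDX_WIDTH 3
#define ST2R_QUEUE_SHIFT         0
#define ST2R_QUEUE_WIDTH         4

int main(void)
{
    /* Hypothetical register value: ethertype compare enabled, using
     * ethertype register 2, steering matches to priority queue 5. */
    uint32_t reg = ST2R_ETHERTYPE_ENABLE
                   | (2u << ST2R_ETHERTYPE_IDX_SHIFT)
                   | (5u << ST2R_QUEUE_SHIFT);

    printf("ethertype compare enabled: %s\n",
           (reg & ST2R_ETHERTYPE_ENABLE) ? "yes" : "no");
    printf("ethertype register index:  %u\n",
           extract_bits(reg, ST2R_ETHERTYPE_IDX_SHIFT, ST2R_ETHERTYPE_IDX_WIDTH));
    printf("target priority queue:     %u\n",
           extract_bits(reg, ST2R_QUEUE_SHIFT, ST2R_QUEUE_WIDTH));
    return 0;
}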
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index bad43f474e..4994e1ca00 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -400,7 +400,7 @@ static void e1000e_write_config(PCIDevice *pci_dev, uint32_t address,
if (range_covers_byte(address, len, PCI_COMMAND) &&
(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
- qemu_flush_queued_packets(qemu_get_queue(s->nic));
+ e1000e_start_recv(&s->core);
}
}
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index badb1feb7d..2b11499829 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -52,7 +52,7 @@
second according to spec 10.2.4.2 */
#define E1000E_MAX_TX_FRAGS (64)
-static void
+static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);
static inline void
@@ -953,7 +953,7 @@ e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
core->rx_desc_buf_size;
}
-static inline void
+void
e1000e_start_recv(E1000ECore *core)
{
int i;
@@ -1278,11 +1278,10 @@ e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc,
struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
- memset(d, 0, sizeof(*d));
-
assert(!rss_info->enabled);
d->length = cpu_to_le16(length);
+ d->csum = 0;
e1000e_build_rx_metadata(core, pkt, pkt != NULL,
rss_info,
@@ -1291,6 +1290,7 @@ e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc,
&d->special);
d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
d->status = (uint8_t) le32_to_cpu(status_flags);
+ d->special = 0;
}
static inline void
@@ -1301,7 +1301,7 @@ e1000e_write_ext_rx_descr(E1000ECore *core, uint8_t *desc,
{
union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
- memset(d, 0, sizeof(*d));
+ memset(&d->wb, 0, sizeof(d->wb));
d->wb.upper.length = cpu_to_le16(length);
@@ -1325,7 +1325,7 @@ e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc,
union e1000_rx_desc_packet_split *d =
(union e1000_rx_desc_packet_split *) desc;
- memset(d, 0, sizeof(*d));
+ memset(&d->wb, 0, sizeof(d->wb));
d->wb.middle.length0 = cpu_to_le16((*written)[0]);
@@ -1710,7 +1710,8 @@ e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
}
/* Perform ACK receive detection */
- if (e1000e_is_tcp_ack(core, core->rx_pkt)) {
+ if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) &&
+ (e1000e_is_tcp_ack(core, core->rx_pkt))) {
n |= E1000_ICS_ACK;
}
@@ -1807,6 +1808,7 @@ e1000e_core_set_link_status(E1000ECore *core)
core->autoneg_timer);
} else {
e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
+ e1000e_start_recv(core);
}
}
@@ -2007,19 +2009,23 @@ e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
}
if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) {
- trace_e1000e_irq_ims_clear_eiame(core->mac[IAM], cause);
- e1000e_clear_ims_bits(core, core->mac[IAM] & cause);
+ trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause);
+ core->mac[IAM] &= ~cause;
}
trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]);
- if (core->mac[EIAC] & E1000_ICR_OTHER) {
- effective_eiac = (core->mac[EIAC] & E1000_EIAC_MASK) |
- E1000_ICR_OTHER_CAUSES;
- } else {
- effective_eiac = core->mac[EIAC] & E1000_EIAC_MASK;
+ effective_eiac = core->mac[EIAC] & cause;
+
+ if (effective_eiac == E1000_ICR_OTHER) {
+ effective_eiac |= E1000_ICR_OTHER_CAUSES;
}
+
core->mac[ICR] &= ~effective_eiac;
+
+ if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
+ core->mac[IMS] &= ~effective_eiac;
+ }
}
static void
@@ -2130,7 +2136,7 @@ e1000e_update_interrupt_state(E1000ECore *core)
/* Set ICR[OTHER] for MSI-X */
if (is_msix) {
- if (core->mac[ICR] & core->mac[IMS] & E1000_ICR_OTHER_CAUSES) {
+ if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) {
core->mac[ICR] |= E1000_ICR_OTHER;
trace_e1000e_irq_add_msi_other(core->mac[ICR]);
}
@@ -2168,7 +2174,7 @@ e1000e_update_interrupt_state(E1000ECore *core)
}
}
-static inline void
+static void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val)
{
trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]);
@@ -2187,6 +2193,8 @@ e1000e_autoneg_timer(void *opaque)
E1000ECore *core = opaque;
if (!qemu_get_queue(core->owner_nic)->link_down) {
e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
+ e1000e_start_recv(core);
+
e1000e_update_flowctl_status(core);
/* signal link status change to the guest */
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
@@ -2344,7 +2352,7 @@ e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val)
core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
- if (msix_enabled(core->owner)) {
+ if (!msix_enabled(core->owner)) {
return;
}
diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h
index 5f413a9e08..1ff6978ca1 100644
--- a/hw/net/e1000e_core.h
+++ b/hw/net/e1000e_core.h
@@ -144,3 +144,6 @@ e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size);
ssize_t
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt);
+
+void
+e1000e_start_recv(E1000ECore *core);
diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
index bab4dbfc98..4bf71f2d85 100644
--- a/hw/net/eepro100.c
+++ b/hw/net/eepro100.c
@@ -1843,6 +1843,7 @@ static void pci_nic_uninit(PCIDevice *pci_dev)
EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, pci_dev);
vmstate_unregister(&pci_dev->qdev, s->vmstate, s);
+ g_free(s->vmstate);
eeprom93xx_free(&pci_dev->qdev, s->eeprom);
qemu_del_nic(s->nic);
}
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index b5c777fbf6..fadf9c8faf 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -348,8 +348,8 @@ static ssize_t etsec_receive(NetClientState *nc,
eTSEC *etsec = qemu_get_nic_opaque(nc);
#if defined(HEX_DUMP)
- fprintf(stderr, "%s receive size:%d\n", etsec->nic->nc.name, size);
- qemu_hexdump(buf, stderr, "", size);
+ fprintf(stderr, "%s receive size:%zd\n", nc->name, size);
+ qemu_hexdump((void *)buf, stderr, "", size);
#endif
/* Flush is unnecessary as we are already in the receiving path */
etsec->need_flush = false;
@@ -387,7 +387,7 @@ static void etsec_realize(DeviceState *dev, Error **errp)
etsec->bh = qemu_bh_new(etsec_timer_hit, etsec);
- etsec->ptimer = ptimer_init(etsec->bh);
+ etsec->ptimer = ptimer_init(etsec->bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(etsec->ptimer, 100);
}
diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c
index 79d2f14dd8..54c01275d4 100644
--- a/hw/net/fsl_etsec/rings.c
+++ b/hw/net/fsl_etsec/rings.c
@@ -474,6 +474,14 @@ static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size)
/* CRC padding (We don't have to compute the CRC) */
etsec->rx_padding = 4;
+ /*
+ * Ensure that payload length + CRC length is at least the 802.3
+ * minimum frame size (64 bytes)
+ */
+ if (etsec->rx_buffer_len < 60) {
+ etsec->rx_padding += 60 - etsec->rx_buffer_len;
+ }
+
etsec->rx_first_in_frame = 1;
etsec->rx_remaining_data = etsec->rx_buffer_len;
RING_DEBUG("%s: rx_buffer_len:%u rx_padding+crc:%u\n", __func__,
diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c
index 1c415ab3b1..50c75642c6 100644
--- a/hw/net/imx_fec.c
+++ b/hw/net/imx_fec.c
@@ -429,7 +429,7 @@ static void imx_fec_do_tx(IMXFECState *s)
frame_size += len;
if (bd.flags & ENET_BD_L) {
/* Last buffer in frame. */
- qemu_send_packet(qemu_get_queue(s->nic), frame, len);
+ qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
ptr = frame;
frame_size = 0;
s->regs[ENET_EIR] |= ENET_INT_TXF;
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 4615d873b1..3db8937cac 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -1345,7 +1345,7 @@ static int lan9118_init1(SysBusDevice *sbd)
s->txp = &s->tx_packet;
bh = qemu_bh_new(lan9118_tick, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(s->timer, 10000);
ptimer_set_limit(s->timer, 0xffff, 1);
diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c
index 0ee8ad9d66..4025eb3b33 100644
--- a/hw/net/mcf_fec.c
+++ b/hw/net/mcf_fec.c
@@ -23,6 +23,7 @@ do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#define DPRINTF(fmt, ...) do {} while(0)
#endif
+#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
typedef struct {
@@ -149,7 +150,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s)
uint32_t addr;
mcf_fec_bd bd;
int frame_size;
- int len;
+ int len, descnt = 0;
uint8_t frame[FEC_MAX_FRAME_SIZE];
uint8_t *ptr;
@@ -157,7 +158,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s)
ptr = frame;
frame_size = 0;
addr = s->tx_descriptor;
- while (1) {
+ while (descnt++ < FEC_MAX_DESC) {
mcf_fec_read_bd(&bd, addr);
DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
addr, bd.flags, bd.length, bd.data);
@@ -176,7 +177,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s)
if (bd.flags & FEC_BD_L) {
/* Last buffer in frame. */
DPRINTF("Sending packet\n");
- qemu_send_packet(qemu_get_queue(s->nic), frame, len);
+ qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
ptr = frame;
frame_size = 0;
s->eir |= FEC_INT_TXF;
@@ -392,7 +393,7 @@ static void mcf_fec_write(void *opaque, hwaddr addr,
s->tx_descriptor = s->etdsr;
break;
case 0x188:
- s->emrbr = value & 0x7f0;
+ s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0;
break;
default:
hw_error("mcf_fec_write Bad address 0x%x\n", (int)addr);
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index 198a01f92d..654455355f 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -302,7 +302,7 @@ static inline void pcnet_tmd_load(PCNetState *s, struct pcnet_TMD *tmd,
uint32_t tbadr;
int16_t length;
int16_t status;
- } xda;
+ } xda;
s->phys_mem_read(s->dma_opaque, addr, (void *)&xda, sizeof(xda), 0);
tmd->tbadr = le32_to_cpu(xda.tbadr) & 0xffffff;
tmd->length = le16_to_cpu(xda.length);
@@ -664,7 +664,9 @@ static inline int ladr_match(PCNetState *s, const uint8_t *buf, int size)
static inline hwaddr pcnet_rdra_addr(PCNetState *s, int idx)
{
- while (idx < 1) idx += CSR_RCVRL(s);
+ while (idx < 1) {
+ idx += CSR_RCVRL(s);
+ }
return s->rdra + ((CSR_RCVRL(s) - idx) * (BCR_SWSTYLE(s) ? 16 : 8));
}
@@ -672,8 +674,10 @@ static inline int64_t pcnet_get_next_poll_time(PCNetState *s, int64_t current_ti
{
int64_t next_time = current_time +
(65536 - (CSR_SPND(s) ? 0 : CSR_POLL(s))) * 30;
- if (next_time <= current_time)
+
+ if (next_time <= current_time) {
next_time = current_time + 1;
+ }
return next_time;
}
@@ -795,13 +799,13 @@ static void pcnet_init(PCNetState *s)
mode = le16_to_cpu(initblk.mode);
rlen = initblk.rlen >> 4;
tlen = initblk.tlen >> 4;
- ladrf[0] = le16_to_cpu(initblk.ladrf[0]);
- ladrf[1] = le16_to_cpu(initblk.ladrf[1]);
- ladrf[2] = le16_to_cpu(initblk.ladrf[2]);
- ladrf[3] = le16_to_cpu(initblk.ladrf[3]);
- padr[0] = le16_to_cpu(initblk.padr[0]);
- padr[1] = le16_to_cpu(initblk.padr[1]);
- padr[2] = le16_to_cpu(initblk.padr[2]);
+ ladrf[0] = le16_to_cpu(initblk.ladrf[0]);
+ ladrf[1] = le16_to_cpu(initblk.ladrf[1]);
+ ladrf[2] = le16_to_cpu(initblk.ladrf[2]);
+ ladrf[3] = le16_to_cpu(initblk.ladrf[3]);
+ padr[0] = le16_to_cpu(initblk.padr[0]);
+ padr[1] = le16_to_cpu(initblk.padr[1]);
+ padr[2] = le16_to_cpu(initblk.padr[2]);
rdra = le32_to_cpu(initblk.rdra);
tdra = le32_to_cpu(initblk.tdra);
} else {
@@ -809,13 +813,13 @@ static void pcnet_init(PCNetState *s)
s->phys_mem_read(s->dma_opaque, PHYSADDR(s,CSR_IADR(s)),
(uint8_t *)&initblk, sizeof(initblk), 0);
mode = le16_to_cpu(initblk.mode);
- ladrf[0] = le16_to_cpu(initblk.ladrf[0]);
- ladrf[1] = le16_to_cpu(initblk.ladrf[1]);
- ladrf[2] = le16_to_cpu(initblk.ladrf[2]);
- ladrf[3] = le16_to_cpu(initblk.ladrf[3]);
- padr[0] = le16_to_cpu(initblk.padr[0]);
- padr[1] = le16_to_cpu(initblk.padr[1]);
- padr[2] = le16_to_cpu(initblk.padr[2]);
+ ladrf[0] = le16_to_cpu(initblk.ladrf[0]);
+ ladrf[1] = le16_to_cpu(initblk.ladrf[1]);
+ ladrf[2] = le16_to_cpu(initblk.ladrf[2]);
+ ladrf[3] = le16_to_cpu(initblk.ladrf[3]);
+ padr[0] = le16_to_cpu(initblk.padr[0]);
+ padr[1] = le16_to_cpu(initblk.padr[1]);
+ padr[2] = le16_to_cpu(initblk.padr[2]);
rdra = le32_to_cpu(initblk.rdra);
tdra = le32_to_cpu(initblk.tdra);
rlen = rdra >> 29;
@@ -858,12 +862,12 @@ static void pcnet_start(PCNetState *s)
printf("pcnet_start\n");
#endif
- if (!CSR_DTX(s))
+ if (!CSR_DTX(s)) {
s->csr[0] |= 0x0010; /* set TXON */
-
- if (!CSR_DRX(s))
+ }
+ if (!CSR_DRX(s)) {
s->csr[0] |= 0x0020; /* set RXON */
-
+ }
s->csr[0] &= ~0x0004; /* clear STOP bit */
s->csr[0] |= 0x0002;
pcnet_poll_timer(s);
@@ -925,8 +929,7 @@ static void pcnet_rdte_poll(PCNetState *s)
crda);
}
} else {
- printf("pcnet: BAD RMD RDA=0x" TARGET_FMT_plx "\n",
- crda);
+ printf("pcnet: BAD RMD RDA=0x" TARGET_FMT_plx "\n", crda);
#endif
}
}
@@ -1168,10 +1171,11 @@ ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_)
#endif
while (pktcount--) {
- if (CSR_RCVRC(s) <= 1)
+ if (CSR_RCVRC(s) <= 1) {
CSR_RCVRC(s) = CSR_RCVRL(s);
- else
+ } else {
CSR_RCVRC(s)--;
+ }
}
pcnet_rdte_poll(s);
@@ -1207,7 +1211,7 @@ static void pcnet_transmit(PCNetState *s)
s->tx_busy = 1;
- txagain:
+txagain:
if (pcnet_tdte_poll(s)) {
struct pcnet_TMD tmd;
@@ -1251,7 +1255,7 @@ static void pcnet_transmit(PCNetState *s)
s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
s->xmit_pos += bcnt;
-
+
if (!GET_FIELD(tmd.status, TMDS, ENP)) {
goto txdone;
}
@@ -1276,21 +1280,22 @@ static void pcnet_transmit(PCNetState *s)
s->csr[4] |= 0x0004; /* set TXSTRT */
s->xmit_pos = -1;
- txdone:
+txdone:
SET_FIELD(&tmd.status, TMDS, OWN, 0);
TMDSTORE(&tmd, PHYSADDR(s,CSR_CXDA(s)));
- if (!CSR_TOKINTD(s) || (CSR_LTINTEN(s) && GET_FIELD(tmd.status, TMDS, LTINT)))
+ if (!CSR_TOKINTD(s)
+ || (CSR_LTINTEN(s) && GET_FIELD(tmd.status, TMDS, LTINT))) {
s->csr[0] |= 0x0200; /* set TINT */
-
- if (CSR_XMTRC(s)<=1)
+ }
+ if (CSR_XMTRC(s) <= 1) {
CSR_XMTRC(s) = CSR_XMTRL(s);
- else
+ } else {
CSR_XMTRC(s)--;
- if (count--)
+ }
+ if (count--) {
goto txagain;
-
- } else
- if (s->xmit_pos >= 0) {
+ }
+ } else if (s->xmit_pos >= 0) {
struct pcnet_TMD tmd;
TMDLOAD(&tmd, xmit_cxda);
SET_FIELD(&tmd.misc, TMDM, BUFF, 1);
@@ -1301,9 +1306,9 @@ static void pcnet_transmit(PCNetState *s)
s->csr[0] |= 0x0200; /* set TINT */
if (!CSR_DXSUFLO(s)) {
s->csr[0] &= ~0x0010;
- } else
- if (count--)
- goto txagain;
+ } else if (count--) {
+ goto txagain;
+ }
}
s->tx_busy = 0;
@@ -1315,13 +1320,11 @@ static void pcnet_poll(PCNetState *s)
pcnet_rdte_poll(s);
}
- if (CSR_TDMD(s) ||
- (CSR_TXON(s) && !CSR_DPOLL(s) && pcnet_tdte_poll(s)))
- {
+ if (CSR_TDMD(s) || (CSR_TXON(s) && !CSR_DPOLL(s) && pcnet_tdte_poll(s))) {
/* prevent recursion */
- if (s->tx_busy)
+ if (s->tx_busy) {
return;
-
+ }
pcnet_transmit(s);
}
}
@@ -1340,15 +1343,16 @@ static void pcnet_poll_timer(void *opaque)
if (!CSR_STOP(s) && !CSR_SPND(s) && !CSR_DPOLL(s)) {
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) * 33;
- if (!s->timer || !now)
+ if (!s->timer || !now) {
s->timer = now;
- else {
+ } else {
uint64_t t = now - s->timer + CSR_POLL(s);
if (t > 0xffffLL) {
pcnet_poll(s);
CSR_POLL(s) = CSR_PINT(s);
- } else
+ } else {
CSR_POLL(s) = t;
+ }
}
timer_mod(s->poll_timer,
pcnet_get_next_poll_time(s,qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)));
@@ -1371,21 +1375,21 @@ static void pcnet_csr_writew(PCNetState *s, uint32_t rap, uint32_t new_value)
val = (val & 0x007f) | (s->csr[0] & 0x7f00);
/* IFF STOP, STRT and INIT are set, clear STRT and INIT */
- if ((val&7) == 7)
- val &= ~3;
-
- if (!CSR_STOP(s) && (val & 4))
+ if ((val & 7) == 7) {
+ val &= ~3;
+ }
+ if (!CSR_STOP(s) && (val & 4)) {
pcnet_stop(s);
-
- if (!CSR_INIT(s) && (val & 1))
+ }
+ if (!CSR_INIT(s) && (val & 1)) {
pcnet_init(s);
-
- if (!CSR_STRT(s) && (val & 2))
+ }
+ if (!CSR_STRT(s) && (val & 2)) {
pcnet_start(s);
-
- if (CSR_TDMD(s))
+ }
+ if (CSR_TDMD(s)) {
pcnet_transmit(s);
-
+ }
return;
case 1:
case 2:
@@ -1429,12 +1433,16 @@ static void pcnet_csr_writew(PCNetState *s, uint32_t rap, uint32_t new_value)
case 47: /* POLLINT */
case 72:
case 74:
+ break;
case 76: /* RCVRL */
case 78: /* XMTRL */
+ val = (val > 0) ? val : 512;
+ break;
case 112:
- if (CSR_STOP(s) || CSR_SPND(s))
- break;
- return;
+ if (CSR_STOP(s) || CSR_SPND(s)) {
+ break;
+ }
+ return;
case 3:
break;
case 4:
@@ -1651,8 +1659,7 @@ void pcnet_ioport_writel(void *opaque, uint32_t addr, uint32_t val)
pcnet_bcr_writew(s, s->rap, val & 0xffff);
break;
}
- } else
- if ((addr & 0x0f) == 0) {
+ } else if ((addr & 0x0f) == 0) {
/* switch device to dword i/o mode */
pcnet_bcr_writew(s, BCR_BSBC, pcnet_bcr_readw(s, BCR_BSBC) | 0x0080);
#ifdef PCNET_DEBUG_IO
diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c
index 30f2ce417b..e9d215aa4d 100644
--- a/hw/net/rocker/rocker.c
+++ b/hw/net/rocker/rocker.c
@@ -860,7 +860,7 @@ static void rocker_io_writel(void *opaque, hwaddr addr, uint32_t val)
rocker_msix_irq(r, val);
break;
case ROCKER_TEST_DMA_SIZE:
- r->test_dma_size = val;
+ r->test_dma_size = val & 0xFFFF;
break;
case ROCKER_TEST_DMA_ADDR + 4:
r->test_dma_addr = ((uint64_t)val) << 32 | r->lower32;
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index 3345bc6b5e..f05e59c85f 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -2350,7 +2350,7 @@ static void rtl8139_cplus_transmit(RTL8139State *s)
{
int txcount = 0;
- while (rtl8139_cplus_transmit_one(s))
+ while (txcount < 64 && rtl8139_cplus_transmit_one(s))
{
++txcount;
}
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index b273eda933..01ecb02773 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -34,20 +34,13 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "sysemu/sysemu.h"
+#include "trace.h"
#include <libfdt.h>
#define ETH_ALEN 6
#define MAX_PACKET_SIZE 65536
-/*#define DEBUG*/
-
-#ifdef DEBUG
-#define DPRINTF(fmt...) do { fprintf(stderr, fmt); } while (0)
-#else
-#define DPRINTF(fmt...)
-#endif
-
/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)
@@ -106,6 +99,7 @@ typedef struct VIOsPAPRVLANDevice {
VIOsPAPRDevice sdev;
NICConf nicconf;
NICState *nic;
+ MACAddr perm_mac;
bool isopen;
hwaddr buf_list;
uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
@@ -157,8 +151,10 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(VIOsPAPRVLANDevice *dev,
return 0;
}
- DPRINTF("Found buffer: pool=%d count=%d rxbufs=%d\n", pool,
- dev->rx_pool[pool]->count, dev->rx_bufs);
+
+ trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
+ dev->rx_pool[pool]->count,
+ dev->rx_bufs);
/* Remove the buffer from the pool */
dev->rx_pool[pool]->count--;
@@ -185,8 +181,8 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
}
bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);
- DPRINTF("use_buf_ptr=%d bd=0x%016llx\n",
- buf_ptr, (unsigned long long)bd);
+
+ trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
} while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
&& buf_ptr != dev->use_buf_ptr);
@@ -199,7 +195,7 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
dev->use_buf_ptr = buf_ptr;
vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);
- DPRINTF("Found buffer: ptr=%d rxbufs=%d\n", dev->use_buf_ptr, dev->rx_bufs);
+ trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);
return bd;
}
@@ -214,8 +210,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
uint64_t handle;
uint8_t control;
- DPRINTF("spapr_vlan_receive() [%s] rx_bufs=%d\n", sdev->qdev.id,
- dev->rx_bufs);
+ trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);
if (!dev->isopen) {
return -1;
@@ -243,7 +238,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
return -1;
}
- DPRINTF("spapr_vlan_receive: DMA write completed\n");
+ trace_spapr_vlan_receive_dma_completed();
/* Update the receive queue */
control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
@@ -257,12 +252,11 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
- DPRINTF("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n",
- (unsigned long long)dev->rxq_ptr,
- (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
- dev->rxq_ptr),
- (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
- dev->rxq_ptr + 8));
+ trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
+ vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
+ dev->rxq_ptr),
+ vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
+ dev->rxq_ptr + 8));
dev->rxq_ptr += 16;
if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
@@ -316,6 +310,10 @@ static void spapr_vlan_reset(VIOsPAPRDevice *sdev)
spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
}
}
+
+ memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a,
+ sizeof(dev->nicconf.macaddr.a));
+ qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
}
static void spapr_vlan_realize(VIOsPAPRDevice *sdev, Error **errp)
@@ -324,6 +322,8 @@ static void spapr_vlan_realize(VIOsPAPRDevice *sdev, Error **errp)
qemu_macaddr_default_if_unset(&dev->nicconf.macaddr);
+ memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a));
+
dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf,
object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev);
qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
@@ -573,8 +573,8 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
rx_pool_size_compare);
pool = spapr_vlan_get_rx_pool_id(dev, size);
- DPRINTF("created RX pool %d for size %lld\n", pool,
- VLAN_BD_LEN(buf));
+ trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
+ VLAN_BD_LEN(buf));
break;
}
}
@@ -584,8 +584,8 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
return H_RESOURCE;
}
- DPRINTF("h_add_llan_buf(): Add buf using pool %i (size %lli, count=%i)\n",
- pool, VLAN_BD_LEN(buf), dev->rx_pool[pool]->count);
+ trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
+ dev->rx_pool[pool]->count);
dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;
@@ -616,8 +616,7 @@ static target_long spapr_vlan_add_rxbuf_to_page(VIOsPAPRVLANDevice *dev,
vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);
- DPRINTF("h_add_llan_buf(): Added buf ptr=%d rx_bufs=%d bd=0x%016llx\n",
- dev->add_buf_ptr, dev->rx_bufs, (unsigned long long)buf);
+ trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);
return 0;
}
@@ -633,8 +632,7 @@ static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
target_long ret;
- DPRINTF("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx
- ", 0x" TARGET_FMT_lx ")\n", reg, buf);
+ trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);
if (!sdev) {
hcall_dprintf("Bad device\n");
@@ -687,14 +685,13 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
int i, nbufs;
int ret;
- DPRINTF("H_SEND_LOGICAL_LAN(0x" TARGET_FMT_lx ", <bufs>, 0x"
- TARGET_FMT_lx ")\n", reg, continue_token);
+ trace_spapr_vlan_h_send_logical_lan(reg, continue_token);
if (!sdev) {
return H_PARAMETER;
}
- DPRINTF("rxbufs = %d\n", dev->rx_bufs);
+ trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);
if (!dev->isopen) {
return H_DROPPED;
@@ -706,7 +703,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
total_len = 0;
for (i = 0; i < 6; i++) {
- DPRINTF(" buf desc: 0x" TARGET_FMT_lx "\n", bufs[i]);
+ trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
if (!(bufs[i] & VLAN_BD_VALID)) {
break;
}
@@ -714,8 +711,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
}
nbufs = i;
- DPRINTF("h_send_logical_lan() %d buffers, total length 0x%x\n",
- nbufs, total_len);
+ trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);
if (total_len == 0) {
return H_SUCCESS;
@@ -756,6 +752,27 @@ static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return H_SUCCESS;
}
+static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong macaddr = args[1];
+ VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+ VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff;
+ macaddr >>= 8;
+ }
+
+ qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
+
+ return H_SUCCESS;
+}
+
static Property spapr_vlan_properties[] = {
DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev),
DEFINE_NIC_PROPERTIES(VIOsPAPRVLANDevice, nicconf),
@@ -854,6 +871,8 @@ static void spapr_vlan_register_types(void)
spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER,
h_add_logical_lan_buffer);
spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl);
+ spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC,
+ h_change_logical_lan_mac);
type_register_static(&spapr_vlan_info);
}
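
The new H_CHANGE_LOGICAL_LAN_MAC handler unpacks the MAC address from the low 48 bits of the hypercall argument, most-significant byte first. Below is a minimal standalone sketch of that byte-order handling (the example value is hypothetical, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Mirrors the unpacking loop in h_change_logical_lan_mac(): the low 48
 * bits of 'macaddr' end up in network order, a[0] being the most
 * significant byte. */
static void unpack_mac(uint64_t macaddr, uint8_t a[ETH_ALEN])
{
    for (int i = 0; i < ETH_ALEN; i++) {
        a[ETH_ALEN - i - 1] = macaddr & 0xff;
        macaddr >>= 8;
    }
}

int main(void)
{
    uint8_t a[ETH_ALEN];

    unpack_mac(0x0123456789abULL, a);            /* hypothetical value */
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           a[0], a[1], a[2], a[3], a[4], a[5]);  /* prints 01:23:45:67:89:ab */
    return 0;
}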
diff --git a/hw/net/trace-events b/hw/net/trace-events
index 8d38d7724d..1a5c909939 100644
--- a/hw/net/trace-events
+++ b/hw/net/trace-events
@@ -223,7 +223,7 @@ e1000e_irq_icr_read_entry(uint32_t icr) "Starting ICR read. Current ICR: 0x%x"
e1000e_irq_icr_read_exit(uint32_t icr) "Ending ICR read. Current ICR: 0x%x"
e1000e_irq_icr_clear_zero_ims(void) "Clearing ICR on read due to zero IMS"
e1000e_irq_icr_clear_iame(void) "Clearing ICR on read due to IAME"
-e1000e_irq_ims_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X"
+e1000e_irq_iam_clear_eiame(uint32_t iam, uint32_t cause) "Clearing IMS due to EIAME, IAM: 0x%X, cause: 0x%X"
e1000e_irq_icr_clear_eiac(uint32_t icr, uint32_t eiac) "Clearing ICR bits due to EIAC, ICR: 0x%X, EIAC: 0x%X"
e1000e_irq_ims_clear_set_imc(uint32_t val) "Clearing IMS bits due to IMC write 0x%x"
e1000e_irq_fire_delayed_interrupts(void) "Firing delayed interrupts"
@@ -270,3 +270,19 @@ e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d"
e1000e_vm_state_running(void) "VM state is running"
e1000e_vm_state_stopped(void) "VM state is stopped"
+
+# hw/net/spapr_llan.c
+spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) "pool=%d count=%"PRId32" rxbufs=%"PRIu32
+spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64
+spapr_vlan_get_rx_bd_from_page_found(uint32_t use_buf_ptr, uint32_t rx_bufs) "ptr=%"PRIu32" rxbufs=%"PRIu32
+spapr_vlan_receive(const char *id, uint32_t rx_bufs) "[%s] rx_bufs=%"PRIu32
+spapr_vlan_receive_dma_completed(void) "DMA write completed"
+spapr_vlan_receive_wrote(uint64_t ptr, uint64_t hi, uint64_t lo) "rxq entry (ptr=0x%"PRIx64"): 0x%016"PRIx64" 0x%016"PRIx64
+spapr_vlan_add_rxbuf_to_pool_create(int pool, uint64_t len) "created RX pool %d for size %"PRIu64
+spapr_vlan_add_rxbuf_to_pool(int pool, uint64_t len, int32_t count) "add buf using pool %d (size %"PRIu64", count=%"PRId32")"
+spapr_vlan_add_rxbuf_to_page(uint32_t ptr, uint32_t rx_bufs, uint64_t bd) "added buf ptr=%"PRIu32" rx_bufs=%"PRIu32" bd=0x%016"PRIx64
+spapr_vlan_h_add_logical_lan_buffer(uint64_t reg, uint64_t buf) "H_ADD_LOGICAL_LAN_BUFFER(0x%"PRIx64", 0x%"PRIx64")"
+spapr_vlan_h_send_logical_lan(uint64_t reg, uint64_t continue_token) "H_SEND_LOGICAL_LAN(0x%"PRIx64", <bufs>, 0x%"PRIx64")"
+spapr_vlan_h_send_logical_lan_rxbufs(uint32_t rx_bufs) "rxbufs = %"PRIu32
+spapr_vlan_h_send_logical_lan_buf_desc(uint64_t buf) " buf desc: 0x%"PRIx64
+spapr_vlan_h_send_logical_lan_total(int nbufs, unsigned total_len) "%d buffers, total length 0x%x"
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 01f1351554..5009533cfa 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -31,6 +31,11 @@
#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
+/* previously fixed value */
+#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+/* for now, only allow larger queues; with virtio-1, guest can downsize */
+#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+
/*
* Calculate the number of bytes up to and including the given 'field' of
* 'container'.
@@ -503,6 +508,10 @@ static void virtio_net_set_queues(VirtIONet *n)
int i;
int r;
+ if (n->nic->peer_deleted) {
+ return;
+ }
+
for (i = 0; i < n->max_queues; i++) {
if (i < n->curr_queues) {
r = peer_attach(n, i);
@@ -875,6 +884,7 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
return VIRTIO_NET_OK;
}
+
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
@@ -892,8 +902,10 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
- error_report("virtio-net ctrl missing headers");
- exit(1);
+ virtio_error(vdev, "virtio-net ctrl missing headers");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
}
iov_cnt = elem->out_num;
@@ -1122,21 +1134,24 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) {
- if (i == 0)
- return -1;
- error_report("virtio-net unexpected empty queue: "
- "i %zd mergeable %d offset %zd, size %zd, "
- "guest hdr len %zd, host hdr len %zd "
- "guest features 0x%" PRIx64,
- i, n->mergeable_rx_bufs, offset, size,
- n->guest_hdr_len, n->host_hdr_len,
- vdev->guest_features);
- exit(1);
+ if (i) {
+ virtio_error(vdev, "virtio-net unexpected empty queue: "
+ "i %zd mergeable %d offset %zd, size %zd, "
+ "guest hdr len %zd, host hdr len %zd "
+ "guest features 0x%" PRIx64,
+ i, n->mergeable_rx_bufs, offset, size,
+ n->guest_hdr_len, n->host_hdr_len,
+ vdev->guest_features);
+ }
+ return -1;
}
if (elem->in_num < 1) {
- error_report("virtio-net receive queue contains no in buffers");
- exit(1);
+ virtio_error(vdev,
+ "virtio-net receive queue contains no in buffers");
+ virtqueue_detach_element(q->rx_vq, elem, 0);
+ g_free(elem);
+ return -1;
}
sg = elem->in_sg;
@@ -1166,7 +1181,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
- virtqueue_discard(q->rx_vq, elem, total);
+ virtqueue_unpop(q->rx_vq, elem, total);
g_free(elem);
return size;
}
@@ -1238,15 +1253,19 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
out_num = elem->out_num;
out_sg = elem->out_sg;
if (out_num < 1) {
- error_report("virtio-net header not in first element");
- exit(1);
+ virtio_error(vdev, "virtio-net header not in first element");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->has_vnet_hdr) {
if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
n->guest_hdr_len) {
- error_report("virtio-net header incorrect");
- exit(1);
+ virtio_error(vdev, "virtio-net header incorrect");
+ virtqueue_detach_element(q->tx_vq, elem, 0);
+ g_free(elem);
+ return -EINVAL;
}
if (n->needs_vnet_hdr_swap) {
virtio_net_hdr_swap(vdev, (void *) &mhdr);
@@ -1314,7 +1333,9 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
virtio_queue_set_notification(vq, 1);
timer_del(q->tx_timer);
q->tx_waiting = 0;
- virtio_net_flush_tx(q);
+ if (virtio_net_flush_tx(q) == -EINVAL) {
+ return;
+ }
} else {
timer_mod(q->tx_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -1385,8 +1406,9 @@ static void virtio_net_tx_bh(void *opaque)
}
ret = virtio_net_flush_tx(q);
- if (ret == -EBUSY) {
- return; /* Notification re-enable handled by tx_complete */
+ if (ret == -EBUSY || ret == -EINVAL) {
+ return; /* Notification re-enable handled by tx_complete or device
+ * broken */
}
/* If we flush a full burst of packets, assume there are
@@ -1401,7 +1423,10 @@ static void virtio_net_tx_bh(void *opaque)
* anything that may have come in while we weren't looking. If
* we find something, assume the guest is still active and reschedule */
virtio_queue_set_notification(q->tx_vq, 1);
- if (virtio_net_flush_tx(q) > 0) {
+ ret = virtio_net_flush_tx(q);
+ if (ret == -EINVAL) {
+ return;
+ } else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
@@ -1412,7 +1437,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
{
VirtIODevice *vdev = VIRTIO_DEVICE(n);
- n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+ n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
+ virtio_net_handle_rx);
if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
n->vqs[index].tx_vq =
virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
@@ -1492,17 +1518,6 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
virtio_net_set_queues(n);
}
-static void virtio_net_save(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIONet *n = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(n);
-
- /* At this point, backend must be stopped, otherwise
- * it might keep writing to memory. */
- assert(!n->vhost_started);
- virtio_save(vdev, f);
-}
-
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
VirtIONet *n = VIRTIO_NET(vdev);
@@ -1538,14 +1553,6 @@ static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
}
}
-static int virtio_net_load(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIONet *n = opaque;
- VirtIODevice *vdev = VIRTIO_DEVICE(n);
-
- return virtio_load(vdev, f, VIRTIO_NET_VM_VERSION);
-}
-
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
int version_id)
{
@@ -1720,6 +1727,22 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
+ /*
+ * We set a lower limit on RX queue size to what it always was.
+ * Guests that want a smaller ring can always resize it without
+ * help from us (using virtio 1 and up).
+ */
+ if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
+ n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
+ (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
+ error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
+ "must be a power of 2 between %d and %d.",
+ n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
+ VIRTQUEUE_MAX_SIZE);
+ virtio_cleanup(vdev);
+ return;
+ }
+
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
@@ -1832,8 +1855,25 @@ static void virtio_net_instance_init(Object *obj)
DEVICE(n), NULL);
}
-VMSTATE_VIRTIO_DEVICE(net, VIRTIO_NET_VM_VERSION, virtio_net_load,
- virtio_net_save);
+static void virtio_net_pre_save(void *opaque)
+{
+ VirtIONet *n = opaque;
+
+ /* At this point, backend must be stopped, otherwise
+ * it might keep writing to memory. */
+ assert(!n->vhost_started);
+}
+
+static const VMStateDescription vmstate_virtio_net = {
+ .name = "virtio-net",
+ .minimum_version_id = VIRTIO_NET_VM_VERSION,
+ .version_id = VIRTIO_NET_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .pre_save = virtio_net_pre_save,
+};
static Property virtio_net_properties[] = {
DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
@@ -1880,6 +1920,8 @@ static Property virtio_net_properties[] = {
TX_TIMER_INTERVAL),
DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
+ DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
+ VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1904,6 +1946,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data)
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
vdc->load = virtio_net_load_device;
vdc->save = virtio_net_save_device;
+ vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
}
static const TypeInfo virtio_net_info = {
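
The new rx_queue_size property is only accepted when it is a power of two between VIRTIO_NET_RX_QUEUE_MIN_SIZE (256) and VIRTQUEUE_MAX_SIZE. A minimal standalone restatement of that check (assuming VIRTQUEUE_MAX_SIZE is 1024, as in current QEMU):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Restates the rx_queue_size validation added in
 * virtio_net_device_realize(): value must lie in [256, 1024] and be a
 * power of two; (n & (n - 1)) == 0 is the usual power-of-two test. */
#define RX_QUEUE_MIN_SIZE 256
#define RX_QUEUE_MAX_SIZE 1024   /* assumed VIRTQUEUE_MAX_SIZE */

static bool rx_queue_size_valid(uint16_t n)
{
    return n >= RX_QUEUE_MIN_SIZE &&
           n <= RX_QUEUE_MAX_SIZE &&
           (n & (n - 1)) == 0;
}

int main(void)
{
    printf("256: %d, 512: %d, 1000: %d\n",
           rx_queue_size_valid(256), rx_queue_size_valid(512),
           rx_queue_size_valid(1000));
    return 0;
}

With this patch applied, the ring could be enlarged on the command line with something like -device virtio-net-pci,netdev=net0,rx_queue_size=1024.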
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 90f6943668..92f6af9620 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 6856b52999..20c43a61b3 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -69,7 +69,7 @@ static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, i
netdev->tx_ring.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
if (notify) {
- xen_be_send_notify(&netdev->xendev);
+ xen_pv_send_notify(&netdev->xendev);
}
if (i == netdev->tx_ring.req_cons) {
@@ -128,30 +128,32 @@ static void net_tx_packets(struct XenNetDev *netdev)
/* should not happen in theory, we don't announce the *
* feature-{sg,gso,whatelse} flags in xenstore (yet?) */
if (txreq.flags & NETTXF_extra_info) {
- xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
+ xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
net_tx_error(netdev, &txreq, rc);
continue;
}
if (txreq.flags & NETTXF_more_data) {
- xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
+ xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
net_tx_error(netdev, &txreq, rc);
continue;
}
#endif
if (txreq.size < 14) {
- xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size);
+ xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
+ txreq.size);
net_tx_error(netdev, &txreq, rc);
continue;
}
if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
- xen_be_printf(&netdev->xendev, 0, "error: page crossing\n");
+ xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
net_tx_error(netdev, &txreq, rc);
continue;
}
- xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
+ xen_pv_printf(&netdev->xendev, 3,
+ "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
txreq.gref, txreq.offset, txreq.size, txreq.flags,
(txreq.flags & NETTXF_csum_blank) ? " csum_blank" : "",
(txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
@@ -162,8 +164,9 @@ static void net_tx_packets(struct XenNetDev *netdev)
netdev->xendev.dom,
txreq.gref, PROT_READ);
if (page == NULL) {
- xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n",
- txreq.gref);
+ xen_pv_printf(&netdev->xendev, 0,
+ "error: tx gref dereference failed (%d)\n",
+ txreq.gref);
net_tx_error(netdev, &txreq, rc);
continue;
}
@@ -211,13 +214,14 @@ static void net_rx_response(struct XenNetDev *netdev,
resp->status = (int16_t)st;
}
- xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n",
+ xen_pv_printf(&netdev->xendev, 3,
+ "rx response: idx %d, status %d, flags 0x%x\n",
i, resp->status, resp->flags);
netdev->rx_ring.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
if (notify) {
- xen_be_send_notify(&netdev->xendev);
+ xen_pv_send_notify(&netdev->xendev);
}
}
@@ -242,7 +246,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
return 0;
}
if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
- xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
+ xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
(unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
return -1;
}
@@ -254,7 +258,8 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
netdev->xendev.dom,
rxreq.gref, PROT_WRITE);
if (page == NULL) {
- xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n",
+ xen_pv_printf(&netdev->xendev, 0,
+ "error: rx gref dereference failed (%d)\n",
rxreq.gref);
net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
return -1;
@@ -328,7 +333,8 @@ static int net_connect(struct XenDevice *xendev)
rx_copy = 0;
}
if (rx_copy == 0) {
- xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n");
+ xen_pv_printf(&netdev->xendev, 0,
+ "frontend doesn't support rx-copy.\n");
return -1;
}
@@ -353,7 +359,7 @@ static int net_connect(struct XenDevice *xendev)
xen_be_bind_evtchn(&netdev->xendev);
- xen_be_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
+ xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
"remote port %d, local port %d\n",
netdev->tx_ring_ref, netdev->rx_ring_ref,
netdev->xendev.remote_port, netdev->xendev.local_port);
@@ -366,7 +372,7 @@ static void net_disconnect(struct XenDevice *xendev)
{
struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
- xen_be_unbind_evtchn(&netdev->xendev);
+ xen_pv_unbind_evtchn(&netdev->xendev);
if (netdev->txs) {
xengnttab_unmap(netdev->xendev.gnttabdev, netdev->txs, 1);
diff --git a/hw/nvram/Makefile.objs b/hw/nvram/Makefile.objs
index e9a66940e0..c018f6b2ff 100644
--- a/hw/nvram/Makefile.objs
+++ b/hw/nvram/Makefile.objs
@@ -1,5 +1,6 @@
common-obj-$(CONFIG_DS1225Y) += ds1225y.o
common-obj-y += eeprom93xx.o
common-obj-y += fw_cfg.o
+common-obj-y += chrp_nvram.o
common-obj-$(CONFIG_MAC_NVRAM) += mac_nvram.o
obj-$(CONFIG_PSERIES) += spapr_nvram.o
diff --git a/hw/nvram/chrp_nvram.c b/hw/nvram/chrp_nvram.c
new file mode 100644
index 0000000000..3837510dd2
--- /dev/null
+++ b/hw/nvram/chrp_nvram.c
@@ -0,0 +1,85 @@
+/*
+ * Common Hardware Reference Platform NVRAM helper functions.
+ *
+ * The CHRP NVRAM layout is used by OpenBIOS and SLOF. See CHRP
+ * specification, chapter 8, or the LoPAPR specification for details
+ * about the NVRAM layout.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "hw/hw.h"
+#include "hw/nvram/chrp_nvram.h"
+#include "sysemu/sysemu.h"
+
+static int chrp_nvram_set_var(uint8_t *nvram, int addr, const char *str)
+{
+ int len;
+
+ len = strlen(str) + 1;
+ memcpy(&nvram[addr], str, len);
+
+ return addr + len;
+}
+
+/**
+ * Create a "system partition", used for the Open Firmware
+ * environment variables.
+ */
+int chrp_nvram_create_system_partition(uint8_t *data, int min_len)
+{
+ ChrpNvramPartHdr *part_header;
+ unsigned int i;
+ int end;
+
+ part_header = (ChrpNvramPartHdr *)data;
+ part_header->signature = CHRP_NVPART_SYSTEM;
+ pstrcpy(part_header->name, sizeof(part_header->name), "system");
+
+ end = sizeof(ChrpNvramPartHdr);
+ for (i = 0; i < nb_prom_envs; i++) {
+ end = chrp_nvram_set_var(data, end, prom_envs[i]);
+ }
+
+ /* End marker */
+ data[end++] = '\0';
+
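+ /* round the partition size up to a 16-byte boundary */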
+ end = (end + 15) & ~15;
+ /* XXX: OpenBIOS is not able to grow a partition. Leave some space for
+ new variables. */
+ if (end < min_len) {
+ end = min_len;
+ }
+ chrp_nvram_finish_partition(part_header, end);
+
+ return end;
+}
+
+/**
+ * Create a "free space" partition
+ */
+int chrp_nvram_create_free_partition(uint8_t *data, int len)
+{
+ ChrpNvramPartHdr *part_header;
+
+ part_header = (ChrpNvramPartHdr *)data;
+ part_header->signature = CHRP_NVPART_FREE;
+ pstrcpy(part_header->name, sizeof(part_header->name), "free");
+
+ chrp_nvram_finish_partition(part_header, len);
+
+ return len;
+}
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
index 6a68e594d5..3ebecb2260 100644
--- a/hw/nvram/fw_cfg.c
+++ b/hw/nvram/fw_cfg.c
@@ -29,7 +29,6 @@
#include "hw/isa/isa.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/sysbus.h"
-#include "hw/boards.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
@@ -180,7 +179,7 @@ static void fw_cfg_bootsplash(FWCfgState *s)
temp = qemu_opt_get(opts, "splash-time");
if (temp != NULL) {
p = (char *)temp;
- boot_splash_time = strtol(p, (char **)&p, 10);
+ boot_splash_time = strtol(p, &p, 10);
}
}
@@ -240,7 +239,7 @@ static void fw_cfg_reboot(FWCfgState *s)
temp = qemu_opt_get(opts, "reboot-timeout");
if (temp != NULL) {
p = (char *)temp;
- reboot_timeout = strtol(p, (char **)&p, 10);
+ reboot_timeout = strtol(p, &p, 10);
}
}
/* validate the input */
@@ -883,9 +882,8 @@ static void fw_cfg_init1(DeviceState *dev)
qdev_init_nofail(dev);
fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4);
- fw_cfg_add_bytes(s, FW_CFG_UUID, qemu_uuid, 16);
+ fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16);
fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics);
- fw_cfg_add_i16(s, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)boot_menu);
fw_cfg_bootsplash(s);
fw_cfg_reboot(s);
diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c
index 24f61212ba..63f9ed1d82 100644
--- a/hw/nvram/mac_nvram.c
+++ b/hw/nvram/mac_nvram.c
@@ -24,8 +24,7 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/nvram/openbios_firmware_abi.h"
-#include "sysemu/sysemu.h"
+#include "hw/nvram/chrp_nvram.h"
#include "hw/ppc/mac.h"
#include "qemu/cutils.h"
#include <zlib.h>
@@ -146,38 +145,14 @@ static void macio_nvram_register_types(void)
static void pmac_format_nvram_partition_of(MacIONVRAMState *nvr, int off,
int len)
{
- unsigned int i;
- uint32_t start = off, end;
- struct OpenBIOS_nvpart_v1 *part_header;
-
- // OpenBIOS nvram variables
- // Variable partition
- part_header = (struct OpenBIOS_nvpart_v1 *)&nvr->data[start];
- part_header->signature = OPENBIOS_PART_SYSTEM;
- pstrcpy(part_header->name, sizeof(part_header->name), "system");
-
- end = start + sizeof(struct OpenBIOS_nvpart_v1);
- for (i = 0; i < nb_prom_envs; i++)
- end = OpenBIOS_set_var(nvr->data, end, prom_envs[i]);
-
- // End marker
- nvr->data[end++] = '\0';
-
- end = start + ((end - start + 15) & ~15);
- /* XXX: OpenBIOS is not able to grow up a partition. Leave some space for
- new variables. */
- if (end < DEF_SYSTEM_SIZE)
- end = DEF_SYSTEM_SIZE;
- OpenBIOS_finish_partition(part_header, end - start);
-
- // free partition
- start = end;
- part_header = (struct OpenBIOS_nvpart_v1 *)&nvr->data[start];
- part_header->signature = OPENBIOS_PART_FREE;
- pstrcpy(part_header->name, sizeof(part_header->name), "free");
-
- end = len;
- OpenBIOS_finish_partition(part_header, end - start);
+ int sysp_end;
+
+ /* OpenBIOS nvram variables partition */
+ sysp_end = chrp_nvram_create_system_partition(&nvr->data[off],
+ DEF_SYSTEM_SIZE) + off;
+
+ /* Free space partition */
+ chrp_nvram_create_free_partition(&nvr->data[sysp_end], len - sysp_end);
}
#define OSX_NVRAM_SIGNATURE (0x5A)
@@ -187,15 +162,15 @@ static void pmac_format_nvram_partition_osx(MacIONVRAMState *nvr, int off,
int len)
{
uint32_t start = off;
- struct OpenBIOS_nvpart_v1 *part_header;
+ ChrpNvramPartHdr *part_header;
unsigned char *data = &nvr->data[start];
/* empty partition */
- part_header = (struct OpenBIOS_nvpart_v1 *)data;
+ part_header = (ChrpNvramPartHdr *)data;
part_header->signature = OSX_NVRAM_SIGNATURE;
pstrcpy(part_header->name, sizeof(part_header->name), "wwwwwwwwwwww");
- OpenBIOS_finish_partition(part_header, len);
+ chrp_nvram_finish_partition(part_header, len);
/* Generation */
stl_be_p(&data[20], 2);
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index 4de5f705d8..eb42ea323f 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -31,6 +31,7 @@
#include "sysemu/block-backend.h"
#include "sysemu/device_tree.h"
#include "hw/sysbus.h"
+#include "hw/nvram/chrp_nvram.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
@@ -162,6 +163,11 @@ static void spapr_nvram_realize(VIOsPAPRDevice *dev, Error **errp)
error_setg(errp, "can't read spapr-nvram contents");
return;
}
+ } else if (nb_prom_envs > 0) {
+ /* Create a system partition to pass the -prom-env variables */
+ chrp_nvram_create_system_partition(nvram->buf, MIN_NVRAM_SIZE / 4);
+ chrp_nvram_create_free_partition(&nvram->buf[MIN_NVRAM_SIZE / 4],
+ nvram->size - MIN_NVRAM_SIZE / 4);
}
spapr_rtas_register(RTAS_NVRAM_FETCH, "nvram-fetch", rtas_nvram_fetch);
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index 1cc598f7e9..6ac187fa32 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -15,7 +15,6 @@
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
-#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_bridge.h"
#include "hw/i386/pc.h"
#include "qemu/range.h"
diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c
index 7aac4d67a4..df342ac3cb 100644
--- a/hw/pci-host/uninorth.c
+++ b/hw/pci-host/uninorth.c
@@ -62,9 +62,7 @@ typedef struct UNINState {
static int pci_unin_map_irq(PCIDevice *pci_dev, int irq_num)
{
- int devfn = pci_dev->devfn & 0x00FFFFFF;
-
- return (((devfn >> 11) & 0x1F) + irq_num) & 3;
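+ /* swizzle the interrupt line by PCI slot number (devfn >> 3) */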
+ return (irq_num + (pci_dev->devfn >> 3)) & 3;
}
static void pci_unin_set_irq(void *opaque, int irq_num, int level)
diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index 91a3420f47..8025129377 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -1,10 +1,12 @@
# shared objects
-obj-y += ppc.o ppc_booke.o
+obj-y += ppc.o ppc_booke.o fdt.o
# IBM pSeries (sPAPR)
obj-$(CONFIG_PSERIES) += spapr.o spapr_vio.o spapr_events.o
obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o
obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o spapr_rng.o
-obj-$(CONFIG_PSERIES) += spapr_cpu_core.o
+obj-$(CONFIG_PSERIES) += spapr_cpu_core.o spapr_ovec.o
+# IBM PowerNV
+obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o
ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
obj-y += spapr_pci_vfio.o
endif
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index 0cd534df55..cf8b122afe 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -196,7 +196,7 @@ static int create_devtree_etsec(SysBusDevice *sbdev, PlatformDevtreeData *data)
return 0;
}
-static int sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
+static void sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
{
PlatformDevtreeData *data = opaque;
bool matched = false;
@@ -211,8 +211,6 @@ static int sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
qdev_fw_name(DEVICE(sbdev)));
exit(1);
}
-
- return 0;
}
static void platform_bus_create_devtree(PPCE500Params *params, void *fdt,
diff --git a/hw/ppc/fdt.c b/hw/ppc/fdt.c
new file mode 100644
index 0000000000..e67d60d03c
--- /dev/null
+++ b/hw/ppc/fdt.c
@@ -0,0 +1,49 @@
+/*
+ * QEMU PowerPC helper routines for the device tree.
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "target-ppc/cpu.h"
+
+#include "hw/ppc/fdt.h"
+
+#if defined(TARGET_PPC64)
+size_t ppc_create_page_sizes_prop(CPUPPCState *env, uint32_t *prop,
+ size_t maxsize)
+{
+ size_t maxcells = maxsize / sizeof(uint32_t);
+ int i, j, count;
+ uint32_t *p = prop;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+
+ if (!sps->page_shift) {
+ break;
+ }
+ for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) {
+ if (sps->enc[count].page_shift == 0) {
+ break;
+ }
+ }
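+ /* each segment entry needs 3 cells plus 2 cells per page-size encoding */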
+ if ((p - prop) >= (maxcells - 3 - count * 2)) {
+ break;
+ }
+ *(p++) = cpu_to_be32(sps->page_shift);
+ *(p++) = cpu_to_be32(sps->slb_enc);
+ *(p++) = cpu_to_be32(count);
+ for (j = 0; j < count; j++) {
+ *(p++) = cpu_to_be32(sps->enc[j].page_shift);
+ *(p++) = cpu_to_be32(sps->enc[j].pte_enc);
+ }
+ }
+
+ return (p - prop) * sizeof(uint32_t);
+}
+#endif
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index 7d2510658d..2bfdb643df 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -466,6 +466,7 @@ static void ppc_core99_init(MachineState *machine)
/* No PCI init: the BIOS will do it */
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch);
diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c
index 447948746b..56282c5bc6 100644
--- a/hw/ppc/mac_oldworld.c
+++ b/hw/ppc/mac_oldworld.c
@@ -319,6 +319,7 @@ static void ppc_heathrow_init(MachineState *machine)
/* No PCI init: the BIOS will do it */
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW);
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
new file mode 100644
index 0000000000..9df7b25315
--- /dev/null
+++ b/hw/ppc/pnv.c
@@ -0,0 +1,821 @@
+/*
+ * QEMU PowerPC PowerNV machine model
+ *
+ * Copyright (c) 2016, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/numa.h"
+#include "hw/hw.h"
+#include "target-ppc/cpu.h"
+#include "qemu/log.h"
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_core.h"
+#include "hw/loader.h"
+#include "exec/address-spaces.h"
+#include "qemu/cutils.h"
+#include "qapi/visitor.h"
+
+#include "hw/ppc/pnv_xscom.h"
+
+#include "hw/isa/isa.h"
+#include "hw/char/serial.h"
+#include "hw/timer/mc146818rtc.h"
+
+#include <libfdt.h>
+
+#define FDT_MAX_SIZE 0x00100000
+
+#define FW_FILE_NAME "skiboot.lid"
+#define FW_LOAD_ADDR 0x0
+#define FW_MAX_SIZE 0x00400000
+
+#define KERNEL_LOAD_ADDR 0x20000000
+#define INITRD_LOAD_ADDR 0x40000000
+
+/*
+ * On Power Systems E880 (POWER8), the max cpus (threads) should be:
+ * 4 * 4 sockets * 12 cores * 8 threads = 1536
+ * Let's make it 2^11
+ */
+#define MAX_CPUS 2048
+
+/*
+ * Memory nodes are created by hostboot, one for each range of memory
+ * that has a different "affinity". In practice, it means one range
+ * per chip.
+ */
+static void powernv_populate_memory_node(void *fdt, int chip_id, hwaddr start,
+ hwaddr size)
+{
+ char *mem_name;
+ uint64_t mem_reg_property[2];
+ int off;
+
+ mem_reg_property[0] = cpu_to_be64(start);
+ mem_reg_property[1] = cpu_to_be64(size);
+
+ mem_name = g_strdup_printf("memory@%"HWADDR_PRIx, start);
+ off = fdt_add_subnode(fdt, 0, mem_name);
+ g_free(mem_name);
+
+ _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
+ _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
+ sizeof(mem_reg_property))));
+ _FDT((fdt_setprop_cell(fdt, off, "ibm,chip-id", chip_id)));
+}
+
+static int get_cpus_node(void *fdt)
+{
+ int cpus_offset = fdt_path_offset(fdt, "/cpus");
+
+ if (cpus_offset < 0) {
+ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
+ "cpus");
+ if (cpus_offset) {
+ _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
+ _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
+ }
+ }
+ _FDT(cpus_offset);
+ return cpus_offset;
+}
+
+/*
+ * The PowerNV cores (and threads) need to use real HW ids and not an
+ * incremental index like it has been done on other platforms. This HW
+ * id is stored in the CPU PIR, it is used to create cpu nodes in the
+ * device tree, used in XSCOM to address cores and in interrupt
+ * servers.
+ */
+static void powernv_create_core_node(PnvChip *chip, PnvCore *pc, void *fdt)
+{
+ CPUState *cs = CPU(DEVICE(pc->threads));
+ DeviceClass *dc = DEVICE_GET_CLASS(cs);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ int smt_threads = CPU_CORE(pc)->nr_threads;
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ uint32_t servers_prop[smt_threads];
+ int i;
+ uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
+ 0xffffffff, 0xffffffff};
+ uint32_t tbfreq = PNV_TIMEBASE_FREQ;
+ uint32_t cpufreq = 1000000000;
+ uint32_t page_sizes_prop[64];
+ size_t page_sizes_prop_size;
+ const uint8_t pa_features[] = { 24, 0,
+ 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xf0,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
+ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 };
+ int offset;
+ char *nodename;
+ int cpus_offset = get_cpus_node(fdt);
+
+ nodename = g_strdup_printf("%s@%x", dc->fw_name, pc->pir);
+ offset = fdt_add_subnode(fdt, cpus_offset, nodename);
+ _FDT(offset);
+ g_free(nodename);
+
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", chip->chip_id)));
+
+ _FDT((fdt_setprop_cell(fdt, offset, "reg", pc->pir)));
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,pir", pc->pir)));
+ _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
+
+ _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
+ env->dcache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
+ env->dcache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
+ env->icache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
+ env->icache_line_size)));
+
+ if (pcc->l1_dcache_size) {
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
+ pcc->l1_dcache_size)));
+ } else {
+ error_report("Warning: Unknown L1 dcache size for cpu");
+ }
+ if (pcc->l1_icache_size) {
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
+ pcc->l1_icache_size)));
+ } else {
+ error_report("Warning: Unknown L1 icache size for cpu");
+ }
+
+ _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
+ _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
+ _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
+ _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
+
+ if (env->spr_cb[SPR_PURR].oea_read) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
+ }
+
+ if (env->mmu_model & POWERPC_MMU_1TSEG) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
+ segs, sizeof(segs))));
+ }
+
+ /* Advertise VMX/VSX (vector extensions) if available
+ * 0 / no property == no vector extensions
+ * 1 == VMX / Altivec available
+ * 2 == VSX available */
+ if (env->insns_flags & PPC_ALTIVEC) {
+ uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;
+
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx)));
+ }
+
+ /* Advertise DFP (Decimal Floating Point) if available
+ * 0 / no property == no DFP
+ * 1 == DFP available */
+ if (env->insns_flags2 & PPC2_DFP) {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
+ }
+
+ page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
+ sizeof(page_sizes_prop));
+ if (page_sizes_prop_size) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
+ page_sizes_prop, page_sizes_prop_size)));
+ }
+
+ _FDT((fdt_setprop(fdt, offset, "ibm,pa-features",
+ pa_features, sizeof(pa_features))));
+
+ /* Build interrupt servers properties */
+ for (i = 0; i < smt_threads; i++) {
+ servers_prop[i] = cpu_to_be32(pc->pir + i);
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
+ servers_prop, sizeof(servers_prop))));
+}
+
+static void powernv_populate_chip(PnvChip *chip, void *fdt)
+{
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+ char *typename = pnv_core_typename(pcc->cpu_model);
+ size_t typesize = object_type_get_instance_size(typename);
+ int i;
+
+ pnv_xscom_populate(chip, fdt, 0);
+
+ for (i = 0; i < chip->nr_cores; i++) {
+ PnvCore *pnv_core = PNV_CORE(chip->cores + i * typesize);
+
+ powernv_create_core_node(chip, pnv_core, fdt);
+ }
+
+ if (chip->ram_size) {
+ powernv_populate_memory_node(fdt, chip->chip_id, chip->ram_start,
+ chip->ram_size);
+ }
+ g_free(typename);
+}
+
+static void *powernv_create_fdt(MachineState *machine)
+{
+ const char plat_compat[] = "qemu,powernv\0ibm,powernv";
+ PnvMachineState *pnv = POWERNV_MACHINE(machine);
+ void *fdt;
+ char *buf;
+ int off;
+ int i;
+
+ fdt = g_malloc0(FDT_MAX_SIZE);
+ _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
+
+ /* Root node */
+ _FDT((fdt_setprop_cell(fdt, 0, "#address-cells", 0x2)));
+ _FDT((fdt_setprop_cell(fdt, 0, "#size-cells", 0x2)));
+ _FDT((fdt_setprop_string(fdt, 0, "model",
+ "IBM PowerNV (emulated by qemu)")));
+ _FDT((fdt_setprop(fdt, 0, "compatible", plat_compat,
+ sizeof(plat_compat))));
+
+ buf = qemu_uuid_unparse_strdup(&qemu_uuid);
+ _FDT((fdt_setprop_string(fdt, 0, "vm,uuid", buf)));
+ if (qemu_uuid_set) {
+ _FDT((fdt_property_string(fdt, "system-id", buf)));
+ }
+ g_free(buf);
+
+ off = fdt_add_subnode(fdt, 0, "chosen");
+ if (machine->kernel_cmdline) {
+ _FDT((fdt_setprop_string(fdt, off, "bootargs",
+ machine->kernel_cmdline)));
+ }
+
+ if (pnv->initrd_size) {
+ uint32_t start_prop = cpu_to_be32(pnv->initrd_base);
+ uint32_t end_prop = cpu_to_be32(pnv->initrd_base + pnv->initrd_size);
+
+ _FDT((fdt_setprop(fdt, off, "linux,initrd-start",
+ &start_prop, sizeof(start_prop))));
+ _FDT((fdt_setprop(fdt, off, "linux,initrd-end",
+ &end_prop, sizeof(end_prop))));
+ }
+
+ /* Populate device tree for each chip */
+ for (i = 0; i < pnv->num_chips; i++) {
+ powernv_populate_chip(pnv->chips[i], fdt);
+ }
+ return fdt;
+}
+
+static void ppc_powernv_reset(void)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+ void *fdt;
+
+ qemu_devices_reset();
+
+ fdt = powernv_create_fdt(machine);
+
+ /* Pack resulting tree */
+ _FDT((fdt_pack(fdt)));
+
+ cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt));
+}
+
+/* If we don't use the built-in LPC interrupt deserializer, we need
+ * to provide a set of qirqs for the ISA bus or things will go bad.
+ *
+ * Most machines using pre-Naples chips (without said deserializer)
+ * have a CPLD that will collect the SerIRQ and shoot them as a
+ * single level interrupt to the P8 chip. So let's set up a hook
+ * for doing just that.
+ *
+ * Note: The actual interrupt input isn't emulated yet; this will
+ * come with the PSI bridge model.
+ */
+static void pnv_lpc_isa_irq_handler_cpld(void *opaque, int n, int level)
+{
+ /* We don't yet emulate the PSI bridge which provides the external
+ * interrupt, so just drop interrupts on the floor
+ */
+}
+
+static void pnv_lpc_isa_irq_handler(void *opaque, int n, int level)
+{
+ /* XXX TODO */
+}
+
+static ISABus *pnv_isa_create(PnvChip *chip)
+{
+ PnvLpcController *lpc = &chip->lpc;
+ ISABus *isa_bus;
+ qemu_irq *irqs;
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+
+ /* let isa_bus_new() create its own bridge on SysBus, otherwise
+ * devices specified on the command line won't find the bus and
+ * will fail to create.
+ */
+ isa_bus = isa_bus_new(NULL, &lpc->isa_mem, &lpc->isa_io,
+ &error_fatal);
+
+ /* Not all variants have a working serial irq decoder. If not,
+ * handling of LPC interrupts becomes a platform issue (some
+ * platforms have a CPLD to do it).
+ */
+ if (pcc->chip_type == PNV_CHIP_POWER8NVL) {
+ irqs = qemu_allocate_irqs(pnv_lpc_isa_irq_handler, chip, ISA_NUM_IRQS);
+ } else {
+ irqs = qemu_allocate_irqs(pnv_lpc_isa_irq_handler_cpld, chip,
+ ISA_NUM_IRQS);
+ }
+
+ isa_bus_irqs(isa_bus, irqs);
+ return isa_bus;
+}
+
+static void ppc_powernv_init(MachineState *machine)
+{
+ PnvMachineState *pnv = POWERNV_MACHINE(machine);
+ MemoryRegion *ram;
+ char *fw_filename;
+ long fw_size;
+ int i;
+ char *chip_typename;
+
+ /* allocate RAM */
+ if (machine->ram_size < (1 * G_BYTE)) {
+ error_report("Warning: skiboot may not work with < 1GB of RAM");
+ }
+
+ ram = g_new(MemoryRegion, 1);
+ memory_region_allocate_system_memory(ram, NULL, "ppc_powernv.ram",
+ machine->ram_size);
+ memory_region_add_subregion(get_system_memory(), 0, ram);
+
+ /* load skiboot firmware */
+ if (bios_name == NULL) {
+ bios_name = FW_FILE_NAME;
+ }
+
+ fw_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+
+ fw_size = load_image_targphys(fw_filename, FW_LOAD_ADDR, FW_MAX_SIZE);
+ if (fw_size < 0) {
+ hw_error("qemu: could not load OPAL '%s'\n", fw_filename);
+ exit(1);
+ }
+ g_free(fw_filename);
+
+ /* load kernel */
+ if (machine->kernel_filename) {
+ long kernel_size;
+
+ kernel_size = load_image_targphys(machine->kernel_filename,
+ KERNEL_LOAD_ADDR, 0x2000000);
+ if (kernel_size < 0) {
+ hw_error("qemu: could not load kernel'%s'\n",
+ machine->kernel_filename);
+ exit(1);
+ }
+ }
+
+ /* load initrd */
+ if (machine->initrd_filename) {
+ pnv->initrd_base = INITRD_LOAD_ADDR;
+ pnv->initrd_size = load_image_targphys(machine->initrd_filename,
+ pnv->initrd_base, 0x10000000); /* 128MB max */
+ if (pnv->initrd_size < 0) {
+ error_report("qemu: could not load initial ram disk '%s'",
+ machine->initrd_filename);
+ exit(1);
+ }
+ }
+
+ /* We need some cpu model to instantiate the PnvChip class */
+ if (machine->cpu_model == NULL) {
+ machine->cpu_model = "POWER8";
+ }
+
+ /* Create the processor chips */
+ chip_typename = g_strdup_printf(TYPE_PNV_CHIP "-%s", machine->cpu_model);
+ if (!object_class_by_name(chip_typename)) {
+ error_report("qemu: invalid CPU model '%s' for %s machine",
+ machine->cpu_model, MACHINE_GET_CLASS(machine)->name);
+ exit(1);
+ }
+
+ pnv->chips = g_new0(PnvChip *, pnv->num_chips);
+ for (i = 0; i < pnv->num_chips; i++) {
+ char chip_name[32];
+ Object *chip = object_new(chip_typename);
+
+ pnv->chips[i] = PNV_CHIP(chip);
+
+ /* TODO: put all the memory in one node on chip 0 until we find a
+ * way to specify different ranges for each chip
+ */
+ if (i == 0) {
+ object_property_set_int(chip, machine->ram_size, "ram-size",
+ &error_fatal);
+ }
+
+ snprintf(chip_name, sizeof(chip_name), "chip[%d]", PNV_CHIP_HWID(i));
+ object_property_add_child(OBJECT(pnv), chip_name, chip, &error_fatal);
+ object_property_set_int(chip, PNV_CHIP_HWID(i), "chip-id",
+ &error_fatal);
+ object_property_set_int(chip, smp_cores, "nr-cores", &error_fatal);
+ object_property_set_bool(chip, true, "realized", &error_fatal);
+ }
+ g_free(chip_typename);
+
+ /* Instantiate ISA bus on chip 0 */
+ pnv->isa_bus = pnv_isa_create(pnv->chips[0]);
+
+ /* Create serial port */
+ serial_hds_isa_init(pnv->isa_bus, 0, MAX_SERIAL_PORTS);
+
+ /* Create an RTC ISA device too */
+ rtc_init(pnv->isa_bus, 2000, NULL);
+}
+
+/*
+ * 0:21 Reserved - Read as zeros
+ * 22:24 Chip ID
+ * 25:28 Core number
+ * 29:31 Thread ID
+ */
+static uint32_t pnv_chip_core_pir_p8(PnvChip *chip, uint32_t core_id)
+{
+ return (chip->chip_id << 7) | (core_id << 3);
+}
+
+/*
+ * 0:48 Reserved - Read as zeroes
+ * 49:52 Node ID
+ * 53:55 Chip ID
+ * 56 Reserved - Read as zero
+ * 57:61 Core number
+ * 62:63 Thread ID
+ *
+ * We only care about the lower bits. uint32_t is fine for the moment.
+ */
+static uint32_t pnv_chip_core_pir_p9(PnvChip *chip, uint32_t core_id)
+{
+ return (chip->chip_id << 8) | (core_id << 2);
+}
+
+/* Allowed core identifiers on a POWER8 Processor Chip:
+ *
+ * <EX0 reserved>
+ * EX1 - Venice only
+ * EX2 - Venice only
+ * EX3 - Venice only
+ * EX4
+ * EX5
+ * EX6
+ * <EX7,8 reserved> <reserved>
+ * EX9 - Venice only
+ * EX10 - Venice only
+ * EX11 - Venice only
+ * EX12
+ * EX13
+ * EX14
+ * <EX15 reserved>
+ */
+#define POWER8E_CORE_MASK (0x7070ull)
+#define POWER8_CORE_MASK (0x7e7eull)
+
+/*
+ * POWER9 has 24 cores, ids starting at 0x20
+ */
+#define POWER9_CORE_MASK (0xffffff00000000ull)
+
+static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvChipClass *k = PNV_CHIP_CLASS(klass);
+
+ k->cpu_model = "POWER8E";
+ k->chip_type = PNV_CHIP_POWER8E;
+ k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */
+ k->cores_mask = POWER8E_CORE_MASK;
+ k->core_pir = pnv_chip_core_pir_p8;
+ k->xscom_base = 0x003fc0000000000ull;
+ k->xscom_core_base = 0x10000000ull;
+ dc->desc = "PowerNV Chip POWER8E";
+}
+
+static const TypeInfo pnv_chip_power8e_info = {
+ .name = TYPE_PNV_CHIP_POWER8E,
+ .parent = TYPE_PNV_CHIP,
+ .instance_size = sizeof(PnvChip),
+ .class_init = pnv_chip_power8e_class_init,
+};
+
+static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvChipClass *k = PNV_CHIP_CLASS(klass);
+
+ k->cpu_model = "POWER8";
+ k->chip_type = PNV_CHIP_POWER8;
+ k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */
+ k->cores_mask = POWER8_CORE_MASK;
+ k->core_pir = pnv_chip_core_pir_p8;
+ k->xscom_base = 0x003fc0000000000ull;
+ k->xscom_core_base = 0x10000000ull;
+ dc->desc = "PowerNV Chip POWER8";
+}
+
+static const TypeInfo pnv_chip_power8_info = {
+ .name = TYPE_PNV_CHIP_POWER8,
+ .parent = TYPE_PNV_CHIP,
+ .instance_size = sizeof(PnvChip),
+ .class_init = pnv_chip_power8_class_init,
+};
+
+static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvChipClass *k = PNV_CHIP_CLASS(klass);
+
+ k->cpu_model = "POWER8NVL";
+ k->chip_type = PNV_CHIP_POWER8NVL;
+ k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */
+ k->cores_mask = POWER8_CORE_MASK;
+ k->core_pir = pnv_chip_core_pir_p8;
+ k->xscom_base = 0x003fc0000000000ull;
+ k->xscom_core_base = 0x10000000ull;
+ dc->desc = "PowerNV Chip POWER8NVL";
+}
+
+static const TypeInfo pnv_chip_power8nvl_info = {
+ .name = TYPE_PNV_CHIP_POWER8NVL,
+ .parent = TYPE_PNV_CHIP,
+ .instance_size = sizeof(PnvChip),
+ .class_init = pnv_chip_power8nvl_class_init,
+};
+
+static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvChipClass *k = PNV_CHIP_CLASS(klass);
+
+ k->cpu_model = "POWER9";
+ k->chip_type = PNV_CHIP_POWER9;
+ k->chip_cfam_id = 0x100d104980000000ull; /* P9 Nimbus DD1.0 */
+ k->cores_mask = POWER9_CORE_MASK;
+ k->core_pir = pnv_chip_core_pir_p9;
+ k->xscom_base = 0x00603fc00000000ull;
+ k->xscom_core_base = 0x0ull;
+ dc->desc = "PowerNV Chip POWER9";
+}
+
+static const TypeInfo pnv_chip_power9_info = {
+ .name = TYPE_PNV_CHIP_POWER9,
+ .parent = TYPE_PNV_CHIP,
+ .instance_size = sizeof(PnvChip),
+ .class_init = pnv_chip_power9_class_init,
+};
+
+static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
+{
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+ int cores_max;
+
+ /*
+ * No custom mask for this chip, let's use the default one from
+ * the chip class
+ */
+ if (!chip->cores_mask) {
+ chip->cores_mask = pcc->cores_mask;
+ }
+
+ /* filter out alien core ids; some are reserved */
+ if ((chip->cores_mask & pcc->cores_mask) != chip->cores_mask) {
+ error_setg(errp, "warning: invalid core mask for chip 0x%"PRIx64"!",
+ chip->cores_mask);
+ return;
+ }
+ chip->cores_mask &= pcc->cores_mask;
+
+ /* now that we have a sane layout, let's check the number of cores */
+ cores_max = ctpop64(chip->cores_mask);
+ if (chip->nr_cores > cores_max) {
+ error_setg(errp, "warning: too many cores for chip! Limit is %d",
+ cores_max);
+ return;
+ }
+}
+
+static void pnv_chip_init(Object *obj)
+{
+ PnvChip *chip = PNV_CHIP(obj);
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+
+ chip->xscom_base = pcc->xscom_base;
+
+ object_initialize(&chip->lpc, sizeof(chip->lpc), TYPE_PNV_LPC);
+ object_property_add_child(obj, "lpc", OBJECT(&chip->lpc), NULL);
+}
+
+static void pnv_chip_realize(DeviceState *dev, Error **errp)
+{
+ PnvChip *chip = PNV_CHIP(dev);
+ Error *error = NULL;
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+ char *typename = pnv_core_typename(pcc->cpu_model);
+ size_t typesize = object_type_get_instance_size(typename);
+ int i, core_hwid;
+
+ if (!object_class_by_name(typename)) {
+ error_setg(errp, "Unable to find PowerNV CPU Core '%s'", typename);
+ return;
+ }
+
+ /* XSCOM bridge */
+ pnv_xscom_realize(chip, &error);
+ if (error) {
+ error_propagate(errp, error);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(chip), 0, PNV_XSCOM_BASE(chip));
+
+ /* Cores */
+ pnv_chip_core_sanitize(chip, &error);
+ if (error) {
+ error_propagate(errp, error);
+ return;
+ }
+
+ chip->cores = g_malloc0(typesize * chip->nr_cores);
+
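+ /* walk the allowed core mask and instantiate nr_cores cores, skipping reserved ids */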
+ for (i = 0, core_hwid = 0; (core_hwid < sizeof(chip->cores_mask) * 8)
+ && (i < chip->nr_cores); core_hwid++) {
+ char core_name[32];
+ void *pnv_core = chip->cores + i * typesize;
+
+ if (!(chip->cores_mask & (1ull << core_hwid))) {
+ continue;
+ }
+
+ object_initialize(pnv_core, typesize, typename);
+ snprintf(core_name, sizeof(core_name), "core[%d]", core_hwid);
+ object_property_add_child(OBJECT(chip), core_name, OBJECT(pnv_core),
+ &error_fatal);
+ object_property_set_int(OBJECT(pnv_core), smp_threads, "nr-threads",
+ &error_fatal);
+ object_property_set_int(OBJECT(pnv_core), core_hwid,
+ CPU_CORE_PROP_CORE_ID, &error_fatal);
+ object_property_set_int(OBJECT(pnv_core),
+ pcc->core_pir(chip, core_hwid),
+ "pir", &error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), true, "realized",
+ &error_fatal);
+ object_unref(OBJECT(pnv_core));
+
+ /* Each core has an XSCOM MMIO region */
+ pnv_xscom_add_subregion(chip,
+ PNV_XSCOM_EX_CORE_BASE(pcc->xscom_core_base,
+ core_hwid),
+ &PNV_CORE(pnv_core)->xscom_regs);
+ i++;
+ }
+ g_free(typename);
+
+ /* Create LPC controller */
+ object_property_set_bool(OBJECT(&chip->lpc), true, "realized",
+ &error_fatal);
+ pnv_xscom_add_subregion(chip, PNV_XSCOM_LPC_BASE, &chip->lpc.xscom_regs);
+}
+
+static Property pnv_chip_properties[] = {
+ DEFINE_PROP_UINT32("chip-id", PnvChip, chip_id, 0),
+ DEFINE_PROP_UINT64("ram-start", PnvChip, ram_start, 0),
+ DEFINE_PROP_UINT64("ram-size", PnvChip, ram_size, 0),
+ DEFINE_PROP_UINT32("nr-cores", PnvChip, nr_cores, 1),
+ DEFINE_PROP_UINT64("cores-mask", PnvChip, cores_mask, 0x0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_chip_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = pnv_chip_realize;
+ dc->props = pnv_chip_properties;
+ dc->desc = "PowerNV Chip";
+}
+
+static const TypeInfo pnv_chip_info = {
+ .name = TYPE_PNV_CHIP,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .class_init = pnv_chip_class_init,
+ .instance_init = pnv_chip_init,
+ .class_size = sizeof(PnvChipClass),
+ .abstract = true,
+};
+
+static void pnv_get_num_chips(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ visit_type_uint32(v, name, &POWERNV_MACHINE(obj)->num_chips, errp);
+}
+
+static void pnv_set_num_chips(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ PnvMachineState *pnv = POWERNV_MACHINE(obj);
+ uint32_t num_chips;
+ Error *local_err = NULL;
+
+ visit_type_uint32(v, name, &num_chips, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /*
+ * TODO: should we decide on how many chips we can create based
+ * on #cores and Venice vs. Murano vs. Naples chip type etc...,
+ */
+ if (!is_power_of_2(num_chips) || num_chips > 4) {
+ error_setg(errp, "invalid number of chips: '%d'", num_chips);
+ return;
+ }
+
+ pnv->num_chips = num_chips;
+}
+
+static void powernv_machine_initfn(Object *obj)
+{
+ PnvMachineState *pnv = POWERNV_MACHINE(obj);
+ pnv->num_chips = 1;
+}
+
+static void powernv_machine_class_props_init(ObjectClass *oc)
+{
+ object_class_property_add(oc, "num-chips", "uint32_t",
+ pnv_get_num_chips, pnv_set_num_chips,
+ NULL, NULL, NULL);
+ object_class_property_set_description(oc, "num-chips",
+ "Specifies the number of processor chips",
+ NULL);
+}
+
+static void powernv_machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "IBM PowerNV (Non-Virtualized)";
+ mc->init = ppc_powernv_init;
+ mc->reset = ppc_powernv_reset;
+ mc->max_cpus = MAX_CPUS;
+ mc->block_default_type = IF_IDE; /* Pnv provides an AHCI device for
+ * storage */
+ mc->no_parallel = 1;
+ mc->default_boot_order = NULL;
+ mc->default_ram_size = 1 * G_BYTE;
+
+ powernv_machine_class_props_init(oc);
+}
+
+static const TypeInfo powernv_machine_info = {
+ .name = TYPE_POWERNV_MACHINE,
+ .parent = TYPE_MACHINE,
+ .instance_size = sizeof(PnvMachineState),
+ .instance_init = powernv_machine_initfn,
+ .class_init = powernv_machine_class_init,
+};
+
+static void powernv_machine_register_types(void)
+{
+ type_register_static(&powernv_machine_info);
+ type_register_static(&pnv_chip_info);
+ type_register_static(&pnv_chip_power8e_info);
+ type_register_static(&pnv_chip_power8_info);
+ type_register_static(&pnv_chip_power8nvl_info);
+ type_register_static(&pnv_chip_power9_info);
+}
+
+type_init(powernv_machine_register_types)
diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c
new file mode 100644
index 0000000000..76ce854b0c
--- /dev/null
+++ b/hw/ppc/pnv_core.c
@@ -0,0 +1,233 @@
+/*
+ * QEMU PowerPC PowerNV CPU Core model
+ *
+ * Copyright (c) 2016, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "sysemu/sysemu.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "target-ppc/cpu.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_core.h"
+#include "hw/ppc/pnv_xscom.h"
+
+static void powernv_cpu_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_reset(cs);
+
+ /*
+ * the skiboot firmware elects a primary thread to initialize the
+ * system and it can be any of them.
+ */
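+ /* r3 points to the flattened device tree; execution starts at 0x10 */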
+ env->gpr[3] = PNV_FDT_ADDR;
+ env->nip = 0x10;
+ env->msr |= MSR_HVB; /* Hypervisor mode */
+}
+
+static void powernv_cpu_init(PowerPCCPU *cpu, Error **errp)
+{
+ CPUPPCState *env = &cpu->env;
+ int core_pir;
+ int thread_index = 0; /* TODO: TCG supports only one thread */
+ ppc_spr_t *pir = &env->spr_cb[SPR_PIR];
+
+ core_pir = object_property_get_int(OBJECT(cpu), "core-pir", &error_abort);
+
+ /*
+ * The PIR of a thread is the core PIR + the thread index. We will
+ * need to find a way to get the thread index when TCG supports
+ * more than 1. We could use the object name?
+ */
+ pir->default_value = core_pir + thread_index;
+
+ /* Set time-base frequency to 512 MHz */
+ cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ);
+
+ qemu_register_reset(powernv_cpu_reset, cpu);
+}
+
+/*
+ * These values are read by the PowerNV HW monitors under Linux
+ */
+#define PNV_XSCOM_EX_DTS_RESULT0 0x50000
+#define PNV_XSCOM_EX_DTS_RESULT1 0x50001
+
+static uint64_t pnv_core_xscom_read(void *opaque, hwaddr addr,
+ unsigned int width)
+{
+ uint32_t offset = addr >> 3;
+ uint64_t val = 0;
+
+ /* The result should be 38 C */
+ switch (offset) {
+ case PNV_XSCOM_EX_DTS_RESULT0:
+ val = 0x26f024f023f0000ull;
+ break;
+ case PNV_XSCOM_EX_DTS_RESULT1:
+ val = 0x24f000000000000ull;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Warning: reading reg=0x%" HWADDR_PRIx,
+ addr);
+ }
+
+ return val;
+}
+
+static void pnv_core_xscom_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned int width)
+{
+ qemu_log_mask(LOG_UNIMP, "Warning: writing to reg=0x%" HWADDR_PRIx,
+ addr);
+}
+
+static const MemoryRegionOps pnv_core_xscom_ops = {
+ .read = pnv_core_xscom_read,
+ .write = pnv_core_xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void pnv_core_realize_child(Object *child, Error **errp)
+{
+ Error *local_err = NULL;
+ CPUState *cs = CPU(child);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ object_property_set_bool(child, true, "realized", &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ powernv_cpu_init(cpu, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+}
+
+static void pnv_core_realize(DeviceState *dev, Error **errp)
+{
+ PnvCore *pc = PNV_CORE(OBJECT(dev));
+ CPUCore *cc = CPU_CORE(OBJECT(dev));
+ PnvCoreClass *pcc = PNV_CORE_GET_CLASS(OBJECT(dev));
+ const char *typename = object_class_get_name(pcc->cpu_oc);
+ size_t size = object_type_get_instance_size(typename);
+ Error *local_err = NULL;
+ void *obj;
+ int i, j;
+ char name[32];
+
+ pc->threads = g_malloc0(size * cc->nr_threads);
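+ /* initialise all thread objects first; they are realized in a second pass below */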
+ for (i = 0; i < cc->nr_threads; i++) {
+ obj = pc->threads + i * size;
+
+ object_initialize(obj, size, typename);
+
+ snprintf(name, sizeof(name), "thread[%d]", i);
+ object_property_add_child(OBJECT(pc), name, obj, &local_err);
+ object_property_add_alias(obj, "core-pir", OBJECT(pc),
+ "pir", &local_err);
+ if (local_err) {
+ goto err;
+ }
+ object_unref(obj);
+ }
+
+ for (j = 0; j < cc->nr_threads; j++) {
+ obj = pc->threads + j * size;
+
+ pnv_core_realize_child(obj, &local_err);
+ if (local_err) {
+ goto err;
+ }
+ }
+
+ snprintf(name, sizeof(name), "xscom-core.%d", cc->core_id);
+ pnv_xscom_region_init(&pc->xscom_regs, OBJECT(dev), &pnv_core_xscom_ops,
+ pc, name, PNV_XSCOM_EX_CORE_SIZE);
+ return;
+
+err:
+ while (--i >= 0) {
+ obj = pc->threads + i * size;
+ object_unparent(obj);
+ }
+ g_free(pc->threads);
+ error_propagate(errp, local_err);
+}
+
+static Property pnv_core_properties[] = {
+ DEFINE_PROP_UINT32("pir", PnvCore, pir, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_core_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PnvCoreClass *pcc = PNV_CORE_CLASS(oc);
+
+ dc->realize = pnv_core_realize;
+ dc->props = pnv_core_properties;
+ pcc->cpu_oc = cpu_class_by_name(TYPE_POWERPC_CPU, data);
+}
+
+static const TypeInfo pnv_core_info = {
+ .name = TYPE_PNV_CORE,
+ .parent = TYPE_CPU_CORE,
+ .instance_size = sizeof(PnvCore),
+ .class_size = sizeof(PnvCoreClass),
+ .abstract = true,
+};
+
+static const char *pnv_core_models[] = {
+ "POWER8E", "POWER8", "POWER8NVL", "POWER9"
+};
+
+static void pnv_core_register_types(void)
+{
+ int i;
+
+ type_register_static(&pnv_core_info);
+ for (i = 0; i < ARRAY_SIZE(pnv_core_models); ++i) {
+ TypeInfo ti = {
+ .parent = TYPE_PNV_CORE,
+ .instance_size = sizeof(PnvCore),
+ .class_init = pnv_core_class_init,
+ .class_data = (void *) pnv_core_models[i],
+ };
+ ti.name = pnv_core_typename(pnv_core_models[i]);
+ type_register(&ti);
+ g_free((void *)ti.name);
+ }
+}
+
+type_init(pnv_core_register_types)
+
+char *pnv_core_typename(const char *model)
+{
+ return g_strdup_printf(TYPE_PNV_CORE "-%s", model);
+}
diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c
new file mode 100644
index 0000000000..0e2117f0f5
--- /dev/null
+++ b/hw/ppc/pnv_lpc.c
@@ -0,0 +1,472 @@
+/*
+ * QEMU PowerPC PowerNV LPC controller
+ *
+ * Copyright (c) 2016, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/sysemu.h"
+#include "target-ppc/cpu.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_lpc.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "hw/ppc/fdt.h"
+
+#include <libfdt.h>
+
+enum {
+ ECCB_CTL = 0,
+ ECCB_RESET = 1,
+ ECCB_STAT = 2,
+ ECCB_DATA = 3,
+};
+
+/* OPB Master LS registers */
+#define OPB_MASTER_LS_IRQ_STAT 0x50
+#define OPB_MASTER_IRQ_LPC 0x00000800
+#define OPB_MASTER_LS_IRQ_MASK 0x54
+#define OPB_MASTER_LS_IRQ_POL 0x58
+#define OPB_MASTER_LS_IRQ_INPUT 0x5c
+
+/* LPC HC registers */
+#define LPC_HC_FW_SEG_IDSEL 0x24
+#define LPC_HC_FW_RD_ACC_SIZE 0x28
+#define LPC_HC_FW_RD_1B 0x00000000
+#define LPC_HC_FW_RD_2B 0x01000000
+#define LPC_HC_FW_RD_4B 0x02000000
+#define LPC_HC_FW_RD_16B 0x04000000
+#define LPC_HC_FW_RD_128B 0x07000000
+#define LPC_HC_IRQSER_CTRL 0x30
+#define LPC_HC_IRQSER_EN 0x80000000
+#define LPC_HC_IRQSER_QMODE 0x40000000
+#define LPC_HC_IRQSER_START_MASK 0x03000000
+#define LPC_HC_IRQSER_START_4CLK 0x00000000
+#define LPC_HC_IRQSER_START_6CLK 0x01000000
+#define LPC_HC_IRQSER_START_8CLK 0x02000000
+#define LPC_HC_IRQMASK 0x34 /* same bit defs as LPC_HC_IRQSTAT */
+#define LPC_HC_IRQSTAT 0x38
+#define LPC_HC_IRQ_SERIRQ0 0x80000000 /* all bits down to ... */
+#define LPC_HC_IRQ_SERIRQ16 0x00008000 /* IRQ16=IOCHK#, IRQ2=SMI# */
+#define LPC_HC_IRQ_SERIRQ_ALL 0xffff8000
+#define LPC_HC_IRQ_LRESET 0x00000400
+#define LPC_HC_IRQ_SYNC_ABNORM_ERR 0x00000080
+#define LPC_HC_IRQ_SYNC_NORESP_ERR 0x00000040
+#define LPC_HC_IRQ_SYNC_NORM_ERR 0x00000020
+#define LPC_HC_IRQ_SYNC_TIMEOUT_ERR 0x00000010
+#define LPC_HC_IRQ_SYNC_TARG_TAR_ERR 0x00000008
+#define LPC_HC_IRQ_SYNC_BM_TAR_ERR 0x00000004
+#define LPC_HC_IRQ_SYNC_BM0_REQ 0x00000002
+#define LPC_HC_IRQ_SYNC_BM1_REQ 0x00000001
+#define LPC_HC_ERROR_ADDRESS 0x40
+
+#define LPC_OPB_SIZE 0x100000000ull
+
+#define ISA_IO_SIZE 0x00010000
+#define ISA_MEM_SIZE 0x10000000
+#define LPC_IO_OPB_ADDR 0xd0010000
+#define LPC_IO_OPB_SIZE 0x00010000
+#define LPC_MEM_OPB_ADDR 0xe0010000
+#define LPC_MEM_OPB_SIZE 0x10000000
+#define LPC_FW_OPB_ADDR 0xf0000000
+#define LPC_FW_OPB_SIZE 0x10000000
+
+#define LPC_OPB_REGS_OPB_ADDR 0xc0010000
+#define LPC_OPB_REGS_OPB_SIZE 0x00002000
+#define LPC_HC_REGS_OPB_ADDR 0xc0012000
+#define LPC_HC_REGS_OPB_SIZE 0x00001000
+
+
+/*
+ * TODO: the "primary" cell should only be added on chip 0. This is
+ * how skiboot chooses the default LPC controller on multichip
+ * systems.
+ *
+ * It would be easily done if we could change the populate() interface to
+ * replace the PnvXScomInterface parameter with a PnvChip one.
+ */
+static int pnv_lpc_populate(PnvXScomInterface *dev, void *fdt, int xscom_offset)
+{
+ const char compat[] = "ibm,power8-lpc\0ibm,lpc";
+ char *name;
+ int offset;
+ uint32_t lpc_pcba = PNV_XSCOM_LPC_BASE;
+ uint32_t reg[] = {
+ cpu_to_be32(lpc_pcba),
+ cpu_to_be32(PNV_XSCOM_LPC_SIZE)
+ };
+
+ name = g_strdup_printf("isa@%x", lpc_pcba);
+ offset = fdt_add_subnode(fdt, xscom_offset, name);
+ _FDT(offset);
+ g_free(name);
+
+ _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
+ _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 2)));
+ _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 1)));
+ _FDT((fdt_setprop(fdt, offset, "primary", NULL, 0)));
+ _FDT((fdt_setprop(fdt, offset, "compatible", compat, sizeof(compat))));
+ return 0;
+}
+
+/*
+ * These read/write handlers of the OPB address space should be common
+ * with the P9 LPC Controller which uses direct MMIOs.
+ *
+ * TODO: rework to use address_space_stq() and address_space_ldq()
+ * instead.
+ */
+static bool opb_read(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
+ int sz)
+{
+ bool success;
+
+ /* XXX Handle access size limits and FW read caching here */
+ success = !address_space_rw(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED,
+ data, sz, false);
+
+ return success;
+}
+
+static bool opb_write(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
+ int sz)
+{
+ bool success;
+
+ /* XXX Handle access size limits here */
+ success = !address_space_rw(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED,
+ data, sz, true);
+
+ return success;
+}
+
+#define ECCB_CTL_READ (1ull << (63 - 15))
+#define ECCB_CTL_SZ_LSH (63 - 7)
+#define ECCB_CTL_SZ_MASK (0xfull << ECCB_CTL_SZ_LSH)
+#define ECCB_CTL_ADDR_MASK 0xffffffffu
+
+#define ECCB_STAT_OP_DONE (1ull << (63 - 52))
+#define ECCB_STAT_OP_ERR (1ull << (63 - 52))
+#define ECCB_STAT_RD_DATA_LSH (63 - 37)
+#define ECCB_STAT_RD_DATA_MASK (0xffffffff << ECCB_STAT_RD_DATA_LSH)
+
+static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd)
+{
+ /* XXX Check for magic bits at the top, addr size etc... */
+ unsigned int sz = (cmd & ECCB_CTL_SZ_MASK) >> ECCB_CTL_SZ_LSH;
+ uint32_t opb_addr = cmd & ECCB_CTL_ADDR_MASK;
+ uint8_t data[4];
+ bool success;
+
+ if (cmd & ECCB_CTL_READ) {
+ success = opb_read(lpc, opb_addr, data, sz);
+ if (success) {
+ lpc->eccb_stat_reg = ECCB_STAT_OP_DONE |
+ (((uint64_t)data[0]) << 24 |
+ ((uint64_t)data[1]) << 16 |
+ ((uint64_t)data[2]) << 8 |
+ ((uint64_t)data[3])) << ECCB_STAT_RD_DATA_LSH;
+ } else {
+ lpc->eccb_stat_reg = ECCB_STAT_OP_DONE |
+ (0xffffffffull << ECCB_STAT_RD_DATA_LSH);
+ }
+ } else {
+ data[0] = lpc->eccb_data_reg >> 24;
+ data[1] = lpc->eccb_data_reg >> 16;
+ data[2] = lpc->eccb_data_reg >> 8;
+ data[3] = lpc->eccb_data_reg;
+
+ success = opb_write(lpc, opb_addr, data, sz);
+ lpc->eccb_stat_reg = ECCB_STAT_OP_DONE;
+ }
+ /* XXX Which error bit (if any) to signal OPB error? */
+}
+
+static uint64_t pnv_lpc_xscom_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PnvLpcController *lpc = PNV_LPC(opaque);
+ uint32_t offset = addr >> 3;
+ uint64_t val = 0;
+
+ switch (offset & 3) {
+ case ECCB_CTL:
+ case ECCB_RESET:
+ val = 0;
+ break;
+ case ECCB_STAT:
+ val = lpc->eccb_stat_reg;
+ lpc->eccb_stat_reg = 0;
+ break;
+ case ECCB_DATA:
+ val = ((uint64_t)lpc->eccb_data_reg) << 32;
+ break;
+ }
+ return val;
+}
+
+static void pnv_lpc_xscom_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PnvLpcController *lpc = PNV_LPC(opaque);
+ uint32_t offset = addr >> 3;
+
+ switch (offset & 3) {
+ case ECCB_CTL:
+ pnv_lpc_do_eccb(lpc, val);
+ break;
+ case ECCB_RESET:
+ /* XXXX */
+ break;
+ case ECCB_STAT:
+ break;
+ case ECCB_DATA:
+ lpc->eccb_data_reg = val >> 32;
+ break;
+ }
+}
+
+static const MemoryRegionOps pnv_lpc_xscom_ops = {
+ .read = pnv_lpc_xscom_read,
+ .write = pnv_lpc_xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PnvLpcController *lpc = opaque;
+ uint64_t val = 0xfffffffffffffffful;
+
+ switch (addr) {
+ case LPC_HC_FW_SEG_IDSEL:
+ val = lpc->lpc_hc_fw_seg_idsel;
+ break;
+ case LPC_HC_FW_RD_ACC_SIZE:
+ val = lpc->lpc_hc_fw_rd_acc_size;
+ break;
+ case LPC_HC_IRQSER_CTRL:
+ val = lpc->lpc_hc_irqser_ctrl;
+ break;
+ case LPC_HC_IRQMASK:
+ val = lpc->lpc_hc_irqmask;
+ break;
+ case LPC_HC_IRQSTAT:
+ val = lpc->lpc_hc_irqstat;
+ break;
+ case LPC_HC_ERROR_ADDRESS:
+ val = lpc->lpc_hc_error_addr;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented register: Ox%"
+ HWADDR_PRIx "\n", addr);
+ }
+ return val;
+}
+
+static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned size)
+{
+ PnvLpcController *lpc = opaque;
+
+ /* XXX Filter out reserved bits */
+
+ switch (addr) {
+ case LPC_HC_FW_SEG_IDSEL:
+ /* XXX Actually figure out how that works as this impacts
+ * memory regions/aliases
+ */
+ lpc->lpc_hc_fw_seg_idsel = val;
+ break;
+ case LPC_HC_FW_RD_ACC_SIZE:
+ lpc->lpc_hc_fw_rd_acc_size = val;
+ break;
+ case LPC_HC_IRQSER_CTRL:
+ lpc->lpc_hc_irqser_ctrl = val;
+ break;
+ case LPC_HC_IRQMASK:
+ lpc->lpc_hc_irqmask = val;
+ break;
+ case LPC_HC_IRQSTAT:
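+ /* status bits are write-one-to-clear */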
+ lpc->lpc_hc_irqstat &= ~val;
+ break;
+ case LPC_HC_ERROR_ADDRESS:
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented register: Ox%"
+ HWADDR_PRIx "\n", addr);
+ }
+}
+
+static const MemoryRegionOps lpc_hc_ops = {
+ .read = lpc_hc_read,
+ .write = lpc_hc_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static uint64_t opb_master_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PnvLpcController *lpc = opaque;
+ uint64_t val = 0xfffffffffffffffful;
+
+ switch (addr) {
+ case OPB_MASTER_LS_IRQ_STAT:
+ val = lpc->opb_irq_stat;
+ break;
+ case OPB_MASTER_LS_IRQ_MASK:
+ val = lpc->opb_irq_mask;
+ break;
+ case OPB_MASTER_LS_IRQ_POL:
+ val = lpc->opb_irq_pol;
+ break;
+ case OPB_MASTER_LS_IRQ_INPUT:
+ val = lpc->opb_irq_input;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "OPB MASTER Unimplemented register: Ox%"
+ HWADDR_PRIx "\n", addr);
+ }
+
+ return val;
+}
+
+static void opb_master_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PnvLpcController *lpc = opaque;
+
+ switch (addr) {
+ case OPB_MASTER_LS_IRQ_STAT:
+ lpc->opb_irq_stat &= ~val;
+ break;
+ case OPB_MASTER_LS_IRQ_MASK:
+ /* XXX Filter out reserved bits */
+ lpc->opb_irq_mask = val;
+ break;
+ case OPB_MASTER_LS_IRQ_POL:
+ /* XXX Filter out reserved bits */
+ lpc->opb_irq_pol = val;
+ break;
+ case OPB_MASTER_LS_IRQ_INPUT:
+ /* Read only */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "OPB MASTER Unimplemented register: Ox%"
+ HWADDR_PRIx "\n", addr);
+ }
+}
+
+static const MemoryRegionOps opb_master_ops = {
+ .read = opb_master_read,
+ .write = opb_master_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void pnv_lpc_realize(DeviceState *dev, Error **errp)
+{
+ PnvLpcController *lpc = PNV_LPC(dev);
+
+ /* Reg inits */
+ lpc->lpc_hc_fw_rd_acc_size = LPC_HC_FW_RD_4B;
+
+ /* Create address space and backing MR for the OPB bus */
+ memory_region_init(&lpc->opb_mr, OBJECT(dev), "lpc-opb", 0x100000000ull);
+ address_space_init(&lpc->opb_as, &lpc->opb_mr, "lpc-opb");
+
+ /* Create the ISA IO and Mem space regions, which are the roots of
+ * the ISA bus (i.e., the ISA address spaces). We don't create a
+ * separate one for FW, which we alias to memory instead.
+ */
+ memory_region_init(&lpc->isa_io, OBJECT(dev), "isa-io", ISA_IO_SIZE);
+ memory_region_init(&lpc->isa_mem, OBJECT(dev), "isa-mem", ISA_MEM_SIZE);
+
+ /* Create windows from the OPB space to the ISA space */
+ memory_region_init_alias(&lpc->opb_isa_io, OBJECT(dev), "lpc-isa-io",
+ &lpc->isa_io, 0, LPC_IO_OPB_SIZE);
+ memory_region_add_subregion(&lpc->opb_mr, LPC_IO_OPB_ADDR,
+ &lpc->opb_isa_io);
+ memory_region_init_alias(&lpc->opb_isa_mem, OBJECT(dev), "lpc-isa-mem",
+ &lpc->isa_mem, 0, LPC_MEM_OPB_SIZE);
+ memory_region_add_subregion(&lpc->opb_mr, LPC_MEM_OPB_ADDR,
+ &lpc->opb_isa_mem);
+ memory_region_init_alias(&lpc->opb_isa_fw, OBJECT(dev), "lpc-isa-fw",
+ &lpc->isa_mem, 0, LPC_FW_OPB_SIZE);
+ memory_region_add_subregion(&lpc->opb_mr, LPC_FW_OPB_ADDR,
+ &lpc->opb_isa_fw);
+
+ /* Create MMIO regions for LPC HC and OPB registers */
+ memory_region_init_io(&lpc->opb_master_regs, OBJECT(dev), &opb_master_ops,
+ lpc, "lpc-opb-master", LPC_OPB_REGS_OPB_SIZE);
+ memory_region_add_subregion(&lpc->opb_mr, LPC_OPB_REGS_OPB_ADDR,
+ &lpc->opb_master_regs);
+ memory_region_init_io(&lpc->lpc_hc_regs, OBJECT(dev), &lpc_hc_ops, lpc,
+ "lpc-hc", LPC_HC_REGS_OPB_SIZE);
+ memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR,
+ &lpc->lpc_hc_regs);
+
+ /* XScom region for LPC registers */
+ pnv_xscom_region_init(&lpc->xscom_regs, OBJECT(dev),
+ &pnv_lpc_xscom_ops, lpc, "xscom-lpc",
+ PNV_XSCOM_LPC_SIZE);
+}
+
+static void pnv_lpc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
+
+ xdc->populate = pnv_lpc_populate;
+
+ dc->realize = pnv_lpc_realize;
+}
+
+static const TypeInfo pnv_lpc_info = {
+ .name = TYPE_PNV_LPC,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(PnvLpcController),
+ .class_init = pnv_lpc_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { }
+ }
+};
+
+static void pnv_lpc_register_types(void)
+{
+ type_register_static(&pnv_lpc_info);
+}
+
+type_init(pnv_lpc_register_types)
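
The LPC_HC_IRQSTAT case above follows the usual write-one-to-clear (W1C) convention: the guest acknowledges an interrupt by writing back the bit it has handled, and the model clears it with "lpc_hc_irqstat &= ~val". A minimal stand-alone sketch of that pattern, with invented register bits rather than the real LPC HC layout:

/* Stand-alone illustration of the W1C pattern used by lpc_hc_write()
 * for LPC_HC_IRQSTAT.  Bit assignments here are made up. */
#include <stdint.h>
#include <stdio.h>

static uint32_t irqstat;                 /* pending-interrupt latch */

static void raise_irq(uint32_t bit)      /* device model latches a source */
{
    irqstat |= bit;
}

static void irqstat_write(uint32_t val)  /* guest write: a 1 clears the bit */
{
    irqstat &= ~val;
}

int main(void)
{
    raise_irq(0x1);
    raise_irq(0x2);
    printf("pending: 0x%x\n", irqstat);  /* 0x3 */
    irqstat_write(0x1);                  /* ack source 0 only */
    printf("pending: 0x%x\n", irqstat);  /* 0x2 */
    return 0;
}

Writing back exactly the bits read from the status register lets the guest acknowledge the interrupts it has handled without racing against new ones being latched in the meantime.
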
diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c
new file mode 100644
index 0000000000..8da271872f
--- /dev/null
+++ b/hw/ppc/pnv_xscom.c
@@ -0,0 +1,275 @@
+/*
+ * QEMU PowerPC PowerNV XSCOM bus
+ *
+ * Copyright (c) 2016, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/hw.h"
+#include "qemu/log.h"
+#include "sysemu/kvm.h"
+#include "target-ppc/cpu.h"
+#include "hw/sysbus.h"
+
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_xscom.h"
+
+#include <libfdt.h>
+
+static void xscom_complete(CPUState *cs, uint64_t hmer_bits)
+{
+ /*
+ * TODO: When the read/write comes from the monitor, NULL is
+ * passed for the cpu, and no CPU completion is generated.
+ */
+ if (cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ /*
+ * TODO: Need a CPU helper to set HMER, also handle generation
+ * of HMIs
+ */
+ cpu_synchronize_state(cs);
+ env->spr[SPR_HMER] |= hmer_bits;
+ }
+}
+
+static uint32_t pnv_xscom_pcba(PnvChip *chip, uint64_t addr)
+{
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+
+ addr &= (PNV_XSCOM_SIZE - 1);
+ if (pcc->chip_type == PNV_CHIP_POWER9) {
+ return addr >> 3;
+ } else {
+ return ((addr >> 4) & ~0xfull) | ((addr >> 3) & 0xf);
+ }
+}
+
+static uint64_t xscom_read_default(PnvChip *chip, uint32_t pcba)
+{
+ switch (pcba) {
+ case 0xf000f:
+ return PNV_CHIP_GET_CLASS(chip)->chip_cfam_id;
+ case 0x1010c00: /* PIBAM FIR */
+ case 0x1010c03: /* PIBAM FIR MASK */
+ case 0x2020007: /* ADU stuff */
+ case 0x2020009: /* ADU stuff */
+ case 0x202000f: /* ADU stuff */
+ return 0;
+ case 0x2013f00: /* PBA stuff */
+ case 0x2013f01: /* PBA stuff */
+ case 0x2013f02: /* PBA stuff */
+ case 0x2013f03: /* PBA stuff */
+ case 0x2013f04: /* PBA stuff */
+ case 0x2013f05: /* PBA stuff */
+ case 0x2013f06: /* PBA stuff */
+ case 0x2013f07: /* PBA stuff */
+ return 0;
+ case 0x2013028: /* CAPP stuff */
+ case 0x201302a: /* CAPP stuff */
+ case 0x2013801: /* CAPP stuff */
+ case 0x2013802: /* CAPP stuff */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static bool xscom_write_default(PnvChip *chip, uint32_t pcba, uint64_t val)
+{
+ /* We ignore writes to these */
+ switch (pcba) {
+ case 0xf000f: /* chip id is RO */
+ case 0x1010c00: /* PIBAM FIR */
+ case 0x1010c01: /* PIBAM FIR */
+ case 0x1010c02: /* PIBAM FIR */
+ case 0x1010c03: /* PIBAM FIR MASK */
+ case 0x1010c04: /* PIBAM FIR MASK */
+ case 0x1010c05: /* PIBAM FIR MASK */
+ case 0x2020007: /* ADU stuff */
+ case 0x2020009: /* ADU stuff */
+ case 0x202000f: /* ADU stuff */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static uint64_t xscom_read(void *opaque, hwaddr addr, unsigned width)
+{
+ PnvChip *chip = opaque;
+ uint32_t pcba = pnv_xscom_pcba(chip, addr);
+ uint64_t val = 0;
+ MemTxResult result;
+
+ /* Handle some SCOMs here before dispatch */
+ val = xscom_read_default(chip, pcba);
+ if (val != -1) {
+ goto complete;
+ }
+
+ val = address_space_ldq(&chip->xscom_as, (uint64_t) pcba << 3,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XSCOM read failed at @0x%"
+ HWADDR_PRIx " pcba=0x%08x\n", addr, pcba);
+ xscom_complete(current_cpu, HMER_XSCOM_FAIL | HMER_XSCOM_DONE);
+ return 0;
+ }
+
+complete:
+ xscom_complete(current_cpu, HMER_XSCOM_DONE);
+ return val;
+}
+
+static void xscom_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ PnvChip *chip = opaque;
+ uint32_t pcba = pnv_xscom_pcba(chip, addr);
+ MemTxResult result;
+
+ /* Handle some SCOMs here before dispatch */
+ if (xscom_write_default(chip, pcba, val)) {
+ goto complete;
+ }
+
+ address_space_stq(&chip->xscom_as, (uint64_t) pcba << 3, val,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XSCOM write failed at @0x%"
+ HWADDR_PRIx " pcba=0x%08x data=0x%" PRIx64 "\n",
+ addr, pcba, val);
+ xscom_complete(current_cpu, HMER_XSCOM_FAIL | HMER_XSCOM_DONE);
+ return;
+ }
+
+complete:
+ xscom_complete(current_cpu, HMER_XSCOM_DONE);
+}
+
+const MemoryRegionOps pnv_xscom_ops = {
+ .read = xscom_read,
+ .write = xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+void pnv_xscom_realize(PnvChip *chip, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(chip);
+ char *name;
+
+ name = g_strdup_printf("xscom-%x", chip->chip_id);
+ memory_region_init_io(&chip->xscom_mmio, OBJECT(chip), &pnv_xscom_ops,
+ chip, name, PNV_XSCOM_SIZE);
+ sysbus_init_mmio(sbd, &chip->xscom_mmio);
+
+ memory_region_init(&chip->xscom, OBJECT(chip), name, PNV_XSCOM_SIZE);
+ address_space_init(&chip->xscom_as, &chip->xscom, name);
+ g_free(name);
+}
+
+static const TypeInfo pnv_xscom_interface_info = {
+ .name = TYPE_PNV_XSCOM_INTERFACE,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(PnvXScomInterfaceClass),
+};
+
+static void pnv_xscom_register_types(void)
+{
+ type_register_static(&pnv_xscom_interface_info);
+}
+
+type_init(pnv_xscom_register_types)
+
+typedef struct ForeachPopulateArgs {
+ void *fdt;
+ int xscom_offset;
+} ForeachPopulateArgs;
+
+static int xscom_populate_child(Object *child, void *opaque)
+{
+ if (object_dynamic_cast(child, TYPE_PNV_XSCOM_INTERFACE)) {
+ ForeachPopulateArgs *args = opaque;
+ PnvXScomInterface *xd = PNV_XSCOM_INTERFACE(child);
+ PnvXScomInterfaceClass *xc = PNV_XSCOM_INTERFACE_GET_CLASS(xd);
+
+ if (xc->populate) {
+ _FDT((xc->populate(xd, args->fdt, args->xscom_offset)));
+ }
+ }
+ return 0;
+}
+
+static const char compat_p8[] = "ibm,power8-xscom\0ibm,xscom";
+static const char compat_p9[] = "ibm,power9-xscom\0ibm,xscom";
+
+int pnv_xscom_populate(PnvChip *chip, void *fdt, int root_offset)
+{
+ uint64_t reg[] = { cpu_to_be64(PNV_XSCOM_BASE(chip)),
+ cpu_to_be64(PNV_XSCOM_SIZE) };
+ int xscom_offset;
+ ForeachPopulateArgs args;
+ char *name;
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
+
+ name = g_strdup_printf("xscom@%" PRIx64, be64_to_cpu(reg[0]));
+ xscom_offset = fdt_add_subnode(fdt, root_offset, name);
+ _FDT(xscom_offset);
+ g_free(name);
+ _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,chip-id", chip->chip_id)));
+ _FDT((fdt_setprop_cell(fdt, xscom_offset, "#address-cells", 1)));
+ _FDT((fdt_setprop_cell(fdt, xscom_offset, "#size-cells", 1)));
+ _FDT((fdt_setprop(fdt, xscom_offset, "reg", reg, sizeof(reg))));
+
+ if (pcc->chip_type == PNV_CHIP_POWER9) {
+ _FDT((fdt_setprop(fdt, xscom_offset, "compatible", compat_p9,
+ sizeof(compat_p9))));
+ } else {
+ _FDT((fdt_setprop(fdt, xscom_offset, "compatible", compat_p8,
+ sizeof(compat_p8))));
+ }
+
+ _FDT((fdt_setprop(fdt, xscom_offset, "scom-controller", NULL, 0)));
+
+ args.fdt = fdt;
+ args.xscom_offset = xscom_offset;
+
+ object_child_foreach(OBJECT(chip), xscom_populate_child, &args);
+ return 0;
+}
+
+void pnv_xscom_add_subregion(PnvChip *chip, hwaddr offset, MemoryRegion *mr)
+{
+ memory_region_add_subregion(&chip->xscom, offset << 3, mr);
+}
+
+void pnv_xscom_region_init(MemoryRegion *mr,
+ struct Object *owner,
+ const MemoryRegionOps *ops,
+ void *opaque,
+ const char *name,
+ uint64_t size)
+{
+ memory_region_init_io(mr, owner, ops, opaque, name, size << 3);
+}
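
pnv_xscom_pcba() converts the MMIO offset of an access into a PCB address, and the internal xscom address space then indexes registers on an 8-byte stride, which is why pnv_xscom_add_subregion() shifts the offset and pnv_xscom_region_init() shifts the size left by 3. A stand-alone sketch of that arithmetic (the XSCOM region size below is an assumption, not copied from the headers):

/* Stand-alone model of the XSCOM address arithmetic above. */
#include <stdint.h>
#include <stdio.h>

#define XSCOM_SIZE 0x800000000ull   /* assumed stand-in for PNV_XSCOM_SIZE */

static uint32_t pcba_p9(uint64_t addr)   /* POWER9: plain 8-byte stride */
{
    addr &= (XSCOM_SIZE - 1);
    return addr >> 3;
}

static uint32_t pcba_p8(uint64_t addr)   /* POWER8: mangled, as in pnv_xscom_pcba() */
{
    addr &= (XSCOM_SIZE - 1);
    return ((addr >> 4) & ~0xfull) | ((addr >> 3) & 0xf);
}

int main(void)
{
    uint64_t mmio_off = 0x780f8;     /* example MMIO offset into the region */

    printf("P9 pcba = 0x%x\n", pcba_p9(mmio_off));
    printf("P8 pcba = 0x%x\n", pcba_p8(mmio_off));

    /* The CFAM id register handled by xscom_read_default() sits at pcba
     * 0xf000f; in the per-chip xscom address space it is dispatched at
     * pcba << 3. */
    printf("dispatch address for pcba 0xf000f: 0x%llx\n",
           (unsigned long long)(0xf000full << 3));
    return 0;
}
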
diff --git a/hw/ppc/ppc405.h b/hw/ppc/ppc405.h
index c67febca2f..a9ffc87f19 100644
--- a/hw/ppc/ppc405.h
+++ b/hw/ppc/ppc405.h
@@ -71,11 +71,5 @@ CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem,
hwaddr ram_sizes[2],
uint32_t sysclk, qemu_irq **picp,
int do_init);
-/* IBM STBxxx microcontrollers */
-CPUPPCState *ppc_stb025_init (MemoryRegion ram_memories[2],
- hwaddr ram_bases[2],
- hwaddr ram_sizes[2],
- uint32_t sysclk, qemu_irq **picp,
- ram_addr_t *offsetp);
#endif /* PPC405_H */
diff --git a/hw/ppc/ppc405_boards.c b/hw/ppc/ppc405_boards.c
index 4b2f07aecb..d01798f245 100644
--- a/hw/ppc/ppc405_boards.c
+++ b/hw/ppc/ppc405_boards.c
@@ -37,7 +37,6 @@
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/loader.h"
-#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "exec/address-spaces.h"
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index 22c584eb8d..cf958a9e00 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -54,11 +54,6 @@ typedef struct SpinState {
SpinInfo spin[MAX_CPUS];
} SpinState;
-typedef struct spin_kick {
- PowerPCCPU *cpu;
- SpinInfo *spin;
-} SpinKick;
-
static void spin_reset(void *opaque)
{
SpinState *s = opaque;
@@ -89,16 +84,15 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
env->tlb_dirty = true;
}
-static void spin_kick(void *data)
+static void spin_kick(CPUState *cs, run_on_cpu_data data)
{
- SpinKick *kick = data;
- CPUState *cpu = CPU(kick->cpu);
- CPUPPCState *env = &kick->cpu->env;
- SpinInfo *curspin = kick->spin;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ SpinInfo *curspin = data.host_ptr;
hwaddr map_size = 64 * 1024 * 1024;
hwaddr map_start;
- cpu_synchronize_state(cpu);
+ cpu_synchronize_state(cs);
stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
env->nip = ldq_p(&curspin->addr) & (map_size - 1);
env->gpr[3] = ldq_p(&curspin->r3);
@@ -112,10 +106,10 @@ static void spin_kick(void *data)
map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
mmubooke_create_initial_mapping(env, 0, map_start, map_size);
- cpu->halted = 0;
- cpu->exception_index = -1;
- cpu->stopped = false;
- qemu_cpu_kick(cpu);
+ cs->halted = 0;
+ cs->exception_index = -1;
+ cs->stopped = false;
+ qemu_cpu_kick(cs);
}
static void spin_write(void *opaque, hwaddr addr, uint64_t value,
@@ -153,12 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
if (!(ldq_p(&curspin->addr) & 1)) {
/* run CPU */
- SpinKick kick = {
- .cpu = POWERPC_CPU(cpu),
- .spin = curspin,
- };
-
- run_on_cpu(cpu, spin_kick, &kick);
+ run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin));
}
}
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 30d6800ab3..208ef7b110 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -37,7 +37,6 @@
#include "sysemu/block-backend.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
-#include "sysemu/device_tree.h"
#include "kvm_ppc.h"
#include "migration/migration.h"
#include "mmu-hash64.h"
@@ -47,6 +46,7 @@
#include "hw/ppc/ppc.h"
#include "hw/loader.h"
+#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci-host/spapr.h"
@@ -249,40 +249,6 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
return ret;
}
-
-static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop,
- size_t maxsize)
-{
- size_t maxcells = maxsize / sizeof(uint32_t);
- int i, j, count;
- uint32_t *p = prop;
-
- for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
- struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
-
- if (!sps->page_shift) {
- break;
- }
- for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) {
- if (sps->enc[count].page_shift == 0) {
- break;
- }
- }
- if ((p - prop) >= (maxcells - 3 - count * 2)) {
- break;
- }
- *(p++) = cpu_to_be32(sps->page_shift);
- *(p++) = cpu_to_be32(sps->slb_enc);
- *(p++) = cpu_to_be32(count);
- for (j = 0; j < count; j++) {
- *(p++) = cpu_to_be32(sps->enc[j].page_shift);
- *(p++) = cpu_to_be32(sps->enc[j].pte_enc);
- }
- }
-
- return (p - prop) * sizeof(uint32_t);
-}
-
static hwaddr spapr_node0_size(void)
{
MachineState *machine = MACHINE(qdev_get_machine());
@@ -299,225 +265,11 @@ static hwaddr spapr_node0_size(void)
return machine->ram_size;
}
-#define _FDT(exp) \
- do { \
- int ret = (exp); \
- if (ret < 0) { \
- fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
- #exp, fdt_strerror(ret)); \
- exit(1); \
- } \
- } while (0)
-
static void add_str(GString *s, const gchar *s1)
{
g_string_append_len(s, s1, strlen(s1) + 1);
}
-static void *spapr_create_fdt_skel(hwaddr initrd_base,
- hwaddr initrd_size,
- hwaddr kernel_size,
- bool little_endian,
- const char *kernel_cmdline,
- uint32_t epow_irq)
-{
- void *fdt;
- uint32_t start_prop = cpu_to_be32(initrd_base);
- uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
- GString *hypertas = g_string_sized_new(256);
- GString *qemu_hypertas = g_string_sized_new(256);
- uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)};
- uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(max_cpus)};
- unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80};
- char *buf;
-
- add_str(hypertas, "hcall-pft");
- add_str(hypertas, "hcall-term");
- add_str(hypertas, "hcall-dabr");
- add_str(hypertas, "hcall-interrupt");
- add_str(hypertas, "hcall-tce");
- add_str(hypertas, "hcall-vio");
- add_str(hypertas, "hcall-splpar");
- add_str(hypertas, "hcall-bulk");
- add_str(hypertas, "hcall-set-mode");
- add_str(hypertas, "hcall-sprg0");
- add_str(hypertas, "hcall-copy");
- add_str(hypertas, "hcall-debug");
- add_str(qemu_hypertas, "hcall-memop1");
-
- fdt = g_malloc0(FDT_MAX_SIZE);
- _FDT((fdt_create(fdt, FDT_MAX_SIZE)));
-
- if (kernel_size) {
- _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size)));
- }
- if (initrd_size) {
- _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size)));
- }
- _FDT((fdt_finish_reservemap(fdt)));
-
- /* Root node */
- _FDT((fdt_begin_node(fdt, "")));
- _FDT((fdt_property_string(fdt, "device_type", "chrp")));
- _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)")));
- _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries")));
-
- /*
- * Add info to guest to indentify which host is it being run on
- * and what is the uuid of the guest
- */
- if (kvmppc_get_host_model(&buf)) {
- _FDT((fdt_property_string(fdt, "host-model", buf)));
- g_free(buf);
- }
- if (kvmppc_get_host_serial(&buf)) {
- _FDT((fdt_property_string(fdt, "host-serial", buf)));
- g_free(buf);
- }
-
- buf = g_strdup_printf(UUID_FMT, qemu_uuid[0], qemu_uuid[1],
- qemu_uuid[2], qemu_uuid[3], qemu_uuid[4],
- qemu_uuid[5], qemu_uuid[6], qemu_uuid[7],
- qemu_uuid[8], qemu_uuid[9], qemu_uuid[10],
- qemu_uuid[11], qemu_uuid[12], qemu_uuid[13],
- qemu_uuid[14], qemu_uuid[15]);
-
- _FDT((fdt_property_string(fdt, "vm,uuid", buf)));
- if (qemu_uuid_set) {
- _FDT((fdt_property_string(fdt, "system-id", buf)));
- }
- g_free(buf);
-
- if (qemu_get_vm_name()) {
- _FDT((fdt_property_string(fdt, "ibm,partition-name",
- qemu_get_vm_name())));
- }
-
- _FDT((fdt_property_cell(fdt, "#address-cells", 0x2)));
- _FDT((fdt_property_cell(fdt, "#size-cells", 0x2)));
-
- /* /chosen */
- _FDT((fdt_begin_node(fdt, "chosen")));
-
- /* Set Form1_affinity */
- _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5))));
-
- _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline)));
- _FDT((fdt_property(fdt, "linux,initrd-start",
- &start_prop, sizeof(start_prop))));
- _FDT((fdt_property(fdt, "linux,initrd-end",
- &end_prop, sizeof(end_prop))));
- if (kernel_size) {
- uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
- cpu_to_be64(kernel_size) };
-
- _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop))));
- if (little_endian) {
- _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0)));
- }
- }
- if (boot_menu) {
- _FDT((fdt_property_cell(fdt, "qemu,boot-menu", boot_menu)));
- }
- _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width)));
- _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height)));
- _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth)));
-
- _FDT((fdt_end_node(fdt)));
-
- /* RTAS */
- _FDT((fdt_begin_node(fdt, "rtas")));
-
- if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
- add_str(hypertas, "hcall-multi-tce");
- }
- _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
- hypertas->len)));
- g_string_free(hypertas, TRUE);
- _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas->str,
- qemu_hypertas->len)));
- g_string_free(qemu_hypertas, TRUE);
-
- _FDT((fdt_property(fdt, "ibm,associativity-reference-points",
- refpoints, sizeof(refpoints))));
-
- _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
- _FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
- RTAS_EVENT_SCAN_RATE)));
-
- if (msi_nonbroken) {
- _FDT((fdt_property(fdt, "ibm,change-msix-capable", NULL, 0)));
- }
-
- /*
- * According to PAPR, rtas ibm,os-term does not guarantee a return
- * back to the guest cpu.
- *
- * While an additional ibm,extended-os-term property indicates that
- * rtas call return will always occur. Set this property.
- */
- _FDT((fdt_property(fdt, "ibm,extended-os-term", NULL, 0)));
-
- _FDT((fdt_end_node(fdt)));
-
- /* interrupt controller */
- _FDT((fdt_begin_node(fdt, "interrupt-controller")));
-
- _FDT((fdt_property_string(fdt, "device_type",
- "PowerPC-External-Interrupt-Presentation")));
- _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp")));
- _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
- _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges",
- interrupt_server_ranges_prop,
- sizeof(interrupt_server_ranges_prop))));
- _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
- _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP)));
- _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP)));
-
- _FDT((fdt_end_node(fdt)));
-
- /* vdevice */
- _FDT((fdt_begin_node(fdt, "vdevice")));
-
- _FDT((fdt_property_string(fdt, "device_type", "vdevice")));
- _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice")));
- _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
- _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
- _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2)));
- _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
-
- _FDT((fdt_end_node(fdt)));
-
- /* event-sources */
- spapr_events_fdt_skel(fdt, epow_irq);
-
- /* /hypervisor node */
- if (kvm_enabled()) {
- uint8_t hypercall[16];
-
- /* indicate KVM hypercall interface */
- _FDT((fdt_begin_node(fdt, "hypervisor")));
- _FDT((fdt_property_string(fdt, "compatible", "linux,kvm")));
- if (kvmppc_has_cap_fixup_hcalls()) {
- /*
- * Older KVM versions with older guest kernels were broken with the
- * magic page, don't allow the guest to map it.
- */
- if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
- sizeof(hypercall))) {
- _FDT((fdt_property(fdt, "hcall-instructions", hypercall,
- sizeof(hypercall))));
- }
- }
- _FDT((fdt_end_node(fdt)));
- }
-
- _FDT((fdt_end_node(fdt))); /* close root node */
- _FDT((fdt_finish(fdt)));
-
- return fdt;
-}
-
static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
hwaddr size)
{
@@ -594,6 +346,51 @@ static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
return 0;
}
+/* Populate the "ibm,pa-features" property */
+static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset)
+{
+ uint8_t pa_features_206[] = { 6, 0,
+ 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
+ uint8_t pa_features_207[] = { 24, 0,
+ 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
+ 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
+ uint8_t *pa_features;
+ size_t pa_size;
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_06a:
+ pa_features = pa_features_206;
+ pa_size = sizeof(pa_features_206);
+ break;
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+ pa_features = pa_features_207;
+ pa_size = sizeof(pa_features_207);
+ break;
+ default:
+ return;
+ }
+
+ if (env->ci_large_pages) {
+ /*
+ * Note: we keep CI large pages off by default because a 64K capable
+ * guest provisioned with large pages might otherwise try to map a qemu
+ * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
+ * even if that qemu runs on a 4k host.
+ * We add this bit back here if we are confident this is not an issue.
+ */
+ pa_features[3] |= 0x20;
+ }
+ if (kvmppc_has_cap_htm() && pa_size > 24) {
+ pa_features[24] |= 0x80; /* Transactional memory support */
+ }
+
+ _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
+}
+
static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
sPAPRMachineState *spapr)
{
@@ -621,24 +418,6 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
_FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
}
- /* Note: we keep CI large pages off for now because a 64K capable guest
- * provisioned with large pages might otherwise try to map a qemu
- * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
- * even if that qemu runs on a 4k host.
- *
- * We can later add this bit back when we are confident this is not
- * an issue (!HV KVM or 64K host)
- */
- uint8_t pa_features_206[] = { 6, 0,
- 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
- uint8_t pa_features_207[] = { 24, 0,
- 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
- 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
- 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 };
- uint8_t *pa_features;
- size_t pa_size;
-
_FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
_FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
@@ -656,13 +435,13 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
_FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
pcc->l1_dcache_size)));
} else {
- fprintf(stderr, "Warning: Unknown L1 dcache size for cpu\n");
+ error_report("Warning: Unknown L1 dcache size for cpu");
}
if (pcc->l1_icache_size) {
_FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
pcc->l1_icache_size)));
} else {
- fprintf(stderr, "Warning: Unknown L1 icache size for cpu\n");
+ error_report("Warning: Unknown L1 icache size for cpu");
}
_FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
@@ -698,25 +477,14 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
_FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
}
- page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop,
+ page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
sizeof(page_sizes_prop));
if (page_sizes_prop_size) {
_FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
page_sizes_prop, page_sizes_prop_size)));
}
- /* Do the ibm,pa-features property, adjust it for ci-large-pages */
- if (env->mmu_model == POWERPC_MMU_2_06) {
- pa_features = pa_features_206;
- pa_size = sizeof(pa_features_206);
- } else /* env->mmu_model == POWERPC_MMU_2_07 */ {
- pa_features = pa_features_207;
- pa_size = sizeof(pa_features_207);
- }
- if (env->ci_large_pages) {
- pa_features[3] |= 0x20;
- }
- _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
+ spapr_populate_pa_features(env, fdt, offset);
_FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
cs->cpu_index / vcpus_per_socket)));
@@ -886,13 +654,42 @@ out:
return ret;
}
+static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
+ sPAPROptionVector *ov5_updates)
+{
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
+ int ret = 0, offset;
+
+ /* Generate ibm,dynamic-reconfiguration-memory node if required */
+ if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
+ g_assert(smc->dr_lmb_enabled);
+ ret = spapr_populate_drconf_memory(spapr, fdt);
+ if (ret) {
+ goto out;
+ }
+ }
+
+ offset = fdt_path_offset(fdt, "/chosen");
+ if (offset < 0) {
+ offset = fdt_add_subnode(fdt, 0, "chosen");
+ if (offset < 0) {
+ return offset;
+ }
+ }
+ ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
+ "ibm,architecture-vec-5");
+
+out:
+ return ret;
+}
+
int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
target_ulong addr, target_ulong size,
- bool cpu_update, bool memory_update)
+ bool cpu_update,
+ sPAPROptionVector *ov5_updates)
{
void *fdt, *fdt_skel;
sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
- sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
size -= sizeof(hdr);
@@ -911,9 +708,8 @@ int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
_FDT((spapr_fixup_cpu_dt(fdt, spapr)));
}
- /* Generate ibm,dynamic-reconfiguration-memory node if required */
- if (memory_update && smc->dr_lmb_enabled) {
- _FDT((spapr_populate_drconf_memory(spapr, fdt)));
+ if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
+ return -1;
}
/* Pack resulting tree */
@@ -932,42 +728,220 @@ int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
return 0;
}
-static void spapr_finalize_fdt(sPAPRMachineState *spapr,
- hwaddr fdt_addr,
- hwaddr rtas_addr,
- hwaddr rtas_size)
+static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
+{
+ int rtas;
+ GString *hypertas = g_string_sized_new(256);
+ GString *qemu_hypertas = g_string_sized_new(256);
+ uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
+ uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
+ memory_region_size(&spapr->hotplug_memory.mr);
+ uint32_t lrdr_capacity[] = {
+ cpu_to_be32(max_hotplug_addr >> 32),
+ cpu_to_be32(max_hotplug_addr & 0xffffffff),
+ 0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
+ cpu_to_be32(max_cpus / smp_threads),
+ };
+
+ _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
+
+ /* hypertas */
+ add_str(hypertas, "hcall-pft");
+ add_str(hypertas, "hcall-term");
+ add_str(hypertas, "hcall-dabr");
+ add_str(hypertas, "hcall-interrupt");
+ add_str(hypertas, "hcall-tce");
+ add_str(hypertas, "hcall-vio");
+ add_str(hypertas, "hcall-splpar");
+ add_str(hypertas, "hcall-bulk");
+ add_str(hypertas, "hcall-set-mode");
+ add_str(hypertas, "hcall-sprg0");
+ add_str(hypertas, "hcall-copy");
+ add_str(hypertas, "hcall-debug");
+ add_str(qemu_hypertas, "hcall-memop1");
+
+ if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
+ add_str(hypertas, "hcall-multi-tce");
+ }
+ _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
+ hypertas->str, hypertas->len));
+ g_string_free(hypertas, TRUE);
+ _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
+ qemu_hypertas->str, qemu_hypertas->len));
+ g_string_free(qemu_hypertas, TRUE);
+
+ _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
+ refpoints, sizeof(refpoints)));
+
+ _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
+ RTAS_ERROR_LOG_MAX));
+ _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
+ RTAS_EVENT_SCAN_RATE));
+
+ if (msi_nonbroken) {
+ _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
+ }
+
+ /*
+ * According to PAPR, rtas ibm,os-term does not guarantee a return
+ * to the guest cpu.
+ *
+ * An additional ibm,extended-os-term property, however, indicates
+ * that the rtas call will always return. Set this property.
+ */
+ _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));
+
+ _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
+ lrdr_capacity, sizeof(lrdr_capacity)));
+
+ spapr_dt_rtas_tokens(fdt, rtas);
+}
+
+static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
+{
+ MachineState *machine = MACHINE(spapr);
+ int chosen;
+ const char *boot_device = machine->boot_order;
+ char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
+ size_t cb = 0;
+ char *bootlist = get_boot_devices_list(&cb, true);
+
+ _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));
+
+ _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
+ _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
+ spapr->initrd_base));
+ _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
+ spapr->initrd_base + spapr->initrd_size));
+
+ if (spapr->kernel_size) {
+ uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
+ cpu_to_be64(spapr->kernel_size) };
+
+ _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
+ &kprop, sizeof(kprop)));
+ if (spapr->kernel_le) {
+ _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
+ }
+ }
+ if (boot_menu) {
+ _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
+ }
+ _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
+ _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
+ _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));
+
+ if (cb && bootlist) {
+ int i;
+
+ for (i = 0; i < cb; i++) {
+ if (bootlist[i] == '\n') {
+ bootlist[i] = ' ';
+ }
+ }
+ _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
+ }
+
+ if (boot_device && strlen(boot_device)) {
+ _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
+ }
+
+ if (!spapr->has_graphics && stdout_path) {
+ _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
+ }
+
+ g_free(stdout_path);
+ g_free(bootlist);
+}
+
+static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
+{
+ /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
+ * KVM to work under pHyp with some guest co-operation */
+ int hypervisor;
+ uint8_t hypercall[16];
+
+ _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
+ /* indicate KVM hypercall interface */
+ _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
+ if (kvmppc_has_cap_fixup_hcalls()) {
+ /*
+ * Older KVM versions with older guest kernels were broken
+ * with the magic page, don't allow the guest to map it.
+ */
+ if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
+ sizeof(hypercall))) {
+ _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
+ hypercall, sizeof(hypercall)));
+ }
+ }
+}
+
+static void *spapr_build_fdt(sPAPRMachineState *spapr,
+ hwaddr rtas_addr,
+ hwaddr rtas_size)
{
MachineState *machine = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(machine);
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
- const char *boot_device = machine->boot_order;
- int ret, i;
- size_t cb = 0;
- char *bootlist;
+ int ret;
void *fdt;
sPAPRPHBState *phb;
+ char *buf;
- fdt = g_malloc(FDT_MAX_SIZE);
+ fdt = g_malloc0(FDT_MAX_SIZE);
+ _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
- /* open out the base tree into a temp buffer for the final tweaks */
- _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE)));
+ /* Root node */
+ _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
+ _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
+ _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));
- ret = spapr_populate_memory(spapr, fdt);
- if (ret < 0) {
- fprintf(stderr, "couldn't setup memory nodes in fdt\n");
- exit(1);
+ /*
+ * Add info to the guest to identify which host it is being run on
+ * and what the uuid of the guest is
+ */
+ if (kvmppc_get_host_model(&buf)) {
+ _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
+ g_free(buf);
+ }
+ if (kvmppc_get_host_serial(&buf)) {
+ _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
+ g_free(buf);
+ }
+
+ buf = qemu_uuid_unparse_strdup(&qemu_uuid);
+
+ _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
+ if (qemu_uuid_set) {
+ _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
+ }
+ g_free(buf);
+
+ if (qemu_get_vm_name()) {
+ _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
+ qemu_get_vm_name()));
}
- ret = spapr_populate_vdevice(spapr->vio_bus, fdt);
+ _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
+ _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
+
+ /* /interrupt controller */
+ spapr_dt_xics(spapr->xics, fdt, PHANDLE_XICP);
+
+ ret = spapr_populate_memory(spapr, fdt);
if (ret < 0) {
- fprintf(stderr, "couldn't setup vio devices in fdt\n");
+ error_report("couldn't setup memory nodes in fdt");
exit(1);
}
+ /* /vdevice */
+ spapr_dt_vdevice(spapr->vio_bus, fdt);
+
if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
ret = spapr_rng_populate_dt(fdt);
if (ret < 0) {
- fprintf(stderr, "could not set up rng device in the fdt\n");
+ error_report("could not set up rng device in the fdt");
exit(1);
}
}
@@ -980,43 +954,9 @@ static void spapr_finalize_fdt(sPAPRMachineState *spapr,
}
}
- /* RTAS */
- ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size);
- if (ret < 0) {
- fprintf(stderr, "Couldn't set up RTAS device tree properties\n");
- }
-
/* cpus */
spapr_populate_cpus_dt_node(fdt, spapr);
- bootlist = get_boot_devices_list(&cb, true);
- if (cb && bootlist) {
- int offset = fdt_path_offset(fdt, "/chosen");
- if (offset < 0) {
- exit(1);
- }
- for (i = 0; i < cb; i++) {
- if (bootlist[i] == '\n') {
- bootlist[i] = ' ';
- }
-
- }
- ret = fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist);
- }
-
- if (boot_device && strlen(boot_device)) {
- int offset = fdt_path_offset(fdt, "/chosen");
-
- if (offset < 0) {
- exit(1);
- }
- fdt_setprop_string(fdt, offset, "qemu,boot-device", boot_device);
- }
-
- if (!spapr->has_graphics) {
- spapr_populate_chosen_stdout(fdt, spapr->vio_bus);
- }
-
if (smc->dr_lmb_enabled) {
_FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
}
@@ -1031,19 +971,36 @@ static void spapr_finalize_fdt(sPAPRMachineState *spapr,
}
}
- _FDT((fdt_pack(fdt)));
+ /* /event-sources */
+ spapr_dt_events(spapr, fdt);
- if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
- error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
- fdt_totalsize(fdt), FDT_MAX_SIZE);
- exit(1);
+ /* /rtas */
+ spapr_dt_rtas(spapr, fdt);
+
+ /* /chosen */
+ spapr_dt_chosen(spapr, fdt);
+
+ /* /hypervisor */
+ if (kvm_enabled()) {
+ spapr_dt_hypervisor(spapr, fdt);
}
- qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
- cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
+ /* Build memory reserve map */
+ if (spapr->kernel_size) {
+ _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
+ }
+ if (spapr->initrd_size) {
+ _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
+ }
- g_free(bootlist);
- g_free(fdt);
+ /* ibm,client-architecture-support updates */
+ ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
+ if (ret < 0) {
+ error_report("couldn't setup CAS properties in fdt");
+ exit(1);
+ }
+
+ return fdt;
}
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
@@ -1158,7 +1115,7 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
}
}
-static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
+static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
bool matched = false;
@@ -1171,8 +1128,6 @@ static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
qdev_fw_name(DEVICE(sbdev)));
exit(1);
}
-
- return 0;
}
static void ppc_spapr_reset(void)
@@ -1181,6 +1136,9 @@ static void ppc_spapr_reset(void)
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
PowerPCCPU *first_ppc_cpu;
uint32_t rtas_limit;
+ hwaddr rtas_addr, fdt_addr;
+ void *fdt;
+ int rc;
/* Check for unknown sysbus devices */
foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
@@ -1204,24 +1162,44 @@ static void ppc_spapr_reset(void)
* processed with 32-bit real mode code if necessary
*/
rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
- spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
- spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
+ rtas_addr = rtas_limit - RTAS_MAX_SIZE;
+ fdt_addr = rtas_addr - FDT_MAX_SIZE;
- /* Load the fdt */
- spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr,
- spapr->rtas_size);
+ /* if this reset wasn't generated by CAS, we should reset our
+ * negotiated options and start from scratch */
+ if (!spapr->cas_reboot) {
+ spapr_ovec_cleanup(spapr->ov5_cas);
+ spapr->ov5_cas = spapr_ovec_new();
+ }
+
+ fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);
+
+ spapr_load_rtas(spapr, fdt, rtas_addr);
+
+ rc = fdt_pack(fdt);
+
+ /* Should only fail if we've built a corrupted tree */
+ assert(rc == 0);
+
+ if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
+ error_report("FDT too big! 0x%x bytes (max is 0x%x)",
+ fdt_totalsize(fdt), FDT_MAX_SIZE);
+ exit(1);
+ }
- /* Copy RTAS over */
- cpu_physical_memory_write(spapr->rtas_addr, spapr->rtas_blob,
- spapr->rtas_size);
+ /* Load the fdt */
+ qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
+ cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
+ g_free(fdt);
/* Set up the entry state */
first_ppc_cpu = POWERPC_CPU(first_cpu);
- first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
+ first_ppc_cpu->env.gpr[3] = fdt_addr;
first_ppc_cpu->env.gpr[5] = 0;
first_cpu->halted = 0;
first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;
+ spapr->cas_reboot = false;
}
static void spapr_create_nvram(sPAPRMachineState *spapr)
@@ -1289,6 +1267,68 @@ static bool version_before_3(void *opaque, int version_id)
return version_id < 3;
}
+static bool spapr_ov5_cas_needed(void *opaque)
+{
+ sPAPRMachineState *spapr = opaque;
+ sPAPROptionVector *ov5_mask = spapr_ovec_new();
+ sPAPROptionVector *ov5_legacy = spapr_ovec_new();
+ sPAPROptionVector *ov5_removed = spapr_ovec_new();
+ bool cas_needed;
+
+ /* Prior to the introduction of sPAPROptionVector, we had two option
+ * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
+ * Both of these options encode machine topology into the device-tree
+ * in such a way that the now-booted OS should still be able to interact
+ * appropriately with QEMU regardless of what options were actually
+ * negotiated on the source side.
+ *
+ * As such, we can avoid migrating the CAS-negotiated options if these
+ * are the only options available on the current machine/platform.
+ * Since these are the only options available for pseries-2.7 and
+ * earlier, this allows us to maintain old->new/new->old migration
+ * compatibility.
+ *
+ * For QEMU 2.8+, there are additional CAS-negotiable options available
+ * via default pseries-2.8 machines and explicit command-line parameters.
+ * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
+ * of the actual CAS-negotiated values to continue working properly. For
+ * example, availability of memory unplug depends on knowing whether
+ * OV5_HP_EVT was negotiated via CAS.
+ *
+ * Thus, for any cases where the set of available CAS-negotiable
+ * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
+ * include the CAS-negotiated options in the migration stream.
+ */
+ spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
+ spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
+
+ /* spapr_ovec_diff returns true if bits were removed. We avoid using
+ * the mask itself since in the future it's possible "legacy" bits may be
+ * removed via machine options, which could generate a false positive
+ * that breaks migration.
+ */
+ spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
+ cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);
+
+ spapr_ovec_cleanup(ov5_mask);
+ spapr_ovec_cleanup(ov5_legacy);
+ spapr_ovec_cleanup(ov5_removed);
+
+ return cas_needed;
+}
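
The intersect/diff sequence above boils down to asking whether any bit advertised in ov5 falls outside the two legacy options; only then does the ov5_cas subsection need to travel in the migration stream. A tiny stand-alone model, with the option vector represented as a plain bitmask and made-up bit positions (the real code uses the sPAPROptionVector API):

/* Stand-alone model of the decision made by spapr_ov5_cas_needed(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OV5_FORM1_AFFINITY  (1u << 0)   /* illustrative bit positions only */
#define OV5_DRCONF_MEMORY   (1u << 1)
#define OV5_HP_EVT          (1u << 2)

static bool ov5_cas_needed(uint32_t ov5)
{
    uint32_t legacy_mask = OV5_FORM1_AFFINITY | OV5_DRCONF_MEMORY;
    uint32_t legacy = ov5 & legacy_mask;    /* spapr_ovec_intersect() */

    /* spapr_ovec_diff(): true iff ov5 has bits that legacy does not */
    return (ov5 & ~legacy) != 0;
}

int main(void)
{
    /* pseries-2.7 style machine: only legacy options -> no subsection */
    printf("%d\n", ov5_cas_needed(OV5_FORM1_AFFINITY | OV5_DRCONF_MEMORY));
    /* 2.8+ machine advertising hotplug events -> migrate ov5_cas */
    printf("%d\n", ov5_cas_needed(OV5_FORM1_AFFINITY | OV5_DRCONF_MEMORY |
                                  OV5_HP_EVT));
    return 0;
}
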
+
+static const VMStateDescription vmstate_spapr_ov5_cas = {
+ .name = "spapr_option_vector_ov5_cas",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = spapr_ov5_cas_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
+ vmstate_spapr_ovec, sPAPROptionVector),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static const VMStateDescription vmstate_spapr = {
.name = "spapr",
.version_id = 3,
@@ -1304,6 +1344,10 @@ static const VMStateDescription vmstate_spapr = {
VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
VMSTATE_END_OF_LIST()
},
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_spapr_ov5_cas,
+ NULL
+ }
};
static int htab_save_setup(QEMUFile *f, void *opaque)
@@ -1716,7 +1760,6 @@ static void ppc_spapr_init(MachineState *machine)
MachineClass *mc = MACHINE_GET_CLASS(machine);
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
const char *kernel_filename = machine->kernel_filename;
- const char *kernel_cmdline = machine->kernel_cmdline;
const char *initrd_filename = machine->initrd_filename;
PCIHostState *phb;
int i;
@@ -1726,10 +1769,7 @@ static void ppc_spapr_init(MachineState *machine)
void *rma = NULL;
hwaddr rma_alloc_size;
hwaddr node0_size = spapr_node0_size();
- uint32_t initrd_base = 0;
- long kernel_size = 0, initrd_size = 0;
long load_limit, fw_size;
- bool kernel_le = false;
char *filename;
int smt = kvmppc_smt_threads();
int spapr_cores = smp_cpus / smp_threads;
@@ -1803,13 +1843,25 @@ static void ppc_spapr_init(MachineState *machine)
DIV_ROUND_UP(max_cpus * smt, smp_threads),
XICS_IRQS_SPAPR, &error_fatal);
+ /* Set up containers for ibm,client-architecture-support negotiated options */
+ spapr->ov5 = spapr_ovec_new();
+ spapr->ov5_cas = spapr_ovec_new();
+
if (smc->dr_lmb_enabled) {
+ spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
spapr_validate_node_memory(machine, &error_fatal);
}
+ spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
+
+ /* advertise support for dedicated HP event source to guests */
+ if (spapr->use_hotplug_event_source) {
+ spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
+ }
+
/* init CPUs */
if (machine->cpu_model == NULL) {
- machine->cpu_model = kvm_enabled() ? "host" : "POWER7";
+ machine->cpu_model = kvm_enabled() ? "host" : smc->tcg_default_cpu;
}
ppc_cpu_parse_features(machine->cpu_model);
@@ -1857,6 +1909,9 @@ static void ppc_spapr_init(MachineState *machine)
/* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
kvmppc_enable_logical_ci_hcalls();
kvmppc_enable_set_mode_hcall();
+
+ /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
+ kvmppc_enable_clear_ref_mod_hcalls();
}
/* allocate RAM */
@@ -1927,7 +1982,7 @@ static void ppc_spapr_init(MachineState *machine)
}
g_free(filename);
- /* Set up EPOW events infrastructure */
+ /* Set up RTAS event infrastructure */
spapr_events_init(spapr);
/* Set up the RTC RTAS interfaces */
@@ -1999,19 +2054,19 @@ static void ppc_spapr_init(MachineState *machine)
if (kernel_filename) {
uint64_t lowaddr = 0;
- kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
- NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE,
- 0, 0);
- if (kernel_size == ELF_LOAD_WRONG_ENDIAN) {
- kernel_size = load_elf(kernel_filename,
- translate_kernel_address, NULL,
- NULL, &lowaddr, NULL, 0, PPC_ELF_MACHINE,
- 0, 0);
- kernel_le = kernel_size > 0;
- }
- if (kernel_size < 0) {
- error_report("error loading %s: %s",
- kernel_filename, load_elf_strerror(kernel_size));
+ spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
+ NULL, NULL, &lowaddr, NULL, 1,
+ PPC_ELF_MACHINE, 0, 0);
+ if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
+ spapr->kernel_size = load_elf(kernel_filename,
+ translate_kernel_address, NULL, NULL,
+ &lowaddr, NULL, 0, PPC_ELF_MACHINE,
+ 0, 0);
+ spapr->kernel_le = spapr->kernel_size > 0;
+ }
+ if (spapr->kernel_size < 0) {
+ error_report("error loading %s: %s", kernel_filename,
+ load_elf_strerror(spapr->kernel_size));
exit(1);
}
@@ -2020,17 +2075,17 @@ static void ppc_spapr_init(MachineState *machine)
/* Try to locate the initrd in the gap between the kernel
* and the firmware. Add a bit of space just in case
*/
- initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
- initrd_size = load_image_targphys(initrd_filename, initrd_base,
- load_limit - initrd_base);
- if (initrd_size < 0) {
+ spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
+ + 0x1ffff) & ~0xffff;
+ spapr->initrd_size = load_image_targphys(initrd_filename,
+ spapr->initrd_base,
+ load_limit
+ - spapr->initrd_base);
+ if (spapr->initrd_size < 0) {
error_report("could not load initial ram disk '%s'",
initrd_filename);
exit(1);
}
- } else {
- initrd_base = 0;
- initrd_size = 0;
}
}
@@ -2056,13 +2111,6 @@ static void ppc_spapr_init(MachineState *machine)
register_savevm_live(NULL, "spapr/htab", -1, 1,
&savevm_htab_handlers, spapr);
- /* Prepare the device tree */
- spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
- kernel_size, kernel_le,
- kernel_cmdline,
- spapr->check_exception_irq);
- assert(spapr->fdt_skel != NULL);
-
/* used by RTAS */
QTAILQ_INIT(&spapr->ccs_list);
qemu_register_reset(spapr_ccs_reset_hook, spapr);
@@ -2160,16 +2208,41 @@ static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
spapr->kvm_type = g_strdup(value);
}
+static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+
+ return spapr->use_hotplug_event_source;
+}
+
+static void spapr_set_modern_hotplug_events(Object *obj, bool value,
+ Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+
+ spapr->use_hotplug_event_source = value;
+}
+
static void spapr_machine_initfn(Object *obj)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
spapr->htab_fd = -1;
+ spapr->use_hotplug_event_source = true;
object_property_add_str(obj, "kvm-type",
spapr_get_kvm_type, spapr_set_kvm_type, NULL);
object_property_set_description(obj, "kvm-type",
"Specifies the KVM virtualization mode (HV, PR)",
NULL);
+ object_property_add_bool(obj, "modern-hotplug-events",
+ spapr_get_modern_hotplug_events,
+ spapr_set_modern_hotplug_events,
+ NULL);
+ object_property_set_description(obj, "modern-hotplug-events",
+ "Use dedicated hotplug event mechanism in"
+ " place of standard EPOW events when possible"
+ " (required for memory hot-unplug support)",
+ NULL);
}
static void spapr_machine_finalizefn(Object *obj)
@@ -2179,10 +2252,8 @@ static void spapr_machine_finalizefn(Object *obj)
g_free(spapr->kvm_type);
}
-static void ppc_cpu_do_nmi_on_cpu(void *arg)
+static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
- CPUState *cs = arg;
-
cpu_synchronize_state(cs);
ppc_cpu_do_system_reset(cs);
}
@@ -2192,18 +2263,20 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
CPUState *cs;
CPU_FOREACH(cs) {
- async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, cs);
+ async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
}
}
-static void spapr_add_lmbs(DeviceState *dev, uint64_t addr, uint64_t size,
- uint32_t node, Error **errp)
+static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
+ uint32_t node, bool dedicated_hp_event_source,
+ Error **errp)
{
sPAPRDRConnector *drc;
sPAPRDRConnectorClass *drck;
uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
int i, fdt_offset, fdt_size;
void *fdt;
+ uint64_t addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
@@ -2217,12 +2290,27 @@ static void spapr_add_lmbs(DeviceState *dev, uint64_t addr, uint64_t size,
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
addr += SPAPR_MEMORY_BLOCK_SIZE;
+ if (!dev->hotplugged) {
+ /* guests expect coldplugged LMBs to be pre-allocated */
+ drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
+ drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
+ }
}
/* send hotplug notification to the
* guest only in case of hotplugged memory
*/
if (dev->hotplugged) {
- spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, nr_lmbs);
+ if (dedicated_hp_event_source) {
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ addr_start / SPAPR_MEMORY_BLOCK_SIZE);
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ nr_lmbs,
+ drck->get_index(drc));
+ } else {
+ spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ nr_lmbs);
+ }
}
}
@@ -2255,8 +2343,94 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
goto out;
}
- spapr_add_lmbs(dev, addr, size, node, &error_abort);
+ spapr_add_lmbs(dev, addr, size, node,
+ spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
+ &error_abort);
+
+out:
+ error_propagate(errp, local_err);
+}
+
+typedef struct sPAPRDIMMState {
+ uint32_t nr_lmbs;
+} sPAPRDIMMState;
+
+static void spapr_lmb_release(DeviceState *dev, void *opaque)
+{
+ sPAPRDIMMState *ds = (sPAPRDIMMState *)opaque;
+ HotplugHandler *hotplug_ctrl;
+
+ if (--ds->nr_lmbs) {
+ return;
+ }
+
+ g_free(ds);
+
+ /*
+ * Now that all the LMBs have been removed by the guest, call the
+ * pc-dimm unplug handler to clean up the pc-dimm device.
+ */
+ hotplug_ctrl = qdev_get_hotplug_handler(dev);
+ hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
+}
+
+static void spapr_del_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
+ Error **errp)
+{
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
+ int i;
+ sPAPRDIMMState *ds = g_malloc0(sizeof(sPAPRDIMMState));
+ uint64_t addr = addr_start;
+
+ ds->nr_lmbs = nr_lmbs;
+ for (i = 0; i < nr_lmbs; i++) {
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ addr / SPAPR_MEMORY_BLOCK_SIZE);
+ g_assert(drc);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->detach(drc, dev, spapr_lmb_release, ds, errp);
+ addr += SPAPR_MEMORY_BLOCK_SIZE;
+ }
+
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ addr_start / SPAPR_MEMORY_BLOCK_SIZE);
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ nr_lmbs,
+ drck->get_index(drc));
+}
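
spapr_del_lmbs() arms a single countdown shared by every per-LMB release callback, and spapr_lmb_release() above only tears down the DIMM once the guest has given back the last LMB. A stand-alone model of that countdown, with invented names and an assumed 256MiB LMB size:

/* Stand-alone model of the LMB release countdown used for memory unplug. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct DimmState {
    uint32_t nr_lmbs;            /* LMBs the guest still has to release */
} DimmState;

static void dimm_teardown(void)
{
    printf("all LMBs released, unplugging the DIMM\n");
}

static void lmb_release(DimmState *ds)   /* mirrors spapr_lmb_release() */
{
    if (--ds->nr_lmbs) {
        return;                  /* more LMBs still outstanding */
    }
    free(ds);
    dimm_teardown();             /* last callback finishes the unplug */
}

int main(void)
{
    /* A 1GiB DIMM split into assumed 256MiB LMBs -> 4 release callbacks. */
    DimmState *ds = calloc(1, sizeof(*ds));
    ds->nr_lmbs = 4;

    for (int i = 0; i < 4; i++) {
        lmb_release(ds);
    }
    return 0;
}
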
+
+static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
+ PCDIMMDevice *dimm = PC_DIMM(dev);
+ PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
+ MemoryRegion *mr = ddc->get_memory_region(dimm);
+
+ pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
+ object_unparent(OBJECT(dev));
+}
+
+static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ Error *local_err = NULL;
+ PCDIMMDevice *dimm = PC_DIMM(dev);
+ PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
+ MemoryRegion *mr = ddc->get_memory_region(dimm);
+ uint64_t size = memory_region_size(mr);
+ uint64_t addr;
+
+ addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ spapr_del_lmbs(dev, addr, size, &error_abort);
out:
error_propagate(errp, local_err);
}
@@ -2334,10 +2508,42 @@ static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
+ sPAPRMachineState *sms = SPAPR_MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
- error_setg(errp, "Memory hot unplug not supported by sPAPR");
+ if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
+ spapr_memory_unplug(hotplug_dev, dev, errp);
+ } else {
+ error_setg(errp, "Memory hot unplug not supported for this guest");
+ }
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
+ if (!mc->query_hotpluggable_cpus) {
+ error_setg(errp, "CPU hot unplug not supported on this machine");
+ return;
+ }
+ spapr_core_unplug(hotplug_dev, dev, errp);
+ }
+}
+
+static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ sPAPRMachineState *sms = SPAPR_MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
+ spapr_memory_unplug_request(hotplug_dev, dev, errp);
+ } else {
+ /* NOTE: this means there is a window after guest reset, prior to
+ * CAS negotiation, where unplug requests will fail due to the
+ * capability not being detected yet. This is a bit different than
+ * the case with PCI unplug, where the events will be queued and
+ * eventually handled by the guest after boot
+ */
+ error_setg(errp, "Memory hot unplug not supported for this guest");
+ }
} else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
if (!mc->query_hotpluggable_cpus) {
error_setg(errp, "CPU hot unplug not supported on this machine");
@@ -2355,8 +2561,8 @@ static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
}
}
-static HotplugHandler *spapr_get_hotpug_handler(MachineState *machine,
- DeviceState *dev)
+static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
+ DeviceState *dev)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
@@ -2403,6 +2609,56 @@ static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
return head;
}
+static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
+ uint64_t *buid, hwaddr *pio,
+ hwaddr *mmio32, hwaddr *mmio64,
+ unsigned n_dma, uint32_t *liobns, Error **errp)
+{
+ /*
+ * New-style PHB window placement.
+ *
+ * Goal: Give each PHB a large (1TiB), naturally aligned 64-bit MMIO
+ * window, in addition to 2GiB 32-bit MMIO and 64kiB PIO
+ * windows.
+ *
+ * Some guest kernels can't work with MMIO windows above 1<<46
+ * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
+ *
+ * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
+ * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
+ * 2GiB 32-bit MMIO windows for each PHB. Then 33TiB..64TiB holds the
+ * 1TiB 64-bit MMIO windows for each PHB.
+ */
+ const uint64_t base_buid = 0x800000020000000ULL;
+ const int max_phbs =
+ (SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / SPAPR_PCI_MEM64_WIN_SIZE - 1;
+ int i;
+
+ /* Sanity check natural alignments */
+ QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
+ QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
+ QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
+ QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
+ /* Sanity check bounds */
+ QEMU_BUILD_BUG_ON((max_phbs * SPAPR_PCI_IO_WIN_SIZE) > SPAPR_PCI_MEM32_WIN_SIZE);
+ QEMU_BUILD_BUG_ON((max_phbs * SPAPR_PCI_MEM32_WIN_SIZE) > SPAPR_PCI_MEM64_WIN_SIZE);
+
+ if (index >= max_phbs) {
+ error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
+ max_phbs - 1);
+ return;
+ }
+
+ *buid = base_buid + index;
+ for (i = 0; i < n_dma; ++i) {
+ liobns[i] = SPAPR_PCI_LIOBN(index, i);
+ }
+
+ *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
+ *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
+ *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
+}
+
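
The window arithmetic above can be checked in isolation. A stand-alone sketch using the values implied by the comment (assumptions, not copied from the headers): base 32TiB, limit 64TiB, and 64kiB PIO, 2GiB 32-bit MMIO and 1TiB 64-bit MMIO windows per PHB:

/* Stand-alone check of the new-style PHB window placement arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define PCI_BASE        (32ull << 40)    /* 32 TiB, assumed SPAPR_PCI_BASE  */
#define PCI_LIMIT       (64ull << 40)    /* 64 TiB, assumed SPAPR_PCI_LIMIT */
#define IO_WIN_SIZE     (64ull << 10)    /* 64 kiB */
#define MEM32_WIN_SIZE  (2ull << 30)     /*  2 GiB */
#define MEM64_WIN_SIZE  (1ull << 40)     /*  1 TiB */

int main(void)
{
    int max_phbs = (PCI_LIMIT - PCI_BASE) / MEM64_WIN_SIZE - 1;   /* 31 */

    printf("max_phbs = %d\n", max_phbs);
    for (uint32_t index = 0; index < 2; index++) {
        uint64_t pio    = PCI_BASE + index * IO_WIN_SIZE;
        uint64_t mmio32 = PCI_BASE + (index + 1) * MEM32_WIN_SIZE;
        uint64_t mmio64 = PCI_BASE + (index + 1) * MEM64_WIN_SIZE;

        /* index 0: pio at 32TiB, mmio32 at 32TiB+2GiB, mmio64 at 33TiB */
        printf("index %u: pio=0x%llx mmio32=0x%llx mmio64=0x%llx\n",
               index, (unsigned long long)pio,
               (unsigned long long)mmio32, (unsigned long long)mmio64);
    }
    return 0;
}
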
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -2421,23 +2677,26 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
mc->init = ppc_spapr_init;
mc->reset = ppc_spapr_reset;
mc->block_default_type = IF_SCSI;
- mc->max_cpus = MAX_CPUMASK_BITS;
+ mc->max_cpus = 255;
mc->no_parallel = 1;
mc->default_boot_order = "";
mc->default_ram_size = 512 * M_BYTE;
mc->kvm_type = spapr_kvm_type;
mc->has_dynamic_sysbus = true;
mc->pci_allow_0_address = true;
- mc->get_hotplug_handler = spapr_get_hotpug_handler;
+ mc->get_hotplug_handler = spapr_get_hotplug_handler;
hc->pre_plug = spapr_machine_device_pre_plug;
hc->plug = spapr_machine_device_plug;
hc->unplug = spapr_machine_device_unplug;
mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id;
+ hc->unplug_request = spapr_machine_device_unplug_request;
smc->dr_lmb_enabled = true;
+ smc->tcg_default_cpu = "POWER8";
mc->query_hotpluggable_cpus = spapr_query_hotpluggable_cpus;
fwc->get_dev_path = spapr_get_fw_dev_path;
nc->nmi_monitor_handler = spapr_nmi;
+ smc->phb_placement = spapr_phb_placement;
}
static const TypeInfo spapr_machine_info = {
@@ -2486,18 +2745,113 @@ static const TypeInfo spapr_machine_info = {
type_init(spapr_machine_register_##suffix)
/*
+ * pseries-2.8
+ */
+static void spapr_machine_2_8_instance_options(MachineState *machine)
+{
+}
+
+static void spapr_machine_2_8_class_options(MachineClass *mc)
+{
+ /* Defaults for the latest behaviour inherited from the base class */
+}
+
+DEFINE_SPAPR_MACHINE(2_8, "2.8", true);
+
+/*
* pseries-2.7
*/
+#define SPAPR_COMPAT_2_7 \
+ HW_COMPAT_2_7 \
+ { \
+ .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
+ .property = "mem_win_size", \
+ .value = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),\
+ }, \
+ { \
+ .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
+ .property = "mem64_win_size", \
+ .value = "0", \
+ }, \
+ { \
+ .driver = TYPE_POWERPC_CPU, \
+ .property = "pre-2.8-migration", \
+ .value = "on", \
+ }, \
+ { \
+ .driver = TYPE_SPAPR_PCI_HOST_BRIDGE, \
+ .property = "pre-2.8-migration", \
+ .value = "on", \
+ },
+
+static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
+ uint64_t *buid, hwaddr *pio,
+ hwaddr *mmio32, hwaddr *mmio64,
+ unsigned n_dma, uint32_t *liobns, Error **errp)
+{
+ /* Legacy PHB placement for pseries-2.7 and earlier machine types */
+ const uint64_t base_buid = 0x800000020000000ULL;
+ const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
+ const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
+ const hwaddr pio_offset = 0x80000000; /* 2 GiB */
+ const uint32_t max_index = 255;
+ const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */
+
+ uint64_t ram_top = MACHINE(spapr)->ram_size;
+ hwaddr phb0_base, phb_base;
+ int i;
+
+ /* Do we have hotpluggable memory? */
+ if (MACHINE(spapr)->maxram_size > ram_top) {
+ /* Can't just use maxram_size, because there may be an
+ * alignment gap between normal and hotpluggable memory
+ * regions */
+ ram_top = spapr->hotplug_memory.base +
+ memory_region_size(&spapr->hotplug_memory.mr);
+ }
+
+ phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);
+
+ if (index > max_index) {
+ error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
+ max_index);
+ return;
+ }
+
+ *buid = base_buid + index;
+ for (i = 0; i < n_dma; ++i) {
+ liobns[i] = SPAPR_PCI_LIOBN(index, i);
+ }
+
+ phb_base = phb0_base + index * phb_spacing;
+ *pio = phb_base + pio_offset;
+ *mmio32 = phb_base + mmio_offset;
+ /*
+ * We don't set the 64-bit MMIO window, relying on the PHB's
+ * fallback behaviour of automatically splitting a large "32-bit"
+ * window into contiguous 32-bit and 64-bit windows
+ */
+}
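For comparison, a short sketch of what the legacy layout gives the first PHB when the guest has, say, 4 GiB of RAM and no hotpluggable memory (the RAM size is an illustrative assumption; the offsets are the constants above):

    /* Sketch only: legacy pseries-2.7 placement with example inputs. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t align_up(uint64_t v, uint64_t a)
    {
        return (v + a - 1) & ~(a - 1);  /* a must be a power of two */
    }

    int main(void)
    {
        const uint64_t ram_top     = 4ULL << 30;                          /* assumed 4 GiB of RAM */
        const uint64_t phb0_base   = align_up(ram_top, 0x10000000000ULL); /* -> 1 TiB */
        const uint64_t phb_spacing = 0x1000000000ULL;                     /* 64 GiB per PHB */
        const uint64_t phb_base    = phb0_base + 0 * phb_spacing;         /* index 0 */

        printf("pio    = 0x%llx\n", (unsigned long long)(phb_base + 0x80000000));
        printf("mmio32 = 0x%llx\n", (unsigned long long)(phb_base + 0xa0000000));
        return 0;
    }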
+
static void spapr_machine_2_7_instance_options(MachineState *machine)
{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
+
+ spapr_machine_2_8_instance_options(machine);
+ spapr->use_hotplug_event_source = false;
}
static void spapr_machine_2_7_class_options(MachineClass *mc)
{
- /* Defaults for the latest behaviour inherited from the base class */
+ sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
+
+ spapr_machine_2_8_class_options(mc);
+ smc->tcg_default_cpu = "POWER7";
+ SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7);
+ smc->phb_placement = phb_placement_2_7;
}
-DEFINE_SPAPR_MACHINE(2_7, "2.7", true);
+DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
/*
* pseries-2.6
@@ -2512,6 +2866,7 @@ DEFINE_SPAPR_MACHINE(2_7, "2.7", true);
static void spapr_machine_2_6_instance_options(MachineState *machine)
{
+ spapr_machine_2_7_instance_options(machine);
}
static void spapr_machine_2_6_class_options(MachineClass *mc)
@@ -2536,6 +2891,7 @@ DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
static void spapr_machine_2_5_instance_options(MachineState *machine)
{
+ spapr_machine_2_6_instance_options(machine);
}
static void spapr_machine_2_5_class_options(MachineClass *mc)
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index bcb483dbe6..e0c14f6b77 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -69,11 +69,9 @@ void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
}
/* Set NUMA node for the added CPUs */
- for (i = 0; i < nb_numa_nodes; i++) {
- if (test_bit(cs->cpu_index, numa_info[i].node_cpu)) {
+ i = numa_get_node_for_cpu(cs->cpu_index);
+ if (i < nb_numa_nodes) {
cs->numa_node = i;
- break;
- }
}
xics_cpu_setup(spapr->xics, cpu);
@@ -92,27 +90,28 @@ char *spapr_get_cpu_core_type(const char *model)
gchar **model_pieces = g_strsplit(model, ",", 2);
core_type = g_strdup_printf("%s-%s", model_pieces[0], TYPE_SPAPR_CPU_CORE);
- g_strfreev(model_pieces);
/* Check whether it exists or whether we have to look up an alias name */
if (!object_class_by_name(core_type)) {
const char *realmodel;
g_free(core_type);
- realmodel = ppc_cpu_lookup_alias(model);
+ core_type = NULL;
+ realmodel = ppc_cpu_lookup_alias(model_pieces[0]);
if (realmodel) {
- return spapr_get_cpu_core_type(realmodel);
+ core_type = spapr_get_cpu_core_type(realmodel);
}
- return NULL;
}
+ g_strfreev(model_pieces);
return core_type;
}
static void spapr_core_release(DeviceState *dev, void *opaque)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
- const char *typename = object_class_get_name(sc->cpu_class);
+ sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
+ const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUCore *cc = CPU_CORE(dev);
@@ -185,7 +184,7 @@ void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
/*
* Setup CPU DT entries only for hotplugged CPUs. For boot time or
- * coldplugged CPUs DT entries are setup in spapr_finalize_fdt().
+ * coldplugged CPUs, DT entries are set up in spapr_build_fdt().
*/
if (dev->hotplugged) {
fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
@@ -287,8 +286,9 @@ static void spapr_cpu_core_realize_child(Object *child, Error **errp)
static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
+ sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
CPUCore *cc = CPU_CORE(OBJECT(dev));
- const char *typename = object_class_get_name(sc->cpu_class);
+ const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
Error *local_err = NULL;
void *obj;
@@ -331,83 +331,43 @@ err:
error_propagate(errp, local_err);
}
-static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- dc->realize = spapr_cpu_core_realize;
-}
-
-/*
- * instance_init routines from different flavours of sPAPR CPU cores.
- */
-#define SPAPR_CPU_CORE_INITFN(_type, _fname) \
-static void glue(glue(spapr_cpu_core_, _fname), _initfn)(Object *obj) \
-{ \
- sPAPRCPUCore *core = SPAPR_CPU_CORE(obj); \
- char *name = g_strdup_printf("%s-" TYPE_POWERPC_CPU, stringify(_type)); \
- ObjectClass *oc = object_class_by_name(name); \
- g_assert(oc); \
- g_free((void *)name); \
- core->cpu_class = oc; \
-}
-
-SPAPR_CPU_CORE_INITFN(970mp_v1.0, 970MP_v10);
-SPAPR_CPU_CORE_INITFN(970mp_v1.1, 970MP_v11);
-SPAPR_CPU_CORE_INITFN(970_v2.2, 970);
-SPAPR_CPU_CORE_INITFN(POWER5+_v2.1, POWER5plus);
-SPAPR_CPU_CORE_INITFN(POWER7_v2.3, POWER7);
-SPAPR_CPU_CORE_INITFN(POWER7+_v2.1, POWER7plus);
-SPAPR_CPU_CORE_INITFN(POWER8_v2.0, POWER8);
-SPAPR_CPU_CORE_INITFN(POWER8E_v2.1, POWER8E);
-SPAPR_CPU_CORE_INITFN(POWER8NVL_v1.0, POWER8NVL);
-
-typedef struct SPAPRCoreInfo {
- const char *name;
- void (*initfn)(Object *obj);
-} SPAPRCoreInfo;
-
-static const SPAPRCoreInfo spapr_cores[] = {
+static const char *spapr_core_models[] = {
/* 970 */
- { .name = "970_v2.2", .initfn = spapr_cpu_core_970_initfn },
+ "970_v2.2",
/* 970MP variants */
- { .name = "970MP_v1.0", .initfn = spapr_cpu_core_970MP_v10_initfn },
- { .name = "970mp_v1.0", .initfn = spapr_cpu_core_970MP_v10_initfn },
- { .name = "970MP_v1.1", .initfn = spapr_cpu_core_970MP_v11_initfn },
- { .name = "970mp_v1.1", .initfn = spapr_cpu_core_970MP_v11_initfn },
+ "970MP_v1.0",
+ "970mp_v1.0",
+ "970MP_v1.1",
+ "970mp_v1.1",
/* POWER5+ */
- { .name = "POWER5+_v2.1", .initfn = spapr_cpu_core_POWER5plus_initfn },
+ "POWER5+_v2.1",
/* POWER7 */
- { .name = "POWER7_v2.3", .initfn = spapr_cpu_core_POWER7_initfn },
+ "POWER7_v2.3",
/* POWER7+ */
- { .name = "POWER7+_v2.1", .initfn = spapr_cpu_core_POWER7plus_initfn },
+ "POWER7+_v2.1",
/* POWER8 */
- { .name = "POWER8_v2.0", .initfn = spapr_cpu_core_POWER8_initfn },
+ "POWER8_v2.0",
/* POWER8E */
- { .name = "POWER8E_v2.1", .initfn = spapr_cpu_core_POWER8E_initfn },
+ "POWER8E_v2.1",
/* POWER8NVL */
- { .name = "POWER8NVL_v1.0", .initfn = spapr_cpu_core_POWER8NVL_initfn },
-
- { .name = NULL }
+ "POWER8NVL_v1.0",
};
-static void spapr_cpu_core_register(const SPAPRCoreInfo *info)
+void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
{
- TypeInfo type_info = {
- .parent = TYPE_SPAPR_CPU_CORE,
- .instance_size = sizeof(sPAPRCPUCore),
- .instance_init = info->initfn,
- };
-
- type_info.name = g_strdup_printf("%s-" TYPE_SPAPR_CPU_CORE, info->name);
- type_register(&type_info);
- g_free((void *)type_info.name);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc);
+
+ dc->realize = spapr_cpu_core_realize;
+ scc->cpu_class = cpu_class_by_name(TYPE_POWERPC_CPU, data);
+ g_assert(scc->cpu_class);
}
static const TypeInfo spapr_cpu_core_type_info = {
@@ -415,17 +375,27 @@ static const TypeInfo spapr_cpu_core_type_info = {
.parent = TYPE_CPU_CORE,
.abstract = true,
.instance_size = sizeof(sPAPRCPUCore),
- .class_init = spapr_cpu_core_class_init,
+ .class_size = sizeof(sPAPRCPUCoreClass),
};
static void spapr_cpu_core_register_types(void)
{
- const SPAPRCoreInfo *info = spapr_cores;
+ int i;
type_register_static(&spapr_cpu_core_type_info);
- while (info->name) {
- spapr_cpu_core_register(info);
- info++;
+
+ for (i = 0; i < ARRAY_SIZE(spapr_core_models); i++) {
+ TypeInfo type_info = {
+ .parent = TYPE_SPAPR_CPU_CORE,
+ .instance_size = sizeof(sPAPRCPUCore),
+ .class_init = spapr_cpu_core_class_init,
+ .class_data = (void *) spapr_core_models[i],
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_SPAPR_CPU_CORE,
+ spapr_core_models[i]);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
}
}
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index 26a067951c..a0c44ee593 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -20,20 +20,7 @@
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/ppc/spapr.h" /* for RTAS return codes */
-
-/* #define DEBUG_SPAPR_DRC */
-
-#ifdef DEBUG_SPAPR_DRC
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#define DPRINTFN(fmt, ...) \
- do { DPRINTF(fmt, ## __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#define DPRINTFN(fmt, ...) \
- do { } while (0)
-#endif
+#include "trace.h"
#define DRC_CONTAINER_PATH "/dr-connector"
#define DRC_INDEX_TYPE_SHIFT 28
@@ -69,7 +56,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- DPRINTFN("drc: %x, set_isolation_state: %x", get_index(drc), state);
+ trace_spapr_drc_set_isolation_state(get_index(drc), state);
if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
/* cannot unisolate a non-existent resource, and, or resources
@@ -81,6 +68,23 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
}
}
+ /*
+ * Fail any requests to ISOLATE the LMB DRC if this LMB doesn't
+ * belong to a DIMM device that is marked for removal.
+ *
+ * Currently the guest userspace tool drmgr that drives the memory
+ * hotplug/unplug will just try to remove a set of 'removable' LMBs
+ * in response to a hot unplug request that is based on drc-count.
+ * If the LMB being removed doesn't belong to a DIMM device that is
+ * actually being unplugged, fail the isolation request here.
+ */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_LMB) {
+ if ((state == SPAPR_DR_ISOLATION_STATE_ISOLATED) &&
+ !drc->awaiting_release) {
+ return RTAS_OUT_HW_ERROR;
+ }
+ }
+
drc->isolation_state = state;
if (drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
@@ -94,11 +98,11 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
*/
if (drc->awaiting_release) {
if (drc->configured) {
- DPRINTFN("finalizing device removal");
+ trace_spapr_drc_set_isolation_state_finalizing(get_index(drc));
drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
drc->detach_cb_opaque, NULL);
} else {
- DPRINTFN("deferring device removal on unconfigured device\n");
+ trace_spapr_drc_set_isolation_state_deferring(get_index(drc));
}
}
drc->configured = false;
@@ -110,7 +114,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
static uint32_t set_indicator_state(sPAPRDRConnector *drc,
sPAPRDRIndicatorState state)
{
- DPRINTFN("drc: %x, set_indicator_state: %x", get_index(drc), state);
+ trace_spapr_drc_set_indicator_state(get_index(drc), state);
drc->indicator_state = state;
return RTAS_OUT_SUCCESS;
}
@@ -120,7 +124,7 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- DPRINTFN("drc: %x, set_allocation_state: %x", get_index(drc), state);
+ trace_spapr_drc_set_allocation_state(get_index(drc), state);
if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
/* if there's no resource/device associated with the DRC, there's
@@ -137,7 +141,7 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
drc->allocation_state = state;
if (drc->awaiting_release &&
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
- DPRINTFN("finalizing device removal");
+ trace_spapr_drc_set_allocation_state_finalizing(get_index(drc));
drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
drc->detach_cb_opaque, NULL);
} else if (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
@@ -167,12 +171,11 @@ static const void *get_fdt(sPAPRDRConnector *drc, int *fdt_start_offset)
static void set_configured(sPAPRDRConnector *drc)
{
- DPRINTFN("drc: %x, set_configured", get_index(drc));
+ trace_spapr_drc_set_configured(get_index(drc));
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
/* guest should not be configuring an isolated device */
- DPRINTFN("drc: %x, set_configured: skipping isolated device",
- get_index(drc));
+ trace_spapr_drc_set_configured_skipping(get_index(drc));
return;
}
drc->configured = true;
@@ -222,7 +225,7 @@ static uint32_t entity_sense(sPAPRDRConnector *drc, sPAPRDREntitySense *state)
}
}
- DPRINTFN("drc: %x, entity_sense: %x", get_index(drc), state);
+ trace_spapr_drc_entity_sense(get_index(drc), *state);
return RTAS_OUT_SUCCESS;
}
@@ -336,7 +339,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
int fdt_start_offset, bool coldplug, Error **errp)
{
- DPRINTFN("drc: %x, attach", get_index(drc));
+ trace_spapr_drc_attach(get_index(drc));
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
error_setg(errp, "an attached device is still awaiting release");
@@ -389,7 +392,7 @@ static void detach(sPAPRDRConnector *drc, DeviceState *d,
spapr_drc_detach_cb *detach_cb,
void *detach_cb_opaque, Error **errp)
{
- DPRINTFN("drc: %x, detach", get_index(drc));
+ trace_spapr_drc_detach(get_index(drc));
drc->detach_cb = detach_cb;
drc->detach_cb_opaque = detach_cb_opaque;
@@ -415,21 +418,21 @@ static void detach(sPAPRDRConnector *drc, DeviceState *d,
}
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
- DPRINTFN("awaiting transition to isolated state before removal");
+ trace_spapr_drc_awaiting_isolated(get_index(drc));
drc->awaiting_release = true;
return;
}
if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
- DPRINTFN("awaiting transition to unusable state before removal");
+ trace_spapr_drc_awaiting_unusable(get_index(drc));
drc->awaiting_release = true;
return;
}
if (drc->awaiting_allocation) {
drc->awaiting_release = true;
- DPRINTFN("awaiting allocation to complete before removal");
+ trace_spapr_drc_awaiting_allocation(get_index(drc));
return;
}
@@ -460,7 +463,7 @@ static void reset(DeviceState *d)
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
sPAPRDREntitySense state;
- DPRINTFN("drc reset: %x", drck->get_index(drc));
+ trace_spapr_drc_reset(drck->get_index(drc));
/* immediately upon reset we can safely assume DRCs whose devices
* are pending removal can be safely removed, and that they will
* subsequently be left in an ISOLATED state. move the DRC to this
@@ -502,7 +505,7 @@ static void realize(DeviceState *d, Error **errp)
gchar *child_name;
Error *err = NULL;
- DPRINTFN("drc realize: %x", drck->get_index(drc));
+ trace_spapr_drc_realize(drck->get_index(drc));
/* NOTE: we do this as part of realize/unrealize due to the fact
* that the guest will communicate with the DRC via RTAS calls
* referencing the global DRC index. By unlinking the DRC
@@ -513,7 +516,7 @@ static void realize(DeviceState *d, Error **errp)
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
child_name = object_get_canonical_path_component(OBJECT(drc));
- DPRINTFN("drc child name: %s", child_name);
+ trace_spapr_drc_realize_child(drck->get_index(drc), child_name);
object_property_add_alias(root_container, link_name,
drc->owner, child_name, &err);
if (err) {
@@ -521,7 +524,7 @@ static void realize(DeviceState *d, Error **errp)
object_unref(OBJECT(drc));
}
g_free(child_name);
- DPRINTFN("drc realize complete");
+ trace_spapr_drc_realize_complete(drck->get_index(drc));
}
static void unrealize(DeviceState *d, Error **errp)
@@ -532,7 +535,7 @@ static void unrealize(DeviceState *d, Error **errp)
char name[256];
Error *err = NULL;
- DPRINTFN("drc unrealize: %x", drck->get_index(drc));
+ trace_spapr_drc_unrealize(drck->get_index(drc));
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
snprintf(name, sizeof(name), "%x", drck->get_index(drc));
object_property_del(root_container, name, &err);
@@ -816,7 +819,7 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
drc_indexes->data,
drc_indexes->len * sizeof(uint32_t));
if (ret) {
- fprintf(stderr, "Couldn't create ibm,drc-indexes property\n");
+ error_report("Couldn't create ibm,drc-indexes property");
goto out;
}
@@ -824,21 +827,21 @@ int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
drc_power_domains->data,
drc_power_domains->len * sizeof(uint32_t));
if (ret) {
- fprintf(stderr, "Couldn't finalize ibm,drc-power-domains property\n");
+ error_report("Couldn't finalize ibm,drc-power-domains property");
goto out;
}
ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-names",
drc_names->str, drc_names->len);
if (ret) {
- fprintf(stderr, "Couldn't finalize ibm,drc-names property\n");
+ error_report("Couldn't finalize ibm,drc-names property");
goto out;
}
ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-types",
drc_types->str, drc_types->len);
if (ret) {
- fprintf(stderr, "Couldn't finalize ibm,drc-types property\n");
+ error_report("Couldn't finalize ibm,drc-types property");
goto out;
}
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index b0668b34a9..f85a9c32a7 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -32,6 +32,7 @@
#include "hw/qdev.h"
#include "sysemu/device_tree.h"
+#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci/pci.h"
@@ -39,6 +40,7 @@
#include "hw/ppc/spapr_drc.h"
#include "qemu/help_option.h"
#include "qemu/bcd.h"
+#include "hw/ppc/spapr_ovec.h"
#include <libfdt.h>
struct rtas_error_log {
@@ -173,6 +175,16 @@ struct epow_log_full {
struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;
+union drc_identifier {
+ uint32_t index;
+ uint32_t count;
+ struct {
+ uint32_t count;
+ uint32_t index;
+ } count_indexed;
+ char name[1];
+} QEMU_PACKED;
+
struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
struct rtas_event_log_v6_section_header hdr;
@@ -189,12 +201,9 @@ struct rtas_event_log_v6_hp {
#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
+#define RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED 4
uint8_t reserved;
- union {
- uint32_t index;
- uint32_t count;
- char name[1];
- } drc;
+ union drc_identifier drc_id;
} QEMU_PACKED;
struct hp_log_full {
@@ -205,38 +214,132 @@ struct hp_log_full {
struct rtas_event_log_v6_hp hp;
} QEMU_PACKED;
-#define EVENT_MASK_INTERNAL_ERRORS 0x80000000
-#define EVENT_MASK_EPOW 0x40000000
-#define EVENT_MASK_HOTPLUG 0x10000000
-#define EVENT_MASK_IO 0x08000000
-
-#define _FDT(exp) \
- do { \
- int ret = (exp); \
- if (ret < 0) { \
- fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
- #exp, fdt_strerror(ret)); \
- exit(1); \
- } \
- } while (0)
-
-void spapr_events_fdt_skel(void *fdt, uint32_t check_exception_irq)
+typedef enum EventClass {
+ EVENT_CLASS_INTERNAL_ERRORS = 0,
+ EVENT_CLASS_EPOW = 1,
+ EVENT_CLASS_RESERVED = 2,
+ EVENT_CLASS_HOT_PLUG = 3,
+ EVENT_CLASS_IO = 4,
+ EVENT_CLASS_MAX
+} EventClassIndex;
+#define EVENT_CLASS_MASK(index) (1 << (31 - index))
+
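The mask macro reproduces the fixed EVENT_MASK_* values the removed code used (internal errors 0x80000000, EPOW 0x40000000, hot plug 0x10000000, IO 0x08000000), now derived from the class index. A quick check of the mapping:

    /* Sketch: EVENT_CLASS_MASK(i) == 1 << (31 - i) for the classes above. */
    #include <assert.h>

    int main(void)
    {
        assert((1u << (31 - 0)) == 0x80000000u);  /* EVENT_CLASS_INTERNAL_ERRORS */
        assert((1u << (31 - 1)) == 0x40000000u);  /* EVENT_CLASS_EPOW */
        assert((1u << (31 - 3)) == 0x10000000u);  /* EVENT_CLASS_HOT_PLUG */
        assert((1u << (31 - 4)) == 0x08000000u);  /* EVENT_CLASS_IO */
        return 0;
    }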
+static const char * const event_names[EVENT_CLASS_MAX] = {
+ [EVENT_CLASS_INTERNAL_ERRORS] = "internal-errors",
+ [EVENT_CLASS_EPOW] = "epow-events",
+ [EVENT_CLASS_HOT_PLUG] = "hot-plug-events",
+ [EVENT_CLASS_IO] = "ibm,io-events",
+};
+
+struct sPAPREventSource {
+ int irq;
+ uint32_t mask;
+ bool enabled;
+};
+
+static sPAPREventSource *spapr_event_sources_new(void)
{
- uint32_t irq_ranges[] = {cpu_to_be32(check_exception_irq), cpu_to_be32(1)};
- uint32_t interrupts[] = {cpu_to_be32(check_exception_irq), 0};
+ return g_new0(sPAPREventSource, EVENT_CLASS_MAX);
+}
- _FDT((fdt_begin_node(fdt, "event-sources")));
+static void spapr_event_sources_register(sPAPREventSource *event_sources,
+ EventClassIndex index, int irq)
+{
+ /* we only support 1 irq per event class at the moment */
+ g_assert(event_sources);
+ g_assert(!event_sources[index].enabled);
+ event_sources[index].irq = irq;
+ event_sources[index].mask = EVENT_CLASS_MASK(index);
+ event_sources[index].enabled = true;
+}
- _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
- _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
- _FDT((fdt_property(fdt, "interrupt-ranges",
- irq_ranges, sizeof(irq_ranges))));
+static const sPAPREventSource *
+spapr_event_sources_get_source(sPAPREventSource *event_sources,
+ EventClassIndex index)
+{
+ g_assert(index < EVENT_CLASS_MAX);
+ g_assert(event_sources);
+
+ return &event_sources[index];
+}
+
+void spapr_dt_events(sPAPRMachineState *spapr, void *fdt)
+{
+ uint32_t irq_ranges[EVENT_CLASS_MAX * 2];
+ int i, count = 0, event_sources;
+ sPAPREventSource *events = spapr->event_sources;
+
+ g_assert(events);
- _FDT((fdt_begin_node(fdt, "epow-events")));
- _FDT((fdt_property(fdt, "interrupts", interrupts, sizeof(interrupts))));
- _FDT((fdt_end_node(fdt)));
+ _FDT(event_sources = fdt_add_subnode(fdt, 0, "event-sources"));
- _FDT((fdt_end_node(fdt)));
+ for (i = 0, count = 0; i < EVENT_CLASS_MAX; i++) {
+ int node_offset;
+ uint32_t interrupts[2];
+ const sPAPREventSource *source =
+ spapr_event_sources_get_source(events, i);
+ const char *source_name = event_names[i];
+
+ if (!source->enabled) {
+ continue;
+ }
+
+ interrupts[0] = cpu_to_be32(source->irq);
+ interrupts[1] = 0;
+
+ _FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
+ _FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
+ sizeof(interrupts)));
+
+ irq_ranges[count++] = interrupts[0];
+ irq_ranges[count++] = cpu_to_be32(1);
+ }
+
+ irq_ranges[count] = cpu_to_be32(count);
+ count++;
+
+ _FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
+ _FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
+ _FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
+ irq_ranges, count * sizeof(uint32_t))));
+}
+
+static const sPAPREventSource *
+rtas_event_log_to_source(sPAPRMachineState *spapr, int log_type)
+{
+ const sPAPREventSource *source;
+
+ g_assert(spapr->event_sources);
+
+ switch (log_type) {
+ case RTAS_LOG_TYPE_HOTPLUG:
+ source = spapr_event_sources_get_source(spapr->event_sources,
+ EVENT_CLASS_HOT_PLUG);
+ if (spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT)) {
+ g_assert(source->enabled);
+ break;
+ }
+ /* fall back to epow for legacy hotplug interrupt source */
+ case RTAS_LOG_TYPE_EPOW:
+ source = spapr_event_sources_get_source(spapr->event_sources,
+ EVENT_CLASS_EPOW);
+ break;
+ default:
+ source = NULL;
+ }
+
+ return source;
+}
+
+static int rtas_event_log_to_irq(sPAPRMachineState *spapr, int log_type)
+{
+ const sPAPREventSource *source;
+
+ source = rtas_event_log_to_source(spapr, log_type);
+ g_assert(source);
+ g_assert(source->enabled);
+
+ return source->irq;
}
static void rtas_event_log_queue(int log_type, void *data, bool exception)
@@ -257,19 +360,15 @@ static sPAPREventLogEntry *rtas_event_log_dequeue(uint32_t event_mask,
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
sPAPREventLogEntry *entry = NULL;
- /* we only queue EPOW events atm. */
- if ((event_mask & EVENT_MASK_EPOW) == 0) {
- return NULL;
- }
-
QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ const sPAPREventSource *source =
+ rtas_event_log_to_source(spapr, entry->log_type);
+
if (entry->exception != exception) {
continue;
}
- /* EPOW and hotplug events are surfaced in the same manner */
- if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
- entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ if (source->mask & event_mask) {
break;
}
}
@@ -286,19 +385,15 @@ static bool rtas_event_log_contains(uint32_t event_mask, bool exception)
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
sPAPREventLogEntry *entry = NULL;
- /* we only queue EPOW events atm. */
- if ((event_mask & EVENT_MASK_EPOW) == 0) {
- return false;
- }
-
QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ const sPAPREventSource *source =
+ rtas_event_log_to_source(spapr, entry->log_type);
+
if (entry->exception != exception) {
continue;
}
- /* EPOW and hotplug events are surfaced in the same manner */
- if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
- entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ if (source->mask & event_mask) {
return true;
}
}
@@ -386,7 +481,9 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow, true);
- qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq));
+ qemu_irq_pulse(xics_get_qirq(spapr->xics,
+ rtas_event_log_to_irq(spapr,
+ RTAS_LOG_TYPE_EPOW)));
}
static void spapr_hotplug_set_signalled(uint32_t drc_index)
@@ -398,7 +495,7 @@ static void spapr_hotplug_set_signalled(uint32_t drc_index)
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
sPAPRDRConnectorType drc_type,
- uint32_t drc)
+ union drc_identifier *drc_id)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
struct hp_log_full *new_hp;
@@ -443,7 +540,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
case SPAPR_DR_CONNECTOR_TYPE_PCI:
hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
if (hp->hotplug_action == RTAS_LOG_V6_HP_ACTION_ADD) {
- spapr_hotplug_set_signalled(drc);
+ spapr_hotplug_set_signalled(drc_id->index);
}
break;
case SPAPR_DR_CONNECTOR_TYPE_LMB:
@@ -461,48 +558,89 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
}
if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
- hp->drc.count = cpu_to_be32(drc);
+ hp->drc_id.count = cpu_to_be32(drc_id->count);
} else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
- hp->drc.index = cpu_to_be32(drc);
+ hp->drc_id.index = cpu_to_be32(drc_id->index);
+ } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
+ /* we should not be using the count_indexed value unless the guest
+ * supports a dedicated hotplug event source
+ */
+ g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
+ hp->drc_id.count_indexed.count =
+ cpu_to_be32(drc_id->count_indexed.count);
+ hp->drc_id.count_indexed.index =
+ cpu_to_be32(drc_id->count_indexed.index);
}
rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);
- qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq));
+ qemu_irq_pulse(xics_get_qirq(spapr->xics,
+ rtas_event_log_to_irq(spapr,
+ RTAS_LOG_TYPE_HOTPLUG)));
}
void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
sPAPRDRConnectorType drc_type = drck->get_type(drc);
- uint32_t index = drck->get_index(drc);
+ union drc_identifier drc_id;
+ drc_id.index = drck->get_index(drc);
spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
- RTAS_LOG_V6_HP_ACTION_ADD, drc_type, index);
+ RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}
void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc)
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
sPAPRDRConnectorType drc_type = drck->get_type(drc);
- uint32_t index = drck->get_index(drc);
+ union drc_identifier drc_id;
+ drc_id.index = drck->get_index(drc);
spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
- RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, index);
+ RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}
void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
uint32_t count)
{
+ union drc_identifier drc_id;
+
+ drc_id.count = count;
spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
- RTAS_LOG_V6_HP_ACTION_ADD, drc_type, count);
+ RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
}
void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
uint32_t count)
{
+ union drc_identifier drc_id;
+
+ drc_id.count = count;
spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
- RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, count);
+ RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
+}
+
+void spapr_hotplug_req_add_by_count_indexed(sPAPRDRConnectorType drc_type,
+ uint32_t count, uint32_t index)
+{
+ union drc_identifier drc_id;
+
+ drc_id.count_indexed.count = count;
+ drc_id.count_indexed.index = index;
+ spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
+ RTAS_LOG_V6_HP_ACTION_ADD, drc_type, &drc_id);
+}
+
+void spapr_hotplug_req_remove_by_count_indexed(sPAPRDRConnectorType drc_type,
+ uint32_t count, uint32_t index)
+{
+ union drc_identifier drc_id;
+
+ drc_id.count_indexed.count = count;
+ drc_id.count_indexed.index = index;
+ spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED,
+ RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, &drc_id);
}
static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
@@ -514,6 +652,7 @@ static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint64_t xinfo;
sPAPREventLogEntry *event;
struct rtas_error_log *hdr;
+ int i;
if ((nargs < 6) || (nargs > 7) || nret != 1) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -550,8 +689,14 @@ static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
* do the latter here, since our code relies on edge-triggered
* interrupts.
*/
- if (rtas_event_log_contains(mask, true)) {
- qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq));
+ for (i = 0; i < EVENT_CLASS_MAX; i++) {
+ if (rtas_event_log_contains(EVENT_CLASS_MASK(i), true)) {
+ const sPAPREventSource *source =
+ spapr_event_sources_get_source(spapr->event_sources, i);
+
+ g_assert(source->enabled);
+ qemu_irq_pulse(xics_get_qirq(spapr->xics, source->irq));
+ }
}
return;
@@ -603,8 +748,27 @@ out_no_events:
void spapr_events_init(sPAPRMachineState *spapr)
{
QTAILQ_INIT(&spapr->pending_events);
- spapr->check_exception_irq = xics_spapr_alloc(spapr->xics, 0, 0, false,
- &error_fatal);
+
+ spapr->event_sources = spapr_event_sources_new();
+
+ spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
+ xics_spapr_alloc(spapr->xics, 0, false,
+ &error_fatal));
+
+ /* NOTE: if machine supports modern/dedicated hotplug event source,
+ * we add it to the device-tree unconditionally. This means we may
+ * have cases where the source is enabled in QEMU, but unused by the
+ * guest because it does not support modern hotplug events, so we
+ * take care to rely on checking for negotiation of OV5_HP_EVT option
+ * before attempting to use it to signal events, rather than simply
+ * checking that it's enabled.
+ */
+ if (spapr->use_hotplug_event_source) {
+ spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
+ xics_spapr_alloc(spapr->xics, 0, false,
+ &error_fatal));
+ }
+
spapr->epow_notifier.notify = spapr_powerdown_req;
qemu_register_powerdown_notifier(&spapr->epow_notifier);
spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 73af112e1d..9a9bedf1bd 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -11,21 +11,21 @@
#include "trace.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
+#include "hw/ppc/spapr_ovec.h"
struct SPRSyncState {
- CPUState *cs;
int spr;
target_ulong value;
target_ulong mask;
};
-static void do_spr_sync(void *arg)
+static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
{
- struct SPRSyncState *s = arg;
- PowerPCCPU *cpu = POWERPC_CPU(s->cs);
+ struct SPRSyncState *s = arg.host_ptr;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- cpu_synchronize_state(s->cs);
+ cpu_synchronize_state(cs);
env->spr[s->spr] &= ~s->mask;
env->spr[s->spr] |= s->value;
}
@@ -34,12 +34,11 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
target_ulong mask)
{
struct SPRSyncState s = {
- .cs = cs,
.spr = spr,
.value = value,
.mask = mask
};
- run_on_cpu(cs, do_spr_sync, &s);
+ run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
}
static bool has_spr(PowerPCCPU *cpu, int spr)
@@ -201,7 +200,7 @@ static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
switch (ret) {
case REMOVE_SUCCESS:
- check_tlb_flush(env);
+ check_tlb_flush(env, true);
return H_SUCCESS;
case REMOVE_NOT_FOUND:
@@ -282,7 +281,7 @@ static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
}
exit:
- check_tlb_flush(env);
+ check_tlb_flush(env, true);
return rc;
}
@@ -319,6 +318,8 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
ppc_hash64_store_hpte(cpu, pte_index,
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
+ /* Flush the tlb */
+ check_tlb_flush(env, true);
/* Don't need a memory barrier, due to qemu's global lock */
ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
return H_SUCCESS;
@@ -880,44 +881,18 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return ret;
}
-/*
- * Return the offset to the requested option vector @vector in the
- * option vector table @table.
- */
-static target_ulong cas_get_option_vector(int vector, target_ulong table)
-{
- int i;
- char nr_vectors, nr_entries;
-
- if (!table) {
- return 0;
- }
-
- nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
- if (!vector || vector > nr_vectors) {
- return 0;
- }
- table++; /* skip nr option vectors */
-
- for (i = 0; i < vector - 1; i++) {
- nr_entries = ldl_phys(&address_space_memory, table) >> 24;
- table += nr_entries + 2;
- }
- return table;
-}
-
typedef struct {
- PowerPCCPU *cpu;
uint32_t cpu_version;
Error *err;
} SetCompatState;
-static void do_set_compat(void *arg)
+static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
{
- SetCompatState *s = arg;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ SetCompatState *s = arg.host_ptr;
- cpu_synchronize_state(CPU(s->cpu));
- ppc_set_compat(s->cpu, s->cpu_version, &s->err);
+ cpu_synchronize_state(cs);
+ ppc_set_compat(cpu, s->cpu_version, &s->err);
}
#define get_compat_level(cpuver) ( \
@@ -961,23 +936,21 @@ static void cas_handle_compat_cpu(PowerPCCPUClass *pcc, uint32_t pvr,
}
}
-#define OV5_DRCONF_MEMORY 0x20
-
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
sPAPRMachineState *spapr,
target_ulong opcode,
target_ulong *args)
{
target_ulong list = ppc64_phys_to_real(args[0]);
- target_ulong ov_table, ov5;
+ target_ulong ov_table;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu_);
CPUState *cs;
- bool cpu_match = false, cpu_update = true, memory_update = false;
+ bool cpu_match = false, cpu_update = true;
unsigned old_cpu_version = cpu_->cpu_version;
unsigned compat_lvl = 0, cpu_version = 0;
unsigned max_lvl = get_compat_level(cpu_->max_compat);
int counter;
- char ov5_byte2;
+ sPAPROptionVector *ov5_guest, *ov5_cas_old, *ov5_updates;
/* Parse PVR list */
for (counter = 0; counter < 512; ++counter) {
@@ -1013,12 +986,11 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
if (old_cpu_version != cpu_version) {
CPU_FOREACH(cs) {
SetCompatState s = {
- .cpu = POWERPC_CPU(cs),
.cpu_version = cpu_version,
.err = NULL,
};
- run_on_cpu(cs, do_set_compat, &s);
+ run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));
if (s.err) {
error_report_err(s.err);
@@ -1034,19 +1006,34 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
/* For future use: here @ov_table points to the first option vector */
ov_table = list;
- ov5 = cas_get_option_vector(5, ov_table);
- if (!ov5) {
- return H_SUCCESS;
- }
+ ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
+
+ /* NOTE: there are actually a number of ov5 bits where input from the
+ * guest is always zero, and the platform/QEMU enables them independently
+ * of guest input. To model these properly we'd want some sort of mask,
+ * but since they only currently apply to memory migration as defined
+ * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
+ * to worry about this for now.
+ */
+ ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas);
+ /* full range of negotiated ov5 capabilities */
+ spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
+ spapr_ovec_cleanup(ov5_guest);
+ /* capabilities that have been added since CAS-generated guest reset.
+ * if capabilities have since been removed, generate another reset
+ */
+ ov5_updates = spapr_ovec_new();
+ spapr->cas_reboot = spapr_ovec_diff(ov5_updates,
+ ov5_cas_old, spapr->ov5_cas);
- /* @list now points to OV 5 */
- ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
- if (ov5_byte2 & OV5_DRCONF_MEMORY) {
- memory_update = true;
+ if (!spapr->cas_reboot) {
+ spapr->cas_reboot =
+ (spapr_h_cas_compose_response(spapr, args[1], args[2], cpu_update,
+ ov5_updates) != 0);
}
+ spapr_ovec_cleanup(ov5_updates);
- if (spapr_h_cas_compose_response(spapr, args[1], args[2],
- cpu_update, memory_update)) {
+ if (spapr->cas_reboot) {
qemu_system_reset_request();
}
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index 6bc4d4db33..ae30bbe30f 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -156,14 +156,17 @@ static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
return 1ULL << tcet->page_shift;
}
-static void spapr_tce_notify_started(MemoryRegion *iommu)
+static void spapr_tce_notify_flag_changed(MemoryRegion *iommu,
+ IOMMUNotifierFlag old,
+ IOMMUNotifierFlag new)
{
- spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true);
-}
+ struct sPAPRTCETable *tbl = container_of(iommu, sPAPRTCETable, iommu);
-static void spapr_tce_notify_stopped(MemoryRegion *iommu)
-{
- spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), false);
+ if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
+ spapr_tce_set_need_vfio(tbl, true);
+ } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
+ spapr_tce_set_need_vfio(tbl, false);
+ }
}
static int spapr_tce_table_post_load(void *opaque, int version_id)
@@ -246,8 +249,7 @@ static const VMStateDescription vmstate_spapr_tce_table = {
static MemoryRegionIOMMUOps spapr_iommu_ops = {
.translate = spapr_tce_translate_iommu,
.get_min_page_size = spapr_tce_get_min_page_size,
- .notify_started = spapr_tce_notify_started,
- .notify_stopped = spapr_tce_notify_stopped,
+ .notify_flag_changed = spapr_tce_notify_flag_changed,
};
static int spapr_tce_table_realize(DeviceState *dev)
@@ -310,8 +312,8 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
char tmp[32];
if (spapr_tce_find_by_liobn(liobn)) {
- fprintf(stderr, "Attempted to create TCE table with duplicate"
- " LIOBN 0x%x\n", liobn);
+ error_report("Attempted to create TCE table with duplicate"
+ " LIOBN 0x%x", liobn);
return NULL;
}
diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c
new file mode 100644
index 0000000000..3eb1d5976f
--- /dev/null
+++ b/hw/ppc/spapr_ovec.c
@@ -0,0 +1,254 @@
+/*
+ * QEMU SPAPR Architecture Option Vector Helper Functions
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Authors:
+ * Bharata B Rao <bharata@linux.vnet.ibm.com>
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ppc/spapr_ovec.h"
+#include "qemu/bitmap.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+#include <libfdt.h>
+
+/* #define DEBUG_SPAPR_OVEC */
+
+#ifdef DEBUG_SPAPR_OVEC
+#define DPRINTFN(fmt, ...) \
+ do { fprintf(stderr, fmt "\n", ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTFN(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define OV_MAXBYTES 256 /* not including length byte */
+#define OV_MAXBITS (OV_MAXBYTES * BITS_PER_BYTE)
+
+/* we *could* work with bitmaps directly, but handling the bitmap privately
+ * allows us to more safely make assumptions about the bitmap size and
+ * simplify the calling code somewhat
+ */
+struct sPAPROptionVector {
+ unsigned long *bitmap;
+ int32_t bitmap_size; /* only used for migration */
+};
+
+const VMStateDescription vmstate_spapr_ovec = {
+ .name = "spapr_option_vector",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BITMAP(bitmap, sPAPROptionVector, 1, bitmap_size),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+sPAPROptionVector *spapr_ovec_new(void)
+{
+ sPAPROptionVector *ov;
+
+ ov = g_new0(sPAPROptionVector, 1);
+ ov->bitmap = bitmap_new(OV_MAXBITS);
+ ov->bitmap_size = OV_MAXBITS;
+
+ return ov;
+}
+
+sPAPROptionVector *spapr_ovec_clone(sPAPROptionVector *ov_orig)
+{
+ sPAPROptionVector *ov;
+
+ g_assert(ov_orig);
+
+ ov = spapr_ovec_new();
+ bitmap_copy(ov->bitmap, ov_orig->bitmap, OV_MAXBITS);
+
+ return ov;
+}
+
+void spapr_ovec_intersect(sPAPROptionVector *ov,
+ sPAPROptionVector *ov1,
+ sPAPROptionVector *ov2)
+{
+ g_assert(ov);
+ g_assert(ov1);
+ g_assert(ov2);
+
+ bitmap_and(ov->bitmap, ov1->bitmap, ov2->bitmap, OV_MAXBITS);
+}
+
+/* returns true if options bits were removed, false otherwise */
+bool spapr_ovec_diff(sPAPROptionVector *ov,
+ sPAPROptionVector *ov_old,
+ sPAPROptionVector *ov_new)
+{
+ unsigned long *change_mask = bitmap_new(OV_MAXBITS);
+ unsigned long *removed_bits = bitmap_new(OV_MAXBITS);
+ bool bits_were_removed = false;
+
+ g_assert(ov);
+ g_assert(ov_old);
+ g_assert(ov_new);
+
+ bitmap_xor(change_mask, ov_old->bitmap, ov_new->bitmap, OV_MAXBITS);
+ bitmap_and(ov->bitmap, ov_new->bitmap, change_mask, OV_MAXBITS);
+ bitmap_and(removed_bits, ov_old->bitmap, change_mask, OV_MAXBITS);
+
+ if (!bitmap_empty(removed_bits, OV_MAXBITS)) {
+ bits_were_removed = true;
+ }
+
+ g_free(change_mask);
+ g_free(removed_bits);
+
+ return bits_were_removed;
+}
+
+void spapr_ovec_cleanup(sPAPROptionVector *ov)
+{
+ if (ov) {
+ g_free(ov->bitmap);
+ g_free(ov);
+ }
+}
+
+void spapr_ovec_set(sPAPROptionVector *ov, long bitnr)
+{
+ g_assert(ov);
+ g_assert_cmpint(bitnr, <, OV_MAXBITS);
+
+ set_bit(bitnr, ov->bitmap);
+}
+
+void spapr_ovec_clear(sPAPROptionVector *ov, long bitnr)
+{
+ g_assert(ov);
+ g_assert_cmpint(bitnr, <, OV_MAXBITS);
+
+ clear_bit(bitnr, ov->bitmap);
+}
+
+bool spapr_ovec_test(sPAPROptionVector *ov, long bitnr)
+{
+ g_assert(ov);
+ g_assert_cmpint(bitnr, <, OV_MAXBITS);
+
+ return test_bit(bitnr, ov->bitmap) ? true : false;
+}
+
+static void guest_byte_to_bitmap(uint8_t entry, unsigned long *bitmap,
+ long bitmap_offset)
+{
+ int i;
+
+ for (i = 0; i < BITS_PER_BYTE; i++) {
+ if (entry & (1 << (BITS_PER_BYTE - 1 - i))) {
+ bitmap_set(bitmap, bitmap_offset + i, 1);
+ }
+ }
+}
+
+static uint8_t guest_byte_from_bitmap(unsigned long *bitmap, long bitmap_offset)
+{
+ uint8_t entry = 0;
+ int i;
+
+ for (i = 0; i < BITS_PER_BYTE; i++) {
+ if (test_bit(bitmap_offset + i, bitmap)) {
+ entry |= (1 << (BITS_PER_BYTE - 1 - i));
+ }
+ }
+
+ return entry;
+}
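A small illustration of the byte ordering these helpers implement: option vector bytes are MSB-first, so bit offset 0 in the bitmap corresponds to the most significant bit of byte 0. The round trip below is a standalone sketch, not part of the patch:

    /* Sketch: MSB-first byte <-> bitmap-offset mapping used above. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* forward direction, as in guest_byte_to_bitmap(): 0x80 -> bit offset 0 */
        uint8_t byte = 0x80;
        int first_set_offset = -1;
        for (int i = 0; i < 8; i++) {
            if (byte & (1 << (8 - 1 - i))) {
                first_set_offset = i;
                break;
            }
        }
        assert(first_set_offset == 0);

        /* inverse direction, as in guest_byte_from_bitmap(): bit offset 7 -> 0x01 */
        uint8_t out = 0;
        out |= 1 << (8 - 1 - 7);
        assert(out == 0x01);
        return 0;
    }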
+
+static target_ulong vector_addr(target_ulong table_addr, int vector)
+{
+ uint16_t vector_count, vector_len;
+ int i;
+
+ vector_count = ldub_phys(&address_space_memory, table_addr) + 1;
+ if (vector > vector_count) {
+ return 0;
+ }
+ table_addr++; /* skip nr option vectors */
+
+ for (i = 0; i < vector - 1; i++) {
+ vector_len = ldub_phys(&address_space_memory, table_addr) + 1;
+ table_addr += vector_len + 1; /* bit-vector + length byte */
+ }
+ return table_addr;
+}
+
+sPAPROptionVector *spapr_ovec_parse_vector(target_ulong table_addr, int vector)
+{
+ sPAPROptionVector *ov;
+ target_ulong addr;
+ uint16_t vector_len;
+ int i;
+
+ g_assert(table_addr);
+ g_assert_cmpint(vector, >=, 1); /* vector numbering starts at 1 */
+
+ addr = vector_addr(table_addr, vector);
+ if (!addr) {
+ /* specified vector isn't present */
+ return NULL;
+ }
+
+ vector_len = ldub_phys(&address_space_memory, addr++) + 1;
+ g_assert_cmpint(vector_len, <=, OV_MAXBYTES);
+ ov = spapr_ovec_new();
+
+ for (i = 0; i < vector_len; i++) {
+ uint8_t entry = ldub_phys(&address_space_memory, addr + i);
+ if (entry) {
+ DPRINTFN("read guest vector %2d, byte %3d / %3d: 0x%.2x",
+ vector, i + 1, vector_len, entry);
+ guest_byte_to_bitmap(entry, ov->bitmap, i * BITS_PER_BYTE);
+ }
+ }
+
+ return ov;
+}
+
+int spapr_ovec_populate_dt(void *fdt, int fdt_offset,
+ sPAPROptionVector *ov, const char *name)
+{
+ uint8_t vec[OV_MAXBYTES + 1];
+ uint16_t vec_len;
+ unsigned long lastbit;
+ int i;
+
+ g_assert(ov);
+
+ lastbit = find_last_bit(ov->bitmap, OV_MAXBITS);
+ /* if no bits are set, include at least 1 byte of the vector so we can
+ * still encode this in the device tree while abiding by the same
+ * encoding/sizing expected in ibm,client-architecture-support
+ */
+ vec_len = (lastbit == OV_MAXBITS) ? 1 : lastbit / BITS_PER_BYTE + 1;
+ g_assert_cmpint(vec_len, <=, OV_MAXBYTES);
+ /* guest expects vector len encoded as vec_len - 1, since the length byte
+ * is assumed and not included, and the first byte of the vector
+ * is assumed as well
+ */
+ vec[0] = vec_len - 1;
+
+ for (i = 1; i < vec_len + 1; i++) {
+ vec[i] = guest_byte_from_bitmap(ov->bitmap, (i - 1) * BITS_PER_BYTE);
+ if (vec[i]) {
+ DPRINTFN("encoding guest vector byte %3d / %3d: 0x%.2x",
+ i, vec_len, vec[i]);
+ }
+ }
+
+ return fdt_setprop(fdt, fdt_offset, name, vec, vec_len);
+}
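A worked example of the length derivation above: if the highest set bit is bit 23, it falls in byte 2 of the vector, so vec_len is 3 and the guest-visible length byte vec[0] is 2; an empty bitmap (find_last_bit() returning OV_MAXBITS) still emits one zero byte. A sketch:

    /* Sketch: vector length derivation as in spapr_ovec_populate_dt(). */
    #include <assert.h>

    #define OV_MAXBITS (256 * 8)

    static unsigned vec_len_for(unsigned long lastbit)
    {
        return (lastbit == OV_MAXBITS) ? 1 : lastbit / 8 + 1;
    }

    int main(void)
    {
        assert(vec_len_for(23) == 3);          /* bit 23 sits in byte 2 -> 3 bytes */
        assert(vec_len_for(0) == 1);           /* bit 0 -> 1 byte */
        assert(vec_len_for(OV_MAXBITS) == 1);  /* empty bitmap -> single zero byte */
        /* vec[0] then carries vec_len - 1, matching the CAS option vector format */
        return 0;
    }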
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 949c44fec8..fd6fc1d953 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -47,6 +47,7 @@
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/hostmem.h"
+#include "sysemu/numa.h"
#include "hw/vfio/vfio.h"
@@ -362,7 +363,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
/* Allocate MSIs */
- irq = xics_spapr_alloc_block(spapr->xics, 0, req_num, false,
+ irq = xics_spapr_alloc_block(spapr->xics, req_num, false,
ret_intr_type == RTAS_TYPE_MSI, &err);
if (err) {
error_reportf_err(err, "Can't allocate MSIs for device %x: ",
@@ -1310,32 +1311,27 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
sphb->ddw_enabled ? SPAPR_PCI_DMA_MAX_WINDOWS : 1;
if (sphb->index != (uint32_t)-1) {
- hwaddr windows_base;
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
+ Error *local_err = NULL;
if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn[0] != (uint32_t)-1)
|| (sphb->dma_liobn[1] != (uint32_t)-1 && windows_supported == 2)
|| (sphb->mem_win_addr != (hwaddr)-1)
+ || (sphb->mem64_win_addr != (hwaddr)-1)
|| (sphb->io_win_addr != (hwaddr)-1)) {
error_setg(errp, "Either \"index\" or other parameters must"
" be specified for PAPR PHB, not both");
return;
}
- if (sphb->index > SPAPR_PCI_MAX_INDEX) {
- error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
- SPAPR_PCI_MAX_INDEX);
+ smc->phb_placement(spapr, sphb->index,
+ &sphb->buid, &sphb->io_win_addr,
+ &sphb->mem_win_addr, &sphb->mem64_win_addr,
+ windows_supported, sphb->dma_liobn, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
-
- sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
- for (i = 0; i < windows_supported; ++i) {
- sphb->dma_liobn[i] = SPAPR_PCI_LIOBN(sphb->index, i);
- }
-
- windows_base = SPAPR_PCI_WINDOW_BASE
- + sphb->index * SPAPR_PCI_WINDOW_SPACING;
- sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
- sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
}
if (sphb->buid == (uint64_t)-1) {
@@ -1359,11 +1355,49 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
return;
}
+ if (sphb->mem64_win_size != 0) {
+ if (sphb->mem64_win_addr == (hwaddr)-1) {
+ error_setg(errp,
+ "64-bit memory window address not specified for PHB");
+ return;
+ }
+
+ if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
+ error_setg(errp, "32-bit memory window of size 0x%"HWADDR_PRIx
+ " (max 2 GiB)", sphb->mem_win_size);
+ return;
+ }
+
+ if (sphb->mem64_win_pciaddr == (hwaddr)-1) {
+ /* 64-bit window defaults to identity mapping */
+ sphb->mem64_win_pciaddr = sphb->mem64_win_addr;
+ }
+ } else if (sphb->mem_win_size > SPAPR_PCI_MEM32_WIN_SIZE) {
+ /*
+ * For compatibility with old configuration, if no 64-bit MMIO
+ * window is specified, but the ordinary (32-bit) memory
+ * window is specified as > 2GiB, we treat it as a 2GiB 32-bit
+ * window, with a 64-bit MMIO window following on immediately
+ * afterwards
+ */
+ sphb->mem64_win_size = sphb->mem_win_size - SPAPR_PCI_MEM32_WIN_SIZE;
+ sphb->mem64_win_addr = sphb->mem_win_addr + SPAPR_PCI_MEM32_WIN_SIZE;
+ sphb->mem64_win_pciaddr =
+ SPAPR_PCI_MEM_WIN_BUS_OFFSET + SPAPR_PCI_MEM32_WIN_SIZE;
+ sphb->mem_win_size = SPAPR_PCI_MEM32_WIN_SIZE;
+ }
+
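A worked example of the compatibility split above, with assumed constants (SPAPR_PCI_MEM32_WIN_SIZE = 2 GiB and SPAPR_PCI_MEM_WIN_BUS_OFFSET = 0x80000000, both defined in spapr_pci.h): a legacy mem_win_size of 4 GiB becomes a 2 GiB 32-bit window plus a 2 GiB 64-bit window placed immediately after it.

    /* Sketch: splitting a legacy >2GiB "32-bit" window, assumed constants. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t mem32_limit = 2ULL << 30;     /* assumed SPAPR_PCI_MEM32_WIN_SIZE */
        const uint64_t bus_offset  = 0x80000000ULL;  /* assumed SPAPR_PCI_MEM_WIN_BUS_OFFSET */

        uint64_t mem_win_addr = 0x10080000000ULL;    /* illustrative window start */
        uint64_t mem_win_size = 4ULL << 30;          /* illustrative legacy 4 GiB window */

        uint64_t mem64_win_size    = mem_win_size - mem32_limit;  /* 2 GiB */
        uint64_t mem64_win_addr    = mem_win_addr + mem32_limit;  /* directly after it */
        uint64_t mem64_win_pciaddr = bus_offset + mem32_limit;    /* contiguous bus view */
        mem_win_size = mem32_limit;

        assert(mem64_win_size == (2ULL << 30));
        assert(mem64_win_addr == mem_win_addr + mem_win_size);
        (void)mem64_win_pciaddr;
        return 0;
    }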
if (spapr_pci_find_phb(spapr, sphb->buid)) {
error_setg(errp, "PCI host bridges must have unique BUIDs");
return;
}
+ if (sphb->numa_node != -1 &&
+ (sphb->numa_node >= MAX_NODES || !numa_info[sphb->numa_node].present)) {
+ error_setg(errp, "Invalid NUMA node ID for PCI host bridge");
+ return;
+ }
+
sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
namebuf = alloca(strlen(sphb->dtbusname) + 32);
@@ -1372,12 +1406,19 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
sprintf(namebuf, "%s.mmio", sphb->dtbusname);
memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
- sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
- memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
+ sprintf(namebuf, "%s.mmio32-alias", sphb->dtbusname);
+ memory_region_init_alias(&sphb->mem32window, OBJECT(sphb),
namebuf, &sphb->memspace,
SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
- &sphb->memwindow);
+ &sphb->mem32window);
+
+ sprintf(namebuf, "%s.mmio64-alias", sphb->dtbusname);
+ memory_region_init_alias(&sphb->mem64window, OBJECT(sphb),
+ namebuf, &sphb->memspace,
+ sphb->mem64_win_pciaddr, sphb->mem64_win_size);
+ memory_region_add_subregion(get_system_memory(), sphb->mem64_win_addr,
+ &sphb->mem64window);
/* Initialize IO regions */
sprintf(namebuf, "%s.io", sphb->dtbusname);
@@ -1444,8 +1485,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
uint32_t irq;
Error *local_err = NULL;
- irq = xics_spapr_alloc_block(spapr->xics, 0, 1, true, false,
- &local_err);
+ irq = xics_spapr_alloc_block(spapr->xics, 1, true, false, &local_err);
if (local_err) {
error_propagate(errp, local_err);
error_prepend(errp, "can't allocate LSIs: ");
@@ -1530,7 +1570,12 @@ static Property spapr_phb_properties[] = {
DEFINE_PROP_UINT32("liobn64", sPAPRPHBState, dma_liobn[1], -1),
DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
- SPAPR_PCI_MMIO_WIN_SIZE),
+ SPAPR_PCI_MEM32_WIN_SIZE),
+ DEFINE_PROP_UINT64("mem64_win_addr", sPAPRPHBState, mem64_win_addr, -1),
+ DEFINE_PROP_UINT64("mem64_win_size", sPAPRPHBState, mem64_win_size,
+ SPAPR_PCI_MEM64_WIN_SIZE),
+ DEFINE_PROP_UINT64("mem64_win_pciaddr", sPAPRPHBState, mem64_win_pciaddr,
+ -1),
DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
SPAPR_PCI_IO_WIN_SIZE),
@@ -1544,6 +1589,9 @@ static Property spapr_phb_properties[] = {
DEFINE_PROP_BOOL("ddw", sPAPRPHBState, ddw_enabled, true),
DEFINE_PROP_UINT64("pgsz", sPAPRPHBState, page_size_mask,
(1ULL << 12) | (1ULL << 16)),
+ DEFINE_PROP_UINT32("numa_node", sPAPRPHBState, numa_node, -1),
+ DEFINE_PROP_BOOL("pre-2.8-migration", sPAPRPHBState,
+ pre_2_8_migration, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1590,6 +1638,20 @@ static void spapr_pci_pre_save(void *opaque)
sphb->msi_devs[i].key = *(uint32_t *) key;
sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
}
+
+ if (sphb->pre_2_8_migration) {
+ sphb->mig_liobn = sphb->dma_liobn[0];
+ sphb->mig_mem_win_addr = sphb->mem_win_addr;
+ sphb->mig_mem_win_size = sphb->mem_win_size;
+ sphb->mig_io_win_addr = sphb->io_win_addr;
+ sphb->mig_io_win_size = sphb->io_win_size;
+
+ if ((sphb->mem64_win_size != 0)
+ && (sphb->mem64_win_addr
+ == (sphb->mem_win_addr + sphb->mem_win_size))) {
+ sphb->mig_mem_win_size += sphb->mem64_win_size;
+ }
+ }
}
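The pre-2.8 fields above exist so a backwards migration still presents one contiguous memory window to the destination; a short sketch of the merge with illustrative numbers:

    /* Sketch: folding the 64-bit window back into the legacy mem_win field. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t mem_win_addr   = 0x10080000000ULL;  /* illustrative 32-bit window start */
        uint64_t mem_win_size   = 0x80000000ULL;     /* 2 GiB */
        uint64_t mem64_win_addr = mem_win_addr + mem_win_size;
        uint64_t mem64_win_size = 0x80000000ULL;     /* 2 GiB, directly adjacent */

        uint64_t mig_mem_win_size = mem_win_size;
        if (mem64_win_size != 0 &&
            mem64_win_addr == mem_win_addr + mem_win_size) {
            mig_mem_win_size += mem64_win_size;      /* old machines see one 4 GiB window */
        }
        assert(mig_mem_win_size == 0x100000000ULL);
        return 0;
    }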
static int spapr_pci_post_load(void *opaque, int version_id)
@@ -1612,6 +1674,13 @@ static int spapr_pci_post_load(void *opaque, int version_id)
return 0;
}
+static bool pre_2_8_migration(void *opaque, int version_id)
+{
+ sPAPRPHBState *sphb = opaque;
+
+ return sphb->pre_2_8_migration;
+}
+
static const VMStateDescription vmstate_spapr_pci = {
.name = "spapr_pci",
.version_id = 2,
@@ -1620,11 +1689,11 @@ static const VMStateDescription vmstate_spapr_pci = {
.post_load = spapr_pci_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
- VMSTATE_UINT32_EQUAL(dma_liobn[0], sPAPRPHBState),
- VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
- VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
- VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
- VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
+ VMSTATE_UINT32_TEST(mig_liobn, sPAPRPHBState, pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_mem_win_addr, sPAPRPHBState, pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_mem_win_size, sPAPRPHBState, pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_io_win_addr, sPAPRPHBState, pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_io_win_size, sPAPRPHBState, pre_2_8_migration),
VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
@@ -1765,10 +1834,6 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
int bus_off, i, j, ret;
char nodename[FDT_NAME_MAX];
uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
- const uint64_t mmiosize = memory_region_size(&phb->memwindow);
- const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
- const uint64_t w32size = MIN(w32max, mmiosize);
- const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
struct {
uint32_t hi;
uint64_t child;
@@ -1783,15 +1848,16 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
{
cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
cpu_to_be64(phb->mem_win_addr),
- cpu_to_be64(w32size),
+ cpu_to_be64(phb->mem_win_size),
},
{
- cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
- cpu_to_be64(phb->mem_win_addr + w32size),
- cpu_to_be64(w64size)
+ cpu_to_be32(b_ss(3)), cpu_to_be64(phb->mem64_win_pciaddr),
+ cpu_to_be64(phb->mem64_win_addr),
+ cpu_to_be64(phb->mem64_win_size),
},
};
- const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
+ const unsigned sizeof_ranges =
+ (phb->mem64_win_size ? 3 : 2) * sizeof(ranges[0]);
uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
uint32_t interrupt_map_mask[] = {
cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
@@ -1805,6 +1871,11 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
cpu_to_be32(1),
cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW)
};
+ uint32_t associativity[] = {cpu_to_be32(0x4),
+ cpu_to_be32(0x0),
+ cpu_to_be32(0x0),
+ cpu_to_be32(0x0),
+ cpu_to_be32(phb->numa_node)};
sPAPRTCETable *tcet;
PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
sPAPRFDT s_fdt;
@@ -1837,6 +1908,12 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
&ddw_extensions, sizeof(ddw_extensions)));
}
+ /* Advertise NUMA via ibm,associativity */
+ if (phb->numa_node != -1) {
+ _FDT(fdt_setprop(fdt, bus_off, "ibm,associativity", associativity,
+ sizeof(associativity)));
+ }
+
    /* Build the interrupt-map, this must match what is done
* in pci_spapr_map_irq
*/
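As an aside, a small standalone illustration (not QEMU code) of how the five-cell "ibm,associativity" property added above is laid out for a PHB placed on NUMA node 3; htonl() stands in here for QEMU's cpu_to_be32().

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    uint32_t numa_node = 3;               /* value of the new "numa_node" property */
    uint32_t assoc[5] = {
        htonl(4),                         /* number of associativity levels */
        htonl(0), htonl(0), htonl(0),     /* upper levels: unused for a PHB */
        htonl(numa_node),                 /* lowest level: the NUMA node id */
    };

    for (int i = 0; i < 5; i++) {
        printf("cell %d = 0x%08x\n", i, (unsigned)ntohl(assoc[i]));
    }
    return 0;
}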
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index dc058e512b..bb19944686 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -27,6 +27,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/char.h"
#include "hw/qdev.h"
@@ -36,6 +37,7 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/spapr_rtas.h"
#include "hw/ppc/ppc.h"
#include "qapi-event.h"
#include "hw/boards.h"
@@ -43,16 +45,8 @@
#include <libfdt.h>
#include "hw/ppc/spapr_drc.h"
#include "qemu/cutils.h"
-
-/* #define DEBUG_SPAPR */
-
-#ifdef DEBUG_SPAPR
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
+#include "trace.h"
+#include "hw/ppc/fdt.h"
static sPAPRConfigureConnectorState *spapr_ccs_find(sPAPRMachineState *spapr,
uint32_t drc_index)
@@ -302,7 +296,8 @@ static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
break;
}
case RTAS_SYSPARM_UUID:
- ret = sysparm_st(buffer, length, qemu_uuid, (qemu_uuid_set ? 16 : 0));
+ ret = sysparm_st(buffer, length, (unsigned char *)&qemu_uuid,
+ (qemu_uuid_set ? 16 : 0));
break;
default:
ret = RTAS_OUT_NOT_SUPPORTED;
@@ -434,8 +429,7 @@ static void rtas_set_indicator(PowerPCCPU *cpu, sPAPRMachineState *spapr,
/* if this is a DR sensor we can assume sensor_index == drc_index */
drc = spapr_dr_connector_by_index(sensor_index);
if (!drc) {
- DPRINTF("rtas_set_indicator: invalid sensor/DRC index: %xh\n",
- sensor_index);
+ trace_spapr_rtas_set_indicator_invalid(sensor_index);
ret = RTAS_OUT_PARAM_ERROR;
goto out;
}
@@ -474,8 +468,7 @@ out:
out_unimplemented:
/* currently only DR-related sensors are implemented */
- DPRINTF("rtas_set_indicator: sensor/indicator not implemented: %d\n",
- sensor_type);
+ trace_spapr_rtas_set_indicator_not_supported(sensor_index, sensor_type);
rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
}
@@ -501,16 +494,15 @@ static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPRMachineState *spapr,
if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
/* currently only DR-related sensors are implemented */
- DPRINTF("rtas_get_sensor_state: sensor/indicator not implemented: %d\n",
- sensor_type);
+ trace_spapr_rtas_get_sensor_state_not_supported(sensor_index,
+ sensor_type);
ret = RTAS_OUT_NOT_SUPPORTED;
goto out;
}
drc = spapr_dr_connector_by_index(sensor_index);
if (!drc) {
- DPRINTF("rtas_get_sensor_state: invalid sensor/DRC index: %xh\n",
- sensor_index);
+ trace_spapr_rtas_get_sensor_state_invalid(sensor_index);
ret = RTAS_OUT_PARAM_ERROR;
goto out;
}
@@ -567,8 +559,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
drc_index = rtas_ld(wa_addr, 0);
drc = spapr_dr_connector_by_index(drc_index);
if (!drc) {
- DPRINTF("rtas_ibm_configure_connector: invalid DRC index: %xh\n",
- drc_index);
+ trace_spapr_rtas_ibm_configure_connector_invalid(drc_index);
rc = RTAS_OUT_PARAM_ERROR;
goto out;
}
@@ -576,8 +567,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
fdt = drck->get_fdt(drc, NULL);
if (!fdt) {
- DPRINTF("rtas_ibm_configure_connector: Missing FDT for DRC index: %xh\n",
- drc_index);
+ trace_spapr_rtas_ibm_configure_connector_missing_fdt(drc_index);
rc = SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE;
goto out;
}
@@ -691,6 +681,24 @@ target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return H_PARAMETER;
}
+uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args,
+ uint32_t nret, uint64_t rets)
+{
+ int token;
+
+ for (token = 0; token < RTAS_TOKEN_MAX - RTAS_TOKEN_BASE; token++) {
+ if (strcmp(cmd, rtas_table[token].name) == 0) {
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
+
+ rtas_table[token].fn(cpu, spapr, token + RTAS_TOKEN_BASE,
+ nargs, args, nret, rets);
+ return H_SUCCESS;
+ }
+ }
+ return H_PARAMETER;
+}
+
void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
{
assert((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX));
@@ -703,47 +711,9 @@ void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
rtas_table[token].fn = fn;
}
-int spapr_rtas_device_tree_setup(void *fdt, hwaddr rtas_addr,
- hwaddr rtas_size)
+void spapr_dt_rtas_tokens(void *fdt, int rtas)
{
- int ret;
int i;
- uint32_t lrdr_capacity[5];
- MachineState *machine = MACHINE(qdev_get_machine());
- sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
- uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
- memory_region_size(&spapr->hotplug_memory.mr);
-
- ret = fdt_add_mem_rsv(fdt, rtas_addr, rtas_size);
- if (ret < 0) {
- fprintf(stderr, "Couldn't add RTAS reserve entry: %s\n",
- fdt_strerror(ret));
- return ret;
- }
-
- ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-base",
- rtas_addr);
- if (ret < 0) {
- fprintf(stderr, "Couldn't add linux,rtas-base property: %s\n",
- fdt_strerror(ret));
- return ret;
- }
-
- ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-entry",
- rtas_addr);
- if (ret < 0) {
- fprintf(stderr, "Couldn't add linux,rtas-entry property: %s\n",
- fdt_strerror(ret));
- return ret;
- }
-
- ret = qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-size",
- rtas_size);
- if (ret < 0) {
- fprintf(stderr, "Couldn't add rtas-size property: %s\n",
- fdt_strerror(ret));
- return ret;
- }
for (i = 0; i < RTAS_TOKEN_MAX - RTAS_TOKEN_BASE; i++) {
struct rtas_call *call = &rtas_table[i];
@@ -752,29 +722,49 @@ int spapr_rtas_device_tree_setup(void *fdt, hwaddr rtas_addr,
continue;
}
- ret = qemu_fdt_setprop_cell(fdt, "/rtas", call->name,
- i + RTAS_TOKEN_BASE);
- if (ret < 0) {
- fprintf(stderr, "Couldn't add rtas token for %s: %s\n",
- call->name, fdt_strerror(ret));
- return ret;
- }
+ _FDT(fdt_setprop_cell(fdt, rtas, call->name, i + RTAS_TOKEN_BASE));
+ }
+}
+void spapr_load_rtas(sPAPRMachineState *spapr, void *fdt, hwaddr addr)
+{
+ int rtas_node;
+ int ret;
+
+ /* Copy RTAS blob into guest RAM */
+ cpu_physical_memory_write(addr, spapr->rtas_blob, spapr->rtas_size);
+
+ ret = fdt_add_mem_rsv(fdt, addr, spapr->rtas_size);
+ if (ret < 0) {
+ error_report("Couldn't add RTAS reserve entry: %s",
+ fdt_strerror(ret));
+ exit(1);
}
- lrdr_capacity[0] = cpu_to_be32(max_hotplug_addr >> 32);
- lrdr_capacity[1] = cpu_to_be32(max_hotplug_addr & 0xffffffff);
- lrdr_capacity[2] = 0;
- lrdr_capacity[3] = cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE);
- lrdr_capacity[4] = cpu_to_be32(max_cpus/smp_threads);
- ret = qemu_fdt_setprop(fdt, "/rtas", "ibm,lrdr-capacity", lrdr_capacity,
- sizeof(lrdr_capacity));
+ /* Update the device tree with the blob's location */
+ rtas_node = fdt_path_offset(fdt, "/rtas");
+ assert(rtas_node >= 0);
+
+ ret = fdt_setprop_cell(fdt, rtas_node, "linux,rtas-base", addr);
if (ret < 0) {
- fprintf(stderr, "Couldn't add ibm,lrdr-capacity rtas property\n");
- return ret;
+ error_report("Couldn't add linux,rtas-base property: %s",
+ fdt_strerror(ret));
+ exit(1);
}
- return 0;
+ ret = fdt_setprop_cell(fdt, rtas_node, "linux,rtas-entry", addr);
+ if (ret < 0) {
+ error_report("Couldn't add linux,rtas-entry property: %s",
+ fdt_strerror(ret));
+ exit(1);
+ }
+
+ ret = fdt_setprop_cell(fdt, rtas_node, "rtas-size", spapr->rtas_size);
+ if (ret < 0) {
+ error_report("Couldn't add rtas-size property: %s",
+ fdt_strerror(ret));
+ exit(1);
+ }
}
static void core_rtas_register_types(void)
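A sketch of the name-to-token lookup that qtest_rtas_call() above relies on; the table contents, the token base and the helpers here are illustrative stand-ins, not QEMU's actual rtas_table.

#include <stdio.h>
#include <string.h>

#define TOKEN_BASE 0x2000

struct call { const char *name; };

static const struct call table[] = {
    { "ibm,get-system-parameter" },
    { "ibm,configure-connector" },
    { "get-sensor-state" },
};

static int lookup_token(const char *cmd)
{
    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if (strcmp(cmd, table[i].name) == 0) {
            return TOKEN_BASE + i;   /* token handed to the RTAS handler */
        }
    }
    return -1;                       /* unknown call -> H_PARAMETER in the patch */
}

int main(void)
{
    printf("token = 0x%x\n", lookup_token("get-sensor-state"));
    return 0;
}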
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index f93244d7c1..cc1e09c568 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -20,6 +20,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "qemu/log.h"
@@ -35,19 +36,11 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/xics.h"
+#include "hw/ppc/fdt.h"
+#include "trace.h"
#include <libfdt.h>
-/* #define DEBUG_SPAPR */
-
-#ifdef DEBUG_SPAPR
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
static Property spapr_vio_props[] = {
DEFINE_PROP_UINT32("irq", VIOsPAPRDevice, irq, 0), \
DEFINE_PROP_END_OF_LIST(),
@@ -201,9 +194,7 @@ static target_ulong h_reg_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr,
dev->crq.qsize = queue_len;
dev->crq.qnext = 0;
- DPRINTF("CRQ for dev 0x" TARGET_FMT_lx " registered at 0x"
- TARGET_FMT_lx "/0x" TARGET_FMT_lx "\n",
- reg, queue_addr, queue_len);
+ trace_spapr_vio_h_reg_crq(reg, queue_addr, queue_len);
return H_SUCCESS;
}
@@ -213,7 +204,7 @@ static target_ulong free_crq(VIOsPAPRDevice *dev)
dev->crq.qsize = 0;
dev->crq.qnext = 0;
- DPRINTF("CRQ for dev 0x%" PRIx32 " freed\n", dev->reg);
+ trace_spapr_vio_free_crq(dev->reg);
return H_SUCCESS;
}
@@ -276,7 +267,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
uint8_t byte;
if (!dev->crq.qsize) {
- fprintf(stderr, "spapr_vio_send_creq on uninitialized queue\n");
+ error_report("spapr_vio_send_crq on uninitialized queue");
return -1;
}
@@ -463,7 +454,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
dev->qdev.id = id;
}
- dev->irq = xics_spapr_alloc(spapr->xics, 0, dev->irq, false, &local_err);
+ dev->irq = xics_spapr_alloc(spapr->xics, dev->irq, false, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -634,11 +625,21 @@ static int compare_reg(const void *p1, const void *p2)
return 1;
}
-int spapr_populate_vdevice(VIOsPAPRBus *bus, void *fdt)
+void spapr_dt_vdevice(VIOsPAPRBus *bus, void *fdt)
{
DeviceState *qdev, **qdevs;
BusChild *kid;
int i, num, ret = 0;
+ int node;
+
+ _FDT(node = fdt_add_subnode(fdt, 0, "vdevice"));
+
+ _FDT(fdt_setprop_string(fdt, node, "device_type", "vdevice"));
+ _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,vdevice"));
+ _FDT(fdt_setprop_cell(fdt, node, "#address-cells", 1));
+ _FDT(fdt_setprop_cell(fdt, node, "#size-cells", 0));
+ _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
+ _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
/* Count qdevs on the bus list */
num = 0;
@@ -660,43 +661,32 @@ int spapr_populate_vdevice(VIOsPAPRBus *bus, void *fdt)
* to know that will mean they are in forward order in the tree. */
for (i = num - 1; i >= 0; i--) {
VIOsPAPRDevice *dev = (VIOsPAPRDevice *)(qdevs[i]);
+ VIOsPAPRDeviceClass *vdc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
ret = vio_make_devnode(dev, fdt);
-
if (ret < 0) {
- goto out;
+ error_report("Couldn't create device node /vdevice/%s@%"PRIx32,
+ vdc->dt_name, dev->reg);
+ exit(1);
}
}
- ret = 0;
-out:
g_free(qdevs);
-
- return ret;
}
-int spapr_populate_chosen_stdout(void *fdt, VIOsPAPRBus *bus)
+gchar *spapr_vio_stdout_path(VIOsPAPRBus *bus)
{
VIOsPAPRDevice *dev;
char *name, *path;
- int ret, offset;
dev = spapr_vty_get_default(bus);
- if (!dev)
- return 0;
-
- offset = fdt_path_offset(fdt, "/chosen");
- if (offset < 0) {
- return offset;
+ if (!dev) {
+ return NULL;
}
name = spapr_vio_get_dev_name(DEVICE(dev));
path = g_strdup_printf("/vdevice/%s", name);
- ret = fdt_setprop_string(fdt, offset, "linux,stdout-path", path);
-
g_free(name);
- g_free(path);
-
- return ret;
+ return path;
}
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index dfeab93089..2297ead11e 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -35,6 +35,39 @@ spapr_iommu_ddw_create(uint64_t buid, uint32_t cfgaddr, uint64_t pg_size, uint64
spapr_iommu_ddw_remove(uint32_t liobn) "liobn=%"PRIx32
spapr_iommu_ddw_reset(uint64_t buid, uint32_t cfgaddr) "buid=%"PRIx64" addr=%"PRIx32
+# hw/ppc/spapr_drc.c
+spapr_drc_set_isolation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: %"PRIx32
+spapr_drc_set_isolation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_set_isolation_state_deferring(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_set_indicator_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x"
+spapr_drc_set_allocation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x"
+spapr_drc_set_allocation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_set_configured(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_set_configured_skipping(uint32_t index) "drc: 0x%"PRIx32", isolated device"
+spapr_drc_entity_sense(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x"
+spapr_drc_attach(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_detach(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_awaiting_isolated(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_awaiting_unusable(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_awaiting_allocation(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_reset(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_realize(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_realize_child(uint32_t index, char *childname) "drc: 0x%"PRIx32", child name: %s"
+spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32
+spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32
+
+# hw/ppc/spapr_rtas.c
+spapr_rtas_set_indicator_invalid(uint32_t index) "sensor index: 0x%"PRIx32
+spapr_rtas_set_indicator_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32
+spapr_rtas_get_sensor_state_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32
+spapr_rtas_get_sensor_state_invalid(uint32_t index) "sensor index: 0x%"PRIx32
+spapr_rtas_ibm_configure_connector_invalid(uint32_t index) "DRC index: 0x%"PRIx32
+spapr_rtas_ibm_configure_connector_missing_fdt(uint32_t index) "DRC index: 0x%"PRIx32
+
+# hw/ppc/spapr_vio.c
+spapr_vio_h_reg_crq(uint64_t reg, uint64_t queue_addr, uint64_t queue_len) "CRQ for dev 0x%" PRIx64 " registered at 0x%" PRIx64 "/0x%" PRIx64
+spapr_vio_free_crq(uint32_t reg) "CRQ for dev 0x%" PRIx32 " freed"
+
# hw/ppc/ppc.c
ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)"
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index bb8e4be339..0f2580d644 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -141,7 +141,8 @@ out_err:
int css_create_css_image(uint8_t cssid, bool default_image)
{
trace_css_new_image(cssid, default_image ? "(default)" : "");
- if (cssid > MAX_CSSID) {
+ /* 255 is reserved */
+ if (cssid == 255) {
return -EINVAL;
}
if (channel_subsys.css[cssid]) {
@@ -774,7 +775,7 @@ int css_do_xsch(SubchDev *sch)
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
@@ -814,7 +815,7 @@ int css_do_csch(SubchDev *sch)
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
@@ -836,7 +837,7 @@ int css_do_hsch(SubchDev *sch)
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
@@ -912,7 +913,7 @@ int css_do_ssch(SubchDev *sch, ORB *orb)
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
@@ -989,7 +990,7 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
uint16_t stctl;
IRB irb;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
return 3;
}
@@ -1195,7 +1196,7 @@ int css_do_rsch(SubchDev *sch)
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
@@ -1267,7 +1268,7 @@ bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
uint8_t real_cssid;
real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
- if (real_cssid > MAX_CSSID || ssid > MAX_SSID ||
+ if (ssid > MAX_SSID ||
!channel_subsys.css[real_cssid] ||
!channel_subsys.css[real_cssid]->sch_set[ssid]) {
return true;
@@ -1282,9 +1283,6 @@ static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
CssImage *css;
trace_css_chpid_add(cssid, chpid, type);
- if (cssid > MAX_CSSID) {
- return -EINVAL;
- }
css = channel_subsys.css[cssid];
if (!css) {
return -EINVAL;
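The css.c hunks above change the subchannel checks from "neither DNV nor ENA is set" to "either DNV or ENA is missing". A small truth-table program, with made-up bit values, shows how the two forms differ:

#include <stdio.h>
#include <stdint.h>

#define DNV 0x01   /* illustrative bit values, not the real PMCW masks */
#define ENA 0x02

static int old_reject(uint16_t flags) { return !(flags & (DNV | ENA)); }
static int new_reject(uint16_t flags) { return (~flags & (DNV | ENA)) != 0; }

int main(void)
{
    uint16_t cases[] = { 0, DNV, ENA, DNV | ENA };
    for (unsigned i = 0; i < 4; i++) {
        printf("flags=0x%02x old=%d new=%d\n",
               cases[i], old_reject(cases[i]), new_reject(cases[i]));
    }
    return 0;
}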
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 9c1c04e590..63f6248f1d 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -383,7 +383,6 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
uint64_t pte;
uint32_t flags;
S390PCIBusDevice *pbdev = container_of(iommu, S390PCIBusDevice, iommu_mr);
- S390pciState *s;
IOMMUTLBEntry ret = {
.target_as = &address_space_memory,
.iova = 0,
@@ -405,19 +404,6 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);
- s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pbdev->pdev)->qbus.parent);
- /* s390 does not have an APIC mapped to main storage so we use
- * a separate AddressSpace only for msix notifications
- */
- if (addr == ZPCI_MSI_ADDR) {
- ret.target_as = &s->msix_notify_as;
- ret.iova = addr;
- ret.translated_addr = addr;
- ret.addr_mask = 0xfff;
- ret.perm = IOMMU_RW;
- return ret;
- }
-
if (addr < pbdev->pba || addr > pbdev->pal) {
return ret;
}
@@ -476,8 +462,7 @@ static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
unsigned int size)
{
- S390PCIBusDevice *pbdev;
- uint32_t io_int_word;
+ S390PCIBusDevice *pbdev = opaque;
uint32_t idx = data >> ZPCI_MSI_VEC_BITS;
uint32_t vec = data & ZPCI_MSI_VEC_MASK;
uint64_t ind_bit;
@@ -486,7 +471,6 @@ static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
DPRINTF("write_msix data 0x%" PRIx64 " idx %d vec 0x%x\n", data, idx, vec);
- pbdev = s390_pci_find_dev_by_idx(idx);
if (!pbdev) {
e |= (vec << ERR_EVENT_MVN_OFFSET);
s390_pci_generate_error_event(ERR_EVENT_NOMSI, idx, 0, addr, e);
@@ -504,8 +488,7 @@ static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
0x80 >> ((ind_bit + vec) % 8));
if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
0x80 >> (sum_bit % 8))) {
- io_int_word = (pbdev->isc << 27) | IO_INT_WORD_AI;
- s390_io_interrupt(0, 0, 0, io_int_word);
+ css_adapter_interrupt(pbdev->isc);
}
}
@@ -548,10 +531,6 @@ static void s390_pcihost_init_as(S390pciState *s)
s->iommu[i] = iommu;
}
-
- memory_region_init_io(&s->msix_notify_mr, OBJECT(s),
- &s390_msi_ctrl_ops, s, "msix-s390", UINT64_MAX);
- address_space_init(&s->msix_notify_as, &s->msix_notify_mr, "msix-pci");
}
static int s390_pcihost_init(SysBusDevice *dev)
@@ -581,7 +560,7 @@ static int s390_pcihost_init(SysBusDevice *dev)
return 0;
}
-static int s390_pcihost_setup_msix(S390PCIBusDevice *pbdev)
+static int s390_pci_setup_msix(S390PCIBusDevice *pbdev)
{
uint8_t pos;
uint16_t ctrl;
@@ -609,6 +588,26 @@ static int s390_pcihost_setup_msix(S390PCIBusDevice *pbdev)
return 0;
}
+static void s390_pci_msix_init(S390PCIBusDevice *pbdev)
+{
+ char *name;
+
+ name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
+
+ memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
+ &s390_msi_ctrl_ops, pbdev, name, PAGE_SIZE);
+ memory_region_add_subregion(&pbdev->iommu->mr, ZPCI_MSI_ADDR,
+ &pbdev->msix_notify_mr);
+
+ g_free(name);
+}
+
+static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
+{
+ memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
+ object_unparent(OBJECT(&pbdev->msix_notify_mr));
+}
+
static S390PCIBusDevice *s390_pci_device_new(const char *target)
{
DeviceState *dev = NULL;
@@ -649,6 +648,7 @@ static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
pbdev = s390_pci_device_new(dev->id);
if (!pbdev) {
error_setg(errp, "create zpci device failed");
+ return;
}
}
@@ -661,7 +661,9 @@ static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
pbdev->pdev = pdev;
pbdev->iommu = s->iommu[PCI_SLOT(pdev->devfn)];
pbdev->state = ZPCI_FS_STANDBY;
- s390_pcihost_setup_msix(pbdev);
+
+ s390_pci_msix_init(pbdev);
+ s390_pci_setup_msix(pbdev);
if (dev->hotplugged) {
s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY,
@@ -717,11 +719,7 @@ static void s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev,
break;
}
}
-
- if (!pbdev) {
- object_unparent(OBJECT(pci_dev));
- return;
- }
+ assert(pbdev != NULL);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
pbdev = S390_PCI_DEVICE(dev);
pci_dev = pbdev->pdev;
@@ -752,6 +750,7 @@ static void s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev,
s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
pbdev->fh, pbdev->fid);
object_unparent(OBJECT(pci_dev));
+ s390_pci_msix_free(pbdev);
pbdev->pdev = NULL;
pbdev->state = ZPCI_FS_RESERVED;
out:
@@ -808,17 +807,11 @@ static uint32_t s390_pci_generate_fid(Error **errp)
{
uint32_t fid = 0;
- while (fid <= ZPCI_MAX_FID) {
+ do {
if (!s390_pci_find_dev_by_fid(fid)) {
return fid;
}
-
- if (fid == ZPCI_MAX_FID) {
- break;
- }
-
- fid++;
- }
+ } while (fid++ != ZPCI_MAX_FID);
error_setg(errp, "no free fid could be found");
return 0;
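A sketch of the overflow-safe search loop introduced for fid generation above: the do/while with a post-increment visits the maximum fid itself and still terminates when the counter would wrap. in_use() below is a stand-in for s390_pci_find_dev_by_fid(), and the fid values are invented.

#include <stdio.h>
#include <stdint.h>

#define MAX_FID UINT32_MAX

static int in_use(uint32_t fid) { return fid < 3; }  /* pretend fids 0..2 are taken */

static int64_t generate_fid(void)
{
    uint32_t fid = 0;
    do {
        if (!in_use(fid)) {
            return fid;
        }
    } while (fid++ != MAX_FID);   /* MAX_FID is tested, then the loop ends */
    return -1;                    /* no free fid */
}

int main(void)
{
    printf("fid = %lld\n", (long long)generate_fid());
    return 0;
}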
diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h
index 4f564e02f2..7f2701301e 100644
--- a/hw/s390x/s390-pci-bus.h
+++ b/hw/s390x/s390-pci-bus.h
@@ -82,6 +82,7 @@
#define ZPCI_EDMA_ADDR 0x1ffffffffffffffULL
#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
@@ -283,6 +284,7 @@ typedef struct S390PCIBusDevice {
AdapterRoutes routes;
S390PCIIOMMU *iommu;
MemoryRegion iommu_mr;
+ MemoryRegion msix_notify_mr;
IndAddr *summary_ind;
IndAddr *indicator;
QEMUTimer *release_timer;
@@ -297,8 +299,6 @@ typedef struct S390pciState {
S390PCIBus *bus;
S390PCIBusDevice *pbdev[PCI_SLOT_MAX];
S390PCIIOMMU *iommu[PCI_SLOT_MAX];
- AddressSpace msix_notify_as;
- MemoryRegion msix_notify_mr;
QTAILQ_HEAD(, SeiContainer) pending_sei;
} S390pciState;
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index f069b110b4..0864d9be12 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -315,6 +315,8 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
S390PCIBusDevice *pbdev;
uint64_t offset;
uint64_t data;
+ MemoryRegion *mr;
+ MemTxResult result;
uint8_t len;
uint32_t fh;
uint8_t pcias;
@@ -363,9 +365,13 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
- MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
- memory_region_dispatch_read(mr, offset, &data, len,
- MEMTXATTRS_UNSPECIFIED);
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ result = memory_region_dispatch_read(mr, offset, &data, len,
+ MEMTXATTRS_UNSPECIFIED);
+ if (result != MEMTX_OK) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
@@ -442,6 +448,8 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
CPUS390XState *env = &cpu->env;
uint64_t offset, data;
S390PCIBusDevice *pbdev;
+ MemoryRegion *mr;
+ MemTxResult result;
uint8_t len;
uint32_t fh;
uint8_t pcias;
@@ -491,7 +499,7 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
- MemoryRegion *mr;
+
if (trap_msix(pbdev, offset, pcias)) {
offset = offset - pbdev->msix.table_offset;
mr = &pbdev->pdev->msix_table_mmio;
@@ -500,8 +508,12 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
mr = pbdev->pdev->io_regions[pcias].memory;
}
- memory_region_dispatch_write(mr, offset, data, len,
+ result = memory_region_dispatch_write(mr, offset, data, len,
MEMTXATTRS_UNSPECIFIED);
+ if (result != MEMTX_OK) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
@@ -631,6 +643,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
+ MemTxResult result;
int i;
uint32_t fh;
uint8_t pcias;
@@ -688,7 +701,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
mr = pbdev->pdev->io_regions[pcias].memory;
if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
- program_interrupt(env, PGM_ADDRESSING, 6);
+ program_interrupt(env, PGM_OPERAND, 6);
return 0;
}
@@ -697,9 +710,13 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
}
for (i = 0; i < len / 8; i++) {
- memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
+ result = memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
ldq_p(buffer + i * 8), 8,
MEMTXATTRS_UNSPECIFIED);
+ if (result != MEMTX_OK) {
+ program_interrupt(env, PGM_OPERAND, 6);
+ return 0;
+ }
}
setcc(cpu, ZPCI_PCI_LS_OK);
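The s390-pci-inst.c hunks above start checking the MemTxResult of dispatched accesses instead of ignoring it. A standalone sketch of that pattern, with a made-up dispatch_read() in place of memory_region_dispatch_read():

#include <stdio.h>
#include <stdint.h>

typedef enum { MEMTX_OK = 0, MEMTX_ERROR } MemTxResult;

static MemTxResult dispatch_read(uint64_t offset, uint64_t *data)
{
    if (offset > 0xfff) {
        return MEMTX_ERROR;       /* pretend the region is 4 KiB */
    }
    *data = 0x12345678;
    return MEMTX_OK;
}

int main(void)
{
    uint64_t data;
    MemTxResult result = dispatch_read(0x2000, &data);

    if (result != MEMTX_OK) {
        printf("inject PGM_OPERAND and bail out\n");   /* mirrors the patch */
        return 1;
    }
    printf("data = 0x%llx\n", (unsigned long long)data);
    return 0;
}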
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 91d9cefbb5..e340eab36b 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -193,6 +193,7 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
S390CcwMachineClass *s390mc = S390_MACHINE_CLASS(mc);
s390mc->ri_allowed = true;
+ s390mc->cpu_model_allowed = true;
mc->init = ccw_init;
mc->reset = s390_machine_reset;
mc->hot_add_cpu = s390_hot_add_cpu;
@@ -249,10 +250,28 @@ bool ri_allowed(void)
return s390mc->ri_allowed;
}
+ /*
+ * Make sure the "none" machine can have ri, otherwise it won't be
+ * unlocked in KVM and therefore the host CPU model might be wrong.
+ */
+ return true;
}
return 0;
}
+bool cpu_model_allowed(void)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+ if (object_class_dynamic_cast(OBJECT_CLASS(mc),
+ TYPE_S390_CCW_MACHINE)) {
+ S390CcwMachineClass *s390mc = S390_MACHINE_CLASS(mc);
+
+ return s390mc->cpu_model_allowed;
+ }
+ /* allow CPU model qmp queries with the "none" machine */
+ return true;
+}
+
static inline void s390_machine_initfn(Object *obj)
{
object_property_add_bool(obj, "aes-key-wrap",
@@ -316,7 +335,11 @@ static const TypeInfo ccw_machine_info = {
} \
type_init(ccw_machine_register_##suffix)
+#define CCW_COMPAT_2_7 \
+ HW_COMPAT_2_7
+
#define CCW_COMPAT_2_6 \
+ CCW_COMPAT_2_7 \
HW_COMPAT_2_6 \
{\
.driver = TYPE_S390_IPL,\
@@ -372,14 +395,29 @@ static const TypeInfo ccw_machine_info = {
.value = "0",\
},
+static void ccw_machine_2_8_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_2_8_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(2_8, "2.8", true);
+
static void ccw_machine_2_7_instance_options(MachineState *machine)
{
+ ccw_machine_2_8_instance_options(machine);
}
static void ccw_machine_2_7_class_options(MachineClass *mc)
{
+ S390CcwMachineClass *s390mc = S390_MACHINE_CLASS(mc);
+
+ s390mc->cpu_model_allowed = false;
+ ccw_machine_2_8_class_options(mc);
+ SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_7);
}
-DEFINE_CCW_MACHINE(2_7, "2.7", true);
+DEFINE_CCW_MACHINE(2_7, "2.7", false);
static void ccw_machine_2_6_instance_options(MachineState *machine)
{
diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c
index 544c61643d..0a963473ad 100644
--- a/hw/s390x/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -101,7 +101,11 @@ void s390_init_cpus(MachineState *machine)
gchar *name;
if (machine->cpu_model == NULL) {
- machine->cpu_model = "host";
+ if (kvm_enabled()) {
+ machine->cpu_model = "host";
+ } else {
+ machine->cpu_model = "qemu";
+ }
}
cpu_states = g_new0(S390CPU *, max_cpus);
diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c
index fca37f511e..e741da1141 100644
--- a/hw/s390x/sclp.c
+++ b/hw/s390x/sclp.c
@@ -26,7 +26,25 @@
static inline SCLPDevice *get_sclp_device(void)
{
- return SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
+ static SCLPDevice *sclp;
+
+ if (!sclp) {
+ sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
+ }
+ return sclp;
+}
+
+static void prepare_cpu_entries(SCLPDevice *sclp, CPUEntry *entry, int count)
+{
+ uint8_t features[SCCB_CPU_FEATURE_LEN] = { 0 };
+ int i;
+
+ s390_get_feat_block(S390_FEAT_TYPE_SCLP_CPU, features);
+ for (i = 0; i < count; i++) {
+ entry[i].address = i;
+ entry[i].type = 0;
+ memcpy(entry[i].features, features, sizeof(entry[i].features));
+ }
}
/* Provide information about the configuration, CPUs and storage */
@@ -37,7 +55,6 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
CPUState *cpu;
int cpu_count = 0;
- int i = 0;
int rnsize, rnmax;
int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));
@@ -50,10 +67,15 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
read_info->highest_cpu = cpu_to_be16(max_cpus);
- for (i = 0; i < cpu_count; i++) {
- read_info->entries[i].address = i;
- read_info->entries[i].type = 0;
- }
+ read_info->ibc_val = cpu_to_be32(s390_get_ibc_val());
+
+ /* Configuration Characteristic (Extension) */
+ s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR,
+ read_info->conf_char);
+ s390_get_feat_block(S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
+ read_info->conf_char_ext);
+
+ prepare_cpu_entries(sclp, read_info->entries, cpu_count);
read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
SCLP_HAS_PCI_RECONFIG);
@@ -88,6 +110,8 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
}
+ read_info->mha_pow = s390_get_mha_pow();
+ read_info->hmfai = cpu_to_be32(s390_get_hmfai());
rnsize = 1 << (sclp->increment_size - 20);
if (rnsize <= 128) {
@@ -304,7 +328,6 @@ static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
CPUState *cpu;
int cpu_count = 0;
- int i = 0;
CPU_FOREACH(cpu) {
cpu_count++;
@@ -318,10 +341,7 @@ static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
cpu_info->offset_standby = cpu_to_be16(cpu_info->offset_configured
+ cpu_info->nr_configured*sizeof(CPUEntry));
- for (i = 0; i < cpu_count; i++) {
- cpu_info->entries[i].address = i;
- cpu_info->entries[i].type = 0;
- }
+ prepare_cpu_entries(sclp, cpu_info->entries, cpu_count);
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
}
@@ -406,7 +426,7 @@ int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
goto out;
}
- sclp_c->execute(sclp, (SCCB *)&work_sccb, code);
+ sclp_c->execute(sclp, &work_sccb, code);
cpu_physical_memory_write(sccb, &work_sccb,
be16_to_cpu(work_sccb.h.length));
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index a554a24d06..f5c1d98192 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -59,38 +59,11 @@ static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
virtio_bus_stop_ioeventfd(&dev->bus);
}
-static bool virtio_ccw_ioeventfd_started(DeviceState *d)
+static bool virtio_ccw_ioeventfd_enabled(DeviceState *d)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- return dev->ioeventfd_started;
-}
-
-static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- dev->ioeventfd_started = started;
- if (err) {
- /* Disable ioeventfd for this device. */
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-}
-
-static bool virtio_ccw_ioeventfd_disabled(DeviceState *d)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- return dev->ioeventfd_disabled ||
- !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD);
-}
-
-static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- dev->ioeventfd_disabled = disabled;
+ return (dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) != 0;
}
static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
@@ -330,6 +303,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (!ccw.cda) {
ret = -EFAULT;
} else {
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+
features.index = address_space_ldub(&address_space_memory,
ccw.cda
+ sizeof(features.features),
@@ -339,7 +314,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
if (dev->revision >= 1) {
/* Don't offer legacy features for modern devices. */
features.features = (uint32_t)
- (vdev->host_features & ~VIRTIO_LEGACY_FEATURES);
+ (vdev->host_features & ~vdc->legacy_features);
} else {
features.features = (uint32_t)vdev->host_features;
}
@@ -455,6 +430,26 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
}
}
break;
+ case CCW_CMD_READ_STATUS:
+ if (check_len) {
+ if (ccw.count != sizeof(status)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(status)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ address_space_stb(&address_space_memory, ccw.cda, vdev->status,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
+ ret = 0;
+ }
+ break;
case CCW_CMD_WRITE_STATUS:
if (check_len) {
if (ccw.count != sizeof(status)) {
@@ -689,6 +684,10 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
sch->cssid, sch->ssid, sch->schid, sch->devno,
ccw_dev->bus_id.valid ? "user-configured" : "auto-configured");
+ if (!kvm_eventfds_enabled()) {
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
+ }
+
if (k->realize) {
k->realize(dev, &err);
}
@@ -1261,6 +1260,16 @@ static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
return 0;
}
+static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+
+ if (dev->max_rev >= 1) {
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
+ }
+}
+
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
{
@@ -1270,6 +1279,10 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
SubchDev *sch = ccw_dev->sch;
int n = virtio_get_num_queues(vdev);
+ if (!virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
+ dev->max_rev = 0;
+ }
+
if (virtio_get_num_queues(vdev) > VIRTIO_CCW_QUEUE_MAX) {
error_setg(errp, "The number of virtqueues %d "
"exceeds ccw limit %d", n,
@@ -1277,31 +1290,13 @@ static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
return;
}
- if (!kvm_eventfds_enabled()) {
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- }
-
sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
- if (dev->max_rev >= 1) {
- virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
- }
css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
d->hotplugged, 1);
}
-static void virtio_ccw_post_plugged(DeviceState *d, Error **errp)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
-
- if (!virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1)) {
- /* A backend didn't support modern virtio. */
- dev->max_rev = 0;
- }
-}
-
static void virtio_ccw_device_unplugged(DeviceState *d)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
@@ -1593,13 +1588,10 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->load_queue = virtio_ccw_load_queue;
k->save_config = virtio_ccw_save_config;
k->load_config = virtio_ccw_load_config;
+ k->pre_plugged = virtio_ccw_pre_plugged;
k->device_plugged = virtio_ccw_device_plugged;
- k->post_plugged = virtio_ccw_post_plugged;
k->device_unplugged = virtio_ccw_device_unplugged;
- k->ioeventfd_started = virtio_ccw_ioeventfd_started;
- k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_ccw_ioeventfd_enabled;
k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}
@@ -1658,6 +1650,57 @@ static const TypeInfo virtio_ccw_9p_info = {
};
#endif
+#ifdef CONFIG_VHOST_VSOCK
+
+static Property vhost_vsock_ccw_properties[] = {
+ DEFINE_PROP_CSS_DEV_ID("devno", VirtioCcwDevice, parent_obj.bus_id),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_vsock_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VHostVSockCCWState *dev = VHOST_VSOCK_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void vhost_vsock_ccw_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = vhost_vsock_ccw_realize;
+ k->exit = virtio_ccw_exit;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = vhost_vsock_ccw_properties;
+ dc->reset = virtio_ccw_reset;
+}
+
+static void vhost_vsock_ccw_instance_init(Object *obj)
+{
+ VHostVSockCCWState *dev = VHOST_VSOCK_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_VSOCK);
+}
+
+static const TypeInfo vhost_vsock_ccw_info = {
+ .name = TYPE_VHOST_VSOCK_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VHostVSockCCWState),
+ .instance_init = vhost_vsock_ccw_instance_init,
+ .class_init = vhost_vsock_ccw_class_init,
+};
+#endif
+
static void virtio_ccw_register(void)
{
type_register_static(&virtio_ccw_bus_info);
@@ -1674,6 +1717,9 @@ static void virtio_ccw_register(void)
#ifdef CONFIG_VIRTFS
type_register_static(&virtio_ccw_9p_info);
#endif
+#ifdef CONFIG_VHOST_VSOCK
+ type_register_static(&vhost_vsock_ccw_info);
+#endif
}
type_init(virtio_ccw_register)
diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h
index 1c6bc86316..77d10f1671 100644
--- a/hw/s390x/virtio-ccw.h
+++ b/hw/s390x/virtio-ccw.h
@@ -23,6 +23,9 @@
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-rng.h"
#include "hw/virtio/virtio-bus.h"
+#ifdef CONFIG_VHOST_VSOCK
+#include "hw/virtio/vhost-vsock.h"
+#endif /* CONFIG_VHOST_VSOCK */
#include "hw/s390x/s390_flic.h"
#include "hw/s390x/css.h"
@@ -42,6 +45,7 @@
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83
@@ -82,8 +86,6 @@ struct VirtioCcwDevice {
int revision;
uint32_t max_rev;
VirtioBusState bus;
- bool ioeventfd_started;
- bool ioeventfd_disabled;
uint32_t flags;
uint8_t thinint_isc;
AdapterRoutes routes;
@@ -95,7 +97,7 @@ struct VirtioCcwDevice {
};
/* The maximum virtio revision we support. */
-#define VIRTIO_CCW_MAX_REV 1
+#define VIRTIO_CCW_MAX_REV 2
static inline int virtio_ccw_rev_max(VirtioCcwDevice *dev)
{
return dev->max_rev;
@@ -180,7 +182,6 @@ typedef struct VirtIORNGCcw {
VirtIORNG vdev;
} VirtIORNGCcw;
-void virtio_ccw_device_update_status(SubchDev *sch);
VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch);
#ifdef CONFIG_VIRTFS
@@ -197,4 +198,16 @@ typedef struct V9fsCCWState {
#endif /* CONFIG_VIRTFS */
+#ifdef CONFIG_VHOST_VSOCK
+#define TYPE_VHOST_VSOCK_CCW "vhost-vsock-ccw"
+#define VHOST_VSOCK_CCW(obj) \
+ OBJECT_CHECK(VHostVSockCCWState, (obj), TYPE_VHOST_VSOCK_CCW)
+
+typedef struct VHostVSockCCWState {
+ VirtioCcwDevice parent_obj;
+ VHostVSock vdev;
+} VHostVSockCCWState;
+
+#endif /* CONFIG_VHOST_VSOCK */
+
#endif
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 1f2f2d33dd..5a5a4e946a 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -406,11 +406,9 @@ uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
/* Data out. */
qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
s->rregs[ESP_FIFO] = 0;
- esp_raise_irq(s);
} else if (s->ti_rptr < s->ti_wptr) {
s->ti_size--;
s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
- esp_raise_irq(s);
}
if (s->ti_rptr == s->ti_wptr) {
s->ti_rptr = 0;
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index df205cdafe..feb1191315 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -19,6 +19,7 @@
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "sysemu/dma.h"
+#include "qemu/log.h"
//#define DEBUG_LSI
//#define DEBUG_LSI_REG
@@ -34,6 +35,21 @@ do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__); exit(1);} while
do { fprintf(stderr, "lsi_scsi: error: " fmt , ## __VA_ARGS__);} while (0)
#endif
+static const char *names[] = {
+ "SCNTL0", "SCNTL1", "SCNTL2", "SCNTL3", "SCID", "SXFER", "SDID", "GPREG",
+ "SFBR", "SOCL", "SSID", "SBCL", "DSTAT", "SSTAT0", "SSTAT1", "SSTAT2",
+ "DSA0", "DSA1", "DSA2", "DSA3", "ISTAT", "0x15", "0x16", "0x17",
+ "CTEST0", "CTEST1", "CTEST2", "CTEST3", "TEMP0", "TEMP1", "TEMP2", "TEMP3",
+ "DFIFO", "CTEST4", "CTEST5", "CTEST6", "DBC0", "DBC1", "DBC2", "DCMD",
+ "DNAD0", "DNAD1", "DNAD2", "DNAD3", "DSP0", "DSP1", "DSP2", "DSP3",
+ "DSPS0", "DSPS1", "DSPS2", "DSPS3", "SCRATCHA0", "SCRATCHA1", "SCRATCHA2", "SCRATCHA3",
+ "DMODE", "DIEN", "SBR", "DCNTL", "ADDER0", "ADDER1", "ADDER2", "ADDER3",
+ "SIEN0", "SIEN1", "SIST0", "SIST1", "SLPAR", "0x45", "MACNTL", "GPCNTL",
+ "STIME0", "STIME1", "RESPID", "0x4b", "STEST0", "STEST1", "STEST2", "STEST3",
+ "SIDL", "0x51", "0x52", "0x53", "SODL", "0x55", "0x56", "0x57",
+ "SBDL", "0x59", "0x5a", "0x5b", "SCRATCHB0", "SCRATCHB1", "SCRATCHB2", "SCRATCHB3",
+};
+
#define LSI_MAX_DEVS 7
#define LSI_SCNTL0_TRG 0x01
@@ -194,6 +210,7 @@ typedef struct {
MemoryRegion mmio_io;
MemoryRegion ram_io;
MemoryRegion io_io;
+ AddressSpace pci_io_as;
    int carry; /* ??? Should this be a visible register somewhere? */
int status;
@@ -309,7 +326,7 @@ static void lsi_soft_reset(LSIState *s)
s->istat0 = 0;
s->istat1 = 0;
s->dcmd = 0x40;
- s->dstat = LSI_DSTAT_DFE;
+ s->dstat = 0;
s->dien = 0;
s->sist0 = 0;
s->sist1 = 0;
@@ -391,6 +408,30 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val);
static void lsi_execute_script(LSIState *s);
static void lsi_reselect(LSIState *s, lsi_request *p);
+static inline int lsi_mem_read(LSIState *s, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ if (s->dmode & LSI_DMODE_SIOM) {
+ address_space_read(&s->pci_io_as, addr, MEMTXATTRS_UNSPECIFIED,
+ buf, len);
+ return 0;
+ } else {
+ return pci_dma_read(PCI_DEVICE(s), addr, buf, len);
+ }
+}
+
+static inline int lsi_mem_write(LSIState *s, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ if (s->dmode & LSI_DMODE_DIOM) {
+ address_space_write(&s->pci_io_as, addr, MEMTXATTRS_UNSPECIFIED,
+ buf, len);
+ return 0;
+ } else {
+ return pci_dma_write(PCI_DEVICE(s), addr, buf, len);
+ }
+}
+
static inline uint32_t read_dword(LSIState *s, uint32_t addr)
{
uint32_t buf;
@@ -534,7 +575,6 @@ static void lsi_bad_selection(LSIState *s, uint32_t id)
/* Initiate a SCSI layer data transfer. */
static void lsi_do_dma(LSIState *s, int out)
{
- PCIDevice *pci_dev;
uint32_t count;
dma_addr_t addr;
SCSIDevice *dev;
@@ -546,7 +586,6 @@ static void lsi_do_dma(LSIState *s, int out)
return;
}
- pci_dev = PCI_DEVICE(s);
dev = s->current->req->dev;
assert(dev);
@@ -572,9 +611,9 @@ static void lsi_do_dma(LSIState *s, int out)
}
/* ??? Set SFBR to first data byte. */
if (out) {
- pci_dma_read(pci_dev, addr, s->current->dma_buf, count);
+ lsi_mem_read(s, addr, s->current->dma_buf, count);
} else {
- pci_dma_write(pci_dev, addr, s->current->dma_buf, count);
+ lsi_mem_write(s, addr, s->current->dma_buf, count);
}
s->current->dma_len -= count;
if (s->current->dma_len == 0) {
@@ -1006,15 +1045,14 @@ bad:
#define LSI_BUF_SIZE 4096
static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count)
{
- PCIDevice *d = PCI_DEVICE(s);
int n;
uint8_t buf[LSI_BUF_SIZE];
DPRINTF("memcpy dest 0x%08x src 0x%08x count %d\n", dest, src, count);
while (count) {
n = (count > LSI_BUF_SIZE) ? LSI_BUF_SIZE : count;
- pci_dma_read(d, src, buf, n);
- pci_dma_write(d, dest, buf, n);
+ lsi_mem_read(s, src, buf, n);
+ lsi_mem_write(s, dest, buf, n);
src += n;
dest += n;
count -= n;
@@ -1480,155 +1518,200 @@ again:
static uint8_t lsi_reg_readb(LSIState *s, int offset)
{
- uint8_t tmp;
+ uint8_t ret;
+
#define CASE_GET_REG24(name, addr) \
- case addr: return s->name & 0xff; \
- case addr + 1: return (s->name >> 8) & 0xff; \
- case addr + 2: return (s->name >> 16) & 0xff;
+ case addr: ret = s->name & 0xff; break; \
+ case addr + 1: ret = (s->name >> 8) & 0xff; break; \
+ case addr + 2: ret = (s->name >> 16) & 0xff; break;
#define CASE_GET_REG32(name, addr) \
- case addr: return s->name & 0xff; \
- case addr + 1: return (s->name >> 8) & 0xff; \
- case addr + 2: return (s->name >> 16) & 0xff; \
- case addr + 3: return (s->name >> 24) & 0xff;
+ case addr: ret = s->name & 0xff; break; \
+ case addr + 1: ret = (s->name >> 8) & 0xff; break; \
+ case addr + 2: ret = (s->name >> 16) & 0xff; break; \
+ case addr + 3: ret = (s->name >> 24) & 0xff; break;
-#ifdef DEBUG_LSI_REG
- DPRINTF("Read reg %x\n", offset);
-#endif
switch (offset) {
case 0x00: /* SCNTL0 */
- return s->scntl0;
+ ret = s->scntl0;
+ break;
case 0x01: /* SCNTL1 */
- return s->scntl1;
+ ret = s->scntl1;
+ break;
case 0x02: /* SCNTL2 */
- return s->scntl2;
+ ret = s->scntl2;
+ break;
case 0x03: /* SCNTL3 */
- return s->scntl3;
+ ret = s->scntl3;
+ break;
case 0x04: /* SCID */
- return s->scid;
+ ret = s->scid;
+ break;
case 0x05: /* SXFER */
- return s->sxfer;
+ ret = s->sxfer;
+ break;
case 0x06: /* SDID */
- return s->sdid;
+ ret = s->sdid;
+ break;
case 0x07: /* GPREG0 */
- return 0x7f;
+ ret = 0x7f;
+ break;
case 0x08: /* Revision ID */
- return 0x00;
+ ret = 0x00;
+ break;
case 0x09: /* SOCL */
- return s->socl;
+ ret = s->socl;
+ break;
case 0xa: /* SSID */
- return s->ssid;
+ ret = s->ssid;
+ break;
case 0xb: /* SBCL */
/* ??? This is not correct. However it's (hopefully) only
used for diagnostics, so should be ok. */
- return 0;
+ ret = 0;
+ break;
case 0xc: /* DSTAT */
- tmp = s->dstat | LSI_DSTAT_DFE;
+ ret = s->dstat | LSI_DSTAT_DFE;
if ((s->istat0 & LSI_ISTAT0_INTF) == 0)
s->dstat = 0;
lsi_update_irq(s);
- return tmp;
+ break;
case 0x0d: /* SSTAT0 */
- return s->sstat0;
+ ret = s->sstat0;
+ break;
case 0x0e: /* SSTAT1 */
- return s->sstat1;
+ ret = s->sstat1;
+ break;
case 0x0f: /* SSTAT2 */
- return s->scntl1 & LSI_SCNTL1_CON ? 0 : 2;
+ ret = s->scntl1 & LSI_SCNTL1_CON ? 0 : 2;
+ break;
CASE_GET_REG32(dsa, 0x10)
case 0x14: /* ISTAT0 */
- return s->istat0;
+ ret = s->istat0;
+ break;
case 0x15: /* ISTAT1 */
- return s->istat1;
+ ret = s->istat1;
+ break;
case 0x16: /* MBOX0 */
- return s->mbox0;
+ ret = s->mbox0;
+ break;
case 0x17: /* MBOX1 */
- return s->mbox1;
+ ret = s->mbox1;
+ break;
case 0x18: /* CTEST0 */
- return 0xff;
+ ret = 0xff;
+ break;
case 0x19: /* CTEST1 */
- return 0;
+ ret = 0;
+ break;
case 0x1a: /* CTEST2 */
- tmp = s->ctest2 | LSI_CTEST2_DACK | LSI_CTEST2_CM;
+ ret = s->ctest2 | LSI_CTEST2_DACK | LSI_CTEST2_CM;
if (s->istat0 & LSI_ISTAT0_SIGP) {
s->istat0 &= ~LSI_ISTAT0_SIGP;
- tmp |= LSI_CTEST2_SIGP;
+ ret |= LSI_CTEST2_SIGP;
}
- return tmp;
+ break;
case 0x1b: /* CTEST3 */
- return s->ctest3;
+ ret = s->ctest3;
+ break;
CASE_GET_REG32(temp, 0x1c)
case 0x20: /* DFIFO */
- return 0;
+ ret = 0;
+ break;
case 0x21: /* CTEST4 */
- return s->ctest4;
+ ret = s->ctest4;
+ break;
case 0x22: /* CTEST5 */
- return s->ctest5;
+ ret = s->ctest5;
+ break;
case 0x23: /* CTEST6 */
- return 0;
+ ret = 0;
+ break;
CASE_GET_REG24(dbc, 0x24)
case 0x27: /* DCMD */
- return s->dcmd;
+ ret = s->dcmd;
+ break;
CASE_GET_REG32(dnad, 0x28)
CASE_GET_REG32(dsp, 0x2c)
CASE_GET_REG32(dsps, 0x30)
CASE_GET_REG32(scratch[0], 0x34)
case 0x38: /* DMODE */
- return s->dmode;
+ ret = s->dmode;
+ break;
case 0x39: /* DIEN */
- return s->dien;
+ ret = s->dien;
+ break;
case 0x3a: /* SBR */
- return s->sbr;
+ ret = s->sbr;
+ break;
case 0x3b: /* DCNTL */
- return s->dcntl;
+ ret = s->dcntl;
+ break;
/* ADDER Output (Debug of relative jump address) */
CASE_GET_REG32(adder, 0x3c)
case 0x40: /* SIEN0 */
- return s->sien0;
+ ret = s->sien0;
+ break;
case 0x41: /* SIEN1 */
- return s->sien1;
+ ret = s->sien1;
+ break;
case 0x42: /* SIST0 */
- tmp = s->sist0;
+ ret = s->sist0;
s->sist0 = 0;
lsi_update_irq(s);
- return tmp;
+ break;
case 0x43: /* SIST1 */
- tmp = s->sist1;
+ ret = s->sist1;
s->sist1 = 0;
lsi_update_irq(s);
- return tmp;
+ break;
case 0x46: /* MACNTL */
- return 0x0f;
+ ret = 0x0f;
+ break;
case 0x47: /* GPCNTL0 */
- return 0x0f;
+ ret = 0x0f;
+ break;
case 0x48: /* STIME0 */
- return s->stime0;
+ ret = s->stime0;
+ break;
case 0x4a: /* RESPID0 */
- return s->respid0;
+ ret = s->respid0;
+ break;
case 0x4b: /* RESPID1 */
- return s->respid1;
+ ret = s->respid1;
+ break;
case 0x4d: /* STEST1 */
- return s->stest1;
+ ret = s->stest1;
+ break;
case 0x4e: /* STEST2 */
- return s->stest2;
+ ret = s->stest2;
+ break;
case 0x4f: /* STEST3 */
- return s->stest3;
+ ret = s->stest3;
+ break;
case 0x50: /* SIDL */
/* This is needed by the linux drivers. We currently only update it
during the MSG IN phase. */
- return s->sidl;
+ ret = s->sidl;
+ break;
case 0x52: /* STEST4 */
- return 0xe0;
+ ret = 0xe0;
+ break;
case 0x56: /* CCNTL0 */
- return s->ccntl0;
+ ret = s->ccntl0;
+ break;
case 0x57: /* CCNTL1 */
- return s->ccntl1;
+ ret = s->ccntl1;
+ break;
case 0x58: /* SBDL */
/* Some drivers peek at the data bus during the MSG IN phase. */
if ((s->sstat1 & PHASE_MASK) == PHASE_MI)
return s->msg[0];
- return 0;
+ ret = 0;
+ break;
case 0x59: /* SBDL high */
- return 0;
+ ret = 0;
+ break;
CASE_GET_REG32(mmrs, 0xa0)
CASE_GET_REG32(mmws, 0xa4)
CASE_GET_REG32(sfs, 0xa8)
@@ -1643,18 +1726,34 @@ static uint8_t lsi_reg_readb(LSIState *s, int offset)
CASE_GET_REG32(ia, 0xd4)
CASE_GET_REG32(sbc, 0xd8)
CASE_GET_REG32(csbc, 0xdc)
- }
- if (offset >= 0x5c && offset < 0xa0) {
+ case 0x5c ... 0x9f:
+ {
int n;
int shift;
n = (offset - 0x58) >> 2;
shift = (offset & 3) * 8;
- return (s->scratch[n] >> shift) & 0xff;
+ ret = (s->scratch[n] >> shift) & 0xff;
+ break;
+ }
+ default:
+ {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "lsi_scsi: invalid read from reg %s %x\n",
+ offset < ARRAY_SIZE(names) ? names[offset] : "???",
+ offset);
+ ret = 0xff;
+ break;
+ }
}
- BADF("readb 0x%x\n", offset);
- exit(1);
#undef CASE_GET_REG24
#undef CASE_GET_REG32
+
+#ifdef DEBUG_LSI_REG
+ DPRINTF("Read reg %s %x = %02x\n",
+ offset < ARRAY_SIZE(names) ? names[offset] : "???", offset, ret);
+#endif
+
+ return ret;
}
static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
@@ -1671,7 +1770,8 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
case addr + 3: s->name &= 0x00ffffff; s->name |= val << 24; break;
#ifdef DEBUG_LSI_REG
- DPRINTF("Write reg %x = %02x\n", offset, val);
+ DPRINTF("Write reg %s %x = %02x\n",
+ offset < ARRAY_SIZE(names) ? names[offset] : "???", offset, val);
#endif
switch (offset) {
case 0x00: /* SCNTL0 */
@@ -1799,9 +1899,6 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
CASE_SET_REG32(dsps, 0x30)
CASE_SET_REG32(scratch[0], 0x34)
case 0x38: /* DMODE */
- if (val & (LSI_DMODE_SIOM | LSI_DMODE_DIOM)) {
- BADF("IO mappings not implemented\n");
- }
s->dmode = val;
break;
case 0x39: /* DIEN */
@@ -1886,7 +1983,10 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
shift = (offset & 3) * 8;
s->scratch[n] = deposit32(s->scratch[n], shift, 8, val);
} else {
- BADF("Unhandled writeb 0x%x = 0x%x\n", offset, val);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "lsi_scsi: invalid write to reg %s %x (0x%02x)\n",
+ offset < ARRAY_SIZE(names) ? names[offset] : "???",
+ offset, val);
}
}
#undef CASE_SET_REG24
@@ -2108,6 +2208,8 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
memory_region_init_io(&s->io_io, OBJECT(s), &lsi_io_ops, s,
"lsi-io", 256);
+ address_space_init(&s->pci_io_as, pci_address_space_io(dev), "lsi-pci-io");
+
pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_io);
pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio_io);
pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io);
@@ -2119,6 +2221,13 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
}
}
+static void lsi_scsi_unrealize(DeviceState *dev, Error **errp)
+{
+ LSIState *s = LSI53C895A(dev);
+
+ address_space_destroy(&s->pci_io_as);
+}
+
static void lsi_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -2129,6 +2238,7 @@ static void lsi_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_LSI_53C895A;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
+ dc->unrealize = lsi_scsi_unrealize;
dc->reset = lsi_scsi_reset;
dc->vmsd = &vmstate_lsi_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
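A sketch of the routing decision implemented by lsi_mem_read()/lsi_mem_write() above: the DMODE SIOM/DIOM bits pick PCI I/O space or PCI memory space for script DMA. The bit values and helper functions below are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define DMODE_SIOM 0x20   /* source I/O memory enable (illustrative value) */
#define DMODE_DIOM 0x10   /* destination I/O memory enable (illustrative) */

static void read_io(uint64_t addr)  { printf("read 0x%llx via PCI I/O space\n", (unsigned long long)addr); }
static void read_mem(uint64_t addr) { printf("read 0x%llx via PCI memory space\n", (unsigned long long)addr); }

static void script_read(uint8_t dmode, uint64_t addr)
{
    if (dmode & DMODE_SIOM) {
        read_io(addr);    /* corresponds to address_space_read(&s->pci_io_as, ...) */
    } else {
        read_mem(addr);   /* corresponds to pci_dma_read(...) */
    }
}

int main(void)
{
    script_read(0, 0x1000);
    script_read(DMODE_SIOM, 0x1000);
    return 0;
}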
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index e968302fdc..67fc1e7893 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -300,12 +300,6 @@ unmap:
return iov_count - i;
}
-static void megasas_unmap_sgl(MegasasCmd *cmd)
-{
- qemu_sglist_destroy(&cmd->qsg);
- cmd->iov_offset = 0;
-}
-
/*
* passthrough sense and io sense are at the same offset
*/
@@ -461,9 +455,12 @@ static void megasas_unmap_frame(MegasasState *s, MegasasCmd *cmd)
{
PCIDevice *p = PCI_DEVICE(s);
- pci_dma_unmap(p, cmd->frame, cmd->pa_size, 0, 0);
+ if (cmd->pa_size) {
+ pci_dma_unmap(p, cmd->frame, cmd->pa_size, 0, 0);
+ }
cmd->frame = NULL;
cmd->pa = 0;
+ cmd->pa_size = 0;
clear_bit(cmd->index, s->frame_map);
}
@@ -577,6 +574,20 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
}
}
+static void megasas_complete_command(MegasasCmd *cmd)
+{
+ qemu_sglist_destroy(&cmd->qsg);
+ cmd->iov_size = 0;
+ cmd->iov_offset = 0;
+
+ cmd->req->hba_private = NULL;
+ scsi_req_unref(cmd->req);
+ cmd->req = NULL;
+
+ megasas_unmap_frame(cmd->state, cmd);
+ megasas_complete_frame(cmd->state, cmd->context);
+}
+
static void megasas_reset_frames(MegasasState *s)
{
int i;
@@ -593,9 +604,9 @@ static void megasas_reset_frames(MegasasState *s)
static void megasas_abort_command(MegasasCmd *cmd)
{
- if (cmd->req) {
+ /* Never abort internal commands. */
+ if (cmd->req != NULL) {
scsi_req_cancel(cmd->req);
- cmd->req = NULL;
}
}
@@ -686,9 +697,6 @@ static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size)
{
trace_megasas_finish_dcmd(cmd->index, iov_size);
- if (cmd->frame->header.sge_count) {
- qemu_sglist_destroy(&cmd->qsg);
- }
if (iov_size > cmd->iov_size) {
if (megasas_frame_is_ieee_sgl(cmd)) {
cmd->frame->dcmd.sgl.sg_skinny->len = cpu_to_le32(iov_size);
@@ -698,7 +706,6 @@ static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size)
cmd->frame->dcmd.sgl.sg32->len = cpu_to_le32(iov_size);
}
}
- cmd->iov_size = 0;
}
static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd)
@@ -1586,7 +1593,6 @@ static int megasas_finish_internal_dcmd(MegasasCmd *cmd,
int lun = req->lun;
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
- scsi_req_unref(req);
trace_megasas_dcmd_internal_finish(cmd->index, opcode, lun);
switch (opcode) {
case MFI_DCMD_PD_GET_INFO:
@@ -1857,7 +1863,11 @@ static void megasas_command_complete(SCSIRequest *req, uint32_t status,
trace_megasas_command_complete(cmd->index, status, resid);
- if (cmd->req != req) {
+ if (req->io_canceled) {
+ return;
+ }
+
+ if (cmd->req == NULL) {
/*
* Internal command complete
*/
@@ -1876,25 +1886,21 @@ static void megasas_command_complete(SCSIRequest *req, uint32_t status,
megasas_copy_sense(cmd);
}
- megasas_unmap_sgl(cmd);
cmd->frame->header.scsi_status = req->status;
- scsi_req_unref(cmd->req);
- cmd->req = NULL;
}
cmd->frame->header.cmd_status = cmd_status;
- megasas_unmap_frame(cmd->state, cmd);
- megasas_complete_frame(cmd->state, cmd->context);
+ megasas_complete_command(cmd);
}
-static void megasas_command_cancel(SCSIRequest *req)
+static void megasas_command_cancelled(SCSIRequest *req)
{
MegasasCmd *cmd = req->hba_private;
- if (cmd) {
- megasas_abort_command(cmd);
- } else {
- scsi_req_unref(req);
+ if (!cmd) {
+ return;
}
+ cmd->frame->header.cmd_status = MFI_STAT_SCSI_IO_FAILED;
+ megasas_complete_command(cmd);
}
static int megasas_handle_abort(MegasasState *s, MegasasCmd *cmd)
@@ -1981,7 +1987,11 @@ static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr,
break;
}
if (frame_status != MFI_STAT_INVALID_STATUS) {
- cmd->frame->header.cmd_status = frame_status;
+ if (cmd->frame) {
+ cmd->frame->header.cmd_status = frame_status;
+ } else {
+ megasas_frame_set_cmd_status(s, frame_addr, frame_status);
+ }
megasas_unmap_frame(s, cmd);
megasas_complete_frame(s, cmd->context);
}
@@ -2309,7 +2319,7 @@ static const struct SCSIBusInfo megasas_scsi_info = {
.transfer_data = megasas_xfer_complete,
.get_sg_list = megasas_get_sg_list,
.complete = megasas_command_complete,
- .cancel = megasas_command_cancel,
+ .cancel = megasas_command_cancelled,
};
static void megasas_scsi_realize(PCIDevice *dev, Error **errp)
diff --git a/hw/scsi/mptconfig.c b/hw/scsi/mptconfig.c
index 707185469e..87a416a5cb 100644
--- a/hw/scsi/mptconfig.c
+++ b/hw/scsi/mptconfig.c
@@ -158,7 +158,7 @@ static size_t mptsas_config_pack(uint8_t **data, const char *fmt, ...)
va_end(ap);
if (data) {
- assert(ret < 256 && (ret % 4) == 0);
+ assert(ret / 4 < 256 && (ret % 4) == 0);
stb_p(*data + 1, ret / 4);
}
return ret;
@@ -203,7 +203,7 @@ size_t mptsas_config_manufacturing_1(MPTSASState *s, uint8_t **data, int address
{
/* VPD - all zeros */
return MPTSAS_CONFIG_PACK(1, MPI_CONFIG_PAGETYPE_MANUFACTURING, 0x00,
- "s256");
+ "*s256");
}
static
@@ -328,7 +328,7 @@ size_t mptsas_config_ioc_0(MPTSASState *s, uint8_t **data, int address)
return MPTSAS_CONFIG_PACK(0, MPI_CONFIG_PAGETYPE_IOC, 0x01,
"*l*lwwb*b*b*blww",
pcic->vendor_id, pcic->device_id, pcic->revision,
- pcic->subsystem_vendor_id,
+ pcic->class_id, pcic->subsystem_vendor_id,
pcic->subsystem_id);
}
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
index 0e0a22f696..ad87e78fe2 100644
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -304,7 +304,7 @@ static int mptsas_process_scsi_io_request(MPTSASState *s,
goto bad;
}
- req = g_new(MPTSASRequest, 1);
+ req = g_new0(MPTSASRequest, 1);
QTAILQ_INSERT_TAIL(&s->pending, req, next);
req->scsi_io = *scsi_io;
req->dev = s;
@@ -1269,7 +1269,7 @@ static const struct SCSIBusInfo mptsas_scsi_info = {
.load_request = mptsas_load_request,
};
-static void mptsas_scsi_init(PCIDevice *dev, Error **errp)
+static void mptsas_scsi_realize(PCIDevice *dev, Error **errp)
{
DeviceState *d = DEVICE(dev);
MPTSASState *s = MPT_SAS(dev);
@@ -1426,7 +1426,7 @@ static void mptsas1068_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
- pc->realize = mptsas_scsi_init;
+ pc->realize = mptsas_scsi_realize;
pc->exit = mptsas_scsi_uninit;
pc->romfile = 0;
pc->vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 836a1553ed..a96319138a 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -341,6 +341,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
r->req.resid -= r->req.sg->size;
r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ BDRV_SECTOR_SIZE,
sdc->dma_readv, r, scsi_dma_complete, r,
DMA_DIRECTION_FROM_DEVICE);
} else {
@@ -396,7 +397,7 @@ static void scsi_read_data(SCSIRequest *req)
return;
}
- if (s->tray_open) {
+ if (!blk_is_available(req->dev->conf.blk)) {
scsi_read_complete(r, -ENOMEDIUM);
return;
}
@@ -519,7 +520,7 @@ static void scsi_write_data(SCSIRequest *req)
scsi_write_complete_noio(r, 0);
return;
}
- if (s->tray_open) {
+ if (!blk_is_available(req->dev->conf.blk)) {
scsi_write_complete_noio(r, -ENOMEDIUM);
return;
}
@@ -539,6 +540,7 @@ static void scsi_write_data(SCSIRequest *req)
r->req.resid -= r->req.sg->size;
r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ BDRV_SECTOR_SIZE,
sdc->dma_writev, r, scsi_dma_complete, r,
DMA_DIRECTION_TO_DEVICE);
} else {
@@ -599,8 +601,8 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
}
l = strlen(s->serial);
- if (l > 20) {
- l = 20;
+ if (l > 36) {
+ l = 36;
}
DPRINTF("Inquiry EVPD[Serial number] "
@@ -792,10 +794,7 @@ static inline bool media_is_dvd(SCSIDiskState *s)
if (s->qdev.type != TYPE_ROM) {
return false;
}
- if (!blk_is_inserted(s->qdev.conf.blk)) {
- return false;
- }
- if (s->tray_open) {
+ if (!blk_is_available(s->qdev.conf.blk)) {
return false;
}
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
@@ -808,10 +807,7 @@ static inline bool media_is_cd(SCSIDiskState *s)
if (s->qdev.type != TYPE_ROM) {
return false;
}
- if (!blk_is_inserted(s->qdev.conf.blk)) {
- return false;
- }
- if (s->tray_open) {
+ if (!blk_is_available(s->qdev.conf.blk)) {
return false;
}
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
@@ -875,7 +871,7 @@ static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
}
if (format != 0xff) {
- if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
+ if (!blk_is_available(s->qdev.conf.blk)) {
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
return -1;
}
@@ -1857,7 +1853,7 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
break;
default:
- if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
+ if (!blk_is_available(s->qdev.conf.blk)) {
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
return 0;
}
@@ -1886,7 +1882,7 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
memset(outbuf, 0, r->buflen);
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
- assert(!s->tray_open && blk_is_inserted(s->qdev.conf.blk));
+ assert(blk_is_available(s->qdev.conf.blk));
break;
case INQUIRY:
buflen = scsi_disk_emulate_inquiry(req, outbuf);
@@ -2126,7 +2122,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
command = buf[0];
- if (s->tray_open || !blk_is_inserted(s->qdev.conf.blk)) {
+ if (!blk_is_available(s->qdev.conf.blk)) {
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
return 0;
}
@@ -2359,6 +2355,11 @@ static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
+
+ if (!dev->conf.blk) {
+ dev->conf.blk = blk_new();
+ }
+
s->qdev.blocksize = 2048;
s->qdev.type = TYPE_ROM;
s->features |= 1 << SCSI_DISK_F_REMOVABLE;
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index 8fbd50f660..6090a204a0 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -42,19 +42,10 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "viosrp.h"
+#include "trace.h"
#include <libfdt.h>
-/*#define DEBUG_VSCSI*/
-
-#ifdef DEBUG_VSCSI
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
/*
* Virtual SCSI device
*/
@@ -237,8 +228,7 @@ static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
int total_len = sizeof(iu->srp.rsp);
uint8_t sol_not = iu->srp.cmd.sol_not;
- DPRINTF("VSCSI: Sending resp status: 0x%x, "
- "res_in: %d, res_out: %d\n", status, res_in, res_out);
+ trace_spapr_vscsi_send_rsp(status, res_in, res_out);
memset(iu, 0, sizeof(struct srp_rsp));
iu->srp.rsp.opcode = SRP_RSP;
@@ -298,13 +288,13 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
switch (req->dma_fmt) {
case SRP_NO_DATA_DESC: {
- DPRINTF("VSCSI: no data descriptor\n");
+ trace_spapr_vscsi_fetch_desc_no_data();
return 0;
}
case SRP_DATA_DESC_DIRECT: {
memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret));
assert(req->cur_desc_num == 0);
- DPRINTF("VSCSI: direct segment\n");
+ trace_spapr_vscsi_fetch_desc_direct();
break;
}
case SRP_DATA_DESC_INDIRECT: {
@@ -312,30 +302,29 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
(cmd->add_data + req->cdb_offset);
if (n < req->local_desc) {
*ret = tmp->desc_list[n];
- DPRINTF("VSCSI: indirect segment local tag=0x%x desc#%d/%d\n",
- req->qtag, n, req->local_desc);
-
+ trace_spapr_vscsi_fetch_desc_indirect(req->qtag, n,
+ req->local_desc);
} else if (n < req->total_desc) {
int rc;
struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc);
unsigned desc_offset = n * sizeof(struct srp_direct_buf);
if (desc_offset >= tbl_desc.len) {
- DPRINTF("VSCSI: #%d is ouf of range (%d bytes)\n",
- n, desc_offset);
+ trace_spapr_vscsi_fetch_desc_out_of_range(n, desc_offset);
return -1;
}
rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset,
ret, sizeof(struct srp_direct_buf));
if (rc) {
- DPRINTF("VSCSI: spapr_vio_dma_read -> %d reading ext_desc\n",
- rc);
+ trace_spapr_vscsi_fetch_desc_dma_read_error(rc);
return -1;
}
- DPRINTF("VSCSI: indirect segment ext. tag=0x%x desc#%d/%d { va=%"PRIx64" len=%x }\n",
- req->qtag, n, req->total_desc, tbl_desc.va, tbl_desc.len);
+ trace_spapr_vscsi_fetch_desc_indirect_seg_ext(req->qtag, n,
+ req->total_desc,
+ tbl_desc.va,
+ tbl_desc.len);
} else {
- DPRINTF("VSCSI: Out of descriptors !\n");
+ trace_spapr_vscsi_fetch_desc_out_of_desc();
return 0;
}
break;
@@ -347,15 +336,16 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
*ret = vscsi_swap_desc(*ret);
if (buf_offset > ret->len) {
- DPRINTF(" offset=%x is out of a descriptor #%d boundary=%x\n",
- buf_offset, req->cur_desc_num, ret->len);
+ trace_spapr_vscsi_fetch_desc_out_of_desc_boundary(buf_offset,
+ req->cur_desc_num,
+ ret->len);
return -1;
}
ret->va += buf_offset;
ret->len -= buf_offset;
- DPRINTF(" cur=%d offs=%x ret { va=%"PRIx64" len=%x }\n",
- req->cur_desc_num, req->cur_desc_offset, ret->va, ret->len);
+ trace_spapr_vscsi_fetch_desc_done(req->cur_desc_num, req->cur_desc_offset,
+ ret->va, ret->len);
return ret->len ? 1 : 0;
}
@@ -398,7 +388,7 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
int rc = 0;
uint32_t llen, total = 0;
- DPRINTF("VSCSI: indirect segment 0x%x bytes\n", len);
+ trace_spapr_vscsi_srp_indirect_data(len);
/* While we have data ... */
while (len) {
@@ -417,11 +407,10 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
}
if (rc) {
- DPRINTF("VSCSI: spapr_vio_dma_r/w(%d) -> %d\n", req->writing, rc);
+ trace_spapr_vscsi_srp_indirect_data_rw(req->writing, rc);
break;
}
- DPRINTF("VSCSI: data: %02x %02x %02x %02x...\n",
- buf[0], buf[1], buf[2], buf[3]);
+ trace_spapr_vscsi_srp_indirect_data_buf(buf[0], buf[1], buf[2], buf[3]);
len -= llen;
buf += llen;
@@ -447,7 +436,7 @@ static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req,
switch (req->dma_fmt) {
case SRP_NO_DATA_DESC:
- DPRINTF("VSCSI: no data desc transfer, skipping 0x%x bytes\n", len);
+ trace_spapr_vscsi_srp_transfer_data(len);
break;
case SRP_DATA_DESC_DIRECT:
err = vscsi_srp_direct_data(s, req, buf, len);
@@ -527,8 +516,7 @@ static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
uint8_t *buf;
int rc = 0;
- DPRINTF("VSCSI: SCSI xfer complete tag=0x%x len=0x%x, req=%p\n",
- sreq->tag, len, req);
+ trace_spapr_vscsi_transfer_data(sreq->tag, len, req);
if (req == NULL) {
fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
return;
@@ -557,8 +545,7 @@ static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status, size_t re
vscsi_req *req = sreq->hba_private;
int32_t res_in = 0, res_out = 0;
- DPRINTF("VSCSI: SCSI cmd complete, tag=0x%x status=0x%x, req=%p\n",
- sreq->tag, status, req);
+ trace_spapr_vscsi_command_complete(sreq->tag, status, req);
if (req == NULL) {
fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
return;
@@ -567,16 +554,15 @@ static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status, size_t re
if (status == CHECK_CONDITION) {
req->senselen = scsi_req_get_sense(req->sreq, req->sense,
sizeof(req->sense));
- DPRINTF("VSCSI: Sense data, %d bytes:\n", req->senselen);
- DPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ trace_spapr_vscsi_command_complete_sense_data1(req->senselen,
req->sense[0], req->sense[1], req->sense[2], req->sense[3],
req->sense[4], req->sense[5], req->sense[6], req->sense[7]);
- DPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ trace_spapr_vscsi_command_complete_sense_data2(
req->sense[8], req->sense[9], req->sense[10], req->sense[11],
req->sense[12], req->sense[13], req->sense[14], req->sense[15]);
}
- DPRINTF("VSCSI: Command complete err=%d\n", status);
+ trace_spapr_vscsi_command_complete_status(status);
if (status == 0) {
/* We handle overflows, not underflows for normal commands,
* but hopefully nobody cares
@@ -635,8 +621,8 @@ static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);
- DPRINTF("VSCSI: saving tag=%u, current desc#%d, offset=%x\n",
- req->qtag, req->cur_desc_num, req->cur_desc_offset);
+ trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num,
+ req->cur_desc_offset);
}
static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
@@ -660,8 +646,8 @@ static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
req->sreq = scsi_req_ref(sreq);
- DPRINTF("VSCSI: restoring tag=%u, current desc#%d, offset=%x\n",
- req->qtag, req->cur_desc_num, req->cur_desc_offset);
+ trace_spapr_vscsi_load_request(req->qtag, req->cur_desc_num,
+ req->cur_desc_offset);
return req;
}
@@ -672,7 +658,7 @@ static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
struct srp_login_rsp *rsp = &iu->srp.login_rsp;
uint64_t tag = iu->srp.rsp.tag;
- DPRINTF("VSCSI: Got login, sendin response !\n");
+ trace_spapr_vscsi_process_login();
/* TODO handle case that requested size is wrong and
* buffer format is wrong
@@ -795,8 +781,7 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun);
if (!sdev) {
- DPRINTF("VSCSI: Command for lun %08" PRIx64 " with no drive\n",
- be64_to_cpu(srp->cmd.lun));
+ trace_spapr_vscsi_queue_cmd_no_drive(be64_to_cpu(srp->cmd.lun));
if (srp->cmd.cdb[0] == INQUIRY) {
vscsi_inquiry_no_target(s, req);
} else {
@@ -808,9 +793,8 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req);
n = scsi_req_enqueue(req->sreq);
- DPRINTF("VSCSI: Queued command tag 0x%x CMD 0x%x=%s LUN %d ret: %d\n",
- req->qtag, srp->cmd.cdb[0], scsi_command_name(srp->cmd.cdb[0]),
- lun, n);
+ trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0],
+ scsi_command_name(srp->cmd.cdb[0]), lun, n);
if (n) {
/* Transfer direction must be set before preprocessing the
@@ -1141,7 +1125,7 @@ static int vscsi_do_crq(struct VIOsPAPRDevice *dev, uint8_t *crq_data)
crq.s.IU_length = be16_to_cpu(crq.s.IU_length);
crq.s.IU_data_ptr = be64_to_cpu(crq.s.IU_data_ptr);
- DPRINTF("VSCSI: do_crq %02x %02x ...\n", crq.raw[0], crq.raw[1]);
+ trace_spapr_vscsi_do_crq(crq.raw[0], crq.raw[1]);
switch (crq.s.valid) {
case 0xc0: /* Init command/response */
diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
index ed64858fe3..4a2e5d66df 100644
--- a/hw/scsi/trace-events
+++ b/hw/scsi/trace-events
@@ -202,3 +202,30 @@ esp_pci_dma_abort(uint32_t val) "ABORT (%.8x)"
esp_pci_dma_start(uint32_t val) "START (%.8x)"
esp_pci_sbac_read(uint32_t reg) "sbac: 0x%8.8x"
esp_pci_sbac_write(uint32_t reg, uint32_t val) "sbac: 0x%8.8x -> 0x%8.8x"
+
+# hw/scsi/spapr_vscsi.c
+spapr_vscsi_send_rsp(uint8_t status, int32_t res_in, int32_t res_out) "status: 0x%x, res_in: %"PRId32", res_out: %"PRId32
+spapr_vscsi_fetch_desc_no_data(void) "no data descriptor"
+spapr_vscsi_fetch_desc_direct(void) "direct segment"
+spapr_vscsi_fetch_desc_indirect(uint32_t qtag, unsigned desc, unsigned local_desc) "indirect segment local tag=0x%"PRIx32" desc#%u/%u"
+spapr_vscsi_fetch_desc_out_of_range(unsigned desc, unsigned desc_offset) "#%u is out of range (%u bytes)"
+spapr_vscsi_fetch_desc_dma_read_error(int rc) "spapr_vio_dma_read -> %d reading ext_desc"
+spapr_vscsi_fetch_desc_indirect_seg_ext(uint32_t qtag, unsigned n, unsigned desc, uint64_t va, uint32_t len) "indirect segment ext. tag=0x%"PRIx32" desc#%u/%u { va=0x%"PRIx64" len=0x%"PRIx32" }"
+spapr_vscsi_fetch_desc_out_of_desc(void) "Out of descriptors !"
+spapr_vscsi_fetch_desc_out_of_desc_boundary(unsigned offset, unsigned desc, uint32_t len) " offset=0x%x is out of a descriptor #%u boundary=%"PRIx32
+spapr_vscsi_fetch_desc_done(unsigned desc_num, unsigned desc_offset, uint64_t va, uint32_t len) " cur=%u offs=0x%x ret { va=0x%"PRIx64" len=0x%"PRIx32" }"
+spapr_vscsi_srp_indirect_data(uint32_t len) "indirect segment 0x%"PRIx32" bytes"
+spapr_vscsi_srp_indirect_data_rw(int writing, int rc) "spapr_vio_dma_r/w(%d) -> %d"
+spapr_vscsi_srp_indirect_data_buf(unsigned a, unsigned b, unsigned c, unsigned d) " data: %02x %02x %02x %02x..."
+spapr_vscsi_srp_transfer_data(uint32_t len) "no data desc transfer, skipping 0x%"PRIx32" bytes"
+spapr_vscsi_transfer_data(uint32_t tag, uint32_t len, void *req) "SCSI xfer complete tag=0x%"PRIx32" len=0x%"PRIx32", req=%p"
+spapr_vscsi_command_complete(uint32_t tag, uint32_t status, void *req) "SCSI cmd complete, tag=0x%"PRIx32" status=0x%"PRIx32", req=%p"
+spapr_vscsi_command_complete_sense_data1(uint32_t len, unsigned s0, unsigned s1, unsigned s2, unsigned s3, unsigned s4, unsigned s5, unsigned s6, unsigned s7) "Sense data, %d bytes: %02x %02x %02x %02x %02x %02x %02x %02x"
+spapr_vscsi_command_complete_sense_data2(unsigned s8, unsigned s9, unsigned s10, unsigned s11, unsigned s12, unsigned s13, unsigned s14, unsigned s15) " %02x %02x %02x %02x %02x %02x %02x %02x"
+spapr_vscsi_command_complete_status(uint32_t status) "Command complete err=%"PRIu32
+spapr_vscsi_save_request(uint32_t qtag, unsigned desc, unsigned offset) "saving tag=%"PRIu32", current desc#%u, offset=0x%x"
+spapr_vscsi_load_request(uint32_t qtag, unsigned desc, unsigned offset) "restoring tag=%"PRIu32", current desc#%u, offset=0x%x"
+spapr_vscsi_process_login(void) "Got login, sending response !"
+spapr_vscsi_queue_cmd_no_drive(uint64_t lun) "Command for lun %08" PRIx64 " with no drive"
+spapr_vscsi_queue_cmd(uint32_t qtag, unsigned cdb, const char *cmd, int lun, int ret) "Queued command tag 0x%"PRIx32" CMD 0x%x=%s LUN %d ret: %d"
+spapr_vscsi_do_crq(unsigned c0, unsigned c1) "crq: %02x %02x ..."
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index b173b94949..6b8d0f0024 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
@@ -21,20 +22,30 @@
#include "hw/virtio/virtio-access.h"
/* Context: QEMU global mutex held */
-void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
+void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- assert(!s->ctx);
- s->ctx = iothread_get_aio_context(vs->conf.iothread);
-
- /* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->ioeventfd_started) {
- fprintf(stderr, "virtio-scsi: Failed to set iothread "
- "(transport does not support notifiers)");
- exit(1);
+ if (vs->conf.iothread) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
+ error_setg(errp,
+ "device is incompatible with iothread "
+ "(transport does not support notifiers)");
+ return;
+ }
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ error_setg(errp, "ioeventfd is required for iothread");
+ return;
+ }
+ s->ctx = iothread_get_aio_context(vs->conf.iothread);
+ } else {
+ if (!virtio_device_ioeventfd_enabled(vdev)) {
+ return;
+ }
+ s->ctx = qemu_get_aio_context();
}
}
@@ -84,13 +95,6 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
return 0;
}
-void virtio_scsi_dataplane_notify(VirtIODevice *vdev, VirtIOSCSIReq *req)
-{
- if (virtio_should_notify(vdev, req->vq)) {
- event_notifier_set(virtio_queue_get_guest_notifier(req->vq));
- }
-}
-
/* assumes s->ctx held */
static void virtio_scsi_clear_aio(VirtIOSCSI *s)
{
@@ -105,19 +109,19 @@ static void virtio_scsi_clear_aio(VirtIOSCSI *s)
}
/* Context: QEMU global mutex held */
-void virtio_scsi_dataplane_start(VirtIOSCSI *s)
+int virtio_scsi_dataplane_start(VirtIODevice *vdev)
{
int i;
int rc;
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->dataplane_started ||
s->dataplane_starting ||
- s->dataplane_fenced ||
- s->ctx != iothread_get_aio_context(vs->conf.iothread)) {
- return;
+ s->dataplane_fenced) {
+ return 0;
}
s->dataplane_starting = true;
@@ -152,7 +156,7 @@ void virtio_scsi_dataplane_start(VirtIOSCSI *s)
s->dataplane_starting = false;
s->dataplane_started = true;
aio_context_release(s->ctx);
- return;
+ return 0;
fail_vrings:
virtio_scsi_clear_aio(s);
@@ -165,14 +169,16 @@ fail_guest_notifiers:
s->dataplane_fenced = true;
s->dataplane_starting = false;
s->dataplane_started = true;
+ return -ENOSYS;
}
/* Context: QEMU global mutex held */
-void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
+void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
{
- BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
+ VirtIOSCSI *s = VIRTIO_SCSI(vdev);
int i;
if (!s->dataplane_started || s->dataplane_stopping) {
@@ -186,16 +192,13 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
return;
}
s->dataplane_stopping = true;
- assert(s->ctx == iothread_get_aio_context(vs->conf.iothread));
aio_context_acquire(s->ctx);
-
virtio_scsi_clear_aio(s);
+ aio_context_release(s->ctx);
blk_drain_all(); /* ensure there are no in-flight requests */
- aio_context_release(s->ctx);
-
for (i = 0; i < vs->conf.num_queues + 2; i++) {
virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index ce57ef6248..10fd687193 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -69,7 +69,7 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
if (s->dataplane_started && !s->dataplane_fenced) {
- virtio_scsi_dataplane_notify(vdev, req);
+ virtio_notify_irqfd(vdev, vq);
} else {
virtio_notify(vdev, vq);
}
@@ -81,10 +81,11 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}
-static void virtio_scsi_bad_req(void)
+static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
- error_report("wrong size for virtio-scsi headers");
- exit(1);
+ virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_scsi_free_req(req);
}
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
@@ -236,6 +237,13 @@ static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
g_free(n);
}
+static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
+{
+ if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
+ assert(blk_get_aio_context(d->conf.blk) == s->ctx);
+ }
+}
+
/* Return 0 if the request is ready to be completed and return to guest;
* -EINPROGRESS if the request is submitted and will be completed later, in the
* case of async cancellation. */
@@ -247,9 +255,7 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
int target;
int ret = 0;
- if (s->dataplane_started && d) {
- assert(blk_get_aio_context(d->conf.blk) == s->ctx);
- }
+ virtio_scsi_ctx_check(s, d);
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
req->resp.tmf.response = VIRTIO_SCSI_S_OK;
@@ -382,7 +388,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&type, sizeof(type)) < sizeof(type)) {
- virtio_scsi_bad_req();
+ virtio_scsi_bad_req(req);
return;
}
@@ -390,7 +396,8 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (type == VIRTIO_SCSI_T_TMF) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
- virtio_scsi_bad_req();
+ virtio_scsi_bad_req(req);
+ return;
} else {
r = virtio_scsi_do_tmf(s, req);
}
@@ -399,7 +406,8 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
sizeof(VirtIOSCSICtrlANResp)) < 0) {
- virtio_scsi_bad_req();
+ virtio_scsi_bad_req(req);
+ return;
} else {
req->resp.an.event_actual = 0;
req->resp.an.response = VIRTIO_SCSI_S_OK;
@@ -426,7 +434,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -516,7 +524,7 @@ static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
virtio_scsi_complete_cmd_req(req);
}
-static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
+static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
VirtIOSCSICommon *vs = &s->parent_obj;
SCSIDevice *d;
@@ -527,21 +535,20 @@ static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req
if (rc < 0) {
if (rc == -ENOTSUP) {
virtio_scsi_fail_cmd_req(req);
+ return -ENOTSUP;
} else {
- virtio_scsi_bad_req();
+ virtio_scsi_bad_req(req);
+ return -EINVAL;
}
- return false;
}
d = virtio_scsi_device_find(s, req->req.cmd.lun);
if (!d) {
req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
virtio_scsi_complete_cmd_req(req);
- return false;
- }
- if (s->dataplane_started) {
- assert(blk_get_aio_context(d->conf.blk) == s->ctx);
+ return -ENOENT;
}
+ virtio_scsi_ctx_check(s, d);
req->sreq = scsi_req_new(d, req->req.cmd.tag,
virtio_scsi_get_lun(req->req.cmd.lun),
req->req.cmd.cdb, req);
@@ -551,11 +558,11 @@ static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req
req->sreq->cmd.xfer > req->qsgl.size)) {
req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
virtio_scsi_complete_cmd_req(req);
- return false;
+ return -ENOBUFS;
}
scsi_req_ref(req->sreq);
blk_io_plug(d->conf.blk);
- return true;
+ return 0;
}
static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
@@ -571,11 +578,24 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req, *next;
+ int ret;
+
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
while ((req = virtio_scsi_pop_req(s, vq))) {
- if (virtio_scsi_handle_cmd_req_prepare(s, req)) {
+ ret = virtio_scsi_handle_cmd_req_prepare(s, req);
+ if (!ret) {
QTAILQ_INSERT_TAIL(&reqs, req, next);
+ } else if (ret == -EINVAL) {
+ /* The device is broken and shouldn't process any request */
+ while (!QTAILQ_EMPTY(&reqs)) {
+ req = QTAILQ_FIRST(&reqs);
+ QTAILQ_REMOVE(&reqs, req, next);
+ blk_io_unplug(req->sreq->dev->conf.blk);
+ scsi_req_unref(req->sreq);
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_scsi_free_req(req);
+ }
}
}
@@ -590,7 +610,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -624,8 +644,9 @@ static void virtio_scsi_set_config(VirtIODevice *vdev,
if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
(uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
- error_report("bad data written to virtio-scsi configuration space");
- exit(1);
+ virtio_error(vdev,
+ "bad data written to virtio-scsi configuration space");
+ return;
}
vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
@@ -648,9 +669,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
- if (s->ctx) {
- virtio_scsi_dataplane_stop(s);
- }
+ assert(!s->dataplane_started);
s->resetting++;
qbus_reset_all(&s->bus.qbus);
s->resetting--;
@@ -660,22 +679,6 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
s->events_dropped = false;
}
-/* The device does not have anything to save beyond the virtio data.
- * Request data is saved with callbacks from SCSI devices.
- */
-static void virtio_scsi_save(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
- virtio_save(vdev, f);
-}
-
-static int virtio_scsi_load(QEMUFile *f, void *opaque, size_t size)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
-
- return virtio_load(vdev, f, 1);
-}
-
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
uint32_t event, uint32_t reason)
{
@@ -705,7 +708,8 @@ void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
}
if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
- virtio_scsi_bad_req();
+ virtio_scsi_bad_req(req);
+ goto out;
}
evt = &req->resp.event;
@@ -743,7 +747,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->ctx) {
- virtio_scsi_dataplane_start(s);
+ virtio_device_start_ioeventfd(vdev);
if (!s->dataplane_fenced) {
return;
}
@@ -842,14 +846,10 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
- s->ctrl_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
- s->event_vq = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
+ s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
+ s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
for (i = 0; i < s->conf.num_queues; i++) {
- s->cmd_vqs[i] = virtio_add_queue_aio(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
- }
-
- if (s->conf.iothread) {
- virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
+ s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
}
}
@@ -879,6 +879,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
return;
}
}
+
+ virtio_scsi_dataplane_setup(s, errp);
}
static void virtio_scsi_instance_init(Object *obj)
@@ -918,7 +920,15 @@ static Property virtio_scsi_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
-VMSTATE_VIRTIO_DEVICE(scsi, 1, virtio_scsi_load, virtio_scsi_save);
+static const VMStateDescription vmstate_virtio_scsi = {
+ .name = "virtio-scsi",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
@@ -943,6 +953,8 @@ static void virtio_scsi_class_init(ObjectClass *klass, void *data)
vdc->set_config = virtio_scsi_set_config;
vdc->get_features = virtio_scsi_get_features;
vdc->reset = virtio_scsi_reset;
+ vdc->start_ioeventfd = virtio_scsi_dataplane_start;
+ vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
hc->plug = virtio_scsi_hotplug;
hc->unplug = virtio_scsi_hotunplug;
}
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index 5116f4ad68..a5ce7dea8e 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -40,6 +40,8 @@
#define PVSCSI_MAX_DEVS (64)
#define PVSCSI_MSIX_NUM_VECTORS (1)
+#define PVSCSI_MAX_SG_ELEM 2048
+
#define PVSCSI_MAX_CMD_DATA_WORDS \
(sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
@@ -152,7 +154,7 @@ pvscsi_log2(uint32_t input)
return log;
}
-static int
+static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
int i;
@@ -160,10 +162,6 @@ pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
uint32_t req_ring_size, cmp_ring_size;
m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;
- if ((ri->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
- || (ri->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)) {
- return -1;
- }
req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
txr_len_log2 = pvscsi_log2(req_ring_size - 1);
@@ -195,8 +193,6 @@ pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
/* Flush ring state page changes */
smp_wmb();
-
- return 0;
}
static int
@@ -251,8 +247,11 @@ static hwaddr
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
+ uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING
+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
- if (ready_ptr != mgr->consumed_ptr) {
+ if (ready_ptr != mgr->consumed_ptr
+ && ready_ptr - mgr->consumed_ptr < ring_size) {
uint32_t next_ready_ptr =
mgr->consumed_ptr++ & mgr->txr_len_mask;
uint32_t next_ready_page =
@@ -634,17 +633,16 @@ pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
static void
pvscsi_convert_sglist(PVSCSIRequest *r)
{
- int chunk_size;
+ uint32_t chunk_size, elmcnt = 0;
uint64_t data_length = r->req.dataLen;
PVSCSISGState sg = r->sg;
- while (data_length) {
- while (!sg.resid) {
+ while (data_length && elmcnt < PVSCSI_MAX_SG_ELEM) {
+ while (!sg.resid && elmcnt++ < PVSCSI_MAX_SG_ELEM) {
pvscsi_get_next_sg_elem(&sg);
trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
r->sg.resid);
}
- assert(data_length > 0);
- chunk_size = MIN((unsigned) data_length, sg.resid);
+ chunk_size = MIN(data_length, sg.resid);
if (chunk_size) {
qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
}
@@ -746,7 +744,7 @@ pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
for (i = 0; i < rc->cmpRingNumPages; i++) {
- trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->reqRingPPNs[i]);
+ trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]);
}
}
@@ -779,11 +777,16 @@ pvscsi_on_cmd_setup_rings(PVSCSIState *s)
trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");
- pvscsi_dbg_dump_tx_rings_config(rc);
- if (pvscsi_ring_init_data(&s->rings, rc) < 0) {
+ if (!rc->reqRingNumPages
+ || rc->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
+ || !rc->cmpRingNumPages
+ || rc->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) {
return PVSCSI_COMMAND_PROCESSING_FAILED;
}
+ pvscsi_dbg_dump_tx_rings_config(rc);
+ pvscsi_ring_init_data(&s->rings, rc);
+
s->rings_info_valid = TRUE;
return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
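Two of the pvscsi changes above are bounds checks against guest-controlled state: the request ring pop only trusts the producer index when it is less than one ring's worth ahead of the consumer, and the scatter-gather walk is capped at PVSCSI_MAX_SG_ELEM elements. The ring check relies on free-running 32-bit counters, where the unsigned difference prod - cons counts outstanding entries even across wrap-around. A small standalone sketch of that property (the values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

static int ring_has_valid_entry(uint32_t prod, uint32_t cons, uint32_t ring_size)
{
    /* unsigned subtraction handles the 2^32 wrap for free */
    return prod != cons && prod - cons < ring_size;
}

int main(void)
{
    /* producer wrapped past 2^32, three entries ahead of the consumer: valid */
    printf("%d\n", ring_has_valid_entry(0x00000002u, 0xffffffffu, 128));
    /* producer index planted far ahead of the consumer: rejected */
    printf("%d\n", ring_has_valid_entry(0x80000000u, 0x00000010u, 128));
    return 0;
}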
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index 87c6dc108d..8e88e8311a 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -1876,6 +1876,14 @@ static void sd_instance_init(Object *obj)
sd->ocr_power_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sd_ocr_powerup, sd);
}
+static void sd_instance_finalize(Object *obj)
+{
+ SDState *sd = SD_CARD(obj);
+
+ timer_del(sd->ocr_power_timer);
+ timer_free(sd->ocr_power_timer);
+}
+
static void sd_realize(DeviceState *dev, Error **errp)
{
SDState *sd = SD_CARD(dev);
@@ -1927,6 +1935,7 @@ static const TypeInfo sd_info = {
.class_size = sizeof(SDCardClass),
.class_init = sd_class_init,
.instance_init = sd_instance_init,
+ .instance_finalize = sd_instance_finalize,
};
static void sd_register_types(void)
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
index 3ff0886dd5..24001dc3e6 100644
--- a/hw/sd/ssi-sd.c
+++ b/hw/sd/ssi-sd.c
@@ -31,7 +31,7 @@ do { fprintf(stderr, "ssi_sd: error: " fmt , ## __VA_ARGS__);} while (0)
#endif
typedef enum {
- SSI_SD_CMD,
+ SSI_SD_CMD = 0,
SSI_SD_CMDARG,
SSI_SD_RESPONSE,
SSI_SD_DATA_START,
@@ -40,13 +40,13 @@ typedef enum {
typedef struct {
SSISlave ssidev;
- ssi_sd_mode mode;
+ uint32_t mode;
int cmd;
uint8_t cmdarg[4];
uint8_t response[5];
- int arglen;
- int response_pos;
- int stopping;
+ int32_t arglen;
+ int32_t response_pos;
+ int32_t stopping;
SDState *sd;
} ssi_sd_state;
@@ -198,61 +198,46 @@ static uint32_t ssi_sd_transfer(SSISlave *dev, uint32_t val)
return 0xff;
}
-static void ssi_sd_save(QEMUFile *f, void *opaque)
+static int ssi_sd_post_load(void *opaque, int version_id)
{
- SSISlave *ss = SSI_SLAVE(opaque);
ssi_sd_state *s = (ssi_sd_state *)opaque;
- int i;
- qemu_put_be32(f, s->mode);
- qemu_put_be32(f, s->cmd);
- for (i = 0; i < 4; i++)
- qemu_put_be32(f, s->cmdarg[i]);
- for (i = 0; i < 5; i++)
- qemu_put_be32(f, s->response[i]);
- qemu_put_be32(f, s->arglen);
- qemu_put_be32(f, s->response_pos);
- qemu_put_be32(f, s->stopping);
-
- qemu_put_be32(f, ss->cs);
-}
-
-static int ssi_sd_load(QEMUFile *f, void *opaque, int version_id)
-{
- SSISlave *ss = SSI_SLAVE(opaque);
- ssi_sd_state *s = (ssi_sd_state *)opaque;
- int i;
-
- if (version_id != 1)
+ if (s->mode > SSI_SD_DATA_READ) {
return -EINVAL;
-
- s->mode = qemu_get_be32(f);
- s->cmd = qemu_get_be32(f);
- for (i = 0; i < 4; i++)
- s->cmdarg[i] = qemu_get_be32(f);
- for (i = 0; i < 5; i++)
- s->response[i] = qemu_get_be32(f);
- s->arglen = qemu_get_be32(f);
+ }
if (s->mode == SSI_SD_CMDARG &&
(s->arglen < 0 || s->arglen >= ARRAY_SIZE(s->cmdarg))) {
return -EINVAL;
}
- s->response_pos = qemu_get_be32(f);
- s->stopping = qemu_get_be32(f);
if (s->mode == SSI_SD_RESPONSE &&
(s->response_pos < 0 || s->response_pos >= ARRAY_SIZE(s->response) ||
(!s->stopping && s->arglen > ARRAY_SIZE(s->response)))) {
return -EINVAL;
}
- ss->cs = qemu_get_be32(f);
-
return 0;
}
+static const VMStateDescription vmstate_ssi_sd = {
+ .name = "ssi_sd",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .post_load = ssi_sd_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(mode, ssi_sd_state),
+ VMSTATE_INT32(cmd, ssi_sd_state),
+ VMSTATE_UINT8_ARRAY(cmdarg, ssi_sd_state, 4),
+ VMSTATE_UINT8_ARRAY(response, ssi_sd_state, 5),
+ VMSTATE_INT32(arglen, ssi_sd_state),
+ VMSTATE_INT32(response_pos, ssi_sd_state),
+ VMSTATE_INT32(stopping, ssi_sd_state),
+ VMSTATE_SSI_SLAVE(ssidev, ssi_sd_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static void ssi_sd_realize(SSISlave *d, Error **errp)
{
- DeviceState *dev = DEVICE(d);
ssi_sd_state *s = FROM_SSI_SLAVE(ssi_sd_state, d);
DriveInfo *dinfo;
@@ -264,16 +249,17 @@ static void ssi_sd_realize(SSISlave *d, Error **errp)
error_setg(errp, "Device initialization failed.");
return;
}
- register_savevm(dev, "ssi_sd", -1, 1, ssi_sd_save, ssi_sd_load, s);
}
static void ssi_sd_class_init(ObjectClass *klass, void *data)
{
+ DeviceClass *dc = DEVICE_CLASS(klass);
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
k->realize = ssi_sd_realize;
k->transfer = ssi_sd_transfer;
k->cs_polarity = SSI_CS_LOW;
+ dc->vmsd = &vmstate_ssi_sd;
}
static const TypeInfo ssi_sd_info = {
diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c
index ccc9e75894..14d4007c1c 100644
--- a/hw/sh4/shix.c
+++ b/hw/sh4/shix.c
@@ -23,7 +23,7 @@
*/
/*
Shix 2.0 board by Alexis Polti, described at
- http://perso.enst.fr/~polti/realisations/shix20/
+ https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20
More information in target-sh4/README.sh4
*/
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index 74c7102929..3a96cededd 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -20,6 +20,7 @@
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
+#include "qemu/uuid.h"
#include "sysemu/cpus.h"
#include "hw/smbios/smbios.h"
#include "hw/loader.h"
@@ -79,7 +80,7 @@ static struct {
static struct {
const char *manufacturer, *product, *version, *serial, *sku, *family;
- /* uuid is in qemu_uuid[] */
+ /* uuid is in qemu_uuid */
} type1;
static struct {
@@ -408,7 +409,7 @@ static void smbios_build_type_1_fields(void)
* BIOS.
*/
smbios_add_field(1, offsetof(struct smbios_type_1, uuid),
- qemu_uuid, 16);
+ &qemu_uuid, 16);
}
}
@@ -483,9 +484,9 @@ static void smbios_build_type_0_table(void)
/* Encode UUID from the big endian encoding described on RFC4122 to the wire
* format specified by SMBIOS version 2.6.
*/
-static void smbios_encode_uuid(struct smbios_uuid *uuid, const uint8_t *buf)
+static void smbios_encode_uuid(struct smbios_uuid *uuid, QemuUUID *in)
{
- memcpy(uuid, buf, 16);
+ memcpy(uuid, in, 16);
if (smbios_uuid_encoded) {
uuid->time_low = bswap32(uuid->time_low);
uuid->time_mid = bswap16(uuid->time_mid);
@@ -502,7 +503,7 @@ static void smbios_build_type_1_table(void)
SMBIOS_TABLE_SET_STR(1, version_str, type1.version);
SMBIOS_TABLE_SET_STR(1, serial_number_str, type1.serial);
if (qemu_uuid_set) {
- smbios_encode_uuid(&t->uuid, qemu_uuid);
+ smbios_encode_uuid(&t->uuid, &qemu_uuid);
} else {
memset(&t->uuid, 0, 16);
}
@@ -1001,7 +1002,7 @@ void smbios_entry_add(QemuOpts *opts)
val = qemu_opt_get(opts, "uuid");
if (val) {
- if (qemu_uuid_parse(val, qemu_uuid) != 0) {
+ if (qemu_uuid_parse(val, &qemu_uuid) != 0) {
error_report("Invalid UUID");
exit(1);
}
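For reference, switching from a raw byte array to QemuUUID does not change the SMBIOS wire format: when smbios_uuid_encoded is set, only time_low, time_mid and time_hi_and_version are byte-swapped into little-endian, while the remaining eight bytes keep their RFC 4122 order. A standalone sketch of that transform; the struct below is a simplified stand-in for struct smbios_uuid:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wire_uuid {                /* illustrative layout only */
    uint32_t time_low;
    uint16_t time_mid;
    uint16_t time_hi_and_version;
    uint8_t  rest[8];
};

int main(void)
{
    /* 00112233-4455-6677-8899-aabbccddeeff, big-endian as in RFC 4122 */
    const uint8_t rfc[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
                              0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
    struct wire_uuid u;
    memcpy(&u, rfc, sizeof(u));
    u.time_low            = __builtin_bswap32(u.time_low);
    u.time_mid            = __builtin_bswap16(u.time_mid);
    u.time_hi_and_version = __builtin_bswap16(u.time_hi_and_version);

    const uint8_t *b = (const uint8_t *)&u;
    for (int i = 0; i < 16; i++) {
        printf("%02x ", b[i]);    /* 33 22 11 00 55 44 77 66 88 99 aa bb ... */
    }
    printf("\n");
    return 0;
}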
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index 478fda8209..f5b6efddf8 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -35,10 +35,11 @@
#include "sysemu/sysemu.h"
#include "net/net.h"
#include "hw/boards.h"
-#include "hw/nvram/openbios_firmware_abi.h"
#include "hw/scsi/esp.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
+#include "hw/nvram/sun_nvram.h"
+#include "hw/nvram/chrp_nvram.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/char/escc.h"
#include "hw/empty_slot.h"
@@ -117,39 +118,17 @@ static void nvram_init(Nvram *nvram, uint8_t *macaddr,
int nvram_machine_id, const char *arch)
{
unsigned int i;
- uint32_t start, end;
+ int sysp_end;
uint8_t image[0x1ff0];
- struct OpenBIOS_nvpart_v1 *part_header;
NvramClass *k = NVRAM_GET_CLASS(nvram);
memset(image, '\0', sizeof(image));
- start = 0;
+ /* OpenBIOS nvram variables partition */
+ sysp_end = chrp_nvram_create_system_partition(image, 0);
- // OpenBIOS nvram variables
- // Variable partition
- part_header = (struct OpenBIOS_nvpart_v1 *)&image[start];
- part_header->signature = OPENBIOS_PART_SYSTEM;
- pstrcpy(part_header->name, sizeof(part_header->name), "system");
-
- end = start + sizeof(struct OpenBIOS_nvpart_v1);
- for (i = 0; i < nb_prom_envs; i++)
- end = OpenBIOS_set_var(image, end, prom_envs[i]);
-
- // End marker
- image[end++] = '\0';
-
- end = start + ((end - start + 15) & ~15);
- OpenBIOS_finish_partition(part_header, end - start);
-
- // free partition
- start = end;
- part_header = (struct OpenBIOS_nvpart_v1 *)&image[start];
- part_header->signature = OPENBIOS_PART_FREE;
- pstrcpy(part_header->name, sizeof(part_header->name), "free");
-
- end = 0x1fd0;
- OpenBIOS_finish_partition(part_header, end - start);
+ /* Free space partition */
+ chrp_nvram_create_free_partition(&image[sysp_end], 0x1fd0 - sysp_end);
Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr,
nvram_machine_id);
@@ -159,20 +138,6 @@ static void nvram_init(Nvram *nvram, uint8_t *macaddr,
}
}
-static DeviceState *slavio_intctl;
-
-void sun4m_hmp_info_pic(Monitor *mon, const QDict *qdict)
-{
- if (slavio_intctl)
- slavio_pic_info(mon, slavio_intctl);
-}
-
-void sun4m_hmp_info_irq(Monitor *mon, const QDict *qdict)
-{
- if (slavio_intctl)
- slavio_irq_info(mon, slavio_intctl);
-}
-
void cpu_check_irqs(CPUSPARCState *env)
{
CPUState *cs;
@@ -873,6 +838,7 @@ static void dummy_fdc_tc(void *opaque, int irq, int level)
static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
MachineState *machine)
{
+ DeviceState *slavio_intctl;
const char *cpu_model = machine->cpu_model;
unsigned int i;
void *iommu, *espdma, *ledma, *nvram;
@@ -1067,6 +1033,7 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
hwdef->ecc_version);
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id);
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index 3165e18eb7..466331535b 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -36,7 +36,8 @@
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
-#include "hw/nvram/openbios_firmware_abi.h"
+#include "hw/nvram/sun_nvram.h"
+#include "hw/nvram/chrp_nvram.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/sysbus.h"
#include "hw/ide.h"
@@ -124,39 +125,17 @@ static int sun4u_NVRAM_set_params(Nvram *nvram, uint16_t NVRAM_size,
const uint8_t *macaddr)
{
unsigned int i;
- uint32_t start, end;
+ int sysp_end;
uint8_t image[0x1ff0];
- struct OpenBIOS_nvpart_v1 *part_header;
NvramClass *k = NVRAM_GET_CLASS(nvram);
memset(image, '\0', sizeof(image));
- start = 0;
+ /* OpenBIOS nvram variables partition */
+ sysp_end = chrp_nvram_create_system_partition(image, 0);
- // OpenBIOS nvram variables
- // Variable partition
- part_header = (struct OpenBIOS_nvpart_v1 *)&image[start];
- part_header->signature = OPENBIOS_PART_SYSTEM;
- pstrcpy(part_header->name, sizeof(part_header->name), "system");
-
- end = start + sizeof(struct OpenBIOS_nvpart_v1);
- for (i = 0; i < nb_prom_envs; i++)
- end = OpenBIOS_set_var(image, end, prom_envs[i]);
-
- // End marker
- image[end++] = '\0';
-
- end = start + ((end - start + 15) & ~15);
- OpenBIOS_finish_partition(part_header, end - start);
-
- // free partition
- start = end;
- part_header = (struct OpenBIOS_nvpart_v1 *)&image[start];
- part_header->signature = OPENBIOS_PART_FREE;
- pstrcpy(part_header->name, sizeof(part_header->name), "free");
-
- end = 0x1fd0;
- OpenBIOS_finish_partition(part_header, end - start);
+ /* Free space partition */
+ chrp_nvram_create_free_partition(&image[sysp_end], 0x1fd0 - sysp_end);
Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr, 0x80);
@@ -824,7 +803,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
i++;
}
- serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS);
+ serial_hds_isa_init(isa_bus, i, MAX_SERIAL_PORTS);
parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS);
for(i = 0; i < nb_nics; i++)
@@ -876,6 +855,7 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
(uint8_t *)&nd_table[0].macaddr);
fw_cfg = fw_cfg_init_io(BIOS_CFG_IOPORT);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id);
diff --git a/hw/ssi/Makefile.objs b/hw/ssi/Makefile.objs
index c79a8dcd86..487add2879 100644
--- a/hw/ssi/Makefile.objs
+++ b/hw/ssi/Makefile.objs
@@ -3,6 +3,7 @@ common-obj-$(CONFIG_SSI) += ssi.o
common-obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o
common-obj-$(CONFIG_XILINX_SPIPS) += xilinx_spips.o
common-obj-$(CONFIG_ASPEED_SOC) += aspeed_smc.o
+common-obj-$(CONFIG_STM32F2XX_SPI) += stm32f2xx_spi.o
obj-$(CONFIG_OMAP) += omap_spi.o
obj-$(CONFIG_IMX) += imx_spi.o
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index d319e04a27..6e8403ebc2 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -79,10 +79,10 @@
/* CEx Segment Address Register */
#define R_SEG_ADDR0 (0x30 / 4)
-#define SEG_SIZE_SHIFT 24 /* 8MB units */
-#define SEG_SIZE_MASK 0x7f
+#define SEG_END_SHIFT 24 /* 8MB units */
+#define SEG_END_MASK 0xff
#define SEG_START_SHIFT 16 /* address bit [A29-A23] */
-#define SEG_START_MASK 0x7f
+#define SEG_START_MASK 0xff
#define R_SEG_ADDR1 (0x34 / 4)
#define R_SEG_ADDR2 (0x38 / 4)
#define R_SEG_ADDR3 (0x3C / 4)
@@ -127,18 +127,22 @@
#define R_SPI_MISC_CTRL (0x10 / 4)
#define R_SPI_TIMINGS (0x14 / 4)
+#define ASPEED_SOC_SMC_FLASH_BASE 0x10000000
+#define ASPEED_SOC_FMC_FLASH_BASE 0x20000000
+#define ASPEED_SOC_SPI_FLASH_BASE 0x30000000
+#define ASPEED_SOC_SPI2_FLASH_BASE 0x38000000
+
/*
* Default segments mapping addresses and size for each slave per
* controller. These can be changed when board is initialized with the
- * Segment Address Registers but they don't seem do be used on the
- * field.
+ * Segment Address Registers.
*/
static const AspeedSegments aspeed_segments_legacy[] = {
{ 0x10000000, 32 * 1024 * 1024 },
};
static const AspeedSegments aspeed_segments_fmc[] = {
- { 0x20000000, 64 * 1024 * 1024 },
+ { 0x20000000, 64 * 1024 * 1024 }, /* start address is readonly */
{ 0x24000000, 32 * 1024 * 1024 },
{ 0x26000000, 32 * 1024 * 1024 },
{ 0x28000000, 32 * 1024 * 1024 },
@@ -149,15 +153,155 @@ static const AspeedSegments aspeed_segments_spi[] = {
{ 0x30000000, 64 * 1024 * 1024 },
};
+static const AspeedSegments aspeed_segments_ast2500_fmc[] = {
+ { 0x20000000, 128 * 1024 * 1024 }, /* start address is readonly */
+ { 0x28000000, 32 * 1024 * 1024 },
+ { 0x2A000000, 32 * 1024 * 1024 },
+};
+
+static const AspeedSegments aspeed_segments_ast2500_spi1[] = {
+ { 0x30000000, 32 * 1024 * 1024 }, /* start address is readonly */
+ { 0x32000000, 96 * 1024 * 1024 }, /* end address is readonly */
+};
+
+static const AspeedSegments aspeed_segments_ast2500_spi2[] = {
+ { 0x38000000, 32 * 1024 * 1024 }, /* start address is readonly */
+ { 0x3A000000, 96 * 1024 * 1024 }, /* end address is readonly */
+};
+
static const AspeedSMCController controllers[] = {
{ "aspeed.smc.smc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 5, aspeed_segments_legacy, 0x6000000 },
+ CONF_ENABLE_W0, 5, aspeed_segments_legacy,
+ ASPEED_SOC_SMC_FLASH_BASE, 0x6000000 },
{ "aspeed.smc.fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 5, aspeed_segments_fmc, 0x10000000 },
+ CONF_ENABLE_W0, 5, aspeed_segments_fmc,
+ ASPEED_SOC_FMC_FLASH_BASE, 0x10000000 },
{ "aspeed.smc.spi", R_SPI_CONF, 0xff, R_SPI_CTRL0, R_SPI_TIMINGS,
- SPI_CONF_ENABLE_W0, 1, aspeed_segments_spi, 0x10000000 },
+ SPI_CONF_ENABLE_W0, 1, aspeed_segments_spi,
+ ASPEED_SOC_SPI_FLASH_BASE, 0x10000000 },
+ { "aspeed.smc.ast2500-fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
+ CONF_ENABLE_W0, 3, aspeed_segments_ast2500_fmc,
+ ASPEED_SOC_FMC_FLASH_BASE, 0x10000000 },
+ { "aspeed.smc.ast2500-spi1", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
+ CONF_ENABLE_W0, 2, aspeed_segments_ast2500_spi1,
+ ASPEED_SOC_SPI_FLASH_BASE, 0x8000000 },
+ { "aspeed.smc.ast2500-spi2", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
+ CONF_ENABLE_W0, 2, aspeed_segments_ast2500_spi2,
+ ASPEED_SOC_SPI2_FLASH_BASE, 0x8000000 },
};
+/*
+ * The Segment Register uses an 8MB unit to encode the start address
+ * and the end address of the mapping window of a flash SPI slave:
+ *
+ * | byte 1 | byte 2 | byte 3 | byte 4 |
+ * +--------+--------+--------+--------+
+ * | end | start | 0 | 0 |
+ *
+ */
+static inline uint32_t aspeed_smc_segment_to_reg(const AspeedSegments *seg)
+{
+ uint32_t reg = 0;
+ reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT;
+ reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT;
+ return reg;
+}
+
+static inline void aspeed_smc_reg_to_segment(uint32_t reg, AspeedSegments *seg)
+{
+ seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23;
+ seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr;
+}
+
+static bool aspeed_smc_flash_overlap(const AspeedSMCState *s,
+ const AspeedSegments *new,
+ int cs)
+{
+ AspeedSegments seg;
+ int i;
+
+ for (i = 0; i < s->ctrl->max_slaves; i++) {
+ if (i == cs) {
+ continue;
+ }
+
+ aspeed_smc_reg_to_segment(s->regs[R_SEG_ADDR0 + i], &seg);
+
+ if (new->addr + new->size > seg.addr &&
+ new->addr < seg.addr + seg.size) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment CS%d [ 0x%"
+ HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with "
+ "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
+ s->ctrl->name, cs, new->addr, new->addr + new->size,
+ i, seg.addr, seg.addr + seg.size);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
+ uint64_t new)
+{
+ AspeedSMCFlash *fl = &s->flashes[cs];
+ AspeedSegments seg;
+
+ aspeed_smc_reg_to_segment(new, &seg);
+
+ /* The start address of CS0 is read-only */
+ if (cs == 0 && seg.addr != s->ctrl->flash_window_base) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Tried to change CS0 start address to 0x%"
+ HWADDR_PRIx "\n", s->ctrl->name, seg.addr);
+ return;
+ }
+
+ /*
+ * The end address of the AST2500 spi controllers is also
+ * read-only.
+ */
+ if ((s->ctrl->segments == aspeed_segments_ast2500_spi1 ||
+ s->ctrl->segments == aspeed_segments_ast2500_spi2) &&
+ cs == s->ctrl->max_slaves &&
+ seg.addr + seg.size != s->ctrl->segments[cs].addr +
+ s->ctrl->segments[cs].size) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Tried to change CS%d end address to 0x%"
+ HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr);
+ return;
+ }
+
+ /* Keep the segment in the overall flash window */
+ if (seg.addr + seg.size <= s->ctrl->flash_window_base ||
+ seg.addr > s->ctrl->flash_window_base + s->ctrl->flash_window_size) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is invalid : "
+ "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
+ s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
+ return;
+ }
+
+ /* Check start address vs. alignment */
+ if (seg.addr % seg.size) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not "
+ "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
+ s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
+ }
+
+ /* And segments should not overlap */
+ if (aspeed_smc_flash_overlap(s, &seg, cs)) {
+ return;
+ }
+
+ /* All should be fine now to move the region */
+ memory_region_transaction_begin();
+ memory_region_set_size(&fl->mmio, seg.size);
+ memory_region_set_address(&fl->mmio, seg.addr - s->ctrl->flash_window_base);
+ memory_region_set_enabled(&fl->mmio, true);
+ memory_region_transaction_commit();
+
+ s->regs[R_SEG_ADDR0 + cs] = new;
+}
+
static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr,
unsigned size)
{
@@ -281,6 +425,12 @@ static void aspeed_smc_reset(DeviceState *d)
s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
}
+ /* setup default segment register values for all */
+ for (i = 0; i < s->ctrl->max_slaves; ++i) {
+ s->regs[R_SEG_ADDR0 + i] =
+ aspeed_smc_segment_to_reg(&s->ctrl->segments[i]);
+ }
+
aspeed_smc_update_cs(s);
}
@@ -301,6 +451,7 @@ static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
addr == s->r_timings ||
addr == s->r_ce_ctrl ||
addr == R_INTR_CTRL ||
+ (addr >= R_SEG_ADDR0 && addr < R_SEG_ADDR0 + s->ctrl->max_slaves) ||
(addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs)) {
return s->regs[addr];
} else {
@@ -332,6 +483,13 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
} else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
s->regs[addr] = value;
aspeed_smc_update_cs(s);
+ } else if (addr >= R_SEG_ADDR0 &&
+ addr < R_SEG_ADDR0 + s->ctrl->max_slaves) {
+ int cs = addr - R_SEG_ADDR0;
+
+ if (value != s->regs[R_SEG_ADDR0 + cs]) {
+ aspeed_smc_flash_set_segment(s, cs, value);
+ }
} else {
qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
__func__, addr);
@@ -384,23 +542,33 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp)
aspeed_smc_reset(dev);
+ /* The memory region for the controller registers */
memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
s->ctrl->name, ASPEED_SMC_R_MAX * 4);
sysbus_init_mmio(sbd, &s->mmio);
/*
- * Memory region where flash modules are remapped
+ * The container memory region representing the address space
+ * window in which the flash modules are mapped. The size and
+ * address depends on the SoC model and controller type.
*/
snprintf(name, sizeof(name), "%s.flash", s->ctrl->name);
memory_region_init_io(&s->mmio_flash, OBJECT(s),
&aspeed_smc_flash_default_ops, s, name,
- s->ctrl->mapping_window_size);
+ s->ctrl->flash_window_size);
sysbus_init_mmio(sbd, &s->mmio_flash);
- s->flashes = g_new0(AspeedSMCFlash, s->num_cs);
+ s->flashes = g_new0(AspeedSMCFlash, s->ctrl->max_slaves);
- for (i = 0; i < s->num_cs; ++i) {
+ /*
+ * Let's create a sub memory region for each possible slave. All
+ * have a configurable memory segment in the overall flash mapping
+ * window of the controller, but there is not necessarily a flash
+ * module behind to handle the memory accesses. This depends on
+ * the board configuration.
+ */
+ for (i = 0; i < s->ctrl->max_slaves; ++i) {
AspeedSMCFlash *fl = &s->flashes[i];
snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i);
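To make the segment-register arithmetic above concrete, here is a minimal standalone round trip of the encode/decode helpers for the AST2500 FMC CS0 window (128MB at 0x20000000). The shift, mask and 8MB (1 << 23) granularity come from the patch; everything else is illustration:

#include <stdint.h>
#include <stdio.h>

#define SEG_START_SHIFT 16
#define SEG_START_MASK  0xff
#define SEG_END_SHIFT   24
#define SEG_END_MASK    0xff

static uint32_t segment_to_reg(uint32_t addr, uint32_t size)
{
    uint32_t reg = 0;
    reg |= ((addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT;
    reg |= (((addr + size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT;
    return reg;
}

int main(void)
{
    uint32_t reg   = segment_to_reg(0x20000000u, 128u * 1024 * 1024);
    uint32_t start = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23;
    uint32_t size  = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - start;

    /* expect: reg = 0x50400000, start = 0x20000000, size = 0x08000000 */
    printf("reg = 0x%08x start = 0x%08x size = 0x%08x\n", reg, start, size);
    return 0;
}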
diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c
index 4226199811..e4e395fa67 100644
--- a/hw/ssi/imx_spi.c
+++ b/hw/ssi/imx_spi.c
@@ -25,7 +25,7 @@
} \
} while (0)
-static char const *imx_spi_reg_name(uint32_t reg)
+static const char *imx_spi_reg_name(uint32_t reg)
{
static char unknown[20];
diff --git a/hw/ssi/stm32f2xx_spi.c b/hw/ssi/stm32f2xx_spi.c
new file mode 100644
index 0000000000..26a1b4ddf5
--- /dev/null
+++ b/hw/ssi/stm32f2xx_spi.c
@@ -0,0 +1,225 @@
+/*
+ * STM32F405 SPI
+ *
+ * Copyright (c) 2014 Alistair Francis <alistair@alistair23.me>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "hw/ssi/stm32f2xx_spi.h"
+
+#ifndef STM_SPI_ERR_DEBUG
+#define STM_SPI_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT_L(lvl, fmt, args...) do { \
+ if (STM_SPI_ERR_DEBUG >= lvl) { \
+ qemu_log("%s: " fmt, __func__, ## args); \
+ } \
+} while (0)
+
+#define DB_PRINT(fmt, args...) DB_PRINT_L(1, fmt, ## args)
+
+static void stm32f2xx_spi_reset(DeviceState *dev)
+{
+ STM32F2XXSPIState *s = STM32F2XX_SPI(dev);
+
+ s->spi_cr1 = 0x00000000;
+ s->spi_cr2 = 0x00000000;
+ s->spi_sr = 0x0000000A;
+ s->spi_dr = 0x0000000C;
+ s->spi_crcpr = 0x00000007;
+ s->spi_rxcrcr = 0x00000000;
+ s->spi_txcrcr = 0x00000000;
+ s->spi_i2scfgr = 0x00000000;
+ s->spi_i2spr = 0x00000002;
+}
+
+static void stm32f2xx_spi_transfer(STM32F2XXSPIState *s)
+{
+ DB_PRINT("Data to send: 0x%x\n", s->spi_dr);
+
+ s->spi_dr = ssi_transfer(s->ssi, s->spi_dr);
+ s->spi_sr |= STM_SPI_SR_RXNE;
+
+ DB_PRINT("Data received: 0x%x\n", s->spi_dr);
+}
+
+static uint64_t stm32f2xx_spi_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ STM32F2XXSPIState *s = opaque;
+
+ DB_PRINT("Address: 0x%" HWADDR_PRIx "\n", addr);
+
+ switch (addr) {
+ case STM_SPI_CR1:
+ return s->spi_cr1;
+ case STM_SPI_CR2:
+ qemu_log_mask(LOG_UNIMP, "%s: Interrupts and DMA are not implemented\n",
+ __func__);
+ return s->spi_cr2;
+ case STM_SPI_SR:
+ return s->spi_sr;
+ case STM_SPI_DR:
+ stm32f2xx_spi_transfer(s);
+ s->spi_sr &= ~STM_SPI_SR_RXNE;
+ return s->spi_dr;
+ case STM_SPI_CRCPR:
+ qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented, the registers " \
+ "are included for compatibility\n", __func__);
+ return s->spi_crcpr;
+ case STM_SPI_RXCRCR:
+ qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented, the registers " \
+ "are included for compatibility\n", __func__);
+ return s->spi_rxcrcr;
+ case STM_SPI_TXCRCR:
+ qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented, the registers " \
+ "are included for compatibility\n", __func__);
+ return s->spi_txcrcr;
+ case STM_SPI_I2SCFGR:
+ qemu_log_mask(LOG_UNIMP, "%s: I2S is not implemented, the registers " \
+ "are included for compatibility\n", __func__);
+ return s->spi_i2scfgr;
+ case STM_SPI_I2SPR:
+ qemu_log_mask(LOG_UNIMP, "%s: I2S is not implemented, the registers " \
+ "are included for compatibility\n", __func__);
+ return s->spi_i2spr;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ }
+
+ return 0;
+}
+
+static void stm32f2xx_spi_write(void *opaque, hwaddr addr,
+ uint64_t val64, unsigned int size)
+{
+ STM32F2XXSPIState *s = opaque;
+ uint32_t value = val64;
+
+ DB_PRINT("Address: 0x%" HWADDR_PRIx ", Value: 0x%x\n", addr, value);
+
+ switch (addr) {
+ case STM_SPI_CR1:
+ s->spi_cr1 = value;
+ return;
+ case STM_SPI_CR2:
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "Interrupts and DMA are not implemented\n", __func__);
+ s->spi_cr2 = value;
+ return;
+ case STM_SPI_SR:
+ /* Read-only register, except for clearing the CRCERR bit, which
+ * is not supported
+ */
+ return;
+ case STM_SPI_DR:
+ s->spi_dr = value;
+ stm32f2xx_spi_transfer(s);
+ return;
+ case STM_SPI_CRCPR:
+ qemu_log_mask(LOG_UNIMP, "%s: CRC is not implemented\n", __func__);
+ return;
+ case STM_SPI_RXCRCR:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Read only register: " \
+ "0x%" HWADDR_PRIx "\n", __func__, addr);
+ return;
+ case STM_SPI_TXCRCR:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Read only register: " \
+ "0x%" HWADDR_PRIx "\n", __func__, addr);
+ return;
+ case STM_SPI_I2SCFGR:
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "I2S is not implemented\n", __func__);
+ return;
+ case STM_SPI_I2SPR:
+ qemu_log_mask(LOG_UNIMP, "%s: " \
+ "I2S is not implemented\n", __func__);
+ return;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, addr);
+ }
+}
+
+static const MemoryRegionOps stm32f2xx_spi_ops = {
+ .read = stm32f2xx_spi_read,
+ .write = stm32f2xx_spi_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_stm32f2xx_spi = {
+ .name = TYPE_STM32F2XX_SPI,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(spi_cr1, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_cr2, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_sr, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_dr, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_crcpr, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_rxcrcr, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_txcrcr, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_i2scfgr, STM32F2XXSPIState),
+ VMSTATE_UINT32(spi_i2spr, STM32F2XXSPIState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void stm32f2xx_spi_init(Object *obj)
+{
+ STM32F2XXSPIState *s = STM32F2XX_SPI(obj);
+ DeviceState *dev = DEVICE(obj);
+
+ memory_region_init_io(&s->mmio, obj, &stm32f2xx_spi_ops, s,
+ TYPE_STM32F2XX_SPI, 0x400);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);
+
+ s->ssi = ssi_create_bus(dev, "ssi");
+}
+
+static void stm32f2xx_spi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = stm32f2xx_spi_reset;
+ dc->vmsd = &vmstate_stm32f2xx_spi;
+}
+
+static const TypeInfo stm32f2xx_spi_info = {
+ .name = TYPE_STM32F2XX_SPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(STM32F2XXSPIState),
+ .instance_init = stm32f2xx_spi_init,
+ .class_init = stm32f2xx_spi_class_init,
+};
+
+static void stm32f2xx_spi_register_types(void)
+{
+ type_register_static(&stm32f2xx_spi_info);
+}
+
+type_init(stm32f2xx_spi_register_types)
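The new controller is a plain sysbus device exposing one 0x400-byte MMIO region, one IRQ and an SSI bus named "ssi". A minimal sketch of how a board or SoC model might wire it up; the base address, IRQ line and the choice of slave device are illustrative only, not part of this patch:

    #include "qemu/osdep.h"
    #include "hw/sysbus.h"
    #include "hw/ssi/ssi.h"
    #include "hw/ssi/stm32f2xx_spi.h"

    /* Sketch only: instantiate the controller and hang one slave off its bus. */
    static void board_add_spi(hwaddr base, qemu_irq irq)
    {
        DeviceState *spi = sysbus_create_simple(TYPE_STM32F2XX_SPI, base, irq);
        SSIBus *bus = (SSIBus *)qdev_get_child_bus(spi, "ssi");

        /* any SSI slave model would do here; "ssi-sd" is just an example */
        ssi_create_slave(bus, "ssi-sd");
    }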
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index e2b77dc3de..da8adfa443 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -607,6 +607,7 @@ static void xilinx_spips_realize(DeviceState *dev, Error **errp)
XilinxSPIPS *s = XILINX_SPIPS(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
XilinxSPIPSClass *xsc = XILINX_SPIPS_GET_CLASS(s);
+ qemu_irq *cs;
int i;
DB_PRINT_L(0, "realized spips\n");
@@ -619,8 +620,10 @@ static void xilinx_spips_realize(DeviceState *dev, Error **errp)
}
s->cs_lines = g_new0(qemu_irq, s->num_cs * s->num_busses);
- ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi[0]);
- ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi[1]);
+ for (i = 0, cs = s->cs_lines; i < s->num_busses; ++i, cs += s->num_cs) {
+ ssi_auto_connect_slaves(DEVICE(s), cs, s->spi[i]);
+ }
+
sysbus_init_irq(sbd, &s->irq);
for (i = 0; i < s->num_cs * s->num_busses; ++i) {
sysbus_init_irq(sbd, &s->cs_lines[i]);
diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c
index 772f85f5fd..ce1dc63911 100644
--- a/hw/timer/a9gtimer.c
+++ b/hw/timer/a9gtimer.c
@@ -82,15 +82,15 @@ static void a9_gtimer_update(A9GTimerState *s, bool sync)
if ((s->control & R_CONTROL_TIMER_ENABLE) &&
(gtb->control & R_CONTROL_COMP_ENABLE)) {
/* R2p0+, where the compare function is >= */
- while (gtb->compare < update.new) {
+ if (gtb->compare < update.new) {
DB_PRINT("Compare event happened for CPU %d\n", i);
gtb->status = 1;
- if (gtb->control & R_CONTROL_AUTO_INCREMENT) {
- DB_PRINT("Auto incrementing timer compare by %" PRId32 "\n",
- gtb->inc);
- gtb->compare += gtb->inc;
- } else {
- break;
+ if (gtb->control & R_CONTROL_AUTO_INCREMENT && gtb->inc) {
+ uint64_t inc =
+ QEMU_ALIGN_UP(update.new - gtb->compare, gtb->inc);
+ DB_PRINT("Auto incrementing timer compare by %"
+ PRId64 "\n", inc);
+ gtb->compare += inc;
}
}
cdiff = (int64_t)gtb->compare - (int64_t)update.new + 1;
diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c
index 3385e5dc35..22ceabe1d4 100644
--- a/hw/timer/allwinner-a10-pit.c
+++ b/hw/timer/allwinner-a10-pit.c
@@ -267,7 +267,7 @@ static void a10_pit_init(Object *obj)
tc->container = s;
tc->index = i;
bh[i] = qemu_bh_new(a10_pit_timer_cb, tc);
- s->timer[i] = ptimer_init(bh[i]);
+ s->timer[i] = ptimer_init(bh[i], PTIMER_POLICY_DEFAULT);
}
}
diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c
index d66bbf01b4..daf6c48797 100644
--- a/hw/timer/arm_mptimer.c
+++ b/hw/timer/arm_mptimer.c
@@ -20,22 +20,33 @@
*/
#include "qemu/osdep.h"
+#include "hw/ptimer.h"
#include "hw/timer/arm_mptimer.h"
#include "qapi/error.h"
-#include "qemu/timer.h"
+#include "qemu/main-loop.h"
#include "qom/cpu.h"
+#define PTIMER_POLICY \
+ (PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD | \
+ PTIMER_POLICY_CONTINUOUS_TRIGGER | \
+ PTIMER_POLICY_NO_IMMEDIATE_TRIGGER | \
+ PTIMER_POLICY_NO_IMMEDIATE_RELOAD | \
+ PTIMER_POLICY_NO_COUNTER_ROUND_DOWN)
+
/* This device implements the per-cpu private timer and watchdog block
* which is used in both the ARM11MPCore and Cortex-A9MP.
*/
static inline int get_current_cpu(ARMMPTimerState *s)
{
- if (current_cpu->cpu_index >= s->num_cpu) {
+ int cpu_id = current_cpu ? current_cpu->cpu_index : 0;
+
+ if (cpu_id >= s->num_cpu) {
hw_error("arm_mptimer: num-cpu %d but this cpu is %d!\n",
- s->num_cpu, current_cpu->cpu_index);
+ s->num_cpu, cpu_id);
}
- return current_cpu->cpu_index;
+
+ return cpu_id;
}
static inline void timerblock_update_irq(TimerBlock *tb)
@@ -44,33 +55,42 @@ static inline void timerblock_update_irq(TimerBlock *tb)
}
/* Return conversion factor from mpcore timer ticks to qemu timer ticks. */
-static inline uint32_t timerblock_scale(TimerBlock *tb)
+static inline uint32_t timerblock_scale(uint32_t control)
{
- return (((tb->control >> 8) & 0xff) + 1) * 10;
+ return (((control >> 8) & 0xff) + 1) * 10;
}
-static void timerblock_reload(TimerBlock *tb, int restart)
+static inline void timerblock_set_count(struct ptimer_state *timer,
+ uint32_t control, uint64_t *count)
{
- if (tb->count == 0) {
- return;
+ /* The ptimer triggers an interrupt for a periodic timer when the counter
+ * is set to 0; the MPTimer does so only under certain conditions.
+ */
+ if ((control & 3) == 3 && (control & 0xff00) == 0 && *count == 0) {
+ *count = ptimer_get_limit(timer);
}
- if (restart) {
- tb->tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ ptimer_set_count(timer, *count);
+}
+
+static inline void timerblock_run(struct ptimer_state *timer,
+ uint32_t control, uint32_t load)
+{
+ if ((control & 1) && ((control & 0xff00) || load != 0)) {
+ ptimer_run(timer, !(control & 2));
}
- tb->tick += (int64_t)tb->count * timerblock_scale(tb);
- timer_mod(tb->timer, tb->tick);
}
static void timerblock_tick(void *opaque)
{
TimerBlock *tb = (TimerBlock *)opaque;
- tb->status = 1;
- if (tb->control & 2) {
- tb->count = tb->load;
- timerblock_reload(tb, 0);
- } else {
- tb->count = 0;
+ /* A periodic timer with load = 0 and prescaler != 0 re-triggers the
+ * IRQ after one period; otherwise it either stops or wraps around.
+ */
+ if ((tb->control & 2) && (tb->control & 0xff00) == 0 &&
+ ptimer_get_limit(tb->timer) == 0) {
+ ptimer_stop(tb->timer);
}
+ tb->status = 1;
timerblock_update_irq(tb);
}
@@ -78,21 +98,11 @@ static uint64_t timerblock_read(void *opaque, hwaddr addr,
unsigned size)
{
TimerBlock *tb = (TimerBlock *)opaque;
- int64_t val;
switch (addr) {
case 0: /* Load */
- return tb->load;
+ return ptimer_get_limit(tb->timer);
case 4: /* Counter. */
- if (((tb->control & 1) == 0) || (tb->count == 0)) {
- return 0;
- }
- /* Slow and ugly, but hopefully won't happen too often. */
- val = tb->tick - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- val /= timerblock_scale(tb);
- if (val < 0) {
- val = 0;
- }
- return val;
+ return ptimer_get_count(tb->timer);
case 8: /* Control. */
return tb->control;
case 12: /* Interrupt status. */
@@ -106,37 +116,45 @@ static void timerblock_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
TimerBlock *tb = (TimerBlock *)opaque;
- int64_t old;
+ uint32_t control = tb->control;
switch (addr) {
case 0: /* Load */
- tb->load = value;
- /* Fall through. */
- case 4: /* Counter. */
- if ((tb->control & 1) && tb->count) {
- /* Cancel the previous timer. */
- timer_del(tb->timer);
+ /* Setting load to 0 stops the timer without triggering a tick if
+ * prescaler = 0.
+ */
+ if ((control & 1) && (control & 0xff00) == 0 && value == 0) {
+ ptimer_stop(tb->timer);
}
- tb->count = value;
- if (tb->control & 1) {
- timerblock_reload(tb, 1);
+ ptimer_set_limit(tb->timer, value, 1);
+ timerblock_run(tb->timer, control, value);
+ break;
+ case 4: /* Counter. */
+ /* Setting the counter to 0 stops a one-shot timer, or a periodic timer
+ * with load = 0, without triggering a tick if prescaler = 0.
+ */
+ if ((control & 1) && (control & 0xff00) == 0 && value == 0 &&
+ (!(control & 2) || ptimer_get_limit(tb->timer) == 0)) {
+ ptimer_stop(tb->timer);
}
+ timerblock_set_count(tb->timer, control, &value);
+ timerblock_run(tb->timer, control, value);
break;
case 8: /* Control. */
- old = tb->control;
- tb->control = value;
+ if ((control & 3) != (value & 3)) {
+ ptimer_stop(tb->timer);
+ }
+ if ((control & 0xff00) != (value & 0xff00)) {
+ ptimer_set_period(tb->timer, timerblock_scale(value));
+ }
if (value & 1) {
- if ((old & 1) && (tb->count != 0)) {
- /* Do nothing if timer is ticking right now. */
- break;
+ uint64_t count = ptimer_get_count(tb->timer);
+ /* Re-load periodic timer counter if needed. */
+ if ((value & 2) && count == 0) {
+ timerblock_set_count(tb->timer, value, &count);
}
- if (tb->control & 2) {
- tb->count = tb->load;
- }
- timerblock_reload(tb, 1);
- } else if (old & 1) {
- /* Shutdown the timer. */
- timer_del(tb->timer);
+ timerblock_run(tb->timer, value, count);
}
+ tb->control = value;
break;
case 12: /* Interrupt status. */
tb->status &= ~value;
@@ -186,13 +204,12 @@ static const MemoryRegionOps timerblock_ops = {
static void timerblock_reset(TimerBlock *tb)
{
- tb->count = 0;
- tb->load = 0;
tb->control = 0;
tb->status = 0;
- tb->tick = 0;
if (tb->timer) {
- timer_del(tb->timer);
+ ptimer_stop(tb->timer);
+ ptimer_set_limit(tb->timer, 0, 1);
+ ptimer_set_period(tb->timer, timerblock_scale(0));
}
}
@@ -238,7 +255,8 @@ static void arm_mptimer_realize(DeviceState *dev, Error **errp)
*/
for (i = 0; i < s->num_cpu; i++) {
TimerBlock *tb = &s->timerblock[i];
- tb->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, timerblock_tick, tb);
+ QEMUBH *bh = qemu_bh_new(timerblock_tick, tb);
+ tb->timer = ptimer_init(bh, PTIMER_POLICY);
sysbus_init_irq(sbd, &tb->irq);
memory_region_init_io(&tb->iomem, OBJECT(s), &timerblock_ops, tb,
"arm_mptimer_timerblock", 0x20);
@@ -248,26 +266,23 @@ static void arm_mptimer_realize(DeviceState *dev, Error **errp)
static const VMStateDescription vmstate_timerblock = {
.name = "arm_mptimer_timerblock",
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (VMStateField[]) {
- VMSTATE_UINT32(count, TimerBlock),
- VMSTATE_UINT32(load, TimerBlock),
VMSTATE_UINT32(control, TimerBlock),
VMSTATE_UINT32(status, TimerBlock),
- VMSTATE_INT64(tick, TimerBlock),
- VMSTATE_TIMER_PTR(timer, TimerBlock),
+ VMSTATE_PTIMER(timer, TimerBlock),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_arm_mptimer = {
.name = "arm_mptimer",
- .version_id = 2,
- .minimum_version_id = 2,
+ .version_id = 3,
+ .minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_STRUCT_VARRAY_UINT32(timerblock, ARMMPTimerState, num_cpu,
- 2, vmstate_timerblock, TimerBlock),
+ 3, vmstate_timerblock, TimerBlock),
VMSTATE_END_OF_LIST()
}
};
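The arm_mptimer rework above, and the ptimer_init() call-site updates in the timers that follow, both use the new two-argument init: the tick handler runs from a bottom half and corner-case behaviour is selected with policy flags at init time (PTIMER_POLICY_DEFAULT keeps the historical behaviour). A minimal sketch of the pattern; the Demo* names are invented for illustration:

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "hw/ptimer.h"

    typedef struct DemoTimer {
        QEMUBH *bh;
        ptimer_state *ptimer;
    } DemoTimer;

    static void demo_timer_tick(void *opaque)
    {
        /* raise the device interrupt here */
    }

    static void demo_timer_init(DemoTimer *t)
    {
        t->bh = qemu_bh_new(demo_timer_tick, t);
        t->ptimer = ptimer_init(t->bh, PTIMER_POLICY_DEFAULT);
        ptimer_set_freq(t->ptimer, 1000000);  /* 1 MHz input clock */
        ptimer_set_limit(t->ptimer, 1000, 1); /* reload value, reset the counter */
        ptimer_run(t->ptimer, 0);             /* 0 = periodic mode */
    }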
diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c
index 111a16db37..98fddd7ac1 100644
--- a/hw/timer/arm_timer.c
+++ b/hw/timer/arm_timer.c
@@ -171,7 +171,7 @@ static arm_timer_state *arm_timer_init(uint32_t freq)
s->control = TIMER_CTRL_IE;
bh = qemu_bh_new(arm_timer_tick, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
vmstate_register(NULL, -1, &vmstate_arm_timer, s);
return s;
}
diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c
index 0f21faf876..e1fcf73c3e 100644
--- a/hw/timer/digic-timer.c
+++ b/hw/timer/digic-timer.c
@@ -127,7 +127,7 @@ static void digic_timer_init(Object *obj)
{
DigicTimerState *s = DIGIC_TIMER(obj);
- s->ptimer = ptimer_init(NULL);
+ s->ptimer = ptimer_init(NULL, PTIMER_POLICY_DEFAULT);
/*
* FIXME: there is no documentation on Digic timer
diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c
index 36d8f462c4..8e18236c5a 100644
--- a/hw/timer/etraxfs_timer.c
+++ b/hw/timer/etraxfs_timer.c
@@ -322,9 +322,9 @@ static int etraxfs_timer_init(SysBusDevice *dev)
t->bh_t0 = qemu_bh_new(timer0_hit, t);
t->bh_t1 = qemu_bh_new(timer1_hit, t);
t->bh_wd = qemu_bh_new(watchdog_hit, t);
- t->ptimer_t0 = ptimer_init(t->bh_t0);
- t->ptimer_t1 = ptimer_init(t->bh_t1);
- t->ptimer_wd = ptimer_init(t->bh_wd);
+ t->ptimer_t0 = ptimer_init(t->bh_t0, PTIMER_POLICY_DEFAULT);
+ t->ptimer_t1 = ptimer_init(t->bh_t1, PTIMER_POLICY_DEFAULT);
+ t->ptimer_wd = ptimer_init(t->bh_wd, PTIMER_POLICY_DEFAULT);
sysbus_init_irq(dev, &t->irq);
sysbus_init_irq(dev, &t->nmi);
diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c
index ae69345f0d..0c189348ae 100644
--- a/hw/timer/exynos4210_mct.c
+++ b/hw/timer/exynos4210_mct.c
@@ -1431,15 +1431,16 @@ static void exynos4210_mct_init(Object *obj)
/* Global timer */
bh[0] = qemu_bh_new(exynos4210_gfrc_event, s);
- s->g_timer.ptimer_frc = ptimer_init(bh[0]);
+ s->g_timer.ptimer_frc = ptimer_init(bh[0], PTIMER_POLICY_DEFAULT);
memset(&s->g_timer.reg, 0, sizeof(struct gregs));
/* Local timers */
for (i = 0; i < 2; i++) {
bh[0] = qemu_bh_new(exynos4210_ltick_event, &s->l_timer[i]);
bh[1] = qemu_bh_new(exynos4210_lfrc_event, &s->l_timer[i]);
- s->l_timer[i].tick_timer.ptimer_tick = ptimer_init(bh[0]);
- s->l_timer[i].ptimer_frc = ptimer_init(bh[1]);
+ s->l_timer[i].tick_timer.ptimer_tick =
+ ptimer_init(bh[0], PTIMER_POLICY_DEFAULT);
+ s->l_timer[i].ptimer_frc = ptimer_init(bh[1], PTIMER_POLICY_DEFAULT);
s->l_timer[i].id = i;
}
diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c
index 0e9e2e9bf5..f5765075c7 100644
--- a/hw/timer/exynos4210_pwm.c
+++ b/hw/timer/exynos4210_pwm.c
@@ -390,7 +390,7 @@ static void exynos4210_pwm_init(Object *obj)
for (i = 0; i < EXYNOS4210_PWM_TIMERS_NUM; i++) {
bh = qemu_bh_new(exynos4210_pwm_tick, &s->timer[i]);
sysbus_init_irq(dev, &s->timer[i].irq);
- s->timer[i].ptimer = ptimer_init(bh);
+ s->timer[i].ptimer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
s->timer[i].id = i;
s->timer[i].parent = s;
}
diff --git a/hw/timer/exynos4210_rtc.c b/hw/timer/exynos4210_rtc.c
index da4dd451b9..1a648c5d9e 100644
--- a/hw/timer/exynos4210_rtc.c
+++ b/hw/timer/exynos4210_rtc.c
@@ -555,12 +555,12 @@ static void exynos4210_rtc_init(Object *obj)
QEMUBH *bh;
bh = qemu_bh_new(exynos4210_rtc_tick, s);
- s->ptimer = ptimer_init(bh);
+ s->ptimer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(s->ptimer, RTC_BASE_FREQ);
exynos4210_rtc_update_freq(s, 0);
bh = qemu_bh_new(exynos4210_rtc_1Hz_tick, s);
- s->ptimer_1Hz = ptimer_init(bh);
+ s->ptimer_1Hz = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ);
sysbus_init_irq(dev, &s->alm_irq);
diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c
index dd000f5afa..4ed96e970a 100644
--- a/hw/timer/grlib_gptimer.c
+++ b/hw/timer/grlib_gptimer.c
@@ -26,7 +26,6 @@
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
-#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "trace.h"
@@ -363,7 +362,7 @@ static int grlib_gptimer_init(SysBusDevice *dev)
timer->unit = unit;
timer->bh = qemu_bh_new(grlib_gptimer_hit, timer);
- timer->ptimer = ptimer_init(timer->bh);
+ timer->ptimer = ptimer_init(timer->bh, PTIMER_POLICY_DEFAULT);
timer->id = i;
/* One IRQ line for each timer */
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index eddf3481e8..8677b753b1 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -30,7 +30,7 @@
} \
} while (0)
-static char const *imx_epit_reg_name(uint32_t reg)
+static const char *imx_epit_reg_name(uint32_t reg)
{
switch (reg) {
case 0:
@@ -314,10 +314,10 @@ static void imx_epit_realize(DeviceState *dev, Error **errp)
0x00001000);
sysbus_init_mmio(sbd, &s->iomem);
- s->timer_reload = ptimer_init(NULL);
+ s->timer_reload = ptimer_init(NULL, PTIMER_POLICY_DEFAULT);
bh = qemu_bh_new(imx_epit_cmp, s);
- s->timer_cmp = ptimer_init(bh);
+ s->timer_cmp = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
}
static void imx_epit_class_init(ObjectClass *klass, void *data)
diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c
index 82bc73cb86..010ccbf207 100644
--- a/hw/timer/imx_gpt.c
+++ b/hw/timer/imx_gpt.c
@@ -29,7 +29,7 @@
} \
} while (0)
-static char const *imx_gpt_reg_name(uint32_t reg)
+static const char *imx_gpt_reg_name(uint32_t reg)
{
switch (reg) {
case 0:
@@ -461,7 +461,7 @@ static void imx_gpt_realize(DeviceState *dev, Error **errp)
sysbus_init_mmio(sbd, &s->iomem);
bh = qemu_bh_new(imx_gpt_timeout, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
}
static void imx_gpt_class_init(ObjectClass *klass, void *data)
diff --git a/hw/timer/lm32_timer.c b/hw/timer/lm32_timer.c
index e45a65bb9d..2a07b59524 100644
--- a/hw/timer/lm32_timer.c
+++ b/hw/timer/lm32_timer.c
@@ -184,7 +184,7 @@ static void lm32_timer_init(Object *obj)
sysbus_init_irq(dev, &s->irq);
s->bh = qemu_bh_new(timer_hit, s);
- s->ptimer = ptimer_init(s->bh);
+ s->ptimer = ptimer_init(s->bh, PTIMER_POLICY_DEFAULT);
memory_region_init_io(&s->iomem, obj, &timer_ops, s,
"timer", R_MAX * 4);
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index ea625f25ce..da209d02f0 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -717,11 +717,18 @@ static void rtc_set_date_from_host(ISADevice *dev)
rtc_set_cmos(s, &tm);
}
+static void rtc_pre_save(void *opaque)
+{
+ RTCState *s = opaque;
+
+ rtc_update_time(s);
+}
+
static int rtc_post_load(void *opaque, int version_id)
{
RTCState *s = opaque;
- if (version_id <= 2) {
+ if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME) {
rtc_set_time(s);
s->offset = 0;
check_update_timer(s);
@@ -764,6 +771,7 @@ static const VMStateDescription vmstate_rtc = {
.name = "mc146818rtc",
.version_id = 3,
.minimum_version_id = 1,
+ .pre_save = rtc_pre_save,
.post_load = rtc_post_load,
.fields = (VMStateField[]) {
VMSTATE_BUFFER(cmos_data, RTCState),
diff --git a/hw/timer/milkymist-sysctl.c b/hw/timer/milkymist-sysctl.c
index 21948328ce..44885907c9 100644
--- a/hw/timer/milkymist-sysctl.c
+++ b/hw/timer/milkymist-sysctl.c
@@ -281,8 +281,8 @@ static void milkymist_sysctl_init(Object *obj)
s->bh0 = qemu_bh_new(timer0_hit, s);
s->bh1 = qemu_bh_new(timer1_hit, s);
- s->ptimer0 = ptimer_init(s->bh0);
- s->ptimer1 = ptimer_init(s->bh1);
+ s->ptimer0 = ptimer_init(s->bh0, PTIMER_POLICY_DEFAULT);
+ s->ptimer1 = ptimer_init(s->bh1, PTIMER_POLICY_DEFAULT);
memory_region_init_io(&s->regs_region, obj, &sysctl_mmio_ops, s,
"milkymist-sysctl", R_MAX * 4);
diff --git a/hw/timer/puv3_ost.c b/hw/timer/puv3_ost.c
index 93650b7990..0b3d717e60 100644
--- a/hw/timer/puv3_ost.c
+++ b/hw/timer/puv3_ost.c
@@ -125,7 +125,7 @@ static int puv3_ost_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->irq);
s->bh = qemu_bh_new(puv3_ost_tick, s);
- s->ptimer = ptimer_init(s->bh);
+ s->ptimer = ptimer_init(s->bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(s->ptimer, 50 * 1000 * 1000);
memory_region_init_io(&s->iomem, OBJECT(s), &puv3_ost_ops, s, "puv3_ost",
diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c
index 255b2fc910..9afb2d048c 100644
--- a/hw/timer/sh_timer.c
+++ b/hw/timer/sh_timer.c
@@ -203,7 +203,7 @@ static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq)
s->irq = irq;
bh = qemu_bh_new(sh_timer_tick, s);
- s->timer = ptimer_init(bh);
+ s->timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
sh_timer_write(s, OFFSET_TCOR >> 2, s->tcor);
sh_timer_write(s, OFFSET_TCNT >> 2, s->tcnt);
diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c
index fb3e08bedc..bfee1f3027 100644
--- a/hw/timer/slavio_timer.c
+++ b/hw/timer/slavio_timer.c
@@ -389,7 +389,7 @@ static int slavio_timer_init1(SysBusDevice *dev)
tc->timer_index = i;
bh = qemu_bh_new(slavio_timer_irq, tc);
- s->cputimer[i].timer = ptimer_init(bh);
+ s->cputimer[i].timer = ptimer_init(bh, PTIMER_POLICY_DEFAULT);
ptimer_set_period(s->cputimer[i].timer, TIMER_PERIOD);
size = i == 0 ? SYS_TIMER_SIZE : CPU_TIMER_SIZE;
diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c
index bf0fb288c4..e5f5e14a90 100644
--- a/hw/timer/stm32f2xx_timer.c
+++ b/hw/timer/stm32f2xx_timer.c
@@ -51,6 +51,15 @@ static void stm32f2xx_timer_interrupt(void *opaque)
qemu_irq_pulse(s->irq);
stm32f2xx_timer_set_alarm(s, s->hit_time);
}
+
+ if (s->tim_ccmr1 & (TIM_CCMR1_OC2M2 | TIM_CCMR1_OC2M1) &&
+ !(s->tim_ccmr1 & TIM_CCMR1_OC2M0) &&
+ s->tim_ccmr1 & TIM_CCMR1_OC2PE &&
+ s->tim_ccer & TIM_CCER_CC2E) {
+ /* PWM 2 - Mode 1 */
+ DB_PRINT("PWM2 Duty Cycle: %d%%\n",
+ s->tim_ccr2 / (100 * (s->tim_psc + 1)));
+ }
}
static inline int64_t stm32f2xx_ns_to_ticks(STM32F2XXTimerState *s, int64_t t)
@@ -208,7 +217,7 @@ static void stm32f2xx_timer_write(void *opaque, hwaddr offset,
return;
case TIM_PSC:
timer_val = stm32f2xx_ns_to_ticks(s, now) - s->tick_offset;
- s->tim_psc = value;
+ s->tim_psc = value & 0xFFFF;
value = timer_val;
break;
case TIM_CNT:
diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c
index 2ea970dc9d..59439c05be 100644
--- a/hw/timer/xilinx_timer.c
+++ b/hw/timer/xilinx_timer.c
@@ -218,7 +218,7 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp)
xt->parent = t;
xt->nr = i;
xt->bh = qemu_bh_new(timer_hit, xt);
- xt->ptimer = ptimer_init(xt->bh);
+ xt->ptimer = ptimer_init(xt->bh, PTIMER_POLICY_DEFAULT);
ptimer_set_freq(xt->ptimer, t->freq_hz);
}
diff --git a/hw/tpm/tpm_passthrough.c b/hw/tpm/tpm_passthrough.c
index e88c0d20bc..9234eb3459 100644
--- a/hw/tpm/tpm_passthrough.c
+++ b/hw/tpm/tpm_passthrough.c
@@ -165,8 +165,7 @@ static int tpm_passthrough_unix_tx_bufs(TPMPassthruState *tpm_pt,
ret = tpm_passthrough_unix_write(tpm_pt->tpm_fd, in, in_len);
if (ret != in_len) {
- if (!tpm_pt->tpm_op_canceled ||
- (tpm_pt->tpm_op_canceled && errno != ECANCELED)) {
+ if (!tpm_pt->tpm_op_canceled || errno != ECANCELED) {
error_report("tpm_passthrough: error while transmitting data "
"to TPM: %s (%i)",
strerror(errno), errno);
@@ -178,8 +177,7 @@ static int tpm_passthrough_unix_tx_bufs(TPMPassthruState *tpm_pt,
ret = tpm_passthrough_unix_read(tpm_pt->tpm_fd, out, out_len);
if (ret < 0) {
- if (!tpm_pt->tpm_op_canceled ||
- (tpm_pt->tpm_op_canceled && errno != ECANCELED)) {
+ if (!tpm_pt->tpm_op_canceled || errno != ECANCELED) {
error_report("tpm_passthrough: error while reading data from "
"TPM: %s (%i)",
strerror(errno), errno);
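The two error-path changes above drop a redundant term: in "!canceled || (canceled && errno != ECANCELED)" the second operand is only reached when canceled is true, so the conjunction collapses to "errno != ECANCELED". A self-contained check of that equivalence:

    #include <assert.h>

    /* Exhaustive check that !a || (a && b) equals !a || b for all inputs,
     * which is why the redundant "canceled &&" term can be dropped. */
    int main(void)
    {
        for (int a = 0; a <= 1; a++) {
            for (int b = 0; b <= 1; b++) {
                assert((!a || (a && b)) == (!a || b));
            }
        }
        return 0;
    }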
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index 381e7266ea..a6440fef91 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -34,7 +34,6 @@
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/main-loop.h"
-#include "sysemu/tpm_backend.h"
#define DEBUG_TIS 0
diff --git a/hw/tricore/tricore_testboard.c b/hw/tricore/tricore_testboard.c
index 8d3520f5be..19dd587207 100644
--- a/hw/tricore/tricore_testboard.c
+++ b/hw/tricore/tricore_testboard.c
@@ -46,7 +46,7 @@ static void tricore_load_kernel(CPUTriCoreState *env)
long kernel_size;
kernel_size = load_elf(tricoretb_binfo.kernel_filename, NULL,
- NULL, (uint64_t *)&entry, NULL,
+ NULL, &entry, NULL,
NULL, 0,
EM_TRICORE, 1, 0);
if (kernel_size <= 0) {
diff --git a/hw/unicore32/puv3.c b/hw/unicore32/puv3.c
index 31cd171016..032078fd3e 100644
--- a/hw/unicore32/puv3.c
+++ b/hw/unicore32/puv3.c
@@ -13,7 +13,6 @@
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
-#include "qemu-common.h"
#include "ui/console.h"
#include "elf.h"
#include "exec/address-spaces.h"
diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c
index 3213f9f8af..eceb5f3ee2 100644
--- a/hw/usb/ccid-card-emulated.c
+++ b/hw/usb/ccid-card-emulated.c
@@ -547,7 +547,7 @@ static int emulated_initfn(CCIDCardState *base)
return 0;
}
-static int emulated_exitfn(CCIDCardState *base)
+static void emulated_exitfn(CCIDCardState *base)
{
EmulatedState *card = EMULATED_CCID_CARD(base);
VEvent *vevent = vevent_new(VEVENT_LAST, NULL, NULL);
@@ -564,7 +564,6 @@ static int emulated_exitfn(CCIDCardState *base)
qemu_mutex_destroy(&card->handle_apdu_mutex);
qemu_mutex_destroy(&card->vreader_mutex);
qemu_mutex_destroy(&card->event_list_mutex);
- return 0;
}
static Property emulated_card_properties[] = {
diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c
index c0e90e501c..88cb6d8978 100644
--- a/hw/usb/ccid-card-passthru.c
+++ b/hw/usb/ccid-card-passthru.c
@@ -48,7 +48,7 @@ typedef struct PassthruState PassthruState;
struct PassthruState {
CCIDCardState base;
- CharDriverState *cs;
+ CharBackend cs;
uint8_t vscard_in_data[VSCARD_IN_SIZE];
uint32_t vscard_in_pos;
uint32_t vscard_in_hdr;
@@ -75,8 +75,11 @@ static void ccid_card_vscard_send_msg(PassthruState *s,
scr_msg_header.type = htonl(type);
scr_msg_header.reader_id = htonl(reader_id);
scr_msg_header.length = htonl(length);
- qemu_chr_fe_write(s->cs, (uint8_t *)&scr_msg_header, sizeof(VSCMsgHeader));
- qemu_chr_fe_write(s->cs, payload, length);
+ /* XXX this blocks the entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->cs, (uint8_t *)&scr_msg_header,
+ sizeof(VSCMsgHeader));
+ qemu_chr_fe_write_all(&s->cs, payload, length);
}
static void ccid_card_vscard_send_apdu(PassthruState *s,
@@ -261,7 +264,10 @@ static void ccid_card_vscard_handle_message(PassthruState *card,
static void ccid_card_vscard_drop_connection(PassthruState *card)
{
- qemu_chr_delete(card->cs);
+ CharDriverState *chr = qemu_chr_fe_get_driver(&card->cs);
+
+ qemu_chr_fe_deinit(&card->cs);
+ qemu_chr_delete(chr);
card->vscard_in_pos = card->vscard_in_hdr = 0;
}
@@ -306,8 +312,6 @@ static void ccid_card_vscard_event(void *opaque, int event)
case CHR_EVENT_BREAK:
card->vscard_in_pos = card->vscard_in_hdr = 0;
break;
- case CHR_EVENT_FOCUS:
- break;
case CHR_EVENT_OPENED:
DPRINTF(card, D_INFO, "%s: CHR_EVENT_OPENED\n", __func__);
break;
@@ -321,7 +325,7 @@ static void passthru_apdu_from_guest(
{
PassthruState *card = PASSTHRU_CCID_CARD(base);
- if (!card->cs) {
+ if (!qemu_chr_fe_get_driver(&card->cs)) {
printf("ccid-passthru: no chardev, discarding apdu length %d\n", len);
return;
}
@@ -342,12 +346,12 @@ static int passthru_initfn(CCIDCardState *base)
card->vscard_in_pos = 0;
card->vscard_in_hdr = 0;
- if (card->cs) {
+ if (qemu_chr_fe_get_driver(&card->cs)) {
DPRINTF(card, D_INFO, "initing chardev\n");
- qemu_chr_add_handlers(card->cs,
+ qemu_chr_fe_set_handlers(&card->cs,
ccid_card_vscard_can_read,
ccid_card_vscard_read,
- ccid_card_vscard_event, card);
+ ccid_card_vscard_event, card, NULL, true);
ccid_card_vscard_send_init(card);
} else {
error_report("missing chardev");
@@ -361,11 +365,6 @@ static int passthru_initfn(CCIDCardState *base)
return 0;
}
-static int passthru_exitfn(CCIDCardState *base)
-{
- return 0;
-}
-
static VMStateDescription passthru_vmstate = {
.name = "ccid-card-passthru",
.version_id = 1,
@@ -392,7 +391,6 @@ static void passthru_class_initfn(ObjectClass *klass, void *data)
CCIDCardClass *cc = CCID_CARD_CLASS(klass);
cc->initfn = passthru_initfn;
- cc->exitfn = passthru_exitfn;
cc->get_atr = passthru_get_atr;
cc->apdu_from_guest = passthru_apdu_from_guest;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
diff --git a/hw/usb/ccid.h b/hw/usb/ccid.h
index 9334da8acd..1f070116d6 100644
--- a/hw/usb/ccid.h
+++ b/hw/usb/ccid.h
@@ -33,7 +33,7 @@ typedef struct CCIDCardClass {
void (*apdu_from_guest)(CCIDCardState *card,
const uint8_t *apdu,
uint32_t len);
- int (*exitfn)(CCIDCardState *card);
+ void (*exitfn)(CCIDCardState *card);
int (*initfn)(CCIDCardState *card);
} CCIDCardClass;
diff --git a/hw/usb/desc.c b/hw/usb/desc.c
index 5e0e1d157e..7828e52c6f 100644
--- a/hw/usb/desc.c
+++ b/hw/usb/desc.c
@@ -556,9 +556,7 @@ void usb_desc_create_serial(USBDevice *dev)
DeviceState *hcd = dev->qdev.parent_bus->parent;
const USBDesc *desc = usb_device_get_usb_desc(dev);
int index = desc->id.iSerialNumber;
- char serial[64];
- char *path;
- int dst;
+ char *path, *serial;
if (dev->serial) {
/* 'serial' usb bus property has priority if present */
@@ -567,14 +565,16 @@ void usb_desc_create_serial(USBDevice *dev)
}
assert(index != 0 && desc->str[index] != NULL);
- dst = snprintf(serial, sizeof(serial), "%s", desc->str[index]);
path = qdev_get_dev_path(hcd);
if (path) {
- dst += snprintf(serial+dst, sizeof(serial)-dst, "-%s", path);
+ serial = g_strdup_printf("%s-%s-%s", desc->str[index],
+ path, dev->port->path);
+ } else {
+ serial = g_strdup_printf("%s-%s", desc->str[index], dev->port->path);
}
- dst += snprintf(serial+dst, sizeof(serial)-dst, "-%s", dev->port->path);
usb_desc_set_string(dev, index, serial);
g_free(path);
+ g_free(serial);
}
const char *usb_desc_get_string(USBDevice *dev, uint8_t index)
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
index 1be85ae75a..9cb0f50750 100644
--- a/hw/usb/dev-mtp.c
+++ b/hw/usb/dev-mtp.c
@@ -17,7 +17,6 @@
#include <sys/statvfs.h>
#ifdef CONFIG_INOTIFY1
#include <sys/inotify.h>
-#include "qapi/error.h"
#include "qemu/main-loop.h"
#endif
@@ -48,6 +47,9 @@ enum mtp_code {
CMD_GET_OBJECT_INFO = 0x1008,
CMD_GET_OBJECT = 0x1009,
CMD_GET_PARTIAL_OBJECT = 0x101b,
+ CMD_GET_OBJECT_PROPS_SUPPORTED = 0x9801,
+ CMD_GET_OBJECT_PROP_DESC = 0x9802,
+ CMD_GET_OBJECT_PROP_VALUE = 0x9803,
/* response codes */
RES_OK = 0x2001,
@@ -59,10 +61,12 @@ enum mtp_code {
RES_INCOMPLETE_TRANSFER = 0x2007,
RES_INVALID_STORAGE_ID = 0x2008,
RES_INVALID_OBJECT_HANDLE = 0x2009,
+ RES_INVALID_OBJECT_FORMAT_CODE = 0x200b,
RES_SPEC_BY_FORMAT_UNSUPPORTED = 0x2014,
RES_INVALID_PARENT_OBJECT = 0x201a,
RES_INVALID_PARAMETER = 0x201d,
RES_SESSION_ALREADY_OPEN = 0x201e,
+ RES_INVALID_OBJECT_PROP_CODE = 0xA801,
/* format codes */
FMT_UNDEFINED_OBJECT = 0x3000,
@@ -72,6 +76,22 @@ enum mtp_code {
EVT_OBJ_ADDED = 0x4002,
EVT_OBJ_REMOVED = 0x4003,
EVT_OBJ_INFO_CHANGED = 0x4007,
+
+ /* object properties */
+ PROP_STORAGE_ID = 0xDC01,
+ PROP_OBJECT_FORMAT = 0xDC02,
+ PROP_OBJECT_COMPRESSED_SIZE = 0xDC04,
+ PROP_PARENT_OBJECT = 0xDC0B,
+ PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER = 0xDC41,
+ PROP_NAME = 0xDC44,
+};
+
+enum mtp_data_type {
+ DATA_TYPE_UINT16 = 0x0004,
+ DATA_TYPE_UINT32 = 0x0006,
+ DATA_TYPE_UINT64 = 0x0008,
+ DATA_TYPE_UINT128 = 0x000a,
+ DATA_TYPE_STRING = 0xffff,
};
typedef struct {
@@ -115,8 +135,8 @@ struct MTPControl {
struct MTPData {
uint16_t code;
uint32_t trans;
- uint32_t offset;
- uint32_t length;
+ uint64_t offset;
+ uint64_t length;
uint32_t alloc;
uint8_t *data;
bool first;
@@ -778,6 +798,9 @@ static MTPData *usb_mtp_get_device_info(MTPState *s, MTPControl *c)
CMD_GET_OBJECT_INFO,
CMD_GET_OBJECT,
CMD_GET_PARTIAL_OBJECT,
+ CMD_GET_OBJECT_PROPS_SUPPORTED,
+ CMD_GET_OBJECT_PROP_DESC,
+ CMD_GET_OBJECT_PROP_VALUE,
};
static const uint16_t fmt[] = {
FMT_UNDEFINED_OBJECT,
@@ -883,7 +906,12 @@ static MTPData *usb_mtp_get_object_info(MTPState *s, MTPControl *c,
usb_mtp_add_u32(d, QEMU_STORAGE_ID);
usb_mtp_add_u16(d, o->format);
usb_mtp_add_u16(d, 0);
- usb_mtp_add_u32(d, o->stat.st_size);
+
+ if (o->stat.st_size > 0xFFFFFFFF) {
+ usb_mtp_add_u32(d, 0xFFFFFFFF);
+ } else {
+ usb_mtp_add_u32(d, o->stat.st_size);
+ }
usb_mtp_add_u16(d, 0);
usb_mtp_add_u32(d, 0);
@@ -966,6 +994,122 @@ static MTPData *usb_mtp_get_partial_object(MTPState *s, MTPControl *c,
return d;
}
+static MTPData *usb_mtp_get_object_props_supported(MTPState *s, MTPControl *c)
+{
+ static const uint16_t props[] = {
+ PROP_STORAGE_ID,
+ PROP_OBJECT_FORMAT,
+ PROP_OBJECT_COMPRESSED_SIZE,
+ PROP_PARENT_OBJECT,
+ PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER,
+ PROP_NAME,
+ };
+ MTPData *d = usb_mtp_data_alloc(c);
+ usb_mtp_add_u16_array(d, ARRAY_SIZE(props), props);
+
+ return d;
+}
+
+static MTPData *usb_mtp_get_object_prop_desc(MTPState *s, MTPControl *c)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+ switch (c->argv[0]) {
+ case PROP_STORAGE_ID:
+ usb_mtp_add_u16(d, PROP_STORAGE_ID);
+ usb_mtp_add_u16(d, DATA_TYPE_UINT32);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u8(d, 0x00);
+ break;
+ case PROP_OBJECT_FORMAT:
+ usb_mtp_add_u16(d, PROP_OBJECT_FORMAT);
+ usb_mtp_add_u16(d, DATA_TYPE_UINT16);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u16(d, 0x0000);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u8(d, 0x00);
+ break;
+ case PROP_OBJECT_COMPRESSED_SIZE:
+ usb_mtp_add_u16(d, PROP_OBJECT_COMPRESSED_SIZE);
+ usb_mtp_add_u16(d, DATA_TYPE_UINT64);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u64(d, 0x0000000000000000);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u8(d, 0x00);
+ break;
+ case PROP_PARENT_OBJECT:
+ usb_mtp_add_u16(d, PROP_PARENT_OBJECT);
+ usb_mtp_add_u16(d, DATA_TYPE_UINT32);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u8(d, 0x00);
+ break;
+ case PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER:
+ usb_mtp_add_u16(d, PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER);
+ usb_mtp_add_u16(d, DATA_TYPE_UINT128);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u64(d, 0x0000000000000000);
+ usb_mtp_add_u64(d, 0x0000000000000000);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u8(d, 0x00);
+ break;
+ case PROP_NAME:
+ usb_mtp_add_u16(d, PROP_NAME);
+ usb_mtp_add_u16(d, DATA_TYPE_STRING);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u8(d, 0x00);
+ usb_mtp_add_u32(d, 0x00000000);
+ usb_mtp_add_u8(d, 0x00);
+ break;
+ default:
+ usb_mtp_data_free(d);
+ return NULL;
+ }
+
+ return d;
+}
+
+static MTPData *usb_mtp_get_object_prop_value(MTPState *s, MTPControl *c,
+ MTPObject *o)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+ switch (c->argv[1]) {
+ case PROP_STORAGE_ID:
+ usb_mtp_add_u32(d, QEMU_STORAGE_ID);
+ break;
+ case PROP_OBJECT_FORMAT:
+ usb_mtp_add_u16(d, o->format);
+ break;
+ case PROP_OBJECT_COMPRESSED_SIZE:
+ usb_mtp_add_u64(d, o->stat.st_size);
+ break;
+ case PROP_PARENT_OBJECT:
+ if (o->parent == NULL) {
+ usb_mtp_add_u32(d, 0x00000000);
+ } else {
+ usb_mtp_add_u32(d, o->parent->handle);
+ }
+ break;
+ case PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER:
+ /* Should be persistent between sessions,
+ * but using our object ID is "good enough"
+ * for now */
+ usb_mtp_add_u64(d, 0x0000000000000000);
+ usb_mtp_add_u64(d, o->handle);
+ break;
+ case PROP_NAME:
+ usb_mtp_add_str(d, o->name);
+ break;
+ default:
+ usb_mtp_data_free(d);
+ return NULL;
+ }
+
+ return d;
+}
+
static void usb_mtp_command(MTPState *s, MTPControl *c)
{
MTPData *data_in = NULL;
@@ -1113,6 +1257,43 @@ static void usb_mtp_command(MTPState *s, MTPControl *c)
nres = 1;
res0 = data_in->length;
break;
+ case CMD_GET_OBJECT_PROPS_SUPPORTED:
+ if (c->argv[0] != FMT_UNDEFINED_OBJECT &&
+ c->argv[0] != FMT_ASSOCIATION) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_FORMAT_CODE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_object_props_supported(s, c);
+ break;
+ case CMD_GET_OBJECT_PROP_DESC:
+ if (c->argv[1] != FMT_UNDEFINED_OBJECT &&
+ c->argv[1] != FMT_ASSOCIATION) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_FORMAT_CODE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_object_prop_desc(s, c);
+ if (data_in == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_PROP_CODE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ break;
+ case CMD_GET_OBJECT_PROP_VALUE:
+ o = usb_mtp_object_lookup(s, c->argv[0]);
+ if (o == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_object_prop_value(s, c, o);
+ if (data_in == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_PROP_CODE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ break;
default:
trace_usb_mtp_op_unknown(s->dev.addr, c->code);
usb_mtp_queue_result(s, RES_OPERATION_NOT_SUPPORTED,
@@ -1193,10 +1374,15 @@ static void usb_mtp_handle_data(USBDevice *dev, USBPacket *p)
}
if (s->data_in != NULL) {
MTPData *d = s->data_in;
- int dlen = d->length - d->offset;
+ uint64_t dlen = d->length - d->offset;
if (d->first) {
trace_usb_mtp_data_in(s->dev.addr, d->trans, d->length);
- container.length = cpu_to_le32(d->length + sizeof(container));
+ if (d->length + sizeof(container) > 0xFFFFFFFF) {
+ container.length = cpu_to_le32(0xFFFFFFFF);
+ } else {
+ container.length =
+ cpu_to_le32(d->length + sizeof(container));
+ }
container.type = cpu_to_le16(TYPE_DATA);
container.code = cpu_to_le16(d->code);
container.trans = cpu_to_le32(d->trans);
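The MTP changes above widen MTPData offset/length to 64 bits, but the wire format still carries 32-bit fields in ObjectInfo and in the data-phase container header, so those are clamped to 0xFFFFFFFF and the full size is only reported through the new 64-bit PROP_OBJECT_COMPRESSED_SIZE property. A sketch of the clamping rule; the helper name is illustrative, not part of the patch:

    #include <stdint.h>

    /* Objects larger than 4 GiB cannot be represented in the 32-bit
     * ObjectInfo/container size fields; report the maximum instead. */
    static inline uint32_t mtp_clamp_size_u32(uint64_t size)
    {
        return size > 0xFFFFFFFFull ? 0xFFFFFFFFu : (uint32_t)size;
    }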
diff --git a/hw/usb/dev-serial.c b/hw/usb/dev-serial.c
index ba8538e60e..6066d9b0f7 100644
--- a/hw/usb/dev-serial.c
+++ b/hw/usb/dev-serial.c
@@ -103,7 +103,7 @@ typedef struct {
uint8_t event_trigger;
QEMUSerialSetParams params;
int latency; /* ms */
- CharDriverState *cs;
+ CharBackend cs;
} USBSerialState;
#define TYPE_USB_SERIAL "usb-serial-dev"
@@ -209,8 +209,10 @@ static uint8_t usb_get_modem_lines(USBSerialState *s)
int flags;
uint8_t ret;
- if (qemu_chr_fe_ioctl(s->cs, CHR_IOCTL_SERIAL_GET_TIOCM, &flags) == -ENOTSUP)
+ if (qemu_chr_fe_ioctl(&s->cs,
+ CHR_IOCTL_SERIAL_GET_TIOCM, &flags) == -ENOTSUP) {
return FTDI_CTS|FTDI_DSR|FTDI_RLSD;
+ }
ret = 0;
if (flags & CHR_TIOCM_CTS)
@@ -260,7 +262,7 @@ static void usb_serial_handle_control(USBDevice *dev, USBPacket *p,
case DeviceOutVendor | FTDI_SET_MDM_CTRL:
{
static int flags;
- qemu_chr_fe_ioctl(s->cs,CHR_IOCTL_SERIAL_GET_TIOCM, &flags);
+ qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_GET_TIOCM, &flags);
if (value & FTDI_SET_RTS) {
if (value & FTDI_RTS)
flags |= CHR_TIOCM_RTS;
@@ -273,7 +275,7 @@ static void usb_serial_handle_control(USBDevice *dev, USBPacket *p,
else
flags &= ~CHR_TIOCM_DTR;
}
- qemu_chr_fe_ioctl(s->cs,CHR_IOCTL_SERIAL_SET_TIOCM, &flags);
+ qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_SET_TIOCM, &flags);
break;
}
case DeviceOutVendor | FTDI_SET_FLOW_CTRL:
@@ -292,7 +294,7 @@ static void usb_serial_handle_control(USBDevice *dev, USBPacket *p,
divisor = 1;
s->params.speed = (48000000 / 2) / (8 * divisor + subdivisor8);
- qemu_chr_fe_ioctl(s->cs, CHR_IOCTL_SERIAL_SET_PARAMS, &s->params);
+ qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_SET_PARAMS, &s->params);
break;
}
case DeviceOutVendor | FTDI_SET_DATA:
@@ -321,7 +323,7 @@ static void usb_serial_handle_control(USBDevice *dev, USBPacket *p,
DPRINTF("unsupported stop bits %d\n", value & FTDI_STOP);
goto fail;
}
- qemu_chr_fe_ioctl(s->cs, CHR_IOCTL_SERIAL_SET_PARAMS, &s->params);
+ qemu_chr_fe_ioctl(&s->cs, CHR_IOCTL_SERIAL_SET_PARAMS, &s->params);
/* TODO: TX ON/OFF */
break;
case DeviceInVendor | FTDI_GET_MDM_ST:
@@ -366,7 +368,9 @@ static void usb_serial_handle_data(USBDevice *dev, USBPacket *p)
goto fail;
for (i = 0; i < p->iov.niov; i++) {
iov = p->iov.iov + i;
- qemu_chr_fe_write(s->cs, iov->iov_base, iov->iov_len);
+ /* XXX this blocks the entire thread. Rewrite to use
+ * qemu_chr_fe_write and background I/O callbacks */
+ qemu_chr_fe_write_all(&s->cs, iov->iov_base, iov->iov_len);
}
p->actual_length = p->iov.size;
break;
@@ -462,8 +466,6 @@ static void usb_serial_event(void *opaque, int event)
case CHR_EVENT_BREAK:
s->event_trigger |= FTDI_BI;
break;
- case CHR_EVENT_FOCUS:
- break;
case CHR_EVENT_OPENED:
if (!s->dev.attached) {
usb_device_attach(&s->dev, &error_abort);
@@ -481,12 +483,13 @@ static void usb_serial_realize(USBDevice *dev, Error **errp)
{
USBSerialState *s = USB_SERIAL_DEV(dev);
Error *local_err = NULL;
+ CharDriverState *chr = qemu_chr_fe_get_driver(&s->cs);
usb_desc_create_serial(dev);
usb_desc_init(dev);
dev->auto_attach = 0;
- if (!s->cs) {
+ if (!chr) {
error_setg(errp, "Property chardev is required");
return;
}
@@ -497,11 +500,11 @@ static void usb_serial_realize(USBDevice *dev, Error **errp)
return;
}
- qemu_chr_add_handlers(s->cs, usb_serial_can_read, usb_serial_read,
- usb_serial_event, s);
+ qemu_chr_fe_set_handlers(&s->cs, usb_serial_can_read, usb_serial_read,
+ usb_serial_event, s, NULL, true);
usb_serial_handle_reset(dev);
- if (s->cs->be_open && !dev->attached) {
+ if (chr->be_open && !dev->attached) {
usb_device_attach(dev, &error_abort);
}
}
@@ -545,7 +548,7 @@ static USBDevice *usb_serial_init(USBBus *bus, const char *filename)
filename++;
snprintf(label, sizeof(label), "usbserial%d", index++);
- cdrv = qemu_chr_new(label, filename, NULL);
+ cdrv = qemu_chr_new(label, filename);
if (!cdrv)
return NULL;
@@ -563,7 +566,7 @@ static USBDevice *usb_braille_init(USBBus *bus, const char *unused)
USBDevice *dev;
CharDriverState *cdrv;
- cdrv = qemu_chr_new("braille", "braille", NULL);
+ cdrv = qemu_chr_new("braille", "braille");
if (!cdrv)
return NULL;
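The chardev users above (ccid-card-passthru and usb-serial) are converted from holding a raw CharDriverState pointer to embedding a CharBackend: presence of a chardev is probed with qemu_chr_fe_get_driver(), callbacks are registered once with qemu_chr_fe_set_handlers(), and synchronous output goes through qemu_chr_fe_write_all(). A minimal sketch of that front-end pattern, with header paths as of this series and the DemoSerial type and callbacks invented for illustration:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/char.h"

    typedef struct DemoSerial {
        CharBackend chr;
    } DemoSerial;

    static int demo_can_read(void *opaque)
    {
        return 64; /* bytes we are currently willing to accept */
    }

    static void demo_read(void *opaque, const uint8_t *buf, int size)
    {
        /* push the received bytes towards the guest-visible FIFO here */
    }

    static void demo_event(void *opaque, int event)
    {
    }

    static void demo_realize(DemoSerial *s, Error **errp)
    {
        if (!qemu_chr_fe_get_driver(&s->chr)) {
            error_setg(errp, "Property chardev is required");
            return;
        }
        qemu_chr_fe_set_handlers(&s->chr, demo_can_read, demo_read,
                                 demo_event, s, NULL, true);
    }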
diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c
index af4b851356..89e11b68c4 100644
--- a/hw/usb/dev-smartcard-reader.c
+++ b/hw/usb/dev-smartcard-reader.c
@@ -508,14 +508,14 @@ static void ccid_card_apdu_from_guest(CCIDCardState *card,
}
}
-static int ccid_card_exitfn(CCIDCardState *card)
+static void ccid_card_exitfn(CCIDCardState *card)
{
CCIDCardClass *cc = CCID_CARD_GET_CLASS(card);
if (cc->exitfn) {
- return cc->exitfn(card);
+ cc->exitfn(card);
}
- return 0;
}
static int ccid_card_initfn(CCIDCardState *card)
@@ -1279,7 +1279,6 @@ void ccid_card_card_inserted(CCIDCardState *card)
static int ccid_card_exit(DeviceState *qdev)
{
- int ret = 0;
CCIDCardState *card = CCID_CARD(qdev);
USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent);
USBCCIDState *s = USB_CCID_DEV(dev);
@@ -1287,9 +1286,9 @@ static int ccid_card_exit(DeviceState *qdev)
if (ccid_card_inserted(s)) {
ccid_card_card_removed(card);
}
- ret = ccid_card_exitfn(card);
+ ccid_card_exitfn(card);
s->card = NULL;
- return ret;
+ return 0;
}
static int ccid_card_init(DeviceState *qdev)
diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
index b093db729c..7622a3ae72 100644
--- a/hw/usb/hcd-ehci.c
+++ b/hw/usb/hcd-ehci.c
@@ -1190,6 +1190,7 @@ static int ehci_init_transfer(EHCIPacket *p)
while (bytes > 0) {
if (cpage > 4) {
fprintf(stderr, "cpage out of range (%d)\n", cpage);
+ qemu_sglist_destroy(&p->sgl);
return -1;
}
@@ -1426,6 +1427,7 @@ static int ehci_process_itd(EHCIState *ehci,
if (off + len > 4096) {
/* transfer crosses page border */
if (pg == 6) {
+ qemu_sglist_destroy(&ehci->isgl);
return -1; /* avoid page pg + 1 */
}
ptr2 = (itd->bufptr[pg + 1] & ITD_BUFPTR_MASK);
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index fa5703832c..c82a92fff7 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -2139,7 +2139,7 @@ static const TypeInfo ohci_pci_info = {
static Property ohci_sysbus_properties[] = {
DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3),
- DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 3),
+ DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 0),
DEFINE_PROP_END_OF_LIST(),
};
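The hcd-xhci.c rework that follows drops the fixed TD_QUEUE array of per-endpoint transfers in favour of transfers allocated on demand and linked on a QTAILQ, bounded by a limit derived from the stream count. A minimal sketch of that intrusive-list pattern; the Demo* names are invented for illustration:

    #include "qemu/osdep.h"
    #include "qemu/queue.h"

    /* Each element carries the link field, the head lives in the owning
     * context, and elements are allocated on demand instead of sitting
     * in a fixed-size array. */
    typedef struct DemoXfer {
        int id;
        QTAILQ_ENTRY(DemoXfer) next;
    } DemoXfer;

    typedef struct DemoEndpoint {
        QTAILQ_HEAD(, DemoXfer) transfers;
        uint32_t xfer_count;
    } DemoEndpoint;

    static void demo_endpoint_init(DemoEndpoint *ep)
    {
        QTAILQ_INIT(&ep->transfers);
        ep->xfer_count = 0;
    }

    static DemoXfer *demo_alloc_xfer(DemoEndpoint *ep, uint32_t limit)
    {
        DemoXfer *xfer;

        if (ep->xfer_count >= limit) {
            return NULL; /* bound the queue, as the xHCI code does */
        }
        xfer = g_new0(DemoXfer, 1);
        QTAILQ_INSERT_TAIL(&ep->transfers, xfer, next);
        ep->xfer_count++;
        return xfer;
    }

    static void demo_free_xfer(DemoEndpoint *ep, DemoXfer *xfer)
    {
        QTAILQ_REMOVE(&ep->transfers, xfer, next);
        ep->xfer_count--;
        g_free(xfer);
    }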
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index 188f95416a..4acf0c6dd8 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -21,6 +21,7 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qemu/timer.h"
+#include "qemu/queue.h"
#include "hw/usb.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
@@ -46,14 +47,14 @@
#define MAXSLOTS 64
#define MAXINTRS 16
-#define TD_QUEUE 24
-
/* Very pessimistic, let's hope it's enough for all cases */
-#define EV_QUEUE (((3*TD_QUEUE)+16)*MAXSLOTS)
+#define EV_QUEUE (((3 * 24) + 16) * MAXSLOTS)
/* Do not deliver ER Full events. NEC's driver does some things not bound
* to the specs when it gets them */
#define ER_FULL_HACK
+#define TRB_LINK_LIMIT 4
+
#define LEN_CAP 0x40
#define LEN_OPER (0x400 + 0x10 * MAXPORTS)
#define LEN_RUNTIME ((MAXINTRS + 1) * 0x20)
@@ -343,7 +344,7 @@ typedef struct XHCIPort {
} XHCIPort;
typedef struct XHCITransfer {
- XHCIState *xhci;
+ XHCIEPContext *epctx;
USBPacket packet;
QEMUSGList sgl;
bool running_async;
@@ -351,15 +352,12 @@ typedef struct XHCITransfer {
bool complete;
bool int_req;
unsigned int iso_pkts;
- unsigned int slotid;
- unsigned int epid;
unsigned int streamid;
bool in_xfer;
bool iso_xfer;
bool timed_xfer;
unsigned int trb_count;
- unsigned int trb_alloced;
XHCITRB *trbs;
TRBCCode status;
@@ -369,6 +367,8 @@ typedef struct XHCITransfer {
unsigned int cur_pkt;
uint64_t mfindex_kick;
+
+ QTAILQ_ENTRY(XHCITransfer) next;
} XHCITransfer;
struct XHCIStreamContext {
@@ -383,9 +383,8 @@ struct XHCIEPContext {
unsigned int epid;
XHCIRing ring;
- unsigned int next_xfer;
- unsigned int comp_xfer;
- XHCITransfer transfers[TD_QUEUE];
+ uint32_t xfer_count;
+ QTAILQ_HEAD(, XHCITransfer) transfers;
XHCITransfer *retry;
EPType type;
dma_addr_t pctx;
@@ -508,13 +507,13 @@ enum xhci_flags {
static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
unsigned int epid, unsigned int streamid);
+static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid);
static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid,
unsigned int epid);
static void xhci_xfer_report(XHCITransfer *xfer);
static void xhci_event(XHCIState *xhci, XHCIEvent *event, int v);
static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v);
-static USBEndpoint *xhci_epid_to_usbep(XHCIState *xhci,
- unsigned int slotid, unsigned int epid);
+static USBEndpoint *xhci_epid_to_usbep(XHCIEPContext *epctx);
static const char *TRBType_names[] = {
[TRB_RESERVED] = "TRB_RESERVED",
@@ -1000,6 +999,7 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb,
dma_addr_t *addr)
{
PCIDevice *pci_dev = PCI_DEVICE(xhci);
+ uint32_t link_cnt = 0;
while (1) {
TRBType type;
@@ -1026,6 +1026,9 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb,
ring->dequeue += TRB_SIZE;
return type;
} else {
+ if (++link_cnt > TRB_LINK_LIMIT) {
+ return 0;
+ }
ring->dequeue = xhci_mask64(trb->parameter);
if (trb->control & TRB_LK_TC) {
ring->ccs = !ring->ccs;
@@ -1043,6 +1046,7 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
bool ccs = ring->ccs;
/* hack to bundle together the two/three TDs that make a setup transfer */
bool control_td_set = 0;
+ uint32_t link_cnt = 0;
while (1) {
TRBType type;
@@ -1058,6 +1062,9 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
type = TRB_TYPE(trb);
if (type == TR_LINK) {
+ if (++link_cnt > TRB_LINK_LIMIT) {
+ return -length;
+ }
dequeue = xhci_mask64(trb.parameter);
if (trb.control & TRB_LK_TC) {
ccs = !ccs;
@@ -1192,7 +1199,7 @@ static int xhci_epmask_to_eps_with_streams(XHCIState *xhci,
}
epctx = slot->eps[i - 1];
- ep = xhci_epid_to_usbep(xhci, slotid, i);
+ ep = xhci_epid_to_usbep(epctx);
if (!epctx || !epctx->nr_pstreams || !ep) {
continue;
}
@@ -1353,7 +1360,7 @@ static void xhci_set_ep_state(XHCIState *xhci, XHCIEPContext *epctx,
static void xhci_ep_kick_timer(void *opaque)
{
XHCIEPContext *epctx = opaque;
- xhci_kick_ep(epctx->xhci, epctx->slotid, epctx->epid, 0);
+ xhci_kick_epctx(epctx, 0);
}
static XHCIEPContext *xhci_alloc_epctx(XHCIState *xhci,
@@ -1361,19 +1368,13 @@ static XHCIEPContext *xhci_alloc_epctx(XHCIState *xhci,
unsigned int epid)
{
XHCIEPContext *epctx;
- int i;
epctx = g_new0(XHCIEPContext, 1);
epctx->xhci = xhci;
epctx->slotid = slotid;
epctx->epid = epid;
- for (i = 0; i < ARRAY_SIZE(epctx->transfers); i++) {
- epctx->transfers[i].xhci = xhci;
- epctx->transfers[i].slotid = slotid;
- epctx->transfers[i].epid = epid;
- usb_packet_init(&epctx->transfers[i].packet);
- }
+ QTAILQ_INIT(&epctx->transfers);
epctx->kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xhci_ep_kick_timer, epctx);
return epctx;
@@ -1434,6 +1435,38 @@ static TRBCCode xhci_enable_ep(XHCIState *xhci, unsigned int slotid,
return CC_SUCCESS;
}
+static XHCITransfer *xhci_ep_alloc_xfer(XHCIEPContext *epctx,
+ uint32_t length)
+{
+ uint32_t limit = epctx->nr_pstreams + 16;
+ XHCITransfer *xfer;
+
+ if (epctx->xfer_count >= limit) {
+ return NULL;
+ }
+
+ xfer = g_new0(XHCITransfer, 1);
+ xfer->epctx = epctx;
+ xfer->trbs = g_new(XHCITRB, length);
+ xfer->trb_count = length;
+ usb_packet_init(&xfer->packet);
+
+ QTAILQ_INSERT_TAIL(&epctx->transfers, xfer, next);
+ epctx->xfer_count++;
+
+ return xfer;
+}
+
+static void xhci_ep_free_xfer(XHCITransfer *xfer)
+{
+ QTAILQ_REMOVE(&xfer->epctx->transfers, xfer, next);
+ xfer->epctx->xfer_count--;
+
+ usb_packet_cleanup(&xfer->packet);
+ g_free(xfer->trbs);
+ g_free(xfer);
+}
+
static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report)
{
int killed = 0;
@@ -1449,10 +1482,9 @@ static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report)
killed = 1;
}
if (t->running_retry) {
- XHCIEPContext *epctx = t->xhci->slots[t->slotid-1].eps[t->epid-1];
- if (epctx) {
- epctx->retry = NULL;
- timer_del(epctx->kick_timer);
+ if (t->epctx) {
+ t->epctx->retry = NULL;
+ timer_del(t->epctx->kick_timer);
}
t->running_retry = 0;
killed = 1;
@@ -1460,7 +1492,7 @@ static int xhci_ep_nuke_one_xfer(XHCITransfer *t, TRBCCode report)
g_free(t->trbs);
t->trbs = NULL;
- t->trb_count = t->trb_alloced = 0;
+ t->trb_count = 0;
return killed;
}
@@ -1470,7 +1502,8 @@ static int xhci_ep_nuke_xfers(XHCIState *xhci, unsigned int slotid,
{
XHCISlot *slot;
XHCIEPContext *epctx;
- int i, xferi, killed = 0;
+ XHCITransfer *xfer;
+ int killed = 0;
USBEndpoint *ep = NULL;
assert(slotid >= 1 && slotid <= xhci->numslots);
assert(epid >= 1 && epid <= 31);
@@ -1485,17 +1518,19 @@ static int xhci_ep_nuke_xfers(XHCIState *xhci, unsigned int slotid,
epctx = slot->eps[epid-1];
- xferi = epctx->next_xfer;
- for (i = 0; i < TD_QUEUE; i++) {
- killed += xhci_ep_nuke_one_xfer(&epctx->transfers[xferi], report);
+ for (;;) {
+ xfer = QTAILQ_FIRST(&epctx->transfers);
+ if (xfer == NULL) {
+ break;
+ }
+ killed += xhci_ep_nuke_one_xfer(xfer, report);
if (killed) {
report = 0; /* Only report once */
}
- epctx->transfers[xferi].packet.ep = NULL;
- xferi = (xferi + 1) % TD_QUEUE;
+ xhci_ep_free_xfer(xfer);
}
- ep = xhci_epid_to_usbep(xhci, slotid, epid);
+ ep = xhci_epid_to_usbep(epctx);
if (ep) {
usb_device_ep_stopped(ep->dev, ep);
}
@@ -1507,7 +1542,6 @@ static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid,
{
XHCISlot *slot;
XHCIEPContext *epctx;
- int i;
trace_usb_xhci_ep_disable(slotid, epid);
assert(slotid >= 1 && slotid <= xhci->numslots);
@@ -1528,10 +1562,6 @@ static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid,
xhci_free_streams(epctx);
}
- for (i = 0; i < ARRAY_SIZE(epctx->transfers); i++) {
- usb_packet_cleanup(&epctx->transfers[i].packet);
- }
-
/* only touch guest RAM if we're not resetting the HC */
if (xhci->dcbaap_low || xhci->dcbaap_high) {
xhci_set_ep_state(xhci, epctx, NULL, EP_DISABLED);
@@ -1684,7 +1714,7 @@ static TRBCCode xhci_set_ep_dequeue(XHCIState *xhci, unsigned int slotid,
static int xhci_xfer_create_sgl(XHCITransfer *xfer, int in_xfer)
{
- XHCIState *xhci = xfer->xhci;
+ XHCIState *xhci = xfer->epctx->xhci;
int i;
xfer->int_req = false;
@@ -1743,7 +1773,7 @@ static void xhci_xfer_report(XHCITransfer *xfer)
bool reported = 0;
bool shortpkt = 0;
XHCIEvent event = {ER_TRANSFER, CC_SUCCESS};
- XHCIState *xhci = xfer->xhci;
+ XHCIState *xhci = xfer->epctx->xhci;
int i;
left = xfer->packet.actual_length;
@@ -1753,6 +1783,12 @@ static void xhci_xfer_report(XHCITransfer *xfer)
unsigned int chunk = 0;
switch (TRB_TYPE(*trb)) {
+ case TR_SETUP:
+ chunk = trb->status & 0x1ffff;
+ if (chunk > 8) {
+ chunk = 8;
+ }
+ break;
case TR_DATA:
case TR_NORMAL:
case TR_ISOCH:
@@ -1775,8 +1811,8 @@ static void xhci_xfer_report(XHCITransfer *xfer)
if (!reported && ((trb->control & TRB_TR_IOC) ||
(shortpkt && (trb->control & TRB_TR_ISP)) ||
(xfer->status != CC_SUCCESS && left == 0))) {
- event.slotid = xfer->slotid;
- event.epid = xfer->epid;
+ event.slotid = xfer->epctx->slotid;
+ event.epid = xfer->epctx->epid;
event.length = (trb->status & 0x1ffff) - chunk;
event.flags = 0;
event.ptr = trb->addr;
@@ -1811,9 +1847,8 @@ static void xhci_xfer_report(XHCITransfer *xfer)
static void xhci_stall_ep(XHCITransfer *xfer)
{
- XHCIState *xhci = xfer->xhci;
- XHCISlot *slot = &xhci->slots[xfer->slotid-1];
- XHCIEPContext *epctx = slot->eps[xfer->epid-1];
+ XHCIEPContext *epctx = xfer->epctx;
+ XHCIState *xhci = epctx->xhci;
uint32_t err;
XHCIStreamContext *sctx;
@@ -1837,7 +1872,6 @@ static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer,
static int xhci_setup_packet(XHCITransfer *xfer)
{
- XHCIState *xhci = xfer->xhci;
USBEndpoint *ep;
int dir;
@@ -1846,7 +1880,7 @@ static int xhci_setup_packet(XHCITransfer *xfer)
if (xfer->packet.ep) {
ep = xfer->packet.ep;
} else {
- ep = xhci_epid_to_usbep(xhci, xfer->slotid, xfer->epid);
+ ep = xhci_epid_to_usbep(xfer->epctx);
if (!ep) {
DPRINTF("xhci: slot %d has no device\n",
xfer->slotid);
@@ -1926,7 +1960,8 @@ static int xhci_fire_ctl_transfer(XHCIState *xhci, XHCITransfer *xfer)
trb_setup = &xfer->trbs[0];
trb_status = &xfer->trbs[xfer->trb_count-1];
- trace_usb_xhci_xfer_start(xfer, xfer->slotid, xfer->epid, xfer->streamid);
+ trace_usb_xhci_xfer_start(xfer, xfer->epctx->slotid,
+ xfer->epctx->epid, xfer->streamid);
/* at most one Event Data TRB allowed after STATUS */
if (TRB_TYPE(*trb_status) == TR_EVDATA && xfer->trb_count > 2) {
@@ -1969,7 +2004,7 @@ static int xhci_fire_ctl_transfer(XHCIState *xhci, XHCITransfer *xfer)
xhci_complete_packet(xfer);
if (!xfer->running_async && !xfer->running_retry) {
- xhci_kick_ep(xhci, xfer->slotid, xfer->epid, 0);
+ xhci_kick_epctx(xfer->epctx, 0);
}
return 0;
}
@@ -2073,29 +2108,23 @@ static int xhci_submit(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx
xhci_complete_packet(xfer);
if (!xfer->running_async && !xfer->running_retry) {
- xhci_kick_ep(xhci, xfer->slotid, xfer->epid, xfer->streamid);
+ xhci_kick_epctx(xfer->epctx, xfer->streamid);
}
return 0;
}
static int xhci_fire_transfer(XHCIState *xhci, XHCITransfer *xfer, XHCIEPContext *epctx)
{
- trace_usb_xhci_xfer_start(xfer, xfer->slotid, xfer->epid, xfer->streamid);
+ trace_usb_xhci_xfer_start(xfer, xfer->epctx->slotid,
+ xfer->epctx->epid, xfer->streamid);
return xhci_submit(xhci, xfer, epctx);
}
static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
unsigned int epid, unsigned int streamid)
{
- XHCIStreamContext *stctx;
XHCIEPContext *epctx;
- XHCIRing *ring;
- USBEndpoint *ep = NULL;
- uint64_t mfindex;
- int length;
- int i;
- trace_usb_xhci_ep_kick(slotid, epid, streamid);
assert(slotid >= 1 && slotid <= xhci->numslots);
assert(epid >= 1 && epid <= 31);
@@ -2110,11 +2139,27 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
return;
}
+ xhci_kick_epctx(epctx, streamid);
+}
+
+static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
+{
+ XHCIState *xhci = epctx->xhci;
+ XHCIStreamContext *stctx;
+ XHCITransfer *xfer;
+ XHCIRing *ring;
+ USBEndpoint *ep = NULL;
+ uint64_t mfindex;
+ int length;
+ int i;
+
+ trace_usb_xhci_ep_kick(epctx->slotid, epctx->epid, streamid);
+
/* If the device has been detached, but the guest has not noticed this
yet the 2 above checks will succeed, but we must NOT continue */
- if (!xhci->slots[slotid - 1].uport ||
- !xhci->slots[slotid - 1].uport->dev ||
- !xhci->slots[slotid - 1].uport->dev->attached) {
+ if (!xhci->slots[epctx->slotid - 1].uport ||
+ !xhci->slots[epctx->slotid - 1].uport->dev ||
+ !xhci->slots[epctx->slotid - 1].uport->dev->attached) {
return;
}
@@ -2153,6 +2198,7 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
xhci_complete_packet(xfer);
}
assert(!xfer->running_retry);
+ xhci_ep_free_xfer(epctx->retry);
epctx->retry = NULL;
}
@@ -2178,27 +2224,14 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
assert(ring->dequeue != 0);
while (1) {
- XHCITransfer *xfer = &epctx->transfers[epctx->next_xfer];
- if (xfer->running_async || xfer->running_retry) {
- break;
- }
length = xhci_ring_chain_length(xhci, ring);
- if (length < 0) {
+ if (length <= 0) {
break;
- } else if (length == 0) {
- break;
- }
- if (xfer->trbs && xfer->trb_alloced < length) {
- xfer->trb_count = 0;
- xfer->trb_alloced = 0;
- g_free(xfer->trbs);
- xfer->trbs = NULL;
}
- if (!xfer->trbs) {
- xfer->trbs = g_new(XHCITRB, length);
- xfer->trb_alloced = length;
+ xfer = xhci_ep_alloc_xfer(epctx, length);
+ if (xfer == NULL) {
+ break;
}
- xfer->trb_count = length;
for (i = 0; i < length; i++) {
TRBType type;
@@ -2207,33 +2240,27 @@ static void xhci_kick_ep(XHCIState *xhci, unsigned int slotid,
}
xfer->streamid = streamid;
- if (epid == 1) {
- if (xhci_fire_ctl_transfer(xhci, xfer) >= 0) {
- epctx->next_xfer = (epctx->next_xfer + 1) % TD_QUEUE;
- } else {
- DPRINTF("xhci: error firing CTL transfer\n");
- }
+ if (epctx->epid == 1) {
+ xhci_fire_ctl_transfer(xhci, xfer);
} else {
- if (xhci_fire_transfer(xhci, xfer, epctx) >= 0) {
- epctx->next_xfer = (epctx->next_xfer + 1) % TD_QUEUE;
- } else {
- if (!xfer->timed_xfer) {
- DPRINTF("xhci: error firing data transfer\n");
- }
- }
+ xhci_fire_transfer(xhci, xfer, epctx);
+ }
+ if (xfer->complete) {
+ xhci_ep_free_xfer(xfer);
+ xfer = NULL;
}
if (epctx->state == EP_HALTED) {
break;
}
- if (xfer->running_retry) {
+ if (xfer != NULL && xfer->running_retry) {
DPRINTF("xhci: xfer nacked, stopping schedule\n");
epctx->retry = xfer;
break;
}
}
- ep = xhci_epid_to_usbep(xhci, slotid, epid);
+ ep = xhci_epid_to_usbep(epctx);
if (ep) {
usb_device_flush_ep_queue(ep->dev, ep);
}
@@ -3464,7 +3491,10 @@ static void xhci_complete(USBPort *port, USBPacket *packet)
return;
}
xhci_complete_packet(xfer);
- xhci_kick_ep(xfer->xhci, xfer->slotid, xfer->epid, xfer->streamid);
+ xhci_kick_epctx(xfer->epctx, xfer->streamid);
+ if (xfer->complete) {
+ xhci_ep_free_xfer(xfer);
+ }
}
static void xhci_child_detach(USBPort *uport, USBDevice *child)
@@ -3495,17 +3525,20 @@ static int xhci_find_epid(USBEndpoint *ep)
}
}
-static USBEndpoint *xhci_epid_to_usbep(XHCIState *xhci,
- unsigned int slotid, unsigned int epid)
+static USBEndpoint *xhci_epid_to_usbep(XHCIEPContext *epctx)
{
- assert(slotid >= 1 && slotid <= xhci->numslots);
+ USBPort *uport;
+ uint32_t token;
- if (!xhci->slots[slotid - 1].uport) {
+ if (!epctx) {
return NULL;
}
-
- return usb_ep_get(xhci->slots[slotid - 1].uport->dev,
- (epid & 1) ? USB_TOKEN_IN : USB_TOKEN_OUT, epid >> 1);
+ uport = epctx->xhci->slots[epctx->slotid - 1].uport;
+ token = (epctx->epid & 1) ? USB_TOKEN_IN : USB_TOKEN_OUT;
+ if (!uport) {
+ return NULL;
+ }
+ return usb_ep_get(uport->dev, token, epctx->epid >> 1);
}
static void xhci_wakeup_endpoint(USBBus *bus, USBEndpoint *ep,
@@ -3709,8 +3742,7 @@ static void usb_xhci_exit(PCIDevice *dev)
/* destroy msix memory region */
if (dev->msix_table && dev->msix_pba
&& dev->msix_entry_used) {
- memory_region_del_subregion(&xhci->mem, &dev->msix_table_mmio);
- memory_region_del_subregion(&xhci->mem, &dev->msix_pba_mmio);
+ msix_uninit(dev, &xhci->mem, &xhci->mem);
}
usb_bus_release(&xhci->bus);
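
The hcd-xhci.c hunks above replace the fixed per-endpoint transfer array (TD_QUEUE slots walked via next_xfer) with transfers that are allocated per kick and kept on a per-endpoint QTAILQ, freed once complete or nuked. A minimal sketch of that allocate/drain pattern follows, written against the standard <sys/queue.h> TAILQ macros instead of QEMU's equivalent QTAILQ ones; the struct and function names are illustrative only, not the patch's types.

    #include <stdlib.h>
    #include <sys/queue.h>

    struct xfer {
        int *trbs;                   /* per-transfer TRB buffer */
        int trb_count;
        TAILQ_ENTRY(xfer) next;      /* linkage on the endpoint's list */
    };

    TAILQ_HEAD(xfer_list, xfer);

    /* Allocate a transfer sized for 'length' TRBs and append it. */
    static struct xfer *xfer_alloc(struct xfer_list *q, int length)
    {
        struct xfer *x = calloc(1, sizeof(*x));

        if (!x) {
            return NULL;
        }
        x->trbs = calloc(length, sizeof(*x->trbs));
        x->trb_count = length;
        TAILQ_INSERT_TAIL(q, x, next);
        return x;
    }

    /* Unlink and free one transfer. */
    static void xfer_free(struct xfer_list *q, struct xfer *x)
    {
        TAILQ_REMOVE(q, x, next);
        free(x->trbs);
        free(x);
    }

    int main(void)
    {
        struct xfer_list q = TAILQ_HEAD_INITIALIZER(q);

        xfer_alloc(&q, 4);
        xfer_alloc(&q, 8);
        while (!TAILQ_EMPTY(&q)) {       /* drain, as the nuke path does */
            xfer_free(&q, TAILQ_FIRST(&q));
        }
        return 0;
    }
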
diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c
index e94672c155..bd81d71a98 100644
--- a/hw/usb/host-libusb.c
+++ b/hw/usb/host-libusb.c
@@ -743,10 +743,13 @@ static void usb_host_speed_compat(USBHostDevice *s)
rc = libusb_get_ss_endpoint_companion_descriptor
(ctx, endp, &endp_ss_comp);
if (rc == LIBUSB_SUCCESS) {
+ int streams = endp_ss_comp->bmAttributes & 0x1f;
+ if (streams) {
+ compat_full = false;
+ compat_high = false;
+ }
libusb_free_ss_endpoint_companion_descriptor
(endp_ss_comp);
- compat_full = false;
- compat_high = false;
}
#endif
break;
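
The host-libusb.c hunk limits the full/high-speed compatibility downgrade to endpoints whose SuperSpeed companion descriptor actually advertises streams. For reference, the low five bits of that descriptor's bmAttributes encode MaxStreams as a power-of-two exponent on bulk endpoints; a small decode sketch with made-up input values:

    #include <stdint.h>
    #include <stdio.h>

    /* MaxStreams lives in bmAttributes[4:0] of the SuperSpeed endpoint
     * companion descriptor (bulk endpoints): 0 means no streams,
     * otherwise the endpoint supports 2^MaxStreams streams. */
    static unsigned ss_comp_max_streams(uint8_t bmAttributes)
    {
        unsigned exp = bmAttributes & 0x1f;

        return exp ? (1u << exp) : 0;
    }

    int main(void)
    {
        printf("%u\n", ss_comp_max_streams(0x00));  /* 0: no streams   */
        printf("%u\n", ss_comp_max_streams(0x04));  /* 16: 2^4 streams */
        return 0;
    }
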
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index 444672a000..a65723781e 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -105,7 +105,7 @@ struct PacketIdQueue {
struct USBRedirDevice {
USBDevice dev;
/* Properties */
- CharDriverState *cs;
+ CharBackend cs;
uint8_t debug;
char *filter_str;
int32_t bootindex;
@@ -132,6 +132,7 @@ struct USBRedirDevice {
struct usbredirfilter_rule *filter_rules;
int filter_rules_count;
int compatible_speedmask;
+ VMChangeStateEntry *vmstate;
};
#define TYPE_USB_REDIR "usb-redir"
@@ -283,9 +284,10 @@ static gboolean usbredir_write_unblocked(GIOChannel *chan, GIOCondition cond,
static int usbredir_write(void *priv, uint8_t *data, int count)
{
USBRedirDevice *dev = priv;
+ CharDriverState *chr = qemu_chr_fe_get_driver(&dev->cs);
int r;
- if (!dev->cs->be_open) {
+ if (!chr->be_open) {
return 0;
}
@@ -294,10 +296,10 @@ static int usbredir_write(void *priv, uint8_t *data, int count)
return 0;
}
- r = qemu_chr_fe_write(dev->cs, data, count);
+ r = qemu_chr_fe_write(&dev->cs, data, count);
if (r < count) {
if (!dev->watch) {
- dev->watch = qemu_chr_fe_add_watch(dev->cs, G_IO_OUT|G_IO_HUP,
+ dev->watch = qemu_chr_fe_add_watch(&dev->cs, G_IO_OUT | G_IO_HUP,
usbredir_write_unblocked, dev);
}
if (r < 0) {
@@ -1375,7 +1377,7 @@ static void usbredir_realize(USBDevice *udev, Error **errp)
USBRedirDevice *dev = USB_REDIRECT(udev);
int i;
- if (dev->cs == NULL) {
+ if (!qemu_chr_fe_get_driver(&dev->cs)) {
error_setg(errp, QERR_MISSING_PARAMETER, "chardev");
return;
}
@@ -1406,10 +1408,12 @@ static void usbredir_realize(USBDevice *udev, Error **errp)
dev->compatible_speedmask = USB_SPEED_MASK_FULL | USB_SPEED_MASK_HIGH;
/* Let the backend know we are ready */
- qemu_chr_add_handlers(dev->cs, usbredir_chardev_can_read,
- usbredir_chardev_read, usbredir_chardev_event, dev);
+ qemu_chr_fe_set_handlers(&dev->cs, usbredir_chardev_can_read,
+ usbredir_chardev_read, usbredir_chardev_event,
+ dev, NULL, true);
- qemu_add_vm_change_state_handler(usbredir_vm_state_change, dev);
+ dev->vmstate =
+ qemu_add_vm_change_state_handler(usbredir_vm_state_change, dev);
}
static void usbredir_cleanup_device_queues(USBRedirDevice *dev)
@@ -1426,9 +1430,11 @@ static void usbredir_cleanup_device_queues(USBRedirDevice *dev)
static void usbredir_handle_destroy(USBDevice *udev)
{
USBRedirDevice *dev = USB_REDIRECT(udev);
+ CharDriverState *chr = qemu_chr_fe_get_driver(&dev->cs);
+
+ qemu_chr_fe_deinit(&dev->cs);
+ qemu_chr_delete(chr);
- qemu_chr_delete(dev->cs);
- dev->cs = NULL;
/* Note must be done after qemu_chr_close, as that causes a close event */
qemu_bh_delete(dev->chardev_close_bh);
qemu_bh_delete(dev->device_reject_bh);
@@ -1446,6 +1452,7 @@ static void usbredir_handle_destroy(USBDevice *udev)
}
free(dev->filter_rules);
+ qemu_del_vm_change_state_handler(dev->vmstate);
}
static int usbredir_check_filter(USBRedirDevice *dev)
@@ -2036,18 +2043,22 @@ static void usbredir_interrupt_packet(void *priv, uint64_t id,
}
if (ep & USB_DIR_IN) {
+ bool q_was_empty;
+
if (dev->endpoint[EP2I(ep)].interrupt_started == 0) {
DPRINTF("received int packet while not started ep %02X\n", ep);
free(data);
return;
}
- if (QTAILQ_EMPTY(&dev->endpoint[EP2I(ep)].bufpq)) {
- usb_wakeup(usb_ep_get(&dev->dev, USB_TOKEN_IN, ep & 0x0f), 0);
- }
+ q_was_empty = QTAILQ_EMPTY(&dev->endpoint[EP2I(ep)].bufpq);
/* bufp_alloc also adds the packet to the ep queue */
bufp_alloc(dev, data, data_len, interrupt_packet->status, ep, data);
+
+ if (q_was_empty) {
+ usb_wakeup(usb_ep_get(&dev->dev, USB_TOKEN_IN, ep & 0x0f), 0);
+ }
} else {
/*
* We report output interrupt packets as completed directly upon
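
The last redirect.c hunk samples whether the endpoint's buffer queue was empty before bufp_alloc() enqueues the new packet and only issues the wakeup afterwards, so the endpoint is never woken while the queue is still empty. A generic sketch of that check-then-enqueue-then-wake ordering, with hypothetical names rather than the patch's types:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct buf {
        TAILQ_ENTRY(buf) next;
    };
    TAILQ_HEAD(bufq, buf);

    static void wake_endpoint(void)
    {
        /* stand-in for usb_wakeup(): resubmit a pending IN packet */
    }

    /* Enqueue first, then wake only if the queue had been empty, so the
     * consumer cannot be woken and still find nothing queued. */
    static void deliver(struct bufq *q, struct buf *b)
    {
        bool was_empty = TAILQ_EMPTY(q);

        TAILQ_INSERT_TAIL(q, b, next);
        if (was_empty) {
            wake_endpoint();
        }
    }

    int main(void)
    {
        struct bufq q = TAILQ_HEAD_INITIALIZER(q);
        struct buf *b = calloc(1, sizeof(*b));

        deliver(&q, b);
        return 0;
    }
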
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index 174d715e3e..8e676e6c96 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -47,7 +47,7 @@
struct timeval tv; \
\
gettimeofday(&tv, NULL); \
- xen_be_printf(xendev, lvl, "%8ld.%06ld xen-usb(%s):" fmt, \
+ xen_pv_printf(xendev, lvl, "%8ld.%06ld xen-usb(%s):" fmt, \
tv.tv_sec, tv.tv_usec, __func__, ##args); \
}
#define TR_BUS(xendev, fmt, args...) TR(xendev, 2, fmt, ##args)
@@ -153,15 +153,15 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
}
if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
- xen_be_printf(xendev, 0, "bad number of segments in request (%d)\n",
+ xen_pv_printf(xendev, 0, "bad number of segments in request (%d)\n",
nr_segs);
return -EINVAL;
}
for (i = 0; i < nr_segs; i++) {
if ((unsigned)usbback_req->req.seg[i].offset +
- (unsigned)usbback_req->req.seg[i].length > PAGE_SIZE) {
- xen_be_printf(xendev, 0, "segment crosses page boundary\n");
+ (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) {
+ xen_pv_printf(xendev, 0, "segment crosses page boundary\n");
return -EINVAL;
}
}
@@ -183,7 +183,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
seg = usbback_req->req.seg + i;
- addr = usbback_req->buffer + i * PAGE_SIZE + seg->offset;
+ addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset;
qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length);
}
}
@@ -199,7 +199,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
*/
if (!usbback_req->nr_extra_segs) {
- xen_be_printf(xendev, 0, "iso request without descriptor segments\n");
+ xen_pv_printf(xendev, 0, "iso request without descriptor segments\n");
return -EINVAL;
}
@@ -314,7 +314,7 @@ static void usbback_do_response(struct usbback_req *usbback_req, int32_t status,
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify);
if (notify) {
- xen_be_send_notify(xendev);
+ xen_pv_send_notify(xendev);
}
}
@@ -551,14 +551,14 @@ static void usbback_dispatch(struct usbback_req *usbback_req)
ret = usbback_init_packet(usbback_req);
if (ret) {
- xen_be_printf(&usbif->xendev, 0, "invalid request\n");
+ xen_pv_printf(&usbif->xendev, 0, "invalid request\n");
ret = -ESHUTDOWN;
goto fail_free_urb;
}
ret = usbback_gnttab_map(usbback_req);
if (ret) {
- xen_be_printf(&usbif->xendev, 0, "invalid buffer, ret=%d\n", ret);
+ xen_pv_printf(&usbif->xendev, 0, "invalid buffer, ret=%d\n", ret);
ret = -ESHUTDOWN;
goto fail_free_urb;
}
@@ -590,7 +590,7 @@ static void usbback_hotplug_notify(struct usbback_info *usbif)
/* Check for full ring. */
if ((RING_SIZE(ring) - ring->rsp_prod_pvt - ring->req_cons) == 0) {
- xen_be_send_notify(&usbif->xendev);
+ xen_pv_send_notify(&usbif->xendev);
return;
}
@@ -609,7 +609,7 @@ static void usbback_hotplug_notify(struct usbback_info *usbif)
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
if (notify) {
- xen_be_send_notify(&usbif->xendev);
+ xen_pv_send_notify(&usbif->xendev);
}
TR_BUS(&usbif->xendev, "hotplug port %d speed %d\n", usb_hp->port,
@@ -646,7 +646,7 @@ static void usbback_bh(void *opaque)
if (RING_REQUEST_PROD_OVERFLOW(urb_ring, rp)) {
rc = urb_ring->rsp_prod_pvt;
- xen_be_printf(&usbif->xendev, 0, "domU provided bogus ring requests "
+ xen_pv_printf(&usbif->xendev, 0, "domU provided bogus ring requests "
"(%#x - %#x = %u). Halting ring processing.\n",
rp, rc, rp - rc);
usbif->ring_error = true;
@@ -712,15 +712,10 @@ static void usbback_portid_detach(struct usbback_info *usbif, unsigned port)
static void usbback_portid_remove(struct usbback_info *usbif, unsigned port)
{
- USBPort *p;
-
if (!usbif->ports[port - 1].dev) {
return;
}
- p = &(usbif->ports[port - 1].port);
- snprintf(p->path, sizeof(p->path), "%d", 99);
-
object_unparent(OBJECT(usbif->ports[port - 1].dev));
usbif->ports[port - 1].dev = NULL;
usbback_portid_detach(usbif, port);
@@ -733,10 +728,10 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port,
{
unsigned speed;
char *portname;
- USBPort *p;
Error *local_err = NULL;
QDict *qdict;
QemuOpts *opts;
+ char *tmp;
if (usbif->ports[port - 1].dev) {
return;
@@ -744,16 +739,21 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port,
portname = strchr(busid, '-');
if (!portname) {
- xen_be_printf(&usbif->xendev, 0, "device %s illegal specification\n",
+ xen_pv_printf(&usbif->xendev, 0, "device %s illegal specification\n",
busid);
return;
}
portname++;
- p = &(usbif->ports[port - 1].port);
- snprintf(p->path, sizeof(p->path), "%s", portname);
qdict = qdict_new();
qdict_put(qdict, "driver", qstring_from_str("usb-host"));
+ tmp = g_strdup_printf("%s.0", usbif->xendev.qdev.id);
+ qdict_put(qdict, "bus", qstring_from_str(tmp));
+ g_free(tmp);
+ tmp = g_strdup_printf("%s-%u", usbif->xendev.qdev.id, port);
+ qdict_put(qdict, "id", qstring_from_str(tmp));
+ g_free(tmp);
+ qdict_put(qdict, "port", qint_from_int(port));
qdict_put(qdict, "hostbus", qint_from_int(atoi(busid)));
qdict_put(qdict, "hostport", qstring_from_str(portname));
opts = qemu_opts_from_qdict(qemu_find_opts("device"), qdict, &local_err);
@@ -765,7 +765,6 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port,
goto err;
}
QDECREF(qdict);
- snprintf(p->path, sizeof(p->path), "%d", port);
speed = usbif->ports[port - 1].dev->speed;
switch (speed) {
case USB_SPEED_LOW:
@@ -783,7 +782,7 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port,
break;
}
if (speed == USBIF_SPEED_NONE) {
- xen_be_printf(&usbif->xendev, 0, "device %s wrong speed\n", busid);
+ xen_pv_printf(&usbif->xendev, 0, "device %s wrong speed\n", busid);
object_unparent(OBJECT(usbif->ports[port - 1].dev));
usbif->ports[port - 1].dev = NULL;
return;
@@ -799,8 +798,7 @@ static void usbback_portid_add(struct usbback_info *usbif, unsigned port,
err:
QDECREF(qdict);
- snprintf(p->path, sizeof(p->path), "%d", 99);
- xen_be_printf(&usbif->xendev, 0, "device %s could not be opened\n", busid);
+ xen_pv_printf(&usbif->xendev, 0, "device %s could not be opened\n", busid);
}
static void usbback_process_port(struct usbback_info *usbif, unsigned port)
@@ -811,7 +809,7 @@ static void usbback_process_port(struct usbback_info *usbif, unsigned port)
snprintf(node, sizeof(node), "port/%d", port);
busid = xenstore_read_be_str(&usbif->xendev, node);
if (busid == NULL) {
- xen_be_printf(&usbif->xendev, 0, "xenstore_read %s failed\n", node);
+ xen_pv_printf(&usbif->xendev, 0, "xenstore_read %s failed\n", node);
return;
}
@@ -834,7 +832,7 @@ static void usbback_disconnect(struct XenDevice *xendev)
usbif = container_of(xendev, struct usbback_info, xendev);
- xen_be_unbind_evtchn(xendev);
+ xen_pv_unbind_evtchn(xendev);
if (usbif->urb_sring) {
xengnttab_unmap(xendev->gnttabdev, usbif->urb_sring, 1);
@@ -868,15 +866,15 @@ static int usbback_connect(struct XenDevice *xendev)
usbif = container_of(xendev, struct usbback_info, xendev);
if (xenstore_read_fe_int(xendev, "urb-ring-ref", &urb_ring_ref)) {
- xen_be_printf(xendev, 0, "error reading urb-ring-ref\n");
+ xen_pv_printf(xendev, 0, "error reading urb-ring-ref\n");
return -1;
}
if (xenstore_read_fe_int(xendev, "conn-ring-ref", &conn_ring_ref)) {
- xen_be_printf(xendev, 0, "error reading conn-ring-ref\n");
+ xen_pv_printf(xendev, 0, "error reading conn-ring-ref\n");
return -1;
}
if (xenstore_read_fe_int(xendev, "event-channel", &xendev->remote_port)) {
- xen_be_printf(xendev, 0, "error reading event-channel\n");
+ xen_pv_printf(xendev, 0, "error reading event-channel\n");
return -1;
}
@@ -887,7 +885,7 @@ static int usbback_connect(struct XenDevice *xendev)
conn_ring_ref,
PROT_READ | PROT_WRITE);
if (!usbif->urb_sring || !usbif->conn_sring) {
- xen_be_printf(xendev, 0, "error mapping rings\n");
+ xen_pv_printf(xendev, 0, "error mapping rings\n");
usbback_disconnect(xendev);
return -1;
}
@@ -899,7 +897,7 @@ static int usbback_connect(struct XenDevice *xendev)
xen_be_bind_evtchn(xendev);
- xen_be_printf(xendev, 1, "urb-ring-ref %d, conn-ring-ref %d, "
+ xen_pv_printf(xendev, 1, "urb-ring-ref %d, conn-ring-ref %d, "
"remote port %d, local port %d\n", urb_ring_ref,
conn_ring_ref, xendev->remote_port, xendev->local_port);
@@ -935,12 +933,12 @@ static int usbback_init(struct XenDevice *xendev)
if (xenstore_read_be_int(xendev, "num-ports", &usbif->num_ports) ||
usbif->num_ports < 1 || usbif->num_ports > USBBACK_MAXPORTS) {
- xen_be_printf(xendev, 0, "num-ports not readable or out of bounds\n");
+ xen_pv_printf(xendev, 0, "num-ports not readable or out of bounds\n");
return -1;
}
if (xenstore_read_be_int(xendev, "usb-ver", &usbif->usb_ver) ||
(usbif->usb_ver != USB_VER_USB11 && usbif->usb_ver != USB_VER_USB20)) {
- xen_be_printf(xendev, 0, "usb-ver not readable or out of bounds\n");
+ xen_pv_printf(xendev, 0, "usb-ver not readable or out of bounds\n");
return -1;
}
@@ -1012,13 +1010,13 @@ static void usbback_alloc(struct XenDevice *xendev)
usbif = container_of(xendev, struct usbback_info, xendev);
- usb_bus_new(&usbif->bus, sizeof(usbif->bus), &xen_usb_bus_ops, xen_sysdev);
+ usb_bus_new(&usbif->bus, sizeof(usbif->bus), &xen_usb_bus_ops,
+ DEVICE(&xendev->qdev));
for (i = 0; i < USBBACK_MAXPORTS; i++) {
p = &(usbif->ports[i].port);
usb_register_port(&usbif->bus, p, usbif, i, &xen_usb_port_ops,
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL |
USB_SPEED_MASK_HIGH);
- snprintf(p->path, sizeof(p->path), "%d", 99);
}
QTAILQ_INIT(&usbif->req_free_q);
@@ -1028,7 +1026,7 @@ static void usbback_alloc(struct XenDevice *xendev)
/* max_grants: for each request and for the rings (request and connect). */
max_grants = USBIF_MAX_SEGMENTS_PER_REQUEST * USB_URB_RING_SIZE + 2;
if (xengnttab_set_max_grants(xendev->gnttabdev, max_grants) < 0) {
- xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
+ xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
strerror(errno));
}
}
@@ -1066,7 +1064,6 @@ static int usbback_free(struct XenDevice *xendev)
}
usb_bus_release(&usbif->bus);
- object_unparent(OBJECT(&usbif->bus));
TR_BUS(xendev, "finished\n");
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index b313e7c2c6..801578b4b9 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -34,6 +34,7 @@
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "trace.h"
+#include "qapi/error.h"
struct vfio_group_head vfio_group_list =
QLIST_HEAD_INITIALIZER(vfio_group_list);
@@ -293,11 +294,10 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
section->offset_within_address_space & (1ULL << 63);
}
-static void vfio_iommu_map_notify(Notifier *n, void *data)
+static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
VFIOContainer *container = giommu->container;
- IOMMUTLBEntry *iotlb = data;
hwaddr iova = iotlb->iova + giommu->iommu_offset;
MemoryRegion *mr;
hwaddr xlat;
@@ -454,6 +454,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
section->offset_within_region;
giommu->container = container;
giommu->n.notify = vfio_iommu_map_notify;
+ giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL;
QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
@@ -609,16 +610,16 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
return NULL;
}
-static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
- struct vfio_region_info *info)
+static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
+ struct vfio_region_info *info)
{
struct vfio_info_cap_header *hdr;
struct vfio_region_info_cap_sparse_mmap *sparse;
- int i;
+ int i, j;
hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
if (!hdr) {
- return;
+ return -ENODEV;
}
sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
@@ -626,16 +627,24 @@ static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
region->nr, sparse->nr_areas);
- region->nr_mmaps = sparse->nr_areas;
- region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
+ region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
- for (i = 0; i < region->nr_mmaps; i++) {
- region->mmaps[i].offset = sparse->areas[i].offset;
- region->mmaps[i].size = sparse->areas[i].size;
- trace_vfio_region_sparse_mmap_entry(i, region->mmaps[i].offset,
- region->mmaps[i].offset +
- region->mmaps[i].size);
+ for (i = 0, j = 0; i < sparse->nr_areas; i++) {
+ trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
+ sparse->areas[i].offset +
+ sparse->areas[i].size);
+
+ if (sparse->areas[i].size) {
+ region->mmaps[j].offset = sparse->areas[i].offset;
+ region->mmaps[j].size = sparse->areas[i].size;
+ j++;
+ }
}
+
+ region->nr_mmaps = j;
+ region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
+
+ return 0;
}
int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
@@ -661,12 +670,11 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
region, name, region->size);
if (!vbasedev->no_mmap &&
- region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
- !(region->size & ~qemu_real_host_page_mask)) {
+ region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
- vfio_setup_region_sparse_mmaps(region, info);
+ ret = vfio_setup_region_sparse_mmaps(region, info);
- if (!region->nr_mmaps) {
+ if (ret) {
region->nr_mmaps = 1;
region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
region->mmaps[0].offset = 0;
@@ -723,12 +731,11 @@ int vfio_region_mmap(VFIORegion *region)
name = g_strdup_printf("%s mmaps[%d]",
memory_region_name(region->mem), i);
- memory_region_init_ram_ptr(&region->mmaps[i].mem,
- memory_region_owner(region->mem),
- name, region->mmaps[i].size,
- region->mmaps[i].mmap);
+ memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
+ memory_region_owner(region->mem),
+ name, region->mmaps[i].size,
+ region->mmaps[i].mmap);
g_free(name);
- memory_region_set_skip_dump(&region->mmaps[i].mem);
memory_region_add_subregion(region->mem, region->mmaps[i].offset,
&region->mmaps[i].mem);
@@ -900,7 +907,8 @@ static void vfio_put_address_space(VFIOAddressSpace *space)
}
}
-static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
+static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
+ Error **errp)
{
VFIOContainer *container;
int ret, fd;
@@ -918,15 +926,15 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
fd = qemu_open("/dev/vfio/vfio", O_RDWR);
if (fd < 0) {
- error_report("vfio: failed to open /dev/vfio/vfio: %m");
+ error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
ret = -errno;
goto put_space_exit;
}
ret = ioctl(fd, VFIO_GET_API_VERSION);
if (ret != VFIO_API_VERSION) {
- error_report("vfio: supported vfio version: %d, "
- "reported version: %d", VFIO_API_VERSION, ret);
+ error_setg(errp, "supported vfio version: %d, "
+ "reported version: %d", VFIO_API_VERSION, ret);
ret = -EINVAL;
goto close_fd_exit;
}
@@ -941,7 +949,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
- error_report("vfio: failed to set group container: %m");
+ error_setg_errno(errp, errno, "failed to set group container");
ret = -errno;
goto free_container_exit;
}
@@ -949,7 +957,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
if (ret) {
- error_report("vfio: failed to set iommu for container: %m");
+ error_setg_errno(errp, errno, "failed to set iommu for container");
ret = -errno;
goto free_container_exit;
}
@@ -976,7 +984,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
- error_report("vfio: failed to set group container: %m");
+ error_setg_errno(errp, errno, "failed to set group container");
ret = -errno;
goto free_container_exit;
}
@@ -984,7 +992,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
if (ret) {
- error_report("vfio: failed to set iommu for container: %m");
+ error_setg_errno(errp, errno, "failed to set iommu for container");
ret = -errno;
goto free_container_exit;
}
@@ -997,7 +1005,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
if (!v2) {
ret = ioctl(fd, VFIO_IOMMU_ENABLE);
if (ret) {
- error_report("vfio: failed to enable container: %m");
+ error_setg_errno(errp, errno, "failed to enable container");
ret = -errno;
goto free_container_exit;
}
@@ -1008,7 +1016,9 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
&address_space_memory);
if (container->error) {
memory_listener_unregister(&container->prereg_listener);
- error_report("vfio: RAM memory listener initialization failed for container");
+ ret = container->error;
+ error_setg(errp,
+ "RAM memory listener initialization failed for container");
goto free_container_exit;
}
}
@@ -1016,7 +1026,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
info.argsz = sizeof(info);
ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
if (ret) {
- error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
+ error_setg_errno(errp, errno,
+ "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
ret = -errno;
if (v2) {
memory_listener_unregister(&container->prereg_listener);
@@ -1033,6 +1044,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
*/
ret = vfio_spapr_remove_window(container, info.dma32_window_start);
if (ret) {
+ error_setg_errno(errp, -ret,
+ "failed to remove existing window");
goto free_container_exit;
}
} else {
@@ -1043,7 +1056,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
0x1000);
}
} else {
- error_report("vfio: No available IOMMU models");
+ error_setg(errp, "No available IOMMU models");
ret = -EINVAL;
goto free_container_exit;
}
@@ -1054,7 +1067,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
if (container->error) {
ret = container->error;
- error_report("vfio: memory listener initialization failed for container");
+ error_setg_errno(errp, -ret,
+ "memory listener initialization failed for container");
goto listener_release_exit;
}
@@ -1115,7 +1129,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
}
}
-VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
+VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
{
VFIOGroup *group;
char path[32];
@@ -1127,8 +1141,8 @@ VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
if (group->container->space->as == as) {
return group;
} else {
- error_report("vfio: group %d used in multiple address spaces",
- group->groupid);
+ error_setg(errp, "group %d used in multiple address spaces",
+ group->groupid);
return NULL;
}
}
@@ -1139,27 +1153,29 @@ VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
group->fd = qemu_open(path, O_RDWR);
if (group->fd < 0) {
- error_report("vfio: error opening %s: %m", path);
+ error_setg_errno(errp, errno, "failed to open %s", path);
goto free_group_exit;
}
if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
- error_report("vfio: error getting group status: %m");
+ error_setg_errno(errp, errno, "failed to get group %d status", groupid);
goto close_fd_exit;
}
if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
- error_report("vfio: error, group %d is not viable, please ensure "
- "all devices within the iommu_group are bound to their "
- "vfio bus driver.", groupid);
+ error_setg(errp, "group %d is not viable", groupid);
+ error_append_hint(errp,
+ "Please ensure all devices within the iommu_group "
+ "are bound to their vfio bus driver.\n");
goto close_fd_exit;
}
group->groupid = groupid;
QLIST_INIT(&group->device_list);
- if (vfio_connect_container(group, as)) {
- error_report("vfio: failed to setup container for group %d", groupid);
+ if (vfio_connect_container(group, as, errp)) {
+ error_prepend(errp, "failed to setup container for group %d: ",
+ groupid);
goto close_fd_exit;
}
@@ -1201,23 +1217,24 @@ void vfio_put_group(VFIOGroup *group)
}
int vfio_get_device(VFIOGroup *group, const char *name,
- VFIODevice *vbasedev)
+ VFIODevice *vbasedev, Error **errp)
{
struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
int ret, fd;
fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
if (fd < 0) {
- error_report("vfio: error getting device %s from group %d: %m",
- name, group->groupid);
- error_printf("Verify all devices in group %d are bound to vfio-<bus> "
- "or pci-stub and not already in use\n", group->groupid);
+ error_setg_errno(errp, errno, "error getting device from group %d",
+ group->groupid);
+ error_append_hint(errp,
+ "Verify all devices in group %d are bound to vfio-<bus> "
+ "or pci-stub and not already in use\n", group->groupid);
return fd;
}
ret = ioctl(fd, VFIO_DEVICE_GET_INFO, &dev_info);
if (ret) {
- error_report("vfio: error getting device info: %m");
+ error_setg_errno(errp, errno, "error getting device info");
close(fd);
return ret;
}
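
In common.c, vfio_setup_region_sparse_mmaps() now skips zero-sized sparse-mmap areas, compacts the survivors, and returns -ENODEV when no sparse-mmap capability exists instead of silently doing nothing. A minimal sketch of just the compaction step, using illustrative types rather than the VFIO structures:

    #include <stdint.h>
    #include <stdlib.h>

    struct area {
        uint64_t offset;
        uint64_t size;
    };

    /* Copy only the non-empty areas into a freshly allocated, compacted
     * array; returns the number kept, or -1 on allocation failure. */
    static int keep_nonempty(const struct area *in, int n, struct area **out)
    {
        struct area *kept = calloc(n ? n : 1, sizeof(*kept));
        int i, j = 0;

        if (!kept) {
            return -1;
        }
        for (i = 0; i < n; i++) {
            if (in[i].size) {
                kept[j++] = in[i];
            }
        }
        if (j && j < n) {
            struct area *shrunk = realloc(kept, j * sizeof(*kept));
            if (shrunk) {
                kept = shrunk;        /* shrink to fit, like g_realloc */
            }
        }
        *out = kept;
        return j;
    }

    int main(void)
    {
        struct area in[] = { { 0, 0x1000 }, { 0x1000, 0 }, { 0x2000, 0x800 } };
        struct area *out = NULL;
        int kept = keep_nonempty(in, 3, &out);

        free(out);
        return kept == 2 ? 0 : 1;
    }
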
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index bec694c8d8..811eecd1b4 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -898,7 +898,7 @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque,
{
VFIOrtl8168Quirk *rtl = opaque;
VFIOPCIDevice *vdev = rtl->vdev;
- uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x74, size);
+ uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size);
if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
hwaddr offset = rtl->addr & 0xfff;
@@ -1056,7 +1056,7 @@ typedef struct VFIOIGDQuirk {
* of the IGD device.
*/
int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
- struct vfio_region_info *info)
+ struct vfio_region_info *info, Error **errp)
{
int ret;
@@ -1064,7 +1064,7 @@ int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
ret = pread(vdev->vbasedev.fd, vdev->igd_opregion,
info->size, info->offset);
if (ret != info->size) {
- error_report("vfio: Error reading IGD OpRegion");
+ error_setg(errp, "failed to read IGD OpRegion");
g_free(vdev->igd_opregion);
vdev->igd_opregion = NULL;
return -EINVAL;
@@ -1363,6 +1363,7 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
uint64_t *bdsm_size;
uint32_t gmch;
uint16_t cmd_orig, cmd;
+ Error *err = NULL;
/*
* This must be an Intel VGA device at address 00:02.0 for us to even
@@ -1464,7 +1465,8 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
* try to enable it. Probably shouldn't be using legacy mode without VGA,
* but also no point in us enabling VGA if disabled in hardware.
*/
- if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev)) {
+ if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) {
+ error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
error_report("IGD device %s failed to enable VGA access, "
"legacy mode disabled", vdev->vbasedev.name);
goto out;
@@ -1487,10 +1489,10 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
}
/* Setup OpRegion access */
- ret = vfio_pci_igd_opregion_init(vdev, opregion);
+ ret = vfio_pci_igd_opregion_init(vdev, opregion, &err);
if (ret) {
- error_report("IGD device %s failed to setup OpRegion, "
- "legacy mode disabled", vdev->vbasedev.name);
+ error_append_hint(&err, "IGD legacy mode disabled\n");
+ error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
goto out;
}
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 7bfa17ce38..d7dbe0e3e0 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -100,7 +100,7 @@ static void vfio_intx_eoi(VFIODevice *vbasedev)
vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}
-static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
+static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
struct kvm_irqfd irqfd = {
@@ -126,7 +126,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
/* Get an eventfd for resample/unmask */
if (event_notifier_init(&vdev->intx.unmask, 0)) {
- error_report("vfio: Error: event_notifier_init failed eoi");
+ error_setg(errp, "event_notifier_init failed eoi");
goto fail;
}
@@ -134,7 +134,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
- error_report("vfio: Error: Failed to setup resample irqfd: %m");
+ error_setg_errno(errp, errno, "failed to setup resample irqfd");
goto fail_irqfd;
}
@@ -153,7 +153,7 @@ static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev)
ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
g_free(irq_set);
if (ret) {
- error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
+ error_setg_errno(errp, -ret, "failed to setup INTx unmask fd");
goto fail_vfio;
}
@@ -222,6 +222,7 @@ static void vfio_intx_update(PCIDevice *pdev)
{
VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
PCIINTxRoute route;
+ Error *err = NULL;
if (vdev->interrupt != VFIO_INT_INTx) {
return;
@@ -244,18 +245,22 @@ static void vfio_intx_update(PCIDevice *pdev)
return;
}
- vfio_intx_enable_kvm(vdev);
+ vfio_intx_enable_kvm(vdev, &err);
+ if (err) {
+ error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
+ }
    /* Re-enable the interrupt in case we missed an EOI */
vfio_intx_eoi(&vdev->vbasedev);
}
-static int vfio_intx_enable(VFIOPCIDevice *vdev)
+static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
int ret, argsz;
struct vfio_irq_set *irq_set;
int32_t *pfd;
+ Error *err = NULL;
if (!pin) {
return 0;
@@ -279,7 +284,7 @@ static int vfio_intx_enable(VFIOPCIDevice *vdev)
ret = event_notifier_init(&vdev->intx.interrupt, 0);
if (ret) {
- error_report("vfio: Error: event_notifier_init failed");
+ error_setg_errno(errp, -ret, "event_notifier_init failed");
return ret;
}
@@ -299,13 +304,16 @@ static int vfio_intx_enable(VFIOPCIDevice *vdev)
ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
g_free(irq_set);
if (ret) {
- error_report("vfio: Error: Failed to setup INTx fd: %m");
+ error_setg_errno(errp, -ret, "failed to setup INTx fd");
qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
event_notifier_cleanup(&vdev->intx.interrupt);
return -errno;
}
- vfio_intx_enable_kvm(vdev);
+ vfio_intx_enable_kvm(vdev, &err);
+ if (err) {
+ error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
+ }
vdev->interrupt = VFIO_INT_INTx;
@@ -496,7 +504,9 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
vfio_update_kvm_msi_virq(vector, *msg, pdev);
}
} else {
- vfio_add_kvm_msi_virq(vdev, vector, nr, true);
+ if (msg) {
+ vfio_add_kvm_msi_virq(vdev, vector, nr, true);
+ }
}
/*
@@ -705,6 +715,7 @@ retry:
static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
+ Error *err = NULL;
int i;
for (i = 0; i < vdev->nr_vectors; i++) {
@@ -724,7 +735,10 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
vdev->nr_vectors = 0;
vdev->interrupt = VFIO_INT_NONE;
- vfio_intx_enable(vdev);
+ vfio_intx_enable(vdev, &err);
+ if (err) {
+ error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
+ }
}
static void vfio_msix_disable(VFIOPCIDevice *vdev)
@@ -1057,6 +1071,55 @@ static const MemoryRegionOps vfio_vga_ops = {
};
/*
+ * Expand memory region of sub-page(size < PAGE_SIZE) MMIO BAR to page
+ * size if the BAR is in an exclusive page in host so that we could map
+ * this BAR to guest. But this sub-page BAR may not occupy an exclusive
+ * page in guest. So we should set the priority of the expanded memory
+ * region to zero in case of overlap with BARs which share the same page
+ * with the sub-page BAR in guest. Besides, we should also recover the
+ * size of this sub-page BAR when its base address is changed in guest
+ * and not page aligned any more.
+ */
+static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
+{
+ VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
+ VFIORegion *region = &vdev->bars[bar].region;
+ MemoryRegion *mmap_mr, *mr;
+ PCIIORegion *r;
+ pcibus_t bar_addr;
+ uint64_t size = region->size;
+
+ /* Make sure that the whole region is allowed to be mmapped */
+ if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
+ region->mmaps[0].size != region->size) {
+ return;
+ }
+
+ r = &pdev->io_regions[bar];
+ bar_addr = r->addr;
+ mr = region->mem;
+ mmap_mr = &region->mmaps[0].mem;
+
+ /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
+ if (bar_addr != PCI_BAR_UNMAPPED &&
+ !(bar_addr & ~qemu_real_host_page_mask)) {
+ size = qemu_real_host_page_size;
+ }
+
+ memory_region_transaction_begin();
+
+ memory_region_set_size(mr, size);
+ memory_region_set_size(mmap_mr, size);
+ if (size != region->size && memory_region_is_mapped(mr)) {
+ memory_region_del_subregion(r->address_space, mr);
+ memory_region_add_subregion_overlap(r->address_space,
+ bar_addr, mr, 0);
+ }
+
+ memory_region_transaction_commit();
+}
+
+/*
* PCI config space
*/
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
@@ -1139,6 +1202,24 @@ void vfio_pci_write_config(PCIDevice *pdev,
} else if (was_enabled && !is_enabled) {
vfio_msix_disable(vdev);
}
+ } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
+ range_covers_byte(addr, len, PCI_COMMAND)) {
+ pcibus_t old_addr[PCI_NUM_REGIONS - 1];
+ int bar;
+
+ for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+ old_addr[bar] = pdev->io_regions[bar].addr;
+ }
+
+ pci_default_write_config(pdev, addr, val, len);
+
+ for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+ if (old_addr[bar] != pdev->io_regions[bar].addr &&
+ pdev->io_regions[bar].size > 0 &&
+ pdev->io_regions[bar].size < qemu_real_host_page_size) {
+ vfio_sub_page_bar_update_mapping(pdev, bar);
+ }
+ }
} else {
/* Write everything to QEMU to keep emulated bits correct */
pci_default_write_config(pdev, addr, val, len);
@@ -1166,7 +1247,7 @@ static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
}
}
-static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
+static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
uint16_t ctrl;
bool msi_64bit, msi_maskbit;
@@ -1175,6 +1256,7 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
+ error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
return -errno;
}
ctrl = le16_to_cpu(ctrl);
@@ -1190,8 +1272,8 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
if (ret == -ENOTSUP) {
return 0;
}
- error_prepend(&err, "vfio: msi_init failed: ");
- error_report_err(err);
+ error_prepend(&err, "msi_init failed: ");
+ error_propagate(errp, err);
return ret;
}
vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
@@ -1275,7 +1357,7 @@ static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
* need to first look for where the MSI-X table lives. So we
* unfortunately split MSI-X setup across two functions.
*/
-static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
+static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
uint8_t pos;
uint16_t ctrl;
@@ -1285,22 +1367,25 @@ static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
if (!pos) {
- return 0;
+ return;
}
if (pread(fd, &ctrl, sizeof(ctrl),
vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
- return -errno;
+ error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
+ return;
}
if (pread(fd, &table, sizeof(table),
vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
- return -errno;
+ error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
+ return;
}
if (pread(fd, &pba, sizeof(pba),
vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
- return -errno;
+ error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
+ return;
}
ctrl = le16_to_cpu(ctrl);
@@ -1330,10 +1415,10 @@ static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
(vdev->device_id & 0xff00) == 0x5800) {
msix->pba_offset = 0x1000;
} else {
- error_report("vfio: Hardware reports invalid configuration, "
- "MSIX PBA outside of specified BAR");
+ error_setg(errp, "hardware reports invalid configuration, "
+ "MSIX PBA outside of specified BAR");
g_free(msix);
- return -EINVAL;
+ return;
}
}
@@ -1342,11 +1427,9 @@ static int vfio_msix_early_setup(VFIOPCIDevice *vdev)
vdev->msix = msix;
vfio_pci_fixup_msix_region(vdev);
-
- return 0;
}
-static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
+static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
int ret;
@@ -1361,7 +1444,7 @@ static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos)
if (ret == -ENOTSUP) {
return 0;
}
- error_report("vfio: msix_init failed");
+ error_setg(errp, "msix_init failed");
return ret;
}
@@ -1546,7 +1629,8 @@ static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
-static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
+static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
+ Error **errp)
{
uint16_t flags;
uint8_t type;
@@ -1558,8 +1642,8 @@ static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
type != PCI_EXP_TYPE_LEG_END &&
type != PCI_EXP_TYPE_RC_END) {
- error_report("vfio: Assignment of PCIe type 0x%x "
- "devices is not currently supported", type);
+ error_setg(errp, "assignment of PCIe type 0x%x "
+ "devices is not currently supported", type);
return -EINVAL;
}
@@ -1693,7 +1777,7 @@ static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
}
}
-static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
+static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
{
PCIDevice *pdev = &vdev->pdev;
uint8_t cap_id, next, size;
@@ -1718,9 +1802,9 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
* will be changed as we unwind the stack.
*/
if (next) {
- ret = vfio_add_std_cap(vdev, next);
+ ret = vfio_add_std_cap(vdev, next, errp);
if (ret) {
- return ret;
+ goto out;
}
} else {
/* Begin the rebuild, use QEMU emulated list bits */
@@ -1734,40 +1818,40 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
switch (cap_id) {
case PCI_CAP_ID_MSI:
- ret = vfio_msi_setup(vdev, pos);
+ ret = vfio_msi_setup(vdev, pos, errp);
break;
case PCI_CAP_ID_EXP:
vfio_check_pcie_flr(vdev, pos);
- ret = vfio_setup_pcie_cap(vdev, pos, size);
+ ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
break;
case PCI_CAP_ID_MSIX:
- ret = vfio_msix_setup(vdev, pos);
+ ret = vfio_msix_setup(vdev, pos, errp);
break;
case PCI_CAP_ID_PM:
vfio_check_pm_reset(vdev, pos);
vdev->pm_cap = pos;
- ret = pci_add_capability(pdev, cap_id, pos, size);
+ ret = pci_add_capability2(pdev, cap_id, pos, size, errp);
break;
case PCI_CAP_ID_AF:
vfio_check_af_flr(vdev, pos);
- ret = pci_add_capability(pdev, cap_id, pos, size);
+ ret = pci_add_capability2(pdev, cap_id, pos, size, errp);
break;
default:
- ret = pci_add_capability(pdev, cap_id, pos, size);
+ ret = pci_add_capability2(pdev, cap_id, pos, size, errp);
break;
}
-
+out:
if (ret < 0) {
- error_report("vfio: %s Error adding PCI capability "
- "0x%x[0x%x]@0x%x: %d", vdev->vbasedev.name,
- cap_id, size, pos, ret);
+ error_prepend(errp,
+ "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
+ cap_id, size, pos);
return ret;
}
return 0;
}
-static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
+static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
{
PCIDevice *pdev = &vdev->pdev;
uint32_t header;
@@ -1778,7 +1862,7 @@ static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
/* Only add extended caps if we have them and the guest can see them */
if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
!pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
- return 0;
+ return;
}
/*
@@ -1843,10 +1927,10 @@ static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
}
g_free(config);
- return 0;
+ return;
}
-static int vfio_add_capabilities(VFIOPCIDevice *vdev)
+static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
{
PCIDevice *pdev = &vdev->pdev;
int ret;
@@ -1856,12 +1940,13 @@ static int vfio_add_capabilities(VFIOPCIDevice *vdev)
return 0; /* Nothing to add */
}
- ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
+ ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
if (ret) {
return ret;
}
- return vfio_add_ext_cap(vdev);
+ vfio_add_ext_cap(vdev);
+ return 0;
}
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
@@ -1903,7 +1988,24 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
- vfio_intx_enable(vdev);
+ Error *err = NULL;
+ int nr;
+
+ vfio_intx_enable(vdev, &err);
+ if (err) {
+ error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
+ }
+
+ for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
+ off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
+ uint32_t val = 0;
+ uint32_t len = sizeof(val);
+
+ if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
+ error_report("%s(%s) reset bar %d failed: %m", __func__,
+ vdev->vbasedev.name, nr);
+ }
+ }
}
static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
@@ -1928,7 +2030,9 @@ static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
- vfio_pci_pre_reset(vdev);
+ if (!single) {
+ vfio_pci_pre_reset(vdev);
+ }
vdev->vbasedev.needs_reset = false;
info = g_malloc0(sizeof(*info));
@@ -2086,7 +2190,9 @@ out:
}
}
out_single:
- vfio_pci_post_reset(vdev);
+ if (!single) {
+ vfio_pci_post_reset(vdev);
+ }
g_free(info);
return ret;
@@ -2132,7 +2238,7 @@ static VFIODeviceOps vfio_pci_ops = {
.vfio_eoi = vfio_intx_eoi,
};
-int vfio_populate_vga(VFIOPCIDevice *vdev)
+int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
{
VFIODevice *vbasedev = &vdev->vbasedev;
struct vfio_region_info *reg_info;
@@ -2140,15 +2246,18 @@ int vfio_populate_vga(VFIOPCIDevice *vdev)
ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
if (ret) {
+ error_setg_errno(errp, -ret,
+ "failed getting region info for VGA region index %d",
+ VFIO_PCI_VGA_REGION_INDEX);
return ret;
}
if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
!(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
reg_info->size < 0xbffff + 1) {
- error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
- (unsigned long)reg_info->flags,
- (unsigned long)reg_info->size);
+ error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
+ (unsigned long)reg_info->flags,
+ (unsigned long)reg_info->size);
g_free(reg_info);
return -EINVAL;
}
@@ -2197,7 +2306,7 @@ int vfio_populate_vga(VFIOPCIDevice *vdev)
return 0;
}
-static int vfio_populate_device(VFIOPCIDevice *vdev)
+static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
{
VFIODevice *vbasedev = &vdev->vbasedev;
struct vfio_region_info *reg_info;
@@ -2206,19 +2315,19 @@ static int vfio_populate_device(VFIOPCIDevice *vdev)
/* Sanity check device */
if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
- error_report("vfio: Um, this isn't a PCI device");
- goto error;
+ error_setg(errp, "this isn't a PCI device");
+ return;
}
if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
- error_report("vfio: unexpected number of io regions %u",
- vbasedev->num_regions);
- goto error;
+ error_setg(errp, "unexpected number of io regions %u",
+ vbasedev->num_regions);
+ return;
}
if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
- error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
- goto error;
+ error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
+ return;
}
for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
@@ -2229,8 +2338,8 @@ static int vfio_populate_device(VFIOPCIDevice *vdev)
g_free(name);
if (ret) {
- error_report("vfio: Error getting region %d info: %m", i);
- goto error;
+ error_setg_errno(errp, -ret, "failed to get region %d info", i);
+ return;
}
QLIST_INIT(&vdev->bars[i].quirks);
@@ -2239,8 +2348,8 @@ static int vfio_populate_device(VFIOPCIDevice *vdev)
ret = vfio_get_region_info(vbasedev,
VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
if (ret) {
- error_report("vfio: Error getting config info: %m");
- goto error;
+ error_setg_errno(errp, -ret, "failed to get config info");
+ return;
}
trace_vfio_populate_device_config(vdev->vbasedev.name,
@@ -2257,11 +2366,11 @@ static int vfio_populate_device(VFIOPCIDevice *vdev)
g_free(reg_info);
if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
- ret = vfio_populate_vga(vdev);
+ ret = vfio_populate_vga(vdev, errp);
if (ret) {
- error_report(
- "vfio: Device does not support requested feature x-vga");
- goto error;
+ error_append_hint(errp, "device does not support "
+ "requested feature x-vga\n");
+ return;
}
}
@@ -2271,17 +2380,13 @@ static int vfio_populate_device(VFIOPCIDevice *vdev)
if (ret) {
/* This can fail for an old kernel or legacy PCI dev */
trace_vfio_populate_device_get_irq_info_failure();
- ret = 0;
} else if (irq_info.count == 1) {
vdev->pci_aer = true;
} else {
- error_report("vfio: %s "
+ error_report(WARN_PREFIX
"Could not enable error recovery for the device",
vbasedev->name);
}
-
-error:
- return ret;
}
static void vfio_put_device(VFIOPCIDevice *vdev)
@@ -2485,18 +2590,26 @@ static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
vdev->req_enabled = false;
}
-static int vfio_initfn(PCIDevice *pdev)
+static void vfio_realize(PCIDevice *pdev, Error **errp)
{
VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
VFIODevice *vbasedev_iter;
VFIOGroup *group;
char *tmp, group_path[PATH_MAX], *group_name;
+ Error *err = NULL;
ssize_t len;
struct stat st;
int groupid;
int i, ret;
if (!vdev->vbasedev.sysfsdev) {
+ if (!(~vdev->host.domain || ~vdev->host.bus ||
+ ~vdev->host.slot || ~vdev->host.function)) {
+ error_setg(errp, "No provided host device");
+            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
+                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
+ return;
+ }
vdev->vbasedev.sysfsdev =
g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
vdev->host.domain, vdev->host.bus,
@@ -2504,9 +2617,9 @@ static int vfio_initfn(PCIDevice *pdev)
}
if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
- error_report("vfio: error: no such host device: %s",
- vdev->vbasedev.sysfsdev);
- return -errno;
+ error_setg_errno(errp, errno, "no such host device");
+ error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev);
+ return;
}
vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
@@ -2518,45 +2631,44 @@ static int vfio_initfn(PCIDevice *pdev)
g_free(tmp);
if (len <= 0 || len >= sizeof(group_path)) {
- error_report("vfio: error no iommu_group for device");
- return len < 0 ? -errno : -ENAMETOOLONG;
+ error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
+ "no iommu_group found");
+ goto error;
}
group_path[len] = 0;
group_name = basename(group_path);
if (sscanf(group_name, "%d", &groupid) != 1) {
- error_report("vfio: error reading %s: %m", group_path);
- return -errno;
+ error_setg_errno(errp, errno, "failed to read %s", group_path);
+ goto error;
}
- trace_vfio_initfn(vdev->vbasedev.name, groupid);
+ trace_vfio_realize(vdev->vbasedev.name, groupid);
- group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
+ group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
if (!group) {
- error_report("vfio: failed to get group %d", groupid);
- return -ENOENT;
+ goto error;
}
QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
- error_report("vfio: error: device %s is already attached",
- vdev->vbasedev.name);
+ error_setg(errp, "device is already attached");
vfio_put_group(group);
- return -EBUSY;
+ goto error;
}
}
- ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev);
+ ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
if (ret) {
- error_report("vfio: failed to get device %s", vdev->vbasedev.name);
vfio_put_group(group);
- return ret;
+ goto error;
}
- ret = vfio_populate_device(vdev);
- if (ret) {
- return ret;
+ vfio_populate_device(vdev, &err);
+ if (err) {
+ error_propagate(errp, err);
+ goto error;
}
/* Get a copy of config space */
@@ -2565,8 +2677,8 @@ static int vfio_initfn(PCIDevice *pdev)
vdev->config_offset);
if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
ret = ret < 0 ? -errno : -EFAULT;
- error_report("vfio: Failed to read device config space");
- return ret;
+ error_setg_errno(errp, -ret, "failed to read device config space");
+ goto error;
}
/* vfio emulates a lot for us, but some bits need extra love */
@@ -2582,8 +2694,8 @@ static int vfio_initfn(PCIDevice *pdev)
*/
if (vdev->vendor_id != PCI_ANY_ID) {
if (vdev->vendor_id >= 0xffff) {
- error_report("vfio: Invalid PCI vendor ID provided");
- return -EINVAL;
+ error_setg(errp, "invalid PCI vendor ID provided");
+ goto error;
}
vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
@@ -2593,8 +2705,8 @@ static int vfio_initfn(PCIDevice *pdev)
if (vdev->device_id != PCI_ANY_ID) {
if (vdev->device_id > 0xffff) {
- error_report("vfio: Invalid PCI device ID provided");
- return -EINVAL;
+ error_setg(errp, "invalid PCI device ID provided");
+ goto error;
}
vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
@@ -2604,8 +2716,8 @@ static int vfio_initfn(PCIDevice *pdev)
if (vdev->sub_vendor_id != PCI_ANY_ID) {
if (vdev->sub_vendor_id > 0xffff) {
- error_report("vfio: Invalid PCI subsystem vendor ID provided");
- return -EINVAL;
+ error_setg(errp, "invalid PCI subsystem vendor ID provided");
+ goto error;
}
vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
vdev->sub_vendor_id, ~0);
@@ -2615,8 +2727,8 @@ static int vfio_initfn(PCIDevice *pdev)
if (vdev->sub_device_id != PCI_ANY_ID) {
if (vdev->sub_device_id > 0xffff) {
- error_report("vfio: Invalid PCI subsystem device ID provided");
- return -EINVAL;
+ error_setg(errp, "invalid PCI subsystem device ID provided");
+ goto error;
}
vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
@@ -2644,14 +2756,15 @@ static int vfio_initfn(PCIDevice *pdev)
vfio_pci_size_rom(vdev);
- ret = vfio_msix_early_setup(vdev);
- if (ret) {
- return ret;
+ vfio_msix_early_setup(vdev, &err);
+ if (err) {
+ error_propagate(errp, err);
+ goto error;
}
vfio_bars_setup(vdev);
- ret = vfio_add_capabilities(vdev);
+ ret = vfio_add_capabilities(vdev, errp);
if (ret) {
goto out_teardown;
}
@@ -2669,9 +2782,9 @@ static int vfio_initfn(PCIDevice *pdev)
struct vfio_region_info *opregion;
if (vdev->pdev.qdev.hotplugged) {
- error_report("Cannot support IGD OpRegion feature on hotplugged "
- "device %s", vdev->vbasedev.name);
- ret = -EINVAL;
+ error_setg(errp,
+ "cannot support IGD OpRegion feature on hotplugged "
+ "device");
goto out_teardown;
}
@@ -2679,16 +2792,14 @@ static int vfio_initfn(PCIDevice *pdev)
VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
if (ret) {
- error_report("Device %s does not support requested IGD OpRegion "
- "feature", vdev->vbasedev.name);
+ error_setg_errno(errp, -ret,
+ "does not support requested IGD OpRegion feature");
goto out_teardown;
}
- ret = vfio_pci_igd_opregion_init(vdev, opregion);
+ ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
g_free(opregion);
if (ret) {
- error_report("Device %s IGD OpRegion initialization failed",
- vdev->vbasedev.name);
goto out_teardown;
}
}
@@ -2708,7 +2819,7 @@ static int vfio_initfn(PCIDevice *pdev)
vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
vfio_intx_mmap_enable, vdev);
pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
- ret = vfio_intx_enable(vdev);
+ ret = vfio_intx_enable(vdev, errp);
if (ret) {
goto out_teardown;
}
@@ -2718,13 +2829,14 @@ static int vfio_initfn(PCIDevice *pdev)
vfio_register_req_notifier(vdev);
vfio_setup_resetfn_quirk(vdev);
- return 0;
+ return;
out_teardown:
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
vfio_teardown_msi(vdev);
vfio_bars_exit(vdev);
- return ret;
+error:
+ error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
}
static void vfio_instance_finalize(Object *obj)
@@ -2806,6 +2918,10 @@ static void vfio_instance_init(Object *obj)
device_add_bootindex_property(obj, &vdev->bootindex,
"bootindex", NULL,
&pci_dev->qdev, NULL);
+ vdev->host.domain = ~0U;
+ vdev->host.bus = ~0U;
+ vdev->host.slot = ~0U;
+ vdev->host.function = ~0U;
}
static Property vfio_pci_dev_properties[] = {
@@ -2853,7 +2969,7 @@ static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vfio_pci_vmstate;
dc->desc = "VFIO-based PCI device assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
- pdc->init = vfio_initfn;
+ pdc->realize = vfio_realize;
pdc->exit = vfio_exitfn;
pdc->config_read = vfio_pci_read_config;
pdc->config_write = vfio_pci_write_config;
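
The hunks above convert vfio_initfn() to the realize()/Error** convention: each failing step fills an Error object, intermediate callers propagate it, and a single device-name prefix is prepended on the error path. A minimal sketch of that idiom, using the real QAPI error helpers but hypothetical demo_* function names:

    #include "qemu/osdep.h"
    #include "qapi/error.h"

    /* Hypothetical leaf step: reports failure through *errp instead of
     * returning a negative errno. */
    static void demo_open_device(int fd, Error **errp)
    {
        if (fd < 0) {
            error_setg_errno(errp, EINVAL, "no such host device");
        }
    }

    /* Hypothetical realize(): collect the error locally, add context,
     * then hand it to the caller; no return code is needed. */
    static void demo_realize(int fd, Error **errp)
    {
        Error *err = NULL;

        demo_open_device(fd, &err);
        if (err) {
            error_prepend(&err, "vfio demo: ");
            error_propagate(errp, err);
        }
    }
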
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index 7d482d9d21..a8366bb2a7 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -161,9 +161,10 @@ void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr);
void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr);
void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev);
-int vfio_populate_vga(VFIOPCIDevice *vdev);
+int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
- struct vfio_region_info *info);
+ struct vfio_region_info *info,
+ Error **errp);
#endif /* HW_VFIO_VFIO_PCI_H */
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index a559e7b659..a4663c918e 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -44,9 +44,10 @@ static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
* and add it into the list of IRQs
* @vbasedev: the VFIO device handle
* @info: irq info struct retrieved from VFIO driver
+ * @errp: error object
*/
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
- struct vfio_irq_info info)
+ struct vfio_irq_info info, Error **errp)
{
int ret;
VFIOPlatformDevice *vdev =
@@ -69,7 +70,8 @@ static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
if (ret) {
g_free(intp->interrupt);
g_free(intp);
- error_report("vfio: Error: trigger event_notifier_init failed ");
+ error_setg_errno(errp, -ret,
+ "failed to initialize trigger eventd notifier");
return NULL;
}
if (vfio_irq_is_automasked(intp)) {
@@ -80,7 +82,8 @@ static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
g_free(intp->interrupt);
g_free(intp->unmask);
g_free(intp);
- error_report("vfio: Error: resamplefd event_notifier_init failed");
+ error_setg_errno(errp, -ret,
+ "failed to initialize resample eventd notifier");
return NULL;
}
}
@@ -456,9 +459,10 @@ static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
* vfio_populate_device - Allocate and populate MMIO region
* and IRQ structs according to driver returned information
* @vbasedev: the VFIO device handle
+ * @errp: error object
*
*/
-static int vfio_populate_device(VFIODevice *vbasedev)
+static int vfio_populate_device(VFIODevice *vbasedev, Error **errp)
{
VFIOINTp *intp, *tmp;
int i, ret = -1;
@@ -466,7 +470,7 @@ static int vfio_populate_device(VFIODevice *vbasedev)
container_of(vbasedev, VFIOPlatformDevice, vbasedev);
if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
- error_report("vfio: Um, this isn't a platform device");
+ error_setg(errp, "this isn't a platform device");
return ret;
}
@@ -480,7 +484,7 @@ static int vfio_populate_device(VFIODevice *vbasedev)
vdev->regions[i], i, name);
g_free(name);
if (ret) {
- error_report("vfio: Error getting region %d info: %m", i);
+ error_setg_errno(errp, -ret, "failed to get region %d info", i);
goto reg_error;
}
}
@@ -496,16 +500,15 @@ static int vfio_populate_device(VFIODevice *vbasedev)
irq.index = i;
ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
if (ret) {
- error_report("vfio: error getting device %s irq info",
- vbasedev->name);
+ error_setg_errno(errp, -ret, "failed to get device irq info");
goto irq_err;
} else {
trace_vfio_platform_populate_interrupts(irq.index,
irq.count,
irq.flags);
- intp = vfio_init_intp(vbasedev, irq);
+ intp = vfio_init_intp(vbasedev, irq, errp);
if (!intp) {
- error_report("vfio: Error installing IRQ %d up", i);
+ ret = -1;
goto irq_err;
}
}
@@ -538,13 +541,14 @@ static VFIODeviceOps vfio_platform_ops = {
/**
* vfio_base_device_init - perform preliminary VFIO setup
* @vbasedev: the VFIO device handle
+ * @errp: error object
*
* Implement the VFIO command sequence that allows to discover
* assigned device resources: group extraction, device
* fd retrieval, resource query.
* Precondition: the device name must be initialized
*/
-static int vfio_base_device_init(VFIODevice *vbasedev)
+static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
VFIOGroup *group;
VFIODevice *vbasedev_iter;
@@ -560,6 +564,7 @@ static int vfio_base_device_init(VFIODevice *vbasedev)
vbasedev->name = g_strdup(basename(vbasedev->sysfsdev));
} else {
if (!vbasedev->name || strchr(vbasedev->name, '/')) {
+ error_setg(errp, "wrong host device name");
return -EINVAL;
}
@@ -568,8 +573,8 @@ static int vfio_base_device_init(VFIODevice *vbasedev)
}
if (stat(vbasedev->sysfsdev, &st) < 0) {
- error_report("vfio: error: no such host device: %s",
- vbasedev->sysfsdev);
+ error_setg_errno(errp, errno,
+ "failed to get the sysfs host device file status");
return -errno;
}
@@ -578,44 +583,41 @@ static int vfio_base_device_init(VFIODevice *vbasedev)
g_free(tmp);
if (len < 0 || len >= sizeof(group_path)) {
- error_report("vfio: error no iommu_group for device");
- return len < 0 ? -errno : -ENAMETOOLONG;
+ ret = len < 0 ? -errno : -ENAMETOOLONG;
+ error_setg_errno(errp, -ret, "no iommu_group found");
+ return ret;
}
group_path[len] = 0;
group_name = basename(group_path);
if (sscanf(group_name, "%d", &groupid) != 1) {
- error_report("vfio: error reading %s: %m", group_path);
+ error_setg_errno(errp, errno, "failed to read %s", group_path);
return -errno;
}
trace_vfio_platform_base_device_init(vbasedev->name, groupid);
- group = vfio_get_group(groupid, &address_space_memory);
+ group = vfio_get_group(groupid, &address_space_memory, errp);
if (!group) {
- error_report("vfio: failed to get group %d", groupid);
return -ENOENT;
}
QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
- error_report("vfio: error: device %s is already attached",
- vbasedev->name);
+ error_setg(errp, "device is already attached");
vfio_put_group(group);
return -EBUSY;
}
}
- ret = vfio_get_device(group, vbasedev->name, vbasedev);
+ ret = vfio_get_device(group, vbasedev->name, vbasedev, errp);
if (ret) {
- error_report("vfio: failed to get device %s", vbasedev->name);
vfio_put_group(group);
return ret;
}
- ret = vfio_populate_device(vbasedev);
+ ret = vfio_populate_device(vbasedev, errp);
if (ret) {
- error_report("vfio: failed to populate device %s", vbasedev->name);
vfio_put_group(group);
}
@@ -644,11 +646,9 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
vbasedev->sysfsdev : vbasedev->name,
vdev->compat);
- ret = vfio_base_device_init(vbasedev);
+ ret = vfio_base_device_init(vbasedev, errp);
if (ret) {
- error_setg(errp, "vfio: vfio_base_device_init failed for %s",
- vbasedev->name);
- return;
+ goto out;
}
for (i = 0; i < vbasedev->num_regions; i++) {
@@ -658,6 +658,16 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp)
}
sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
}
+out:
+ if (!ret) {
+ return;
+ }
+
+ if (vdev->vbasedev.name) {
+ error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
+ } else {
+ error_prepend(errp, "vfio error: ");
+ }
}
static const VMStateDescription vfio_platform_vmstate = {
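
The vfio_base_device_init() comment describes the discovery sequence: group extraction, device fd retrieval, resource query. For reference, a stripped-down user-space sketch of that sequence against the kernel VFIO interface (error handling omitted; in QEMU the group is bound to a container by vfio_get_group() before the device fd is requested):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Sketch only: open the IOMMU group, ask it for the device fd, then
     * query how many regions and IRQs the device exposes. */
    static int demo_vfio_discover(const char *group_path, const char *dev_name)
    {
        struct vfio_device_info info = { .argsz = sizeof(info) };
        int group_fd = open(group_path, O_RDWR);     /* e.g. "/dev/vfio/26" */
        int dev_fd = ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);

        ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &info);  /* info.num_regions, info.num_irqs */
        return dev_fd;
    }
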
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index 7443d348d9..4409bcc0d7 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -25,7 +25,7 @@ static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
}
return !memory_region_is_ram(section->mr) ||
- memory_region_is_skip_dump(section->mr);
+ memory_region_is_ram_device(section->mr);
}
static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index da133221de..ef81609b98 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -36,7 +36,7 @@ vfio_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int
vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %s"
vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s config:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
vfio_populate_device_get_irq_info_failure(void) "VFIO_DEVICE_GET_IRQ_INFO failure: %m"
-vfio_initfn(const char *name, int group_id) " (%s) group %d"
+vfio_realize(const char *name, int group_id) " (%s) group %d"
vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s %x@%x"
vfio_pci_reset(const char *name) " (%s)"
vfio_pci_reset_flr(const char *name) "%s FLR/VFIO_DEVICE_RESET"
diff --git a/hw/vigs/vigs_gl_backend_glx.c b/hw/vigs/vigs_gl_backend_glx.c
index 8212b88dbb..98e07f7646 100644
--- a/hw/vigs/vigs_gl_backend_glx.c
+++ b/hw/vigs/vigs_gl_backend_glx.c
@@ -129,7 +129,7 @@ static bool vigs_gl_backend_glx_check_gl_version(struct vigs_gl_backend_glx *gl_
int ctx_attribs[] =
{
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
- GLX_CONTEXT_MINOR_VERSION_ARB, 1,
+ GLX_CONTEXT_MINOR_VERSION_ARB, 2,
GLX_RENDER_TYPE, GLX_RGBA_TYPE,
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
None
@@ -331,7 +331,7 @@ static bool vigs_gl_backend_glx_create_context(struct vigs_gl_backend_glx *gl_ba
int attribs[] =
{
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
- GLX_CONTEXT_MINOR_VERSION_ARB, 1,
+ GLX_CONTEXT_MINOR_VERSION_ARB, 2,
GLX_RENDER_TYPE, GLX_RGBA_TYPE,
GLX_CONTEXT_PROFILE_MASK_ARB, GLX_CONTEXT_CORE_PROFILE_BIT_ARB,
None
diff --git a/hw/vigs/vigs_gl_backend_wgl.c b/hw/vigs/vigs_gl_backend_wgl.c
index 9db01a0a7e..7cdfc7fe08 100644
--- a/hw/vigs/vigs_gl_backend_wgl.c
+++ b/hw/vigs/vigs_gl_backend_wgl.c
@@ -158,7 +158,7 @@ static bool vigs_gl_backend_wgl_check_gl_version(struct vigs_gl_backend_wgl *gl_
int ctx_attribs[] =
{
WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
- WGL_CONTEXT_MINOR_VERSION_ARB, 1,
+ WGL_CONTEXT_MINOR_VERSION_ARB, 2,
WGL_CONTEXT_PROFILE_MASK_ARB, WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0
};
@@ -355,7 +355,7 @@ static bool vigs_gl_backend_wgl_create_context(struct vigs_gl_backend_wgl *gl_ba
int attribs[] =
{
WGL_CONTEXT_MAJOR_VERSION_ARB, 3,
- WGL_CONTEXT_MINOR_VERSION_ARB, 1,
+ WGL_CONTEXT_MINOR_VERSION_ARB, 2,
WGL_CONTEXT_PROFILE_MASK_ARB, WGL_CONTEXT_CORE_PROFILE_BIT_ARB,
0
};
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index 3e2b175da8..95c4c30ea1 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -5,3 +5,7 @@ common-obj-y += virtio-mmio.o
obj-y += virtio.o virtio-balloon.o
obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o
+
+obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock.o
+obj-y += virtio-crypto.o
+obj-$(CONFIG_VIRTIO_PCI) += virtio-crypto-pci.o
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 55184d33b3..7b6f55e70e 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -5,7 +5,7 @@ virtqueue_fill(void *vq, const void *elem, unsigned int len, unsigned int idx) "
virtqueue_flush(void *vq, unsigned int count) "vq %p count %u"
virtqueue_pop(void *vq, void *elem, unsigned int in_num, unsigned int out_num) "vq %p elem %p in_num %u out_num %u"
virtio_queue_notify(void *vdev, int n, void *vq) "vdev %p n %d vq %p"
-virtio_irq(void *vq) "vq %p"
+virtio_notify_irqfd(void *vdev, void *vq) "vdev %p vq %p"
virtio_notify(void *vdev, void *vq) "vdev %p vq %p"
virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u"
@@ -14,3 +14,8 @@ virtio_rng_guest_not_ready(void *rng) "rng %p: guest not ready"
virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed"
virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left"
+# hw/virtio/virtio-balloon.c
+virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s gpa: %"PRIx64
+virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d"
+virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d"
+virtio_balloon_to_target(uint64_t target, uint32_t num_pages) "balloon target: %"PRIx64" num_pages: %d"
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 7681f152f3..272a5ec584 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -172,6 +172,19 @@ static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
return idx - dev->vq_index;
}
+#ifdef CONFIG_VHOST_VSOCK
+static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
+ uint64_t guest_cid)
+{
+ return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
+}
+
+static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
+{
+ return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
+}
+#endif /* CONFIG_VHOST_VSOCK */
+
static const VhostOps kernel_ops = {
.backend_type = VHOST_BACKEND_TYPE_KERNEL,
.vhost_backend_init = vhost_kernel_init,
@@ -197,6 +210,10 @@ static const VhostOps kernel_ops = {
.vhost_set_owner = vhost_kernel_set_owner,
.vhost_reset_device = vhost_kernel_reset_device,
.vhost_get_vq_index = vhost_kernel_get_vq_index,
+#ifdef CONFIG_VHOST_VSOCK
+ .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
+ .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
+#endif /* CONFIG_VHOST_VSOCK */
};
int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
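
The two new backend ops are thin wrappers: vhost_kernel_call() issues the corresponding ioctl on the already-open /dev/vhost-vsock descriptor. A minimal sketch of what they reduce to (hypothetical demo helper, real VHOST_VSOCK_* ioctls from <linux/vhost.h>):

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Sketch only: set the guest CID, then mark the vsock backend running. */
    static int demo_vsock_configure(int vhostfd, uint64_t guest_cid)
    {
        int running = 1;

        if (ioctl(vhostfd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid) < 0) {
            return -errno;
        }
        return ioctl(vhostfd, VHOST_VSOCK_SET_RUNNING, &running) < 0 ? -errno : 0;
    }
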
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index b57454a4b7..7ee92b32c5 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -116,7 +116,7 @@ static bool ioeventfd_enabled(void)
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
- CharDriverState *chr = dev->opaque;
+ CharBackend *chr = dev->opaque;
uint8_t *p = (uint8_t *) msg;
int r, size = VHOST_USER_HDR_SIZE;
@@ -196,7 +196,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request)
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
int *fds, int fd_num)
{
- CharDriverState *chr = dev->opaque;
+ CharBackend *chr = dev->opaque;
int ret, size = VHOST_USER_HDR_SIZE + msg->size;
/*
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
new file mode 100644
index 0000000000..b4815629e1
--- /dev/null
+++ b/hw/virtio/vhost-vsock.c
@@ -0,0 +1,417 @@
+/*
+ * Virtio vsock device
+ *
+ * Copyright 2015 Red Hat, Inc.
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+
+#include <sys/ioctl.h>
+#include "qemu/osdep.h"
+#include "standard-headers/linux/virtio_vsock.h"
+#include "qapi/error.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-access.h"
+#include "migration/migration.h"
+#include "qemu/error-report.h"
+#include "hw/virtio/vhost-vsock.h"
+#include "qemu/iov.h"
+#include "monitor/monitor.h"
+
+enum {
+ VHOST_VSOCK_SAVEVM_VERSION = 0,
+
+ VHOST_VSOCK_QUEUE_SIZE = 128,
+};
+
+static void vhost_vsock_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VHostVSock *vsock = VHOST_VSOCK(vdev);
+ struct virtio_vsock_config vsockcfg = {};
+
+ virtio_stq_p(vdev, &vsockcfg.guest_cid, vsock->conf.guest_cid);
+ memcpy(config, &vsockcfg, sizeof(vsockcfg));
+}
+
+static int vhost_vsock_set_guest_cid(VHostVSock *vsock)
+{
+ const VhostOps *vhost_ops = vsock->vhost_dev.vhost_ops;
+ int ret;
+
+ if (!vhost_ops->vhost_vsock_set_guest_cid) {
+ return -ENOSYS;
+ }
+
+ ret = vhost_ops->vhost_vsock_set_guest_cid(&vsock->vhost_dev,
+ vsock->conf.guest_cid);
+ if (ret < 0) {
+ return -errno;
+ }
+ return 0;
+}
+
+static int vhost_vsock_set_running(VHostVSock *vsock, int start)
+{
+ const VhostOps *vhost_ops = vsock->vhost_dev.vhost_ops;
+ int ret;
+
+ if (!vhost_ops->vhost_vsock_set_running) {
+ return -ENOSYS;
+ }
+
+ ret = vhost_ops->vhost_vsock_set_running(&vsock->vhost_dev, start);
+ if (ret < 0) {
+ return -errno;
+ }
+ return 0;
+}
+
+static void vhost_vsock_start(VirtIODevice *vdev)
+{
+ VHostVSock *vsock = VHOST_VSOCK(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ int ret;
+ int i;
+
+ if (!k->set_guest_notifiers) {
+ error_report("binding does not support guest notifiers");
+ return;
+ }
+
+ ret = vhost_dev_enable_notifiers(&vsock->vhost_dev, vdev);
+ if (ret < 0) {
+ error_report("Error enabling host notifiers: %d", -ret);
+ return;
+ }
+
+ ret = k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, true);
+ if (ret < 0) {
+ error_report("Error binding guest notifier: %d", -ret);
+ goto err_host_notifiers;
+ }
+
+ vsock->vhost_dev.acked_features = vdev->guest_features;
+ ret = vhost_dev_start(&vsock->vhost_dev, vdev);
+ if (ret < 0) {
+ error_report("Error starting vhost: %d", -ret);
+ goto err_guest_notifiers;
+ }
+
+ ret = vhost_vsock_set_running(vsock, 1);
+ if (ret < 0) {
+ error_report("Error starting vhost vsock: %d", -ret);
+ goto err_dev_start;
+ }
+
+ /* guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < vsock->vhost_dev.nvqs; i++) {
+ vhost_virtqueue_mask(&vsock->vhost_dev, vdev, i, false);
+ }
+
+ return;
+
+err_dev_start:
+ vhost_dev_stop(&vsock->vhost_dev, vdev);
+err_guest_notifiers:
+ k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, false);
+err_host_notifiers:
+ vhost_dev_disable_notifiers(&vsock->vhost_dev, vdev);
+}
+
+static void vhost_vsock_stop(VirtIODevice *vdev)
+{
+ VHostVSock *vsock = VHOST_VSOCK(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ int ret;
+
+ if (!k->set_guest_notifiers) {
+ return;
+ }
+
+ ret = vhost_vsock_set_running(vsock, 0);
+ if (ret < 0) {
+ error_report("vhost vsock set running failed: %d", ret);
+ return;
+ }
+
+ vhost_dev_stop(&vsock->vhost_dev, vdev);
+
+ ret = k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, false);
+ if (ret < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ return;
+ }
+
+ vhost_dev_disable_notifiers(&vsock->vhost_dev, vdev);
+}
+
+static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostVSock *vsock = VHOST_VSOCK(vdev);
+ bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+
+ if (!vdev->vm_running) {
+ should_start = false;
+ }
+
+ if (vsock->vhost_dev.started == should_start) {
+ return;
+ }
+
+ if (should_start) {
+ vhost_vsock_start(vdev);
+ } else {
+ vhost_vsock_stop(vdev);
+ }
+}
+
+static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
+ uint64_t requested_features,
+ Error **errp)
+{
+ /* No feature bits used yet */
+ return requested_features;
+}
+
+static void vhost_vsock_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /* Do nothing */
+}
+
+static void vhost_vsock_guest_notifier_mask(VirtIODevice *vdev, int idx,
+ bool mask)
+{
+ VHostVSock *vsock = VHOST_VSOCK(vdev);
+
+ vhost_virtqueue_mask(&vsock->vhost_dev, vdev, idx, mask);
+}
+
+static bool vhost_vsock_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+ VHostVSock *vsock = VHOST_VSOCK(vdev);
+
+ return vhost_virtqueue_pending(&vsock->vhost_dev, idx);
+}
+
+static void vhost_vsock_send_transport_reset(VHostVSock *vsock)
+{
+ VirtQueueElement *elem;
+ VirtQueue *vq = vsock->event_vq;
+ struct virtio_vsock_event event = {
+ .id = cpu_to_le32(VIRTIO_VSOCK_EVENT_TRANSPORT_RESET),
+ };
+
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ error_report("vhost-vsock missed transport reset event");
+ return;
+ }
+
+ if (elem->out_num) {
+ error_report("invalid vhost-vsock event virtqueue element with "
+ "out buffers");
+ goto out;
+ }
+
+ if (iov_from_buf(elem->in_sg, elem->in_num, 0,
+ &event, sizeof(event)) != sizeof(event)) {
+ error_report("vhost-vsock event virtqueue element is too short");
+ goto out;
+ }
+
+ virtqueue_push(vq, elem, sizeof(event));
+ virtio_notify(VIRTIO_DEVICE(vsock), vq);
+
+out:
+ g_free(elem);
+}
+
+static void vhost_vsock_post_load_timer_cleanup(VHostVSock *vsock)
+{
+ if (!vsock->post_load_timer) {
+ return;
+ }
+
+ timer_del(vsock->post_load_timer);
+ timer_free(vsock->post_load_timer);
+ vsock->post_load_timer = NULL;
+}
+
+static void vhost_vsock_post_load_timer_cb(void *opaque)
+{
+ VHostVSock *vsock = opaque;
+
+ vhost_vsock_post_load_timer_cleanup(vsock);
+ vhost_vsock_send_transport_reset(vsock);
+}
+
+static void vhost_vsock_pre_save(void *opaque)
+{
+ VHostVSock *vsock = opaque;
+
+ /* At this point, the backend must be stopped; otherwise
+ * it might keep writing to memory. */
+ assert(!vsock->vhost_dev.started);
+}
+
+static int vhost_vsock_post_load(void *opaque, int version_id)
+{
+ VHostVSock *vsock = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vsock);
+
+ if (virtio_queue_get_addr(vdev, 2)) {
+ /* Defer transport reset event to a vm clock timer so that virtqueue
+ * changes happen after migration has completed.
+ */
+ assert(!vsock->post_load_timer);
+ vsock->post_load_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ vhost_vsock_post_load_timer_cb,
+ vsock);
+ timer_mod(vsock->post_load_timer, 1);
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_virtio_vhost_vsock = {
+ .name = "virtio-vhost_vsock",
+ .minimum_version_id = VHOST_VSOCK_SAVEVM_VERSION,
+ .version_id = VHOST_VSOCK_SAVEVM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .pre_save = vhost_vsock_pre_save,
+ .post_load = vhost_vsock_post_load,
+};
+
+static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostVSock *vsock = VHOST_VSOCK(dev);
+ int vhostfd;
+ int ret;
+
+ /* Refuse to use reserved CID numbers */
+ if (vsock->conf.guest_cid <= 2) {
+ error_setg(errp, "guest-cid property must be greater than 2");
+ return;
+ }
+
+ if (vsock->conf.guest_cid > UINT32_MAX) {
+ error_setg(errp, "guest-cid property must be a 32-bit number");
+ return;
+ }
+
+ if (vsock->conf.vhostfd) {
+ vhostfd = monitor_fd_param(cur_mon, vsock->conf.vhostfd, errp);
+ if (vhostfd == -1) {
+ error_prepend(errp, "vhost-vsock: unable to parse vhostfd: ");
+ return;
+ }
+ } else {
+ vhostfd = open("/dev/vhost-vsock", O_RDWR);
+ if (vhostfd < 0) {
+ error_setg_errno(errp, errno,
+ "vhost-vsock: failed to open vhost device");
+ return;
+ }
+ }
+
+ virtio_init(vdev, "vhost-vsock", VIRTIO_ID_VSOCK,
+ sizeof(struct virtio_vsock_config));
+
+ /* Receive and transmit queues belong to vhost */
+ virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, vhost_vsock_handle_output);
+ virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, vhost_vsock_handle_output);
+
+ /* The event queue belongs to QEMU */
+ vsock->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+ vhost_vsock_handle_output);
+
+ vsock->vhost_dev.nvqs = ARRAY_SIZE(vsock->vhost_vqs);
+ vsock->vhost_dev.vqs = vsock->vhost_vqs;
+ ret = vhost_dev_init(&vsock->vhost_dev, (void *)(uintptr_t)vhostfd,
+ VHOST_BACKEND_TYPE_KERNEL, 0);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost-vsock: vhost_dev_init failed");
+ goto err_virtio;
+ }
+
+ ret = vhost_vsock_set_guest_cid(vsock);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost-vsock: unable to set guest cid");
+ goto err_vhost_dev;
+ }
+
+ vsock->post_load_timer = NULL;
+ return;
+
+err_vhost_dev:
+ vhost_dev_cleanup(&vsock->vhost_dev);
+err_virtio:
+ virtio_cleanup(vdev);
+ close(vhostfd);
+ return;
+}
+
+static void vhost_vsock_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostVSock *vsock = VHOST_VSOCK(dev);
+
+ vhost_vsock_post_load_timer_cleanup(vsock);
+
+ /* This will stop vhost backend if appropriate. */
+ vhost_vsock_set_status(vdev, 0);
+
+ vhost_dev_cleanup(&vsock->vhost_dev);
+ virtio_cleanup(vdev);
+}
+
+static Property vhost_vsock_properties[] = {
+ DEFINE_PROP_UINT64("guest-cid", VHostVSock, conf.guest_cid, 0),
+ DEFINE_PROP_STRING("vhostfd", VHostVSock, conf.vhostfd),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_vsock_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ dc->props = vhost_vsock_properties;
+ dc->vmsd = &vmstate_virtio_vhost_vsock;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = vhost_vsock_device_realize;
+ vdc->unrealize = vhost_vsock_device_unrealize;
+ vdc->get_features = vhost_vsock_get_features;
+ vdc->get_config = vhost_vsock_get_config;
+ vdc->set_status = vhost_vsock_set_status;
+ vdc->guest_notifier_mask = vhost_vsock_guest_notifier_mask;
+ vdc->guest_notifier_pending = vhost_vsock_guest_notifier_pending;
+}
+
+static const TypeInfo vhost_vsock_info = {
+ .name = TYPE_VHOST_VSOCK,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VHostVSock),
+ .class_init = vhost_vsock_class_init,
+};
+
+static void vhost_vsock_register_types(void)
+{
+ type_register_static(&vhost_vsock_info);
+}
+
+type_init(vhost_vsock_register_types)
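
The guest-cid checked in vhost_vsock_device_realize() becomes the guest's AF_VSOCK address; CIDs 0-2 are reserved (2 is the host), which is why values <= 2 are rejected. The device is typically exposed through a PCI proxy (added elsewhere in this series) as "-device vhost-vsock-pci,guest-cid=3". A guest-side sketch, assuming a guest kernel with vsock support, connecting to a host listener:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/vm_sockets.h>

    /* Sketch only: from inside the guest, connect to a service listening
     * on the host at vsock port 1234. */
    int main(void)
    {
        struct sockaddr_vm addr;
        int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.svm_family = AF_VSOCK;
        addr.svm_cid = VMADDR_CID_HOST;   /* CID 2: the host */
        addr.svm_port = 1234;
        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("connect");
            return 1;
        }
        return 0;
    }
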
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 3d0c807d0e..f7f70237db 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -421,32 +421,73 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
dev->log_size = size;
}
+
+static int vhost_verify_ring_part_mapping(void *part,
+ uint64_t part_addr,
+ uint64_t part_size,
+ uint64_t start_addr,
+ uint64_t size)
+{
+ hwaddr l;
+ void *p;
+ int r = 0;
+
+ if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
+ return 0;
+ }
+ l = part_size;
+ p = cpu_physical_memory_map(part_addr, &l, 1);
+ if (!p || l != part_size) {
+ r = -ENOMEM;
+ }
+ if (p != part) {
+ r = -EBUSY;
+ }
+ cpu_physical_memory_unmap(p, l, 0, 0);
+ return r;
+}
+
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size)
{
- int i;
+ int i, j;
int r = 0;
+ const char *part_name[] = {
+ "descriptor table",
+ "available ring",
+ "used ring"
+ };
- for (i = 0; !r && i < dev->nvqs; ++i) {
+ for (i = 0; i < dev->nvqs; ++i) {
struct vhost_virtqueue *vq = dev->vqs + i;
- hwaddr l;
- void *p;
- if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
- continue;
+ j = 0;
+ r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
+ vq->desc_size, start_addr, size);
+ if (r) {
+ break;
}
- l = vq->ring_size;
- p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
- if (!p || l != vq->ring_size) {
- error_report("Unable to map ring buffer for ring %d", i);
- r = -ENOMEM;
+
+ j++;
+ r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
+ vq->avail_size, start_addr, size);
+ if (r) {
+ break;
}
- if (p != vq->ring) {
- error_report("Ring buffer relocated for ring %d", i);
- r = -EBUSY;
+
+ j++;
+ r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
+ vq->used_size, start_addr, size);
+ if (r) {
+ break;
}
- cpu_physical_memory_unmap(p, l, 0, 0);
+ }
+
+ if (r == -ENOMEM) {
+ error_report("Unable to map %s for ring %d", part_name[j], i);
+ } else if (r == -EBUSY) {
+ error_report("%s relocated for ring %d", part_name[j], i);
}
return r;
}
@@ -822,6 +863,9 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
unsigned idx)
{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
hwaddr s, l, a;
int r;
int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
@@ -857,15 +901,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
}
}
- s = l = virtio_queue_get_desc_size(vdev, idx);
- a = virtio_queue_get_desc_addr(vdev, idx);
+ vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
+ vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
vq->desc = cpu_physical_memory_map(a, &l, 0);
if (!vq->desc || l != s) {
r = -ENOMEM;
goto fail_alloc_desc;
}
- s = l = virtio_queue_get_avail_size(vdev, idx);
- a = virtio_queue_get_avail_addr(vdev, idx);
+ vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
+ vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
vq->avail = cpu_physical_memory_map(a, &l, 0);
if (!vq->avail || l != s) {
r = -ENOMEM;
@@ -879,14 +923,6 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
goto fail_alloc_used;
}
- vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
- vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
- vq->ring = cpu_physical_memory_map(a, &l, 1);
- if (!vq->ring || l != s) {
- r = -ENOMEM;
- goto fail_alloc_ring;
- }
-
r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
if (r < 0) {
r = -errno;
@@ -912,13 +948,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
vhost_virtqueue_mask(dev, vdev, idx, false);
}
+ if (k->query_guest_notifiers &&
+ k->query_guest_notifiers(qbus->parent) &&
+ virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
+ file.fd = -1;
+ r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
+ if (r) {
+ goto fail_vector;
+ }
+ }
+
return 0;
+fail_vector:
fail_kick:
fail_alloc:
- cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
- 0, 0);
-fail_alloc_ring:
cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
0, 0);
fail_alloc_used:
@@ -959,8 +1003,6 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
vhost_vq_index);
}
- cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
- 0, virtio_queue_get_ring_size(vdev, idx));
cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
1, virtio_queue_get_used_size(vdev, idx));
cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
@@ -1108,7 +1150,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
- } else if (!qemu_memfd_check()) {
+ } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
error_setg(&hdev->migration_blocker,
"Migration disabled: failed to allocate shared memory");
}
@@ -1172,13 +1214,14 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusState *vbus = VIRTIO_BUS(qbus);
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
- if (!k->ioeventfd_started) {
+ /* We will pass the notifiers to the kernel; make sure that QEMU
+ * doesn't interfere.
+ */
+ r = virtio_device_grab_ioeventfd(vdev);
+ if (r < 0) {
error_report("binding does not support host notifiers");
- r = -ENOSYS;
goto fail;
}
@@ -1201,6 +1244,7 @@ fail_vq:
}
assert (e >= 0);
}
+ virtio_device_release_ioeventfd(vdev);
fail:
return r;
}
@@ -1223,6 +1267,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
}
assert (r >= 0);
}
+ virtio_device_release_ioeventfd(vdev);
}
/* Test and clear event pending status.
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 5af429a58a..884570a57d 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -34,13 +34,11 @@
static void balloon_page(void *addr, int deflate)
{
-#if defined(__linux__)
if (!qemu_balloon_is_inhibited() && (!kvm_enabled() ||
kvm_has_sync_mmu())) {
qemu_madvise(addr, BALLOON_PAGE_SIZE,
deflate ? QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED);
}
-#endif
}
static const char *balloon_stat_names[] = {
@@ -396,26 +394,9 @@ static void virtio_balloon_to_target(void *opaque, ram_addr_t target)
trace_virtio_balloon_to_target(target, dev->num_pages);
}
-static void virtio_balloon_save_device(VirtIODevice *vdev, QEMUFile *f)
+static int virtio_balloon_post_load_device(void *opaque, int version_id)
{
- VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
-
- qemu_put_be32(f, s->num_pages);
- qemu_put_be32(f, s->actual);
-}
-
-static int virtio_balloon_load(QEMUFile *f, void *opaque, size_t size)
-{
- return virtio_load(VIRTIO_DEVICE(opaque), f, 1);
-}
-
-static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
- int version_id)
-{
- VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
-
- s->num_pages = qemu_get_be32(f);
- s->actual = qemu_get_be32(f);
+ VirtIOBalloon *s = VIRTIO_BALLOON(opaque);
if (balloon_stats_enabled(s)) {
balloon_stats_change_timer(s, s->stats_poll_interval);
@@ -423,6 +404,18 @@ static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0;
}
+static const VMStateDescription vmstate_virtio_balloon_device = {
+ .name = "virtio-balloon-device",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = virtio_balloon_post_load_device,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(num_pages, VirtIOBalloon),
+ VMSTATE_UINT32(actual, VirtIOBalloon),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -463,11 +456,24 @@ static void virtio_balloon_device_reset(VirtIODevice *vdev)
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
if (s->stats_vq_elem != NULL) {
+ virtqueue_unpop(s->svq, s->stats_vq_elem, 0);
g_free(s->stats_vq_elem);
s->stats_vq_elem = NULL;
}
}
+static void virtio_balloon_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
+
+ if (!s->stats_vq_elem && vdev->vm_running &&
+ (status & VIRTIO_CONFIG_S_DRIVER_OK) && virtqueue_rewind(s->svq, 1)) {
+ /* poll stats queue for the element we have discarded when the VM
+ * was stopped */
+ virtio_balloon_receive_stats(vdev, s->svq);
+ }
+}
+
static void virtio_balloon_instance_init(Object *obj)
{
VirtIOBalloon *s = VIRTIO_BALLOON(obj);
@@ -481,7 +487,15 @@ static void virtio_balloon_instance_init(Object *obj)
NULL, s, NULL);
}
-VMSTATE_VIRTIO_DEVICE(balloon, 1, virtio_balloon_load, virtio_vmstate_save);
+static const VMStateDescription vmstate_virtio_balloon = {
+ .name = "virtio-balloon",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
static Property virtio_balloon_properties[] = {
DEFINE_PROP_BIT("deflate-on-oom", VirtIOBalloon, host_features,
@@ -503,8 +517,8 @@ static void virtio_balloon_class_init(ObjectClass *klass, void *data)
vdc->get_config = virtio_balloon_get_config;
vdc->set_config = virtio_balloon_set_config;
vdc->get_features = virtio_balloon_get_features;
- vdc->save = virtio_balloon_save_device;
- vdc->load = virtio_balloon_load_device;
+ vdc->set_status = virtio_balloon_set_status;
+ vdc->vmsd = &vmstate_virtio_balloon_device;
}
static const TypeInfo virtio_balloon_info = {
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index a85b7c8abe..d6c0c72bd2 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -49,16 +49,17 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
DPRINTF("%s: plug device.\n", qbus->name);
- if (klass->device_plugged != NULL) {
- klass->device_plugged(qbus->parent, errp);
+ if (klass->pre_plugged != NULL) {
+ klass->pre_plugged(qbus->parent, errp);
}
/* Get the features of the plugged device. */
assert(vdc->get_features != NULL);
vdev->host_features = vdc->get_features(vdev, vdev->host_features,
errp);
- if (klass->post_plugged != NULL) {
- klass->post_plugged(qbus->parent, errp);
+
+ if (klass->device_plugged != NULL) {
+ klass->device_plugged(qbus->parent, errp);
}
}
@@ -146,130 +147,133 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
}
}
-/*
- * This function handles both assigning the ioeventfd handler and
- * registering it with the kernel.
- * assign: register/deregister ioeventfd with the kernel
- * set_handler: use the generic ioeventfd handler
- */
-static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
- int n, bool assign, bool set_handler)
+/* On success, ioeventfd ownership belongs to the caller. */
+int virtio_bus_grab_ioeventfd(VirtioBusState *bus)
{
- VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d", __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- r = k->ioeventfd_assign(proxy, notifier, n, assign);
- if (r < 0) {
- error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- return r;
- }
- } else {
- k->ioeventfd_assign(proxy, notifier, n, assign);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
+ /* vhost can be used even if ioeventfd=off in the proxy device,
+ * so do not check k->ioeventfd_enabled.
+ */
+ if (!k->ioeventfd_assign) {
+ return -ENOSYS;
+ }
+
+ if (bus->ioeventfd_grabbed == 0 && bus->ioeventfd_started) {
+ virtio_bus_stop_ioeventfd(bus);
+ /* Remember that we need to restart ioeventfd
+ * when ioeventfd_grabbed becomes zero.
+ */
+ bus->ioeventfd_started = true;
+ }
+ bus->ioeventfd_grabbed++;
+ return 0;
+}
+
+void virtio_bus_release_ioeventfd(VirtioBusState *bus)
+{
+ assert(bus->ioeventfd_grabbed != 0);
+ if (--bus->ioeventfd_grabbed == 0 && bus->ioeventfd_started) {
+ /* Force virtio_bus_start_ioeventfd to act. */
+ bus->ioeventfd_started = false;
+ virtio_bus_start_ioeventfd(bus);
}
- return r;
}
-void virtio_bus_start_ioeventfd(VirtioBusState *bus)
+int virtio_bus_start_ioeventfd(VirtioBusState *bus)
{
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
- VirtIODevice *vdev;
- int n, r;
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ int r;
- if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
- return;
- }
- if (k->ioeventfd_disabled(proxy)) {
- return;
+ if (!k->ioeventfd_assign || !k->ioeventfd_enabled(proxy)) {
+ return -ENOSYS;
}
- vdev = virtio_bus_get_device(bus);
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
+ if (bus->ioeventfd_started) {
+ return 0;
}
- k->ioeventfd_set_started(proxy, true, false);
- return;
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
+ /* Only set our notifier if we have ownership. */
+ if (!bus->ioeventfd_grabbed) {
+ r = vdc->start_ioeventfd(vdev);
+ if (r < 0) {
+ error_report("%s: failed. Fallback to userspace (slower).", __func__);
+ return r;
}
-
- r = set_host_notifier_internal(proxy, bus, n, false, false);
- assert(r >= 0);
}
- k->ioeventfd_set_started(proxy, false, true);
- error_report("%s: failed. Fallback to userspace (slower).", __func__);
+ bus->ioeventfd_started = true;
+ return 0;
}
void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
{
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
- DeviceState *proxy = DEVICE(BUS(bus)->parent);
VirtIODevice *vdev;
- int n, r;
+ VirtioDeviceClass *vdc;
- if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
+ if (!bus->ioeventfd_started) {
return;
}
- vdev = virtio_bus_get_device(bus);
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = set_host_notifier_internal(proxy, bus, n, false, false);
- assert(r >= 0);
+
+ /* Only remove our notifier if we have ownership. */
+ if (!bus->ioeventfd_grabbed) {
+ vdev = virtio_bus_get_device(bus);
+ vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ vdc->stop_ioeventfd(vdev);
}
- k->ioeventfd_set_started(proxy, false, false);
+ bus->ioeventfd_started = false;
+}
+
+bool virtio_bus_ioeventfd_enabled(VirtioBusState *bus)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+
+ return k->ioeventfd_assign && k->ioeventfd_enabled(proxy);
}
/*
- * This function switches from/to the generic ioeventfd handler.
- * assign==false means 'use generic ioeventfd handler'.
+ * This function switches ioeventfd on/off in the device.
+ * The caller must set or clear the handlers for the EventNotifier.
*/
int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
- if (!k->ioeventfd_started) {
+ if (!k->ioeventfd_assign) {
return -ENOSYS;
}
- k->ioeventfd_set_disabled(proxy, assign);
+
if (assign) {
- /*
- * Stop using the generic ioeventfd, we are doing eventfd handling
- * ourselves below
- *
- * FIXME: We should just switch the handler and not deassign the
- * ioeventfd.
- * Otherwise, there's a window where we don't have an
- * ioeventfd and we may end up with a notification where
- * we don't expect one.
- */
- virtio_bus_stop_ioeventfd(bus);
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %s (%d)",
+ __func__, strerror(-r), r);
+ return r;
+ }
+ r = k->ioeventfd_assign(proxy, notifier, n, true);
+ if (r < 0) {
+ error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+ goto cleanup_event_notifier;
+ }
+ return 0;
+ } else {
+ k->ioeventfd_assign(proxy, notifier, n, false);
}
- return set_host_notifier_internal(proxy, bus, n, assign, false);
+
+cleanup_event_notifier:
+ /* Test and clear notifier after disabling event,
+ * in case poll callback didn't have time to run.
+ */
+ virtio_queue_host_notifier_read(notifier);
+ event_notifier_cleanup(notifier);
+ return r;
}
static char *virtio_bus_get_dev_path(DeviceState *dev)
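
With the grab/release scheme above, a vhost-style user takes ioeventfd ownership for its whole start/stop window and assigns host notifiers itself, as the vhost.c hunks earlier in this patch do. A condensed sketch of the intended pairing (hypothetical demo function; virtio_device_grab_ioeventfd() and virtio_bus_set_host_notifier() are the real helpers):

    #include "qemu/osdep.h"
    #include "hw/virtio/virtio.h"
    #include "hw/virtio/virtio-bus.h"

    /* Sketch only: grab ioeventfd ownership, then wire up one notifier per
     * queue.  The teardown path does the reverse and finally calls
     * virtio_device_release_ioeventfd(). */
    static int demo_start_notifiers(VirtIODevice *vdev, VirtioBusState *bus, int nvqs)
    {
        int i, r = virtio_device_grab_ioeventfd(vdev);

        if (r < 0) {
            return r;                 /* the proxy cannot provide ioeventfd */
        }
        for (i = 0; i < nvqs; i++) {
            r = virtio_bus_set_host_notifier(bus, i, true);
            if (r < 0) {
                break;                /* real code must roll back assigned queues */
            }
        }
        return r;
    }
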
diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c
new file mode 100644
index 0000000000..a1b09064c0
--- /dev/null
+++ b/hw/virtio/virtio-crypto-pci.c
@@ -0,0 +1,77 @@
+/*
+ * Virtio crypto device
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "hw/pci/pci.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-crypto.h"
+#include "qapi/error.h"
+
+static Property virtio_crypto_pci_properties[] = {
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOCryptoPCI *vcrypto = VIRTIO_CRYPTO_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vcrypto->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ virtio_pci_force_virtio_1(vpci_dev);
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+ object_property_set_link(OBJECT(vcrypto),
+ OBJECT(vcrypto->vdev.conf.cryptodev), "cryptodev",
+ NULL);
+}
+
+static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = virtio_crypto_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = virtio_crypto_pci_properties;
+ dc->hotpluggable = false;
+ pcidev_k->class_id = PCI_CLASS_OTHERS;
+}
+
+static void virtio_crypto_initfn(Object *obj)
+{
+ VirtIOCryptoPCI *dev = VIRTIO_CRYPTO_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_CRYPTO);
+ object_property_add_alias(obj, "cryptodev", OBJECT(&dev->vdev),
+ "cryptodev", &error_abort);
+}
+
+static const TypeInfo virtio_crypto_pci_info = {
+ .name = TYPE_VIRTIO_CRYPTO_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOCryptoPCI),
+ .instance_init = virtio_crypto_initfn,
+ .class_init = virtio_crypto_pci_class_init,
+};
+
+static void virtio_crypto_pci_register_types(void)
+{
+ type_register_static(&virtio_crypto_pci_info);
+}
+type_init(virtio_crypto_pci_register_types)
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
new file mode 100644
index 0000000000..2f2467e859
--- /dev/null
+++ b/hw/virtio/virtio-crypto.c
@@ -0,0 +1,908 @@
+/*
+ * Virtio crypto Support
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * Authors:
+ * Gonglei <arei.gonglei@huawei.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version. See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/iov.h"
+#include "hw/qdev.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-crypto.h"
+#include "hw/virtio/virtio-access.h"
+#include "standard-headers/linux/virtio_ids.h"
+
+#define VIRTIO_CRYPTO_VM_VERSION 1
+
+/*
+ * Convert a virtqueue index to a crypto queue index.
+ * The control virtqueue comes after the data virtqueues,
+ * so the input value doesn't need to be adjusted.
+ */
+static inline int virtio_crypto_vq2q(int queue_index)
+{
+ return queue_index;
+}
+
+static int
+virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
+ CryptoDevBackendSymSessionInfo *info,
+ struct virtio_crypto_cipher_session_para *cipher_para,
+ struct iovec **iov, unsigned int *out_num)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ unsigned int num = *out_num;
+
+ info->cipher_alg = ldl_le_p(&cipher_para->algo);
+ info->key_len = ldl_le_p(&cipher_para->keylen);
+ info->direction = ldl_le_p(&cipher_para->op);
+ DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n",
+ info->cipher_alg, info->direction);
+
+ if (info->key_len > vcrypto->conf.max_cipher_key_len) {
+ error_report("virtio-crypto length of cipher key is too big: %u",
+ info->key_len);
+ return -VIRTIO_CRYPTO_ERR;
+ }
+ /* Get cipher key */
+ if (info->key_len > 0) {
+ size_t s;
+ DPRINTF("keylen=%" PRIu32 "\n", info->key_len);
+
+ info->cipher_key = g_malloc(info->key_len);
+ s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len);
+ if (unlikely(s != info->key_len)) {
+ virtio_error(vdev, "virtio-crypto cipher key incorrect");
+ return -EFAULT;
+ }
+ iov_discard_front(iov, &num, info->key_len);
+ *out_num = num;
+ }
+
+ return 0;
+}
+
+static int64_t
+virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_sym_create_session_req *sess_req,
+ uint32_t queue_id,
+ uint32_t opcode,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ CryptoDevBackendSymSessionInfo info;
+ int64_t session_id;
+ int queue_index;
+ uint32_t op_type;
+ Error *local_err = NULL;
+ int ret;
+
+ memset(&info, 0, sizeof(info));
+ op_type = ldl_le_p(&sess_req->op_type);
+ info.op_type = op_type;
+ info.op_code = opcode;
+
+ if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ &sess_req->u.cipher.para,
+ &iov, &out_num);
+ if (ret < 0) {
+ goto err;
+ }
+ } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ size_t s;
+ /* cipher part */
+ ret = virtio_crypto_cipher_session_helper(vdev, &info,
+ &sess_req->u.chain.para.cipher_param,
+ &iov, &out_num);
+ if (ret < 0) {
+ goto err;
+ }
+ /* hash part */
+ info.alg_chain_order = ldl_le_p(
+ &sess_req->u.chain.para.alg_chain_order);
+ info.add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
+ info.hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
+ if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
+ info.hash_alg = ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
+ info.auth_key_len = ldl_le_p(
+ &sess_req->u.chain.para.u.mac_param.auth_key_len);
+ info.hash_result_len = ldl_le_p(
+ &sess_req->u.chain.para.u.mac_param.hash_result_len);
+ if (info.auth_key_len > vcrypto->conf.max_auth_key_len) {
+ error_report("virtio-crypto length of auth key is too big: %u",
+ info.auth_key_len);
+ ret = -VIRTIO_CRYPTO_ERR;
+ goto err;
+ }
+ /* get auth key */
+ if (info.auth_key_len > 0) {
+ DPRINTF("auth_keylen=%" PRIu32 "\n", info.auth_key_len);
+ info.auth_key = g_malloc(info.auth_key_len);
+ s = iov_to_buf(iov, out_num, 0, info.auth_key,
+ info.auth_key_len);
+ if (unlikely(s != info.auth_key_len)) {
+ virtio_error(vdev,
+ "virtio-crypto authenticated key incorrect");
+ ret = -EFAULT;
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, info.auth_key_len);
+ }
+ } else if (info.hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
+ info.hash_alg = ldl_le_p(
+ &sess_req->u.chain.para.u.hash_param.algo);
+ info.hash_result_len = ldl_le_p(
+ &sess_req->u.chain.para.u.hash_param.hash_result_len);
+ } else {
+ /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ error_report("unsupported hash mode");
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ goto err;
+ }
+ } else {
+ /* VIRTIO_CRYPTO_SYM_OP_NONE */
+ error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ goto err;
+ }
+
+ queue_index = virtio_crypto_vq2q(queue_id);
+ session_id = cryptodev_backend_sym_create_session(
+ vcrypto->cryptodev,
+ &info, queue_index, &local_err);
+ if (session_id >= 0) {
+ DPRINTF("create session_id=%" PRIu64 " successfully\n",
+ session_id);
+
+ ret = session_id;
+ } else {
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ ret = -VIRTIO_CRYPTO_ERR;
+ }
+
+err:
+ g_free(info.cipher_key);
+ g_free(info.auth_key);
+ return ret;
+}
+
+static uint8_t
+virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_destroy_session_req *close_sess_req,
+ uint32_t queue_id)
+{
+ int ret;
+ uint64_t session_id;
+ uint32_t status;
+ Error *local_err = NULL;
+
+ session_id = ldq_le_p(&close_sess_req->session_id);
+ DPRINTF("close session, id=%" PRIu64 "\n", session_id);
+
+ ret = cryptodev_backend_sym_close_session(
+ vcrypto->cryptodev, session_id, queue_id, &local_err);
+ if (ret == 0) {
+ status = VIRTIO_CRYPTO_OK;
+ } else {
+ if (local_err) {
+ error_report_err(local_err);
+ } else {
+ error_report("destroy session failed");
+ }
+ status = VIRTIO_CRYPTO_ERR;
+ }
+
+ return status;
+}
+
+static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ struct virtio_crypto_op_ctrl_req ctrl;
+ VirtQueueElement *elem;
+ struct iovec *in_iov;
+ struct iovec *out_iov;
+ unsigned in_num;
+ unsigned out_num;
+ uint32_t queue_id;
+ uint32_t opcode;
+ struct virtio_crypto_session_input input;
+ int64_t session_id;
+ uint8_t status;
+ size_t s;
+
+ for (;;) {
+ elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
+ if (!elem) {
+ break;
+ }
+ if (elem->out_num < 1 || elem->in_num < 1) {
+ virtio_error(vdev, "virtio-crypto ctrl missing headers");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
+ }
+
+ out_num = elem->out_num;
+ out_iov = elem->out_sg;
+ in_num = elem->in_num;
+ in_iov = elem->in_sg;
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
+ != sizeof(ctrl))) {
+ virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
+ virtqueue_detach_element(vq, elem, 0);
+ g_free(elem);
+ break;
+ }
+ iov_discard_front(&out_iov, &out_num, sizeof(ctrl));
+
+ opcode = ldl_le_p(&ctrl.header.opcode);
+ queue_id = ldl_le_p(&ctrl.header.queue_id);
+
+ switch (opcode) {
+ case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
+ memset(&input, 0, sizeof(input));
+ session_id = virtio_crypto_create_sym_session(vcrypto,
+ &ctrl.u.sym_create_session,
+ queue_id, opcode,
+ out_iov, out_num);
+ /* A serious error: the virtio crypto device needs to be reset */
+ if (session_id == -EFAULT) {
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
+ stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
+ } else if (session_id == -VIRTIO_CRYPTO_ERR) {
+ stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
+ } else {
+ /* Set the session id */
+ stq_le_p(&input.session_id, session_id);
+ stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
+ }
+
+ s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
+ if (unlikely(s != sizeof(input))) {
+ virtio_error(vdev, "virtio-crypto input incorrect");
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ }
+ virtqueue_push(vq, elem, sizeof(input));
+ virtio_notify(vdev, vq);
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
+ case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
+ status = virtio_crypto_handle_close_session(vcrypto,
+ &ctrl.u.destroy_session, queue_id);
+ /* The status occupies only one byte, so we can use it directly */
+ s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
+ if (unlikely(s != sizeof(status))) {
+ virtio_error(vdev, "virtio-crypto status incorrect");
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ }
+ virtqueue_push(vq, elem, sizeof(status));
+ virtio_notify(vdev, vq);
+ break;
+ case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
+ case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
+ case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
+ default:
+ error_report("virtio-crypto unsupported ctrl opcode: %d", opcode);
+ memset(&input, 0, sizeof(input));
+ stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
+ s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
+ if (unlikely(s != sizeof(input))) {
+ virtio_error(vdev, "virtio-crypto input incorrect");
+ virtqueue_detach_element(vq, elem, 0);
+ break;
+ }
+ virtqueue_push(vq, elem, sizeof(input));
+ virtio_notify(vdev, vq);
+
+ break;
+ } /* end switch case */
+
+ g_free(elem);
+ } /* end for loop */
+}
+
+static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
+ VirtIOCryptoReq *req)
+{
+ req->vcrypto = vcrypto;
+ req->vq = vq;
+ req->in = NULL;
+ req->in_iov = NULL;
+ req->in_num = 0;
+ req->in_len = 0;
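+ /*
+ * CRYPTODEV_BACKEND_ALG__MAX acts as a "no operation set up yet" marker;
+ * virtio_crypto_free_request() only frees u.sym_op_info once flags has
+ * been set to CRYPTODEV_BACKEND_ALG_SYM.
+ */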
+ req->flags = CRYPTODEV_BACKEND_ALG__MAX;
+ req->u.sym_op_info = NULL;
+}
+
+static void virtio_crypto_free_request(VirtIOCryptoReq *req)
+{
+ if (req) {
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ g_free(req->u.sym_op_info);
+ }
+ g_free(req);
+ }
+}
+
+static void
+virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
+ VirtIOCryptoReq *req,
+ uint32_t status,
+ CryptoDevBackendSymOpInfo *sym_op_info)
+{
+ size_t s, len;
+
+ if (status != VIRTIO_CRYPTO_OK) {
+ return;
+ }
+
+ len = sym_op_info->dst_len;
+ /* Save the cipher result */
+ s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
+ if (s != len) {
+ virtio_error(vdev, "virtio-crypto dest data incorrect");
+ return;
+ }
+
+ iov_discard_front(&req->in_iov, &req->in_num, len);
+
+ if (sym_op_info->op_type ==
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ /* Save the digest result */
+ s = iov_from_buf(req->in_iov, req->in_num, 0,
+ sym_op_info->digest_result,
+ sym_op_info->digest_result_len);
+ if (s != sym_op_info->digest_result_len) {
+ virtio_error(vdev, "virtio-crypto digest result incorrect");
+ }
+ }
+}
+
+static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
+{
+ VirtIOCrypto *vcrypto = req->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+ if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+ virtio_crypto_sym_input_data_helper(vdev, req, status,
+ req->u.sym_op_info);
+ }
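+ /* req->in points at the virtio_crypto_inhdr status byte kept at the very
+ * end of the guest-writable buffers, see virtio_crypto_handle_request(). */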
+ stb_p(&req->in->status, status);
+ virtqueue_push(req->vq, &req->elem, req->in_len);
+ virtio_notify(vdev, req->vq);
+}
+
+static VirtIOCryptoReq *
+virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
+{
+ VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
+
+ if (req) {
+ virtio_crypto_init_request(s, vq, req);
+ }
+ return req;
+}
+
+static CryptoDevBackendSymOpInfo *
+virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+ struct virtio_crypto_cipher_para *cipher_para,
+ struct virtio_crypto_alg_chain_data_para *alg_chain_para,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ CryptoDevBackendSymOpInfo *op_info;
+ uint32_t src_len = 0, dst_len = 0;
+ uint32_t iv_len = 0;
+ uint32_t aad_len = 0, hash_result_len = 0;
+ uint32_t hash_start_src_offset = 0, len_to_hash = 0;
+ uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
+
+ size_t max_len, curr_size = 0;
+ size_t s;
+
+ /* Plain cipher */
+ if (cipher_para) {
+ iv_len = ldl_le_p(&cipher_para->iv_len);
+ src_len = ldl_le_p(&cipher_para->src_data_len);
+ dst_len = ldl_le_p(&cipher_para->dst_data_len);
+ } else if (alg_chain_para) { /* Algorithm chain */
+ iv_len = ldl_le_p(&alg_chain_para->iv_len);
+ src_len = ldl_le_p(&alg_chain_para->src_data_len);
+ dst_len = ldl_le_p(&alg_chain_para->dst_data_len);
+
+ aad_len = ldl_le_p(&alg_chain_para->aad_len);
+ hash_result_len = ldl_le_p(&alg_chain_para->hash_result_len);
+ hash_start_src_offset = ldl_le_p(
+ &alg_chain_para->hash_start_src_offset);
+ cipher_start_src_offset = ldl_le_p(
+ &alg_chain_para->cipher_start_src_offset);
+ len_to_cipher = ldl_le_p(&alg_chain_para->len_to_cipher);
+ len_to_hash = ldl_le_p(&alg_chain_para->len_to_hash);
+ } else {
+ return NULL;
+ }
+
+ max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
+ if (unlikely(max_len > vcrypto->conf.max_size)) {
+ virtio_error(vdev, "virtio-crypto too big length");
+ return NULL;
+ }
+
+ op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
+ op_info->iv_len = iv_len;
+ op_info->src_len = src_len;
+ op_info->dst_len = dst_len;
+ op_info->aad_len = aad_len;
+ op_info->digest_result_len = hash_result_len;
+ op_info->hash_start_src_offset = hash_start_src_offset;
+ op_info->len_to_hash = len_to_hash;
+ op_info->cipher_start_src_offset = cipher_start_src_offset;
+ op_info->len_to_cipher = len_to_cipher;
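+ /*
+ * The whole request is packed into the single op_info->data allocation
+ * in the order it is parsed below:
+ * [ iv | aad | src | dst | digest_result ]
+ * Each pointer set below is a slice of this buffer.
+ */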
+ /* Handle the initialization vector */
+ if (op_info->iv_len > 0) {
+ DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
+ op_info->iv = op_info->data + curr_size;
+
+ s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
+ if (unlikely(s != op_info->iv_len)) {
+ virtio_error(vdev, "virtio-crypto iv incorrect");
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, op_info->iv_len);
+ curr_size += op_info->iv_len;
+ }
+
+ /* Handle the additional authentication data, if it exists */
+ if (op_info->aad_len > 0) {
+ DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
+ op_info->aad_data = op_info->data + curr_size;
+
+ s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
+ if (unlikely(s != op_info->aad_len)) {
+ virtio_error(vdev, "virtio-crypto additional auth data incorrect");
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, op_info->aad_len);
+
+ curr_size += op_info->aad_len;
+ }
+
+ /* Handle the source data */
+ if (op_info->src_len > 0) {
+ DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
+ op_info->src = op_info->data + curr_size;
+
+ s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
+ if (unlikely(s != op_info->src_len)) {
+ virtio_error(vdev, "virtio-crypto source data incorrect");
+ goto err;
+ }
+ iov_discard_front(&iov, &out_num, op_info->src_len);
+
+ curr_size += op_info->src_len;
+ }
+
+ /* Handle the destination data */
+ op_info->dst = op_info->data + curr_size;
+ curr_size += op_info->dst_len;
+
+ DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
+
+ /* Handle the hash digest result */
+ if (hash_result_len > 0) {
+ DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
+ op_info->digest_result = op_info->data + curr_size;
+ }
+
+ return op_info;
+
+err:
+ g_free(op_info);
+ return NULL;
+}
+
+static int
+virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_sym_data_req *req,
+ CryptoDevBackendSymOpInfo **sym_op_info,
+ struct iovec *iov, unsigned int out_num)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ uint32_t op_type;
+ CryptoDevBackendSymOpInfo *op_info;
+
+ op_type = ldl_le_p(&req->op_type);
+
+ if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
+ NULL, iov, out_num);
+ if (!op_info) {
+ return -EFAULT;
+ }
+ op_info->op_type = op_type;
+ } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ op_info = virtio_crypto_sym_op_helper(vdev, NULL,
+ &req->u.chain.para,
+ iov, out_num);
+ if (!op_info) {
+ return -EFAULT;
+ }
+ op_info->op_type = op_type;
+ } else {
+ /* VIRTIO_CRYPTO_SYM_OP_NONE */
+ error_report("virtio-crypto unsupported cipher type");
+ return -VIRTIO_CRYPTO_NOTSUPP;
+ }
+
+ *sym_op_info = op_info;
+
+ return 0;
+}
+
+static int
+virtio_crypto_handle_request(VirtIOCryptoReq *request)
+{
+ VirtIOCrypto *vcrypto = request->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ VirtQueueElement *elem = &request->elem;
+ int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
+ struct virtio_crypto_op_data_req req;
+ int ret;
+ struct iovec *in_iov;
+ struct iovec *out_iov;
+ unsigned in_num;
+ unsigned out_num;
+ uint32_t opcode;
+ uint8_t status = VIRTIO_CRYPTO_ERR;
+ uint64_t session_id;
+ CryptoDevBackendSymOpInfo *sym_op_info = NULL;
+ Error *local_err = NULL;
+
+ if (elem->out_num < 1 || elem->in_num < 1) {
+ virtio_error(vdev, "virtio-crypto dataq missing headers");
+ return -1;
+ }
+
+ out_num = elem->out_num;
+ out_iov = elem->out_sg;
+ in_num = elem->in_num;
+ in_iov = elem->in_sg;
+ if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
+ != sizeof(req))) {
+ virtio_error(vdev, "virtio-crypto request outhdr too short");
+ return -1;
+ }
+ iov_discard_front(&out_iov, &out_num, sizeof(req));
+
+ if (in_iov[in_num - 1].iov_len <
+ sizeof(struct virtio_crypto_inhdr)) {
+ virtio_error(vdev, "virtio-crypto request inhdr too short");
+ return -1;
+ }
+ /* We always touch the last byte, so just see how big in_iov is. */
+ request->in_len = iov_size(in_iov, in_num);
+ request->in = (void *)in_iov[in_num - 1].iov_base
+ + in_iov[in_num - 1].iov_len
+ - sizeof(struct virtio_crypto_inhdr);
+ iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
+
+ /*
+ * The length of the operation result, including dest_data
+ * and digest_result if it exists.
+ */
+ request->in_num = in_num;
+ request->in_iov = in_iov;
+
+ opcode = ldl_le_p(&req.header.opcode);
+ session_id = ldq_le_p(&req.header.session_id);
+
+ switch (opcode) {
+ case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+ case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+ ret = virtio_crypto_handle_sym_req(vcrypto,
+ &req.u.sym_req,
+ &sym_op_info,
+ out_iov, out_num);
+ /* Serious errors, need to reset virtio crypto device */
+ if (ret == -EFAULT) {
+ return -1;
+ } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
+ virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+ virtio_crypto_free_request(request);
+ } else {
+ sym_op_info->session_id = session_id;
+
+ /* Set request's parameter */
+ request->flags = CRYPTODEV_BACKEND_ALG_SYM;
+ request->u.sym_op_info = sym_op_info;
+ ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
+ request, queue_index, &local_err);
+ if (ret < 0) {
+ status = -ret;
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ } else { /* ret == VIRTIO_CRYPTO_OK */
+ status = ret;
+ }
+ virtio_crypto_req_complete(request, status);
+ virtio_crypto_free_request(request);
+ }
+ break;
+ case VIRTIO_CRYPTO_HASH:
+ case VIRTIO_CRYPTO_MAC:
+ case VIRTIO_CRYPTO_AEAD_ENCRYPT:
+ case VIRTIO_CRYPTO_AEAD_DECRYPT:
+ default:
+ error_report("virtio-crypto unsupported dataq opcode: %u",
+ opcode);
+ virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+ virtio_crypto_free_request(request);
+ }
+
+ return 0;
+}
+
+static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ VirtIOCryptoReq *req;
+
+ while ((req = virtio_crypto_get_request(vcrypto, vq))) {
+ if (virtio_crypto_handle_request(req) < 0) {
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_crypto_free_request(req);
+ break;
+ }
+ }
+}
+
+static void virtio_crypto_dataq_bh(void *opaque)
+{
+ VirtIOCryptoQueue *q = opaque;
+ VirtIOCrypto *vcrypto = q->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+ /* This happens when device was stopped but BH wasn't. */
+ if (!vdev->vm_running) {
+ return;
+ }
+
+ /* Just in case the driver is not ready any more */
+ if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
+ return;
+ }
+
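+ /*
+ * Drain the queue with notifications disabled, then re-enable them and
+ * re-check for new buffers, so that a request the guest added in between
+ * is not missed.
+ */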
+ for (;;) {
+ virtio_crypto_handle_dataq(vdev, q->dataq);
+ virtio_queue_set_notification(q->dataq, 1);
+
+ /* Are we done or did the guest add more buffers? */
+ if (virtio_queue_empty(q->dataq)) {
+ break;
+ }
+
+ virtio_queue_set_notification(q->dataq, 0);
+ }
+}
+
+static void
+virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ VirtIOCryptoQueue *q =
+ &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))];
+
+ /* This happens when device was stopped but VCPU wasn't. */
+ if (!vdev->vm_running) {
+ return;
+ }
+ virtio_queue_set_notification(vq, 0);
+ qemu_bh_schedule(q->dataq_bh);
+}
+
+static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
+ uint64_t features,
+ Error **errp)
+{
+ return features;
+}
+
+static void virtio_crypto_reset(VirtIODevice *vdev)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ /* multiqueue is disabled by default */
+ vcrypto->curr_queues = 1;
+ if (!vcrypto->cryptodev->ready) {
+ vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
+ } else {
+ vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
+ }
+}
+
+static void virtio_crypto_init_config(VirtIODevice *vdev)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+
+ vcrypto->conf.crypto_services =
+ vcrypto->conf.cryptodev->conf.crypto_services;
+ vcrypto->conf.cipher_algo_l =
+ vcrypto->conf.cryptodev->conf.cipher_algo_l;
+ vcrypto->conf.cipher_algo_h =
+ vcrypto->conf.cryptodev->conf.cipher_algo_h;
+ vcrypto->conf.hash_algo = vcrypto->conf.cryptodev->conf.hash_algo;
+ vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l;
+ vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h;
+ vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo;
+ vcrypto->conf.max_cipher_key_len =
+ vcrypto->conf.cryptodev->conf.max_cipher_key_len;
+ vcrypto->conf.max_auth_key_len =
+ vcrypto->conf.cryptodev->conf.max_auth_key_len;
+ vcrypto->conf.max_size = vcrypto->conf.cryptodev->conf.max_size;
+}
+
+static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
+ int i;
+
+ vcrypto->cryptodev = vcrypto->conf.cryptodev;
+ if (vcrypto->cryptodev == NULL) {
+ error_setg(errp, "'cryptodev' parameter expects a valid object");
+ return;
+ }
+
+ vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
+ if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
+ error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
+ "must be a postive integer less than %d.",
+ vcrypto->max_queues, VIRTIO_QUEUE_MAX);
+ return;
+ }
+
+ virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size);
+ vcrypto->curr_queues = 1;
+ vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues);
+ for (i = 0; i < vcrypto->max_queues; i++) {
+ vcrypto->vqs[i].dataq =
+ virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
+ vcrypto->vqs[i].dataq_bh =
+ qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]);
+ vcrypto->vqs[i].vcrypto = vcrypto;
+ }
+
+ vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
+ if (!vcrypto->cryptodev->ready) {
+ vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
+ } else {
+ vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
+ }
+
+ virtio_crypto_init_config(vdev);
+}
+
+static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
+ VirtIOCryptoQueue *q;
+ int i, max_queues;
+
+ max_queues = vcrypto->multiqueue ? vcrypto->max_queues : 1;
+ for (i = 0; i < max_queues; i++) {
+ virtio_del_queue(vdev, i);
+ q = &vcrypto->vqs[i];
+ qemu_bh_delete(q->dataq_bh);
+ }
+
+ g_free(vcrypto->vqs);
+
+ virtio_cleanup(vdev);
+}
+
+static const VMStateDescription vmstate_virtio_crypto = {
+ .name = "virtio-crypto",
+ .unmigratable = 1,
+ .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION,
+ .version_id = VIRTIO_CRYPTO_VM_VERSION,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static Property virtio_crypto_properties[] = {
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VirtIOCrypto *c = VIRTIO_CRYPTO(vdev);
+ struct virtio_crypto_config crypto_cfg = {};
+
+ /*
+ * Virtio-crypto device conforms to VIRTIO 1.0 which is always LE,
+ * so we can use LE accessors directly.
+ */
+ stl_le_p(&crypto_cfg.status, c->status);
+ stl_le_p(&crypto_cfg.max_dataqueues, c->max_queues);
+ stl_le_p(&crypto_cfg.crypto_services, c->conf.crypto_services);
+ stl_le_p(&crypto_cfg.cipher_algo_l, c->conf.cipher_algo_l);
+ stl_le_p(&crypto_cfg.cipher_algo_h, c->conf.cipher_algo_h);
+ stl_le_p(&crypto_cfg.hash_algo, c->conf.hash_algo);
+ stl_le_p(&crypto_cfg.mac_algo_l, c->conf.mac_algo_l);
+ stl_le_p(&crypto_cfg.mac_algo_h, c->conf.mac_algo_h);
+ stl_le_p(&crypto_cfg.aead_algo, c->conf.aead_algo);
+ stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
+ stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
+ stq_le_p(&crypto_cfg.max_size, c->conf.max_size);
+
+ memcpy(config, &crypto_cfg, c->config_size);
+}
+
+static void virtio_crypto_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ dc->props = virtio_crypto_properties;
+ dc->vmsd = &vmstate_virtio_crypto;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ vdc->realize = virtio_crypto_device_realize;
+ vdc->unrealize = virtio_crypto_device_unrealize;
+ vdc->get_config = virtio_crypto_get_config;
+ vdc->get_features = virtio_crypto_get_features;
+ vdc->reset = virtio_crypto_reset;
+}
+
+static void virtio_crypto_instance_init(Object *obj)
+{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj);
+
+ /*
+ * The default config_size is sizeof(struct virtio_crypto_config).
+ * Can be overridden with virtio_crypto_set_config_size.
+ */
+ vcrypto->config_size = sizeof(struct virtio_crypto_config);
+
+ object_property_add_link(obj, "cryptodev",
+ TYPE_CRYPTODEV_BACKEND,
+ (Object **)&vcrypto->conf.cryptodev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
+}
+
+static const TypeInfo virtio_crypto_info = {
+ .name = TYPE_VIRTIO_CRYPTO,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOCrypto),
+ .instance_init = virtio_crypto_instance_init,
+ .class_init = virtio_crypto_class_init,
+};
+
+static void virtio_register_types(void)
+{
+ type_register_static(&virtio_crypto_info);
+}
+
+type_init(virtio_register_types)
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 13798b3cb8..17412cb7b5 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -89,38 +89,12 @@ typedef struct {
uint32_t guest_page_shift;
/* virtio-bus */
VirtioBusState bus;
- bool ioeventfd_disabled;
- bool ioeventfd_started;
bool format_transport_address;
} VirtIOMMIOProxy;
-static bool virtio_mmio_ioeventfd_started(DeviceState *d)
+static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- return proxy->ioeventfd_started;
-}
-
-static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- proxy->ioeventfd_started = started;
-}
-
-static bool virtio_mmio_ioeventfd_disabled(DeviceState *d)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled;
-}
-
-static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
-
- proxy->ioeventfd_disabled = disabled;
+ return kvm_eventfds_enabled();
}
static int virtio_mmio_ioeventfd_assign(DeviceState *d,
@@ -217,7 +191,7 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
return virtio_queue_get_addr(vdev, vdev->queue_sel)
>> proxy->guest_page_shift;
case VIRTIO_MMIO_INTERRUPTSTATUS:
- return vdev->isr;
+ return atomic_read(&vdev->isr);
case VIRTIO_MMIO_STATUS:
return vdev->status;
case VIRTIO_MMIO_HOSTFEATURESSEL:
@@ -325,7 +299,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
}
break;
case VIRTIO_MMIO_INTERRUPTACK:
- vdev->isr &= ~value;
+ atomic_and(&vdev->isr, ~value);
virtio_update_irq(vdev);
break;
case VIRTIO_MMIO_STATUS:
@@ -373,7 +347,7 @@ static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
if (!vdev) {
return;
}
- level = (vdev->isr != 0);
+ level = (atomic_read(&vdev->isr) != 0);
DPRINTF("virtio_mmio setting IRQ %d\n", level);
qemu_set_irq(proxy->irq, level);
}
@@ -557,10 +531,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
k->save_config = virtio_mmio_save_config;
k->load_config = virtio_mmio_load_config;
k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
- k->ioeventfd_started = virtio_mmio_ioeventfd_started;
- k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
k->has_variable_vring_alignment = true;
bus_class->max_dev = 1;
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 755f9218b7..21c2b9dbfc 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -73,7 +73,7 @@ static void virtio_pci_notify(DeviceState *d, uint16_t vector)
msix_notify(&proxy->pci_dev, vector);
else {
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
+ pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
}
}
@@ -262,38 +262,21 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
return 0;
}
-static bool virtio_pci_ioeventfd_started(DeviceState *d)
+static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
- return proxy->ioeventfd_started;
+ return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}
-static void virtio_pci_ioeventfd_set_started(DeviceState *d, bool started,
- bool err)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- proxy->ioeventfd_started = started;
-}
+#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
-static bool virtio_pci_ioeventfd_disabled(DeviceState *d)
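+/* Stride between per-queue notify addresses in the modern BAR: a full page
+ * when the page-per-vq flag is set, otherwise 4 bytes. */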
+static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- return proxy->ioeventfd_disabled ||
- !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD);
+ return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
+ QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}
-static void virtio_pci_ioeventfd_set_disabled(DeviceState *d, bool disabled)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- proxy->ioeventfd_disabled = disabled;
-}
-
-#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
-
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
int n, bool assign)
{
@@ -307,7 +290,7 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
MemoryRegion *modern_mr = &proxy->notify.mr;
MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
MemoryRegion *legacy_mr = &proxy->bar;
- hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
+ hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
virtio_get_queue_index(vq);
hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
@@ -466,8 +449,7 @@ static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
break;
case VIRTIO_PCI_ISR:
/* reading from the ISR also clears it. */
- ret = vdev->isr;
- vdev->isr = 0;
+ ret = atomic_xchg(&vdev->isr, 0);
pci_irq_deassert(&proxy->pci_dev);
break;
case VIRTIO_MSI_CONFIG_VECTOR:
@@ -1192,7 +1174,9 @@ static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
break;
case VIRTIO_PCI_COMMON_DF:
if (proxy->dfselect <= 1) {
- val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ val = (vdev->host_features & ~vdc->legacy_features) >>
(32 * proxy->dfselect);
}
break;
@@ -1370,7 +1354,8 @@ static void virtio_pci_notify_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
VirtIODevice *vdev = opaque;
- unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
+ unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);
if (queue < VIRTIO_QUEUE_MAX) {
virtio_queue_notify(vdev, queue);
@@ -1393,9 +1378,7 @@ static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
{
VirtIOPCIProxy *proxy = opaque;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- uint64_t val = vdev->isr;
-
- vdev->isr = 0;
+ uint64_t val = atomic_xchg(&vdev->isr, 0);
pci_irq_deassert(&proxy->pci_dev);
return val;
@@ -1520,7 +1503,7 @@ static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
&notify_pio_ops,
virtio_bus_get_device(&proxy->bus),
"virtio-pci-notify-pio",
- proxy->notify.size);
+ proxy->notify_pio.size);
}
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
@@ -1544,7 +1527,7 @@ static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
struct virtio_pci_cap *cap)
{
virtio_pci_modern_region_map(proxy, region, cap,
- &proxy->modern_bar, proxy->modern_mem_bar);
+ &proxy->modern_bar, proxy->modern_mem_bar_idx);
}
static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
@@ -1552,7 +1535,7 @@ static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
struct virtio_pci_cap *cap)
{
virtio_pci_modern_region_map(proxy, region, cap,
- &proxy->io_bar, proxy->modern_io_bar);
+ &proxy->io_bar, proxy->modern_io_bar_idx);
}
static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
@@ -1569,18 +1552,49 @@ static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
&region->mr);
}
+static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ if (virtio_pci_modern(proxy)) {
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
+ }
+
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
+}
+
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
VirtioBusState *bus = &proxy->bus;
bool legacy = virtio_pci_legacy(proxy);
- bool modern = virtio_pci_modern(proxy);
+ bool modern;
bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
uint8_t *config;
uint32_t size;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ /*
+ * Virtio capabilities present without
+ * VIRTIO_F_VERSION_1 confuse guests
+ */
+ if (!proxy->ignore_backend_features &&
+ !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
+ virtio_pci_disable_modern(proxy);
+
+ if (!legacy) {
+ error_setg(errp, "Device doesn't support modern mode, and legacy"
+ " mode is disabled");
+ error_append_hint(errp, "Set disable-legacy to off\n");
+
+ return;
+ }
+ }
+
+ modern = virtio_pci_modern(proxy);
+
config = proxy->pci_dev.config;
if (proxy->class_code) {
pci_config_set_class(config, proxy->class_code);
@@ -1609,7 +1623,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
struct virtio_pci_notify_cap notify = {
.cap.cap_len = sizeof notify,
.notify_off_multiplier =
- cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
+ cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
};
struct virtio_pci_cfg_cap cfg = {
.cap.cap_len = sizeof cfg,
@@ -1622,7 +1636,6 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
struct virtio_pci_cfg_cap *cfg_mask;
- virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
virtio_pci_modern_regions_init(proxy);
virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
@@ -1634,14 +1647,14 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
memory_region_init(&proxy->io_bar, OBJECT(proxy),
"virtio-pci-io", 0x4);
- pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
+ pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
&notify_pio.cap);
}
- pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
+ pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_PREFETCH |
PCI_BASE_ADDRESS_MEM_TYPE_64,
@@ -1657,7 +1670,7 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
if (proxy->nvectors) {
int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
- proxy->msix_bar);
+ proxy->msix_bar_idx);
if (err) {
/* Notice when a system that supports MSIx can't initialize it. */
if (err != -ENOTSUP) {
@@ -1680,15 +1693,9 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
&virtio_pci_config_ops,
proxy, "virtio-pci", size);
- pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
+ pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
}
-
- if (!kvm_has_many_ioeventfds()) {
- proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
- }
-
- virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
static void virtio_pci_device_unplugged(DeviceState *d)
@@ -1717,6 +1724,10 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
bool pcie_port = pci_bus_is_express(pci_dev->bus) &&
!pci_bus_is_root(pci_dev->bus);
+ if (!kvm_has_many_ioeventfds()) {
+ proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
+ }
+
/*
* virtio pci bar layout used by default.
* subclasses can re-arrange things if needed.
@@ -1726,10 +1737,10 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
* region 4+5 -- virtio modern memory (64bit) bar
*
*/
- proxy->legacy_io_bar = 0;
- proxy->msix_bar = 1;
- proxy->modern_io_bar = 2;
- proxy->modern_mem_bar = 4;
+ proxy->legacy_io_bar_idx = 0;
+ proxy->msix_bar_idx = 1;
+ proxy->modern_io_bar_idx = 2;
+ proxy->modern_mem_bar_idx = 4;
proxy->common.offset = 0x0;
proxy->common.size = 0x1000;
@@ -1744,8 +1755,7 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;
proxy->notify.offset = 0x3000;
- proxy->notify.size =
- QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
+ proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
proxy->notify_pio.offset = 0x0;
@@ -1754,8 +1764,8 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
/* subclasses can enforce modern, so do this unconditionally */
memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
- 2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
- VIRTIO_QUEUE_MAX);
+ /* PCI BAR regions must be powers of 2 */
+ pow2ceil(proxy->notify.offset + proxy->notify.size));
memory_region_init_alias(&proxy->modern_cfg,
OBJECT(proxy),
@@ -1770,6 +1780,14 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
+ if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
+ error_setg(errp, "device cannot work as neither modern nor legacy mode"
+ " is enabled");
+ error_append_hint(errp, "Set either disable-modern or disable-legacy"
+ " to off\n");
+ return;
+ }
+
if (pcie_port && pci_is_express(pci_dev)) {
int pos;
@@ -1833,6 +1851,10 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
+ DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
+ DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
+ ignore_backend_features, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2055,6 +2077,54 @@ static const TypeInfo vhost_scsi_pci_info = {
};
#endif
+/* vhost-vsock-pci */
+
+#ifdef CONFIG_VHOST_VSOCK
+static Property vhost_vsock_pci_properties[] = {
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VHostVSockPCI *dev = VHOST_VSOCK_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = vhost_vsock_pci_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = vhost_vsock_pci_properties;
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK;
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
+}
+
+static void vhost_vsock_pci_instance_init(Object *obj)
+{
+ VHostVSockPCI *dev = VHOST_VSOCK_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_VSOCK);
+}
+
+static const TypeInfo vhost_vsock_pci_info = {
+ .name = TYPE_VHOST_VSOCK_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VHostVSockPCI),
+ .instance_init = vhost_vsock_pci_instance_init,
+ .class_init = vhost_vsock_pci_class_init,
+};
+#endif
+
/* virtio-balloon-pci */
static Property virtio_balloon_pci_properties[] = {
@@ -2444,13 +2514,11 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
k->vmstate_change = virtio_pci_vmstate_change;
+ k->pre_plugged = virtio_pci_pre_plugged;
k->device_plugged = virtio_pci_device_plugged;
k->device_unplugged = virtio_pci_device_unplugged;
k->query_nvectors = virtio_pci_query_nvectors;
- k->ioeventfd_started = virtio_pci_ioeventfd_started;
- k->ioeventfd_set_started = virtio_pci_ioeventfd_set_started;
- k->ioeventfd_disabled = virtio_pci_ioeventfd_disabled;
- k->ioeventfd_set_disabled = virtio_pci_ioeventfd_set_disabled;
+ k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
}
@@ -2485,6 +2553,9 @@ static void virtio_pci_register_types(void)
#ifdef CONFIG_VHOST_SCSI
type_register_static(&vhost_scsi_pci_info);
#endif
+#ifdef CONFIG_VHOST_VSOCK
+ type_register_static(&vhost_vsock_pci_info);
+#endif
}
type_init(virtio_pci_register_types)
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 25fbf8a375..5e078866c4 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -25,12 +25,17 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-input.h"
#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-crypto.h"
+
#ifdef CONFIG_VIRTFS
#include "hw/9pfs/virtio-9p.h"
#endif
#ifdef CONFIG_VHOST_SCSI
#include "hw/virtio/vhost-scsi.h"
#endif
+#ifdef CONFIG_VHOST_VSOCK
+#include "hw/virtio/vhost-vsock.h"
+#endif
typedef struct VirtIOPCIProxy VirtIOPCIProxy;
typedef struct VirtIOBlkPCI VirtIOBlkPCI;
@@ -44,6 +49,8 @@ typedef struct VirtIOInputPCI VirtIOInputPCI;
typedef struct VirtIOInputHIDPCI VirtIOInputHIDPCI;
typedef struct VirtIOInputHostPCI VirtIOInputHostPCI;
typedef struct VirtIOGPUPCI VirtIOGPUPCI;
+typedef struct VHostVSockPCI VHostVSockPCI;
+typedef struct VirtIOCryptoPCI VirtIOCryptoPCI;
/* virtio-pci-bus */
@@ -64,6 +71,7 @@ enum {
VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT,
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
+ VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT,
};
/* Need to activate work-arounds for buggy guests at vmstate load. */
@@ -84,6 +92,10 @@ enum {
#define VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY \
(1 << VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT)
+/* page per vq flag to be used by split drivers within guests */
+#define VIRTIO_PCI_FLAG_PAGE_PER_VQ \
+ (1 << VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT)
+
typedef struct {
MSIMessage msg;
int virq;
@@ -134,13 +146,14 @@ struct VirtIOPCIProxy {
MemoryRegion io_bar;
MemoryRegion modern_cfg;
AddressSpace modern_as;
- uint32_t legacy_io_bar;
- uint32_t msix_bar;
- uint32_t modern_io_bar;
- uint32_t modern_mem_bar;
+ uint32_t legacy_io_bar_idx;
+ uint32_t msix_bar_idx;
+ uint32_t modern_io_bar_idx;
+ uint32_t modern_mem_bar_idx;
int config_cap;
uint32_t flags;
bool disable_modern;
+ bool ignore_backend_features;
OnOffAuto disable_legacy;
uint32_t class_code;
uint32_t nvectors;
@@ -149,8 +162,6 @@ struct VirtIOPCIProxy {
uint32_t guest_features[2];
VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX];
- bool ioeventfd_disabled;
- bool ioeventfd_started;
VirtIOIRQFD *vector_irqfd;
int nvqs_with_notifiers;
VirtioBusState bus;
@@ -172,6 +183,11 @@ static inline void virtio_pci_force_virtio_1(VirtIOPCIProxy *proxy)
proxy->disable_legacy = ON_OFF_AUTO_ON;
}
+static inline void virtio_pci_disable_modern(VirtIOPCIProxy *proxy)
+{
+ proxy->disable_modern = true;
+}
+
/*
* virtio-scsi-pci: This extends VirtioPCIProxy.
*/
@@ -324,6 +340,32 @@ struct VirtIOGPUPCI {
VirtIOGPU vdev;
};
+#ifdef CONFIG_VHOST_VSOCK
+/*
+ * vhost-vsock-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VHOST_VSOCK_PCI "vhost-vsock-pci"
+#define VHOST_VSOCK_PCI(obj) \
+ OBJECT_CHECK(VHostVSockPCI, (obj), TYPE_VHOST_VSOCK_PCI)
+
+struct VHostVSockPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostVSock vdev;
+};
+#endif
+
+/*
+ * virtio-crypto-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_CRYPTO_PCI "virtio-crypto-pci"
+#define VIRTIO_CRYPTO_PCI(obj) \
+ OBJECT_CHECK(VirtIOCryptoPCI, (obj), TYPE_VIRTIO_CRYPTO_PCI)
+
+struct VirtIOCryptoPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOCrypto vdev;
+};
+
/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index cd8ca10177..9639f4e89b 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -120,15 +120,9 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp)
return f;
}
-static int virtio_rng_load(QEMUFile *f, void *opaque, size_t size)
+static int virtio_rng_post_load(void *opaque, int version_id)
{
VirtIORNG *vrng = opaque;
- int ret;
-
- ret = virtio_load(VIRTIO_DEVICE(vrng), f, 1);
- if (ret != 0) {
- return ret;
- }
/* We may have an element ready but couldn't process it due to a quota
* limit. Make sure to try again after live migration when the quota may
@@ -216,7 +210,16 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
virtio_cleanup(vdev);
}
-VMSTATE_VIRTIO_DEVICE(rng, 1, virtio_rng_load, virtio_vmstate_save);
+static const VMStateDescription vmstate_virtio_rng = {
+ .name = "virtio-rng",
+ .minimum_version_id = 1,
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_VIRTIO_DEVICE,
+ VMSTATE_END_OF_LIST()
+ },
+ .post_load = virtio_rng_post_load,
+};
static Property virtio_rng_properties[] = {
/* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 74c085c74d..1af2de2714 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -97,7 +97,6 @@ struct VirtQueue
uint16_t vector;
VirtIOHandleOutput handle_output;
VirtIOHandleOutput handle_aio_output;
- bool use_aio;
VirtIODevice *vdev;
EventNotifier guest_notifier;
EventNotifier host_notifier;
@@ -264,14 +263,59 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
0, elem->out_sg[i].iov_len);
}
-void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
- unsigned int len)
+/* virtqueue_detach_element:
+ * @vq: The #VirtQueue
+ * @elem: The #VirtQueueElement
+ * @len: number of bytes written
+ *
+ * Detach the element from the virtqueue. This function is suitable for device
+ * reset or other situations where a #VirtQueueElement is simply freed and will
+ * not be pushed or discarded.
+ */
+void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
{
- vq->last_avail_idx--;
vq->inuse--;
virtqueue_unmap_sg(vq, elem, len);
}
+/* virtqueue_unpop:
+ * @vq: The #VirtQueue
+ * @elem: The #VirtQueueElement
+ * @len: number of bytes written
+ *
+ * Pretend the most recent element wasn't popped from the virtqueue. The next
+ * call to virtqueue_pop() will refetch the element.
+ */
+void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
+{
+ vq->last_avail_idx--;
+ virtqueue_detach_element(vq, elem, len);
+}
+
+/* virtqueue_rewind:
+ * @vq: The #VirtQueue
+ * @num: Number of elements to push back
+ *
+ * Pretend that elements weren't popped from the virtqueue. The next
+ * virtqueue_pop() will refetch the oldest element.
+ *
+ * Use virtqueue_unpop() instead if you have a VirtQueueElement.
+ *
+ * Returns: true on success, false if @num is greater than the number of in use
+ * elements.
+ */
+bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
+{
+ if (num > vq->inuse) {
+ return false;
+ }
+ vq->last_avail_idx -= num;
+ vq->inuse -= num;
+ return true;
+}
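+/*
+ * Hypothetical usage sketch (not part of this patch): a device model that
+ * popped several elements and then hit a backend error could hand them all
+ * back in one call:
+ *
+ *     if (!virtqueue_rewind(vq, n_popped)) {
+ *         error_report("cannot rewind %u elements", n_popped);
+ *     }
+ */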
+
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
@@ -281,6 +325,10 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
virtqueue_unmap_sg(vq, elem, len);
+ if (unlikely(vq->vdev->broken)) {
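+ /* Once the device is marked broken, do not write the used ring entry;
+ * guest-visible ring state stays untouched until the next reset. */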
+ return;
+ }
+
idx = (idx + vq->used_idx) % vq->vring.num;
uelem.id = elem->index;
@@ -291,6 +339,12 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
uint16_t old, new;
+
+ if (unlikely(vq->vdev->broken)) {
+ vq->inuse -= count;
+ return;
+ }
+
/* Make sure buffer is written before we update index. */
smp_wmb();
trace_virtqueue_flush(vq, count);
@@ -315,9 +369,9 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
/* Check it isn't doing very strange things with descriptor numbers. */
if (num_heads > vq->vring.num) {
- error_report("Guest moved used index from %u to %u",
+ virtio_error(vq->vdev, "Guest moved used index from %u to %u",
idx, vq->shadow_avail_idx);
- exit(1);
+ return -EINVAL;
}
/* On success, callers read a descriptor at vq->last_avail_idx.
* Make sure descriptor read does not bypass avail index read. */
@@ -328,45 +382,49 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
return num_heads;
}
-static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
+static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
+ unsigned int *head)
{
- unsigned int head;
-
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- head = vring_avail_ring(vq, idx % vq->vring.num);
+ *head = vring_avail_ring(vq, idx % vq->vring.num);
/* If their number is silly, that's a fatal mistake. */
- if (head >= vq->vring.num) {
- error_report("Guest says index %u is available", head);
- exit(1);
+ if (*head >= vq->vring.num) {
+ virtio_error(vq->vdev, "Guest says index %u is available", *head);
+ return false;
}
- return head;
+ return true;
}
-static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
- hwaddr desc_pa, unsigned int max)
-{
- unsigned int next;
+enum {
+ VIRTQUEUE_READ_DESC_ERROR = -1,
+ VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
+ VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
+};
+static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
+ hwaddr desc_pa, unsigned int max,
+ unsigned int *next)
+{
/* If this descriptor says it doesn't chain, we're done. */
if (!(desc->flags & VRING_DESC_F_NEXT)) {
- return max;
+ return VIRTQUEUE_READ_DESC_DONE;
}
/* Check they're not leading us off end of descriptors. */
- next = desc->next;
+ *next = desc->next;
/* Make sure compiler knows to grab that: we don't want it changing! */
smp_wmb();
- if (next >= max) {
- error_report("Desc next is %u", next);
- exit(1);
+ if (*next >= max) {
+ virtio_error(vdev, "Desc next is %u", *next);
+ return VIRTQUEUE_READ_DESC_ERROR;
}
- vring_desc_read(vdev, desc, desc_pa, next);
- return next;
+ vring_desc_read(vdev, desc, desc_pa, *next);
+ return VIRTQUEUE_READ_DESC_MORE;
}
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
@@ -375,33 +433,38 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
{
unsigned int idx;
unsigned int total_bufs, in_total, out_total;
+ int rc;
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
- while (virtqueue_num_heads(vq, idx)) {
+ while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
VirtIODevice *vdev = vq->vdev;
unsigned int max, num_bufs, indirect = 0;
VRingDesc desc;
hwaddr desc_pa;
- int i;
+ unsigned int i;
max = vq->vring.num;
num_bufs = total_bufs;
- i = virtqueue_get_head(vq, idx++);
+
+ if (!virtqueue_get_head(vq, idx++, &i)) {
+ goto err;
+ }
+
desc_pa = vq->vring.desc;
vring_desc_read(vdev, &desc, desc_pa, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
- error_report("Invalid size for indirect buffer table");
- exit(1);
+ virtio_error(vdev, "Invalid size for indirect buffer table");
+ goto err;
}
/* If we've got too many, that implies a descriptor loop. */
if (num_bufs >= max) {
- error_report("Looped descriptor");
- exit(1);
+ virtio_error(vdev, "Looped descriptor");
+ goto err;
}
/* loop over the indirect descriptor table */
@@ -415,8 +478,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
do {
/* If we've got too many, that implies a descriptor loop. */
if (++num_bufs > max) {
- error_report("Looped descriptor");
- exit(1);
+ virtio_error(vdev, "Looped descriptor");
+ goto err;
}
if (desc.flags & VRING_DESC_F_WRITE) {
@@ -427,13 +490,24 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
goto done;
}
- } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);
+
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ goto err;
+ }
if (!indirect)
total_bufs = num_bufs;
else
total_bufs++;
}
+
+ if (rc < 0) {
+ goto err;
+ }
+
done:
if (in_bytes) {
*in_bytes = in_total;
@@ -441,6 +515,11 @@ done:
if (out_bytes) {
*out_bytes = out_total;
}
+ return;
+
+err:
+ in_total = out_total = 0;
+ goto done;
}
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
@@ -452,27 +531,35 @@ int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
return in_bytes <= in_total && out_bytes <= out_total;
}
-static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iovec *iov,
+static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
+ hwaddr *addr, struct iovec *iov,
unsigned int max_num_sg, bool is_write,
hwaddr pa, size_t sz)
{
+ bool ok = false;
unsigned num_sg = *p_num_sg;
assert(num_sg <= max_num_sg);
if (!sz) {
- error_report("virtio: zero sized buffers are not allowed");
- exit(1);
+ virtio_error(vdev, "virtio: zero sized buffers are not allowed");
+ goto out;
}
while (sz) {
hwaddr len = sz;
if (num_sg == max_num_sg) {
- error_report("virtio: too many write descriptors in indirect table");
- exit(1);
+ virtio_error(vdev, "virtio: too many write descriptors in "
+ "indirect table");
+ goto out;
}
iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
+ if (!iov[num_sg].iov_base) {
+ virtio_error(vdev, "virtio: bogus descriptor or out of resources");
+ goto out;
+ }
+
iov[num_sg].iov_len = len;
addr[num_sg] = pa;
@@ -480,7 +567,28 @@ static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iove
pa += len;
num_sg++;
}
+ ok = true;
+
+out:
*p_num_sg = num_sg;
+ return ok;
+}
+
+/* Only used by error code paths before we have a VirtQueueElement (therefore
+ * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
+ * yet.
+ */
+static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
+ struct iovec *iov)
+{
+ unsigned int i;
+
+ for (i = 0; i < out_num + in_num; i++) {
+ int is_write = i >= out_num;
+
+ cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
+ iov++;
+ }
}
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
@@ -524,7 +632,7 @@ void virtqueue_map(VirtQueueElement *elem)
VIRTQUEUE_MAX_SIZE, 0);
}
-void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
+static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
VirtQueueElement *elem;
size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
@@ -555,7 +663,11 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
+ int rc;
+ if (unlikely(vdev->broken)) {
+ return NULL;
+ }
if (virtio_queue_empty(vq)) {
return NULL;
}
@@ -569,20 +681,24 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
max = vq->vring.num;
if (vq->inuse >= vq->vring.num) {
- error_report("Virtqueue size exceeded");
- exit(1);
+ virtio_error(vdev, "Virtqueue size exceeded");
+ return NULL;
+ }
+
+ if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
+ return NULL;
}
- i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vq->last_avail_idx);
}
+ i = head;
vring_desc_read(vdev, &desc, desc_pa, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
- error_report("Invalid size for indirect buffer table");
- exit(1);
+ virtio_error(vdev, "Invalid size for indirect buffer table");
+ return NULL;
}
/* loop over the indirect descriptor table */
@@ -594,24 +710,38 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
/* Collect all the descriptors */
do {
+ bool map_ok;
+
if (desc.flags & VRING_DESC_F_WRITE) {
- virtqueue_map_desc(&in_num, addr + out_num, iov + out_num,
- VIRTQUEUE_MAX_SIZE - out_num, true, desc.addr, desc.len);
+ map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
+ iov + out_num,
+ VIRTQUEUE_MAX_SIZE - out_num, true,
+ desc.addr, desc.len);
} else {
if (in_num) {
- error_report("Incorrect order for descriptors");
- exit(1);
+ virtio_error(vdev, "Incorrect order for descriptors");
+ goto err_undo_map;
}
- virtqueue_map_desc(&out_num, addr, iov,
- VIRTQUEUE_MAX_SIZE, false, desc.addr, desc.len);
+ map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
+ VIRTQUEUE_MAX_SIZE, false,
+ desc.addr, desc.len);
+ }
+ if (!map_ok) {
+ goto err_undo_map;
}
/* If we've got too many, that implies a descriptor loop. */
if ((in_num + out_num) > max) {
- error_report("Looped descriptor");
- exit(1);
+ virtio_error(vdev, "Looped descriptor");
+ goto err_undo_map;
}
- } while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);
+
+ rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ goto err_undo_map;
+ }
/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, out_num, in_num);
@@ -629,6 +759,10 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
return elem;
+
+err_undo_map:
+ virtqueue_undo_map_desc(out_num, in_num, iov);
+ return NULL;
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
@@ -720,6 +854,10 @@ static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ if (unlikely(vdev->broken)) {
+ return;
+ }
+
if (k->notify) {
k->notify(qbus->parent, vector);
}
@@ -803,10 +941,11 @@ void virtio_reset(void *opaque)
k->reset(vdev);
}
+ vdev->broken = false;
vdev->guest_features = 0;
vdev->queue_sel = 0;
vdev->status = 0;
- vdev->isr = 0;
+ atomic_set(&vdev->isr, 0);
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
@@ -822,6 +961,7 @@ void virtio_reset(void *opaque)
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
+ vdev->vq[i].inuse = 0;
}
}
@@ -1109,6 +1249,10 @@ static void virtio_queue_notify_vq(VirtQueue *vq)
if (vq->vring.desc && vq->handle_output) {
VirtIODevice *vdev = vq->vdev;
+ if (unlikely(vdev->broken)) {
+ return;
+ }
+
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
vq->handle_output(vdev, vq);
}
@@ -1142,9 +1286,8 @@ void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
}
}
-static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output,
- bool use_aio)
+VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
+ VirtIOHandleOutput handle_output)
{
int i;
@@ -1161,28 +1304,10 @@ static VirtQueue *virtio_add_queue_internal(VirtIODevice *vdev, int queue_size,
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
vdev->vq[i].handle_output = handle_output;
vdev->vq[i].handle_aio_output = NULL;
- vdev->vq[i].use_aio = use_aio;
return &vdev->vq[i];
}
-/* Add a virt queue and mark AIO.
- * An AIO queue will use the AioContext based event interface instead of the
- * default IOHandler and EventNotifier interface.
- */
-VirtQueue *virtio_add_queue_aio(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output)
-{
- return virtio_add_queue_internal(vdev, queue_size, handle_output, true);
-}
-
-/* Add a normal virt queue (on the contrary to the AIO version above. */
-VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
- VirtIOHandleOutput handle_output)
-{
- return virtio_add_queue_internal(vdev, queue_size, handle_output, false);
-}
-
void virtio_del_queue(VirtIODevice *vdev, int n)
{
if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
@@ -1193,11 +1318,16 @@ void virtio_del_queue(VirtIODevice *vdev, int n)
vdev->vq[n].vring.num_default = 0;
}
-void virtio_irq(VirtQueue *vq)
+static void virtio_set_isr(VirtIODevice *vdev, int value)
{
- trace_virtio_irq(vq);
- vq->vdev->isr |= 0x01;
- virtio_notify_vector(vq->vdev, vq->vector);
+ uint8_t old = atomic_read(&vdev->isr);
+
+ /* Do not write ISR if it does not change, so that its cacheline remains
+ * shared in the common case where the guest does not read it.
+ */
+ if ((old & value) != value) {
+ atomic_or(&vdev->isr, value);
+ }
}
bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
@@ -1223,6 +1353,33 @@ bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
+void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
+{
+ if (!virtio_should_notify(vdev, vq)) {
+ return;
+ }
+
+ trace_virtio_notify_irqfd(vdev, vq);
+
+ /*
+ * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
+ * windows drivers included in virtio-win 1.8.0 (circa 2015) are
+ * incorrectly polling this bit during crashdump and hibernation
+ * in MSI mode, causing a hang if this bit is never updated.
+ * Recent releases of Windows do not really shut down, but rather
+ * log out and hibernate to make the next startup faster. Hence,
+ * this manifested as a more serious hang during shutdown with
+ * MSI enabled.
+ * Next driver release from 2016 fixed this problem, so working around it
+ * is not a must, but it's easy to do, so let's do it here.
+ *
+ * Note: it's safe to update ISR from any thread as it was switched
+ * to an atomic operation.
+ */
+ virtio_set_isr(vq->vdev, 0x1);
+ event_notifier_set(&vq->guest_notifier);
+}
+
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
if (!virtio_should_notify(vdev, vq)) {
@@ -1230,7 +1387,7 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
}
trace_virtio_notify(vdev, vq);
- vdev->isr |= 0x01;
+ virtio_set_isr(vq->vdev, 0x1);
virtio_notify_vector(vdev, vq->vector);
}
@@ -1239,7 +1396,7 @@ void virtio_notify_config(VirtIODevice *vdev)
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
return;
- vdev->isr |= 0x03;
+ virtio_set_isr(vdev, 0x3);
vdev->generation++;
virtio_notify_vector(vdev, vdev->config_vector);
}
@@ -1293,6 +1450,13 @@ static bool virtio_extra_state_needed(void *opaque)
k->has_extra_state(qbus->parent);
}
+static bool virtio_broken_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+
+ return vdev->broken;
+}
+
static const VMStateDescription vmstate_virtqueue = {
.name = "virtqueue_state",
.version_id = 1,
@@ -1407,6 +1571,17 @@ static const VMStateDescription vmstate_virtio_64bit_features = {
}
};
+static const VMStateDescription vmstate_virtio_broken = {
+ .name = "virtio/broken",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_broken_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(broken, VirtIODevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_virtio = {
.name = "virtio",
.version_id = 1,
@@ -1420,6 +1595,7 @@ static const VMStateDescription vmstate_virtio = {
&vmstate_virtio_64bit_features,
&vmstate_virtio_virtqueues,
&vmstate_virtio_ringsize,
+ &vmstate_virtio_broken,
&vmstate_virtio_extra_state,
NULL
}
@@ -1471,16 +1647,35 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
vdc->save(vdev, f);
}
+ if (vdc->vmsd) {
+ vmstate_save_state(f, vdc->vmsd, vdev, NULL);
+ }
+
/* Subsections */
vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
-void virtio_vmstate_save(QEMUFile *f, void *opaque, size_t size)
+static void virtio_device_put(QEMUFile *f, void *opaque, size_t size)
{
virtio_save(VIRTIO_DEVICE(opaque), f);
}
+/* A wrapper for use as a VMState .get function */
+static int virtio_device_get(QEMUFile *f, void *opaque, size_t size)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
+ DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
+
+ return virtio_load(vdev, f, dc->vmsd->version_id);
+}
+
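+/* Referenced by the VMSTATE_VIRTIO_DEVICE field in each virtio device's
+ * vmsd: .get/.put wrap virtio_load()/virtio_save() for the common state. */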
+const VMStateInfo virtio_vmstate_info = {
+ .name = "virtio",
+ .get = virtio_device_get,
+ .put = virtio_device_put,
+};
+
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -1585,7 +1780,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
"inconsistent with Host index 0x%x",
i, vdev->vq[i].last_avail_idx);
return -1;
- }
+ }
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
if (ret)
@@ -1602,6 +1797,13 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
}
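+ /* Counterpart of the vmsd-based save in virtio_save(). */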
+ if (vdc->vmsd) {
+ ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
+ if (ret) {
+ return ret;
+ }
+ }
+
/* Subsections */
ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
if (ret) {
@@ -1725,11 +1927,12 @@ void virtio_init(VirtIODevice *vdev, const char *name,
vdev->device_id = device_id;
vdev->status = 0;
- vdev->isr = 0;
+ atomic_set(&vdev->isr, 0);
vdev->queue_sel = 0;
vdev->config_vector = VIRTIO_NO_VECTOR;
vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
vdev->vm_running = runstate_is_running();
+ vdev->broken = false;
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
vdev->vq[i].vector = VIRTIO_NO_VECTOR;
vdev->vq[i].vdev = vdev;
@@ -1764,11 +1967,6 @@ hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
return vdev->vq[n].vring.used;
}
-hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
-{
- return vdev->vq[n].vring.desc;
-}
-
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
return sizeof(VRingDesc) * vdev->vq[n].vring.num;
@@ -1786,12 +1984,6 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}
-hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
-{
- return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
- virtio_queue_get_used_size(vdev, n);
-}
-
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
return vdev->vq[n].last_avail_idx;
@@ -1822,7 +2014,7 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
if (event_notifier_test_and_clear(n)) {
- virtio_irq(vq);
+ virtio_notify_vector(vq->vdev, vq->vector);
}
}
@@ -1871,7 +2063,7 @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
}
}
-static void virtio_queue_host_notifier_read(EventNotifier *n)
+void virtio_queue_host_notifier_read(EventNotifier *n)
{
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
if (event_notifier_test_and_clear(n)) {
@@ -1879,32 +2071,6 @@ static void virtio_queue_host_notifier_read(EventNotifier *n)
}
}
-void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
- bool set_handler)
-{
- AioContext *ctx = qemu_get_aio_context();
- if (assign && set_handler) {
- if (vq->use_aio) {
- aio_set_event_notifier(ctx, &vq->host_notifier, true,
- virtio_queue_host_notifier_read);
- } else {
- event_notifier_set_handler(&vq->host_notifier, true,
- virtio_queue_host_notifier_read);
- }
- } else {
- if (vq->use_aio) {
- aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
- } else {
- event_notifier_set_handler(&vq->host_notifier, true, NULL);
- }
- }
- if (!assign) {
- /* Test and clear notifier before after disabling event,
- * in case poll callback didn't have time to run. */
- virtio_queue_host_notifier_read(&vq->host_notifier);
- }
-}
-
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
return &vq->host_notifier;
@@ -1916,12 +2082,31 @@ void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
vdev->bus_name = g_strdup(bus_name);
}
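+/*
+ * Report an internal error and mark the device broken.  For VIRTIO 1.0
+ * (VERSION_1) devices, also set NEEDS_RESET in the status field and raise a
+ * configuration interrupt so the guest can notice and reset the device.
+ */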
+void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ error_vreport(fmt, ap);
+ va_end(ap);
+
+ vdev->broken = true;
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
+ virtio_notify_config(vdev);
+ }
+}
+
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
Error *err = NULL;
+ /* Devices should either use vmsd or the load/save methods */
+ assert(!vdc->vmsd || !vdc->load);
+
if (vdc->realize != NULL) {
vdc->realize(dev, &err);
if (err != NULL) {
@@ -1962,15 +2147,120 @@ static Property virtio_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
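+/*
+ * Default start_ioeventfd implementation: assign a host notifier to every
+ * populated virtqueue, hook up the read handler, then kick each queue so
+ * requests already sitting in the vring are processed.  On failure, roll
+ * back the notifiers assigned so far.
+ */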
+static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
+{
+ VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ int n, r, err;
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ VirtQueue *vq = &vdev->vq[n];
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = virtio_bus_set_host_notifier(qbus, n, true);
+ if (r < 0) {
+ err = r;
+ goto assign_error;
+ }
+ event_notifier_set_handler(&vq->host_notifier, true,
+ virtio_queue_host_notifier_read);
+ }
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ /* Kick right away to begin processing requests already in vring */
+ VirtQueue *vq = &vdev->vq[n];
+ if (!vq->vring.num) {
+ continue;
+ }
+ event_notifier_set(&vq->host_notifier);
+ }
+ return 0;
+
+assign_error:
+ while (--n >= 0) {
+ VirtQueue *vq = &vdev->vq[n];
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ event_notifier_set_handler(&vq->host_notifier, true, NULL);
+ r = virtio_bus_set_host_notifier(qbus, n, false);
+ assert(r >= 0);
+ }
+ return err;
+}
+
+int virtio_device_start_ioeventfd(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ return virtio_bus_start_ioeventfd(vbus);
+}
+
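+/*
+ * Undo virtio_device_start_ioeventfd_impl(): clear the handlers and release
+ * the host notifiers of every populated virtqueue.
+ */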
+static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
+{
+ VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ int n, r;
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ VirtQueue *vq = &vdev->vq[n];
+
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ event_notifier_set_handler(&vq->host_notifier, true, NULL);
+ r = virtio_bus_set_host_notifier(qbus, n, false);
+ assert(r >= 0);
+ }
+}
+
+void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ virtio_bus_stop_ioeventfd(vbus);
+}
+
+int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ return virtio_bus_grab_ioeventfd(vbus);
+}
+
+void virtio_device_release_ioeventfd(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ virtio_bus_release_ioeventfd(vbus);
+}
+
static void virtio_device_class_init(ObjectClass *klass, void *data)
{
/* Set the default value here. */
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = virtio_device_realize;
dc->unrealize = virtio_device_unrealize;
dc->bus_type = TYPE_VIRTIO_BUS;
dc->props = virtio_properties;
+ vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
+ vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
+
+ vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
+}
+
+bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusState *vbus = VIRTIO_BUS(qbus);
+
+ return virtio_bus_ioeventfd_enabled(vbus);
}
static const TypeInfo virtio_device_info = {
diff --git a/hw/xen/Makefile.objs b/hw/xen/Makefile.objs
index d3670940b7..591cdc229d 100644
--- a/hw/xen/Makefile.objs
+++ b/hw/xen/Makefile.objs
@@ -1,5 +1,5 @@
# xen backend driver support
-common-obj-$(CONFIG_XEN_BACKEND) += xen_backend.o xen_devconfig.o
+common-obj-$(CONFIG_XEN_BACKEND) += xen_backend.o xen_devconfig.o xen_pvdev.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o
obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_graphics.o xen_pt_msi.o
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index 69a238817e..d1190041ae 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -27,15 +27,18 @@
#include "hw/hw.h"
#include "hw/sysbus.h"
+#include "hw/boards.h"
#include "sysemu/char.h"
#include "qemu/log.h"
+#include "qapi/error.h"
#include "hw/xen/xen_backend.h"
+#include "hw/xen/xen_pvdev.h"
+#include "monitor/qdev.h"
#include <xen/grant_table.h>
-#define TYPE_XENSYSDEV "xensysdev"
-
DeviceState *xen_sysdev;
+BusState *xen_sysbus;
/* ------------------------------------------------------------- */
@@ -46,129 +49,7 @@ struct xs_handle *xenstore = NULL;
const char *xen_protocol;
/* private */
-struct xs_dirs {
- char *xs_dir;
- QTAILQ_ENTRY(xs_dirs) list;
-};
-static QTAILQ_HEAD(xs_dirs_head, xs_dirs) xs_cleanup =
- QTAILQ_HEAD_INITIALIZER(xs_cleanup);
-
-static QTAILQ_HEAD(XenDeviceHead, XenDevice) xendevs = QTAILQ_HEAD_INITIALIZER(xendevs);
-static int debug = 0;
-
-/* ------------------------------------------------------------- */
-
-static void xenstore_cleanup_dir(char *dir)
-{
- struct xs_dirs *d;
-
- d = g_malloc(sizeof(*d));
- d->xs_dir = dir;
- QTAILQ_INSERT_TAIL(&xs_cleanup, d, list);
-}
-
-void xen_config_cleanup(void)
-{
- struct xs_dirs *d;
-
- QTAILQ_FOREACH(d, &xs_cleanup, list) {
- xs_rm(xenstore, 0, d->xs_dir);
- }
-}
-
-int xenstore_write_str(const char *base, const char *node, const char *val)
-{
- char abspath[XEN_BUFSIZE];
-
- snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
- if (!xs_write(xenstore, 0, abspath, val, strlen(val))) {
- return -1;
- }
- return 0;
-}
-
-char *xenstore_read_str(const char *base, const char *node)
-{
- char abspath[XEN_BUFSIZE];
- unsigned int len;
- char *str, *ret = NULL;
-
- snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
- str = xs_read(xenstore, 0, abspath, &len);
- if (str != NULL) {
- /* move to qemu-allocated memory to make sure
- * callers can savely g_free() stuff. */
- ret = g_strdup(str);
- free(str);
- }
- return ret;
-}
-
-int xenstore_mkdir(char *path, int p)
-{
- struct xs_permissions perms[2] = {
- {
- .id = 0, /* set owner: dom0 */
- }, {
- .id = xen_domid,
- .perms = p,
- }
- };
-
- if (!xs_mkdir(xenstore, 0, path)) {
- xen_be_printf(NULL, 0, "xs_mkdir %s: failed\n", path);
- return -1;
- }
- xenstore_cleanup_dir(g_strdup(path));
-
- if (!xs_set_permissions(xenstore, 0, path, perms, 2)) {
- xen_be_printf(NULL, 0, "xs_set_permissions %s: failed\n", path);
- return -1;
- }
- return 0;
-}
-
-int xenstore_write_int(const char *base, const char *node, int ival)
-{
- char val[12];
-
- snprintf(val, sizeof(val), "%d", ival);
- return xenstore_write_str(base, node, val);
-}
-
-int xenstore_write_int64(const char *base, const char *node, int64_t ival)
-{
- char val[21];
-
- snprintf(val, sizeof(val), "%"PRId64, ival);
- return xenstore_write_str(base, node, val);
-}
-
-int xenstore_read_int(const char *base, const char *node, int *ival)
-{
- char *val;
- int rc = -1;
-
- val = xenstore_read_str(base, node);
- if (val && 1 == sscanf(val, "%d", ival)) {
- rc = 0;
- }
- g_free(val);
- return rc;
-}
-
-int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval)
-{
- char *val;
- int rc = -1;
-
- val = xenstore_read_str(base, node);
- if (val && 1 == sscanf(val, "%"SCNu64, uval)) {
- rc = 0;
- }
- g_free(val);
- return rc;
-}
+static int debug;
int xenstore_write_be_str(struct XenDevice *xendev, const char *node, const char *val)
{
@@ -205,27 +86,14 @@ int xenstore_read_fe_int(struct XenDevice *xendev, const char *node, int *ival)
return xenstore_read_int(xendev->fe, node, ival);
}
-int xenstore_read_fe_uint64(struct XenDevice *xendev, const char *node, uint64_t *uval)
+int xenstore_read_fe_uint64(struct XenDevice *xendev, const char *node,
+ uint64_t *uval)
{
return xenstore_read_uint64(xendev->fe, node, uval);
}
/* ------------------------------------------------------------- */
-const char *xenbus_strstate(enum xenbus_state state)
-{
- static const char *const name[] = {
- [ XenbusStateUnknown ] = "Unknown",
- [ XenbusStateInitialising ] = "Initialising",
- [ XenbusStateInitWait ] = "InitWait",
- [ XenbusStateInitialised ] = "Initialised",
- [ XenbusStateConnected ] = "Connected",
- [ XenbusStateClosing ] = "Closing",
- [ XenbusStateClosed ] = "Closed",
- };
- return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
-}
-
int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state)
{
int rc;
@@ -234,33 +102,12 @@ int xen_be_set_state(struct XenDevice *xendev, enum xenbus_state state)
if (rc < 0) {
return rc;
}
- xen_be_printf(xendev, 1, "backend state: %s -> %s\n",
+ xen_pv_printf(xendev, 1, "backend state: %s -> %s\n",
xenbus_strstate(xendev->be_state), xenbus_strstate(state));
xendev->be_state = state;
return 0;
}
-/* ------------------------------------------------------------- */
-
-struct XenDevice *xen_be_find_xendev(const char *type, int dom, int dev)
-{
- struct XenDevice *xendev;
-
- QTAILQ_FOREACH(xendev, &xendevs, next) {
- if (xendev->dom != dom) {
- continue;
- }
- if (xendev->dev != dev) {
- continue;
- }
- if (strcmp(xendev->type, type) != 0) {
- continue;
- }
- return xendev;
- }
- return NULL;
-}
-
/*
* get xen backend device, allocate a new one if it doesn't exist.
*/
@@ -269,13 +116,19 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
{
struct XenDevice *xendev;
- xendev = xen_be_find_xendev(type, dom, dev);
+ xendev = xen_pv_find_xendev(type, dom, dev);
if (xendev) {
return xendev;
}
/* init new xendev */
xendev = g_malloc0(ops->size);
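+ /* Each backend is now a qdev on the Xen sysbus, so it appears in the
+ * device tree and can be removed through the normal unplug path. */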
+ object_initialize(&xendev->qdev, ops->size, TYPE_XENBACKEND);
+ qdev_set_parent_bus(&xendev->qdev, xen_sysbus);
+ qdev_set_id(&xendev->qdev, g_strdup_printf("xen-%s-%d", type, dev));
+ qdev_init_nofail(&xendev->qdev);
+ object_unref(OBJECT(&xendev->qdev));
+
xendev->type = type;
xendev->dom = dom;
xendev->dev = dev;
@@ -291,7 +144,7 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
xendev->evtchndev = xenevtchn_open(NULL, 0);
if (xendev->evtchndev == NULL) {
- xen_be_printf(NULL, 0, "can't open evtchn device\n");
+ xen_pv_printf(NULL, 0, "can't open evtchn device\n");
g_free(xendev);
return NULL;
}
@@ -300,7 +153,7 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
if (ops->flags & DEVOPS_FLAG_NEED_GNTDEV) {
xendev->gnttabdev = xengnttab_open(NULL, 0);
if (xendev->gnttabdev == NULL) {
- xen_be_printf(NULL, 0, "can't open gnttab device\n");
+ xen_pv_printf(NULL, 0, "can't open gnttab device\n");
xenevtchn_close(xendev->evtchndev);
g_free(xendev);
return NULL;
@@ -309,7 +162,7 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
xendev->gnttabdev = NULL;
}
- QTAILQ_INSERT_TAIL(&xendevs, xendev, next);
+ xen_pv_insert_xendev(xendev);
if (xendev->ops->alloc) {
xendev->ops->alloc(xendev);
@@ -318,32 +171,6 @@ static struct XenDevice *xen_be_get_xendev(const char *type, int dom, int dev,
return xendev;
}
-/*
- * release xen backend device.
- */
-static void xen_be_del_xendev(struct XenDevice *xendev)
-{
- if (xendev->ops->free) {
- xendev->ops->free(xendev);
- }
-
- if (xendev->fe) {
- char token[XEN_BUFSIZE];
- snprintf(token, sizeof(token), "fe:%p", xendev);
- xs_unwatch(xenstore, xendev->fe, token);
- g_free(xendev->fe);
- }
-
- if (xendev->evtchndev != NULL) {
- xenevtchn_close(xendev->evtchndev);
- }
- if (xendev->gnttabdev != NULL) {
- xengnttab_close(xendev->gnttabdev);
- }
-
- QTAILQ_REMOVE(&xendevs, xendev, next);
- g_free(xendev);
-}
/*
* Sync internal data structures on xenstore updates.
@@ -359,7 +186,7 @@ static void xen_be_backend_changed(struct XenDevice *xendev, const char *node)
}
if (node) {
- xen_be_printf(xendev, 2, "backend update: %s\n", node);
+ xen_pv_printf(xendev, 2, "backend update: %s\n", node);
if (xendev->ops->backend_changed) {
xendev->ops->backend_changed(xendev, node);
}
@@ -375,7 +202,7 @@ static void xen_be_frontend_changed(struct XenDevice *xendev, const char *node)
fe_state = XenbusStateUnknown;
}
if (xendev->fe_state != fe_state) {
- xen_be_printf(xendev, 1, "frontend state: %s -> %s\n",
+ xen_pv_printf(xendev, 1, "frontend state: %s -> %s\n",
xenbus_strstate(xendev->fe_state),
xenbus_strstate(fe_state));
}
@@ -385,12 +212,13 @@ static void xen_be_frontend_changed(struct XenDevice *xendev, const char *node)
g_free(xendev->protocol);
xendev->protocol = xenstore_read_fe_str(xendev, "protocol");
if (xendev->protocol) {
- xen_be_printf(xendev, 1, "frontend protocol: %s\n", xendev->protocol);
+ xen_pv_printf(xendev, 1, "frontend protocol: %s\n",
+ xendev->protocol);
}
}
if (node) {
- xen_be_printf(xendev, 2, "frontend update: %s\n", node);
+ xen_pv_printf(xendev, 2, "frontend update: %s\n", node);
if (xendev->ops->frontend_changed) {
xendev->ops->frontend_changed(xendev, node);
}
@@ -414,26 +242,26 @@ static int xen_be_try_setup(struct XenDevice *xendev)
int be_state;
if (xenstore_read_be_int(xendev, "state", &be_state) == -1) {
- xen_be_printf(xendev, 0, "reading backend state failed\n");
+ xen_pv_printf(xendev, 0, "reading backend state failed\n");
return -1;
}
if (be_state != XenbusStateInitialising) {
- xen_be_printf(xendev, 0, "initial backend state is wrong (%s)\n",
+ xen_pv_printf(xendev, 0, "initial backend state is wrong (%s)\n",
xenbus_strstate(be_state));
return -1;
}
xendev->fe = xenstore_read_be_str(xendev, "frontend");
if (xendev->fe == NULL) {
- xen_be_printf(xendev, 0, "reading frontend path failed\n");
+ xen_pv_printf(xendev, 0, "reading frontend path failed\n");
return -1;
}
/* setup frontend watch */
snprintf(token, sizeof(token), "fe:%p", xendev);
if (!xs_watch(xenstore, xendev->fe, token)) {
- xen_be_printf(xendev, 0, "watching frontend path (%s) failed\n",
+ xen_pv_printf(xendev, 0, "watching frontend path (%s) failed\n",
xendev->fe);
return -1;
}
@@ -457,7 +285,7 @@ static int xen_be_try_init(struct XenDevice *xendev)
int rc = 0;
if (!xendev->online) {
- xen_be_printf(xendev, 1, "not online\n");
+ xen_pv_printf(xendev, 1, "not online\n");
return -1;
}
@@ -465,7 +293,7 @@ static int xen_be_try_init(struct XenDevice *xendev)
rc = xendev->ops->init(xendev);
}
if (rc != 0) {
- xen_be_printf(xendev, 1, "init() failed\n");
+ xen_pv_printf(xendev, 1, "init() failed\n");
return rc;
}
@@ -488,9 +316,9 @@ static int xen_be_try_initialise(struct XenDevice *xendev)
if (xendev->fe_state != XenbusStateInitialised &&
xendev->fe_state != XenbusStateConnected) {
if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) {
- xen_be_printf(xendev, 2, "frontend not ready, ignoring\n");
+ xen_pv_printf(xendev, 2, "frontend not ready, ignoring\n");
} else {
- xen_be_printf(xendev, 2, "frontend not ready (yet)\n");
+ xen_pv_printf(xendev, 2, "frontend not ready (yet)\n");
return -1;
}
}
@@ -499,7 +327,7 @@ static int xen_be_try_initialise(struct XenDevice *xendev)
rc = xendev->ops->initialise(xendev);
}
if (rc != 0) {
- xen_be_printf(xendev, 0, "initialise() failed\n");
+ xen_pv_printf(xendev, 0, "initialise() failed\n");
return rc;
}
@@ -520,9 +348,9 @@ static void xen_be_try_connected(struct XenDevice *xendev)
if (xendev->fe_state != XenbusStateConnected) {
if (xendev->ops->flags & DEVOPS_FLAG_IGNORE_STATE) {
- xen_be_printf(xendev, 2, "frontend not ready, ignoring\n");
+ xen_pv_printf(xendev, 2, "frontend not ready, ignoring\n");
} else {
- xen_be_printf(xendev, 2, "frontend not ready (yet)\n");
+ xen_pv_printf(xendev, 2, "frontend not ready (yet)\n");
return;
}
}
@@ -556,7 +384,7 @@ static int xen_be_try_reset(struct XenDevice *xendev)
return -1;
}
- xen_be_printf(xendev, 1, "device reset (for re-connect)\n");
+ xen_pv_printf(xendev, 1, "device reset (for re-connect)\n");
xen_be_set_state(xendev, XenbusStateInitialising);
return 0;
}
@@ -617,7 +445,8 @@ static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops)
snprintf(token, sizeof(token), "be:%p:%d:%p", type, dom, ops);
snprintf(path, sizeof(path), "backend/%s/%d", type, dom);
if (!xs_watch(xenstore, path, token)) {
- xen_be_printf(NULL, 0, "xen be: watching backend path (%s) failed\n", path);
+ xen_pv_printf(NULL, 0, "xen be: watching backend path (%s) failed\n",
+ path);
return -1;
}
@@ -637,8 +466,8 @@ static int xenstore_scan(const char *type, int dom, struct XenDevOps *ops)
return 0;
}
-static void xenstore_update_be(char *watch, char *type, int dom,
- struct XenDevOps *ops)
+void xenstore_update_be(char *watch, char *type, int dom,
+ struct XenDevOps *ops)
{
struct XenDevice *xendev;
char path[XEN_BUFSIZE], *bepath;
@@ -662,7 +491,7 @@ static void xenstore_update_be(char *watch, char *type, int dom,
if (xendev != NULL) {
bepath = xs_read(xenstore, 0, xendev->be, &len);
if (bepath == NULL) {
- xen_be_del_xendev(xendev);
+ xen_pv_del_xendev(xendev);
} else {
free(bepath);
xen_be_backend_changed(xendev, path);
@@ -671,7 +500,7 @@ static void xenstore_update_be(char *watch, char *type, int dom,
}
}
-static void xenstore_update_fe(char *watch, struct XenDevice *xendev)
+void xenstore_update_fe(char *watch, struct XenDevice *xendev)
{
char *node;
unsigned int len;
@@ -688,56 +517,13 @@ static void xenstore_update_fe(char *watch, struct XenDevice *xendev)
xen_be_frontend_changed(xendev, node);
xen_be_check_state(xendev);
}
-
-static void xenstore_update(void *unused)
-{
- char **vec = NULL;
- intptr_t type, ops, ptr;
- unsigned int dom, count;
-
- vec = xs_read_watch(xenstore, &count);
- if (vec == NULL) {
- goto cleanup;
- }
-
- if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR,
- &type, &dom, &ops) == 3) {
- xenstore_update_be(vec[XS_WATCH_PATH], (void*)type, dom, (void*)ops);
- }
- if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) {
- xenstore_update_fe(vec[XS_WATCH_PATH], (void*)ptr);
- }
-
-cleanup:
- free(vec);
-}
-
-static void xen_be_evtchn_event(void *opaque)
-{
- struct XenDevice *xendev = opaque;
- evtchn_port_t port;
-
- port = xenevtchn_pending(xendev->evtchndev);
- if (port != xendev->local_port) {
- xen_be_printf(xendev, 0,
- "xenevtchn_pending returned %d (expected %d)\n",
- port, xendev->local_port);
- return;
- }
- xenevtchn_unmask(xendev->evtchndev, port);
-
- if (xendev->ops->event) {
- xendev->ops->event(xendev);
- }
-}
-
/* -------------------------------------------------------------------- */
int xen_be_init(void)
{
xenstore = xs_daemon_open();
if (!xenstore) {
- xen_be_printf(NULL, 0, "can't connect to xenstored\n");
+ xen_pv_printf(NULL, 0, "can't connect to xenstored\n");
return -1;
}
@@ -750,6 +536,8 @@ int xen_be_init(void)
xen_sysdev = qdev_create(NULL, TYPE_XENSYSDEV);
qdev_init_nofail(xen_sysdev);
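+ /* Create a bus for Xen backend devices; the bus is its own hotplug
+ * handler, so backends can be attached and detached at runtime. */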
+ xen_sysbus = qbus_create(TYPE_XENSYSBUS, DEVICE(xen_sysdev), "xen-sysbus");
+ qbus_set_bus_hotplug_handler(xen_sysbus, &error_abort);
return 0;
@@ -761,6 +549,15 @@ err:
return -1;
}
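+/*
+ * Xen backend devices are created at runtime from xenstore, so the machine
+ * has to accept dynamically instantiated sysbus devices.
+ */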
+static void xen_set_dynamic_sysbus(void)
+{
+ Object *machine = qdev_get_machine();
+ ObjectClass *oc = object_get_class(machine);
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->has_dynamic_sysbus = true;
+}
+
int xen_be_register(const char *type, struct XenDevOps *ops)
{
char path[50];
@@ -782,6 +579,8 @@ int xen_be_register(const char *type, struct XenDevOps *ops)
void xen_be_register_common(void)
{
+ xen_set_dynamic_sysbus();
+
xen_be_register("console", &xen_console_ops);
xen_be_register("vkbd", &xen_kbdmouse_ops);
xen_be_register("qdisk", &xen_blkdev_ops);
@@ -798,70 +597,52 @@ int xen_be_bind_evtchn(struct XenDevice *xendev)
xendev->local_port = xenevtchn_bind_interdomain
(xendev->evtchndev, xendev->dom, xendev->remote_port);
if (xendev->local_port == -1) {
- xen_be_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n");
+ xen_pv_printf(xendev, 0, "xenevtchn_bind_interdomain failed\n");
return -1;
}
- xen_be_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
+ xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port);
qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev),
- xen_be_evtchn_event, NULL, xendev);
+ xen_pv_evtchn_event, NULL, xendev);
return 0;
}
-void xen_be_unbind_evtchn(struct XenDevice *xendev)
-{
- if (xendev->local_port == -1) {
- return;
- }
- qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
- xenevtchn_unbind(xendev->evtchndev, xendev->local_port);
- xen_be_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port);
- xendev->local_port = -1;
-}
-int xen_be_send_notify(struct XenDevice *xendev)
+static Property xendev_properties[] = {
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xendev_class_init(ObjectClass *klass, void *data)
{
- return xenevtchn_notify(xendev->evtchndev, xendev->local_port);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->props = xendev_properties;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
-/*
- * msg_level:
- * 0 == errors (stderr + logfile).
- * 1 == informative debug messages (logfile only).
- * 2 == noisy debug messages (logfile only).
- * 3 == will flood your log (logfile only).
- */
-void xen_be_printf(struct XenDevice *xendev, int msg_level, const char *fmt, ...)
+static const TypeInfo xendev_type_info = {
+ .name = TYPE_XENBACKEND,
+ .parent = TYPE_XENSYSDEV,
+ .class_init = xendev_class_init,
+ .instance_size = sizeof(struct XenDevice),
+};
+
+static void xen_sysbus_class_init(ObjectClass *klass, void *data)
{
- va_list args;
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
- if (xendev) {
- if (msg_level > xendev->debug) {
- return;
- }
- qemu_log("xen be: %s: ", xendev->name);
- if (msg_level == 0) {
- fprintf(stderr, "xen be: %s: ", xendev->name);
- }
- } else {
- if (msg_level > debug) {
- return;
- }
- qemu_log("xen be core: ");
- if (msg_level == 0) {
- fprintf(stderr, "xen be core: ");
- }
- }
- va_start(args, fmt);
- qemu_log_vprintf(fmt, args);
- va_end(args);
- if (msg_level == 0) {
- va_start(args, fmt);
- vfprintf(stderr, fmt, args);
- va_end(args);
- }
- qemu_log_flush();
+ hc->unplug = qdev_simple_device_unplug_cb;
}
+static const TypeInfo xensysbus_info = {
+ .name = TYPE_XENSYSBUS,
+ .parent = TYPE_BUS,
+ .class_init = xen_sysbus_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
+};
+
static int xen_sysdev_init(SysBusDevice *dev)
{
return 0;
@@ -878,6 +659,7 @@ static void xen_sysdev_class_init(ObjectClass *klass, void *data)
k->init = xen_sysdev_init;
dc->props = xen_sysdev_properties;
+ dc->bus_type = TYPE_XENSYSBUS;
}
static const TypeInfo xensysdev_info = {
@@ -889,7 +671,9 @@ static const TypeInfo xensysdev_info = {
static void xenbe_register_types(void)
{
+ type_register_static(&xensysbus_info);
type_register_static(&xensysdev_info);
+ type_register_static(&xendev_type_info);
}
-type_init(xenbe_register_types);
+type_init(xenbe_register_types)
diff --git a/hw/xen/xen_devconfig.c b/hw/xen/xen_devconfig.c
index b7d290df6c..a80e78c0dc 100644
--- a/hw/xen/xen_devconfig.c
+++ b/hw/xen/xen_devconfig.c
@@ -55,7 +55,7 @@ int xen_config_dev_blk(DriveInfo *disk)
const char *filename = qemu_opt_get(disk->opts, "file");
snprintf(device_name, sizeof(device_name), "xvd%c", 'a' + disk->unit);
- xen_be_printf(NULL, 1, "config disk %d [%s]: %s\n",
+ xen_pv_printf(NULL, 1, "config disk %d [%s]: %s\n",
disk->unit, device_name, filename);
xen_config_dev_dirs("vbd", "qdisk", vdev, fe, be, sizeof(fe));
@@ -83,7 +83,7 @@ int xen_config_dev_nic(NICInfo *nic)
snprintf(mac, sizeof(mac), "%02x:%02x:%02x:%02x:%02x:%02x",
nic->macaddr.a[0], nic->macaddr.a[1], nic->macaddr.a[2],
nic->macaddr.a[3], nic->macaddr.a[4], nic->macaddr.a[5]);
- xen_be_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", vlan_id, mac);
+ xen_pv_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", vlan_id, mac);
xen_config_dev_dirs("vif", "qnic", vlan_id, fe, be, sizeof(fe));
/* frontend */
diff --git a/hw/xen/xen_pvdev.c b/hw/xen/xen_pvdev.c
new file mode 100644
index 0000000000..aed783e844
--- /dev/null
+++ b/hw/xen/xen_pvdev.c
@@ -0,0 +1,318 @@
+/*
+ * Xen para-virtualization device
+ *
+ * (c) 2008 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/qdev-core.h"
+#include "hw/xen/xen_backend.h"
+#include "hw/xen/xen_pvdev.h"
+
+/* private */
+static int debug;
+
+struct xs_dirs {
+ char *xs_dir;
+ QTAILQ_ENTRY(xs_dirs) list;
+};
+
+static QTAILQ_HEAD(xs_dirs_head, xs_dirs) xs_cleanup =
+ QTAILQ_HEAD_INITIALIZER(xs_cleanup);
+
+static QTAILQ_HEAD(XenDeviceHead, XenDevice) xendevs =
+ QTAILQ_HEAD_INITIALIZER(xendevs);
+
+/* ------------------------------------------------------------- */
+
+static void xenstore_cleanup_dir(char *dir)
+{
+ struct xs_dirs *d;
+
+ d = g_malloc(sizeof(*d));
+ d->xs_dir = dir;
+ QTAILQ_INSERT_TAIL(&xs_cleanup, d, list);
+}
+
+void xen_config_cleanup(void)
+{
+ struct xs_dirs *d;
+
+ QTAILQ_FOREACH(d, &xs_cleanup, list) {
+ xs_rm(xenstore, 0, d->xs_dir);
+ }
+}
+
+int xenstore_mkdir(char *path, int p)
+{
+ struct xs_permissions perms[2] = {
+ {
+ .id = 0, /* set owner: dom0 */
+ }, {
+ .id = xen_domid,
+ .perms = p,
+ }
+ };
+
+ if (!xs_mkdir(xenstore, 0, path)) {
+ xen_pv_printf(NULL, 0, "xs_mkdir %s: failed\n", path);
+ return -1;
+ }
+ xenstore_cleanup_dir(g_strdup(path));
+
+ if (!xs_set_permissions(xenstore, 0, path, perms, 2)) {
+ xen_pv_printf(NULL, 0, "xs_set_permissions %s: failed\n", path);
+ return -1;
+ }
+ return 0;
+}
+
+int xenstore_write_str(const char *base, const char *node, const char *val)
+{
+ char abspath[XEN_BUFSIZE];
+
+ snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
+ if (!xs_write(xenstore, 0, abspath, val, strlen(val))) {
+ return -1;
+ }
+ return 0;
+}
+
+char *xenstore_read_str(const char *base, const char *node)
+{
+ char abspath[XEN_BUFSIZE];
+ unsigned int len;
+ char *str, *ret = NULL;
+
+ snprintf(abspath, sizeof(abspath), "%s/%s", base, node);
+ str = xs_read(xenstore, 0, abspath, &len);
+ if (str != NULL) {
+ /* move to qemu-allocated memory to make sure
+ * callers can safely g_free() stuff. */
+ ret = g_strdup(str);
+ free(str);
+ }
+ return ret;
+}
+
+int xenstore_write_int(const char *base, const char *node, int ival)
+{
+ char val[12];
+
+ snprintf(val, sizeof(val), "%d", ival);
+ return xenstore_write_str(base, node, val);
+}
+
+int xenstore_write_int64(const char *base, const char *node, int64_t ival)
+{
+ char val[21];
+
+ snprintf(val, sizeof(val), "%"PRId64, ival);
+ return xenstore_write_str(base, node, val);
+}
+
+int xenstore_read_int(const char *base, const char *node, int *ival)
+{
+ char *val;
+ int rc = -1;
+
+ val = xenstore_read_str(base, node);
+ if (val && 1 == sscanf(val, "%d", ival)) {
+ rc = 0;
+ }
+ g_free(val);
+ return rc;
+}
+
+int xenstore_read_uint64(const char *base, const char *node, uint64_t *uval)
+{
+ char *val;
+ int rc = -1;
+
+ val = xenstore_read_str(base, node);
+ if (val && 1 == sscanf(val, "%"SCNu64, uval)) {
+ rc = 0;
+ }
+ g_free(val);
+ return rc;
+}
+
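+/*
+ * xenstore watch callback: decode the watch token and dispatch the event to
+ * the backend or frontend update handler.
+ */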
+void xenstore_update(void *unused)
+{
+ char **vec = NULL;
+ intptr_t type, ops, ptr;
+ unsigned int dom, count;
+
+ vec = xs_read_watch(xenstore, &count);
+ if (vec == NULL) {
+ goto cleanup;
+ }
+
+ if (sscanf(vec[XS_WATCH_TOKEN], "be:%" PRIxPTR ":%d:%" PRIxPTR,
+ &type, &dom, &ops) == 3) {
+ xenstore_update_be(vec[XS_WATCH_PATH], (void *)type, dom, (void *)ops);
+ }
+ if (sscanf(vec[XS_WATCH_TOKEN], "fe:%" PRIxPTR, &ptr) == 1) {
+ xenstore_update_fe(vec[XS_WATCH_PATH], (void *)ptr);
+ }
+
+cleanup:
+ free(vec);
+}
+
+const char *xenbus_strstate(enum xenbus_state state)
+{
+ static const char *const name[] = {
+ [XenbusStateUnknown] = "Unknown",
+ [XenbusStateInitialising] = "Initialising",
+ [XenbusStateInitWait] = "InitWait",
+ [XenbusStateInitialised] = "Initialised",
+ [XenbusStateConnected] = "Connected",
+ [XenbusStateClosing] = "Closing",
+ [XenbusStateClosed] = "Closed",
+ };
+ return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
+}
+
+/*
+ * msg_level:
+ * 0 == errors (stderr + logfile).
+ * 1 == informative debug messages (logfile only).
+ * 2 == noisy debug messages (logfile only).
+ * 3 == will flood your log (logfile only).
+ */
+void xen_pv_printf(struct XenDevice *xendev, int msg_level,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ if (xendev) {
+ if (msg_level > xendev->debug) {
+ return;
+ }
+ qemu_log("xen be: %s: ", xendev->name);
+ if (msg_level == 0) {
+ fprintf(stderr, "xen be: %s: ", xendev->name);
+ }
+ } else {
+ if (msg_level > debug) {
+ return;
+ }
+ qemu_log("xen be core: ");
+ if (msg_level == 0) {
+ fprintf(stderr, "xen be core: ");
+ }
+ }
+ va_start(args, fmt);
+ qemu_log_vprintf(fmt, args);
+ va_end(args);
+ if (msg_level == 0) {
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+ qemu_log_flush();
+}
+
+void xen_pv_evtchn_event(void *opaque)
+{
+ struct XenDevice *xendev = opaque;
+ evtchn_port_t port;
+
+ port = xenevtchn_pending(xendev->evtchndev);
+ if (port != xendev->local_port) {
+ xen_pv_printf(xendev, 0,
+ "xenevtchn_pending returned %d (expected %d)\n",
+ port, xendev->local_port);
+ return;
+ }
+ xenevtchn_unmask(xendev->evtchndev, port);
+
+ if (xendev->ops->event) {
+ xendev->ops->event(xendev);
+ }
+}
+
+void xen_pv_unbind_evtchn(struct XenDevice *xendev)
+{
+ if (xendev->local_port == -1) {
+ return;
+ }
+ qemu_set_fd_handler(xenevtchn_fd(xendev->evtchndev), NULL, NULL, NULL);
+ xenevtchn_unbind(xendev->evtchndev, xendev->local_port);
+ xen_pv_printf(xendev, 2, "unbind evtchn port %d\n", xendev->local_port);
+ xendev->local_port = -1;
+}
+
+int xen_pv_send_notify(struct XenDevice *xendev)
+{
+ return xenevtchn_notify(xendev->evtchndev, xendev->local_port);
+}
+
+/* ------------------------------------------------------------- */
+
+struct XenDevice *xen_pv_find_xendev(const char *type, int dom, int dev)
+{
+ struct XenDevice *xendev;
+
+ QTAILQ_FOREACH(xendev, &xendevs, next) {
+ if (xendev->dom != dom) {
+ continue;
+ }
+ if (xendev->dev != dev) {
+ continue;
+ }
+ if (strcmp(xendev->type, type) != 0) {
+ continue;
+ }
+ return xendev;
+ }
+ return NULL;
+}
+
+/*
+ * release xen backend device.
+ */
+void xen_pv_del_xendev(struct XenDevice *xendev)
+{
+ if (xendev->ops->free) {
+ xendev->ops->free(xendev);
+ }
+
+ if (xendev->fe) {
+ char token[XEN_BUFSIZE];
+ snprintf(token, sizeof(token), "fe:%p", xendev);
+ xs_unwatch(xenstore, xendev->fe, token);
+ g_free(xendev->fe);
+ }
+
+ if (xendev->evtchndev != NULL) {
+ xenevtchn_close(xendev->evtchndev);
+ }
+ if (xendev->gnttabdev != NULL) {
+ xengnttab_close(xendev->gnttabdev);
+ }
+
+ QTAILQ_REMOVE(&xendevs, xendev, next);
+
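+ /* The XenDevice is embedded in a qdev; unplugging releases the object
+ * instead of calling g_free() directly. */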
+ qdev_unplug(&xendev->qdev, NULL);
+}
+
+void xen_pv_insert_xendev(struct XenDevice *xendev)
+{
+ QTAILQ_INSERT_TAIL(&xendevs, xendev, next);
+}
diff --git a/hw/xenpv/xen_domainbuild.c b/hw/xenpv/xen_domainbuild.c
index 5a9f5ac806..457a8976c3 100644
--- a/hw/xenpv/xen_domainbuild.c
+++ b/hw/xenpv/xen_domainbuild.c
@@ -53,11 +53,7 @@ int xenstore_domain_init1(const char *kernel, const char *ramdisk,
char *dom, uuid_string[42], vm[256], path[256];
int i;
- snprintf(uuid_string, sizeof(uuid_string), UUID_FMT,
- qemu_uuid[0], qemu_uuid[1], qemu_uuid[2], qemu_uuid[3],
- qemu_uuid[4], qemu_uuid[5], qemu_uuid[6], qemu_uuid[7],
- qemu_uuid[8], qemu_uuid[9], qemu_uuid[10], qemu_uuid[11],
- qemu_uuid[12], qemu_uuid[13], qemu_uuid[14], qemu_uuid[15]);
+ qemu_uuid_unparse(&qemu_uuid, uuid_string);
dom = xs_get_domain_path(xenstore, xen_domid);
snprintf(vm, sizeof(vm), "/vm/%s", uuid_string);
@@ -236,7 +232,7 @@ int xen_domain_build_pv(const char *kernel, const char *ramdisk,
unsigned long xenstore_mfn = 0, console_mfn = 0;
int rc;
- memcpy(uuid, qemu_uuid, sizeof(uuid));
+ memcpy(uuid, &qemu_uuid, sizeof(uuid));
rc = xen_domain_create(xen_xc, ssidref, uuid, flags, &xen_domid);
if (rc < 0) {
fprintf(stderr, "xen: xc_domain_create() failed\n");
diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c
index ac75949484..dc6fdcc266 100644
--- a/hw/xtensa/xtfpga.c
+++ b/hw/xtensa/xtfpga.c
@@ -265,7 +265,7 @@ static void lx_init(const LxBoardDesc *board, MachineState *machine)
}
if (!serial_hds[0]) {
- serial_hds[0] = qemu_chr_new("serial0", "null", NULL);
+ serial_hds[0] = qemu_chr_new("serial0", "null");
}
serial_mm_init(system_io, 0x0d050020, 2, xtensa_get_extint(env, 0),