@@ -83,12 +83,51 @@ static void gpex_host_realize(DeviceState *dev, Error **errp)
     int i;
 
     pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX);
+    sysbus_init_mmio(sbd, &pex->mmio);
+
+    /*
+     * Note that the MemoryRegions io_mmio and io_ioport that we pass
+     * to pci_register_root_bus() are not the same as the
+     * MemoryRegions io_mmio_window and io_ioport_window that we
+     * expose as SysBus MRs. The difference is in the behaviour of
+     * accesses to addresses where no PCI device has been mapped.
+     *
+     * io_mmio and io_ioport are the underlying PCI view of the PCI
+     * address space, and when a PCI device does a bus master access
+     * to a bad address this is reported back to it as a transaction
+     * failure.
+     *
+     * io_mmio_window and io_ioport_window implement "unmapped
+     * addresses read as -1 and ignore writes"; this is traditional
+     * x86 PC behaviour, which is not mandated by the PCI spec proper
+     * but expected by much PCI-using guest software, including Linux.
+     *
+     * In the interests of not being unnecessarily surprising, we
+     * implement it in the gpex PCI host controller, by providing the
+     * _window MRs, which are containers with io ops that implement
+     * the 'background' behaviour and which hold the real PCI MRs as
+     * subregions.
+     */
     memory_region_init(&s->io_mmio, OBJECT(s), "gpex_mmio", UINT64_MAX);
     memory_region_init(&s->io_ioport, OBJECT(s), "gpex_ioport", 64 * 1024);
 
-    sysbus_init_mmio(sbd, &pex->mmio);
-    sysbus_init_mmio(sbd, &s->io_mmio);
-    sysbus_init_mmio(sbd, &s->io_ioport);
+    if (s->allow_unmapped_accesses) {
+        memory_region_init_io(&s->io_mmio_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_mmio_window", UINT64_MAX);
+        memory_region_init_io(&s->io_ioport_window, OBJECT(s),
+                              &unassigned_io_ops, OBJECT(s),
+                              "gpex_ioport_window", 64 * 1024);
+
+        memory_region_add_subregion(&s->io_mmio_window, 0, &s->io_mmio);
+        memory_region_add_subregion(&s->io_ioport_window, 0, &s->io_ioport);
+        sysbus_init_mmio(sbd, &s->io_mmio_window);
+        sysbus_init_mmio(sbd, &s->io_ioport_window);
+    } else {
+        sysbus_init_mmio(sbd, &s->io_mmio);
+        sysbus_init_mmio(sbd, &s->io_ioport);
+    }
+
     for (i = 0; i < GPEX_NUM_IRQS; i++) {
         sysbus_init_irq(sbd, &s->irq[i]);
         s->irq_num[i] = -1;
@@ -108,6 +147,16 @@ static const char *gpex_host_root_bus_path(PCIHostState *host_bridge,
     return "0000:00";
 }
 
+static Property gpex_host_properties[] = {
+    /*
+     * Permit CPU accesses to unmapped areas of the PIO and MMIO windows
+     * (discarding writes and returning -1 for reads) rather than aborting.
+     */
+    DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost,
+                     allow_unmapped_accesses, true),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void gpex_host_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -117,6 +166,7 @@ static void gpex_host_class_init(ObjectClass *klass, void *data)
     dc->realize = gpex_host_realize;
     set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
     dc->fw_name = "pci";
+    device_class_set_props(dc, gpex_host_properties);
 }
 
 static void gpex_host_initfn(Object *obj)
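
As a usage sketch (not part of this patch): the new "allow-unmapped-accesses" property defaults to true, so existing boards get the "-1 reads, ignored writes" behaviour automatically; a board model that wants to keep the old "fault on unmapped access" behaviour can clear it before realizing the gpex host. TYPE_GPEX_HOST and the qdev/sysbus helpers below are existing QEMU APIs, but the function itself and its placement in a board file are purely illustrative.

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/gpex.h"

/* Illustrative board-side wiring, not part of this patch. */
static DeviceState *example_create_gpex_host(void)
{
    DeviceState *dev = qdev_new(TYPE_GPEX_HOST);

    /*
     * Opt back into the previous behaviour (transaction failures for
     * unmapped parts of the MMIO/PIO windows); leaving the property at
     * its default of true keeps the new background behaviour.
     */
    qdev_prop_set_bit(dev, "allow-unmapped-accesses", false);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    /* Caller would go on to sysbus_mmio_map() the ECAM/MMIO/PIO windows. */
    return dev;
}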