virt.c

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU Loongson 3A5000 development board emulation
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/char/serial-mm.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "system/system.h"
#include "system/qtest.h"
#include "system/runstate.h"
#include "system/reset.h"
#include "system/rtc.h"
#include "hw/loongarch/virt.h"
#include "exec/address-spaces.h"
#include "hw/irq.h"
#include "net/net.h"
#include "hw/loader.h"
#include "elf.h"
#include "hw/intc/loongarch_ipi.h"
#include "hw/intc/loongarch_extioi.h"
#include "hw/intc/loongarch_pch_pic.h"
#include "hw/intc/loongarch_pch_msi.h"
#include "hw/pci-host/ls7a.h"
#include "hw/pci-host/gpex.h"
#include "hw/misc/unimp.h"
#include "hw/loongarch/fw_cfg.h"
#include "target/loongarch/cpu.h"
#include "hw/firmware/smbios.h"
#include "qapi/qapi-visit-common.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/mem/nvdimm.h"
#include "hw/platform-bus.h"
#include "hw/display/ramfb.h"
#include "hw/uefi/var-service-api.h"
#include "hw/mem/pc-dimm.h"
#include "system/tpm.h"
#include "system/block-backend.h"
#include "hw/block/flash.h"
#include "hw/virtio/virtio-iommu.h"
#include "qemu/error-report.h"

static void virt_get_veiointc(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
    OnOffAuto veiointc = lvms->veiointc;

    visit_type_OnOffAuto(v, name, &veiointc, errp);
}

static void virt_set_veiointc(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &lvms->veiointc, errp);
}

static PFlashCFI01 *virt_flash_create1(LoongArchVirtMachineState *lvms,
                                       const char *name,
                                       const char *alias_prop_name)
{
    DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01);

    qdev_prop_set_uint64(dev, "sector-length", VIRT_FLASH_SECTOR_SIZE);
    qdev_prop_set_uint8(dev, "width", 4);
    qdev_prop_set_uint8(dev, "device-width", 2);
    qdev_prop_set_bit(dev, "big-endian", false);
    qdev_prop_set_uint16(dev, "id0", 0x89);
    qdev_prop_set_uint16(dev, "id1", 0x18);
    qdev_prop_set_uint16(dev, "id2", 0x00);
    qdev_prop_set_uint16(dev, "id3", 0x00);
    qdev_prop_set_string(dev, "name", name);
    object_property_add_child(OBJECT(lvms), name, OBJECT(dev));
    object_property_add_alias(OBJECT(lvms), alias_prop_name,
                              OBJECT(dev), "drive");
    return PFLASH_CFI01(dev);
}

static void virt_flash_create(LoongArchVirtMachineState *lvms)
{
    lvms->flash[0] = virt_flash_create1(lvms, "virt.flash0", "pflash0");
    lvms->flash[1] = virt_flash_create1(lvms, "virt.flash1", "pflash1");
}

static void virt_flash_map1(PFlashCFI01 *flash,
                            hwaddr base, hwaddr size,
                            MemoryRegion *sysmem)
{
    DeviceState *dev = DEVICE(flash);
    BlockBackend *blk;
    hwaddr real_size = size;

    blk = pflash_cfi01_get_blk(flash);
    if (blk) {
        real_size = blk_getlength(blk);
        assert(real_size && real_size <= size);
    }

    assert(QEMU_IS_ALIGNED(real_size, VIRT_FLASH_SECTOR_SIZE));
    assert(real_size / VIRT_FLASH_SECTOR_SIZE <= UINT32_MAX);

    qdev_prop_set_uint32(dev, "num-blocks", real_size / VIRT_FLASH_SECTOR_SIZE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
    memory_region_add_subregion(sysmem, base,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0));
}

static void virt_flash_map(LoongArchVirtMachineState *lvms,
                           MemoryRegion *sysmem)
{
    PFlashCFI01 *flash0 = lvms->flash[0];
    PFlashCFI01 *flash1 = lvms->flash[1];

    virt_flash_map1(flash0, VIRT_FLASH0_BASE, VIRT_FLASH0_SIZE, sysmem);
    virt_flash_map1(flash1, VIRT_FLASH1_BASE, VIRT_FLASH1_SIZE, sysmem);
}

static void virt_build_smbios(LoongArchVirtMachineState *lvms)
{
    MachineState *ms = MACHINE(lvms);
    MachineClass *mc = MACHINE_GET_CLASS(lvms);
    uint8_t *smbios_tables, *smbios_anchor;
    size_t smbios_tables_len, smbios_anchor_len;
    const char *product = "QEMU Virtual Machine";

    if (!lvms->fw_cfg) {
        return;
    }

    smbios_set_defaults("QEMU", product, mc->name);

    smbios_get_tables(ms, SMBIOS_ENTRY_POINT_TYPE_64,
                      NULL, 0,
                      &smbios_tables, &smbios_tables_len,
                      &smbios_anchor, &smbios_anchor_len, &error_fatal);

    if (smbios_anchor) {
        fw_cfg_add_file(lvms->fw_cfg, "etc/smbios/smbios-tables",
                        smbios_tables, smbios_tables_len);
        fw_cfg_add_file(lvms->fw_cfg, "etc/smbios/smbios-anchor",
                        smbios_anchor, smbios_anchor_len);
    }
}

static void virt_done(Notifier *notifier, void *data)
{
    LoongArchVirtMachineState *lvms = container_of(notifier,
                                      LoongArchVirtMachineState, machine_done);

    virt_build_smbios(lvms);
    virt_acpi_setup(lvms);
    virt_fdt_setup(lvms);
}

static void virt_powerdown_req(Notifier *notifier, void *opaque)
{
    LoongArchVirtMachineState *s;

    s = container_of(notifier, LoongArchVirtMachineState, powerdown_notifier);
    acpi_send_event(s->acpi_ged, ACPI_POWER_DOWN_STATUS);
}

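/*
 * memmap_table accumulates the guest physical RAM ranges; virt_init()
 * later hands the finished table to the firmware as the fw_cfg file
 * "etc/memmap".
 */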
static void memmap_add_entry(uint64_t address, uint64_t length, uint32_t type)
{
    /* Ensure there are no duplicate entries. */
    for (unsigned i = 0; i < memmap_entries; i++) {
        assert(memmap_table[i].address != address);
    }

    memmap_table = g_renew(struct memmap_entry, memmap_table,
                           memmap_entries + 1);
    memmap_table[memmap_entries].address = cpu_to_le64(address);
    memmap_table[memmap_entries].length = cpu_to_le64(length);
    memmap_table[memmap_entries].type = cpu_to_le32(type);
    memmap_table[memmap_entries].reserved = 0;
    memmap_entries++;
}

static DeviceState *create_acpi_ged(DeviceState *pch_pic,
                                    LoongArchVirtMachineState *lvms)
{
    DeviceState *dev;
    MachineState *ms = MACHINE(lvms);
    MachineClass *mc = MACHINE_GET_CLASS(lvms);
    uint32_t event = ACPI_GED_PWR_DOWN_EVT;

    if (ms->ram_slots) {
        event |= ACPI_GED_MEM_HOTPLUG_EVT;
    }

    if (mc->has_hotpluggable_cpus) {
        event |= ACPI_GED_CPU_HOTPLUG_EVT;
    }

    dev = qdev_new(TYPE_ACPI_GED);
    qdev_prop_set_uint32(dev, "ged-event", event);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    /* ged event */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, VIRT_GED_EVT_ADDR);
    /* memory hotplug */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, VIRT_GED_MEM_ADDR);
    /* ged regs used for reset and power down */
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, VIRT_GED_REG_ADDR);
    if (mc->has_hotpluggable_cpus) {
        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 3, VIRT_GED_CPUHP_ADDR);
    }

    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
                       qdev_get_gpio_in(pch_pic, VIRT_SCI_IRQ - VIRT_GSI_BASE));

    return dev;
}

static DeviceState *create_platform_bus(DeviceState *pch_pic)
{
    DeviceState *dev;
    SysBusDevice *sysbus;
    int i, irq;
    MemoryRegion *sysmem = get_system_memory();

    dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
    dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
    qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
    qdev_prop_set_uint32(dev, "mmio_size", VIRT_PLATFORM_BUS_SIZE);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus = SYS_BUS_DEVICE(dev);
    for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) {
        irq = VIRT_PLATFORM_BUS_IRQ - VIRT_GSI_BASE + i;
        sysbus_connect_irq(sysbus, i, qdev_get_gpio_in(pch_pic, irq));
    }

    memory_region_add_subregion(sysmem,
                                VIRT_PLATFORM_BUS_BASEADDRESS,
                                sysbus_mmio_get_region(sysbus, 0));
    return dev;
}

static void virt_devices_init(DeviceState *pch_pic,
                              LoongArchVirtMachineState *lvms)
{
    MachineClass *mc = MACHINE_GET_CLASS(lvms);
    DeviceState *gpex_dev;
    SysBusDevice *d;
    PCIBus *pci_bus;
    MemoryRegion *ecam_alias, *ecam_reg, *pio_alias, *pio_reg;
    MemoryRegion *mmio_alias, *mmio_reg;
    int i;

    gpex_dev = qdev_new(TYPE_GPEX_HOST);
    d = SYS_BUS_DEVICE(gpex_dev);
    sysbus_realize_and_unref(d, &error_fatal);
    pci_bus = PCI_HOST_BRIDGE(gpex_dev)->bus;
    lvms->pci_bus = pci_bus;

    /* Map only the first VIRT_PCI_CFG_SIZE bytes of ECAM space */
    ecam_alias = g_new0(MemoryRegion, 1);
    ecam_reg = sysbus_mmio_get_region(d, 0);
    memory_region_init_alias(ecam_alias, OBJECT(gpex_dev), "pcie-ecam",
                             ecam_reg, 0, VIRT_PCI_CFG_SIZE);
    memory_region_add_subregion(get_system_memory(), VIRT_PCI_CFG_BASE,
                                ecam_alias);

    /* Map PCI mem space */
    mmio_alias = g_new0(MemoryRegion, 1);
    mmio_reg = sysbus_mmio_get_region(d, 1);
    memory_region_init_alias(mmio_alias, OBJECT(gpex_dev), "pcie-mmio",
                             mmio_reg, VIRT_PCI_MEM_BASE, VIRT_PCI_MEM_SIZE);
    memory_region_add_subregion(get_system_memory(), VIRT_PCI_MEM_BASE,
                                mmio_alias);

    /* Map PCI IO port space. */
    pio_alias = g_new0(MemoryRegion, 1);
    pio_reg = sysbus_mmio_get_region(d, 2);
    memory_region_init_alias(pio_alias, OBJECT(gpex_dev), "pcie-io", pio_reg,
                             VIRT_PCI_IO_OFFSET, VIRT_PCI_IO_SIZE);
    memory_region_add_subregion(get_system_memory(), VIRT_PCI_IO_BASE,
                                pio_alias);

    for (i = 0; i < PCI_NUM_PINS; i++) {
        sysbus_connect_irq(d, i,
                           qdev_get_gpio_in(pch_pic, 16 + i));
        gpex_set_irq_num(GPEX_HOST(gpex_dev), i, 16 + i);
    }

    /*
     * Create the UARTs in reverse order so that they appear in the
     * finished device tree lowest address first.
     */
    for (i = VIRT_UART_COUNT; i-- > 0;) {
        hwaddr base = VIRT_UART_BASE + i * VIRT_UART_SIZE;
        int irq = VIRT_UART_IRQ + i - VIRT_GSI_BASE;

        serial_mm_init(get_system_memory(), base, 0,
                       qdev_get_gpio_in(pch_pic, irq),
                       115200, serial_hd(i), DEVICE_LITTLE_ENDIAN);
    }

    /* Network init */
    pci_init_nic_devices(pci_bus, mc->default_nic);

    /*
     * There are some invalid guest memory accesses.
     * Create a few unimplemented devices to emulate them.
     */
    create_unimplemented_device("pci-dma-cfg", 0x1001041c, 0x4);
    sysbus_create_simple("ls7a_rtc", VIRT_RTC_REG_BASE,
                         qdev_get_gpio_in(pch_pic,
                                          VIRT_RTC_IRQ - VIRT_GSI_BASE));

    /* acpi ged */
    lvms->acpi_ged = create_acpi_ged(pch_pic, lvms);
    /* platform bus */
    lvms->platform_bus_dev = create_platform_bus(pch_pic);
}

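/*
 * Route every already-created CPU to the IPI and EXTIOI controllers by
 * invoking their hotplug-handler plug callbacks, the same path used for
 * CPUs that are hot-added later in virt_cpu_plug().
 */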
static void virt_cpu_irq_init(LoongArchVirtMachineState *lvms)
{
    int num;
    MachineState *ms = MACHINE(lvms);
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus;
    CPUState *cs;
    Error *err = NULL;

    /* cpu nodes */
    possible_cpus = mc->possible_cpu_arch_ids(ms);
    for (num = 0; num < possible_cpus->len; num++) {
        cs = possible_cpus->cpus[num].cpu;
        if (cs == NULL) {
            continue;
        }

        hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), DEVICE(cs), &err);
        hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), DEVICE(cs), &err);
    }
}

static void virt_irq_init(LoongArchVirtMachineState *lvms)
{
    DeviceState *pch_pic, *pch_msi;
    DeviceState *ipi, *extioi;
    SysBusDevice *d;
    int i, start, num;

    /*
     * Extended IRQ model.
     *                                 |
     * +-----------+     +-------------|--------+     +-----------+
     * | IPI/Timer | --> | CPUINTC(0-3)|(4-255) | <-- | IPI/Timer |
     * +-----------+     +-------------|--------+     +-----------+
     *                        ^        |
     *                        |
     *                   +---------+
     *                   | EIOINTC |
     *                   +---------+
     *                    ^       ^
     *                    |       |
     *             +---------+ +---------+
     *             | PCH-PIC | | PCH-MSI |
     *             +---------+ +---------+
     *               ^      ^          ^
     *               |      |          |
     *        +--------+ +---------+ +---------+
     *        | UARTs  | | Devices | | Devices |
     *        +--------+ +---------+ +---------+
     *
     * Virt extended IRQ model.
     *
     *  +-----+     +---------------+     +-------+
     *  | IPI |---> | CPUINTC(0-255)| <-- | Timer |
     *  +-----+     +---------------+     +-------+
     *                      ^
     *                      |
     *                +-----------+
     *                | V-EIOINTC |
     *                +-----------+
     *                 ^         ^
     *                 |         |
     *          +---------+ +---------+
     *          | PCH-PIC | | PCH-MSI |
     *          +---------+ +---------+
     *            ^      ^          ^
     *            |      |          |
     *     +--------+ +---------+ +---------+
     *     | UARTs  | | Devices | | Devices |
     *     +--------+ +---------+ +---------+
     */

    /* Create IPI device */
    ipi = qdev_new(TYPE_LOONGARCH_IPI);
    lvms->ipi = ipi;
    sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal);

    /* IPI iocsr memory region */
    memory_region_add_subregion(&lvms->system_iocsr, SMP_IPI_MAILBOX,
                   sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0));
    memory_region_add_subregion(&lvms->system_iocsr, MAIL_SEND_ADDR,
                   sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1));

    /* Create EXTIOI device */
    extioi = qdev_new(TYPE_LOONGARCH_EXTIOI);
    lvms->extioi = extioi;
    if (virt_is_veiointc_enabled(lvms)) {
        qdev_prop_set_bit(extioi, "has-virtualization-extension", true);
    }
    sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal);
    memory_region_add_subregion(&lvms->system_iocsr, APIC_BASE,
                   sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0));
    if (virt_is_veiointc_enabled(lvms)) {
        memory_region_add_subregion(&lvms->system_iocsr, EXTIOI_VIRT_BASE,
                   sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 1));
    }

    virt_cpu_irq_init(lvms);

    pch_pic = qdev_new(TYPE_LOONGARCH_PIC);
    num = VIRT_PCH_PIC_IRQ_NUM;
    qdev_prop_set_uint32(pch_pic, "pch_pic_irq_num", num);
    d = SYS_BUS_DEVICE(pch_pic);
    sysbus_realize_and_unref(d, &error_fatal);
    memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE,
                                sysbus_mmio_get_region(d, 0));
    memory_region_add_subregion(get_system_memory(),
                                VIRT_IOAPIC_REG_BASE + PCH_PIC_ROUTE_ENTRY_OFFSET,
                                sysbus_mmio_get_region(d, 1));
    memory_region_add_subregion(get_system_memory(),
                                VIRT_IOAPIC_REG_BASE + PCH_PIC_INT_STATUS_LO,
                                sysbus_mmio_get_region(d, 2));

    /* Connect pch_pic irqs to extioi */
    for (i = 0; i < num; i++) {
        qdev_connect_gpio_out(DEVICE(d), i, qdev_get_gpio_in(extioi, i));
    }

    pch_msi = qdev_new(TYPE_LOONGARCH_PCH_MSI);
    start = num;
    num = EXTIOI_IRQS - start;
    qdev_prop_set_uint32(pch_msi, "msi_irq_base", start);
    qdev_prop_set_uint32(pch_msi, "msi_irq_num", num);
    d = SYS_BUS_DEVICE(pch_msi);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, VIRT_PCH_MSI_ADDR_LOW);
    for (i = 0; i < num; i++) {
        /* Connect pch_msi irqs to extioi */
        qdev_connect_gpio_out(DEVICE(d), i,
                              qdev_get_gpio_in(extioi, i + start));
    }

    virt_devices_init(pch_pic, lvms);
}

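/*
 * Firmware is taken either from a block backend attached to flash0
 * ("-drive if=pflash") or from a raw image loaded into the flash0
 * region with "-bios"; the two options are mutually exclusive.
 */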
static void virt_firmware_init(LoongArchVirtMachineState *lvms)
{
    char *filename = MACHINE(lvms)->firmware;
    char *bios_name = NULL;
    int bios_size, i;
    BlockBackend *pflash_blk0;
    MemoryRegion *mr;

    lvms->bios_loaded = false;

    /* Map legacy -drive if=pflash to machine properties */
    for (i = 0; i < ARRAY_SIZE(lvms->flash); i++) {
        pflash_cfi01_legacy_drive(lvms->flash[i],
                                  drive_get(IF_PFLASH, 0, i));
    }

    virt_flash_map(lvms, get_system_memory());

    pflash_blk0 = pflash_cfi01_get_blk(lvms->flash[0]);

    if (pflash_blk0) {
        if (filename) {
            error_report("cannot use both '-bios' and '-drive if=pflash' "
                         "options at once");
            exit(1);
        }
        lvms->bios_loaded = true;
        return;
    }

    if (filename) {
        bios_name = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename);
        if (!bios_name) {
            error_report("Could not find ROM image '%s'", filename);
            exit(1);
        }

        mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(lvms->flash[0]), 0);
        bios_size = load_image_mr(bios_name, mr);
        if (bios_size < 0) {
            error_report("Could not load ROM image '%s'", bios_name);
            exit(1);
        }
        g_free(bios_name);
        lvms->bios_loaded = true;
    }
}

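/*
 * Machine-level IOCSR registers: writes to MISC_FUNC_REG mirror the
 * extioi enable bits into the V-EIOINTC config register, while reads
 * return static version/feature/vendor/cpuname values.
 */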
static MemTxResult virt_iocsr_misc_write(void *opaque, hwaddr addr,
                                         uint64_t val, unsigned size,
                                         MemTxAttrs attrs)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(opaque);
    uint64_t features;

    switch (addr) {
    case MISC_FUNC_REG:
        if (!virt_is_veiointc_enabled(lvms)) {
            return MEMTX_OK;
        }

        features = address_space_ldl(&lvms->as_iocsr,
                                     EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG,
                                     attrs, NULL);
        if (val & BIT_ULL(IOCSRM_EXTIOI_EN)) {
            features |= BIT(EXTIOI_ENABLE);
        }
        if (val & BIT_ULL(IOCSRM_EXTIOI_INT_ENCODE)) {
            features |= BIT(EXTIOI_ENABLE_INT_ENCODE);
        }

        address_space_stl(&lvms->as_iocsr,
                          EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG,
                          features, attrs, NULL);
        break;
    default:
        g_assert_not_reached();
    }

    return MEMTX_OK;
}

static MemTxResult virt_iocsr_misc_read(void *opaque, hwaddr addr,
                                        uint64_t *data,
                                        unsigned size, MemTxAttrs attrs)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(opaque);
    uint64_t ret = 0;
    int features;

    switch (addr) {
    case VERSION_REG:
        ret = 0x11ULL;
        break;
    case FEATURE_REG:
        ret = BIT(IOCSRF_MSI) | BIT(IOCSRF_EXTIOI) | BIT(IOCSRF_CSRIPI);
        if (kvm_enabled()) {
            ret |= BIT(IOCSRF_VM);
        }
        break;
    case VENDOR_REG:
        ret = 0x6e6f73676e6f6f4cULL; /* "Loongson" */
        break;
    case CPUNAME_REG:
        ret = 0x303030354133ULL;     /* "3A5000" */
        break;
    case MISC_FUNC_REG:
        if (!virt_is_veiointc_enabled(lvms)) {
            ret |= BIT_ULL(IOCSRM_EXTIOI_EN);
            break;
        }

        features = address_space_ldl(&lvms->as_iocsr,
                                     EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG,
                                     attrs, NULL);
        if (features & BIT(EXTIOI_ENABLE)) {
            ret |= BIT_ULL(IOCSRM_EXTIOI_EN);
        }
        if (features & BIT(EXTIOI_ENABLE_INT_ENCODE)) {
            ret |= BIT_ULL(IOCSRM_EXTIOI_INT_ENCODE);
        }
        break;
    default:
        g_assert_not_reached();
    }

    *data = ret;
    return MEMTX_OK;
}

static const MemoryRegionOps virt_iocsr_misc_ops = {
    .read_with_attrs  = virt_iocsr_misc_read,
    .write_with_attrs = virt_iocsr_misc_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

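/*
 * Build the fw_cfg memory map: RAM is split around the low-memory hole,
 * with up to VIRT_LOWMEM_SIZE bytes at VIRT_LOWMEM_BASE and the rest at
 * VIRT_HIGHMEM_BASE; node 0 is described first, then the remaining
 * NUMA nodes.
 */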
static void fw_cfg_add_memory(MachineState *ms)
{
    hwaddr base, size, ram_size, gap;
    int nb_numa_nodes, nodes;
    NodeInfo *numa_info;

    ram_size = ms->ram_size;
    base = VIRT_LOWMEM_BASE;
    gap = VIRT_LOWMEM_SIZE;
    nodes = nb_numa_nodes = ms->numa_state->num_nodes;
    numa_info = ms->numa_state->nodes;
    if (!nodes) {
        nodes = 1;
    }

    /* add fw_cfg memory map of node0 */
    if (nb_numa_nodes) {
        size = numa_info[0].node_mem;
    } else {
        size = ram_size;
    }

    if (size >= gap) {
        memmap_add_entry(base, gap, 1);
        size -= gap;
        base = VIRT_HIGHMEM_BASE;
    }

    if (size) {
        memmap_add_entry(base, size, 1);
        base += size;
    }

    if (nodes < 2) {
        return;
    }

    /* add fw_cfg memory map of other nodes */
    if (numa_info[0].node_mem < gap && ram_size > gap) {
        /*
         * The memory map for the remaining nodes is split into two parts:
         *   lowram:  [base, +(gap - numa_info[0].node_mem))
         *   highram: [VIRT_HIGHMEM_BASE, +(ram_size - gap))
         */
        memmap_add_entry(base, gap - numa_info[0].node_mem, 1);
        size = ram_size - gap;
        base = VIRT_HIGHMEM_BASE;
    } else {
        size = ram_size - numa_info[0].node_mem;
    }

    if (size) {
        memmap_add_entry(base, size, 1);
    }
}

static void virt_init(MachineState *machine)
{
    const char *cpu_model = machine->cpu_type;
    MemoryRegion *address_space_mem = get_system_memory();
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine);
    int i;
    hwaddr base, size, ram_size = machine->ram_size;
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    Object *cpuobj;

    if (!cpu_model) {
        cpu_model = LOONGARCH_CPU_TYPE_NAME("la464");
    }

    /* Create IOCSR space */
    memory_region_init_io(&lvms->system_iocsr, OBJECT(machine), NULL,
                          machine, "iocsr", UINT64_MAX);
    address_space_init(&lvms->as_iocsr, &lvms->system_iocsr, "IOCSR");
    memory_region_init_io(&lvms->iocsr_mem, OBJECT(machine),
                          &virt_iocsr_misc_ops,
                          machine, "iocsr_misc", 0x428);
    memory_region_add_subregion(&lvms->system_iocsr, 0, &lvms->iocsr_mem);

    /* Init CPUs */
    mc->possible_cpu_arch_ids(machine);
    for (i = 0; i < machine->smp.cpus; i++) {
        cpuobj = object_new(machine->cpu_type);
        if (cpuobj == NULL) {
            error_report("Failed to create object with type %s",
                         machine->cpu_type);
            exit(EXIT_FAILURE);
        }
        qdev_realize_and_unref(DEVICE(cpuobj), NULL, &error_fatal);
    }

    fw_cfg_add_memory(machine);

    /* Node0 memory */
    size = ram_size;
    base = VIRT_LOWMEM_BASE;
    if (size > VIRT_LOWMEM_SIZE) {
        size = VIRT_LOWMEM_SIZE;
    }

    memory_region_init_alias(&lvms->lowmem, NULL, "loongarch.lowram",
                             machine->ram, base, size);
    memory_region_add_subregion(address_space_mem, base, &lvms->lowmem);

    base += size;
    if (ram_size - size) {
        base = VIRT_HIGHMEM_BASE;
        memory_region_init_alias(&lvms->highmem, NULL, "loongarch.highram",
                                 machine->ram, VIRT_LOWMEM_BASE + size,
                                 ram_size - size);
        memory_region_add_subregion(address_space_mem, base, &lvms->highmem);
        base += ram_size - size;
    }

    /* initialize device memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;

        if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) {
            error_report("unsupported amount of memory slots: %"PRIu64,
                         machine->ram_slots);
            exit(EXIT_FAILURE);
        }

        if (QEMU_ALIGN_UP(machine->maxram_size,
                          TARGET_PAGE_SIZE) != machine->maxram_size) {
            error_report("maximum memory size must be aligned to multiple of "
                         "%d bytes", TARGET_PAGE_SIZE);
            exit(EXIT_FAILURE);
        }
        machine_memory_devices_init(machine, base, device_mem_size);
    }

    /* load the BIOS image. */
    virt_firmware_init(lvms);

    /* fw_cfg init */
    lvms->fw_cfg = virt_fw_cfg_init(ram_size, machine);
    rom_set_fw(lvms->fw_cfg);

    if (lvms->fw_cfg != NULL) {
        fw_cfg_add_file(lvms->fw_cfg, "etc/memmap",
                        memmap_table,
                        sizeof(struct memmap_entry) * (memmap_entries));
    }

    /* Initialize the IO interrupt subsystem */
    virt_irq_init(lvms);
    lvms->machine_done.notify = virt_done;
    qemu_add_machine_init_done_notifier(&lvms->machine_done);
    /* connect powerdown request */
    lvms->powerdown_notifier.notify = virt_powerdown_req;
    qemu_register_powerdown_notifier(&lvms->powerdown_notifier);

    lvms->bootinfo.ram_size = ram_size;
    loongarch_load_kernel(machine, &lvms->bootinfo);
}

static void virt_get_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);
    OnOffAuto acpi = lvms->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void virt_set_acpi(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &lvms->acpi, errp);
}

static void virt_initfn(Object *obj)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj);

    if (tcg_enabled()) {
        lvms->veiointc = ON_OFF_AUTO_OFF;
    }
    lvms->acpi = ON_OFF_AUTO_AUTO;
    lvms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    lvms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
    virt_flash_create(lvms);
}

static void virt_get_topo_from_index(MachineState *ms,
                                     LoongArchCPUTopo *topo, int index)
{
    topo->socket_id = index / (ms->smp.cores * ms->smp.threads);
    topo->core_id = index / ms->smp.threads % ms->smp.cores;
    topo->thread_id = index % ms->smp.threads;
}

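/*
 * Round count up to the next power of two so that the thread, core and
 * socket ids occupy disjoint bit fields of the arch id computed in
 * virt_get_arch_id_from_topo() below.
 */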
static unsigned int topo_align_up(unsigned int count)
{
    g_assert(count >= 1);
    count -= 1;
    return BIT(count ? 32 - clz32(count) : 0);
}

/*
 * LoongArch Reference Manual Vol1, Chapter 7.4.12 CPU Identity
 * For the CPU architecture, bit0 .. bit8 are valid CPU id bits, so the
 * maximum cpuid is 512. However, for the IPI/EIOINTC interrupt controllers,
 * the maximum supported cpu id for irq routing is 256.
 *
 * Here the maximum cpu id is 256 for the virt machine.
 */
static int virt_get_arch_id_from_topo(MachineState *ms, LoongArchCPUTopo *topo)
{
    int arch_id, threads, cores, sockets;

    threads = topo_align_up(ms->smp.threads);
    cores = topo_align_up(ms->smp.cores);
    sockets = topo_align_up(ms->smp.sockets);
    if ((threads * cores * sockets) > 256) {
        error_report("Exceeding max cpuid 256 with sockets[%d] cores[%d]"
                     " threads[%d]", ms->smp.sockets, ms->smp.cores,
                     ms->smp.threads);
        exit(1);
    }

    arch_id = topo->thread_id + topo->core_id * threads;
    arch_id += topo->socket_id * threads * cores;
    return arch_id;
}

/* Find cpu slot in machine->possible_cpus by arch_id */
static CPUArchId *virt_find_cpu_slot(MachineState *ms, int arch_id)
{
    int n;

    for (n = 0; n < ms->possible_cpus->len; n++) {
        if (ms->possible_cpus->cpus[n].arch_id == arch_id) {
            return &ms->possible_cpus->cpus[n];
        }
    }

    return NULL;
}

/* Find a free slot for a cold-plugged CPU object, i.e. where cpu is NULL */
static CPUArchId *virt_find_empty_cpu_slot(MachineState *ms)
{
    int n;

    for (n = 0; n < ms->possible_cpus->len; n++) {
        if (ms->possible_cpus->cpus[n].cpu == NULL) {
            return &ms->possible_cpus->cpus[n];
        }
    }

    return NULL;
}

static void virt_cpu_pre_plug(HotplugHandler *hotplug_dev,
                              DeviceState *dev, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
    MachineState *ms = MACHINE(OBJECT(hotplug_dev));
    LoongArchCPU *cpu = LOONGARCH_CPU(dev);
    CPUState *cs = CPU(dev);
    CPUArchId *cpu_slot;
    Error *err = NULL;
    LoongArchCPUTopo topo;
    int arch_id;

    if (lvms->acpi_ged) {
        if ((cpu->thread_id < 0) || (cpu->thread_id >= ms->smp.threads)) {
            error_setg(&err,
                       "Invalid thread-id %u specified, must be in range 0:%u",
                       cpu->thread_id, ms->smp.threads - 1);
            goto out;
        }

        if ((cpu->core_id < 0) || (cpu->core_id >= ms->smp.cores)) {
            error_setg(&err,
                       "Invalid core-id %u specified, must be in range 0:%u",
                       cpu->core_id, ms->smp.cores - 1);
            goto out;
        }

        if ((cpu->socket_id < 0) || (cpu->socket_id >= ms->smp.sockets)) {
            error_setg(&err,
                       "Invalid socket-id %u specified, must be in range 0:%u",
                       cpu->socket_id, ms->smp.sockets - 1);
            goto out;
        }

        topo.socket_id = cpu->socket_id;
        topo.core_id = cpu->core_id;
        topo.thread_id = cpu->thread_id;
        arch_id = virt_get_arch_id_from_topo(ms, &topo);
        cpu_slot = virt_find_cpu_slot(ms, arch_id);
        if (CPU(cpu_slot->cpu)) {
            error_setg(&err,
                       "cpu(id%d=%d:%d:%d) with arch-id %" PRIu64 " exists",
                       cs->cpu_index, cpu->socket_id, cpu->core_id,
                       cpu->thread_id, cpu_slot->arch_id);
            goto out;
        }
    } else {
        /* For a cold-added cpu, find an empty cpu slot */
        cpu_slot = virt_find_empty_cpu_slot(ms);
        topo.socket_id = cpu_slot->props.socket_id;
        topo.core_id = cpu_slot->props.core_id;
        topo.thread_id = cpu_slot->props.thread_id;
        object_property_set_int(OBJECT(dev), "socket-id", topo.socket_id, NULL);
        object_property_set_int(OBJECT(dev), "core-id", topo.core_id, NULL);
        object_property_set_int(OBJECT(dev), "thread-id", topo.thread_id, NULL);
    }

    cpu->env.address_space_iocsr = &lvms->as_iocsr;
    cpu->phy_id = cpu_slot->arch_id;
    cs->cpu_index = cpu_slot - ms->possible_cpus->cpus;
    numa_cpu_pre_plug(cpu_slot, dev, &err);
out:
    if (err) {
        error_propagate(errp, err);
    }
}

static void virt_cpu_unplug_request(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
    Error *err = NULL;
    LoongArchCPU *cpu = LOONGARCH_CPU(dev);
    CPUState *cs = CPU(dev);

    if (cs->cpu_index == 0) {
        error_setg(&err, "hot-unplug of boot cpu(id%d=%d:%d:%d) not supported",
                   cs->cpu_index, cpu->socket_id,
                   cpu->core_id, cpu->thread_id);
        error_propagate(errp, err);
        return;
    }

    hotplug_handler_unplug_request(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &err);
    if (err) {
        error_propagate(errp, err);
    }
}

static void virt_cpu_unplug(HotplugHandler *hotplug_dev,
                            DeviceState *dev, Error **errp)
{
    CPUArchId *cpu_slot;
    Error *err = NULL;
    LoongArchCPU *cpu = LOONGARCH_CPU(dev);
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);

    /* Notify ipi and extioi irqchip to remove interrupt routing to CPU */
    hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->ipi), dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->extioi), dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* Notify acpi ged CPU removed */
    hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
    cpu_slot->cpu = NULL;
}

static void virt_cpu_plug(HotplugHandler *hotplug_dev,
                          DeviceState *dev, Error **errp)
{
    CPUArchId *cpu_slot;
    LoongArchCPU *cpu = LOONGARCH_CPU(dev);
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
    Error *err = NULL;

    cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
    cpu_slot->cpu = CPU(dev);

    if (lvms->ipi) {
        hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    if (lvms->extioi) {
        hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    if (lvms->acpi_ged) {
        hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &err);
        if (err) {
            error_propagate(errp, err);
        }
    }
}

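/*
 * The hotplug callbacks below dispatch on device type: pc-dimm memory
 * goes through the ACPI GED device, LoongArch CPUs go through the
 * IPI/EXTIOI/GED handlers above, and dynamic sysbus devices get linked
 * to the platform bus.
 */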
static bool memhp_type_supported(DeviceState *dev)
{
    /* we only support pc dimm now */
    return object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
           !object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
}

static void virt_mem_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    pc_dimm_pre_plug(PC_DIMM(dev), MACHINE(hotplug_dev), errp);
}

static void virt_device_pre_plug(HotplugHandler *hotplug_dev,
                                 DeviceState *dev, Error **errp)
{
    if (memhp_type_supported(dev)) {
        virt_mem_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
        virt_cpu_pre_plug(hotplug_dev, dev, errp);
    }
}

static void virt_mem_unplug_request(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);

    /* the acpi ged always exists */
    hotplug_handler_unplug_request(HOTPLUG_HANDLER(lvms->acpi_ged), dev,
                                   errp);
}

static void virt_device_unplug_request(HotplugHandler *hotplug_dev,
                                       DeviceState *dev, Error **errp)
{
    if (memhp_type_supported(dev)) {
        virt_mem_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
        virt_cpu_unplug_request(hotplug_dev, dev, errp);
    }
}

static void virt_mem_unplug(HotplugHandler *hotplug_dev,
                            DeviceState *dev, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);

    hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, errp);
    pc_dimm_unplug(PC_DIMM(dev), MACHINE(lvms));
    qdev_unrealize(dev);
}

static void virt_device_unplug(HotplugHandler *hotplug_dev,
                               DeviceState *dev, Error **errp)
{
    if (memhp_type_supported(dev)) {
        virt_mem_unplug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
        virt_cpu_unplug(hotplug_dev, dev, errp);
    }
}

static void virt_mem_plug(HotplugHandler *hotplug_dev,
                          DeviceState *dev, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);

    pc_dimm_plug(PC_DIMM(dev), MACHINE(lvms));
    hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged),
                         dev, &error_abort);
}

static void virt_device_plug_cb(HotplugHandler *hotplug_dev,
                                DeviceState *dev, Error **errp)
{
    LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
    MachineClass *mc = MACHINE_GET_CLASS(lvms);
    PlatformBusDevice *pbus;

    if (device_is_dynamic_sysbus(mc, dev)) {
        if (lvms->platform_bus_dev) {
            pbus = PLATFORM_BUS_DEVICE(lvms->platform_bus_dev);
            platform_bus_link_device(pbus, SYS_BUS_DEVICE(dev));
        }
    } else if (memhp_type_supported(dev)) {
        virt_mem_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
        virt_cpu_plug(hotplug_dev, dev, errp);
    }
}

static HotplugHandler *virt_get_hotplug_handler(MachineState *machine,
                                                DeviceState *dev)
{
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (device_is_dynamic_sysbus(mc, dev) ||
        object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU) ||
        object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) ||
        memhp_type_supported(dev)) {
        return HOTPLUG_HANDLER(machine);
    }
    return NULL;
}

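/* Enumerate all possible CPUs (up to smp.max_cpus) with their topology ids. */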
static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
{
    int n, arch_id;
    unsigned int max_cpus = ms->smp.max_cpus;
    LoongArchCPUTopo topo;

    if (ms->possible_cpus) {
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;
    for (n = 0; n < ms->possible_cpus->len; n++) {
        virt_get_topo_from_index(ms, &topo, n);
        arch_id = virt_get_arch_id_from_topo(ms, &topo);
        ms->possible_cpus->cpus[n].type = ms->cpu_type;
        ms->possible_cpus->cpus[n].arch_id = arch_id;
        ms->possible_cpus->cpus[n].vcpus_count = 1;
        ms->possible_cpus->cpus[n].props.has_socket_id = true;
        ms->possible_cpus->cpus[n].props.socket_id = topo.socket_id;
        ms->possible_cpus->cpus[n].props.has_core_id = true;
        ms->possible_cpus->cpus[n].props.core_id = topo.core_id;
        ms->possible_cpus->cpus[n].props.has_thread_id = true;
        ms->possible_cpus->cpus[n].props.thread_id = topo.thread_id;
    }
    return ms->possible_cpus;
}

static CpuInstanceProperties virt_cpu_index_to_props(MachineState *ms,
                                                     unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}

static int64_t virt_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    int64_t socket_id;

    if (ms->numa_state->num_nodes) {
        socket_id = ms->possible_cpus->cpus[idx].props.socket_id;
        return socket_id % ms->numa_state->num_nodes;
    } else {
        return 0;
    }
}

static void virt_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);

    mc->init = virt_init;
    mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("la464");
    mc->default_ram_id = "loongarch.ram";
    mc->desc = "QEMU LoongArch Virtual Machine";
    mc->max_cpus = LOONGARCH_MAX_CPUS;
    mc->is_default = 1;
    mc->default_kernel_irqchip_split = false;
    mc->block_default_type = IF_VIRTIO;
    mc->default_boot_order = "c";
    mc->no_cdrom = 1;
    mc->possible_cpu_arch_ids = virt_possible_cpu_arch_ids;
    mc->cpu_index_to_instance_props = virt_cpu_index_to_props;
    mc->get_default_cpu_node_id = virt_get_default_cpu_node_id;
    mc->numa_mem_supported = true;
    mc->auto_enable_numa_with_memhp = true;
    mc->auto_enable_numa_with_memdev = true;
    mc->has_hotpluggable_cpus = true;
    mc->get_hotplug_handler = virt_get_hotplug_handler;
    mc->default_nic = "virtio-net-pci";
    hc->plug = virt_device_plug_cb;
    hc->pre_plug = virt_device_pre_plug;
    hc->unplug_request = virt_device_unplug_request;
    hc->unplug = virt_device_unplug;

    object_class_property_add(oc, "acpi", "OnOffAuto",
                              virt_get_acpi, virt_set_acpi,
                              NULL, NULL);
    object_class_property_set_description(oc, "acpi",
                                          "Enable ACPI");
    object_class_property_add(oc, "v-eiointc", "OnOffAuto",
                              virt_get_veiointc, virt_set_veiointc,
                              NULL, NULL);
    object_class_property_set_description(oc, "v-eiointc",
                                          "Enable Virt Extend I/O Interrupt Controller.");
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE);
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS);
#ifdef CONFIG_TPM
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
#endif
}

static const TypeInfo virt_machine_types[] = {
    {
        .name          = TYPE_LOONGARCH_VIRT_MACHINE,
        .parent        = TYPE_MACHINE,
        .instance_size = sizeof(LoongArchVirtMachineState),
        .class_init    = virt_class_init,
        .instance_init = virt_initfn,
        .interfaces = (InterfaceInfo[]) {
            { TYPE_HOTPLUG_HANDLER },
            { }
        },
    }
};

DEFINE_TYPES(virt_machine_types)