acpi-build.c 105 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090
  1. /* Support for generating ACPI tables and passing them to Guests
  2. *
  3. * Copyright (C) 2008-2010 Kevin O'Connor <kevin@koconnor.net>
  4. * Copyright (C) 2006 Fabrice Bellard
  5. * Copyright (C) 2013 Red Hat Inc
  6. *
  7. * Author: Michael S. Tsirkin <mst@redhat.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. * You should have received a copy of the GNU General Public License along
  18. * with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #include "qemu/osdep.h"
  21. #include "qapi/error.h"
  22. #include "qapi/qmp/qnum.h"
  23. #include "acpi-build.h"
  24. #include "qemu/bitmap.h"
  25. #include "qemu/error-report.h"
  26. #include "hw/pci/pci.h"
  27. #include "hw/core/cpu.h"
  28. #include "target/i386/cpu.h"
  29. #include "hw/misc/pvpanic.h"
  30. #include "hw/timer/hpet.h"
  31. #include "hw/acpi/acpi-defs.h"
  32. #include "hw/acpi/acpi.h"
  33. #include "hw/acpi/cpu.h"
  34. #include "hw/nvram/fw_cfg.h"
  35. #include "hw/acpi/bios-linker-loader.h"
  36. #include "hw/isa/isa.h"
  37. #include "hw/block/fdc.h"
  38. #include "hw/acpi/memory_hotplug.h"
  39. #include "sysemu/tpm.h"
  40. #include "hw/acpi/tpm.h"
  41. #include "hw/acpi/vmgenid.h"
  42. #include "hw/boards.h"
  43. #include "sysemu/tpm_backend.h"
  44. #include "hw/rtc/mc146818rtc_regs.h"
  45. #include "migration/vmstate.h"
  46. #include "hw/mem/memory-device.h"
  47. #include "sysemu/numa.h"
  48. #include "sysemu/reset.h"
  49. /* Supported chipsets: */
  50. #include "hw/southbridge/piix.h"
  51. #include "hw/acpi/pcihp.h"
  52. #include "hw/i386/ich9.h"
  53. #include "hw/pci/pci_bus.h"
  54. #include "hw/pci-host/q35.h"
  55. #include "hw/i386/x86-iommu.h"
  56. #include "hw/acpi/aml-build.h"
  57. #include "hw/acpi/utils.h"
  58. #include "hw/acpi/pci.h"
  59. #include "qom/qom-qobject.h"
  60. #include "hw/i386/amd_iommu.h"
  61. #include "hw/i386/intel_iommu.h"
  62. #include "hw/acpi/ipmi.h"
/*
 * These are used to size the ACPI tables for -M pc-i440fx-1.7 and
 * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows
 * a little bit, there should be plenty of free space since the DSDT
 * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
 */
#define ACPI_BUILD_LEGACY_CPU_AML_SIZE 97
#define ACPI_BUILD_ALIGN_SIZE 0x1000
#define ACPI_BUILD_TABLE_SIZE 0x20000

/* Uncomment to get ACPI_BUILD_DPRINTF tracing on stdout */
/* #define DEBUG_ACPI_BUILD */
#ifdef DEBUG_ACPI_BUILD
#define ACPI_BUILD_DPRINTF(fmt, ...) \
    do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
#else
#define ACPI_BUILD_DPRINTF(fmt, ...)
#endif

/* Default IOAPIC ID */
#define ACPI_BUILD_IOAPIC_ID 0x0
/*
 * Power-management configuration gathered from the machine's PM device
 * (PIIX4 PM or ICH9 LPC) by acpi_get_pm_info() and consumed while
 * building the FADT and DSDT.
 */
typedef struct AcpiPmInfo {
    bool s3_disabled;        /* S3 sleep state disabled by the PM device */
    bool s4_disabled;        /* S4 sleep state disabled by the PM device */
    bool pcihp_bridge_en;    /* ACPI PCI hotplug with bridge support */
    uint8_t s4_val;          /* SLP_TYP value used to trigger S4 */
    AcpiFadtData fadt;       /* FADT fields common to both chipsets */
    uint16_t cpu_hp_io_base; /* CPU hotplug I/O port base */
    uint16_t pcihp_io_base;  /* PCI hotplug I/O base (set for PIIX4 only) */
    uint16_t pcihp_io_len;   /* PCI hotplug I/O length (set for PIIX4 only) */
} AcpiPmInfo;
/*
 * Miscellaneous platform facts collected by acpi_get_misc_info() and
 * referenced by several table builders.
 */
typedef struct AcpiMiscInfo {
    bool is_piix4;                  /* true on PIIX4 (pc), false on ICH9 (q35) */
    bool has_hpet;                  /* an HPET device is present */
    TPMVersion tpm_version;         /* version of the TPM found (if any) */
    const unsigned char *dsdt_code; /* precompiled DSDT AML, if used */
    unsigned dsdt_size;             /* size of dsdt_code in bytes */
    uint16_t pvpanic_port;          /* pvpanic device I/O port, 0 if absent */
    uint16_t applesmc_io_base;      /* AppleSMC I/O base, 0 if absent */
} AcpiMiscInfo;
/*
 * Per-bus state threaded through the PCI hotplug AML build; buses form a
 * chain via @parent mirroring the PCI bridge hierarchy.
 */
typedef struct AcpiBuildPciBusHotplugState {
    GArray *device_table;   /* accumulated AML for slot devices */
    GArray *notify_table;   /* accumulated AML for notify entries */
    struct AcpiBuildPciBusHotplugState *parent; /* enclosing bus, or NULL */
    bool pcihp_bridge_en;   /* describe child buses of cold-plugged bridges */
} AcpiBuildPciBusHotplugState;
/*
 * TPM configuration blob exposed to the firmware through fw_cfg.
 * QEMU_PACKED keeps the wire layout stable; presumably the firmware
 * parses this byte-for-byte — keep fields in sync with its consumer.
 */
typedef struct FwCfgTPMConfig {
    uint32_t tpmppi_address; /* guest address of the TPM PPI region */
    uint8_t tpm_version;     /* TPM version (TPMVersion value) */
    uint8_t tpmppi_version;  /* PPI interface version */
} QEMU_PACKED FwCfgTPMConfig;

/* Forward declaration: fills in MCFG info; defined later in this file. */
static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg);
/*
 * Fill @data with the FADT fields common to both supported chipsets.
 * Chipset-specific adjustments (revision, reset register, hotplug bases)
 * are applied afterwards by acpi_get_pm_info().  @o is the PM device
 * object queried for I/O addresses and ACPI enable/disable commands.
 */
static void init_common_fadt_data(MachineState *ms, Object *o,
                                  AcpiFadtData *data)
{
    /* Base of the PM I/O block; PM1a/PM_TMR addresses are offsets from it */
    uint32_t io = object_property_get_uint(o, ACPI_PM_PROP_PM_IO_BASE, NULL);
    AmlAddressSpace as = AML_AS_SYSTEM_IO;
    AcpiFadtData fadt = {
        .rev = 3,
        .flags =
            (1 << ACPI_FADT_F_WBINVD) |
            (1 << ACPI_FADT_F_PROC_C1) |
            (1 << ACPI_FADT_F_SLP_BUTTON) |
            (1 << ACPI_FADT_F_RTC_S4) |
            (1 << ACPI_FADT_F_USE_PLATFORM_CLOCK) |
            /* APIC destination mode ("Flat Logical") has an upper limit of 8
             * CPUs for more than 8 CPUs, "Clustered Logical" mode has to be
             * used
             */
            ((ms->smp.max_cpus > 8) ?
                (1 << ACPI_FADT_F_FORCE_APIC_CLUSTER_MODEL) : 0),
        .int_model = 1 /* Multiple APIC */,
        .rtc_century = RTC_CENTURY,
        .plvl2_lat = 0xfff /* C2 state not supported */,
        .plvl3_lat = 0xfff /* C3 state not supported */,
        .smi_cmd = ACPI_PORT_SMI_CMD,
        .sci_int = object_property_get_uint(o, ACPI_PM_PROP_SCI_INT, NULL),
        .acpi_enable_cmd =
            object_property_get_uint(o, ACPI_PM_PROP_ACPI_ENABLE_CMD, NULL),
        .acpi_disable_cmd =
            object_property_get_uint(o, ACPI_PM_PROP_ACPI_DISABLE_CMD, NULL),
        /* PM1a event block: 4 bytes at the PM I/O base */
        .pm1a_evt = { .space_id = as, .bit_width = 4 * 8, .address = io },
        /* PM1a control block: 2 bytes at base + 4 */
        .pm1a_cnt = { .space_id = as, .bit_width = 2 * 8,
                      .address = io + 0x04 },
        /* PM timer: 4 bytes at base + 8 */
        .pm_tmr = { .space_id = as, .bit_width = 4 * 8, .address = io + 0x08 },
        /* GPE0 block geometry comes from the PM device's properties */
        .gpe0_blk = { .space_id = as, .bit_width =
            object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK_LEN, NULL) * 8,
            .address = object_property_get_uint(o, ACPI_PM_PROP_GPE0_BLK, NULL)
        },
    };
    *data = fadt;
}
  151. static Object *object_resolve_type_unambiguous(const char *typename)
  152. {
  153. bool ambig;
  154. Object *o = object_resolve_path_type("", typename, &ambig);
  155. if (ambig || !o) {
  156. return NULL;
  157. }
  158. return o;
  159. }
  160. static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
  161. {
  162. Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM);
  163. Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE);
  164. Object *obj = piix ? piix : lpc;
  165. QObject *o;
  166. pm->cpu_hp_io_base = 0;
  167. pm->pcihp_io_base = 0;
  168. pm->pcihp_io_len = 0;
  169. assert(obj);
  170. init_common_fadt_data(machine, obj, &pm->fadt);
  171. if (piix) {
  172. /* w2k requires FADT(rev1) or it won't boot, keep PC compatible */
  173. pm->fadt.rev = 1;
  174. pm->cpu_hp_io_base = PIIX4_CPU_HOTPLUG_IO_BASE;
  175. pm->pcihp_io_base =
  176. object_property_get_uint(obj, ACPI_PCIHP_IO_BASE_PROP, NULL);
  177. pm->pcihp_io_len =
  178. object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
  179. }
  180. if (lpc) {
  181. struct AcpiGenericAddress r = { .space_id = AML_AS_SYSTEM_IO,
  182. .bit_width = 8, .address = ICH9_RST_CNT_IOPORT };
  183. pm->fadt.reset_reg = r;
  184. pm->fadt.reset_val = 0xf;
  185. pm->fadt.flags |= 1 << ACPI_FADT_F_RESET_REG_SUP;
  186. pm->cpu_hp_io_base = ICH9_CPU_HOTPLUG_IO_BASE;
  187. }
  188. /* The above need not be conditional on machine type because the reset port
  189. * happens to be the same on PIIX (pc) and ICH9 (q35). */
  190. QEMU_BUILD_BUG_ON(ICH9_RST_CNT_IOPORT != PIIX_RCR_IOPORT);
  191. /* Fill in optional s3/s4 related properties */
  192. o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
  193. if (o) {
  194. pm->s3_disabled = qnum_get_uint(qobject_to(QNum, o));
  195. } else {
  196. pm->s3_disabled = false;
  197. }
  198. qobject_unref(o);
  199. o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL);
  200. if (o) {
  201. pm->s4_disabled = qnum_get_uint(qobject_to(QNum, o));
  202. } else {
  203. pm->s4_disabled = false;
  204. }
  205. qobject_unref(o);
  206. o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL);
  207. if (o) {
  208. pm->s4_val = qnum_get_uint(qobject_to(QNum, o));
  209. } else {
  210. pm->s4_val = false;
  211. }
  212. qobject_unref(o);
  213. pm->pcihp_bridge_en =
  214. object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support",
  215. NULL);
  216. }
  217. static void acpi_get_misc_info(AcpiMiscInfo *info)
  218. {
  219. Object *piix = object_resolve_type_unambiguous(TYPE_PIIX4_PM);
  220. Object *lpc = object_resolve_type_unambiguous(TYPE_ICH9_LPC_DEVICE);
  221. assert(!!piix != !!lpc);
  222. if (piix) {
  223. info->is_piix4 = true;
  224. }
  225. if (lpc) {
  226. info->is_piix4 = false;
  227. }
  228. info->has_hpet = hpet_find();
  229. info->tpm_version = tpm_get_version(tpm_find());
  230. info->pvpanic_port = pvpanic_port();
  231. info->applesmc_io_base = applesmc_port();
  232. }
  233. /*
  234. * Because of the PXB hosts we cannot simply query TYPE_PCI_HOST_BRIDGE.
  235. * On i386 arch we only have two pci hosts, so we can look only for them.
  236. */
  237. static Object *acpi_get_i386_pci_host(void)
  238. {
  239. PCIHostState *host;
  240. host = OBJECT_CHECK(PCIHostState,
  241. object_resolve_path("/machine/i440fx", NULL),
  242. TYPE_PCI_HOST_BRIDGE);
  243. if (!host) {
  244. host = OBJECT_CHECK(PCIHostState,
  245. object_resolve_path("/machine/q35", NULL),
  246. TYPE_PCI_HOST_BRIDGE);
  247. }
  248. return OBJECT(host);
  249. }
/*
 * Read the 32-bit and 64-bit PCI hole boundaries from the PCI host
 * bridge's properties and return them as @hole and @hole64.
 */
static void acpi_get_pci_holes(Range *hole, Range *hole64)
{
    Object *pci_host;

    pci_host = acpi_get_i386_pci_host();
    g_assert(pci_host);

    range_set_bounds1(hole,
                      object_property_get_uint(pci_host,
                                               PCI_HOST_PROP_PCI_HOLE_START,
                                               NULL),
                      object_property_get_uint(pci_host,
                                               PCI_HOST_PROP_PCI_HOLE_END,
                                               NULL));
    range_set_bounds1(hole64,
                      object_property_get_uint(pci_host,
                                               PCI_HOST_PROP_PCI_HOLE64_START,
                                               NULL),
                      object_property_get_uint(pci_host,
                                               PCI_HOST_PROP_PCI_HOLE64_END,
                                               NULL));
}
  270. static void acpi_align_size(GArray *blob, unsigned align)
  271. {
  272. /* Align size to multiple of given size. This reduces the chance
  273. * we need to change size in the future (breaking cross version migration).
  274. */
  275. g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
  276. }
/* FACS */
static void
build_facs(GArray *table_data)
{
    /* Reserve space for the FACS; only signature and length are set here. */
    AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs);
    memcpy(&facs->signature, "FACS", 4);
    facs->length = cpu_to_le32(sizeof(*facs));
}
  285. void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
  286. const CPUArchIdList *apic_ids, GArray *entry)
  287. {
  288. uint32_t apic_id = apic_ids->cpus[uid].arch_id;
  289. /* ACPI spec says that LAPIC entry for non present
  290. * CPU may be omitted from MADT or it must be marked
  291. * as disabled. However omitting non present CPU from
  292. * MADT breaks hotplug on linux. So possible CPUs
  293. * should be put in MADT but kept disabled.
  294. */
  295. if (apic_id < 255) {
  296. AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof *apic);
  297. apic->type = ACPI_APIC_PROCESSOR;
  298. apic->length = sizeof(*apic);
  299. apic->processor_id = uid;
  300. apic->local_apic_id = apic_id;
  301. if (apic_ids->cpus[uid].cpu != NULL) {
  302. apic->flags = cpu_to_le32(1);
  303. } else {
  304. apic->flags = cpu_to_le32(0);
  305. }
  306. } else {
  307. AcpiMadtProcessorX2Apic *apic = acpi_data_push(entry, sizeof *apic);
  308. apic->type = ACPI_APIC_LOCAL_X2APIC;
  309. apic->length = sizeof(*apic);
  310. apic->uid = cpu_to_le32(uid);
  311. apic->x2apic_id = cpu_to_le32(apic_id);
  312. if (apic_ids->cpus[uid].cpu != NULL) {
  313. apic->flags = cpu_to_le32(1);
  314. } else {
  315. apic->flags = cpu_to_le32(0);
  316. }
  317. }
  318. }
/*
 * Build the MADT ("APIC" signature): one Local APIC/x2APIC entry per
 * possible CPU, the I/O APIC, interrupt source overrides for ISA IRQs
 * routed to PCI, and a Local APIC NMI entry covering all processors.
 */
static void
build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
{
    MachineClass *mc = MACHINE_GET_CLASS(pcms);
    X86MachineState *x86ms = X86_MACHINE(pcms);
    const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms));
    int madt_start = table_data->len;
    AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
    AcpiDeviceIf *adev = ACPI_DEVICE_IF(pcms->acpi_dev);
    bool x2apic_mode = false;

    AcpiMultipleApicTable *madt;
    AcpiMadtIoApic *io_apic;
    AcpiMadtIntsrcovr *intsrcovr;
    int i;

    madt = acpi_data_push(table_data, sizeof *madt);
    madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS);
    madt->flags = cpu_to_le32(1);

    for (i = 0; i < apic_ids->len; i++) {
        /* delegate the per-CPU entry format to the ACPI device's hook */
        adevc->madt_cpu(adev, i, apic_ids, table_data);
        if (apic_ids->cpus[i].arch_id > 254) {
            /* any APIC ID above 254 switches the NMI entry to x2APIC form */
            x2apic_mode = true;
        }
    }

    io_apic = acpi_data_push(table_data, sizeof *io_apic);
    io_apic->type = ACPI_APIC_IO;
    io_apic->length = sizeof(*io_apic);
    io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID;
    io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS);
    io_apic->interrupt = cpu_to_le32(0);

    if (x86ms->apic_xrupt_override) {
        /* override: ISA IRQ 0 is delivered on GSI 2 */
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = 0;
        intsrcovr->gsi = cpu_to_le32(2);
        intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */
    }
    for (i = 1; i < 16; i++) {
#define ACPI_BUILD_PCI_IRQS ((1<<5) | (1<<9) | (1<<10) | (1<<11))
        if (!(ACPI_BUILD_PCI_IRQS & (1 << i))) {
            /* No need for a INT source override structure. */
            continue;
        }
        intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr);
        intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE;
        intsrcovr->length = sizeof(*intsrcovr);
        intsrcovr->source = i;
        intsrcovr->gsi = cpu_to_le32(i);
        intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */
    }

    if (x2apic_mode) {
        AcpiMadtLocalX2ApicNmi *local_nmi;

        local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
        local_nmi->type = ACPI_APIC_LOCAL_X2APIC_NMI;
        local_nmi->length = sizeof(*local_nmi);
        local_nmi->uid = 0xFFFFFFFF; /* all processors */
        local_nmi->flags = cpu_to_le16(0);
        local_nmi->lint = 1; /* ACPI_LINT1 */
    } else {
        AcpiMadtLocalNmi *local_nmi;

        local_nmi = acpi_data_push(table_data, sizeof *local_nmi);
        local_nmi->type = ACPI_APIC_LOCAL_NMI;
        local_nmi->length = sizeof(*local_nmi);
        local_nmi->processor_id = 0xff; /* all processors */
        local_nmi->flags = cpu_to_le16(0);
        local_nmi->lint = 1; /* ACPI_LINT1 */
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + madt_start), "APIC",
                 table_data->len - madt_start, 1, NULL, NULL);
}
  390. static void build_append_pcihp_notify_entry(Aml *method, int slot)
  391. {
  392. Aml *if_ctx;
  393. int32_t devfn = PCI_DEVFN(slot, 0);
  394. if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL));
  395. aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1)));
  396. aml_append(method, if_ctx);
  397. }
/*
 * Emit AML slot descriptors for @bus under @parent_scope: a device per
 * populated or hot-pluggable slot, _SUN/_EJ0 on hot-pluggable slots, a
 * "DVNT" notify dispatcher when the bus has a BSEL, and a "PCNT" method
 * forwarding hotplug events to this bus and its child bridges.  Recurses
 * into cold-plugged bridges when @pcihp_bridge_en is set.
 */
static void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus,
                                         bool pcihp_bridge_en)
{
    Aml *dev, *notify_method = NULL, *method;
    QObject *bsel;
    PCIBus *sec;
    int i;

    /* a BSEL property marks the bus as supporting ACPI PCI hotplug */
    bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
    if (bsel) {
        uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));

        aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val)));
        notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED);
    }

    /* one iteration per slot: step over the 8 functions of each device */
    for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) {
        DeviceClass *dc;
        PCIDeviceClass *pc;
        PCIDevice *pdev = bus->devices[i];
        int slot = PCI_SLOT(i);
        bool hotplug_enabled_dev;
        bool bridge_in_acpi;

        if (!pdev) {
            if (bsel) { /* add hotplug slots for non present devices */
                dev = aml_device("S%.02X", PCI_DEVFN(slot, 0));
                aml_append(dev, aml_name_decl("_SUN", aml_int(slot)));
                aml_append(dev, aml_name_decl("_ADR", aml_int(slot << 16)));
                method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
                aml_append(method,
                    aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN"))
                );
                aml_append(dev, method);
                aml_append(parent_scope, dev);

                build_append_pcihp_notify_entry(notify_method, slot);
            }
            continue;
        }

        pc = PCI_DEVICE_GET_CLASS(pdev);
        dc = DEVICE_GET_CLASS(pdev);

        /* When hotplug for bridges is enabled, bridges are
         * described in ACPI separately (see build_pci_bus_end).
         * In this case they aren't themselves hot-pluggable.
         * Hotplugged bridges *are* hot-pluggable.
         */
        bridge_in_acpi = pc->is_bridge && pcihp_bridge_en &&
            !DEVICE(pdev)->hotplugged;

        hotplug_enabled_dev = bsel && dc->hotpluggable && !bridge_in_acpi;

        if (pc->class_id == PCI_CLASS_BRIDGE_ISA) {
            /* ISA bridges get no slot descriptor here */
            continue;
        }

        /* start to compose PCI slot descriptor */
        dev = aml_device("S%.02X", PCI_DEVFN(slot, 0));
        aml_append(dev, aml_name_decl("_ADR", aml_int(slot << 16)));

        if (pc->class_id == PCI_CLASS_DISPLAY_VGA) {
            /* add VGA specific AML methods */
            int s3d;

            /* qxl-vga wants D3 on S3 resume; other VGA devices use D0 */
            if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
                s3d = 3;
            } else {
                s3d = 0;
            }

            method = aml_method("_S1D", 0, AML_NOTSERIALIZED);
            aml_append(method, aml_return(aml_int(0)));
            aml_append(dev, method);

            method = aml_method("_S2D", 0, AML_NOTSERIALIZED);
            aml_append(method, aml_return(aml_int(0)));
            aml_append(dev, method);

            method = aml_method("_S3D", 0, AML_NOTSERIALIZED);
            aml_append(method, aml_return(aml_int(s3d)));
            aml_append(dev, method);
        } else if (hotplug_enabled_dev) {
            /* add _SUN/_EJ0 to make slot hotpluggable */
            aml_append(dev, aml_name_decl("_SUN", aml_int(slot)));

            method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
            aml_append(method,
                aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN"))
            );
            aml_append(dev, method);

            if (bsel) {
                build_append_pcihp_notify_entry(notify_method, slot);
            }
        } else if (bridge_in_acpi) {
            /*
             * device is coldplugged bridge,
             * add child device descriptions into its scope
             */
            PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));

            build_append_pci_bus_devices(dev, sec_bus, pcihp_bridge_en);
        }
        /* slot descriptor has been composed, add it into parent context */
        aml_append(parent_scope, dev);
    }

    if (bsel) {
        aml_append(parent_scope, notify_method);
    }

    /* Append PCNT method to notify about events on local and child buses.
     * Add unconditionally for root since DSDT expects it.
     */
    method = aml_method("PCNT", 0, AML_NOTSERIALIZED);

    /* If bus supports hotplug select it and notify about local events */
    if (bsel) {
        uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));

        aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM")));
        aml_append(method,
            aml_call2("DVNT", aml_name("PCIU"), aml_int(1) /* Device Check */)
        );
        aml_append(method,
            aml_call2("DVNT", aml_name("PCID"), aml_int(3)/* Eject Request */)
        );
    }

    /* Notify about child bus events in any case */
    if (pcihp_bridge_en) {
        QLIST_FOREACH(sec, &bus->child, sibling) {
            int32_t devfn = sec->parent_dev->devfn;

            /* root and express buses are not handled by this scheme */
            if (pci_bus_is_root(sec) || pci_bus_is_express(sec)) {
                continue;
            }

            aml_append(method, aml_name("^S%.02X.PCNT", devfn));
        }
    }
    aml_append(parent_scope, method);
    qobject_unref(bsel);
}
  519. /**
  520. * build_prt_entry:
  521. * @link_name: link name for PCI route entry
  522. *
  523. * build AML package containing a PCI route entry for @link_name
  524. */
  525. static Aml *build_prt_entry(const char *link_name)
  526. {
  527. Aml *a_zero = aml_int(0);
  528. Aml *pkg = aml_package(4);
  529. aml_append(pkg, a_zero);
  530. aml_append(pkg, a_zero);
  531. aml_append(pkg, aml_name("%s", link_name));
  532. aml_append(pkg, a_zero);
  533. return pkg;
  534. }
  535. /*
  536. * initialize_route - Initialize the interrupt routing rule
  537. * through a specific LINK:
  538. * if (lnk_idx == idx)
  539. * route using link 'link_name'
  540. */
  541. static Aml *initialize_route(Aml *route, const char *link_name,
  542. Aml *lnk_idx, int idx)
  543. {
  544. Aml *if_ctx = aml_if(aml_equal(lnk_idx, aml_int(idx)));
  545. Aml *pkg = build_prt_entry(link_name);
  546. aml_append(if_ctx, aml_store(pkg, route));
  547. return if_ctx;
  548. }
  549. /*
  550. * build_prt - Define interrupt rounting rules
  551. *
  552. * Returns an array of 128 routes, one for each device,
  553. * based on device location.
  554. * The main goal is to equaly distribute the interrupts
  555. * over the 4 existing ACPI links (works only for i440fx).
  556. * The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]".
  557. *
  558. */
  559. static Aml *build_prt(bool is_pci0_prt)
  560. {
  561. Aml *method, *while_ctx, *pin, *res;
  562. method = aml_method("_PRT", 0, AML_NOTSERIALIZED);
  563. res = aml_local(0);
  564. pin = aml_local(1);
  565. aml_append(method, aml_store(aml_package(128), res));
  566. aml_append(method, aml_store(aml_int(0), pin));
  567. /* while (pin < 128) */
  568. while_ctx = aml_while(aml_lless(pin, aml_int(128)));
  569. {
  570. Aml *slot = aml_local(2);
  571. Aml *lnk_idx = aml_local(3);
  572. Aml *route = aml_local(4);
  573. /* slot = pin >> 2 */
  574. aml_append(while_ctx,
  575. aml_store(aml_shiftright(pin, aml_int(2), NULL), slot));
  576. /* lnk_idx = (slot + pin) & 3 */
  577. aml_append(while_ctx,
  578. aml_store(aml_and(aml_add(pin, slot, NULL), aml_int(3), NULL),
  579. lnk_idx));
  580. /* route[2] = "LNK[D|A|B|C]", selection based on pin % 3 */
  581. aml_append(while_ctx, initialize_route(route, "LNKD", lnk_idx, 0));
  582. if (is_pci0_prt) {
  583. Aml *if_device_1, *if_pin_4, *else_pin_4;
  584. /* device 1 is the power-management device, needs SCI */
  585. if_device_1 = aml_if(aml_equal(lnk_idx, aml_int(1)));
  586. {
  587. if_pin_4 = aml_if(aml_equal(pin, aml_int(4)));
  588. {
  589. aml_append(if_pin_4,
  590. aml_store(build_prt_entry("LNKS"), route));
  591. }
  592. aml_append(if_device_1, if_pin_4);
  593. else_pin_4 = aml_else();
  594. {
  595. aml_append(else_pin_4,
  596. aml_store(build_prt_entry("LNKA"), route));
  597. }
  598. aml_append(if_device_1, else_pin_4);
  599. }
  600. aml_append(while_ctx, if_device_1);
  601. } else {
  602. aml_append(while_ctx, initialize_route(route, "LNKA", lnk_idx, 1));
  603. }
  604. aml_append(while_ctx, initialize_route(route, "LNKB", lnk_idx, 2));
  605. aml_append(while_ctx, initialize_route(route, "LNKC", lnk_idx, 3));
  606. /* route[0] = 0x[slot]FFFF */
  607. aml_append(while_ctx,
  608. aml_store(aml_or(aml_shiftleft(slot, aml_int(16)), aml_int(0xFFFF),
  609. NULL),
  610. aml_index(route, aml_int(0))));
  611. /* route[1] = pin & 3 */
  612. aml_append(while_ctx,
  613. aml_store(aml_and(pin, aml_int(3), NULL),
  614. aml_index(route, aml_int(1))));
  615. /* res[pin] = route */
  616. aml_append(while_ctx, aml_store(route, aml_index(res, pin)));
  617. /* pin++ */
  618. aml_append(while_ctx, aml_increment(pin));
  619. }
  620. aml_append(method, while_ctx);
  621. /* return res*/
  622. aml_append(method, aml_return(res));
  623. return method;
  624. }
/* One inclusive [base, limit] address range collected while building _CRS */
typedef struct CrsRangeEntry {
    uint64_t base;
    uint64_t limit;
} CrsRangeEntry;
  629. static void crs_range_insert(GPtrArray *ranges, uint64_t base, uint64_t limit)
  630. {
  631. CrsRangeEntry *entry;
  632. entry = g_malloc(sizeof(*entry));
  633. entry->base = base;
  634. entry->limit = limit;
  635. g_ptr_array_add(ranges, entry);
  636. }
  637. static void crs_range_free(gpointer data)
  638. {
  639. CrsRangeEntry *entry = (CrsRangeEntry *)data;
  640. g_free(entry);
  641. }
/* The three range categories a bridge window can fall into:
 * I/O ports, 32-bit MMIO and 64-bit MMIO. */
typedef struct CrsRangeSet {
    GPtrArray *io_ranges;
    GPtrArray *mem_ranges;
    GPtrArray *mem_64bit_ranges;
} CrsRangeSet;
  647. static void crs_range_set_init(CrsRangeSet *range_set)
  648. {
  649. range_set->io_ranges = g_ptr_array_new_with_free_func(crs_range_free);
  650. range_set->mem_ranges = g_ptr_array_new_with_free_func(crs_range_free);
  651. range_set->mem_64bit_ranges =
  652. g_ptr_array_new_with_free_func(crs_range_free);
  653. }
  654. static void crs_range_set_free(CrsRangeSet *range_set)
  655. {
  656. g_ptr_array_free(range_set->io_ranges, true);
  657. g_ptr_array_free(range_set->mem_ranges, true);
  658. g_ptr_array_free(range_set->mem_64bit_ranges, true);
  659. }
  660. static gint crs_range_compare(gconstpointer a, gconstpointer b)
  661. {
  662. CrsRangeEntry *entry_a = *(CrsRangeEntry **)a;
  663. CrsRangeEntry *entry_b = *(CrsRangeEntry **)b;
  664. if (entry_a->base < entry_b->base) {
  665. return -1;
  666. } else if (entry_a->base > entry_b->base) {
  667. return 1;
  668. } else {
  669. return 0;
  670. }
  671. }
  672. /*
  673. * crs_replace_with_free_ranges - given the 'used' ranges within [start - end]
  674. * interval, computes the 'free' ranges from the same interval.
  675. * Example: If the input array is { [a1 - a2],[b1 - b2] }, the function
  676. * will return { [base - a1], [a2 - b1], [b2 - limit] }.
  677. */
  678. static void crs_replace_with_free_ranges(GPtrArray *ranges,
  679. uint64_t start, uint64_t end)
  680. {
  681. GPtrArray *free_ranges = g_ptr_array_new();
  682. uint64_t free_base = start;
  683. int i;
  684. g_ptr_array_sort(ranges, crs_range_compare);
  685. for (i = 0; i < ranges->len; i++) {
  686. CrsRangeEntry *used = g_ptr_array_index(ranges, i);
  687. if (free_base < used->base) {
  688. crs_range_insert(free_ranges, free_base, used->base - 1);
  689. }
  690. free_base = used->limit + 1;
  691. }
  692. if (free_base < end) {
  693. crs_range_insert(free_ranges, free_base, end);
  694. }
  695. g_ptr_array_set_size(ranges, 0);
  696. for (i = 0; i < free_ranges->len; i++) {
  697. g_ptr_array_add(ranges, g_ptr_array_index(free_ranges, i));
  698. }
  699. g_ptr_array_free(free_ranges, true);
  700. }
  701. /*
  702. * crs_range_merge - merges adjacent ranges in the given array.
  703. * Array elements are deleted and replaced with the merged ranges.
  704. */
  705. static void crs_range_merge(GPtrArray *range)
  706. {
  707. GPtrArray *tmp = g_ptr_array_new_with_free_func(crs_range_free);
  708. CrsRangeEntry *entry;
  709. uint64_t range_base, range_limit;
  710. int i;
  711. if (!range->len) {
  712. return;
  713. }
  714. g_ptr_array_sort(range, crs_range_compare);
  715. entry = g_ptr_array_index(range, 0);
  716. range_base = entry->base;
  717. range_limit = entry->limit;
  718. for (i = 1; i < range->len; i++) {
  719. entry = g_ptr_array_index(range, i);
  720. if (entry->base - 1 == range_limit) {
  721. range_limit = entry->limit;
  722. } else {
  723. crs_range_insert(tmp, range_base, range_limit);
  724. range_base = entry->base;
  725. range_limit = entry->limit;
  726. }
  727. }
  728. crs_range_insert(tmp, range_base, range_limit);
  729. g_ptr_array_set_size(range, 0);
  730. for (i = 0; i < tmp->len; i++) {
  731. entry = g_ptr_array_index(tmp, i);
  732. crs_range_insert(range, entry->base, entry->limit);
  733. }
  734. g_ptr_array_free(tmp, true);
  735. }
/*
 * build_crs:
 * @host: PCI host bridge whose bus is scanned
 * @range_set: accumulates the ranges claimed here, so callers can later
 *             exclude them from the main root bridge's windows
 *
 * Build a _CRS resource template for an extra PCI root bridge by scanning
 * every device on @host's bus: BAR ranges and (for PCI bridges) the I/O,
 * memory and prefetchable windows, merged into minimal I/O / 32-bit MMIO /
 * 64-bit MMIO range lists plus the bus number range.
 */
static Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set)
{
    Aml *crs = aml_resource_template();
    CrsRangeSet temp_range_set;
    CrsRangeEntry *entry;
    uint8_t max_bus = pci_bus_num(host->bus);
    uint8_t type;
    int devfn;
    int i;

    crs_range_set_init(&temp_range_set);
    for (devfn = 0; devfn < ARRAY_SIZE(host->bus->devices); devfn++) {
        uint64_t range_base, range_limit;
        PCIDevice *dev = host->bus->devices[devfn];

        if (!dev) {
            continue;
        }

        /* collect the programmed BAR ranges of this device */
        for (i = 0; i < PCI_NUM_REGIONS; i++) {
            PCIIORegion *r = &dev->io_regions[i];

            range_base = r->addr;
            range_limit = r->addr + r->size - 1;

            /*
             * Work-around for old bioses
             * that do not support multiple root buses
             */
            if (!range_base || range_base > range_limit) {
                continue;
            }

            if (r->type & PCI_BASE_ADDRESS_SPACE_IO) {
                crs_range_insert(temp_range_set.io_ranges,
                                 range_base, range_limit);
            } else { /* "memory" */
                crs_range_insert(temp_range_set.mem_ranges,
                                 range_base, range_limit);
            }
        }

        type = dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
        if (type == PCI_HEADER_TYPE_BRIDGE) {
            /* a bridge extends the bus range up to its subordinate bus */
            uint8_t subordinate = dev->config[PCI_SUBORDINATE_BUS];
            if (subordinate > max_bus) {
                max_bus = subordinate;
            }

            /* bridge I/O window */
            range_base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_IO);
            range_limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_IO);

            /*
             * Work-around for old bioses
             * that do not support multiple root buses
             */
            if (range_base && range_base <= range_limit) {
                crs_range_insert(temp_range_set.io_ranges,
                                 range_base, range_limit);
            }

            /* bridge non-prefetchable memory window */
            range_base =
                pci_bridge_get_base(dev, PCI_BASE_ADDRESS_SPACE_MEMORY);
            range_limit =
                pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_SPACE_MEMORY);

            /*
             * Work-around for old bioses
             * that do not support multiple root buses
             */
            if (range_base && range_base <= range_limit) {
                uint64_t length = range_limit - range_base + 1;
                /* windows entirely below 4G go into the 32-bit list */
                if (range_limit <= UINT32_MAX && length <= UINT32_MAX) {
                    crs_range_insert(temp_range_set.mem_ranges,
                                     range_base, range_limit);
                } else {
                    crs_range_insert(temp_range_set.mem_64bit_ranges,
                                     range_base, range_limit);
                }
            }

            /* bridge prefetchable memory window */
            range_base =
                pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
            range_limit =
                pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

            /*
             * Work-around for old bioses
             * that do not support multiple root buses
             */
            if (range_base && range_base <= range_limit) {
                uint64_t length = range_limit - range_base + 1;
                if (range_limit <= UINT32_MAX && length <= UINT32_MAX) {
                    crs_range_insert(temp_range_set.mem_ranges,
                                     range_base, range_limit);
                } else {
                    crs_range_insert(temp_range_set.mem_64bit_ranges,
                                     range_base, range_limit);
                }
            }
        }
    }

    /* emit one WordIO descriptor per merged I/O range */
    crs_range_merge(temp_range_set.io_ranges);
    for (i = 0; i < temp_range_set.io_ranges->len; i++) {
        entry = g_ptr_array_index(temp_range_set.io_ranges, i);
        aml_append(crs,
                   aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
                               AML_POS_DECODE, AML_ENTIRE_RANGE,
                               0, entry->base, entry->limit, 0,
                               entry->limit - entry->base + 1));
        crs_range_insert(range_set->io_ranges, entry->base, entry->limit);
    }

    /* emit one DWordMemory descriptor per merged 32-bit MMIO range */
    crs_range_merge(temp_range_set.mem_ranges);
    for (i = 0; i < temp_range_set.mem_ranges->len; i++) {
        entry = g_ptr_array_index(temp_range_set.mem_ranges, i);
        aml_append(crs,
                   aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
                                    AML_MAX_FIXED, AML_NON_CACHEABLE,
                                    AML_READ_WRITE,
                                    0, entry->base, entry->limit, 0,
                                    entry->limit - entry->base + 1));
        crs_range_insert(range_set->mem_ranges, entry->base, entry->limit);
    }

    /* emit one QWordMemory descriptor per merged 64-bit MMIO range */
    crs_range_merge(temp_range_set.mem_64bit_ranges);
    for (i = 0; i < temp_range_set.mem_64bit_ranges->len; i++) {
        entry = g_ptr_array_index(temp_range_set.mem_64bit_ranges, i);
        aml_append(crs,
                   aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED,
                                    AML_MAX_FIXED, AML_NON_CACHEABLE,
                                    AML_READ_WRITE,
                                    0, entry->base, entry->limit, 0,
                                    entry->limit - entry->base + 1));
        crs_range_insert(range_set->mem_64bit_ranges,
                         entry->base, entry->limit);
    }

    crs_range_set_free(&temp_range_set);

    /* bus number range claimed by this root bridge */
    aml_append(crs,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0,
                            pci_bus_num(host->bus),
                            max_bus,
                            0,
                            max_bus - pci_bus_num(host->bus) + 1));

    return crs;
}
/*
 * build_hpet_aml:
 * Add an HPET device (PNP0103) under \_SB whose _STA method probes the
 * mapped HPET registers and hides the device when no plausible HPET is
 * present.
 */
static void build_hpet_aml(Aml *table)
{
    Aml *crs;
    Aml *field;
    Aml *method;
    Aml *if_ctx;
    Aml *scope = aml_scope("_SB");
    Aml *dev = aml_device("HPET");
    Aml *zero = aml_int(0);
    Aml *id = aml_local(0);
    Aml *period = aml_local(1);

    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0103")));
    aml_append(dev, aml_name_decl("_UID", zero));

    /* map the HPET register block so _STA can read it */
    aml_append(dev,
        aml_operation_region("HPTM", AML_SYSTEM_MEMORY, aml_int(HPET_BASE),
                             HPET_LEN));
    field = aml_field("HPTM", AML_DWORD_ACC, AML_LOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("VEND", 32)); /* low capability dword */
    aml_append(field, aml_named_field("PRD", 32));  /* counter period dword */
    aml_append(dev, field);

    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_store(aml_name("VEND"), id));
    aml_append(method, aml_store(aml_name("PRD"), period));
    /* keep only the vendor ID (upper 16 bits of the capability dword) */
    aml_append(method, aml_shiftright(id, aml_int(16), id));
    /* all-zero or all-one vendor ID: no HPET implemented -> hidden */
    if_ctx = aml_if(aml_lor(aml_equal(id, zero),
                            aml_equal(id, aml_int(0xffff))));
    {
        aml_append(if_ctx, aml_return(zero));
    }
    aml_append(method, if_ctx);
    /* period of 0 or above 100000000 (presumably femtoseconds, i.e.
     * > 100ns per HPET spec — TODO confirm) is implausible -> hidden */
    if_ctx = aml_if(aml_lor(aml_equal(period, zero),
                            aml_lgreater(period, aml_int(100000000))));
    {
        aml_append(if_ctx, aml_return(zero));
    }
    aml_append(method, if_ctx);
    aml_append(method, aml_return(aml_int(0x0F)));
    aml_append(dev, method);

    crs = aml_resource_template();
    aml_append(crs, aml_memory32_fixed(HPET_BASE, HPET_LEN, AML_READ_ONLY));
    aml_append(dev, aml_name_decl("_CRS", crs));

    aml_append(scope, dev);
    aml_append(table, scope);
}
/*
 * build_fdinfo_aml:
 * @idx: drive number (0-based); names the device FLPA, FLPB, ...
 * @type: floppy drive type used to look up the drive geometry
 *
 * Build one floppy-drive device node with a _FDI (Floppy Drive
 * Information) 16-entry package describing the drive.
 */
static Aml *build_fdinfo_aml(int idx, FloppyDriveType type)
{
    Aml *dev, *fdi;
    uint8_t maxc, maxh, maxs;

    isa_fdc_get_drive_max_chs(type, &maxc, &maxh, &maxs);

    dev = aml_device("FLP%c", 'A' + idx);

    aml_append(dev, aml_name_decl("_ADR", aml_int(idx)));

    fdi = aml_package(16);
    aml_append(fdi, aml_int(idx));  /* Drive Number */
    aml_append(fdi,
        aml_int(cmos_get_fd_drive_type(type)));  /* Device Type */
    /*
     * the values below are the limits of the drive, and are thus independent
     * of the inserted media
     */
    aml_append(fdi, aml_int(maxc));  /* Maximum Cylinder Number */
    aml_append(fdi, aml_int(maxs));  /* Maximum Sector Number */
    aml_append(fdi, aml_int(maxh));  /* Maximum Head Number */
    /*
     * SeaBIOS returns the below values for int 0x13 func 0x08 regardless of
     * the drive type, so shall we
     */
    aml_append(fdi, aml_int(0xAF));  /* disk_specify_1 */
    aml_append(fdi, aml_int(0x02));  /* disk_specify_2 */
    aml_append(fdi, aml_int(0x25));  /* disk_motor_wait */
    aml_append(fdi, aml_int(0x02));  /* disk_sector_siz */
    aml_append(fdi, aml_int(0x12));  /* disk_eot */
    aml_append(fdi, aml_int(0x1B));  /* disk_rw_gap */
    aml_append(fdi, aml_int(0xFF));  /* disk_dtl */
    aml_append(fdi, aml_int(0x6C));  /* disk_formt_gap */
    aml_append(fdi, aml_int(0xF6));  /* disk_fill */
    aml_append(fdi, aml_int(0x0F));  /* disk_head_sttl */
    aml_append(fdi, aml_int(0x08));  /* disk_motor_strt */
    aml_append(dev, aml_name_decl("_FDI", fdi));

    return dev;
}
/*
 * build_fdc_device_aml:
 * Build the floppy controller device node (PNP0700) with its fixed ISA
 * resources, a per-drive FLPx child node for each present drive, and a
 * _FDE (Floppy Drive Enumerate) buffer marking drive presence.
 */
static Aml *build_fdc_device_aml(ISADevice *fdc)
{
    int i;
    Aml *dev;
    Aml *crs;

#define ACPI_FDE_MAX_FD 4
    uint32_t fde_buf[5] = {
        0, 0, 0, 0,     /* presence of floppy drives #0 - #3 */
        cpu_to_le32(2)  /* tape presence (2 == never present) */
    };

    dev = aml_device("FDC0");
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0700")));

    /* legacy FDC ports, IRQ 6 and DMA channel 2 */
    crs = aml_resource_template();
    aml_append(crs, aml_io(AML_DECODE16, 0x03F2, 0x03F2, 0x00, 0x04));
    aml_append(crs, aml_io(AML_DECODE16, 0x03F7, 0x03F7, 0x00, 0x01));
    aml_append(crs, aml_irq_no_flags(6));
    aml_append(crs,
        aml_dma(AML_COMPATIBILITY, AML_NOTBUSMASTER, AML_TRANSFER8, 2));
    aml_append(dev, aml_name_decl("_CRS", crs));

    for (i = 0; i < MIN(MAX_FD, ACPI_FDE_MAX_FD); i++) {
        FloppyDriveType type = isa_fdc_get_drive_type(fdc, i);

        /* enum values below FLOPPY_DRIVE_TYPE_NONE are real drive types */
        if (type < FLOPPY_DRIVE_TYPE_NONE) {
            fde_buf[i] = cpu_to_le32(1);  /* drive present */
            aml_append(dev, build_fdinfo_aml(i, type));
        }
    }
    aml_append(dev, aml_name_decl("_FDE",
               aml_buffer(sizeof(fde_buf), (uint8_t *)fde_buf)));

    return dev;
}
  978. static Aml *build_rtc_device_aml(void)
  979. {
  980. Aml *dev;
  981. Aml *crs;
  982. dev = aml_device("RTC");
  983. aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0B00")));
  984. crs = aml_resource_template();
  985. aml_append(crs, aml_io(AML_DECODE16, 0x0070, 0x0070, 0x10, 0x02));
  986. aml_append(crs, aml_irq_no_flags(8));
  987. aml_append(crs, aml_io(AML_DECODE16, 0x0072, 0x0072, 0x02, 0x06));
  988. aml_append(dev, aml_name_decl("_CRS", crs));
  989. return dev;
  990. }
  991. static Aml *build_kbd_device_aml(void)
  992. {
  993. Aml *dev;
  994. Aml *crs;
  995. Aml *method;
  996. dev = aml_device("KBD");
  997. aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0303")));
  998. method = aml_method("_STA", 0, AML_NOTSERIALIZED);
  999. aml_append(method, aml_return(aml_int(0x0f)));
  1000. aml_append(dev, method);
  1001. crs = aml_resource_template();
  1002. aml_append(crs, aml_io(AML_DECODE16, 0x0060, 0x0060, 0x01, 0x01));
  1003. aml_append(crs, aml_io(AML_DECODE16, 0x0064, 0x0064, 0x01, 0x01));
  1004. aml_append(crs, aml_irq_no_flags(1));
  1005. aml_append(dev, aml_name_decl("_CRS", crs));
  1006. return dev;
  1007. }
  1008. static Aml *build_mouse_device_aml(void)
  1009. {
  1010. Aml *dev;
  1011. Aml *crs;
  1012. Aml *method;
  1013. dev = aml_device("MOU");
  1014. aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0F13")));
  1015. method = aml_method("_STA", 0, AML_NOTSERIALIZED);
  1016. aml_append(method, aml_return(aml_int(0x0f)));
  1017. aml_append(dev, method);
  1018. crs = aml_resource_template();
  1019. aml_append(crs, aml_irq_no_flags(12));
  1020. aml_append(dev, aml_name_decl("_CRS", crs));
  1021. return dev;
  1022. }
  1023. static Aml *build_lpt_device_aml(void)
  1024. {
  1025. Aml *dev;
  1026. Aml *crs;
  1027. Aml *method;
  1028. Aml *if_ctx;
  1029. Aml *else_ctx;
  1030. Aml *zero = aml_int(0);
  1031. Aml *is_present = aml_local(0);
  1032. dev = aml_device("LPT");
  1033. aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0400")));
  1034. method = aml_method("_STA", 0, AML_NOTSERIALIZED);
  1035. aml_append(method, aml_store(aml_name("LPEN"), is_present));
  1036. if_ctx = aml_if(aml_equal(is_present, zero));
  1037. {
  1038. aml_append(if_ctx, aml_return(aml_int(0x00)));
  1039. }
  1040. aml_append(method, if_ctx);
  1041. else_ctx = aml_else();
  1042. {
  1043. aml_append(else_ctx, aml_return(aml_int(0x0f)));
  1044. }
  1045. aml_append(method, else_ctx);
  1046. aml_append(dev, method);
  1047. crs = aml_resource_template();
  1048. aml_append(crs, aml_io(AML_DECODE16, 0x0378, 0x0378, 0x08, 0x08));
  1049. aml_append(crs, aml_irq_no_flags(7));
  1050. aml_append(dev, aml_name_decl("_CRS", crs));
  1051. return dev;
  1052. }
  1053. static Aml *build_com_device_aml(uint8_t uid)
  1054. {
  1055. Aml *dev;
  1056. Aml *crs;
  1057. Aml *method;
  1058. Aml *if_ctx;
  1059. Aml *else_ctx;
  1060. Aml *zero = aml_int(0);
  1061. Aml *is_present = aml_local(0);
  1062. const char *enabled_field = "CAEN";
  1063. uint8_t irq = 4;
  1064. uint16_t io_port = 0x03F8;
  1065. assert(uid == 1 || uid == 2);
  1066. if (uid == 2) {
  1067. enabled_field = "CBEN";
  1068. irq = 3;
  1069. io_port = 0x02F8;
  1070. }
  1071. dev = aml_device("COM%d", uid);
  1072. aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0501")));
  1073. aml_append(dev, aml_name_decl("_UID", aml_int(uid)));
  1074. method = aml_method("_STA", 0, AML_NOTSERIALIZED);
  1075. aml_append(method, aml_store(aml_name("%s", enabled_field), is_present));
  1076. if_ctx = aml_if(aml_equal(is_present, zero));
  1077. {
  1078. aml_append(if_ctx, aml_return(aml_int(0x00)));
  1079. }
  1080. aml_append(method, if_ctx);
  1081. else_ctx = aml_else();
  1082. {
  1083. aml_append(else_ctx, aml_return(aml_int(0x0f)));
  1084. }
  1085. aml_append(method, else_ctx);
  1086. aml_append(dev, method);
  1087. crs = aml_resource_template();
  1088. aml_append(crs, aml_io(AML_DECODE16, io_port, io_port, 0x00, 0x08));
  1089. aml_append(crs, aml_irq_no_flags(irq));
  1090. aml_append(dev, aml_name_decl("_CRS", crs));
  1091. return dev;
  1092. }
/*
 * build_isa_devices_aml:
 * Populate the \_SB.PCI0.ISA scope with the legacy ISA device nodes
 * (RTC, keyboard, mouse, optional FDC, LPT, COM1/COM2) plus any IPMI
 * devices found on the (single) ISA bus.
 */
static void build_isa_devices_aml(Aml *table)
{
    ISADevice *fdc = pc_find_fdc0();
    bool ambiguous;

    Aml *scope = aml_scope("_SB.PCI0.ISA");
    Object *obj = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous);

    aml_append(scope, build_rtc_device_aml());
    aml_append(scope, build_kbd_device_aml());
    aml_append(scope, build_mouse_device_aml());
    if (fdc) {
        /* only describe the FDC when the machine actually has one */
        aml_append(scope, build_fdc_device_aml(fdc));
    }
    aml_append(scope, build_lpt_device_aml());
    aml_append(scope, build_com_device_aml(1));
    aml_append(scope, build_com_device_aml(2));

    /* IPMI data needs a unique ISA bus to attach to */
    if (ambiguous) {
        error_report("Multiple ISA busses, unable to define IPMI ACPI data");
    } else if (!obj) {
        error_report("No ISA bus, unable to define IPMI ACPI data");
    } else {
        build_acpi_ipmi_devices(scope, BUS(obj), "\\_SB.PCI0.ISA");
    }

    aml_append(table, scope);
}
/*
 * build_dbg_aml:
 * Define a root-scope DBUG(Arg0) helper that converts its argument to a
 * hex string and writes it byte by byte to the debug I/O port 0x0402,
 * followed by a newline.
 */
static void build_dbg_aml(Aml *table)
{
    Aml *field;
    Aml *method;
    Aml *while_ctx;
    Aml *scope = aml_scope("\\");
    Aml *buf = aml_local(0);
    Aml *len = aml_local(1);
    Aml *idx = aml_local(2);

    /* single byte-wide register at I/O port 0x0402 */
    aml_append(scope,
       aml_operation_region("DBG", AML_SYSTEM_IO, aml_int(0x0402), 0x01));
    field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("DBGB", 8));
    aml_append(scope, field);

    method = aml_method("DBUG", 1, AML_NOTSERIALIZED);

    /* Local0 = ToBuffer(ToHexString(Arg0)) */
    aml_append(method, aml_to_hexstring(aml_arg(0), buf));
    aml_append(method, aml_to_buffer(buf, buf));
    /* Local1 = length without the trailing NUL */
    aml_append(method, aml_subtract(aml_sizeof(buf), aml_int(1), len));
    aml_append(method, aml_store(aml_int(0), idx));

    /* emit each byte of the hex string to the debug port */
    while_ctx = aml_while(aml_lless(idx, len));
    aml_append(while_ctx,
        aml_store(aml_derefof(aml_index(buf, idx)), aml_name("DBGB")));
    aml_append(while_ctx, aml_increment(idx));
    aml_append(method, while_ctx);

    /* terminate the line with '\n' (0x0A) */
    aml_append(method, aml_store(aml_int(0x0A), aml_name("DBGB")));
    aml_append(scope, method);
    aml_append(table, scope);
}
/*
 * build_link_dev:
 * @name: link device name (e.g. "LNKA")
 * @uid: _UID value
 * @reg: AML reference to the chipset PIRQ routing register byte for
 *       this link; bit 7 set means the link is disabled
 *
 * Build a PCI interrupt link device (PNP0C0F) whose _STA/_DIS/_CRS/_SRS
 * methods operate on @reg. _PRS offers IRQs 5, 10 and 11.
 */
static Aml *build_link_dev(const char *name, uint8_t uid, Aml *reg)
{
    Aml *dev;
    Aml *crs;
    Aml *method;
    uint32_t irqs[] = {5, 10, 11};

    dev = aml_device("%s", name);
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C0F")));
    aml_append(dev, aml_name_decl("_UID", aml_int(uid)));

    /* possible settings: a shareable, level, active-high interrupt */
    crs = aml_resource_template();
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_SHARED, irqs, ARRAY_SIZE(irqs)));
    aml_append(dev, aml_name_decl("_PRS", crs));

    /* _STA: delegate to the shared IQST helper on the routing register */
    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call1("IQST", reg)));
    aml_append(dev, method);

    /* _DIS: set bit 7 of the routing register to disable the link */
    method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_or(reg, aml_int(0x80), reg));
    aml_append(dev, method);

    /* _CRS: delegate to the shared IQCR helper */
    method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call1("IQCR", reg)));
    aml_append(dev, method);

    /* _SRS: extract the IRQ number at offset 5 of the resource buffer
     * (the Interrupt descriptor's first interrupt number) into @reg */
    method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
    aml_append(method, aml_create_dword_field(aml_arg(0), aml_int(5), "PRRI"));
    aml_append(method, aml_store(aml_name("PRRI"), reg));
    aml_append(dev, method);

    return dev;
}
/*
 * build_gsi_link_dev:
 * @name: link device name (e.g. "GSIA")
 * @uid: _UID value
 * @gsi: the fixed global system interrupt this link is wired to
 *
 * Build a PCI interrupt link device (PNP0C0F) that is permanently tied
 * to one GSI: _PRS and _CRS both report the single interrupt, and
 * _DIS/_SRS are no-op stubs.
 */
static Aml *build_gsi_link_dev(const char *name, uint8_t uid, uint8_t gsi)
{
    Aml *dev;
    Aml *crs;
    Aml *method;
    uint32_t irqs;

    dev = aml_device("%s", name);
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C0F")));
    aml_append(dev, aml_name_decl("_UID", aml_int(uid)));

    crs = aml_resource_template();
    irqs = gsi;
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
                                  AML_SHARED, &irqs, 1));
    /* NOTE(review): the same crs object backs both _PRS and _CRS here —
     * appears intentional since both report the identical fixed resource */
    aml_append(dev, aml_name_decl("_PRS", crs));

    aml_append(dev, aml_name_decl("_CRS", crs));

    /*
     * _DIS can be no-op because the interrupt cannot be disabled.
     */
    method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
    aml_append(dev, method);

    /* _SRS is likewise a no-op: the routing is fixed */
    method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
    aml_append(dev, method);

    return dev;
}
/* _CRS method - get current settings
 *
 * IQCR(Arg0): build an Interrupt resource descriptor whose IRQ number is
 * derived from the routing-register value in Arg0. PIIX4 treats values
 * >= 0x80 as "disabled" (IRQ left 0); Q35 masks the low 4 bits.
 */
static Aml *build_iqcr_method(bool is_piix4)
{
    Aml *if_ctx;
    uint32_t irqs;
    Aml *method = aml_method("IQCR", 1, AML_SERIALIZED);
    Aml *crs = aml_resource_template();

    /* template with a placeholder interrupt number of 0 */
    irqs = 0;
    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL,
                                  AML_ACTIVE_HIGH, AML_SHARED, &irqs, 1));
    aml_append(method, aml_name_decl("PRR0", crs));

    /* PRRI overlays the interrupt-number dword at offset 5 of PRR0 */
    aml_append(method,
        aml_create_dword_field(aml_name("PRR0"), aml_int(5), "PRRI"));

    if (is_piix4) {
        /* bit 7 set means disabled: only copy values below 0x80 */
        if_ctx = aml_if(aml_lless(aml_arg(0), aml_int(0x80)));
        aml_append(if_ctx, aml_store(aml_arg(0), aml_name("PRRI")));
        aml_append(method, if_ctx);
    } else {
        /* Q35 PIRQ registers keep the IRQ number in the low nibble */
        aml_append(method,
            aml_store(aml_and(aml_arg(0), aml_int(0xF), NULL),
                      aml_name("PRRI")));
    }

    aml_append(method, aml_return(aml_name("PRR0")));
    return method;
}
  1222. /* _STA method - get status */
  1223. static Aml *build_irq_status_method(void)
  1224. {
  1225. Aml *if_ctx;
  1226. Aml *method = aml_method("IQST", 1, AML_NOTSERIALIZED);
  1227. if_ctx = aml_if(aml_and(aml_int(0x80), aml_arg(0), NULL));
  1228. aml_append(if_ctx, aml_return(aml_int(0x09)));
  1229. aml_append(method, if_ctx);
  1230. aml_append(method, aml_return(aml_int(0x0B)));
  1231. return method;
  1232. }
/*
 * build_piix4_pci0_int:
 * Add the PIIX4 interrupt wiring to \_SB: the PCI0 _PRT, the PRQ0-PRQ3
 * routing-register field (backed by PCI0.ISA.P40C), the LNKA-LNKD link
 * devices and the fixed SCI link LNKS on GSI 9.
 */
static void build_piix4_pci0_int(Aml *table)
{
    Aml *dev;
    Aml *crs;
    Aml *field;
    Aml *method;
    uint32_t irqs;
    Aml *sb_scope = aml_scope("_SB");
    Aml *pci0_scope = aml_scope("PCI0");

    /* true: device 1 (PM function) gets the LNKS special case */
    aml_append(pci0_scope, build_prt(true));
    aml_append(sb_scope, pci0_scope);

    /* PIRQ routing registers live in the ISA bridge's P40C region */
    field = aml_field("PCI0.ISA.P40C", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("PRQ0", 8));
    aml_append(field, aml_named_field("PRQ1", 8));
    aml_append(field, aml_named_field("PRQ2", 8));
    aml_append(field, aml_named_field("PRQ3", 8));
    aml_append(sb_scope, field);

    aml_append(sb_scope, build_irq_status_method());
    aml_append(sb_scope, build_iqcr_method(true));

    aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQ0")));
    aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQ1")));
    aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQ2")));
    aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQ3")));

    dev = aml_device("LNKS");
    {
        aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C0F")));
        aml_append(dev, aml_name_decl("_UID", aml_int(4)));

        crs = aml_resource_template();
        irqs = 9;
        aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL,
                                      AML_ACTIVE_HIGH, AML_SHARED,
                                      &irqs, 1));
        aml_append(dev, aml_name_decl("_PRS", crs));

        /* The SCI cannot be disabled and is always attached to GSI 9,
         * so these are no-ops.  We only need this link to override the
         * polarity to active high and match the content of the MADT.
         */
        method = aml_method("_STA", 0, AML_NOTSERIALIZED);
        aml_append(method, aml_return(aml_int(0x0b)));
        aml_append(dev, method);

        method = aml_method("_DIS", 0, AML_NOTSERIALIZED);
        aml_append(dev, method);

        method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
        aml_append(method, aml_return(aml_name("_PRS")));
        aml_append(dev, method);

        method = aml_method("_SRS", 1, AML_NOTSERIALIZED);
        aml_append(dev, method);
    }
    aml_append(sb_scope, dev);

    aml_append(table, sb_scope);
}
/*
 * append_q35_prt_entry:
 * @ctx: the _PRT package being filled
 * @nr: PCI slot (device) number
 * @name: 4-char link name template, e.g. "LNKA" or "GSIE"; the last
 *        character selects the starting link
 *
 * Append four _PRT entries (INTA..INTD) for slot @nr. The link letter
 * rotates from @name's last character, wrapping within its own group
 * (A-D or E-H, chosen by whether name[3] is below 'E').
 */
static void append_q35_prt_entry(Aml *ctx, uint32_t nr, const char *name)
{
    int i;
    int head;
    Aml *pkg;
    /* group base letter: A-D group or E-H group */
    char base = name[3] < 'E' ? 'A' : 'E';
    char *s = g_strdup(name);
    /* address: slot in the high word, 0xFFFF = any function */
    Aml *a_nr = aml_int((nr << 16) | 0xffff);

    assert(strlen(s) == 4);

    head = name[3] - base;
    for (i = 0; i < 4; i++) {
        /* wrap back to the start of the group once past the 4th letter */
        if (head + i > 3) {
            head = i * -1;
        }
        s[3] = base + head + i;
        pkg = aml_package(4);
        aml_append(pkg, a_nr);              /* address */
        aml_append(pkg, aml_int(i));        /* pin (INTA..INTD) */
        aml_append(pkg, aml_name("%s", s)); /* link device */
        aml_append(pkg, aml_int(0));        /* source index */
        aml_append(ctx, pkg);
    }
    g_free(s);
}
/*
 * build_q35_routing_table:
 * @str: 3-char link name prefix, "LNK" (PIC mode) or "GSI" (APIC mode)
 *
 * Build the 128-entry Q35 _PRT package: 4 routes per slot for slots
 * 0-31, matching the chipset's default PIRQ/GSI assignment.
 */
static Aml *build_q35_routing_table(const char *str)
{
    int i;
    Aml *pkg;
    /* extra trailing char gives name[3] room for the rotating letter */
    char *name = g_strdup_printf("%s ", str);

    pkg = aml_package(128);
    for (i = 0; i < 0x18; i++) {
        /* slots 0-23: rotate the starting link through E-H by slot */
        name[3] = 'E' + (i & 0x3);
        append_q35_prt_entry(pkg, i, name);
    }

    /* slot 24 starts at E again */
    name[3] = 'E';
    append_q35_prt_entry(pkg, 0x18, name);

    /* INTA -> PIRQA for slot 25 - 31, see the default value of D<N>IR */
    for (i = 0x0019; i < 0x1e; i++) {
        name[3] = 'A';
        append_q35_prt_entry(pkg, i, name);
    }

    /* PCIe->PCI bridge. use PIRQ[E-H] */
    name[3] = 'E';
    append_q35_prt_entry(pkg, 0x1e, name);
    name[3] = 'A';
    append_q35_prt_entry(pkg, 0x1f, name);

    g_free(name);
    return pkg;
}
/*
 * build_q35_pci0_int:
 * Add the Q35 interrupt wiring to the DSDT: a _PIC method tracking
 * PIC/APIC mode, a mode-dependent PCI0 _PRT (PRTP for PIC, PRTA for
 * APIC), the PRQA-PRQH routing registers, the LNKA-LNKH link devices
 * and the fixed GSIA-GSIH links used in APIC mode.
 */
static void build_q35_pci0_int(Aml *table)
{
    Aml *field;
    Aml *method;
    Aml *sb_scope = aml_scope("_SB");
    Aml *pci0_scope = aml_scope("PCI0");

    /* Zero => PIC mode, One => APIC Mode */
    aml_append(table, aml_name_decl("PICF", aml_int(0)));
    method = aml_method("_PIC", 1, AML_NOTSERIALIZED);
    {
        aml_append(method, aml_store(aml_arg(0), aml_name("PICF")));
    }
    aml_append(table, method);

    /* two pre-built routing tables; _PRT picks one based on PICF */
    aml_append(pci0_scope,
        aml_name_decl("PRTP", build_q35_routing_table("LNK")));
    aml_append(pci0_scope,
        aml_name_decl("PRTA", build_q35_routing_table("GSI")));

    method = aml_method("_PRT", 0, AML_NOTSERIALIZED);
    {
        Aml *if_ctx;
        Aml *else_ctx;

        /* PCI IRQ routing table, example from ACPI 2.0a specification,
           section 6.2.8.1 */
        /* Note: we provide the same info as the PCI routing
           table of the Bochs BIOS */
        if_ctx = aml_if(aml_equal(aml_name("PICF"), aml_int(0)));
        aml_append(if_ctx, aml_return(aml_name("PRTP")));
        aml_append(method, if_ctx);
        else_ctx = aml_else();
        aml_append(else_ctx, aml_return(aml_name("PRTA")));
        aml_append(method, else_ctx);
    }
    aml_append(pci0_scope, method);
    aml_append(sb_scope, pci0_scope);

    /* PIRQ[A-H] routing registers in the ISA bridge's PIRQ region;
     * a reserved 32-bit hole separates the two register banks */
    field = aml_field("PCI0.ISA.PIRQ", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
    aml_append(field, aml_named_field("PRQA", 8));
    aml_append(field, aml_named_field("PRQB", 8));
    aml_append(field, aml_named_field("PRQC", 8));
    aml_append(field, aml_named_field("PRQD", 8));
    aml_append(field, aml_reserved_field(0x20));
    aml_append(field, aml_named_field("PRQE", 8));
    aml_append(field, aml_named_field("PRQF", 8));
    aml_append(field, aml_named_field("PRQG", 8));
    aml_append(field, aml_named_field("PRQH", 8));
    aml_append(sb_scope, field);

    aml_append(sb_scope, build_irq_status_method());
    aml_append(sb_scope, build_iqcr_method(false));

    /* configurable links for PIC mode */
    aml_append(sb_scope, build_link_dev("LNKA", 0, aml_name("PRQA")));
    aml_append(sb_scope, build_link_dev("LNKB", 1, aml_name("PRQB")));
    aml_append(sb_scope, build_link_dev("LNKC", 2, aml_name("PRQC")));
    aml_append(sb_scope, build_link_dev("LNKD", 3, aml_name("PRQD")));
    aml_append(sb_scope, build_link_dev("LNKE", 4, aml_name("PRQE")));
    aml_append(sb_scope, build_link_dev("LNKF", 5, aml_name("PRQF")));
    aml_append(sb_scope, build_link_dev("LNKG", 6, aml_name("PRQG")));
    aml_append(sb_scope, build_link_dev("LNKH", 7, aml_name("PRQH")));

    /* fixed GSI links for APIC mode, wired to GSIs 0x10-0x17 */
    aml_append(sb_scope, build_gsi_link_dev("GSIA", 0x10, 0x10));
    aml_append(sb_scope, build_gsi_link_dev("GSIB", 0x11, 0x11));
    aml_append(sb_scope, build_gsi_link_dev("GSIC", 0x12, 0x12));
    aml_append(sb_scope, build_gsi_link_dev("GSID", 0x13, 0x13));
    aml_append(sb_scope, build_gsi_link_dev("GSIE", 0x14, 0x14));
    aml_append(sb_scope, build_gsi_link_dev("GSIF", 0x15, 0x15));
    aml_append(sb_scope, build_gsi_link_dev("GSIG", 0x16, 0x16));
    aml_append(sb_scope, build_gsi_link_dev("GSIH", 0x17, 0x17));

    aml_append(table, sb_scope);
}
  1398. static void build_q35_isa_bridge(Aml *table)
  1399. {
  1400. Aml *dev;
  1401. Aml *scope;
  1402. Aml *field;
  1403. scope = aml_scope("_SB.PCI0");
  1404. dev = aml_device("ISA");
  1405. aml_append(dev, aml_name_decl("_ADR", aml_int(0x001F0000)));
  1406. /* ICH9 PCI to ISA irq remapping */
  1407. aml_append(dev, aml_operation_region("PIRQ", AML_PCI_CONFIG,
  1408. aml_int(0x60), 0x0C));
  1409. aml_append(dev, aml_operation_region("LPCD", AML_PCI_CONFIG,
  1410. aml_int(0x80), 0x02));
  1411. field = aml_field("LPCD", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
  1412. aml_append(field, aml_named_field("COMA", 3));
  1413. aml_append(field, aml_reserved_field(1));
  1414. aml_append(field, aml_named_field("COMB", 3));
  1415. aml_append(field, aml_reserved_field(1));
  1416. aml_append(field, aml_named_field("LPTD", 2));
  1417. aml_append(dev, field);
  1418. aml_append(dev, aml_operation_region("LPCE", AML_PCI_CONFIG,
  1419. aml_int(0x82), 0x02));
  1420. /* enable bits */
  1421. field = aml_field("LPCE", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
  1422. aml_append(field, aml_named_field("CAEN", 1));
  1423. aml_append(field, aml_named_field("CBEN", 1));
  1424. aml_append(field, aml_named_field("LPEN", 1));
  1425. aml_append(dev, field);
  1426. aml_append(scope, dev);
  1427. aml_append(table, scope);
  1428. }
  1429. static void build_piix4_pm(Aml *table)
  1430. {
  1431. Aml *dev;
  1432. Aml *scope;
  1433. scope = aml_scope("_SB.PCI0");
  1434. dev = aml_device("PX13");
  1435. aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010003)));
  1436. aml_append(dev, aml_operation_region("P13C", AML_PCI_CONFIG,
  1437. aml_int(0x00), 0xff));
  1438. aml_append(scope, dev);
  1439. aml_append(table, scope);
  1440. }
  1441. static void build_piix4_isa_bridge(Aml *table)
  1442. {
  1443. Aml *dev;
  1444. Aml *scope;
  1445. Aml *field;
  1446. scope = aml_scope("_SB.PCI0");
  1447. dev = aml_device("ISA");
  1448. aml_append(dev, aml_name_decl("_ADR", aml_int(0x00010000)));
  1449. /* PIIX PCI to ISA irq remapping */
  1450. aml_append(dev, aml_operation_region("P40C", AML_PCI_CONFIG,
  1451. aml_int(0x60), 0x04));
  1452. /* enable bits */
  1453. field = aml_field("^PX13.P13C", AML_ANY_ACC, AML_NOLOCK, AML_PRESERVE);
  1454. /* Offset(0x5f),, 7, */
  1455. aml_append(field, aml_reserved_field(0x2f8));
  1456. aml_append(field, aml_reserved_field(7));
  1457. aml_append(field, aml_named_field("LPEN", 1));
  1458. /* Offset(0x67),, 3, */
  1459. aml_append(field, aml_reserved_field(0x38));
  1460. aml_append(field, aml_reserved_field(3));
  1461. aml_append(field, aml_named_field("CAEN", 1));
  1462. aml_append(field, aml_reserved_field(3));
  1463. aml_append(field, aml_named_field("CBEN", 1));
  1464. aml_append(dev, field);
  1465. aml_append(scope, dev);
  1466. aml_append(table, scope);
  1467. }
  1468. static void build_piix4_pci_hotplug(Aml *table)
  1469. {
  1470. Aml *scope;
  1471. Aml *field;
  1472. Aml *method;
  1473. scope = aml_scope("_SB.PCI0");
  1474. aml_append(scope,
  1475. aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x08));
  1476. field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
  1477. aml_append(field, aml_named_field("PCIU", 32));
  1478. aml_append(field, aml_named_field("PCID", 32));
  1479. aml_append(scope, field);
  1480. aml_append(scope,
  1481. aml_operation_region("SEJ", AML_SYSTEM_IO, aml_int(0xae08), 0x04));
  1482. field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
  1483. aml_append(field, aml_named_field("B0EJ", 32));
  1484. aml_append(scope, field);
  1485. aml_append(scope,
  1486. aml_operation_region("BNMR", AML_SYSTEM_IO, aml_int(0xae10), 0x04));
  1487. field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
  1488. aml_append(field, aml_named_field("BNUM", 32));
  1489. aml_append(scope, field);
  1490. aml_append(scope, aml_mutex("BLCK", 0));
  1491. method = aml_method("PCEJ", 2, AML_NOTSERIALIZED);
  1492. aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF));
  1493. aml_append(method, aml_store(aml_arg(0), aml_name("BNUM")));
  1494. aml_append(method,
  1495. aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("B0EJ")));
  1496. aml_append(method, aml_release(aml_name("BLCK")));
  1497. aml_append(method, aml_return(aml_int(0)));
  1498. aml_append(scope, method);
  1499. aml_append(table, scope);
  1500. }
  1501. static Aml *build_q35_osc_method(void)
  1502. {
  1503. Aml *if_ctx;
  1504. Aml *if_ctx2;
  1505. Aml *else_ctx;
  1506. Aml *method;
  1507. Aml *a_cwd1 = aml_name("CDW1");
  1508. Aml *a_ctrl = aml_local(0);
  1509. method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
  1510. aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));
  1511. if_ctx = aml_if(aml_equal(
  1512. aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766")));
  1513. aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
  1514. aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
  1515. aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl));
  1516. /*
  1517. * Always allow native PME, AER (no dependencies)
  1518. * Allow SHPC (PCI bridges can have SHPC controller)
  1519. */
  1520. aml_append(if_ctx, aml_and(a_ctrl, aml_int(0x1F), a_ctrl));
  1521. if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1))));
  1522. /* Unknown revision */
  1523. aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1));
  1524. aml_append(if_ctx, if_ctx2);
  1525. if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl)));
  1526. /* Capabilities bits were masked */
  1527. aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1));
  1528. aml_append(if_ctx, if_ctx2);
  1529. /* Update DWORD3 in the buffer */
  1530. aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3")));
  1531. aml_append(method, if_ctx);
  1532. else_ctx = aml_else();
  1533. /* Unrecognized UUID */
  1534. aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1));
  1535. aml_append(method, else_ctx);
  1536. aml_append(method, aml_return(aml_arg(3)));
  1537. return method;
  1538. }
  1539. static void build_smb0(Aml *table, I2CBus *smbus, int devnr, int func)
  1540. {
  1541. Aml *scope = aml_scope("_SB.PCI0");
  1542. Aml *dev = aml_device("SMB0");
  1543. aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0005")));
  1544. aml_append(dev, aml_name_decl("_ADR", aml_int(devnr << 16 | func)));
  1545. build_acpi_ipmi_devices(dev, BUS(smbus), "\\_SB.PCI0.SMB0");
  1546. aml_append(scope, dev);
  1547. aml_append(table, scope);
  1548. }
/*
 * Build the DSDT for a PC machine and append it to the ACPI tables blob.
 *
 * The generated AML covers: the PCI0 host bridge (PIIX4 or Q35 flavor),
 * ISA devices, CPU and memory hotplug, _GPE event methods, expander root
 * buses, PCI0._CRS (with ranges used by other buses / MMCONFIG excluded),
 * GPE0/PCIHP resource reservations, sleep-state packages, the fw_cfg node,
 * and optional AppleSMC / pvpanic / TPM devices.
 *
 * @table_data: ACPI tables blob the finished DSDT is appended to
 * @linker: BIOS linker/loader script accumulator
 * @pm: PM info (GPE0 block, PCIHP I/O window, sleep-state configuration)
 * @misc: misc info (PIIX4 vs Q35, TPM version, AppleSMC/pvpanic ports)
 * @pci_hole: 32-bit PCI hole range for PCI0._CRS
 * @pci_hole64: 64-bit PCI hole range (may be empty)
 * @machine: machine state (RAM slots, NVDIMM state, NUMA-aware PCI buses)
 */
static void
build_dsdt(GArray *table_data, BIOSLinker *linker,
           AcpiPmInfo *pm, AcpiMiscInfo *misc,
           Range *pci_hole, Range *pci_hole64, MachineState *machine)
{
    CrsRangeEntry *entry;
    Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs;
    CrsRangeSet crs_range_set;
    PCMachineState *pcms = PC_MACHINE(machine);
    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(machine);
    X86MachineState *x86ms = X86_MACHINE(machine);
    AcpiMcfgInfo mcfg;
    uint32_t nr_mem = machine->ram_slots;
    int root_bus_limit = 0xFF;
    PCIBus *bus = NULL;
    TPMIf *tpm = tpm_find();
    int i;

    dsdt = init_aml_allocator();

    /* Reserve space for header */
    acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader));

    build_dbg_aml(dsdt);
    if (misc->is_piix4) {
        /* PIIX4: conventional-PCI host bridge (PNP0A03) */
        sb_scope = aml_scope("_SB");
        dev = aml_device("PCI0");
        aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
        aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
        aml_append(dev, aml_name_decl("_UID", aml_int(1)));
        aml_append(sb_scope, dev);
        aml_append(dsdt, sb_scope);

        build_hpet_aml(dsdt);
        build_piix4_pm(dsdt);
        build_piix4_isa_bridge(dsdt);
        build_isa_devices_aml(dsdt);
        build_piix4_pci_hotplug(dsdt);
        build_piix4_pci0_int(dsdt);
    } else {
        /* Q35: PCIe host bridge (PNP0A08, PNP0A03-compatible) with _OSC */
        sb_scope = aml_scope("_SB");
        dev = aml_device("PCI0");
        aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
        aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
        aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
        aml_append(dev, aml_name_decl("_UID", aml_int(1)));
        aml_append(dev, build_q35_osc_method());
        aml_append(sb_scope, dev);
        aml_append(dsdt, sb_scope);

        build_hpet_aml(dsdt);
        build_q35_isa_bridge(dsdt);
        build_isa_devices_aml(dsdt);
        build_q35_pci0_int(dsdt);
        if (pcms->smbus && !pcmc->do_not_add_smb_acpi) {
            build_smb0(dsdt, pcms->smbus, ICH9_SMB_DEV, ICH9_SMB_FUNC);
        }
    }

    /* CPU hotplug AML; _GPE._E02 is the event handler wired below */
    if (pcmc->legacy_cpu_hotplug) {
        build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base);
    } else {
        CPUHotplugFeatures opts = {
            .acpi_1_compatible = true, .has_legacy_cphp = true
        };
        build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base,
                       "\\_SB.PCI0", "\\_GPE._E02");
    }

    if (pcms->memhp_io_base && nr_mem) {
        build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0",
                                 "\\_GPE._E03", AML_SYSTEM_IO,
                                 pcms->memhp_io_base);
    }

    /* _GPE event methods: _E01 = PIIX4 PCI hotplug rescan, _E04 = NVDIMM */
    scope = aml_scope("_GPE");
    {
        aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006")));

        if (misc->is_piix4) {
            method = aml_method("_E01", 0, AML_NOTSERIALIZED);
            aml_append(method,
                aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF));
            aml_append(method, aml_call0("\\_SB.PCI0.PCNT"));
            aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK")));
            aml_append(scope, method);
        }

        if (machine->nvdimms_state->is_enabled) {
            method = aml_method("_E04", 0, AML_NOTSERIALIZED);
            aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
                                          aml_int(0x80)));
            aml_append(scope, method);
        }
    }
    aml_append(dsdt, scope);

    /* Describe expander root buses; collect their resource ranges so they
     * can be excluded from PCI0._CRS below. */
    crs_range_set_init(&crs_range_set);
    bus = PC_MACHINE(machine)->bus;
    if (bus) {
        QLIST_FOREACH(bus, &bus->child, sibling) {
            uint8_t bus_num = pci_bus_num(bus);
            uint8_t numa_node = pci_bus_numa_node(bus);

            /* look only for expander root buses */
            if (!pci_bus_is_root(bus)) {
                continue;
            }

            /* PCI0 gets bus numbers below the lowest expander bus */
            if (bus_num < root_bus_limit) {
                root_bus_limit = bus_num - 1;
            }

            scope = aml_scope("\\_SB");
            dev = aml_device("PC%.02X", bus_num);
            aml_append(dev, aml_name_decl("_UID", aml_int(bus_num)));
            aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num)));
            if (pci_bus_is_express(bus)) {
                aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
                aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
                aml_append(dev, build_q35_osc_method());
            } else {
                aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
            }

            if (numa_node != NUMA_NODE_UNASSIGNED) {
                aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node)));
            }

            aml_append(dev, build_prt(false));
            crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), &crs_range_set);
            aml_append(dev, aml_name_decl("_CRS", crs));
            aml_append(scope, dev);
            aml_append(dsdt, scope);
        }
    }

    /*
     * At this point crs_range_set has all the ranges used by pci
     * busses *other* than PCI0. These ranges will be excluded from
     * the PCI0._CRS. Add mmconfig to the set so it will be excluded
     * too.
     */
    if (acpi_get_mcfg(&mcfg)) {
        crs_range_insert(crs_range_set.mem_ranges,
                         mcfg.base, mcfg.base + mcfg.size - 1);
    }

    scope = aml_scope("\\_SB.PCI0");
    /* build PCI0._CRS */
    crs = aml_resource_template();
    aml_append(crs,
        aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
                            0x0000, 0x0, root_bus_limit,
                            0x0000, root_bus_limit + 1));
    /* PCI config address/data ports */
    aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08));

    aml_append(crs,
        aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
                    AML_POS_DECODE, AML_ENTIRE_RANGE,
                    0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8));

    /* I/O space 0x0D00-0xFFFF minus what other buses claimed */
    crs_replace_with_free_ranges(crs_range_set.io_ranges, 0x0D00, 0xFFFF);
    for (i = 0; i < crs_range_set.io_ranges->len; i++) {
        entry = g_ptr_array_index(crs_range_set.io_ranges, i);
        aml_append(crs,
            aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
                        AML_POS_DECODE, AML_ENTIRE_RANGE,
                        0x0000, entry->base, entry->limit,
                        0x0000, entry->limit - entry->base + 1));
    }

    /* legacy VGA memory window */
    aml_append(crs,
        aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                         AML_CACHEABLE, AML_READ_WRITE,
                         0, 0x000A0000, 0x000BFFFF, 0, 0x00020000));

    /* 32-bit PCI hole minus ranges claimed by other buses / mmconfig */
    crs_replace_with_free_ranges(crs_range_set.mem_ranges,
                                 range_lob(pci_hole),
                                 range_upb(pci_hole));
    for (i = 0; i < crs_range_set.mem_ranges->len; i++) {
        entry = g_ptr_array_index(crs_range_set.mem_ranges, i);
        aml_append(crs,
            aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
                             AML_NON_CACHEABLE, AML_READ_WRITE,
                             0, entry->base, entry->limit,
                             0, entry->limit - entry->base + 1));
    }

    /* optional 64-bit PCI hole, same exclusion treatment */
    if (!range_is_empty(pci_hole64)) {
        crs_replace_with_free_ranges(crs_range_set.mem_64bit_ranges,
                                     range_lob(pci_hole64),
                                     range_upb(pci_hole64));
        for (i = 0; i < crs_range_set.mem_64bit_ranges->len; i++) {
            entry = g_ptr_array_index(crs_range_set.mem_64bit_ranges, i);
            aml_append(crs,
                       aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED,
                                        AML_MAX_FIXED,
                                        AML_CACHEABLE, AML_READ_WRITE,
                                        0, entry->base, entry->limit,
                                        0, entry->limit - entry->base + 1));
        }
    }

    if (TPM_IS_TIS(tpm_find())) {
        aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
                   TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
    }
    aml_append(scope, aml_name_decl("_CRS", crs));

    /* reserve GPE0 block resources */
    dev = aml_device("GPE0");
    aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
    aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources")));
    /* device present, functioning, decoding, not shown in UI */
    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
    crs = aml_resource_template();
    aml_append(crs,
        aml_io(
               AML_DECODE16,
               pm->fadt.gpe0_blk.address,
               pm->fadt.gpe0_blk.address,
               1,
               pm->fadt.gpe0_blk.bit_width / 8)
    );
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);

    crs_range_set_free(&crs_range_set);

    /* reserve PCIHP resources */
    if (pm->pcihp_io_len) {
        dev = aml_device("PHPR");
        aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
        aml_append(dev,
            aml_name_decl("_UID", aml_string("PCI Hotplug resources")));
        /* device present, functioning, decoding, not shown in UI */
        aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
        crs = aml_resource_template();
        aml_append(crs,
            aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1,
                   pm->pcihp_io_len)
        );
        aml_append(dev, aml_name_decl("_CRS", crs));
        aml_append(scope, dev);
    }
    aml_append(dsdt, scope);

    /* create S3_ / S4_ / S5_ packages if necessary */
    scope = aml_scope("\\");
    if (!pm->s3_disabled) {
        pkg = aml_package(4);
        aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */
        aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(scope, aml_name_decl("_S3", pkg));
    }

    if (!pm->s4_disabled) {
        pkg = aml_package(4);
        aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */
        /* PM1b_CNT.SLP_TYP, FIXME: not impl. */
        aml_append(pkg, aml_int(pm->s4_val));
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(pkg, aml_int(0)); /* reserved */
        aml_append(scope, aml_name_decl("_S4", pkg));
    }

    pkg = aml_package(4);
    aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */
    aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */
    aml_append(pkg, aml_int(0)); /* reserved */
    aml_append(pkg, aml_int(0)); /* reserved */
    aml_append(scope, aml_name_decl("_S5", pkg));
    aml_append(dsdt, scope);

    /* create fw_cfg node, unconditionally */
    {
        /* when using port i/o, the 8-bit data register *always* overlaps
         * with half of the 16-bit control register. Hence, the total size
         * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the
         * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 */
        uint8_t io_size = object_property_get_bool(OBJECT(x86ms->fw_cfg),
                                                   "dma_enabled", NULL) ?
                          ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) :
                          FW_CFG_CTL_SIZE;

        scope = aml_scope("\\_SB.PCI0");
        dev = aml_device("FWCF");

        aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));

        /* device present, functioning, decoding, not shown in UI */
        aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));

        crs = aml_resource_template();
        aml_append(crs,
            aml_io(AML_DECODE16, FW_CFG_IO_BASE, FW_CFG_IO_BASE, 0x01, io_size)
        );
        aml_append(dev, aml_name_decl("_CRS", crs));

        aml_append(scope, dev);
        aml_append(dsdt, scope);
    }

    if (misc->applesmc_io_base) {
        scope = aml_scope("\\_SB.PCI0.ISA");
        dev = aml_device("SMC");

        aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001")));
        /* device present, functioning, decoding, not shown in UI */
        aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));

        crs = aml_resource_template();
        aml_append(crs,
            aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base,
                   0x01, APPLESMC_MAX_DATA_LENGTH)
        );
        aml_append(crs, aml_irq_no_flags(6));
        aml_append(dev, aml_name_decl("_CRS", crs));

        aml_append(scope, dev);
        aml_append(dsdt, scope);
    }

    if (misc->pvpanic_port) {
        scope = aml_scope("\\_SB.PCI0.ISA");

        dev = aml_device("PEVT");
        aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001")));

        crs = aml_resource_template();
        aml_append(crs,
            aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1)
        );
        aml_append(dev, aml_name_decl("_CRS", crs));

        aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO,
                                             aml_int(misc->pvpanic_port), 1));
        field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
        aml_append(field, aml_named_field("PEPT", 8));
        aml_append(dev, field);

        /* device present, functioning, decoding, shown in UI */
        aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));

        /* RDPT: read the pvpanic event port */
        method = aml_method("RDPT", 0, AML_NOTSERIALIZED);
        aml_append(method, aml_store(aml_name("PEPT"), aml_local(0)));
        aml_append(method, aml_return(aml_local(0)));
        aml_append(dev, method);

        /* WRPT: write the pvpanic event port */
        method = aml_method("WRPT", 1, AML_NOTSERIALIZED);
        aml_append(method, aml_store(aml_arg(0), aml_name("PEPT")));
        aml_append(dev, method);

        aml_append(scope, dev);
        aml_append(dsdt, scope);
    }

    sb_scope = aml_scope("\\_SB");
    {
        Object *pci_host;
        PCIBus *bus = NULL;

        pci_host = acpi_get_i386_pci_host();
        if (pci_host) {
            bus = PCI_HOST_BRIDGE(pci_host)->bus;
        }

        if (bus) {
            Aml *scope = aml_scope("PCI0");
            /* Scan all PCI buses. Generate tables to support hotplug. */
            build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);

            if (TPM_IS_TIS(tpm)) {
                /* TPM 2.0 uses _HID MSFT0101; TPM 1.2 uses PNP0C31 under ISA */
                if (misc->tpm_version == TPM_VERSION_2_0) {
                    dev = aml_device("TPM");
                    aml_append(dev, aml_name_decl("_HID",
                                                  aml_string("MSFT0101")));
                } else {
                    dev = aml_device("ISA.TPM");
                    aml_append(dev, aml_name_decl("_HID",
                                                  aml_eisaid("PNP0C31")));
                }

                aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
                crs = aml_resource_template();
                aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
                           TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
                /*
                    FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs,
                    Rewrite to take IRQ from TPM device model and
                    fix default IRQ value there to use some unused IRQ
                 */
                /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */
                aml_append(dev, aml_name_decl("_CRS", crs));

                tpm_build_ppi_acpi(tpm, dev);

                aml_append(scope, dev);
            }

            aml_append(sb_scope, scope);
        }
    }

    if (TPM_IS_CRB(tpm)) {
        dev = aml_device("TPM");
        aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
        crs = aml_resource_template();
        aml_append(crs, aml_memory32_fixed(TPM_CRB_ADDR_BASE,
                                           TPM_CRB_ADDR_SIZE, AML_READ_WRITE));
        aml_append(dev, aml_name_decl("_CRS", crs));

        method = aml_method("_STA", 0, AML_NOTSERIALIZED);
        aml_append(method, aml_return(aml_int(0x0f)));
        aml_append(dev, method);

        tpm_build_ppi_acpi(tpm, dev);

        aml_append(sb_scope, dev);
    }

    aml_append(dsdt, sb_scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
    build_header(linker, table_data,
        (void *)(table_data->data + table_data->len - dsdt->buf->len),
        "DSDT", dsdt->buf->len, 1, NULL, NULL);
    free_aml_allocator();
}
  1920. static void
  1921. build_hpet(GArray *table_data, BIOSLinker *linker)
  1922. {
  1923. Acpi20Hpet *hpet;
  1924. hpet = acpi_data_push(table_data, sizeof(*hpet));
  1925. /* Note timer_block_id value must be kept in sync with value advertised by
  1926. * emulated hpet
  1927. */
  1928. hpet->timer_block_id = cpu_to_le32(0x8086a201);
  1929. hpet->addr.address = cpu_to_le64(HPET_BASE);
  1930. build_header(linker, table_data,
  1931. (void *)hpet, "HPET", sizeof(*hpet), 1, NULL, NULL);
  1932. }
  1933. static void
  1934. build_tpm_tcpa(GArray *table_data, BIOSLinker *linker, GArray *tcpalog)
  1935. {
  1936. Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
  1937. unsigned log_addr_size = sizeof(tcpa->log_area_start_address);
  1938. unsigned log_addr_offset =
  1939. (char *)&tcpa->log_area_start_address - table_data->data;
  1940. tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT);
  1941. tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
  1942. acpi_data_push(tcpalog, le32_to_cpu(tcpa->log_area_minimum_length));
  1943. bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, tcpalog, 1,
  1944. false /* high memory */);
  1945. /* log area start address to be filled by Guest linker */
  1946. bios_linker_loader_add_pointer(linker,
  1947. ACPI_BUILD_TABLE_FILE, log_addr_offset, log_addr_size,
  1948. ACPI_BUILD_TPMLOG_FILE, 0);
  1949. build_header(linker, table_data,
  1950. (void *)tcpa, "TCPA", sizeof(*tcpa), 2, NULL, NULL);
  1951. }
  1952. static void
  1953. build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog)
  1954. {
  1955. Acpi20TPM2 *tpm2_ptr = acpi_data_push(table_data, sizeof *tpm2_ptr);
  1956. unsigned log_addr_size = sizeof(tpm2_ptr->log_area_start_address);
  1957. unsigned log_addr_offset =
  1958. (char *)&tpm2_ptr->log_area_start_address - table_data->data;
  1959. tpm2_ptr->platform_class = cpu_to_le16(TPM2_ACPI_CLASS_CLIENT);
  1960. if (TPM_IS_TIS(tpm_find())) {
  1961. tpm2_ptr->control_area_address = cpu_to_le64(0);
  1962. tpm2_ptr->start_method = cpu_to_le32(TPM2_START_METHOD_MMIO);
  1963. } else if (TPM_IS_CRB(tpm_find())) {
  1964. tpm2_ptr->control_area_address = cpu_to_le64(TPM_CRB_ADDR_CTRL);
  1965. tpm2_ptr->start_method = cpu_to_le32(TPM2_START_METHOD_CRB);
  1966. } else {
  1967. g_warn_if_reached();
  1968. }
  1969. tpm2_ptr->log_area_minimum_length =
  1970. cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
  1971. /* log area start address to be filled by Guest linker */
  1972. bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
  1973. log_addr_offset, log_addr_size,
  1974. ACPI_BUILD_TPMLOG_FILE, 0);
  1975. build_header(linker, table_data,
  1976. (void *)tpm2_ptr, "TPM2", sizeof(*tpm2_ptr), 4, NULL, NULL);
  1977. }
  1978. #define HOLE_640K_START (640 * KiB)
  1979. #define HOLE_640K_END (1 * MiB)
/*
 * Build the SRAT (System Resource Affinity Table): one processor affinity
 * entry per possible CPU (APIC or x2APIC format depending on the APIC ID),
 * memory affinity entries per NUMA node with the 640K-1M and below-4G PCI
 * holes carved out, zero-filled padding entries for hotplug, and a final
 * hotpluggable entry covering the device-memory region when present.
 *
 * @table_data: ACPI tables blob the SRAT is appended to
 * @linker: BIOS linker/loader script accumulator
 * @machine: machine state providing CPU/NUMA topology
 */
static void
build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratMemoryAffinity *numamem;

    int i;
    int srat_start, numa_start, slots;
    uint64_t mem_len, mem_base, next_base;
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    X86MachineState *x86ms = X86_MACHINE(machine);
    const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
    PCMachineState *pcms = PC_MACHINE(machine);
    ram_addr_t hotplugabble_address_space_size =
        object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE,
                                NULL);

    srat_start = table_data->len;

    srat = acpi_data_push(table_data, sizeof *srat);
    /* SRAT reserved1 field must be 1 per the ACPI spec */
    srat->reserved1 = cpu_to_le32(1);

    /* One processor affinity structure per possible CPU */
    for (i = 0; i < apic_ids->len; i++) {
        int node_id = apic_ids->cpus[i].props.node_id;
        uint32_t apic_id = apic_ids->cpus[i].arch_id;

        if (apic_id < 255) {
            /* fits in the legacy 8-bit Local APIC affinity structure */
            AcpiSratProcessorAffinity *core;

            core = acpi_data_push(table_data, sizeof *core);
            core->type = ACPI_SRAT_PROCESSOR_APIC;
            core->length = sizeof(*core);
            core->local_apic_id = apic_id;
            core->proximity_lo = node_id;
            memset(core->proximity_hi, 0, 3);
            core->local_sapic_eid = 0;
            core->flags = cpu_to_le32(1); /* enabled */
        } else {
            /* large APIC IDs need the x2APIC affinity structure */
            AcpiSratProcessorX2ApicAffinity *core;

            core = acpi_data_push(table_data, sizeof *core);
            core->type = ACPI_SRAT_PROCESSOR_x2APIC;
            core->length = sizeof(*core);
            core->x2apic_id = cpu_to_le32(apic_id);
            core->proximity_domain = cpu_to_le32(node_id);
            core->flags = cpu_to_le32(1); /* enabled */
        }
    }

    /* the memory map is a bit tricky, it contains at least one hole
     * from 640k-1M and possibly another one from 3.5G-4G.
     */
    next_base = 0;
    numa_start = table_data->len;

    for (i = 1; i < pcms->numa_nodes + 1; ++i) {
        mem_base = next_base;
        mem_len = pcms->node_mem[i - 1];
        next_base = mem_base + mem_len;

        /* Cut out the 640K hole */
        if (mem_base <= HOLE_640K_START &&
            next_base > HOLE_640K_START) {
            mem_len -= next_base - HOLE_640K_START;
            if (mem_len > 0) {
                numamem = acpi_data_push(table_data, sizeof *numamem);
                build_srat_memory(numamem, mem_base, mem_len, i - 1,
                                  MEM_AFFINITY_ENABLED);
            }

            /* Check for the rare case: 640K < RAM < 1M */
            if (next_base <= HOLE_640K_END) {
                next_base = HOLE_640K_END;
                continue;
            }
            /* remainder of this node's memory resumes above the hole */
            mem_base = HOLE_640K_END;
            mem_len = next_base - HOLE_640K_END;
        }

        /* Cut out the ACPI_PCI hole */
        if (mem_base <= x86ms->below_4g_mem_size &&
            next_base > x86ms->below_4g_mem_size) {
            mem_len -= next_base - x86ms->below_4g_mem_size;
            if (mem_len > 0) {
                numamem = acpi_data_push(table_data, sizeof *numamem);
                build_srat_memory(numamem, mem_base, mem_len, i - 1,
                                  MEM_AFFINITY_ENABLED);
            }
            /* remainder is relocated above 4G */
            mem_base = 1ULL << 32;
            mem_len = next_base - x86ms->below_4g_mem_size;
            next_base = mem_base + mem_len;
        }

        if (mem_len > 0) {
            numamem = acpi_data_push(table_data, sizeof *numamem);
            build_srat_memory(numamem, mem_base, mem_len, i - 1,
                              MEM_AFFINITY_ENABLED);
        }
    }

    /* Pad with disabled entries so the table always carries at least
     * numa_nodes + 2 memory affinity slots. */
    slots = (table_data->len - numa_start) / sizeof *numamem;
    for (; slots < pcms->numa_nodes + 2; slots++) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
    }

    /*
     * Entry is required for Windows to enable memory hotplug in OS
     * and for Linux to enable SWIOTLB when booted with less than
     * 4G of RAM. Windows works better if the entry sets proximity
     * to the highest NUMA node in the machine.
     * Memory devices may override proximity set by this entry,
     * providing _PXM method if necessary.
     */
    if (hotplugabble_address_space_size) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, machine->device_memory->base,
                          hotplugabble_address_space_size, pcms->numa_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + srat_start),
                 "SRAT",
                 table_data->len - srat_start, 1, NULL, NULL);
}
  2090. /*
  2091. * VT-d spec 8.1 DMA Remapping Reporting Structure
  2092. * (version Oct. 2014 or later)
  2093. */
  2094. static void
  2095. build_dmar_q35(GArray *table_data, BIOSLinker *linker)
  2096. {
  2097. int dmar_start = table_data->len;
  2098. AcpiTableDmar *dmar;
  2099. AcpiDmarHardwareUnit *drhd;
  2100. AcpiDmarRootPortATS *atsr;
  2101. uint8_t dmar_flags = 0;
  2102. X86IOMMUState *iommu = x86_iommu_get_default();
  2103. AcpiDmarDeviceScope *scope = NULL;
  2104. /* Root complex IOAPIC use one path[0] only */
  2105. size_t ioapic_scope_size = sizeof(*scope) + sizeof(scope->path[0]);
  2106. IntelIOMMUState *intel_iommu = INTEL_IOMMU_DEVICE(iommu);
  2107. assert(iommu);
  2108. if (x86_iommu_ir_supported(iommu)) {
  2109. dmar_flags |= 0x1; /* Flags: 0x1: INT_REMAP */
  2110. }
  2111. dmar = acpi_data_push(table_data, sizeof(*dmar));
  2112. dmar->host_address_width = intel_iommu->aw_bits - 1;
  2113. dmar->flags = dmar_flags;
  2114. /* DMAR Remapping Hardware Unit Definition structure */
  2115. drhd = acpi_data_push(table_data, sizeof(*drhd) + ioapic_scope_size);
  2116. drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT);
  2117. drhd->length = cpu_to_le16(sizeof(*drhd) + ioapic_scope_size);
  2118. drhd->flags = ACPI_DMAR_INCLUDE_PCI_ALL;
  2119. drhd->pci_segment = cpu_to_le16(0);
  2120. drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR);
  2121. /* Scope definition for the root-complex IOAPIC. See VT-d spec
  2122. * 8.3.1 (version Oct. 2014 or later). */
  2123. scope = &drhd->scope[0];
  2124. scope->entry_type = 0x03; /* Type: 0x03 for IOAPIC */
  2125. scope->length = ioapic_scope_size;
  2126. scope->enumeration_id = ACPI_BUILD_IOAPIC_ID;
  2127. scope->bus = Q35_PSEUDO_BUS_PLATFORM;
  2128. scope->path[0].device = PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC);
  2129. scope->path[0].function = PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC);
  2130. if (iommu->dt_supported) {
  2131. atsr = acpi_data_push(table_data, sizeof(*atsr));
  2132. atsr->type = cpu_to_le16(ACPI_DMAR_TYPE_ATSR);
  2133. atsr->length = cpu_to_le16(sizeof(*atsr));
  2134. atsr->flags = ACPI_DMAR_ATSR_ALL_PORTS;
  2135. atsr->pci_segment = cpu_to_le16(0);
  2136. }
  2137. build_header(linker, table_data, (void *)(table_data->data + dmar_start),
  2138. "DMAR", table_data->len - dmar_start, 1, NULL, NULL);
  2139. }
/*
 * IVRS table as specified in AMD IOMMU Specification v2.62, Section 5.2
 * accessible here http://support.amd.com/TechDocs/48882_IOMMU.pdf
 */
/* BDF of the southbridge IO-APIC (bus 0, device 0x14, function 0), used for
 * the special IVHD device entry when interrupt remapping is supported. */
#define IOAPIC_SB_DEVID (uint64_t)PCI_BUILD_BDF(0, PCI_DEVFN(0x14, 0))
/*
 * Insert IVHD entry for device and recurse, insert alias, or insert range as
 * necessary for the PCI topology.
 *
 * Callback for pci_for_each_device(): appends 4-byte (or 8-byte alias) IVHD
 * device entries for @dev to the GArray passed via @opaque.
 */
static void
insert_ivhd(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    GArray *table_data = opaque;
    uint32_t entry;

    /* "Select" IVHD entry, type 0x2 */
    entry = PCI_BUILD_BDF(pci_bus_num(bus), dev->devfn) << 8 | 0x2;
    build_append_int_noprefix(table_data, entry, 4);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
        uint8_t sec = pci_bus_num(sec_bus);
        uint8_t sub = dev->config[PCI_SUBORDINATE_BUS];

        if (pci_bus_is_express(sec_bus)) {
            /*
             * Walk the bus if there are subordinates, otherwise use a range
             * to cover an entire leaf bus. We could potentially also use a
             * range for traversed buses, but we'd need to take care not to
             * create both Select and Range entries covering the same device.
             * This is easier and potentially more compact.
             *
             * An example bare metal system seems to use Select entries for
             * root ports without a slot (ie. built-ins) and Range entries
             * when there is a slot. The same system also only hard-codes
             * the alias range for an onboard PCIe-to-PCI bridge, apparently
             * making no effort to support nested bridges. We attempt to
             * be more thorough here.
             */
            if (sec == sub) { /* leaf bus */
                /* "Start of Range" IVHD entry, type 0x3 */
                entry = PCI_BUILD_BDF(sec, PCI_DEVFN(0, 0)) << 8 | 0x3;
                build_append_int_noprefix(table_data, entry, 4);
                /* "End of Range" IVHD entry, type 0x4 */
                entry = PCI_BUILD_BDF(sub, PCI_DEVFN(31, 7)) << 8 | 0x4;
                build_append_int_noprefix(table_data, entry, 4);
            } else {
                /* Non-leaf: recurse into every device on the secondary bus. */
                pci_for_each_device(sec_bus, sec, insert_ivhd, table_data);
            }
        } else {
            /*
             * If the secondary bus is conventional, then we need to create an
             * Alias range for everything downstream. The range covers the
             * first devfn on the secondary bus to the last devfn on the
             * subordinate bus. The alias target depends on legacy versus
             * express bridges, just as in pci_device_iommu_address_space().
             * DeviceIDa vs DeviceIDb as per the AMD IOMMU spec.
             */
            uint16_t dev_id_a, dev_id_b;

            dev_id_a = PCI_BUILD_BDF(sec, PCI_DEVFN(0, 0));

            if (pci_is_express(dev) &&
                pcie_cap_get_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* PCIe-to-PCI bridge: alias target is (secondary bus, 00.0). */
                dev_id_b = dev_id_a;
            } else {
                /* Legacy bridge: alias target is the bridge's own BDF. */
                dev_id_b = PCI_BUILD_BDF(pci_bus_num(bus), dev->devfn);
            }

            /* "Alias Start of Range" IVHD entry, type 0x43, 8 bytes */
            build_append_int_noprefix(table_data, dev_id_a << 8 | 0x43, 4);
            build_append_int_noprefix(table_data, dev_id_b << 8 | 0x0, 4);
            /* "End of Range" IVHD entry, type 0x4 */
            entry = PCI_BUILD_BDF(sub, PCI_DEVFN(31, 7)) << 8 | 0x4;
            build_append_int_noprefix(table_data, entry, 4);
        }
    }
}
  2212. /* For all PCI host bridges, walk and insert IVHD entries */
  2213. static int
  2214. ivrs_host_bridges(Object *obj, void *opaque)
  2215. {
  2216. GArray *ivhd_blob = opaque;
  2217. if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
  2218. PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
  2219. if (bus) {
  2220. pci_for_each_device(bus, pci_bus_num(bus), insert_ivhd, ivhd_blob);
  2221. }
  2222. }
  2223. return 0;
  2224. }
/*
 * Build the IVRS table exposing the emulated AMD IOMMU: fixed IVRS header,
 * one type-10h IVHD describing the IOMMU unit, then IVHD device entries
 * gathered from a walk of all PCI host bridges (plus an optional special
 * IO-APIC entry when interrupt remapping is on).
 */
static void
build_amd_iommu(GArray *table_data, BIOSLinker *linker)
{
    /* 24 == size of the fixed type-10h IVHD fields appended below */
    int ivhd_table_len = 24;
    int iommu_start = table_data->len;
    AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default());
    GArray *ivhd_blob = g_array_new(false, true, 1);

    /* IVRS header */
    acpi_data_push(table_data, sizeof(AcpiTableHeader));
    /* IVinfo - IO virtualization information common to all
     * IOMMU units in a system
     */
    build_append_int_noprefix(table_data, 40UL << 8/* PASize */, 4);
    /* reserved */
    build_append_int_noprefix(table_data, 0, 8);

    /* IVHD definition - type 10h */
    build_append_int_noprefix(table_data, 0x10, 1);
    /* virtualization flags */
    build_append_int_noprefix(table_data,
                              (1UL << 0) | /* HtTunEn */
                              (1UL << 4) | /* iotblSup */
                              (1UL << 6) | /* PrefSup */
                              (1UL << 7), /* PPRSup */
                              1);

    /*
     * A PCI bus walk, for each PCI host bridge, is necessary to create a
     * complete set of IVHD entries. Do this into a separate blob so that we
     * can calculate the total IVRS table length here and then append the new
     * blob further below. Fall back to an entry covering all devices, which
     * is sufficient when no aliases are present.
     */
    object_child_foreach_recursive(object_get_root(),
                                   ivrs_host_bridges, ivhd_blob);

    if (!ivhd_blob->len) {
        /*
         * Type 1 device entry reporting all devices
         * These are 4-byte device entries currently reporting the range of
         * Refer to Spec - Table 95:IVHD Device Entry Type Codes(4-byte)
         */
        build_append_int_noprefix(ivhd_blob, 0x0000001, 4);
    }

    ivhd_table_len += ivhd_blob->len;

    /*
     * When interrupt remapping is supported, we add a special IVHD device
     * for type IO-APIC.
     */
    if (x86_iommu_ir_supported(x86_iommu_get_default())) {
        ivhd_table_len += 8;
    }

    /* IVHD length */
    build_append_int_noprefix(table_data, ivhd_table_len, 2);
    /* DeviceID */
    build_append_int_noprefix(table_data, s->devid, 2);
    /* Capability offset */
    build_append_int_noprefix(table_data, s->capab_offset, 2);
    /* IOMMU base address */
    build_append_int_noprefix(table_data, s->mmio.addr, 8);
    /* PCI Segment Group */
    build_append_int_noprefix(table_data, 0, 2);
    /* IOMMU info */
    build_append_int_noprefix(table_data, 0, 2);
    /* IOMMU Feature Reporting */
    /*
     * NOTE(review): the low 32 bits of (48UL << 30) and (48UL << 28) are
     * both zero, so only GTSup and GASup actually land in this 4-byte
     * field; verify the intended HATS/GATS encoding against the AMD
     * IOMMU specification.
     */
    build_append_int_noprefix(table_data,
                              (48UL << 30) | /* HATS */
                              (48UL << 28) | /* GATS */
                              (1UL << 2)   | /* GTSup */
                              (1UL << 6),    /* GASup */
                              4);

    /* IVHD entries as found above */
    g_array_append_vals(table_data, ivhd_blob->data, ivhd_blob->len);
    g_array_free(ivhd_blob, TRUE);

    /*
     * Add a special IVHD device type.
     * Refer to spec - Table 95: IVHD device entry type codes
     *
     * Linux IOMMU driver checks for the special IVHD device (type IO-APIC).
     * See Linux kernel commit 'c2ff5cf5294bcbd7fa50f7d860e90a66db7e5059'
     */
    if (x86_iommu_ir_supported(x86_iommu_get_default())) {
        build_append_int_noprefix(table_data,
                                  (0x1ull << 56) |           /* type IOAPIC */
                                  (IOAPIC_SB_DEVID << 40) |  /* IOAPIC devid */
                                  0x48,                      /* special device */
                                  8);
    }

    build_header(linker, table_data, (void *)(table_data->data + iommu_start),
                 "IVRS", table_data->len - iommu_start, 1, NULL, NULL);
}
/*
 * State kept for the guest-visible ACPI blobs so acpi_build_update() can
 * re-generate and re-patch them when the guest (re)reads them.
 */
typedef
struct AcpiBuildState {
    /* Copy of table in RAM (for patching). */
    MemoryRegion *table_mr;
    /* Is table patched? */
    uint8_t patched;
    /* Legacy (!rsdp_in_ram) machines: malloc'd RSDP copy served through a
     * fw_cfg file callback; NULL when the RSDP lives in a RAM blob instead. */
    void *rsdp;
    /* RSDP ROM blob when rsdp_in_ram; NULL otherwise. */
    MemoryRegion *rsdp_mr;
    /* ROM blob holding the BIOS linker/loader command script. */
    MemoryRegion *linker_mr;
} AcpiBuildState;
  2323. static bool acpi_get_mcfg(AcpiMcfgInfo *mcfg)
  2324. {
  2325. Object *pci_host;
  2326. QObject *o;
  2327. pci_host = acpi_get_i386_pci_host();
  2328. g_assert(pci_host);
  2329. o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_BASE, NULL);
  2330. if (!o) {
  2331. return false;
  2332. }
  2333. mcfg->base = qnum_get_uint(qobject_to(QNum, o));
  2334. qobject_unref(o);
  2335. if (mcfg->base == PCIE_BASE_ADDR_UNMAPPED) {
  2336. return false;
  2337. }
  2338. o = object_property_get_qobject(pci_host, PCIE_HOST_MCFG_SIZE, NULL);
  2339. assert(o);
  2340. mcfg->size = qnum_get_uint(qobject_to(QNum, o));
  2341. qobject_unref(o);
  2342. return true;
  2343. }
/*
 * Build the complete set of ACPI tables for this machine into @tables.
 *
 * Table order matters: FACS first (64-byte alignment requirement), then
 * DSDT, then the RSDT-referenced tables, and finally the RSDT and RSDP.
 * The resulting blob is padded to a fixed size to keep migration stable.
 */
static
void acpi_build(AcpiBuildTables *tables, MachineState *machine)
{
    PCMachineState *pcms = PC_MACHINE(machine);
    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
    X86MachineState *x86ms = X86_MACHINE(machine);
    GArray *table_offsets;
    unsigned facs, dsdt, rsdt, fadt;
    AcpiPmInfo pm;
    AcpiMiscInfo misc;
    AcpiMcfgInfo mcfg;
    Range pci_hole, pci_hole64;
    uint8_t *u;
    size_t aml_len = 0;
    GArray *tables_blob = tables->table_data;
    AcpiSlicOem slic_oem = { .id = NULL, .table_id = NULL };
    Object *vmgenid_dev;

    /* Gather machine configuration needed by the tables below. */
    acpi_get_pm_info(machine, &pm);
    acpi_get_misc_info(&misc);
    acpi_get_pci_holes(&pci_hole, &pci_hole64);
    acpi_get_slic_oem(&slic_oem);

    table_offsets = g_array_new(false, true /* clear */,
                                sizeof(uint32_t));
    ACPI_BUILD_DPRINTF("init ACPI tables\n");

    bios_linker_loader_alloc(tables->linker,
                             ACPI_BUILD_TABLE_FILE, tables_blob,
                             64 /* Ensure FACS is aligned */,
                             false /* high memory */);

    /*
     * FACS is pointed to by FADT.
     * We place it first since it's the only table that has alignment
     * requirements.
     */
    facs = tables_blob->len;
    build_facs(tables_blob);

    /* DSDT is pointed to by FADT */
    dsdt = tables_blob->len;
    build_dsdt(tables_blob, tables->linker, &pm, &misc,
               &pci_hole, &pci_hole64, machine);

    /* Count the size of the DSDT and SSDT, we will need it for legacy
     * sizing of ACPI tables.
     */
    aml_len += tables_blob->len - dsdt;

    /* ACPI tables pointed to by RSDT */
    fadt = tables_blob->len;
    acpi_add_table(table_offsets, tables_blob);
    pm.fadt.facs_tbl_offset = &facs;
    pm.fadt.dsdt_tbl_offset = &dsdt;
    pm.fadt.xdsdt_tbl_offset = &dsdt;
    build_fadt(tables_blob, tables->linker, &pm.fadt,
               slic_oem.id, slic_oem.table_id);
    aml_len += tables_blob->len - fadt;

    acpi_add_table(table_offsets, tables_blob);
    build_madt(tables_blob, tables->linker, pcms);

    /* Optional tables follow, each only emitted when the feature exists. */
    vmgenid_dev = find_vmgenid_dev();
    if (vmgenid_dev) {
        acpi_add_table(table_offsets, tables_blob);
        vmgenid_build_acpi(VMGENID(vmgenid_dev), tables_blob,
                           tables->vmgenid, tables->linker);
    }

    if (misc.has_hpet) {
        acpi_add_table(table_offsets, tables_blob);
        build_hpet(tables_blob, tables->linker);
    }
    if (misc.tpm_version != TPM_VERSION_UNSPEC) {
        acpi_add_table(table_offsets, tables_blob);
        build_tpm_tcpa(tables_blob, tables->linker, tables->tcpalog);

        if (misc.tpm_version == TPM_VERSION_2_0) {
            acpi_add_table(table_offsets, tables_blob);
            build_tpm2(tables_blob, tables->linker, tables->tcpalog);
        }
    }
    if (pcms->numa_nodes) {
        acpi_add_table(table_offsets, tables_blob);
        build_srat(tables_blob, tables->linker, machine);
        if (machine->numa_state->have_numa_distance) {
            acpi_add_table(table_offsets, tables_blob);
            build_slit(tables_blob, tables->linker, machine);
        }
    }
    if (acpi_get_mcfg(&mcfg)) {
        acpi_add_table(table_offsets, tables_blob);
        build_mcfg(tables_blob, tables->linker, &mcfg);
    }
    if (x86_iommu_get_default()) {
        IommuType IOMMUType = x86_iommu_get_type();
        if (IOMMUType == TYPE_AMD) {
            acpi_add_table(table_offsets, tables_blob);
            build_amd_iommu(tables_blob, tables->linker);
        } else if (IOMMUType == TYPE_INTEL) {
            acpi_add_table(table_offsets, tables_blob);
            build_dmar_q35(tables_blob, tables->linker);
        }
    }
    if (machine->nvdimms_state->is_enabled) {
        nvdimm_build_acpi(table_offsets, tables_blob, tables->linker,
                          machine->nvdimms_state, machine->ram_slots);
    }

    /* Add tables supplied by user (if any) */
    for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
        unsigned len = acpi_table_len(u);

        acpi_add_table(table_offsets, tables_blob);
        g_array_append_vals(tables_blob, u, len);
    }

    /* RSDT is pointed to by RSDP */
    rsdt = tables_blob->len;
    build_rsdt(tables_blob, tables->linker, table_offsets,
               slic_oem.id, slic_oem.table_id);

    /* RSDP is in FSEG memory, so allocate it separately */
    {
        AcpiRsdpData rsdp_data = {
            .revision = 0,
            .oem_id = ACPI_BUILD_APPNAME6,
            .xsdt_tbl_offset = NULL,
            .rsdt_tbl_offset = &rsdt,
        };
        build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
        if (!pcmc->rsdp_in_ram) {
            /* We used to allocate some extra space for RSDP revision 2 but
             * only used the RSDP revision 0 space. The extra bytes were
             * zeroed out and not used.
             * Here we continue wasting those extra 16 bytes to make sure we
             * don't break migration for machine types 2.2 and older due to
             * RSDP blob size mismatch.
             */
            build_append_int_noprefix(tables->rsdp, 0, 16);
        }
    }

    /* We'll expose it all to Guest so we want to reduce
     * chance of size changes.
     *
     * We used to align the tables to 4k, but of course this would
     * too simple to be enough. 4k turned out to be too small an
     * alignment very soon, and in fact it is almost impossible to
     * keep the table size stable for all (max_cpus, max_memory_slots)
     * combinations. So the table size is always 64k for pc-i440fx-2.1
     * and we give an error if the table grows beyond that limit.
     *
     * We still have the problem of migrating from "-M pc-i440fx-2.0". For
     * that, we exploit the fact that QEMU 2.1 generates _smaller_ tables
     * than 2.0 and we can always pad the smaller tables with zeros. We can
     * then use the exact size of the 2.0 tables.
     *
     * All this is for PIIX4, since QEMU 2.0 didn't support Q35 migration.
     */
    if (pcmc->legacy_acpi_table_size) {
        /* Subtracting aml_len gives the size of fixed tables. Then add the
         * size of the PIIX4 DSDT/SSDT in QEMU 2.0.
         */
        int legacy_aml_len =
            pcmc->legacy_acpi_table_size +
            ACPI_BUILD_LEGACY_CPU_AML_SIZE * x86ms->apic_id_limit;
        int legacy_table_size =
            ROUND_UP(tables_blob->len - aml_len + legacy_aml_len,
                     ACPI_BUILD_ALIGN_SIZE);
        if (tables_blob->len > legacy_table_size) {
            /* Should happen only with PCI bridges and -M pc-i440fx-2.0. */
            warn_report("ACPI table size %u exceeds %d bytes,"
                        " migration may not work",
                        tables_blob->len, legacy_table_size);
            error_printf("Try removing CPUs, NUMA nodes, memory slots"
                         " or PCI bridges.");
        }
        g_array_set_size(tables_blob, legacy_table_size);
    } else {
        /* Make sure we have a buffer in case we need to resize the tables. */
        if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
            /* As of QEMU 2.1, this fires with 160 VCPUs and 255 memory slots. */
            warn_report("ACPI table size %u exceeds %d bytes,"
                        " migration may not work",
                        tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2);
            error_printf("Try removing CPUs, NUMA nodes, memory slots"
                         " or PCI bridges.");
        }
        acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
    }

    acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE);

    /* Cleanup memory that's no longer used. */
    g_array_free(table_offsets, true);
}
  2524. static void acpi_ram_update(MemoryRegion *mr, GArray *data)
  2525. {
  2526. uint32_t size = acpi_data_len(data);
  2527. /* Make sure RAM size is correct - in case it got changed e.g. by migration */
  2528. memory_region_ram_resize(mr, size, &error_abort);
  2529. memcpy(memory_region_get_ram_ptr(mr), data->data, size);
  2530. memory_region_set_dirty(mr, 0, size);
  2531. }
  2532. static void acpi_build_update(void *build_opaque)
  2533. {
  2534. AcpiBuildState *build_state = build_opaque;
  2535. AcpiBuildTables tables;
  2536. /* No state to update or already patched? Nothing to do. */
  2537. if (!build_state || build_state->patched) {
  2538. return;
  2539. }
  2540. build_state->patched = 1;
  2541. acpi_build_tables_init(&tables);
  2542. acpi_build(&tables, MACHINE(qdev_get_machine()));
  2543. acpi_ram_update(build_state->table_mr, tables.table_data);
  2544. if (build_state->rsdp) {
  2545. memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp));
  2546. } else {
  2547. acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
  2548. }
  2549. acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
  2550. acpi_build_tables_cleanup(&tables, true);
  2551. }
  2552. static void acpi_build_reset(void *build_opaque)
  2553. {
  2554. AcpiBuildState *build_state = build_opaque;
  2555. build_state->patched = 0;
  2556. }
/*
 * Migration description for AcpiBuildState: only the "patched" flag is
 * transferred, preserving across migration whether the tables have
 * already been regenerated.
 */
static const VMStateDescription vmstate_acpi_build = {
    .name = "acpi_build",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(patched, AcpiBuildState),
        VMSTATE_END_OF_LIST()
    },
};
/*
 * Machine-init entry point: build the initial ACPI tables and expose them
 * to the guest as fw_cfg files / ROM blobs, registering the update and
 * reset callbacks that keep them patched thereafter.
 *
 * Bails out early (with a debug message) when fw_cfg is absent or ACPI
 * table generation is disabled for this machine.
 */
void acpi_setup(void)
{
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
    X86MachineState *x86ms = X86_MACHINE(pcms);
    AcpiBuildTables tables;
    AcpiBuildState *build_state;
    Object *vmgenid_dev;
    TPMIf *tpm;
    /* static: fw_cfg keeps a pointer to this blob after acpi_setup returns */
    static FwCfgTPMConfig tpm_config;

    if (!x86ms->fw_cfg) {
        ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
        return;
    }

    if (!pcms->acpi_build_enabled) {
        ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
        return;
    }

    if (!acpi_enabled) {
        ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
        return;
    }

    build_state = g_malloc0(sizeof *build_state);

    acpi_build_tables_init(&tables);
    acpi_build(&tables, MACHINE(pcms));

    /* Now expose it all to Guest */
    build_state->table_mr = acpi_add_rom_blob(acpi_build_update,
                                              build_state, tables.table_data,
                                              ACPI_BUILD_TABLE_FILE,
                                              ACPI_BUILD_TABLE_MAX_SIZE);
    assert(build_state->table_mr != NULL);

    build_state->linker_mr =
        acpi_add_rom_blob(acpi_build_update, build_state,
                          tables.linker->cmd_blob, "etc/table-loader", 0);

    fw_cfg_add_file(x86ms->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
                    tables.tcpalog->data, acpi_data_len(tables.tcpalog));

    tpm = tpm_find();
    if (tpm && object_property_get_bool(OBJECT(tpm), "ppi", &error_abort)) {
        /* Advertise the TPM Physical Presence Interface via fw_cfg. */
        tpm_config = (FwCfgTPMConfig) {
            .tpmppi_address = cpu_to_le32(TPM_PPI_ADDR_BASE),
            .tpm_version = tpm_get_version(tpm),
            .tpmppi_version = TPM_PPI_VERSION_1_30
        };
        fw_cfg_add_file(x86ms->fw_cfg, "etc/tpm/config",
                        &tpm_config, sizeof tpm_config);
    }

    vmgenid_dev = find_vmgenid_dev();
    if (vmgenid_dev) {
        vmgenid_add_fw_cfg(VMGENID(vmgenid_dev), x86ms->fw_cfg,
                           tables.vmgenid);
    }

    if (!pcmc->rsdp_in_ram) {
        /*
         * Keep for compatibility with old machine types.
         * Though RSDP is small, its contents isn't immutable, so
         * we'll update it along with the rest of tables on guest access.
         */
        uint32_t rsdp_size = acpi_data_len(tables.rsdp);

        build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size);
        fw_cfg_add_file_callback(x86ms->fw_cfg, ACPI_BUILD_RSDP_FILE,
                                 acpi_build_update, NULL, build_state,
                                 build_state->rsdp, rsdp_size, true);
        build_state->rsdp_mr = NULL;
    } else {
        build_state->rsdp = NULL;
        build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update,
                                                 build_state, tables.rsdp,
                                                 ACPI_BUILD_RSDP_FILE, 0);
    }

    qemu_register_reset(acpi_build_reset, build_state);
    acpi_build_reset(build_state);
    vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);

    /* Cleanup tables but don't free the memory: we track it
     * in build_state.
     */
    acpi_build_tables_cleanup(&tables, false);
}