kvm-cpu.c

/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/core/accel-cpu.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "hw/intc/riscv_imsic.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/misc.h"
#include "sysemu/runstate.h"
#include "hw/riscv/numa.h"

#define PR_RISCV_V_SET_CONTROL 69
#define PR_RISCV_V_VSTATE_CTRL_ON 2
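
/*
 * qemu_irq handler for the in-kernel APLIC: forwards line level
 * changes to the in-kernel irqchip via kvm_set_irq().
 */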
void riscv_kvm_aplic_request(void *opaque, int irq, int level)
{
    kvm_set_irq(kvm_state, irq, !!level);
}

static bool cap_has_mp_state;

static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type,
                                       uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }

    return id;
}

static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx)
{
    return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx;
}

static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx)
{
    return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx;
}

static uint64_t kvm_encode_reg_size_id(uint64_t id, size_t size_b)
{
    uint64_t size_ctz = __builtin_ctz(size_b);

    return id | (size_ctz << KVM_REG_SIZE_SHIFT);
}

static uint64_t kvm_riscv_vector_reg_id(RISCVCPU *cpu,
                                        uint64_t idx)
{
    uint64_t id;
    size_t size_b;

    g_assert(idx < 32);

    id = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | KVM_REG_RISCV_VECTOR_REG(idx);
    size_b = cpu->cfg.vlenb;

    return kvm_encode_reg_size_id(id, size_b);
}
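
/*
 * Convenience wrappers that build the u64 register IDs consumed by
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG for each RISC-V register class.
 */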
#define RISCV_CORE_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \
                           KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \
                           KVM_REG_RISCV_CSR_REG(name))

#define RISCV_CONFIG_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \
                           KVM_REG_RISCV_CONFIG_REG(name))

#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \
                                          KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx)

#define RISCV_VECTOR_CSR_REG(env, name) \
    kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \
                           KVM_REG_RISCV_VECTOR_CSR_REG(name))

#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (_ret) { \
            return _ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (_ret) { \
            return _ret; \
        } \
    } while (0)

#define KVM_RISCV_GET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)
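
/*
 * Describes one CPU option handled through KVM: 'offset' is either a
 * MISA bit or a byte offset into RISCVCPUConfig, 'kvm_reg_id' the
 * matching KVM register, 'user_set' whether the user changed it on the
 * command line and 'supported' whether the host KVM knows about it.
 */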
typedef struct KVMCPUConfig {
    const char *name;
    const char *description;
    target_ulong offset;
    uint64_t kvm_reg_id;
    bool user_set;
    bool supported;
} KVMCPUConfig;

#define KVM_MISA_CFG(_bit, _reg_id) \
    {.offset = _bit, .kvm_reg_id = _reg_id}

/* KVM ISA extensions */
static KVMCPUConfig kvm_misa_ext_cfgs[] = {
    KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
    KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
    KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
    KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
    KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
    KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
    KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
    KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V),
};

static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value = env->misa_ext_mask & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value, host_bit;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_bit = env->misa_ext_mask & misa_bit;

    if (value == host_bit) {
        return;
    }

    if (!value) {
        misa_ext_cfg->user_set = true;
        return;
    }

    /*
     * Forbid users from enabling extensions that aren't
     * available in the hart.
     */
    error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
               "enabled in the host", misa_ext_cfg->name);
}

static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
{
    CPURISCVState *env = &cpu->env;
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        target_ulong misa_bit = misa_cfg->offset;

        if (!misa_cfg->user_set) {
            continue;
        }

        /* If we're here we're going to disable the MISA bit */
        reg = 0;
        id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
                                    misa_cfg->kvm_reg_id);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            /*
             * We're not checking for -EINVAL because if the bit is about
             * to be disabled, it means that it was already enabled by
             * KVM. We determined that by fetching the 'isa' register
             * during init() time. Any error at this point is worth
             * aborting.
             */
            error_report("Unable to set KVM reg %s, error %d",
                         misa_cfg->name, ret);
            exit(EXIT_FAILURE);
        }
        env->misa_ext &= ~misa_bit;
    }
}

#define KVM_EXT_CFG(_name, _prop, _reg_id) \
    {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
     .kvm_reg_id = _reg_id}

static KVMCPUConfig kvm_multi_ext_cfgs[] = {
    KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
    KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
    KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
    KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
    KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
    KVM_EXT_CFG("zifencei", ext_zifencei, KVM_RISCV_ISA_EXT_ZIFENCEI),
    KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL),
    KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
    KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
    KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
    KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
    KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH),
    KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN),
    KVM_EXT_CFG("zba", ext_zba, KVM_RISCV_ISA_EXT_ZBA),
    KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
    KVM_EXT_CFG("zbc", ext_zbc, KVM_RISCV_ISA_EXT_ZBC),
    KVM_EXT_CFG("zbkb", ext_zbkb, KVM_RISCV_ISA_EXT_ZBKB),
    KVM_EXT_CFG("zbkc", ext_zbkc, KVM_RISCV_ISA_EXT_ZBKC),
    KVM_EXT_CFG("zbkx", ext_zbkx, KVM_RISCV_ISA_EXT_ZBKX),
    KVM_EXT_CFG("zbs", ext_zbs, KVM_RISCV_ISA_EXT_ZBS),
    KVM_EXT_CFG("zknd", ext_zknd, KVM_RISCV_ISA_EXT_ZKND),
    KVM_EXT_CFG("zkne", ext_zkne, KVM_RISCV_ISA_EXT_ZKNE),
    KVM_EXT_CFG("zknh", ext_zknh, KVM_RISCV_ISA_EXT_ZKNH),
    KVM_EXT_CFG("zkr", ext_zkr, KVM_RISCV_ISA_EXT_ZKR),
    KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED),
    KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH),
    KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT),
    KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO),
    KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB),
    KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC),
    KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH),
    KVM_EXT_CFG("zvfhmin", ext_zvfhmin, KVM_RISCV_ISA_EXT_ZVFHMIN),
    KVM_EXT_CFG("zvkb", ext_zvkb, KVM_RISCV_ISA_EXT_ZVKB),
    KVM_EXT_CFG("zvkg", ext_zvkg, KVM_RISCV_ISA_EXT_ZVKG),
    KVM_EXT_CFG("zvkned", ext_zvkned, KVM_RISCV_ISA_EXT_ZVKNED),
    KVM_EXT_CFG("zvknha", ext_zvknha, KVM_RISCV_ISA_EXT_ZVKNHA),
    KVM_EXT_CFG("zvknhb", ext_zvknhb, KVM_RISCV_ISA_EXT_ZVKNHB),
    KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
    KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
    KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
    KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
    KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
    KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
    KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
    KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
    KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
};
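
/*
 * Returns the address of the field inside cpu->cfg that 'kvmcfg'
 * describes, using the byte offset recorded at KVM_EXT_CFG() time.
 */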
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
{
    return (void *)&cpu->cfg + kvmcfg->offset;
}

static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
                            uint32_t val)
{
    bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);

    *ext_enabled = val;
}

static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
                                KVMCPUConfig *multi_ext)
{
    bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);

    return *ext_enabled;
}

static void kvm_cpu_get_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    visit_type_bool(v, name, &value, errp);
}

static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value, host_val;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    /*
     * Ignore if the user is setting the same value
     * as the host.
     */
    if (value == host_val) {
        return;
    }

    if (!multi_ext_cfg->supported) {
        /*
         * Error out if the user is trying to enable an
         * extension that KVM doesn't support. Ignore
         * option otherwise.
         */
        if (value) {
            error_setg(errp, "KVM does not support enabling extension %s",
                       multi_ext_cfg->name);
        }

        return;
    }

    multi_ext_cfg->user_set = true;
    kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
}

static KVMCPUConfig kvm_cbom_blocksize = {
    .name = "cbom_blocksize",
    .offset = CPU_CFG_OFFSET(cbom_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
};

static KVMCPUConfig kvm_cboz_blocksize = {
    .name = "cboz_blocksize",
    .offset = CPU_CFG_OFFSET(cboz_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
};

static KVMCPUConfig kvm_v_vlenb = {
    .name = "vlenb",
    .offset = CPU_CFG_OFFSET(vlenb),
    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
                  KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
};

static KVMCPUConfig kvm_sbi_dbcn = {
    .name = "sbi_dbcn",
    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                  KVM_REG_RISCV_SBI_EXT | KVM_RISCV_SBI_EXT_DBCN
};
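
/*
 * Writes back to KVM every multi-letter extension the user changed on
 * the command line. A disable that KVM refuses with -EINVAL is only
 * worth a warning; any other failure is fatal.
 */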
static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
{
    CPURISCVState *env = &cpu->env;
    uint64_t id, reg;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];

        if (!multi_ext_cfg->user_set) {
            continue;
        }

        id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
                                    multi_ext_cfg->kvm_reg_id);
        reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret != 0) {
            if (!reg && ret == -EINVAL) {
                warn_report("KVM cannot disable extension %s",
                            multi_ext_cfg->name);
            } else {
                error_report("Unable to enable extension %s in KVM, error %d",
                             multi_ext_cfg->name, ret);
                exit(EXIT_FAILURE);
            }
        }
    }
}

static void cpu_get_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    bool value = false;

    visit_type_bool(v, name, &value, errp);
}

static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
                                    const char *name,
                                    void *opaque, Error **errp)
{
    const char *propname = opaque;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        error_setg(errp, "'%s' is not available with KVM",
                   propname);
    }
}

static void riscv_cpu_add_kvm_unavail_prop(Object *obj, const char *prop_name)
{
    /* Check if KVM created the property already */
    if (object_property_find(obj, prop_name)) {
        return;
    }

    /*
     * Set the default to disabled for every extension
     * unknown to KVM and error out if the user attempts
     * to enable any of them.
     */
    object_property_add(obj, prop_name, "bool",
                        cpu_get_cfg_unavailable,
                        cpu_set_cfg_unavailable,
                        NULL, (void *)prop_name);
}

static void riscv_cpu_add_kvm_unavail_prop_array(Object *obj,
                                        const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        riscv_cpu_add_kvm_unavail_prop(obj, prop->name);
    }
}

static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
{
    int i;

    riscv_add_satp_mode_properties(cpu_obj);

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        int bit = misa_cfg->offset;

        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            kvm_cpu_get_misa_ext_cfg,
                            kvm_cpu_set_misa_ext_cfg,
                            NULL, misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        const char *ext_name = riscv_get_misa_ext_name(misa_bits[i]);
        riscv_cpu_add_kvm_unavail_prop(cpu_obj, ext_name);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];

        object_property_add(cpu_obj, multi_cfg->name, "bool",
                            kvm_cpu_get_multi_ext_cfg,
                            kvm_cpu_set_multi_ext_cfg,
                            NULL, multi_cfg);
    }

    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions);
    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts);

    /* We don't have the needed KVM support for profiles */
    for (i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name);
    }
}
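
/*
 * In the KVM_REG_RISCV_CORE space, as used below, index 0 selects
 * regs.pc and indexes 1..31 the GPRs x1..x31; x0 is hardwired to zero
 * and is not transferred.
 */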
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);

    return 0;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return 0;
}
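
/*
 * FP state is transferred through the 64-bit D register set when RVD
 * is present, and through the 32-bit F register set otherwise.
 */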
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}
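
/*
 * kvm_timer_dirty tracks whether the timer state cached in env is
 * current: it is set when the state is read back on VM stop and
 * cleared once the state has been written to KVM again on resume.
 */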
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}

static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare);

    /*
     * Setting the RISCV_TIMER_REG(state) register causes an error in
     * KVM when env->kvm_timer_state == 0. It would be better to fix
     * this in KVM, but for now it is easier to work around it in QEMU.
     * TODO: If KVM changes, adapt here.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state);
    }

    /*
     * For now, migration does not work between hosts with different
     * timer frequencies, so verify during migration that they match.
     */
    if (migration_is_running()) {
        KVM_RISCV_GET_TIMER(cs, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Destination host timer frequency != source host");
        }
    }

    env->kvm_timer_dirty = false;
}

uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs)
{
    uint64_t reg;

    KVM_RISCV_GET_TIMER(cs, frequency, reg);

    return reg;
}

static int kvm_riscv_get_regs_vector(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t vreg_id;
    int vreg_idx, ret = 0;

    if (!riscv_has_ext(env, RVV)) {
        return 0;
    }

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
    if (ret) {
        return ret;
    }
    env->vstart = reg;

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
    if (ret) {
        return ret;
    }
    env->vl = reg;

    ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
    if (ret) {
        return ret;
    }
    env->vtype = reg;

    if (kvm_v_vlenb.supported) {
        ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);
        if (ret) {
            return ret;
        }
        cpu->cfg.vlenb = reg;

        for (int i = 0; i < 32; i++) {
            /*
             * vreg[] is statically allocated using RV_VLEN_MAX.
             * Use it instead of vlenb to calculate vreg_idx for
             * simplicity.
             */
            vreg_idx = i * RV_VLEN_MAX / 64;
            vreg_id = kvm_riscv_vector_reg_id(cpu, i);

            ret = kvm_get_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
            if (ret) {
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_riscv_put_regs_vector(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t vreg_id;
    int vreg_idx, ret = 0;

    if (!riscv_has_ext(env, RVV)) {
        return 0;
    }

    reg = env->vstart;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), &reg);
    if (ret) {
        return ret;
    }

    reg = env->vl;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), &reg);
    if (ret) {
        return ret;
    }

    reg = env->vtype;
    ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), &reg);
    if (ret) {
        return ret;
    }

    if (kvm_v_vlenb.supported) {
        reg = cpu->cfg.vlenb;
        ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vlenb), &reg);

        for (int i = 0; i < 32; i++) {
            /*
             * vreg[] is statically allocated using RV_VLEN_MAX.
             * Use it instead of vlenb to calculate vreg_idx for
             * simplicity.
             */
            vreg_idx = i * RV_VLEN_MAX / 64;
            vreg_id = kvm_riscv_vector_reg_id(cpu, i);

            ret = kvm_set_one_reg(cs, vreg_id, &env->vreg[vreg_idx]);
            if (ret) {
                return ret;
            }
        }
    }

    return ret;
}
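
/*
 * A minimal throwaway KVM fd/VM/vcpu trio, used only at init time to
 * probe host capabilities before any real vcpu exists.
 */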
typedef struct KVMScratchCPU {
    int kvmfd;
    int vmfd;
    int cpufd;
} KVMScratchCPU;

/*
 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
 * from target/arm/kvm.c.
 */
static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
{
    int kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    scratch->kvmfd = kvmfd;
    scratch->vmfd = vmfd;
    scratch->cpufd = cpufd;

    return true;

 err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}

static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
{
    close(scratch->cpufd);
    close(scratch->vmfd);
    close(scratch->kvmfd);
}

static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    CPURISCVState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(env, mvendorid);
    reg.addr = (uint64_t)&cpu->cfg.mvendorid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve mvendorid from host, error %d", ret);
    }

    reg.id = RISCV_CONFIG_REG(env, marchid);
    reg.addr = (uint64_t)&cpu->cfg.marchid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve marchid from host, error %d", ret);
    }

    reg.id = RISCV_CONFIG_REG(env, mimpid);
    reg.addr = (uint64_t)&cpu->cfg.mimpid;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to retrieve mimpid from host, error %d", ret);
    }
}

static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
                                         KVMScratchCPU *kvmcpu)
{
    CPURISCVState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = RISCV_CONFIG_REG(env, isa);
    reg.addr = (uint64_t)&env->misa_ext_mask;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret) {
        error_report("Unable to fetch ISA register from KVM, "
                     "error %d", ret);
        kvm_riscv_destroy_scratch_vcpu(kvmcpu);
        exit(EXIT_FAILURE);
    }

    env->misa_ext = env->misa_ext_mask;
}

static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                         KVMCPUConfig *cbomz_cfg)
{
    CPURISCVState *env = &cpu->env;
    struct kvm_one_reg reg;
    int ret;

    reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
                                    cbomz_cfg->kvm_reg_id);
    reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
    ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        error_report("Unable to read KVM reg %s, error %d",
                     cbomz_cfg->name, ret);
        exit(EXIT_FAILURE);
    }
}

static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu,
                                           KVMScratchCPU *kvmcpu)
{
    CPURISCVState *env = &cpu->env;
    uint64_t val;
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        struct kvm_one_reg reg;

        reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT,
                                        multi_ext_cfg->kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            if (errno == EINVAL) {
                /* Silently default to 'false' if KVM does not support it. */
                multi_ext_cfg->supported = false;
                val = false;
            } else {
                error_report("Unable to read ISA_EXT KVM register %s: %s",
                             multi_ext_cfg->name, strerror(errno));
                exit(EXIT_FAILURE);
            }
        } else {
            multi_ext_cfg->supported = true;
        }

        kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
    }

    if (cpu->cfg.ext_zicbom) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
    }

    if (cpu->cfg.ext_zicboz) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
    }
}
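
/* Comparator for qsort()/bsearch() over the u64 ids of a kvm_reg_list. */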
static int uint64_cmp(const void *a, const void *b)
{
    uint64_t val1 = *(const uint64_t *)a;
    uint64_t val2 = *(const uint64_t *)b;

    if (val1 < val2) {
        return -1;
    }

    if (val1 > val2) {
        return 1;
    }

    return 0;
}

static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
                                             KVMScratchCPU *kvmcpu,
                                             struct kvm_reg_list *reglist)
{
    struct kvm_reg_list *reg_search;

    reg_search = bsearch(&kvm_sbi_dbcn.kvm_reg_id, reglist->reg, reglist->n,
                         sizeof(uint64_t), uint64_cmp);

    if (reg_search) {
        kvm_sbi_dbcn.supported = true;
    }
}

static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                 struct kvm_reg_list *reglist)
{
    struct kvm_one_reg reg;
    struct kvm_reg_list *reg_search;
    uint64_t val;
    int ret;

    reg_search = bsearch(&kvm_v_vlenb.kvm_reg_id, reglist->reg, reglist->n,
                         sizeof(uint64_t), uint64_cmp);

    if (reg_search) {
        reg.id = kvm_v_vlenb.kvm_reg_id;
        reg.addr = (uint64_t)&val;

        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_report("Unable to read vlenb register, error code: %d",
                         errno);
            exit(EXIT_FAILURE);
        }

        kvm_v_vlenb.supported = true;
        cpu->cfg.vlenb = val;
    }
}
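
/*
 * KVM_GET_REG_LIST is a two-step API: a first call with n = 0 fails
 * with E2BIG but fills in the register count, and a second call with
 * a properly sized buffer retrieves the actual list.
 */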
static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
{
    KVMCPUConfig *multi_ext_cfg;
    struct kvm_one_reg reg;
    struct kvm_reg_list rl_struct;
    struct kvm_reg_list *reglist;
    uint64_t val, reg_id, *reg_search;
    int i, ret;

    rl_struct.n = 0;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, &rl_struct);

    /*
     * If KVM_GET_REG_LIST isn't supported we'll get errno 22
     * (EINVAL). Use read_legacy() in this case.
     */
    if (errno == EINVAL) {
        return kvm_riscv_read_multiext_legacy(cpu, kvmcpu);
    } else if (errno != E2BIG) {
        /*
         * E2BIG is an expected errno for this API since we
         * don't know the number of registers. The right amount will
         * be written in rl_struct.n.
         *
         * Error out if we get any other errno.
         */
        error_report("Error when accessing get-reg-list: %s",
                     strerror(errno));
        exit(EXIT_FAILURE);
    }

    reglist = g_malloc(sizeof(struct kvm_reg_list) +
                       rl_struct.n * sizeof(uint64_t));
    reglist->n = rl_struct.n;
    ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist);
    if (ret) {
        error_report("Error when reading KVM_GET_REG_LIST: %s",
                     strerror(errno));
        exit(EXIT_FAILURE);
    }

    /* sort reglist to use bsearch() */
    qsort(&reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp);

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        multi_ext_cfg = &kvm_multi_ext_cfgs[i];
        reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT,
                                        multi_ext_cfg->kvm_reg_id);
        reg_search = bsearch(&reg_id, reglist->reg, reglist->n,
                             sizeof(uint64_t), uint64_cmp);
        if (!reg_search) {
            continue;
        }

        reg.id = reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_report("Unable to read ISA_EXT KVM register %s: %s",
                         multi_ext_cfg->name, strerror(errno));
            exit(EXIT_FAILURE);
        }

        multi_ext_cfg->supported = true;
        kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
    }

    if (cpu->cfg.ext_zicbom) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
    }

    if (cpu->cfg.ext_zicboz) {
        kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
    }

    if (riscv_has_ext(&cpu->env, RVV)) {
        kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
    }

    kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist);
}

static void riscv_init_kvm_registers(Object *cpu_obj)
{
    RISCVCPU *cpu = RISCV_CPU(cpu_obj);
    KVMScratchCPU kvmcpu;

    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        return;
    }

    kvm_riscv_init_machine_ids(cpu, &kvmcpu);
    kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
    kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);

    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_vector(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = state
        };

        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to sync MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            return -1;
        }
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_vector(cs);
    if (ret) {
        return ret;
    }

    if (KVM_PUT_RESET_STATE == level) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        if (cs->cpu_index == 0) {
            ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_RUNNABLE);
        } else {
            ret = kvm_riscv_sync_mpstate_to_kvm(cpu, KVM_MP_STATE_STOPPED);
        }
        if (ret) {
            return ret;
        }
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
    CPURISCVState *env = &cpu->env;
    target_ulong reg;
    uint64_t id;
    int ret;

    id = RISCV_CONFIG_REG(env, mvendorid);
    /*
     * cfg.mvendorid is a uint32 but a target_ulong will
     * be written. Assign it to a target_ulong var to avoid
     * writing pieces of other cpu->cfg fields in the reg.
     */
    reg = cpu->cfg.mvendorid;
    ret = kvm_set_one_reg(cs, id, &reg);
    if (ret != 0) {
        return ret;
    }

    id = RISCV_CONFIG_REG(env, marchid);
    ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
    if (ret != 0) {
        return ret;
    }

    id = RISCV_CONFIG_REG(env, mimpid);
    ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);

    return ret;
}

static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
{
    target_ulong reg = 1;

    if (!kvm_sbi_dbcn.supported) {
        return 0;
    }

    return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    RISCVCPU *cpu = RISCV_CPU(cs);

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
        ret = kvm_vcpu_set_machine_ids(cpu, cs);
        if (ret != 0) {
            return ret;
        }
    }

    kvm_riscv_update_cpu_misa_ext(cpu, cs);
    kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);

    ret = kvm_vcpu_enable_sbi_dbcn(cpu, cs);

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    if (kvm_kernel_irqchip_split()) {
        error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
        exit(1);
    }

    /*
     * We can create the VAIA using the newer device control API.
     */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}
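
/*
 * DBCN console read/write calling convention, as consumed below:
 * args[0] holds the byte count, args[1] the buffer address (with
 * args[2] carrying the high 32 bits on RV32); on return, ret[0] is
 * the SBI status and ret[1] the number of bytes processed.
 */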
static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
{
    g_autofree uint8_t *buf = NULL;
    RISCVCPU *cpu = RISCV_CPU(cs);
    target_ulong num_bytes;
    uint64_t addr;
    unsigned char ch;
    int ret;

    switch (run->riscv_sbi.function_id) {
    case SBI_EXT_DBCN_CONSOLE_READ:
    case SBI_EXT_DBCN_CONSOLE_WRITE:
        num_bytes = run->riscv_sbi.args[0];

        if (num_bytes == 0) {
            run->riscv_sbi.ret[0] = SBI_SUCCESS;
            run->riscv_sbi.ret[1] = 0;
            break;
        }

        addr = run->riscv_sbi.args[1];

        /*
         * Handle the case where a 32 bit CPU is running in a
         * 64 bit addressing env.
         */
        if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
            addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
        }

        buf = g_malloc0(num_bytes);

        if (run->riscv_sbi.function_id == SBI_EXT_DBCN_CONSOLE_READ) {
            ret = qemu_chr_fe_read_all(serial_hd(0)->be, buf, num_bytes);
            if (ret < 0) {
                error_report("SBI_EXT_DBCN_CONSOLE_READ: error when "
                             "reading chardev");
                exit(1);
            }

            cpu_physical_memory_write(addr, buf, ret);
        } else {
            cpu_physical_memory_read(addr, buf, num_bytes);

            ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
            if (ret < 0) {
                error_report("SBI_EXT_DBCN_CONSOLE_WRITE: error when "
                             "writing chardev");
                exit(1);
            }
        }

        run->riscv_sbi.ret[0] = SBI_SUCCESS;
        run->riscv_sbi.ret[1] = ret;
        break;
    case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
        ch = run->riscv_sbi.args[0];
        ret = qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret < 0) {
            error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when "
                         "writing chardev");
            exit(1);
        }

        run->riscv_sbi.ret[0] = SBI_SUCCESS;
        run->riscv_sbi.ret[1] = 0;
        break;
    default:
        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
    }
}

static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;

    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.ret[0] = ch;
        } else {
            run->riscv_sbi.ret[0] = -1;
        }
        ret = 0;
        break;
    case SBI_EXT_DBCN:
        kvm_riscv_handle_sbi_dbcn(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: unhandled SBI exit, extension id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }

    return ret;
}

static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
{
    target_ulong csr_num = run->riscv_csr.csr_num;
    target_ulong new_value = run->riscv_csr.new_value;
    target_ulong write_mask = run->riscv_csr.write_mask;
    int ret = 0;

    switch (csr_num) {
    case CSR_SEED:
        run->riscv_csr.ret_value = riscv_new_csr_seed(new_value, write_mask);
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: unhandled CSR exit for CSR %lx\n",
                      __func__, csr_num);
        ret = -1;
        break;
    }

    return ret;
}

static bool kvm_riscv_handle_debug(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    if (kvm_find_sw_breakpoint(cs, env->pc)) {
        return true;
    }

    return false;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    case KVM_EXIT_RISCV_CSR:
        ret = kvm_riscv_handle_csr(cs, run);
        break;
    case KVM_EXIT_DEBUG:
        if (kvm_riscv_handle_debug(cs)) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unhandled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    int i;

    if (!kvm_enabled()) {
        return;
    }

    for (i = 0; i < 32; i++) {
        env->gpr[i] = 0;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */
    env->satp = 0;
    env->mie = 0;
    env->stvec = 0;
    env->sscratch = 0;
    env->sepc = 0;
    env->scause = 0;
    env->stval = 0;
    env->mip = 0;
}

void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

static int aia_mode;

static const char *kvm_aia_mode_str(uint64_t mode)
{
    switch (mode) {
    case KVM_DEV_RISCV_AIA_MODE_EMUL:
        return "emul";
    case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
        return "hwaccel";
    case KVM_DEV_RISCV_AIA_MODE_AUTO:
    default:
        return "auto";
    }
}

static char *riscv_get_kvm_aia(Object *obj, Error **errp)
{
    return g_strdup(kvm_aia_mode_str(aia_mode));
}

static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
{
    if (!strcmp(val, "emul")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
    } else if (!strcmp(val, "hwaccel")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
    } else if (!strcmp(val, "auto")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
    } else {
        error_setg(errp, "Invalid KVM AIA mode");
        error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
    }
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
                                  riscv_set_kvm_aia);
    object_class_property_set_description(oc, "riscv-aia",
                                          "Set KVM AIA mode. Valid values are "
                                          "emul, hwaccel, and auto. Default "
                                          "is auto.");
    object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
                                    "auto");
}
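
/*
 * Creates and configures the in-kernel AIA device: mode, irq and msi
 * counts, group/guest/hart geometry, APLIC and per-hart IMSIC
 * addresses, then GSI routing and the final init control.
 */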
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
                          uint64_t aia_irq_num, uint64_t aia_msi_num,
                          uint64_t aplic_base, uint64_t imsic_base,
                          uint64_t guest_num)
{
    int ret, i;
    int aia_fd = -1;
    uint64_t default_aia_mode;
    uint64_t socket_count = riscv_socket_count(machine);
    uint64_t max_hart_per_socket = 0;
    uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
    uint64_t socket_bits, hart_bits, guest_bits;
    uint64_t max_group_id;

    aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
    if (aia_fd < 0) {
        error_report("Unable to create in-kernel irqchip");
        exit(1);
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_MODE,
                            &default_aia_mode, false, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to get current KVM AIA mode");
        exit(1);
    }
    qemu_log("KVM AIA: default mode is %s\n",
             kvm_aia_mode_str(default_aia_mode));

    if (default_aia_mode != aia_mode) {
        ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                                KVM_DEV_RISCV_AIA_CONFIG_MODE,
                                &aia_mode, true, NULL);
        if (ret < 0) {
            warn_report("KVM AIA: failed to set KVM AIA mode");
        } else {
            qemu_log("KVM AIA: set current mode to %s\n",
                     kvm_aia_mode_str(aia_mode));
        }
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_SRCS,
                            &aia_irq_num, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set number of input irq lines");
        exit(1);
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_IDS,
                            &aia_msi_num, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set number of msi");
        exit(1);
    }

    if (socket_count > 1) {
        max_group_id = socket_count - 1;
        socket_bits = find_last_bit(&max_group_id, BITS_PER_LONG) + 1;
        ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                                KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
                                &socket_bits, true, NULL);
        if (ret < 0) {
            error_report("KVM AIA: failed to set group_bits");
            exit(1);
        }

        ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                                KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
                                &group_shift, true, NULL);
        if (ret < 0) {
            error_report("KVM AIA: failed to set group_shift");
            exit(1);
        }
    }

    guest_bits = guest_num == 0 ? 0 :
                 find_last_bit(&guest_num, BITS_PER_LONG) + 1;
    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
                            &guest_bits, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set guest_bits");
        exit(1);
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                            KVM_DEV_RISCV_AIA_ADDR_APLIC,
                            &aplic_base, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set the base address of APLIC");
        exit(1);
    }

    for (socket = 0; socket < socket_count; socket++) {
        socket_imsic_base = imsic_base + socket * (1U << group_shift);
        hart_count = riscv_socket_hart_count(machine, socket);
        base_hart = riscv_socket_first_hartid(machine, socket);

        if (max_hart_per_socket < hart_count) {
            max_hart_per_socket = hart_count;
        }

        for (i = 0; i < hart_count; i++) {
            imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
            ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
                                    KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
                                    &imsic_addr, true, NULL);
            if (ret < 0) {
                error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
                exit(1);
            }
        }
    }

    if (max_hart_per_socket > 1) {
        max_hart_per_socket--;
        hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
    } else {
        hart_bits = 0;
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
                            KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
                            &hart_bits, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: failed to set hart_bits");
        exit(1);
    }

    if (kvm_has_gsi_routing()) {
        for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
            /* KVM AIA only has one APLIC instance */
            kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
        }
        kvm_gsi_routing_allowed = true;
        kvm_irqchip_commit_routes(kvm_state);
    }

    ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
                            KVM_DEV_RISCV_AIA_CTRL_INIT,
                            NULL, true, NULL);
    if (ret < 0) {
        error_report("KVM AIA: initialization failed");
        exit(1);
    }

    kvm_msi_via_irqfd_allowed = true;
}
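
/*
 * Accel hook run for each vCPU object: pull the KVM register state and
 * expose the KVM-specific user-facing CPU properties.
 */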
static void kvm_cpu_instance_init(CPUState *cs)
{
    Object *obj = OBJECT(RISCV_CPU(cs));

    riscv_init_kvm_registers(obj);
    kvm_riscv_add_cpu_user_properties(obj);
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> kvm_cpu_realize() (via accel_cpu_common_realize())
 */
static bool kvm_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    int ret;

    if (riscv_has_ext(&cpu->env, RVV)) {
        /* Enable vector state for this thread before the vCPU runs. */
        ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON);
        if (ret) {
            error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s",
                       strerrorname_np(errno));
            return false;
        }
    }

    return true;
}
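
/*
 * Validate host-dependent properties at the end of feature finalization.
 * cbom/cboz block sizes and vlenb are read-only host values under KVM, so
 * spin up a scratch vCPU and verify that any user-provided values match
 * what the host reports.
 */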
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    KVMScratchCPU kvmcpu;
    struct kvm_one_reg reg;
    uint64_t val;
    int ret;

    /* short-circuit without spinning the scratch CPU */
    if (!cpu->cfg.ext_zicbom && !cpu->cfg.ext_zicboz &&
        !riscv_has_ext(env, RVV)) {
        return;
    }

    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        error_setg(errp, "Unable to create scratch KVM cpu");
        return;
    }

    if (cpu->cfg.ext_zicbom &&
        riscv_cpu_option_set(kvm_cbom_blocksize.name)) {
        reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
                                        kvm_cbom_blocksize.kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read cbom_blocksize, error %d", errno);
            goto out;
        }

        if (cpu->cfg.cbom_blocksize != val) {
            error_setg(errp, "Unable to set cbom_blocksize to a different "
                       "value than the host (%lu)", val);
            goto out;
        }
    }

    if (cpu->cfg.ext_zicboz &&
        riscv_cpu_option_set(kvm_cboz_blocksize.name)) {
        reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG,
                                        kvm_cboz_blocksize.kvm_reg_id);
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read cboz_blocksize, error %d", errno);
            goto out;
        }

        if (cpu->cfg.cboz_blocksize != val) {
            error_setg(errp, "Unable to set cboz_blocksize to a different "
                       "value than the host (%lu)", val);
            goto out;
        }
    }

    /* Users are setting vlen, not vlenb */
    if (riscv_has_ext(env, RVV) && riscv_cpu_option_set("vlen")) {
        if (!kvm_v_vlenb.supported) {
            error_setg(errp, "Unable to set 'vlenb': register not supported");
            goto out;
        }

        reg.id = kvm_v_vlenb.kvm_reg_id;
        reg.addr = (uint64_t)&val;
        ret = ioctl(kvmcpu.cpufd, KVM_GET_ONE_REG, &reg);
        if (ret != 0) {
            error_setg(errp, "Unable to read vlenb register, error %d", errno);
            goto out;
        }

        if (cpu->cfg.vlenb != val) {
            error_setg(errp, "Unable to set 'vlen' to a different "
                       "value than the host (%lu)", val * 8);
            goto out;
        }
    }

out:
    /* Destroy the scratch vCPU on all paths so its fds don't leak. */
    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
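
/* Wire the KVM instance-init and realize hooks into the accel CPU class. */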
static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_instance_init = kvm_cpu_instance_init;
    acc->cpu_target_realize = kvm_cpu_realize;
}

static const TypeInfo kvm_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("kvm"),
    .parent = TYPE_ACCEL_CPU,
    .class_init = kvm_cpu_accel_class_init,
    .abstract = true,
};

static void kvm_cpu_accel_register_types(void)
{
    type_register_static(&kvm_cpu_accel_type_info);
}
type_init(kvm_cpu_accel_register_types);
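
/*
 * The KVM-only "host" CPU model: the guest sees what the host vCPU
 * provides, so the class only pins down the maximum MXL for the target.
 */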
static void riscv_host_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);

#if defined(TARGET_RISCV32)
    mcc->misa_mxl_max = MXL_RV32;
#elif defined(TARGET_RISCV64)
    mcc->misa_mxl_max = MXL_RV64;
#endif
}

static const TypeInfo riscv_kvm_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU_HOST,
        .parent = TYPE_RISCV_CPU,
        .class_init = riscv_host_cpu_class_init,
    }
};

DEFINE_TYPES(riscv_kvm_cpu_type_infos)
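
/*
 * Software breakpoints are planted by patching in an ebreak: the 32-bit
 * encoding (0x00100073) for standard instructions, or the 16-bit c.ebreak
 * encoding (0x9002) for compressed ones.
 */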
static const uint32_t ebreak_insn = 0x00100073;
static const uint16_t c_ebreak_insn = 0x9002;
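
/*
 * Standard RISC-V instructions have 0b11 in the two lowest opcode bits;
 * anything else is a 16-bit compressed instruction. Peek at the first
 * halfword to decide how many bytes to save and which ebreak to plant.
 */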
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 0)) {
        return -EINVAL;
    }

    if ((bp->saved_insn & 0x3) == 0x3) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0)
            || cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak_insn, 4, 1)) {
            return -EINVAL;
        }
    } else {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak_insn, 2, 1)) {
            return -EINVAL;
        }
    }

    return 0;
}
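
/* Verify the planted ebreak is still there before restoring the original. */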
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t ebreak;
    uint16_t c_ebreak;

    if ((bp->saved_insn & 0x3) == 0x3) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&ebreak, 4, 0) ||
            ebreak != ebreak_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
    } else {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&c_ebreak, 2, 0) ||
            c_ebreak != c_ebreak_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 2, 1)) {
            return -EINVAL;
        }
    }

    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    /* TODO: to be implemented later. */
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    /* TODO: to be implemented later. */
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    /* TODO: to be implemented later. */
}
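
/* Enable KVM guest debug whenever software breakpoints are active. */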
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE;
    }
}