  1. /*
  2. * Target-specific parts of the CPU object
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"
#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "translate-all.h"
#include "exec/log.h"

/* Host page geometry; set up by page_size_init() below (0 until then). */
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
  39. #ifndef CONFIG_USER_ONLY
  40. static int cpu_common_post_load(void *opaque, int version_id)
  41. {
  42. CPUState *cpu = opaque;
  43. /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
  44. version_id is increased. */
  45. cpu->interrupt_request &= ~0x01;
  46. tlb_flush(cpu);
  47. /* loadvm has just updated the content of RAM, bypassing the
  48. * usual mechanisms that ensure we flush TBs for writes to
  49. * memory we've translated code from. So we must flush all TBs,
  50. * which will now be stale.
  51. */
  52. tb_flush(cpu);
  53. return 0;
  54. }
  55. static int cpu_common_pre_load(void *opaque)
  56. {
  57. CPUState *cpu = opaque;
  58. cpu->exception_index = -1;
  59. return 0;
  60. }
  61. static bool cpu_common_exception_index_needed(void *opaque)
  62. {
  63. CPUState *cpu = opaque;
  64. return tcg_enabled() && cpu->exception_index != -1;
  65. }
/* Migration subsection; only emitted when cpu_common_exception_index_needed()
 * returns true (TCG with a pending exception). */
static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
  76. static bool cpu_common_crash_occurred_needed(void *opaque)
  77. {
  78. CPUState *cpu = opaque;
  79. return cpu->crash_occurred;
  80. }
/* Migration subsection; only emitted when crash_occurred is set, see
 * cpu_common_crash_occurred_needed(). */
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Common migration state for every CPU: halted/interrupt_request in the
 * main section, plus optional subsections above.  Registered from
 * cpu_exec_realizefn() when the CPU device has no vmsd of its own.
 */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
  108. #endif
/*
 * Undo cpu_exec_realizefn(): destroy the TLB, drop the CPU from the global
 * CPU list, and unregister whatever migration state realize registered.
 */
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    tlb_destroy(cpu);
    cpu_list_remove(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else
    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    /* Mirrors realize: vmstate_cpu_common was only registered when the
       device itself provides no vmsd. */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
    tcg_iommu_free_notifier_list(cpu);
#endif
}
/* qdev properties shared by every CPU object. */
Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in hw/core/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Instance-init-time setup for a CPU: no address spaces yet; in softmmu
 * builds, default the memory link to the system address space (taking a
 * reference that unrealize/finalize releases).
 */
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}
/*
 * Realize-time setup: add the CPU to the global list, lazily run the
 * one-time TCG target init, set up the TLB, and register migration state.
 *
 * @errp is part of the realize contract; this function currently has no
 * failure path of its own.
 */
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    static bool tcg_target_initialized;  /* one-shot across all CPUs */

    cpu_list_add(cpu);

    if (tcg_enabled() && !tcg_target_initialized) {
        tcg_target_initialized = true;
        cc->tcg_initialize();
    }
    tlb_init(cpu);

    qemu_plugin_vcpu_init_hook(cpu);

#ifdef CONFIG_USER_ONLY
    assert(cc->vmsd == NULL);
#else /* !CONFIG_USER_ONLY */
    /* Register common state only when the device has no vmsd of its own;
       unrealize relies on the same condition to unregister. */
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
    tcg_iommu_init_notifier_list(cpu);
#endif
}
  173. const char *parse_cpu_option(const char *cpu_option)
  174. {
  175. ObjectClass *oc;
  176. CPUClass *cc;
  177. gchar **model_pieces;
  178. const char *cpu_type;
  179. model_pieces = g_strsplit(cpu_option, ",", 2);
  180. if (!model_pieces[0]) {
  181. error_report("-cpu option cannot be empty");
  182. exit(1);
  183. }
  184. oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
  185. if (oc == NULL) {
  186. error_report("unable to find CPU model '%s'", model_pieces[0]);
  187. g_strfreev(model_pieces);
  188. exit(EXIT_FAILURE);
  189. }
  190. cpu_type = object_class_get_name(oc);
  191. cc = CPU_CLASS(oc);
  192. cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
  193. g_strfreev(model_pieces);
  194. return cpu_type;
  195. }
  196. #if defined(CONFIG_USER_ONLY)
/* Drop any TBs translated from the page containing @addr.  In user-mode
 * emulation guest addresses map directly, so no address-space translation
 * is needed; only the mmap lock protects the page tables. */
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page_range(addr, addr + 1);
    mmap_unlock();
}
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Any TB translated across @pc is stale once a breakpoint is
       inserted or removed there; force retranslation of that page. */
    tb_invalidate_phys_addr(pc);
}
  207. #else
/*
 * Softmmu variant: translate (@as, @addr) to a RAM offset and invalidate
 * the TBs derived from that byte.  No-op without TCG (no TBs exist) or
 * when the address does not resolve to RAM/ROMD (nothing translatable).
 */
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    /* Guard keeps @mr alive across translate + invalidate. */
    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /*
     * There may not be a virtual to physical translation for the pc
     * right now, but there may exist cached TB for this pc.
     * Flush the whole TB cache to force re-translation of such TBs.
     * This is heavyweight, but we're debugging anyway.
     */
    tb_flush(cpu);
}
  235. #endif
  236. /* Add a breakpoint. */
  237. int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
  238. CPUBreakpoint **breakpoint)
  239. {
  240. CPUBreakpoint *bp;
  241. bp = g_malloc(sizeof(*bp));
  242. bp->pc = pc;
  243. bp->flags = flags;
  244. /* keep all GDB-injected breakpoints in front */
  245. if (flags & BP_GDB) {
  246. QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
  247. } else {
  248. QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
  249. }
  250. breakpoint_invalidate(cpu, pc);
  251. if (breakpoint) {
  252. *breakpoint = bp;
  253. }
  254. return 0;
  255. }
  256. /* Remove a specific breakpoint. */
  257. int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
  258. {
  259. CPUBreakpoint *bp;
  260. QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
  261. if (bp->pc == pc && bp->flags == flags) {
  262. cpu_breakpoint_remove_by_ref(cpu, bp);
  263. return 0;
  264. }
  265. }
  266. return -ENOENT;
  267. }
/* Remove a specific breakpoint by reference.  Unlinks @breakpoint from the
 * CPU's list, invalidates the translated code at its pc, and frees it; the
 * pointer is dangling afterwards. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
    breakpoint_invalidate(cpu, breakpoint->pc);
    g_free(breakpoint);
}
  275. /* Remove all matching breakpoints. */
  276. void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
  277. {
  278. CPUBreakpoint *bp, *next;
  279. QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
  280. if (bp->flags & mask) {
  281. cpu_breakpoint_remove_by_ref(cpu, bp);
  282. }
  283. }
  284. }
  285. /* enable or disable single step mode. EXCP_DEBUG is returned by the
  286. CPU loop after each instruction */
  287. void cpu_single_step(CPUState *cpu, int enabled)
  288. {
  289. if (cpu->singlestep_enabled != enabled) {
  290. cpu->singlestep_enabled = enabled;
  291. if (kvm_enabled()) {
  292. kvm_update_guest_debug(cpu, 0);
  293. } else {
  294. /* must flush all the translated code to avoid inconsistencies */
  295. /* XXX: only flush what is necessary */
  296. tb_flush(cpu);
  297. }
  298. }
  299. }
/*
 * Report a fatal guest error and abort QEMU.
 *
 * Prints the printf-style message and CPU state to stderr, and to the QEMU
 * log when it goes to a separate file; the second va_list copy is needed
 * because the format arguments are consumed twice.  Never returns.
 */
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);  /* ap is consumed by the stderr pass below */
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock(logfile);
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so abort() below really
           terminates instead of re-entering a guest-installed handler. */
        struct sigaction act;

        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
  334. /* physical memory access (slow version, mainly for debug) */
  335. #if defined(CONFIG_USER_ONLY)
  336. int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
  337. void *ptr, target_ulong len, bool is_write)
  338. {
  339. int flags;
  340. target_ulong l, page;
  341. void * p;
  342. uint8_t *buf = ptr;
  343. while (len > 0) {
  344. page = addr & TARGET_PAGE_MASK;
  345. l = (page + TARGET_PAGE_SIZE) - addr;
  346. if (l > len)
  347. l = len;
  348. flags = page_get_flags(page);
  349. if (!(flags & PAGE_VALID))
  350. return -1;
  351. if (is_write) {
  352. if (!(flags & PAGE_WRITE))
  353. return -1;
  354. /* XXX: this code should not depend on lock_user */
  355. if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
  356. return -1;
  357. memcpy(p, buf, l);
  358. unlock_user(p, addr, l);
  359. } else {
  360. if (!(flags & PAGE_READ))
  361. return -1;
  362. /* XXX: this code should not depend on lock_user */
  363. if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
  364. return -1;
  365. memcpy(buf, p, l);
  366. unlock_user(p, addr, 0);
  367. }
  368. len -= l;
  369. buf += l;
  370. addr += l;
  371. }
  372. return 0;
  373. }
  374. #endif
  375. bool target_words_bigendian(void)
  376. {
  377. #if defined(TARGET_WORDS_BIGENDIAN)
  378. return true;
  379. #else
  380. return false;
  381. #endif
  382. }
  383. void page_size_init(void)
  384. {
  385. /* NOTE: we can always suppose that qemu_host_page_size >=
  386. TARGET_PAGE_SIZE */
  387. if (qemu_host_page_size == 0) {
  388. qemu_host_page_size = qemu_real_host_page_size;
  389. }
  390. if (qemu_host_page_size < TARGET_PAGE_SIZE) {
  391. qemu_host_page_size = TARGET_PAGE_SIZE;
  392. }
  393. qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
  394. }