tcg-accel-ops.c

/*
 * QEMU TCG vCPU common functionality
 *
 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "system/tcg.h"
#include "system/replay.h"
#include "system/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "gdbstub/enums.h"
#include "hw/core/cpu.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"

/* common functionality among all TCG variants */
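
/*
 * Compile flags (cflags) form part of the key used to look up translation
 * blocks.  CF_PARALLEL requests code generation that is safe when other
 * vCPUs run in parallel (MTTCG), and CF_USE_ICOUNT makes the generated
 * code maintain the instruction counter for icount mode.
 */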
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    uint32_t cflags;

    /*
     * Include the cluster number in the hash we use to look up TBs.
     * This is important because a TB that is valid for one cluster at
     * a given physical address and set of CPU flags is not necessarily
     * valid for another:
     * the two clusters may have different views of physical memory, or
     * may have different CPU features (eg FPU present or absent).
     */
    cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;

    tcg_cflags_set(cpu, cflags);
}

void tcg_cpu_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}
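
/*
 * cpu_exec_start()/cpu_exec_end() bracket guest execution so that the
 * cpu-list "exclusive work" machinery can tell which vCPUs are currently
 * running and wait for them when an exclusive section is requested.
 */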
int tcg_cpu_exec(CPUState *cpu)
{
    int ret;

    assert(tcg_enabled());
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
    return ret;
}
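
/*
 * On CPU reset, drop cached translation state: the per-vCPU jump cache
 * and the softmmu TLB may refer to translations and mappings that are no
 * longer valid for the reset CPU.
 */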
static void tcg_cpu_reset_hold(CPUState *cpu)
{
    tcg_flush_jmp_cache(cpu);
    tlb_flush(cpu);
}

/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    g_assert(bql_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
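        /*
         * Called on the vCPU's own thread: make icount_decr go negative so
         * the next check in generated code exits the translation-block loop
         * and the pending interrupt is serviced.
         */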
        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
    }
}

static bool tcg_supports_guest_debug(void)
{
    return true;
}

/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
    static const int xlat[] = {
        [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

    if (cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
}
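
/*
 * GDB breakpoints and watchpoints are applied to every vCPU, so the debug
 * stub behaves the same regardless of which CPU hits the address.  The
 * first failing insertion or removal stops the loop and its error code is
 * returned to the gdbstub.
 */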
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_remove(cpu, addr, len,
                                        xlat_gdb_type(cpu, type));
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}

static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}
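
/*
 * Select the handlers for the chosen TCG threading mode: MTTCG runs one
 * thread per vCPU, while the single-threaded round-robin (rr) loop is used
 * otherwise.  With icount, interrupt handling and the virtual clock hooks
 * are routed through the icount implementation.
 */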
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
    } else {
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;

        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
            ops->get_virtual_clock = icount_get;
            ops->get_elapsed_ticks = icount_get;
        } else {
            ops->handle_interrupt = tcg_handle_interrupt;
        }
    }

    ops->cpu_reset_hold = tcg_cpu_reset_hold;
    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}

static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}
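
/*
 * QOM boilerplate: register the "tcg" AccelOpsClass so the accelerator
 * core can look these ops up by name when TCG is the selected accelerator.
 */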
static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),
    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));

static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);