/*
 * TCG CPU-specific operations
 *
 * Copyright 2021 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
  9. #ifndef TCG_CPU_OPS_H
  10. #define TCG_CPU_OPS_H
  11. #include "exec/breakpoint.h"
  12. #include "exec/hwaddr.h"
  13. #include "exec/memattrs.h"
  14. #include "exec/memop.h"
  15. #include "exec/mmu-access-type.h"
  16. #include "exec/vaddr.h"
  17. struct TCGCPUOps {
  18. /**
  19. * @initialize: Initialize TCG state
  20. *
  21. * Called when the first CPU is realized.
  22. */
  23. void (*initialize)(void);
  24. /**
  25. * @translate_code: Translate guest instructions to TCGOps
  26. * @cpu: cpu context
  27. * @tb: translation block
  28. * @max_insns: max number of instructions to translate
  29. * @pc: guest virtual program counter address
  30. * @host_pc: host physical program counter address
  31. *
  32. * This function must be provided by the target, which should create
  33. * the target-specific DisasContext, and then invoke translator_loop.
  34. */
  35. void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
  36. int *max_insns, vaddr pc, void *host_pc);
  37. /**
  38. * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
  39. *
  40. * This is called when we abandon execution of a TB before starting it,
  41. * and must set all parts of the CPU state which the previous TB in the
  42. * chain may not have updated.
  43. * By default, when this is NULL, a call is made to @set_pc(tb->pc).
  44. *
  45. * If more state needs to be restored, the target must implement a
  46. * function to restore all the state, and register it here.
  47. */
  48. void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
  49. /**
  50. * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
  51. *
  52. * This is called when we unwind state in the middle of a TB,
  53. * usually before raising an exception. Set all part of the CPU
  54. * state which are tracked insn-by-insn in the target-specific
  55. * arguments to start_insn, passed as @data.
  56. */
  57. void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
  58. const uint64_t *data);
  59. /** @cpu_exec_enter: Callback for cpu_exec preparation */
  60. void (*cpu_exec_enter)(CPUState *cpu);
  61. /** @cpu_exec_exit: Callback for cpu_exec cleanup */
  62. void (*cpu_exec_exit)(CPUState *cpu);
  63. /** @debug_excp_handler: Callback for handling debug exceptions */
  64. void (*debug_excp_handler)(CPUState *cpu);
  65. #ifdef CONFIG_USER_ONLY
  66. /**
  67. * @fake_user_interrupt: Callback for 'fake exception' handling.
  68. *
  69. * Simulate 'fake exception' which will be handled outside the
  70. * cpu execution loop (hack for x86 user mode).
  71. */
  72. void (*fake_user_interrupt)(CPUState *cpu);
  73. /**
  74. * record_sigsegv:
  75. * @cpu: cpu context
  76. * @addr: faulting guest address
  77. * @access_type: access was read/write/execute
  78. * @maperr: true for invalid page, false for permission fault
  79. * @ra: host pc for unwinding
  80. *
  81. * We are about to raise SIGSEGV with si_code set for @maperr,
  82. * and si_addr set for @addr. Record anything further needed
  83. * for the signal ucontext_t.
  84. *
  85. * If the emulated kernel does not provide anything to the signal
  86. * handler with anything besides the user context registers, and
  87. * the siginfo_t, then this hook need do nothing and may be omitted.
  88. * Otherwise, record the data and return; the caller will raise
  89. * the signal, unwind the cpu state, and return to the main loop.
  90. *
  91. * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
  92. * so that a "normal" cpu exception can be raised. In this case,
  93. * the signal must be raised by the architecture cpu_loop.
  94. */
  95. void (*record_sigsegv)(CPUState *cpu, vaddr addr,
  96. MMUAccessType access_type,
  97. bool maperr, uintptr_t ra);
  98. /**
  99. * record_sigbus:
  100. * @cpu: cpu context
  101. * @addr: misaligned guest address
  102. * @access_type: access was read/write/execute
  103. * @ra: host pc for unwinding
  104. *
  105. * We are about to raise SIGBUS with si_code BUS_ADRALN,
  106. * and si_addr set for @addr. Record anything further needed
  107. * for the signal ucontext_t.
  108. *
  109. * If the emulated kernel does not provide the signal handler with
  110. * anything besides the user context registers, and the siginfo_t,
  111. * then this hook need do nothing and may be omitted.
  112. * Otherwise, record the data and return; the caller will raise
  113. * the signal, unwind the cpu state, and return to the main loop.
  114. *
  115. * If it is simpler to re-use the sysemu do_unaligned_access code,
  116. * @ra is provided so that a "normal" cpu exception can be raised.
  117. * In this case, the signal must be raised by the architecture cpu_loop.
  118. */
  119. void (*record_sigbus)(CPUState *cpu, vaddr addr,
  120. MMUAccessType access_type, uintptr_t ra);
  121. #else
  122. /** @do_interrupt: Callback for interrupt handling. */
  123. void (*do_interrupt)(CPUState *cpu);
  124. /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
  125. bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
  126. /**
  127. * @cpu_exec_halt: Callback for handling halt in cpu_exec.
  128. *
  129. * The target CPU should do any special processing here that it needs
  130. * to do when the CPU is in the halted state.
  131. *
  132. * Return true to indicate that the CPU should now leave halt, false
  133. * if it should remain in the halted state. (This should generally
  134. * be the same value that cpu_has_work() would return.)
  135. *
  136. * This method must be provided. If the target does not need to
  137. * do anything special for halt, the same function used for its
  138. * CPUClass::has_work method can be used here, as they have the
  139. * same function signature.
  140. */
  141. bool (*cpu_exec_halt)(CPUState *cpu);
  142. /**
  143. * @tlb_fill_align: Handle a softmmu tlb miss
  144. * @cpu: cpu context
  145. * @out: output page properties
  146. * @addr: virtual address
  147. * @access_type: read, write or execute
  148. * @mmu_idx: mmu context
  149. * @memop: memory operation for the access
  150. * @size: memory access size, or 0 for whole page
  151. * @probe: test only, no fault
  152. * @ra: host return address for exception unwind
  153. *
  154. * If the access is valid, fill in @out and return true.
  155. * Otherwise if probe is true, return false.
  156. * Otherwise raise an exception and do not return.
  157. *
  158. * The alignment check for the access is deferred to this hook,
  159. * so that the target can determine the priority of any alignment
  160. * fault with respect to other potential faults from paging.
  161. * Zero may be passed for @memop to skip any alignment check
  162. * for non-memory-access operations such as probing.
  163. */
  164. bool (*tlb_fill_align)(CPUState *cpu, CPUTLBEntryFull *out, vaddr addr,
  165. MMUAccessType access_type, int mmu_idx,
  166. MemOp memop, int size, bool probe, uintptr_t ra);
  167. /**
  168. * @tlb_fill: Handle a softmmu tlb miss
  169. *
  170. * If the access is valid, call tlb_set_page and return true;
  171. * if the access is invalid and probe is true, return false;
  172. * otherwise raise an exception and do not return.
  173. */
  174. bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
  175. MMUAccessType access_type, int mmu_idx,
  176. bool probe, uintptr_t retaddr);
  177. /**
  178. * @do_transaction_failed: Callback for handling failed memory transactions
  179. * (ie bus faults or external aborts; not MMU faults)
  180. */
  181. void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
  182. unsigned size, MMUAccessType access_type,
  183. int mmu_idx, MemTxAttrs attrs,
  184. MemTxResult response, uintptr_t retaddr);
  185. /**
  186. * @do_unaligned_access: Callback for unaligned access handling
  187. * The callback must exit via raising an exception.
  188. */
  189. G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
  190. MMUAccessType access_type,
  191. int mmu_idx, uintptr_t retaddr);
  192. /**
  193. * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
  194. */
  195. vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
  196. /**
  197. * @debug_check_watchpoint: return true if the architectural
  198. * watchpoint whose address has matched should really fire, used by ARM
  199. * and RISC-V
  200. */
  201. bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
  202. /**
  203. * @debug_check_breakpoint: return true if the architectural
  204. * breakpoint whose PC has matched should really fire.
  205. */
  206. bool (*debug_check_breakpoint)(CPUState *cpu);
  207. /**
  208. * @io_recompile_replay_branch: Callback for cpu_io_recompile.
  209. *
  210. * The cpu has been stopped, and cpu_restore_state_from_tb has been
  211. * called. If the faulting instruction is in a delay slot, and the
  212. * target architecture requires re-execution of the branch, then
  213. * adjust the cpu state as required and return true.
  214. */
  215. bool (*io_recompile_replay_branch)(CPUState *cpu,
  216. const TranslationBlock *tb);
  217. /**
  218. * @need_replay_interrupt: Return %true if @interrupt_request
  219. * needs to be recorded for replay purposes.
  220. */
  221. bool (*need_replay_interrupt)(int interrupt_request);
  222. #endif /* !CONFIG_USER_ONLY */
  223. };
  224. #if defined(CONFIG_USER_ONLY)
  225. static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
  226. MemTxAttrs atr, int fl, uintptr_t ra)
  227. {
  228. }
  229. static inline int cpu_watchpoint_address_matches(CPUState *cpu,
  230. vaddr addr, vaddr len)
  231. {
  232. return 0;
  233. }
  234. #else
  235. /**
  236. * cpu_check_watchpoint:
  237. * @cpu: cpu context
  238. * @addr: guest virtual address
  239. * @len: access length
  240. * @attrs: memory access attributes
  241. * @flags: watchpoint access type
  242. * @ra: unwind return address
  243. *
  244. * Check for a watchpoint hit in [addr, addr+len) of the type
  245. * specified by @flags. Exit via exception with a hit.
  246. */
  247. void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
  248. MemTxAttrs attrs, int flags, uintptr_t ra);
  249. /**
  250. * cpu_watchpoint_address_matches:
  251. * @cpu: cpu context
  252. * @addr: guest virtual address
  253. * @len: access length
  254. *
  255. * Return the watchpoint flags that apply to [addr, addr+len).
  256. * If no watchpoint is registered for the range, the result is 0.
  257. */
  258. int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
  259. #endif
  260. #endif /* TCG_CPU_OPS_H */