@@ -123,10 +123,10 @@ static MemoryRegion io_mem_subpage_ram;
 
 #endif
 
-CPUState *first_cpu;
+CPUArchState *first_cpu;
 /* current CPU in the current thread. It is only valid inside
    cpu_exec() */
-DEFINE_TLS(CPUState *,cpu_single_env);
+DEFINE_TLS(CPUArchState *,cpu_single_env);
 /* 0 = Do not count executed instructions.
    1 = Precise instruction counting.
    2 = Adaptive rate instruction counting. */
@@ -509,7 +509,7 @@ static target_phys_addr_t section_addr(MemoryRegionSection *section,
 }
 
 static void tlb_protect_code(ram_addr_t ram_addr);
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
 #define mmap_lock() do { } while(0)
 #define mmap_unlock() do { } while(0)
@@ -661,7 +661,7 @@ void cpu_exec_init_all(void)
 
 static int cpu_common_post_load(void *opaque, int version_id)
 {
-    CPUState *env = opaque;
+    CPUArchState *env = opaque;
 
     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
        version_id is increased. */
@@ -678,16 +678,16 @@ static const VMStateDescription vmstate_cpu_common = {
     .minimum_version_id_old = 1,
     .post_load = cpu_common_post_load,
     .fields = (VMStateField []) {
-        VMSTATE_UINT32(halted, CPUState),
-        VMSTATE_UINT32(interrupt_request, CPUState),
+        VMSTATE_UINT32(halted, CPUArchState),
+        VMSTATE_UINT32(interrupt_request, CPUArchState),
         VMSTATE_END_OF_LIST()
     }
 };
 #endif
 
-CPUState *qemu_get_cpu(int cpu)
+CPUArchState *qemu_get_cpu(int cpu)
 {
-    CPUState *env = first_cpu;
+    CPUArchState *env = first_cpu;
 
     while (env) {
         if (env->cpu_index == cpu)
@@ -698,9 +698,9 @@ CPUState *qemu_get_cpu(int cpu)
     return env;
 }
 
-void cpu_exec_init(CPUState *env)
+void cpu_exec_init(CPUArchState *env)
 {
-    CPUState **penv;
+    CPUArchState **penv;
     int cpu_index;
 
 #if defined(CONFIG_USER_ONLY)
@@ -799,9 +799,9 @@ static void page_flush_tb(void)
 
 /* flush all the translation blocks */
 /* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *env1)
+void tb_flush(CPUArchState *env1)
 {
-    CPUState *env;
+    CPUArchState *env;
 #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
            (unsigned long)(code_gen_ptr - code_gen_buffer),
@@ -934,7 +934,7 @@ static inline void tb_reset_jump(TranslationBlock *tb, int n)
 
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
-    CPUState *env;
+    CPUArchState *env;
     PageDesc *p;
     unsigned int h, n1;
     tb_page_addr_t phys_pc;
@@ -1043,7 +1043,7 @@ static void build_page_bitmap(PageDesc *p)
     }
 }
 
-TranslationBlock *tb_gen_code(CPUState *env,
+TranslationBlock *tb_gen_code(CPUArchState *env,
                               target_ulong pc, target_ulong cs_base,
                               int flags, int cflags)
 {
@@ -1090,7 +1090,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
 {
     TranslationBlock *tb, *tb_next, *saved_tb;
-    CPUState *env = cpu_single_env;
+    CPUArchState *env = cpu_single_env;
     tb_page_addr_t tb_start, tb_end;
     PageDesc *p;
     int n;
@@ -1227,7 +1227,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
     int n;
 #ifdef TARGET_HAS_PRECISE_SMC
     TranslationBlock *current_tb = NULL;
-    CPUState *env = cpu_single_env;
+    CPUArchState *env = cpu_single_env;
     int current_tb_modified = 0;
     target_ulong current_pc = 0;
     target_ulong current_cs_base = 0;
@@ -1457,12 +1457,12 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
 
 #if defined(TARGET_HAS_ICE)
 #if defined(CONFIG_USER_ONLY)
-static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
 {
     tb_invalidate_phys_page_range(pc, pc + 1, 0);
 }
 #else
-static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
 {
     target_phys_addr_t addr;
     ram_addr_t ram_addr;
@@ -1482,19 +1482,19 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 #endif /* TARGET_HAS_ICE */
 
 #if defined(CONFIG_USER_ONLY)
-void cpu_watchpoint_remove_all(CPUState *env, int mask)
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
 
 {
 }
 
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                           int flags, CPUWatchpoint **watchpoint)
 {
     return -ENOSYS;
 }
 #else
 /* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                           int flags, CPUWatchpoint **watchpoint)
 {
     target_ulong len_mask = ~(len - 1);
@@ -1527,7 +1527,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
 }
 
 /* Remove a specific watchpoint. */
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                           int flags)
 {
     target_ulong len_mask = ~(len - 1);
@@ -1544,7 +1544,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
 }
 
 /* Remove a specific watchpoint by reference. */
-void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
+void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
 {
     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
 
@@ -1554,7 +1554,7 @@ void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
 }
 
 /* Remove all matching watchpoints. */
-void cpu_watchpoint_remove_all(CPUState *env, int mask)
+void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
 {
     CPUWatchpoint *wp, *next;
 
@@ -1566,7 +1566,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
 #endif
 
 /* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                           CPUBreakpoint **breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
@@ -1594,7 +1594,7 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
 }
 
 /* Remove a specific breakpoint. */
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
+int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
 {
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp;
@@ -1612,7 +1612,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
 }
 
 /* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
@@ -1624,7 +1624,7 @@ void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
 }
 
 /* Remove all matching breakpoints. */
-void cpu_breakpoint_remove_all(CPUState *env, int mask)
+void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
 {
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp, *next;
@@ -1638,7 +1638,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
 
 /* enable or disable single step mode. EXCP_DEBUG is returned by the
    CPU loop after each instruction */
-void cpu_single_step(CPUState *env, int enabled)
+void cpu_single_step(CPUArchState *env, int enabled)
 {
 #if defined(TARGET_HAS_ICE)
     if (env->singlestep_enabled != enabled) {
@@ -1694,7 +1694,7 @@ void cpu_set_log_filename(const char *filename)
     cpu_set_log(loglevel);
 }
 
-static void cpu_unlink_tb(CPUState *env)
+static void cpu_unlink_tb(CPUArchState *env)
 {
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
        problem and hope the cpu will stop of its own accord.  For userspace
@@ -1716,7 +1716,7 @@ static void cpu_unlink_tb(CPUState *env)
 
 #ifndef CONFIG_USER_ONLY
 /* mask must never be zero, except for A20 change call */
-static void tcg_handle_interrupt(CPUState *env, int mask)
+static void tcg_handle_interrupt(CPUArchState *env, int mask)
 {
     int old_mask;
 
@@ -1747,19 +1747,19 @@ CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
 
 #else /* CONFIG_USER_ONLY */
 
-void cpu_interrupt(CPUState *env, int mask)
+void cpu_interrupt(CPUArchState *env, int mask)
 {
     env->interrupt_request |= mask;
     cpu_unlink_tb(env);
 }
 #endif /* CONFIG_USER_ONLY */
 
-void cpu_reset_interrupt(CPUState *env, int mask)
+void cpu_reset_interrupt(CPUArchState *env, int mask)
 {
     env->interrupt_request &= ~mask;
 }
 
-void cpu_exit(CPUState *env)
+void cpu_exit(CPUArchState *env)
 {
     env->exit_request = 1;
     cpu_unlink_tb(env);
@@ -1837,7 +1837,7 @@ int cpu_str_to_log_mask(const char *str)
     return mask;
 }
 
-void cpu_abort(CPUState *env, const char *fmt, ...)
+void cpu_abort(CPUArchState *env, const char *fmt, ...)
 {
     va_list ap;
     va_list ap2;
@@ -1877,17 +1877,17 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
     abort();
 }
 
-CPUState *cpu_copy(CPUState *env)
+CPUArchState *cpu_copy(CPUArchState *env)
 {
-    CPUState *new_env = cpu_init(env->cpu_model_str);
-    CPUState *next_cpu = new_env->next_cpu;
+    CPUArchState *new_env = cpu_init(env->cpu_model_str);
+    CPUArchState *next_cpu = new_env->next_cpu;
     int cpu_index = new_env->cpu_index;
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp;
     CPUWatchpoint *wp;
 #endif
 
-    memcpy(new_env, env, sizeof(CPUState));
+    memcpy(new_env, env, sizeof(CPUArchState));
 
     /* Preserve chaining and index. */
     new_env->next_cpu = next_cpu;
@@ -1913,7 +1913,7 @@ CPUState *cpu_copy(CPUState *env)
 
 #if !defined(CONFIG_USER_ONLY)
 
-static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
+static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
 {
     unsigned int i;
 
@@ -1947,7 +1947,7 @@ static CPUTLBEntry s_cputlb_empty_entry = {
  * entries from the TLB at any time, so flushing more entries than
  * required is only an efficiency issue, not a correctness issue.
  */
-void tlb_flush(CPUState *env, int flush_global)
+void tlb_flush(CPUArchState *env, int flush_global)
 {
     int i;
 
@@ -1984,7 +1984,7 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
     }
 }
 
-void tlb_flush_page(CPUState *env, target_ulong addr)
+void tlb_flush_page(CPUArchState *env, target_ulong addr)
 {
     int i;
     int mmu_idx;
@@ -2025,7 +2025,7 @@ static void tlb_protect_code(ram_addr_t ram_addr)
 
 /* update the TLB so that writes in physical page 'phys_addr' are no longer
    tested for self modifying code */
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr)
 {
     cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
@@ -2047,7 +2047,7 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags)
 {
-    CPUState *env;
+    CPUArchState *env;
     unsigned long length, start1;
     int i;
 
@@ -2102,7 +2102,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 }
 
 /* update the TLB according to the current state of the dirty bits */
-void cpu_tlb_update_dirty(CPUState *env)
+void cpu_tlb_update_dirty(CPUArchState *env)
 {
     int i;
     int mmu_idx;
@@ -2120,7 +2120,7 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
 
 /* update the TLB corresponding to virtual page vaddr
    so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
 {
     int i;
     int mmu_idx;
@@ -2133,7 +2133,7 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
 
 /* Our TLB does not support large pages, so remember the area covered by
    large pages and trigger a full TLB flush if these are invalidated. */
-static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
+static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                                target_ulong size)
 {
     target_ulong mask = ~(size - 1);
@@ -2174,7 +2174,7 @@ static bool is_ram_rom_romd(MemoryRegionSection *s)
 /* Add a new TLB entry. At most one entry for a given virtual address
    is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
    supplied size is only used by tlb_flush_page. */
-void tlb_set_page(CPUState *env, target_ulong vaddr,
+void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                   target_phys_addr_t paddr, int prot,
                   int mmu_idx, target_ulong size)
 {
@@ -2277,11 +2277,11 @@ void tlb_set_page(CPUState *env, target_ulong vaddr,
 
 #else
 
-void tlb_flush(CPUState *env, int flush_global)
+void tlb_flush(CPUArchState *env, int flush_global)
 {
 }
 
-void tlb_flush_page(CPUState *env, target_ulong addr)
+void tlb_flush_page(CPUArchState *env, target_ulong addr)
 {
 }
 
@@ -2542,7 +2542,7 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc)
     return 0;
 }
 
-static inline void tlb_set_dirty(CPUState *env,
+static inline void tlb_set_dirty(CPUArchState *env,
                                  unsigned long addr, target_ulong vaddr)
 {
 }
@@ -3299,7 +3299,7 @@ static const MemoryRegionOps notdirty_mem_ops = {
 /* Generate a debug exception if a watchpoint has been hit. */
 static void check_watchpoint(int offset, int len_mask, int flags)
 {
-    CPUState *env = cpu_single_env;
+    CPUArchState *env = cpu_single_env;
     target_ulong pc, cs_base;
     TranslationBlock *tb;
     target_ulong vaddr;
@@ -3544,7 +3544,7 @@ static void core_begin(MemoryListener *listener)
 
 static void core_commit(MemoryListener *listener)
 {
-    CPUState *env;
+    CPUArchState *env;
 
     /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
@@ -3734,7 +3734,7 @@ MemoryRegion *get_system_io(void)
 
 /* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write)
 {
     int l, flags;
@@ -4440,7 +4440,7 @@ void stq_be_phys(target_phys_addr_t addr, uint64_t val)
 }
 
 /* virtual memory access for debug (includes writing to ROM) */
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write)
 {
     int l;
@@ -4471,7 +4471,7 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
 
 /* in deterministic execution mode, instructions doing device I/Os
    must be at the end of the TB */
-void cpu_io_recompile(CPUState *env, void *retaddr)
+void cpu_io_recompile(CPUArchState *env, void *retaddr)
 {
     TranslationBlock *tb;
     uint32_t n, cflags;
@@ -4585,7 +4585,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
 /* NOTE: this function can trigger an exception */
 /* NOTE2: the returned address is not exactly the physical address: it
    is the offset relative to phys_ram_base */
-tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
+tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {
     int mmu_idx, page_index, pd;
     void *p;
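
For reference: every hunk above is the same mechanical substitution. CPUArchState
remains the per-target name for the architecture's register-file struct, which
frees the CPUState identifier for a target-independent CPU object. A minimal
standalone sketch of the per-target alias pattern follows; the names TARGET_I386
and CPUX86State are illustrative assumptions here, not lines from this patch:

    /* Sketch of a per-target alias like CPUArchState. TARGET_I386 and
     * CPUX86State are assumed, illustrative names, not from the patch. */
    #include <stdio.h>

    #define TARGET_I386 1   /* stand-in for the build system's -DTARGET_* */

    #if defined(TARGET_I386)
    struct CPUX86State {
        unsigned int eip;   /* toy stand-in for the real register file */
    };
    typedef struct CPUX86State CPUArchState;
    #endif

    int main(void)
    {
        CPUArchState env = { .eip = 0x100 };
        /* target-independent code, like exec.c, names only CPUArchState */
        printf("eip=%#x\n", env.eip);
        return 0;
    }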