@@ -325,8 +325,6 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_NOTDIRTY         (1 << (TARGET_PAGE_BITS_MIN - 2))
 /* Set if TLB entry is an IO callback. */
 #define TLB_MMIO             (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT       (1 << (TARGET_PAGE_BITS_MIN - 4))
 /* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
 #define TLB_FORCE_SLOW       (1 << (TARGET_PAGE_BITS_MIN - 5))
 /* Set if TLB entry writes ignored. */
@@ -338,7 +336,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
  */
 #define TLB_FLAGS_MASK \
     (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-    | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

 /*
  * Flags stored in CPUTLBEntryFull.slow_flags[x].
@@ -346,8 +344,10 @@ CPUArchState *cpu_copy(CPUArchState *env);
  */
 /* Set if TLB entry requires byte swap. */
 #define TLB_BSWAP            (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT       (1 << 1)

-#define TLB_SLOW_FLAGS_MASK  TLB_BSWAP
+#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT)

 /* The two sets of flags must not overlap. */
 QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);