@@ -1084,35 +1084,33 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
 static void
 tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                       PageDesc *p, tb_page_addr_t start,
-                                      tb_page_addr_t end,
+                                      tb_page_addr_t last,
                                       uintptr_t retaddr)
 {
     TranslationBlock *tb;
-    tb_page_addr_t tb_start, tb_end;
     PageForEachNext n;
 #ifdef TARGET_HAS_PRECISE_SMC
     bool current_tb_modified = false;
     TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
 #endif /* TARGET_HAS_PRECISE_SMC */
-    tb_page_addr_t last G_GNUC_UNUSED = end - 1;
 
     /*
-     * We remove all the TBs in the range [start, end[.
+     * We remove all the TBs in the range [start, last].
      * XXX: see if in some cases it could be faster to invalidate all the code
      */
     PAGE_FOR_EACH_TB(start, last, p, tb, n) {
+        tb_page_addr_t tb_start, tb_last;
+
         /* NOTE: this is subtle as a TB may span two physical pages */
+        tb_start = tb_page_addr0(tb);
+        tb_last = tb_start + tb->size - 1;
         if (n == 0) {
-            /* NOTE: tb_end may be after the end of the page, but
-               it is not a problem */
-            tb_start = tb_page_addr0(tb);
-            tb_end = tb_start + tb->size;
+            tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
         } else {
             tb_start = tb_page_addr1(tb);
-            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
-                                 & ~TARGET_PAGE_MASK);
+            tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
         }
-        if (!(tb_end <= start || tb_start >= end)) {
+        if (!(tb_last < start || tb_start > last)) {
 #ifdef TARGET_HAS_PRECISE_SMC
             if (current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@@ -1165,7 +1163,7 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
     start = addr & TARGET_PAGE_MASK;
     last = addr | ~TARGET_PAGE_MASK;
     pages = page_collection_lock(start, last);
-    tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
+    tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
     page_collection_unlock(pages);
 }
 
@@ -1192,7 +1190,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
             continue;
         }
         assert_page_locked(pd);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+        tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
     }
     page_collection_unlock(pages);
 }
@@ -1212,7 +1210,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
     }
 
     assert_page_locked(p);
-    tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
+    tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
 }
 
 /*
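
The change above converts the invalidation range from exclusive [start, end)
bounds to inclusive [start, last] bounds, adjusting each caller by one
(last + 1 -> last, bound -> bound - 1, start + len -> start + len - 1).
Inclusive bounds have the advantage that a range ending at the very top of
the address space remains representable, where an exclusive "end" would wrap
to zero. Below is a minimal standalone sketch of the arithmetic the patch
adopts; it is not QEMU code, and the page size and addresses are
illustrative only.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12   /* assumed 4 KiB pages, for illustration */
#define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Inclusive-range overlap, mirroring the patch's
 * "!(tb_last < start || tb_start > last)" test. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_last,
                           uint64_t b_start, uint64_t b_last)
{
    return !(a_last < b_start || a_start > b_last);
}

int main(void)
{
    /* A hypothetical 32-byte TB beginning 8 bytes before a page boundary,
     * so it spans two physical pages. */
    uint64_t tb_start = 0x2ff8;
    uint64_t tb_last = tb_start + 32 - 1;               /* 0x3017 */

    /* First-page portion (the n == 0 case): clamp to the last byte of
     * the page containing tb_start. */
    uint64_t first_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
    assert(first_last == 0x2fff);

    /* A one-page invalidation range, expressed inclusively. Note that
     * last can be (uint64_t)-1 at the top of the address space, which an
     * exclusive end could not express without wrapping to 0. */
    uint64_t start = 0x2000, last = 0x2fff;
    assert(ranges_overlap(tb_start, first_last, start, last));
    return 0;
}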