@@ -991,11 +991,10 @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
  * Called with mmap_lock held for user-mode emulation.
  * NOTE: this function must not be called while a TB is running.
  */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     TranslationBlock *tb;
     PageForEachNext n;
-    tb_page_addr_t last = end - 1;
 
     assert_memory_lock();
 
@@ -1011,11 +1010,11 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
  */
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;
 
     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    tb_invalidate_phys_range(start, end);
+    last = addr | ~TARGET_PAGE_MASK;
+    tb_invalidate_phys_range(start, last);
 }
 
 /*
@@ -1169,28 +1168,30 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
 
 /*
  * Invalidate all TBs which intersect with the target physical address range
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
+ * [start;last]. NOTE: start and last may refer to *different* physical pages.
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
  */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
 {
     struct page_collection *pages;
-    tb_page_addr_t next;
+    tb_page_addr_t index, index_last;
 
-    pages = page_collection_lock(start, end - 1);
-    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-         start < end;
-         start = next, next += TARGET_PAGE_SIZE) {
-        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
-        tb_page_addr_t bound = MIN(next, end);
+    pages = page_collection_lock(start, last);
+
+    index_last = last >> TARGET_PAGE_BITS;
+    for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
+        PageDesc *pd = page_find(index);
+        tb_page_addr_t bound;
 
         if (pd == NULL) {
             continue;
         }
         assert_page_locked(pd);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
+        bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
+        bound = MIN(bound, last);
+        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
     }
     page_collection_unlock(pages);
 }
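
The change of calling convention is easy to miss in the hunks above: an exclusive bound computed as start + TARGET_PAGE_SIZE wraps to 0 when the range touches the last page of the address space, so a "start < end" loop never runs, while the inclusive last byte addr | ~TARGET_PAGE_MASK cannot overflow. The standalone sketch below reproduces that arithmetic and the new page-index walk. It is not QEMU code: TARGET_PAGE_BITS = 12 and the uint32_t stand-in for tb_page_addr_t are assumptions made purely for illustration.

/*
 * Minimal sketch of the inclusive-bound arithmetic the patch adopts.
 * Assumed values, not QEMU's definitions.
 */
#include <inttypes.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE ((uint32_t)1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ((uint32_t)-1 << TARGET_PAGE_BITS)

int main(void)
{
    /* An address on the last page of a 32-bit address space. */
    uint32_t addr = 0xfffffabcu;
    uint32_t start = addr & TARGET_PAGE_MASK;   /* 0xfffff000 */

    /* Old convention: exclusive end.  On the last page this wraps
     * to 0, so a "start < end" loop body would never execute. */
    uint32_t end = start + TARGET_PAGE_SIZE;

    /* New convention: inclusive last byte.  Cannot wrap. */
    uint32_t last = addr | ~TARGET_PAGE_MASK;   /* 0xffffffff */

    printf("start=%#" PRIx32 " end=%#" PRIx32 " last=%#" PRIx32 "\n",
           start, end, last);

    /* Page-index walk in the style of the reworked loop: one
     * iteration per page, clamping the per-page bound to 'last'. */
    uint32_t index_last = last >> TARGET_PAGE_BITS;
    for (uint32_t index = start >> TARGET_PAGE_BITS;
         index <= index_last; index++) {
        uint32_t bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
        bound = bound < last ? bound : last;    /* MIN(bound, last) */
        printf("page %#" PRIx32 ": bytes [%#" PRIx32 ", %#" PRIx32 "]\n",
               index, start, bound);
        start = bound + 1;  /* advance for multi-page ranges; wraps only
                             * after the final iteration, harmlessly */
    }
    return 0;
}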