/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"

#include <sys/resource.h>

#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen_native.h"
#include "qemu/bitmap.h"

#include "system/runstate.h"
#include "system/xen-mapcache.h"
#include "trace.h"

#include <xenevtchn.h>
#include <xengnttab.h>

#if HOST_LONG_BITS == 32
# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
#else
# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */
#endif

/*
 * This is the size of the virtual address space reserved for QEMU that
 * will not be used by the MapCache.
 * From empirical tests, QEMU uses about 75MB more than max_mcache_size,
 * hence the 80MB reservation.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * MiB)

typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint32_t lock;
#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1)
    uint8_t flags;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
    bool dma;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int bucket_shift;
    unsigned long bucket_size;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;
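
/*
 * Lookup structure (as used by the code below): mc->entry is a fixed
 * array of nr_buckets bucket heads, indexed by paddr_index modulo
 * nr_buckets, with collisions chained through entry->next.  Each locked
 * mapping additionally gets a MapCacheRev on locked_entries, which is
 * what enables reverse (host pointer -> guest address) lookup and
 * unmapping by buffer address.
 */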

static MapCache *mapcache;
static MapCache *mapcache_grants;
static xengnttab_handle *xen_region_gnttabdev;

static inline void mapcache_lock(MapCache *mc)
{
    qemu_mutex_lock(&mc->lock);
}

static inline void mapcache_unlock(MapCache *mc)
{
    qemu_mutex_unlock(&mc->lock);
}
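
/*
 * Return 1 iff all bits in [nr, nr + size) of @addr are set, i.e. iff
 * every page of the queried range has a valid mapping.
 */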
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);

    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}

static MapCache *xen_map_cache_init_single(phys_offset_to_gaddr_t f,
                                           void *opaque,
                                           unsigned int bucket_shift,
                                           unsigned long max_size)
{
    unsigned long size;
    MapCache *mc;

    assert(bucket_shift >= XC_PAGE_SHIFT);

    mc = g_new0(MapCache, 1);

    mc->phys_offset_to_gaddr = f;
    mc->opaque = opaque;
    qemu_mutex_init(&mc->lock);

    QTAILQ_INIT(&mc->locked_entries);

    mc->bucket_shift = bucket_shift;
    mc->bucket_size = 1UL << bucket_shift;
    mc->max_mcache_size = max_size;
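
    /*
     * nr_buckets is max_mcache_size / bucket_size, rounded up.  The
     * arithmetic is done in XC_PAGE_SIZE units so that a max size that is
     * page-aligned but not bucket-aligned still gets a final, partial
     * bucket.
     */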
    mc->nr_buckets =
        (((mc->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >>
         (bucket_shift - XC_PAGE_SHIFT));

    size = mc->nr_buckets * sizeof(MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    trace_xen_map_cache_init(mc->nr_buckets, size);
    mc->entry = g_malloc0(size);
    return mc;
}

void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    struct rlimit rlimit_as;
    unsigned long max_mcache_size;
    unsigned int bucket_shift;

    xen_region_gnttabdev = xengnttab_open(NULL, 0);
    if (xen_region_gnttabdev == NULL) {
        error_report("mapcache: Failed to open gnttab device");
        exit(EXIT_FAILURE);
    }

    if (HOST_LONG_BITS == 32) {
        bucket_shift = 16;
    } else {
        bucket_shift = 20;
    }

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            warn_report("QEMU's maximum size of virtual"
                        " memory is not unlimited");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            max_mcache_size = rlimit_as.rlim_max - NON_MCACHE_MEMORY_SIZE;
        } else {
            max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    mapcache = xen_map_cache_init_single(f, opaque,
                                         bucket_shift,
                                         max_mcache_size);

    /*
     * Grant mappings must use XC_PAGE_SIZE granularity since we can't
     * map anything beyond the number of pages granted to us.
     */
    mapcache_grants = xen_map_cache_init_single(f, opaque,
                                                XC_PAGE_SHIFT,
                                                max_mcache_size);

    setrlimit(RLIMIT_AS, &rlimit_as);
}
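
/*
 * (Re)map the bucket backing @entry.  Any previous mapping is torn down
 * first, then nb_pfn pages are mapped either from grant refs (grant),
 * from foreign frames (the default), or as an anonymous "dummy" mapping
 * when no real mapping can be established yet.  Per-page mapping errors
 * recorded in err[] end up as cleared bits in entry->valid_mapping.
 */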
static void xen_remap_bucket(MapCache *mc,
                             MapCacheEntry *entry,
                             void *vaddr,
                             hwaddr size,
                             hwaddr address_index,
                             bool dummy,
                             bool grant,
                             bool is_write,
                             ram_addr_t ram_offset)
{
    uint8_t *vaddr_base;
    g_autofree uint32_t *refs = NULL;
    g_autofree xen_pfn_t *pfns = NULL;
    g_autofree int *err = NULL;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    if (grant) {
        refs = g_new0(uint32_t, nb_pfn);
    } else {
        pfns = g_new0(xen_pfn_t, nb_pfn);
    }
    err = g_new0(int, nb_pfn);

    if (entry->vaddr_base != NULL) {
        if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
            ram_block_notify_remove(entry->vaddr_base, entry->size,
                                    entry->size);
        }

        /*
         * If an entry is being replaced by another mapping and we're using
         * MAP_FIXED for it, there is a possibility of a race on vaddr with
         * another thread doing an mmap call itself (see man 2 mmap).  To
         * avoid that, we skip the explicit unmapping here and let the
         * kernel destroy the previous mapping by replacing it in the later
         * mmap call.
         *
         * Non-identical replacements are therefore not allowed.
         */
        assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size));

        if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    if (grant) {
        hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT);

        for (i = 0; i < nb_pfn; i++) {
            refs[i] = grant_base + i;
        }
    } else {
        for (i = 0; i < nb_pfn; i++) {
            pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i;
        }
    }

    entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT;

    if (!dummy) {
        if (grant) {
            int prot = PROT_READ;

            if (is_write) {
                prot |= PROT_WRITE;
            }

            entry->flags |= XEN_MAPCACHE_ENTRY_GRANT;
            assert(vaddr == NULL);
            vaddr_base = xengnttab_map_domain_grant_refs(xen_region_gnttabdev,
                                                         nb_pfn,
                                                         xen_domid, refs,
                                                         prot);
        } else {
            /*
             * If the caller has requested the mapping at a specific
             * address, use MAP_FIXED to make sure it's honored.
             *
             * We don't yet support upgrading mappings from RO to RW, so to
             * handle models using ordinary address_space_rw(), foreign
             * mappings ignore is_write and are always mapped RW.
             */
            vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
                                               PROT_READ | PROT_WRITE,
                                               vaddr ? MAP_FIXED : 0,
                                               nb_pfn, pfns, err);
        }
        if (vaddr_base == NULL) {
            perror(grant ? "xengnttab_map_domain_grant_refs"
                         : "xenforeignmemory_map2");
            exit(-1);
        }
    } else {
        /*
         * We create dummy mappings where we are unable to create a foreign
         * mapping immediately due to certain circumstances (i.e. on resume
         * now).
         */
        vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE,
                          MAP_ANON | MAP_SHARED | (vaddr ? MAP_FIXED : 0),
                          -1, 0);
        if (vaddr_base == MAP_FAILED) {
            perror("mmap");
            exit(-1);
        }
    }

    if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
        ram_block_notify_add(vaddr_base, size, size);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = g_new0(unsigned long,
                                  BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    if (dummy) {
        entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY;
    } else {
        entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY);
    }

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }
}

static uint8_t *xen_map_cache_unlocked(MapCache *mc,
                                       hwaddr phys_addr, hwaddr size,
                                       ram_addr_t ram_offset,
                                       uint8_t lock, bool dma,
                                       bool grant, bool is_write)
{
    MapCacheEntry *entry, *pentry = NULL,
                  *free_entry = NULL, *free_pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated G_GNUC_UNUSED = false;
    bool dummy = false;

tryagain:
    address_index = phys_addr >> mc->bucket_shift;
    address_offset = phys_addr & (mc->bucket_size - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }
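
    /*
     * Fast path: an unlocked, zero-size lookup that falls into the same
     * bucket as the previous call can reuse mc->last_entry without
     * walking the collision chain.
     */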
    if (mc->last_entry != NULL &&
        mc->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mc->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(
            mc->last_entry->vaddr_base + address_offset
        );
        return mc->last_entry->vaddr_base + address_offset;
    }

    /* cache_size is always a multiple of mc->bucket_size */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % mc->bucket_size) {
            cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
        }
    } else {
        cache_size = mc->bucket_size;
    }
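
    /*
     * Walk this bucket's collision chain looking for an exact match,
     * remembering the first unlocked entry so that it can be recycled if
     * no match is found.
     */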
    entry = &mc->entry[address_index % mc->nr_buckets];

    while (entry && (lock || entry->lock) && entry->vaddr_base &&
            (entry->paddr_index != address_index ||
             entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                        test_bit_size >> XC_PAGE_SHIFT,
                        entry->valid_mapping))) {
        if (!free_entry && !entry->lock) {
            free_entry = entry;
            free_pentry = pentry;
        }
        pentry = entry;
        entry = entry->next;
    }
    if (!entry && free_entry) {
        entry = free_entry;
        pentry = free_pentry;
    }
    if (!entry) {
        entry = g_new0(MapCacheEntry, 1);
        pentry->next = entry;
        xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
                         grant, is_write, ram_offset);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                           test_bit_size >> XC_PAGE_SHIFT,
                           entry->valid_mapping)) {
            xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy,
                             grant, is_write, ram_offset);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mc->last_entry = NULL;
#ifdef XEN_COMPAT_PHYSMAP
        if (!translated && mc->phys_offset_to_gaddr) {
            phys_addr = mc->phys_offset_to_gaddr(phys_addr, size);
            translated = true;
            goto tryagain;
        }
#endif
        if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) {
            dummy = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mc->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_new0(MapCacheRev, 1);

        entry->lock++;
        if (entry->lock == 0) {
            error_report("mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p",
                         entry->paddr_index, entry->vaddr_base);
            abort();
        }
        reventry->dma = dma;
        reventry->vaddr_req = mc->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mc->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mc->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(
        mc->last_entry->vaddr_base + address_offset
    );
    return mc->last_entry->vaddr_base + address_offset;
}

uint8_t *xen_map_cache(MemoryRegion *mr,
                       hwaddr phys_addr, hwaddr size,
                       ram_addr_t ram_addr_offset,
                       uint8_t lock, bool dma,
                       bool is_write)
{
    bool grant = xen_mr_is_grants(mr);
    MapCache *mc = grant ? mapcache_grants : mapcache;
    uint8_t *p;

    if (grant && !lock) {
        /*
         * Grants are only supported via address_space_map(). Anything
         * else is considered a user/guest error.
         *
         * QEMU generally doesn't expect these mappings to ever fail, so
         * if this happens we report an error message and abort().
         */
        error_report("Tried to access a grant reference without mapping it.");
        abort();
    }

    mapcache_lock(mc);
    p = xen_map_cache_unlocked(mc, phys_addr, size, ram_addr_offset,
                               lock, dma, grant, is_write);
    mapcache_unlock(mc);
    return p;
}
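
/*
 * Reverse lookup: translate a host pointer previously returned by
 * xen_map_cache() back into a guest RAM address.  Only locked mappings
 * are tracked in mc->locked_entries, so the pointer must belong to a
 * mapping that is still locked.
 */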
static ram_addr_t xen_ram_addr_from_mapcache_single(MapCache *mc, void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock(mc);
    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        trace_xen_ram_addr_from_mapcache_not_found(ptr);
        mapcache_unlock(mc);
        return RAM_ADDR_INVALID;
    }

    entry = &mc->entry[paddr_index % mc->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index ||
                     entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        trace_xen_ram_addr_from_mapcache_not_in_cache(ptr);
        raddr = RAM_ADDR_INVALID;
    } else {
        raddr = (reventry->paddr_index << mc->bucket_shift) +
            ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock(mc);
    return raddr;
}

ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    ram_addr_t addr;

    addr = xen_ram_addr_from_mapcache_single(mapcache, ptr);
    if (addr == RAM_ADDR_INVALID) {
        addr = xen_ram_addr_from_mapcache_single(mapcache_grants, ptr);
    }

    return addr;
}
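
/*
 * Drop one lock reference on the mapping backing @buffer.  The underlying
 * mapping is only torn down once its lock count reaches zero; a bucket
 * head entry is then cleared in place, while a chained entry is unlinked
 * and freed.
 */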
static void xen_invalidate_map_cache_entry_unlocked(MapCache *mc,
                                                    uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;
    int rc;

    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        trace_xen_invalidate_map_cache_entry_unlocked_not_found(buffer);
        QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
            trace_xen_invalidate_map_cache_entry_unlocked_found(
                reventry->paddr_index,
                reventry->vaddr_req
            );
        }
        return;
    }
    QTAILQ_REMOVE(&mc->locked_entries, reventry, next);
    g_free(reventry);

    if (mc->last_entry != NULL &&
        mc->last_entry->paddr_index == paddr_index) {
        mc->last_entry = NULL;
    }

    entry = &mc->entry[paddr_index % mc->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index ||
                     entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        trace_xen_invalidate_map_cache_entry_unlocked_miss(buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0) {
        return;
    }

    ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
    if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) {
        rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base,
                             entry->size >> mc->bucket_shift);
    } else {
        rc = munmap(entry->vaddr_base, entry->size);
    }
    if (rc) {
        perror("unmap fails");
        exit(-1);
    }

    g_free(entry->valid_mapping);
    if (pentry) {
        pentry->next = entry->next;
        g_free(entry);
    } else {
        /*
         * Invalidate mapping but keep entry->next pointing to the rest
         * of the list.
         *
         * Note that lock is already zero here, otherwise we don't unmap.
         */
        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->valid_mapping = NULL;
        entry->flags = 0;
        entry->size = 0;
    }
}

typedef struct XenMapCacheData {
    Coroutine *co;
    uint8_t *buffer;
} XenMapCacheData;

static void xen_invalidate_map_cache_entry_single(MapCache *mc,
                                                  uint8_t *buffer)
{
    mapcache_lock(mc);
    xen_invalidate_map_cache_entry_unlocked(mc, buffer);
    mapcache_unlock(mc);
}

static void xen_invalidate_map_cache_entry_all(uint8_t *buffer)
{
    xen_invalidate_map_cache_entry_single(mapcache, buffer);
    xen_invalidate_map_cache_entry_single(mapcache_grants, buffer);
}

static void xen_invalidate_map_cache_entry_bh(void *opaque)
{
    XenMapCacheData *data = opaque;

    xen_invalidate_map_cache_entry_all(data->buffer);
    aio_co_wake(data->co);
}

void coroutine_mixed_fn xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    if (qemu_in_coroutine()) {
        XenMapCacheData data = {
            .co = qemu_coroutine_self(),
            .buffer = buffer,
        };
        aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                                xen_invalidate_map_cache_entry_bh, &data);
        qemu_coroutine_yield();
    } else {
        xen_invalidate_map_cache_entry_all(buffer);
    }
}
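
/*
 * Flush every unlocked mapping from @mc.  Entries that are still locked
 * (e.g. in-flight DMA) are kept; locked DMA mappings are only reported
 * via tracing.
 */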
static void xen_invalidate_map_cache_single(MapCache *mc)
{
    unsigned long i;
    MapCacheRev *reventry;

    mapcache_lock(mc);

    QTAILQ_FOREACH(reventry, &mc->locked_entries, next) {
        if (!reventry->dma) {
            continue;
        }
        trace_xen_invalidate_map_cache(reventry->paddr_index,
                                       reventry->vaddr_req);
    }

    for (i = 0; i < mc->nr_buckets; i++) {
        MapCacheEntry *entry = &mc->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mc->last_entry = NULL;

    mapcache_unlock(mc);
}

void xen_invalidate_map_cache(void)
{
    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    xen_invalidate_map_cache_single(mapcache);
    xen_invalidate_map_cache_single(mapcache_grants);
}
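
/*
 * Remap the entry backing @old_phys_addr so that it maps @new_phys_addr
 * instead, reusing the entry's existing virtual address (xen_remap_bucket
 * maps over it with MAP_FIXED).  The trace points suggest this is used to
 * replace dummy mappings once the real address is known; grant entries
 * are explicitly excluded by the assert below.
 */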
static uint8_t *xen_replace_cache_entry_unlocked(MapCache *mc,
                                                 hwaddr old_phys_addr,
                                                 hwaddr new_phys_addr,
                                                 hwaddr size)
{
    MapCacheEntry *entry;
    hwaddr address_index, address_offset;
    hwaddr test_bit_size, cache_size = size;

    address_index = old_phys_addr >> mc->bucket_shift;
    address_offset = old_phys_addr & (mc->bucket_size - 1);

    assert(size);
    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1));
    if (test_bit_size % XC_PAGE_SIZE) {
        test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
    }
    cache_size = size + address_offset;
    if (cache_size % mc->bucket_size) {
        cache_size += mc->bucket_size - (cache_size % mc->bucket_size);
    }

    entry = &mc->entry[address_index % mc->nr_buckets];
    while (entry && !(entry->paddr_index == address_index &&
                      entry->size == cache_size)) {
        entry = entry->next;
    }
    if (!entry) {
        trace_xen_replace_cache_entry_unlocked(old_phys_addr);
        return NULL;
    }

    assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0);

    address_index = new_phys_addr >> mc->bucket_shift;
    address_offset = new_phys_addr & (mc->bucket_size - 1);

    trace_xen_replace_cache_entry_dummy(old_phys_addr, new_phys_addr);

    xen_remap_bucket(mc, entry, entry->vaddr_base,
                     cache_size, address_index, false,
                     false, false, old_phys_addr);
    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        trace_xen_replace_cache_entry_unlocked_could_not_update_entry(
            old_phys_addr
        );
        return NULL;
    }

    return entry->vaddr_base + address_offset;
}

uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
                                 hwaddr new_phys_addr,
                                 hwaddr size)
{
    uint8_t *p;

    mapcache_lock(mapcache);
    p = xen_replace_cache_entry_unlocked(mapcache, old_phys_addr,
                                         new_phys_addr, size);
    mapcache_unlock(mapcache);
    return p;
}