@@ -58,7 +58,7 @@
 #if !defined(CONFIG_USER_ONLY)
 static bool in_migration;
 
-RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
+RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
 
 static MemoryRegion *system_memory;
 static MemoryRegion *system_io;
@@ -815,7 +815,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
     if (block && addr - block->offset < block->max_length) {
         goto found;
     }
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->max_length) {
             goto found;
         }
@@ -1197,15 +1197,16 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
 
     assert(size != 0); /* it would hand out same offset multiple times */
 
-    if (QTAILQ_EMPTY(&ram_list.blocks))
+    if (QLIST_EMPTY(&ram_list.blocks)) {
         return 0;
+    }
 
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         ram_addr_t end, next = RAM_ADDR_MAX;
 
         end = block->offset + block->max_length;
 
-        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
+        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
             if (next_block->offset >= end) {
                 next = MIN(next, next_block->offset);
             }
@@ -1230,9 +1231,9 @@ ram_addr_t last_ram_offset(void)
     RAMBlock *block;
     ram_addr_t last = 0;
 
-    QTAILQ_FOREACH(block, &ram_list.blocks, next)
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         last = MAX(last, block->offset + block->max_length);
-
+    }
     return last;
 }
 
@@ -1256,7 +1257,7 @@ static RAMBlock *find_ram_block(ram_addr_t addr)
 {
     RAMBlock *block;
 
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (block->offset == addr) {
             return block;
         }
@@ -1284,7 +1285,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
 
     qemu_mutex_lock_ramlist();
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                     new_block->idstr);
@@ -1366,6 +1367,7 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
 {
     RAMBlock *block;
+    RAMBlock *last_block = NULL;
     ram_addr_t old_ram_size, new_ram_size;
 
     old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
@@ -1392,16 +1394,22 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
         }
     }
 
-    /* Keep the list sorted from biggest to smallest block. */
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
+     * QLIST (which has an RCU-friendly variant) does not have insertion at
+     * tail, so save the last element in last_block.
+     */
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        last_block = block;
         if (block->max_length < new_block->max_length) {
             break;
         }
     }
     if (block) {
-        QTAILQ_INSERT_BEFORE(block, new_block, next);
-    } else {
-        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
+        QLIST_INSERT_BEFORE(block, new_block, next);
+    } else if (last_block) {
+        QLIST_INSERT_AFTER(last_block, new_block, next);
+    } else { /* list is empty */
+        QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
     }
     ram_list.mru_block = NULL;
 
@@ -1546,9 +1554,9 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
 
     /* This assumes the iothread lock is taken here too. */
     qemu_mutex_lock_ramlist();
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
-            QTAILQ_REMOVE(&ram_list.blocks, block, next);
+            QLIST_REMOVE(block, next);
             ram_list.mru_block = NULL;
             ram_list.version++;
             g_free_rcu(block, rcu);
@@ -1582,9 +1590,9 @@ void qemu_ram_free(ram_addr_t addr)
 
     /* This assumes the iothread lock is taken here too. */
     qemu_mutex_lock_ramlist();
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
-            QTAILQ_REMOVE(&ram_list.blocks, block, next);
+            QLIST_REMOVE(block, next);
             ram_list.mru_block = NULL;
             ram_list.version++;
             call_rcu(block, reclaim_ramblock, rcu);
@@ -1602,7 +1610,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
     int flags;
     void *area, *vaddr;
 
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         offset = addr - block->offset;
         if (offset < block->max_length) {
             vaddr = ramblock_ptr(block, offset);
@@ -1707,7 +1715,7 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
         return xen_map_cache(addr, *size, 1);
     } else {
         RAMBlock *block;
-        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+        QLIST_FOREACH(block, &ram_list.blocks, next) {
             if (addr - block->offset < block->max_length) {
                 if (addr - block->offset + *size > block->max_length)
                     *size = block->max_length - addr + block->offset;
@@ -1747,7 +1755,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
         goto found;
     }
 
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         /* This case append when the block is not mapped. */
         if (block->host == NULL) {
             continue;
@@ -3019,7 +3027,7 @@ void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
 {
     RAMBlock *block;
 
-    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
         func(block->host, block->offset, block->used_length, opaque);
     }
 }