arch_init.c

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor.h"
#include "sysemu.h"
#include "bitops.h"
#include "bitmap.h"
#include "arch_init.h"
#include "audio/audio.h"
#include "hw/pc.h"
#include "hw/pci/pci.h"
#include "hw/audiodev.h"
#include "kvm.h"
#include "migration.h"
#include "net.h"
#include "gdbstub.h"
#include "hw/smbios.h"
#include "exec-memory.h"
#include "hw/pcspk.h"
#include "qemu/page_cache.h"
#include "qmp-commands.h"
#include "trace.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif

#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40

#ifdef __ALTIVEC__
#include <altivec.h>
#define VECTYPE        vector unsigned char
#define SPLAT(p)       vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
/* altivec.h may redefine the bool macro as vector type.
 * Reset it to POSIX semantics. */
#undef bool
#define bool _Bool
#elif defined __SSE2__
#include <emmintrin.h>
#define VECTYPE        __m128i
#define SPLAT(p)       _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#else
#define VECTYPE        unsigned long
#define SPLAT(p)       (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#endif
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};
int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
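
/* Detect pages filled with a single repeated byte (typically zero pages).
 * SPLAT() broadcasts the first byte of the page across a vector and
 * ALL_EQ() compares it against every vector-sized chunk, so such pages
 * can be migrated as a single byte instead of TARGET_PAGE_SIZE bytes. */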
static int is_dup_page(uint8_t *page)
{
    VECTYPE *p = (VECTYPE *)page;
    VECTYPE val = SPLAT(page);
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE / sizeof(VECTYPE); i++) {
        if (!ALL_EQ(val, p[i])) {
            return 0;
        }
    }

    return 1;
}
/* This struct contains the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};
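
/* Resize the XBZRLE page cache.  new_size is given in bytes; the return
 * value is the resulting cache size in bytes, or simply pow2floor(new_size)
 * when the cache has not been created yet. */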
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
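
/* Write the per-page header: the page offset OR'd with the continue and
 * type flags, followed by the owning RAMBlock's idstr (length-prefixed)
 * unless RAM_SAVE_FLAG_CONTINUE marks the block as the same as before. */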
static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                           int cont, int flag)
{
    qemu_put_be64(f, offset | cont | flag);
    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
    }
}
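
/* Try to send a page as an XBZRLE delta against its cached copy.
 * Returns the number of bytes written, 0 if the page was unmodified,
 * or -1 if it could not be sent this way (cache miss or encoding
 * overflow), in which case the caller falls back to a full page. */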
#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr,
                         g_memdup(current_data, TARGET_PAGE_SIZE));
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent = encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
static RAMBlock *last_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;

static inline bool migration_bitmap_test_and_reset_dirty(MemoryRegion *mr,
                                                         ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_clear_bit(nr, migration_bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}

static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
                                              ram_addr_t offset)
{
    bool ret;
    int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}
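
/* Fold the dirty-memory information tracked by the memory API into the
 * migration bitmap and update migration_dirty_pages.  Roughly once per
 * second of wall-clock time it also recomputes s->dirty_pages_rate. */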
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    ram_addr_t addr;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t num_dirty_pages_period;
    int64_t end_time;

    if (!start_time) {
        start_time = qemu_get_clock_ms(rt_clock);
    }

    trace_migration_bitmap_sync_start();
    memory_global_sync_dirty_bitmap(get_system_memory());

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
                                        DIRTY_MEMORY_MIGRATION)) {
                migration_bitmap_set_dirty(block->mr, addr);
            }
        }
        memory_region_reset_dirty(block->mr, 0, block->length,
                                  DIRTY_MEMORY_MIGRATION);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_get_clock_ms(rt_clock);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}
/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  0: if the page hasn't changed
 *          -1: if there are no more dirty pages
 *           n: the number of bytes written otherwise
 */
static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_block;
    ram_addr_t offset = last_offset;
    int bytes_sent = -1;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QLIST_FIRST(&ram_list.blocks);
    }

    do {
        mr = block->mr;
        if (migration_bitmap_test_and_reset_dirty(mr, offset)) {
            uint8_t *p;
            int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            if (is_dup_page(p)) {
                acct_info.dup_pages++;
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, *p);
                bytes_sent = 1;
            } else if (migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* we didn't send this page yet (e.g. XBZRLE cache miss or overflow) */
            if (bytes_sent == -1) {
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                bytes_sent = TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent != 0) {
                break;
            }
        }

        offset += TARGET_PAGE_SIZE;
        if (offset >= block->length) {
            offset = 0;
            block = QLIST_NEXT(block, next);
            if (!block) {
                block = QLIST_FIRST(&ram_list.blocks);
            }
        }
    } while (block != last_block || offset != last_offset);

    last_block = block;
    last_offset = offset;

    return bytes_sent;
}
static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}
static int block_compar(const void *a, const void *b)
{
    RAMBlock * const *ablock = a;
    RAMBlock * const *bblock = b;

    return strcmp((*ablock)->idstr, (*bblock)->idstr);
}

static void sort_ram_list(void)
{
    RAMBlock *block, *nblock, **blocks;
    int n;

    n = 0;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ++n;
    }
    blocks = g_malloc(n * sizeof *blocks);
    n = 0;
    QLIST_FOREACH_SAFE(block, &ram_list.blocks, next, nblock) {
        blocks[n++] = block;
        QLIST_REMOVE(block, next);
    }
    qsort(blocks, n, sizeof *blocks, block_compar);
    while (--n >= 0) {
        QLIST_INSERT_HEAD(&ram_list.blocks, blocks[n], next);
    }
    g_free(blocks);
}
static void migration_end(void)
{
    memory_global_dirty_log_stop();

    if (migrate_use_xbzrle()) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_block = NULL;
    last_offset = 0;
    sort_ram_list();
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
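
/* Live-migration setup stage: mark every RAM page dirty, allocate the
 * XBZRLE cache and buffers when XBZRLE is in use, start dirty logging,
 * and emit the list of RAMBlocks (idstr + length) plus the total RAM
 * size so the destination can validate its own RAM layout. */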
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    /* start at bit 0 so every page, including the first, starts out dirty */
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;

    bytes_transferred = 0;
    reset_ram_globals();

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
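
/* Iterative stage: send dirty pages until the file's rate limit is hit,
 * checking elapsed time every 64 iterations so one call never blocks for
 * much longer than MAX_WAIT ms.  The measured bandwidth is then used to
 * estimate the expected downtime; the return value tells the migration
 * core whether the remaining data fits within the configured max downtime. */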
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    uint64_t bytes_transferred_last;
    double bwidth = 0;
    int ret;
    int i;
    uint64_t expected_downtime;
    MigrationState *s = migrate_get_current();

    bytes_transferred_last = bytes_transferred;
    bwidth = qemu_get_clock_ns(rt_clock);

    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
        acct_info.iterations++;
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every few
           iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    if (ret < 0) {
        return ret;
    }

    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;

    /* if we haven't transferred anything this round, force
     * expected_downtime to a very high value, but without
     * crashing */
    if (bwidth == 0) {
        bwidth = 0.000001;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
            expected_downtime, migrate_max_downtime());
    if (expected_downtime <= migrate_max_downtime()) {
        migration_bitmap_sync();
        expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
        s->expected_downtime = expected_downtime / 1000000; /* ns -> ms */

        return expected_downtime <= migrate_max_downtime();
    }
    return 0;
}
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    migration_bitmap_sync();

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    memory_global_dirty_log_stop();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    g_free(migration_bitmap);
    migration_bitmap = NULL;

    return 0;
}
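
/* Load one XBZRLE-compressed page: read the one-byte encoding flag and
 * the big-endian 16-bit encoded length written by save_xbzrle_page(),
 * then decode the delta on top of the current contents of the host page.
 * Returns 0 on success, -1 on a malformed page. */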
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}
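
/* Translate a (block idstr, offset) pair from the migration stream into a
 * host pointer.  The matched RAMBlock is remembered in a static variable
 * so that subsequent pages flagged RAM_SAVE_FLAG_CONTINUE can skip the
 * idstr lookup.  Returns NULL if the block cannot be found. */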
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QLIST_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            memset(host, ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
            if (ch == 0 &&
                (!kvm_enabled() || kvm_has_sync_mmu())) {
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
            }
#endif
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            if (!migrate_use_xbzrle()) {
                return -EINVAL;
            }
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};
#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[] = {
#ifdef HAS_AUDIO_CHOICE
#ifdef CONFIG_PCSPK
    {
        "pcspk",
        "PC speaker",
        0,
        1,
        { .init_isa = pcspk_audio_init }
    },
#endif

#ifdef CONFIG_SB16
    {
        "sb16",
        "Creative Sound Blaster 16",
        0,
        1,
        { .init_isa = SB16_init }
    },
#endif

#ifdef CONFIG_CS4231A
    {
        "cs4231a",
        "CS4231A",
        0,
        1,
        { .init_isa = cs4231a_init }
    },
#endif

#ifdef CONFIG_ADLIB
    {
        "adlib",
#ifdef HAS_YMF262
        "Yamaha YMF262 (OPL3)",
#else
        "Yamaha YM3812 (OPL2)",
#endif
        0,
        1,
        { .init_isa = Adlib_init }
    },
#endif

#ifdef CONFIG_GUS
    {
        "gus",
        "Gravis Ultrasound GF1",
        0,
        1,
        { .init_isa = GUS_init }
    },
#endif

#ifdef CONFIG_AC97
    {
        "ac97",
        "Intel 82801AA AC97 Audio",
        0,
        0,
        { .init_pci = ac97_init }
    },
#endif

#ifdef CONFIG_ES1370
    {
        "es1370",
        "ENSONIQ AudioPCI ES1370",
        0,
        0,
        { .init_pci = es1370_init }
    },
#endif

#ifdef CONFIG_HDA
    {
        "hda",
        "Intel HD Audio",
        0,
        0,
        { .init_pci = intel_hda_and_codec_init }
    },
#endif

#endif /* HAS_AUDIO_CHOICE */
    { NULL, NULL, 0, 0, { NULL } }
};
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:
#ifdef HAS_AUDIO_CHOICE
        printf("Valid sound card names (comma separated):\n");
        for (c = soundhw; c->name; ++c) {
            printf("%-11s %s\n", c->name, c->descr);
        }
        printf("\n-soundhw all will enable all of the above\n");
#else
        printf("Machine has no user-selectable audio hardware "
               "(it may or may not have always-present audio hardware).\n");
#endif
        exit(!is_help_option(optarg));
    }
    else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                }
                else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}
void do_acpitable_option(const char *optarg)
{
#ifdef TARGET_I386
    if (acpi_table_add(optarg) < 0) {
        fprintf(stderr, "Wrong acpi table provided\n");
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}
int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = TARGET_TYPE;

    return info;
}