arch_init.c

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor.h"
#include "sysemu.h"
#include "arch_init.h"
#include "audio/audio.h"
#include "hw/pc.h"
#include "hw/pci.h"
#include "hw/audiodev.h"
#include "kvm.h"
#include "migration.h"
#include "net.h"
#include "gdbstub.h"
#include "hw/smbios.h"
#include "exec-memory.h"
#include "hw/pcspk.h"
#include "qemu/page_cache.h"
#include "qmp-commands.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 15;
#endif

#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40

#ifdef __ALTIVEC__
#include <altivec.h>
#define VECTYPE        vector unsigned char
#define SPLAT(p)       vec_splat(vec_ld(0, p), 0)
#define ALL_EQ(v1, v2) vec_all_eq(v1, v2)
/* altivec.h may redefine the bool macro as vector type.
 * Reset it to POSIX semantics. */
#undef bool
#define bool _Bool
#elif defined __SSE2__
#include <emmintrin.h>
#define VECTYPE        __m128i
#define SPLAT(p)       _mm_set1_epi8(*(p))
#define ALL_EQ(v1, v2) (_mm_movemask_epi8(_mm_cmpeq_epi8(v1, v2)) == 0xFFFF)
#else
#define VECTYPE        unsigned long
#define SPLAT(p)       (*(p) * (~0UL / 255))
#define ALL_EQ(v1, v2) ((v1) == (v2))
#endif
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_DATADIR "/cpus-" TARGET_ARCH ".conf", false },
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                 true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_ARCH ".conf", true },
    { NULL }, /* end of list */
};

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
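/* Returns 1 if every byte of the page equals its first byte (so the page can
 * be sent as a single repeated value), 0 otherwise. */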
static int is_dup_page(uint8_t *page)
{
    VECTYPE *p = (VECTYPE *)page;
    VECTYPE val = SPLAT(page);
    int i;

    for (i = 0; i < TARGET_PAGE_SIZE / sizeof(VECTYPE); i++) {
        if (!ALL_EQ(val, p[i])) {
            return 0;
        }
    }

    return 1;
}
/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .decoded_buf = NULL,
    .cache = NULL,
};
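/* Resize the XBZRLE page cache and return the resulting size in bytes; if the
 * cache has not been created yet, just report the power-of-two size that
 * would be used. */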
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
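/* Write the page header: the page offset ORed with the flag bits, followed by
 * the RAMBlock id string unless RAM_SAVE_FLAG_CONTINUE marks it as belonging
 * to the same block as the previous page. */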
static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                           int cont, int flag)
{
    qemu_put_be64(f, offset | cont | flag);
    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
    }
}
#define ENCODING_FLAG_XBZRLE 0x1
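/* Try to send the page at current_addr as an XBZRLE delta against the cached
 * copy.  Returns the number of bytes written, 0 if the page was unchanged, or
 * -1 if it could not be XBZRLE-encoded (cache miss or overflow) and has to be
 * sent some other way. */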
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            cache_insert(XBZRLE.cache, current_addr,
                         g_memdup(current_data, TARGET_PAGE_SIZE));
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent = encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
static RAMBlock *last_block;
static ram_addr_t last_offset;

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  0: if the page hasn't changed
 *          -1: if there are no more dirty pages
 *           n: the number of bytes written in any other case
 */
static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_block;
    ram_addr_t offset = last_offset;
    int bytes_sent = -1;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block)
        block = QLIST_FIRST(&ram_list.blocks);

    do {
        mr = block->mr;
        if (memory_region_get_dirty(mr, offset, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_MIGRATION)) {
            uint8_t *p;
            int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

            memory_region_reset_dirty(mr, offset, TARGET_PAGE_SIZE,
                                      DIRTY_MEMORY_MIGRATION);

            p = memory_region_get_ram_ptr(mr) + offset;

            if (is_dup_page(p)) {
                acct_info.dup_pages++;
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, *p);
                bytes_sent = 1;
            } else if (migrate_use_xbzrle()) {
                current_addr = block->offset + offset;
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    p = get_cached_data(XBZRLE.cache, current_addr);
                }
            }

            /* either we didn't send the page yet (we may have had an XBZRLE
               overflow), or XBZRLE is not in use */
            if (bytes_sent == -1) {
                save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                bytes_sent = TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent != 0) {
                break;
            }
        }

        offset += TARGET_PAGE_SIZE;
        if (offset >= block->length) {
            offset = 0;
            block = QLIST_NEXT(block, next);
            if (!block)
                block = QLIST_FIRST(&ram_list.blocks);
        }
    } while (block != last_block || offset != last_offset);

    last_block = block;
    last_offset = offset;

    return bytes_sent;
}
static uint64_t bytes_transferred;

static ram_addr_t ram_save_remaining(void)
{
    return ram_list.dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

static int block_compar(const void *a, const void *b)
{
    RAMBlock * const *ablock = a;
    RAMBlock * const *bblock = b;

    return strcmp((*ablock)->idstr, (*bblock)->idstr);
}
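/* Rebuild ram_list.blocks sorted by idstr, giving a deterministic block order
 * for migration. */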
static void sort_ram_list(void)
{
    RAMBlock *block, *nblock, **blocks;
    int n;

    n = 0;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ++n;
    }
    blocks = g_malloc(n * sizeof *blocks);
    n = 0;
    QLIST_FOREACH_SAFE(block, &ram_list.blocks, next, nblock) {
        blocks[n++] = block;
        QLIST_REMOVE(block, next);
    }
    qsort(blocks, n, sizeof *blocks, block_compar);
    while (--n >= 0) {
        QLIST_INSERT_HEAD(&ram_list.blocks, blocks[n], next);
    }
    g_free(blocks);
}
static void migration_end(void)
{
    memory_global_dirty_log_stop();

    if (migrate_use_xbzrle()) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(XBZRLE.decoded_buf);
        XBZRLE.cache = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
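/* First stage of RAM migration: sort the block list, set up the XBZRLE cache
 * if requested, mark all RAM as dirty, start dirty logging and send the list
 * of RAM blocks with their sizes. */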
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    ram_addr_t addr;
    RAMBlock *block;

    bytes_transferred = 0;
    last_block = NULL;
    last_offset = 0;
    sort_ram_list();

    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }
        XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
        acct_clear();
    }

    /* Make sure all dirty bits are set */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
            if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
                                         DIRTY_MEMORY_MIGRATION)) {
                memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
            }
        }
    }

    memory_global_dirty_log_start();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
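/* Iterative stage: send dirty pages until the rate limit is hit or MAX_WAIT
 * ms have elapsed.  Returns a negative value on error, 1 when the remaining
 * dirty RAM can be sent within the maximum downtime, 0 otherwise. */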
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    uint64_t bytes_transferred_last;
    double bwidth = 0;
    int ret;
    int i;
    uint64_t expected_time;

    bytes_transferred_last = bytes_transferred;
    bwidth = qemu_get_clock_ns(rt_clock);

    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
        acct_info.iterations++;
        /* We want to check on the first iteration as well, in case it was
           the first time and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check every
           64 iterations. */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    if (ret < 0) {
        return ret;
    }

    bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
    bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;

    /* if we haven't transferred anything this round, force expected_time to a
     * very high value, but without crashing */
    if (bwidth == 0) {
        bwidth = 0.000001;
    }

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;

    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
            expected_time, migrate_max_downtime());

    if (expected_time <= migrate_max_downtime()) {
        memory_global_sync_dirty_bitmap(get_system_memory());
        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;

        return expected_time <= migrate_max_downtime();
    }
    return 0;
}
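/* Final stage: sync the dirty bitmap one last time and flush every remaining
 * dirty page, ignoring the rate limit, then stop dirty logging. */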
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    memory_global_sync_dirty_bitmap(get_system_memory());

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent < 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }
    memory_global_dirty_log_stop();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
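/* Read one XBZRLE-encoded page from the stream and decode it into the guest
 * page at 'host'.  Returns 0 on success, -1 on a malformed page. */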
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!XBZRLE.decoded_buf) {
        XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}
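/* Map a (block id, offset) pair from the stream to a host pointer.  The block
 * id is read from the stream unless RAM_SAVE_FLAG_CONTINUE indicates the page
 * belongs to the same block as the previous one. */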
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)))
            return memory_region_get_ram_ptr(block->mr) + offset;
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}
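/* Incoming side: parse the RAM section of the migration stream, validating
 * the sender's block list against the local one and filling guest pages from
 * raw, compressed (single byte) or XBZRLE-encoded data. */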
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id < 4 || version_id > 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QLIST_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            memset(host, ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
            if (ch == 0 &&
                (!kvm_enabled() || kvm_has_sync_mmu())) {
                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
            }
#endif
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            if (!migrate_use_xbzrle()) {
                return -EINVAL;
            }
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};
#ifdef HAS_AUDIO
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[] = {
#ifdef HAS_AUDIO_CHOICE
#ifdef CONFIG_PCSPK
    {
        "pcspk",
        "PC speaker",
        0,
        1,
        { .init_isa = pcspk_audio_init }
    },
#endif

#ifdef CONFIG_SB16
    {
        "sb16",
        "Creative Sound Blaster 16",
        0,
        1,
        { .init_isa = SB16_init }
    },
#endif

#ifdef CONFIG_CS4231A
    {
        "cs4231a",
        "CS4231A",
        0,
        1,
        { .init_isa = cs4231a_init }
    },
#endif

#ifdef CONFIG_ADLIB
    {
        "adlib",
#ifdef HAS_YMF262
        "Yamaha YMF262 (OPL3)",
#else
        "Yamaha YM3812 (OPL2)",
#endif
        0,
        1,
        { .init_isa = Adlib_init }
    },
#endif

#ifdef CONFIG_GUS
    {
        "gus",
        "Gravis Ultrasound GF1",
        0,
        1,
        { .init_isa = GUS_init }
    },
#endif

#ifdef CONFIG_AC97
    {
        "ac97",
        "Intel 82801AA AC97 Audio",
        0,
        0,
        { .init_pci = ac97_init }
    },
#endif

#ifdef CONFIG_ES1370
    {
        "es1370",
        "ENSONIQ AudioPCI ES1370",
        0,
        0,
        { .init_pci = es1370_init }
    },
#endif

#ifdef CONFIG_HDA
    {
        "hda",
        "Intel HD Audio",
        0,
        0,
        { .init_pci = intel_hda_and_codec_init }
    },
#endif

#endif /* HAS_AUDIO_CHOICE */

    { NULL, NULL, 0, 0, { NULL } }
};
void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:
#ifdef HAS_AUDIO_CHOICE
        printf("Valid sound card names (comma separated):\n");
        for (c = soundhw; c->name; ++c) {
            printf ("%-11s %s\n", c->name, c->descr);
        }
        printf("\n-soundhw all will enable all of the above\n");
#else
        printf("Machine has no user-selectable audio hardware "
               "(it may or may not have always-present audio hardware).\n");
#endif
        exit(!is_help_option(optarg));
    }
    else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    fprintf(stderr,
                            "Unknown sound card name (too big to show)\n");
                }
                else {
                    fprintf(stderr, "Unknown sound card name `%.*s'\n",
                            (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}

void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
    struct soundhw *c;

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (isa_bus) {
                    c->init.init_isa(isa_bus);
                }
            } else {
                if (pci_bus) {
                    c->init.init_pci(pci_bus);
                }
            }
        }
    }
}
#else
void select_soundhw(const char *optarg)
{
}
void audio_init(ISABus *isa_bus, PCIBus *pci_bus)
{
}
#endif
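/* Parse a 36-character UUID string into the 16-byte uuid buffer; on x86 the
 * result is also exposed to the guest via the SMBIOS type 1 table. */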
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
#ifdef TARGET_I386
    smbios_add_field(1, offsetof(struct smbios_type_1, uuid), 16, uuid);
#endif
    return 0;
}

void do_acpitable_option(const char *optarg)
{
#ifdef TARGET_I386
    if (acpi_table_add(optarg) < 0) {
        fprintf(stderr, "Wrong acpi table provided\n");
        exit(1);
    }
#endif
}

void do_smbios_option(const char *optarg)
{
#ifdef TARGET_I386
    if (smbios_entry_add(optarg) < 0) {
        fprintf(stderr, "Wrong smbios provided\n");
        exit(1);
    }
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int audio_available(void)
{
#ifdef HAS_AUDIO
    return 1;
#else
    return 0;
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = TARGET_TYPE;

    return info;
}