/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif

#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

static uint64_t bitmap_sync_count;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};
static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}
/* struct contains the XBZRLE cache and a static page
   used for the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}
/*
 * Called from qmp_migrate_set_cache_size in the main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
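
/* Write the page header to the stream: a 64-bit word holding the page offset
 * OR'ed with the flag bits, followed (unless this is a RAM_SAVE_FLAG_CONTINUE
 * page from the same block) by the length-prefixed RAMBlock idstr.
 * Returns the number of header bytes written.
 */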
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}
/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}
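
/* Try to send a page through XBZRLE delta compression.
 * Returns the number of bytes written on success, 0 if the page was
 * unmodified with respect to the cached copy, or -1 if the page could not
 * be sent this way (cache miss or encoding overflow) and the caller should
 * fall back to sending it as a normal page.
 */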
#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}
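
/* Scan the migration bitmap of 'mr' starting from offset 'start' for the
 * next dirty page, clear its bit, and return the page's offset within the
 * block.  A result >= the block length means no dirty page was found.
 * During the bulk stage the bitmap walk is skipped because every page is
 * still dirty, and the search simply advances one page at a time.
 */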
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}
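
/* Fold the dirty-memory bitmap for [start, start + length) into the
 * migration bitmap.  When the range starts on a bitmap-word boundary the
 * bits are OR'ed in a word at a time; otherwise each page is tested and
 * reset individually.  migration_dirty_pages is updated for every page that
 * becomes newly dirty.
 */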
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}
/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;
    static uint64_t xbzrle_cache_miss_prev;
    static uint64_t iterations_prev;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the
               approximate amount of bytes that just got transferred since
               the last time we were in this routine. If that happens >N
               times (for now N == 4) we turn on the throttle-down logic. */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        if (migrate_use_xbzrle()) {
            if (iterations_prev != 0) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
        s->dirty_sync_count = bitmap_sync_count;
    }
}
/*
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of bytes written.
 */
static int ram_save_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                         bool last_stage)
{
    int bytes_sent;
    int cont;
    ram_addr_t current_addr;
    MemoryRegion *mr = block->mr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

    p = memory_region_get_ram_ptr(mr) + offset;

    /* When in doubt, send the page as a normal page */
    bytes_sent = -1;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_sent);

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_sent > 0) {
                acct_info.norm_pages++;
            } else if (bytes_sent == 0) {
                acct_info.dup_pages++;
            }
        }
    } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        bytes_sent = save_block_hdr(f, block, offset, cont,
                                    RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        bytes_sent++;
        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
         * page would be stale
         */
        xbzrle_cache_zero_page(current_addr);
    } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
        bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                      offset, cont, last_stage);
        if (!last_stage) {
            /* Can't send this cached data async, since the cache page
             * might get updated before it gets to the wire
             */
            send_async = false;
        }
    }

    /* XBZRLE overflow or normal page */
    if (bytes_sent == -1) {
        bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        bytes_sent += TARGET_PAGE_SIZE;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return bytes_sent;
}
/*
 * ram_find_and_save_block: Finds a page to send and sends it to f
 *
 * Returns: The number of bytes written.
 *          0 means no dirty pages
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;

    if (!block)
        block = QTAILQ_FIRST(&ram_list.blocks);

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            bytes_sent = ram_save_page(f, block, offset, last_stage);

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}
static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        total += block->length;

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}
static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */
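
/* Set up RAM migration: allocate the XBZRLE cache and buffers when XBZRLE
 * is enabled, initialise the migration bitmap with every page marked dirty,
 * start dirty logging, and write the list of RAM blocks (idstr and length)
 * to the stream.
 */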
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
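
/* Send dirty pages until the file's rate limit is hit, no dirty pages
 * remain, or MAX_WAIT milliseconds have elapsed.  Returns the number of
 * page bytes sent (0 when nothing was dirty), or a negative error from
 * the stream.
 */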
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_find_and_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_find_and_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
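
/* Estimate how many bytes are still to be sent.  If the estimate drops
 * below 'max_size', re-sync the dirty bitmap (under the iothread lock) to
 * refresh the count before reporting it.
 */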
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
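
/* Translate a (block, offset) pair read from the stream into a host pointer.
 * For RAM_SAVE_FLAG_CONTINUE pages the block named by the previous page is
 * reused; otherwise the block idstr is read from the stream and looked up
 * in the RAM block list.  Returns NULL on a malformed stream or unknown
 * block.
 */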
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id)))
            return memory_region_get_ram_ptr(block->mr) + offset;
    }

    error_report("Can't find block %s!", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    while (!ret) {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            error_report("Length mismatch: %s: " RAM_ADDR_FMT
                                         " in != " RAM_ADDR_FMT, id, length,
                                         block->length);
                            ret = -EINVAL;
                        }
                        break;
                    }
                }

                if (!block) {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }
                if (ret) {
                    break;
                }

                total_ram_bytes -= length;
            }
        } else if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        } else if (flags & RAM_SAVE_FLAG_EOS) {
            /* normal exit */
            break;
        } else {
            error_report("Unknown migration flags: %#x", flags);
            ret = -EINVAL;
            break;
        }
        ret = qemu_file_get_error(f);
    }

    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
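
/* Table of user-selectable sound devices.  Boards register entries through
 * isa_register_soundhw() / pci_register_soundhw(); -soundhw enables them by
 * name in select_soundhw(), and audio_init() instantiates the enabled ones.
 */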
struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;

void isa_register_soundhw(const char *name, const char *descr,
                          int (*init_isa)(ISABus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 1;
    soundhw[soundhw_count].init.init_isa = init_isa;
    soundhw_count++;
}

void pci_register_soundhw(const char *name, const char *descr,
                          int (*init_pci)(PCIBus *bus))
{
    assert(soundhw_count < ARRAY_SIZE(soundhw) - 1);
    soundhw[soundhw_count].name = name;
    soundhw[soundhw_count].descr = descr;
    soundhw[soundhw_count].isa = 0;
    soundhw[soundhw_count].init.init_pci = init_pci;
    soundhw_count++;
}

void select_soundhw(const char *optarg)
{
    struct soundhw *c;

    if (is_help_option(optarg)) {
    show_valid_cards:

        if (soundhw_count) {
            printf("Valid sound card names (comma separated):\n");
            for (c = soundhw; c->name; ++c) {
                printf("%-11s %s\n", c->name, c->descr);
            }
            printf("\n-soundhw all will enable all of the above\n");
        } else {
            printf("Machine has no user-selectable audio hardware "
                   "(it may or may not have always-present audio hardware).\n");
        }
        exit(!is_help_option(optarg));
    } else {
        size_t l;
        const char *p;
        char *e;
        int bad_card = 0;

        if (!strcmp(optarg, "all")) {
            for (c = soundhw; c->name; ++c) {
                c->enabled = 1;
            }
            return;
        }

        p = optarg;
        while (*p) {
            e = strchr(p, ',');
            l = !e ? strlen(p) : (size_t) (e - p);

            for (c = soundhw; c->name; ++c) {
                if (!strncmp(c->name, p, l) && !c->name[l]) {
                    c->enabled = 1;
                    break;
                }
            }

            if (!c->name) {
                if (l > 80) {
                    error_report("Unknown sound card name (too big to show)");
                } else {
                    error_report("Unknown sound card name `%.*s'",
                                 (int) l, p);
                }
                bad_card = 1;
            }
            p += l + (e != NULL);
        }

        if (bad_card) {
            goto show_valid_cards;
        }
    }
}
void audio_init(void)
{
    struct soundhw *c;
    ISABus *isa_bus = (ISABus *) object_resolve_path_type("", TYPE_ISA_BUS, NULL);
    PCIBus *pci_bus = (PCIBus *) object_resolve_path_type("", TYPE_PCI_BUS, NULL);

    for (c = soundhw; c->name; ++c) {
        if (c->enabled) {
            if (c->isa) {
                if (!isa_bus) {
                    error_report("ISA bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_isa(isa_bus);
            } else {
                if (!pci_bus) {
                    error_report("PCI bus not available for %s", c->name);
                    exit(1);
                }
                c->init.init_pci(pci_bus);
            }
        }
    }
}
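
/* Parse a 36-character UUID string of the form
 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" into the 16-byte buffer 'uuid'.
 * Returns 0 on success, -1 on a malformed string.
 */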
int qemu_uuid_parse(const char *str, uint8_t *uuid)
{
    int ret;

    if (strlen(str) != 36) {
        return -1;
    }

    ret = sscanf(str, UUID_FMT, &uuid[0], &uuid[1], &uuid[2], &uuid[3],
                 &uuid[4], &uuid[5], &uuid[6], &uuid[7], &uuid[8], &uuid[9],
                 &uuid[10], &uuid[11], &uuid[12], &uuid[13], &uuid[14],
                 &uuid[15]);

    if (ret != 16) {
        return -1;
    }
    return 0;
}
void do_acpitable_option(const QemuOpts *opts)
{
#ifdef TARGET_I386
    Error *err = NULL;

    acpi_table_add(opts, &err);
    if (err) {
        error_report("Wrong acpi table provided: %s",
                     error_get_pretty(err));
        error_free(err);
        exit(1);
    }
#endif
}

void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
    smbios_entry_add(opts);
#endif
}

void cpudef_init(void)
{
#if defined(cpudef_setup)
    cpudef_setup(); /* parse cpu definitions in target config file */
#endif
}

int tcg_available(void)
{
    return 1;
}

int kvm_available(void)
{
#ifdef CONFIG_KVM
    return 1;
#else
    return 0;
#endif
}

int xen_available(void)
{
#ifdef CONFIG_XEN
    return 1;
#else
    return 0;
#endif
}

TargetInfo *qmp_query_target(Error **errp)
{
    TargetInfo *info = g_malloc0(sizeof(*info));

    info->arch = g_strdup(TARGET_NAME);

    return info;
}
/* Stub function that gets run on the vcpu when it's brought out of the
   VM to run inside qemu via async_run_on_cpu() */
static void mig_sleep_cpu(void *opq)
{
    qemu_mutex_unlock_iothread();
    g_usleep(30*1000);
    qemu_mutex_lock_iothread();
}

/* To reduce the dirty rate explicitly disallow the VCPUs from spending
   much time in the VM. The migration thread will try to catch up.
   Workload will experience a performance drop.
*/
static void mig_throttle_guest_down(void)
{
    CPUState *cpu;

    qemu_mutex_lock_iothread();
    CPU_FOREACH(cpu) {
        async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
    }
    qemu_mutex_unlock_iothread();
}

static void check_guest_throttling(void)
{
    static int64_t t0;
    int64_t t1;

    if (!mig_throttle_on) {
        return;
    }

    if (!t0) {
        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        return;
    }

    t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    /* If it has been more than 40 ms since the last time the guest
     * was throttled then do it again.
     */
    if (40 < (t1 - t0) / 1000000) {
        mig_throttle_guest_down();
        t0 = t1;
    }
}