- /*
- * QEMU System Emulator
- *
- * Copyright (c) 2003-2008 Fabrice Bellard
- * Copyright (c) 2011-2015 Red Hat Inc
- *
- * Authors:
- * Juan Quintela <quintela@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
- #include "qemu/osdep.h"
- #include "cpu.h"
- #include "qemu/cutils.h"
- #include "qemu/bitops.h"
- #include "qemu/bitmap.h"
- #include "qemu/main-loop.h"
- #include "xbzrle.h"
- #include "ram.h"
- #include "migration.h"
- #include "migration/register.h"
- #include "migration/misc.h"
- #include "qemu-file.h"
- #include "postcopy-ram.h"
- #include "page_cache.h"
- #include "qemu/error-report.h"
- #include "qapi/error.h"
- #include "qapi/qapi-types-migration.h"
- #include "qapi/qapi-events-migration.h"
- #include "qapi/qmp/qerror.h"
- #include "trace.h"
- #include "exec/ram_addr.h"
- #include "exec/target_page.h"
- #include "qemu/rcu_queue.h"
- #include "migration/colo.h"
- #include "block.h"
- #include "sysemu/sysemu.h"
- #include "sysemu/cpu-throttle.h"
- #include "savevm.h"
- #include "qemu/iov.h"
- #include "multifd.h"
- /***********************************************************/
- /* ram save/restore */
- /* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
- * worked for pages that were filled with the same char. We switched
- * it to only search for the zero value. And to avoid confusion with
- * RAM_SAVE_FLAG_COMPRESS_PAGE it was renamed.
- */
- #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
- #define RAM_SAVE_FLAG_ZERO 0x02
- #define RAM_SAVE_FLAG_MEM_SIZE 0x04
- #define RAM_SAVE_FLAG_PAGE 0x08
- #define RAM_SAVE_FLAG_EOS 0x10
- #define RAM_SAVE_FLAG_CONTINUE 0x20
- #define RAM_SAVE_FLAG_XBZRLE 0x40
- /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
- #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
- static inline bool is_zero_range(uint8_t *p, uint64_t size)
- {
- return buffer_is_zero(p, size);
- }
- XBZRLECacheStats xbzrle_counters;
- /* struct contains XBZRLE cache and a static page
- used by the compression */
- static struct {
- /* buffer used for XBZRLE encoding */
- uint8_t *encoded_buf;
- /* buffer for storing page content */
- uint8_t *current_buf;
- /* Cache for XBZRLE, Protected by lock. */
- PageCache *cache;
- QemuMutex lock;
- /* it will store a page full of zeros */
- uint8_t *zero_target_page;
- /* buffer used for XBZRLE decoding */
- uint8_t *decoded_buf;
- } XBZRLE;
- static void XBZRLE_cache_lock(void)
- {
- if (migrate_use_xbzrle()) {
- qemu_mutex_lock(&XBZRLE.lock);
- }
- }
- static void XBZRLE_cache_unlock(void)
- {
- if (migrate_use_xbzrle()) {
- qemu_mutex_unlock(&XBZRLE.lock);
- }
- }
- /**
- * xbzrle_cache_resize: resize the xbzrle cache
- *
- * This function is called from qmp_migrate_set_cache_size in main
- * thread, possibly while a migration is in progress. A running
- * migration may be using the cache and might finish during this call,
- * hence changes to the cache are protected by XBZRLE.lock().
- *
- * Returns 0 for success or -1 for error
- *
- * @new_size: new cache size
- * @errp: set *errp if the check failed, with reason
- */
- int xbzrle_cache_resize(int64_t new_size, Error **errp)
- {
- PageCache *new_cache;
- int64_t ret = 0;
- /* Check for truncation */
- if (new_size != (size_t)new_size) {
- error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
- "exceeding address space");
- return -1;
- }
- if (new_size == migrate_xbzrle_cache_size()) {
- /* nothing to do */
- return 0;
- }
- XBZRLE_cache_lock();
- if (XBZRLE.cache != NULL) {
- new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
- if (!new_cache) {
- ret = -1;
- goto out;
- }
- cache_fini(XBZRLE.cache);
- XBZRLE.cache = new_cache;
- }
- out:
- XBZRLE_cache_unlock();
- return ret;
- }
- bool ramblock_is_ignored(RAMBlock *block)
- {
- return !qemu_ram_is_migratable(block) ||
- (migrate_ignore_shared() && qemu_ram_is_shared(block));
- }
- #undef RAMBLOCK_FOREACH
- int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
- {
- RAMBlock *block;
- int ret = 0;
- RCU_READ_LOCK_GUARD();
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- ret = func(block, opaque);
- if (ret) {
- break;
- }
- }
- return ret;
- }
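- /*
- * Allocate the receivedmap bitmap for every migratable RAMBlock.
- * Each bit tracks one target page received on the destination.
- */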
- static void ramblock_recv_map_init(void)
- {
- RAMBlock *rb;
- RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
- assert(!rb->receivedmap);
- rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
- }
- }
- int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
- {
- return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
- rb->receivedmap);
- }
- bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
- {
- return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
- }
- void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
- {
- set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
- }
- void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
- size_t nr)
- {
- bitmap_set_atomic(rb->receivedmap,
- ramblock_recv_bitmap_offset(host_addr, rb),
- nr);
- }
- #define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
- /*
- * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
- *
- * Returns the number of bytes sent (>0) on success, or <0 on error.
- */
- int64_t ramblock_recv_bitmap_send(QEMUFile *file,
- const char *block_name)
- {
- RAMBlock *block = qemu_ram_block_by_name(block_name);
- unsigned long *le_bitmap, nbits;
- uint64_t size;
- if (!block) {
- error_report("%s: invalid block name: %s", __func__, block_name);
- return -1;
- }
- nbits = block->used_length >> TARGET_PAGE_BITS;
- /*
- * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
- * machines we may need 4 more bytes for padding (see below
- * comment). So extend it a bit beforehand.
- */
- le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
- /*
- * Always use little endian when sending the bitmap. This is
- * required when the source and destination VMs are not using the
- * same endianness. (Note: big endian won't work.)
- */
- bitmap_to_le(le_bitmap, block->receivedmap, nbits);
- /* Size of the bitmap, in bytes */
- size = DIV_ROUND_UP(nbits, 8);
- /*
- * size is always aligned to 8 bytes for 64bit machines, but it
- * may not be true for 32bit machines. We need this padding to
- * make sure the migration can survive even between 32bit and
- * 64bit machines.
- */
- size = ROUND_UP(size, 8);
- qemu_put_be64(file, size);
- qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
- /*
- * Mark as an end, in case the middle part is screwed up due to
- * some "mysterious" reason.
- */
- qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
- qemu_fflush(file);
- g_free(le_bitmap);
- if (qemu_file_get_error(file)) {
- return qemu_file_get_error(file);
- }
- return size + sizeof(size);
- }
- /*
- * An outstanding page request, on the source, having been received
- * and queued
- */
- struct RAMSrcPageRequest {
- RAMBlock *rb;
- hwaddr offset;
- hwaddr len;
- QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
- };
- /* State of RAM for migration */
- struct RAMState {
- /* QEMUFile used for this migration */
- QEMUFile *f;
- /* Last block that we have visited searching for dirty pages */
- RAMBlock *last_seen_block;
- /* Last block from where we have sent data */
- RAMBlock *last_sent_block;
- /* Last dirty target page we have sent */
- ram_addr_t last_page;
- /* last ram version we have seen */
- uint32_t last_version;
- /* We are in the first round */
- bool ram_bulk_stage;
- /* The free page optimization is enabled */
- bool fpo_enabled;
- /* How many times we have dirty too many pages */
- int dirty_rate_high_cnt;
- /* these variables are used for bitmap sync */
- /* last time we did a full bitmap_sync */
- int64_t time_last_bitmap_sync;
- /* bytes transferred at start_time */
- uint64_t bytes_xfer_prev;
- /* number of dirty pages since start_time */
- uint64_t num_dirty_pages_period;
- /* xbzrle misses since the beginning of the period */
- uint64_t xbzrle_cache_miss_prev;
- /* Amount of xbzrle pages since the beginning of the period */
- uint64_t xbzrle_pages_prev;
- /* Amount of xbzrle encoded bytes since the beginning of the period */
- uint64_t xbzrle_bytes_prev;
- /* compression statistics since the beginning of the period */
- /* number of times there was no free thread to compress data */
- uint64_t compress_thread_busy_prev;
- /* amount of bytes after compression */
- uint64_t compressed_size_prev;
- /* amount of compressed pages */
- uint64_t compress_pages_prev;
- /* total handled target pages at the beginning of period */
- uint64_t target_page_count_prev;
- /* total handled target pages since start */
- uint64_t target_page_count;
- /* number of dirty bits in the bitmap */
- uint64_t migration_dirty_pages;
- /* Protects modification of the bitmap and migration dirty pages */
- QemuMutex bitmap_mutex;
- /* The RAMBlock used in the last src_page_requests */
- RAMBlock *last_req_rb;
- /* Queue of outstanding page requests from the destination */
- QemuMutex src_page_req_mutex;
- QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
- };
- typedef struct RAMState RAMState;
- static RAMState *ram_state;
- static NotifierWithReturnList precopy_notifier_list;
- void precopy_infrastructure_init(void)
- {
- notifier_with_return_list_init(&precopy_notifier_list);
- }
- void precopy_add_notifier(NotifierWithReturn *n)
- {
- notifier_with_return_list_add(&precopy_notifier_list, n);
- }
- void precopy_remove_notifier(NotifierWithReturn *n)
- {
- notifier_with_return_remove(n);
- }
- int precopy_notify(PrecopyNotifyReason reason, Error **errp)
- {
- PrecopyNotifyData pnd;
- pnd.reason = reason;
- pnd.errp = errp;
- return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
- }
- void precopy_enable_free_page_optimization(void)
- {
- if (!ram_state) {
- return;
- }
- ram_state->fpo_enabled = true;
- }
- uint64_t ram_bytes_remaining(void)
- {
- return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
- 0;
- }
- MigrationStats ram_counters;
- /* used by the search for pages to send */
- struct PageSearchStatus {
- /* Current block being searched */
- RAMBlock *block;
- /* Current page to search from */
- unsigned long page;
- /* Set once we wrap around */
- bool complete_round;
- };
- typedef struct PageSearchStatus PageSearchStatus;
- CompressionStats compression_counters;
- struct CompressParam {
- bool done;
- bool quit;
- bool zero_page;
- QEMUFile *file;
- QemuMutex mutex;
- QemuCond cond;
- RAMBlock *block;
- ram_addr_t offset;
- /* internally used fields */
- z_stream stream;
- uint8_t *originbuf;
- };
- typedef struct CompressParam CompressParam;
- struct DecompressParam {
- bool done;
- bool quit;
- QemuMutex mutex;
- QemuCond cond;
- void *des;
- uint8_t *compbuf;
- int len;
- z_stream stream;
- };
- typedef struct DecompressParam DecompressParam;
- static CompressParam *comp_param;
- static QemuThread *compress_threads;
- /* comp_done_cond is used to wake up the migration thread when
- * one of the compression threads has finished the compression.
- * comp_done_lock is used to co-work with comp_done_cond.
- */
- static QemuMutex comp_done_lock;
- static QemuCond comp_done_cond;
- /* The empty QEMUFileOps will be used by file in CompressParam */
- static const QEMUFileOps empty_ops = { };
- static QEMUFile *decomp_file;
- static DecompressParam *decomp_param;
- static QemuThread *decompress_threads;
- static QemuMutex decomp_done_lock;
- static QemuCond decomp_done_cond;
- static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
- ram_addr_t offset, uint8_t *source_buf);
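- /*
- * Body of a compression worker thread: wait until a request is
- * posted in param->block, compress that page into param->file,
- * then mark the request done and signal comp_done_cond. The loop
- * exits once param->quit is set by the cleanup path.
- */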
- static void *do_data_compress(void *opaque)
- {
- CompressParam *param = opaque;
- RAMBlock *block;
- ram_addr_t offset;
- bool zero_page;
- qemu_mutex_lock(¶m->mutex);
- while (!param->quit) {
- if (param->block) {
- block = param->block;
- offset = param->offset;
- param->block = NULL;
- qemu_mutex_unlock(¶m->mutex);
- zero_page = do_compress_ram_page(param->file, ¶m->stream,
- block, offset, param->originbuf);
- qemu_mutex_lock(&comp_done_lock);
- param->done = true;
- param->zero_page = zero_page;
- qemu_cond_signal(&comp_done_cond);
- qemu_mutex_unlock(&comp_done_lock);
- qemu_mutex_lock(¶m->mutex);
- } else {
- qemu_cond_wait(¶m->cond, ¶m->mutex);
- }
- }
- qemu_mutex_unlock(¶m->mutex);
- return NULL;
- }
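- /*
- * Ask each compression thread to quit, join it, and release its
- * per-thread state (mutex, cond, zlib stream, buffer and file).
- */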
- static void compress_threads_save_cleanup(void)
- {
- int i, thread_count;
- if (!migrate_use_compression() || !comp_param) {
- return;
- }
- thread_count = migrate_compress_threads();
- for (i = 0; i < thread_count; i++) {
- /*
- * we use it as an indicator of whether the thread has been
- * properly initialized or not
- */
- if (!comp_param[i].file) {
- break;
- }
- qemu_mutex_lock(&comp_param[i].mutex);
- comp_param[i].quit = true;
- qemu_cond_signal(&comp_param[i].cond);
- qemu_mutex_unlock(&comp_param[i].mutex);
- qemu_thread_join(compress_threads + i);
- qemu_mutex_destroy(&comp_param[i].mutex);
- qemu_cond_destroy(&comp_param[i].cond);
- deflateEnd(&comp_param[i].stream);
- g_free(comp_param[i].originbuf);
- qemu_fclose(comp_param[i].file);
- comp_param[i].file = NULL;
- }
- qemu_mutex_destroy(&comp_done_lock);
- qemu_cond_destroy(&comp_done_cond);
- g_free(compress_threads);
- g_free(comp_param);
- compress_threads = NULL;
- comp_param = NULL;
- }
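- /*
- * Allocate per-thread compression state and spawn one "compress"
- * thread per migrate_compress_threads(). Returns 0 on success or
- * -1 on error, cleaning up any partially initialized threads.
- */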
- static int compress_threads_save_setup(void)
- {
- int i, thread_count;
- if (!migrate_use_compression()) {
- return 0;
- }
- thread_count = migrate_compress_threads();
- compress_threads = g_new0(QemuThread, thread_count);
- comp_param = g_new0(CompressParam, thread_count);
- qemu_cond_init(&comp_done_cond);
- qemu_mutex_init(&comp_done_lock);
- for (i = 0; i < thread_count; i++) {
- comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
- if (!comp_param[i].originbuf) {
- goto exit;
- }
- if (deflateInit(&comp_param[i].stream,
- migrate_compress_level()) != Z_OK) {
- g_free(comp_param[i].originbuf);
- goto exit;
- }
- /* comp_param[i].file is just used as a dummy buffer to save data,
- * set its ops to empty.
- */
- comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
- comp_param[i].done = true;
- comp_param[i].quit = false;
- qemu_mutex_init(&comp_param[i].mutex);
- qemu_cond_init(&comp_param[i].cond);
- qemu_thread_create(compress_threads + i, "compress",
- do_data_compress, comp_param + i,
- QEMU_THREAD_JOINABLE);
- }
- return 0;
- exit:
- compress_threads_save_cleanup();
- return -1;
- }
- /**
- * save_page_header: write page header to wire
- *
- * If this is the 1st block, it also writes the block identification
- *
- * Returns the number of bytes written
- *
- * @rs: current RAM state
- * @f: QEMUFile where to send the data
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- * in the lower bits, it contains flags
- */
- static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
- ram_addr_t offset)
- {
- size_t size, len;
- if (block == rs->last_sent_block) {
- offset |= RAM_SAVE_FLAG_CONTINUE;
- }
- qemu_put_be64(f, offset);
- size = 8;
- if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
- len = strlen(block->idstr);
- qemu_put_byte(f, len);
- qemu_put_buffer(f, (uint8_t *)block->idstr, len);
- size += 1 + len;
- rs->last_sent_block = block;
- }
- return size;
- }
- /**
- * mig_throttle_guest_down: throttle down the guest
- *
- * Reduce amount of guest cpu execution to hopefully slow down memory
- * writes. If guest dirty memory rate is reduced below the rate at
- * which we can transfer pages to the destination then we should be
- * able to complete migration. Some workloads dirty memory way too
- * fast and will not effectively converge, even with auto-converge.
- */
- static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
- uint64_t bytes_dirty_threshold)
- {
- MigrationState *s = migrate_get_current();
- uint64_t pct_initial = s->parameters.cpu_throttle_initial;
- uint64_t pct_increment = s->parameters.cpu_throttle_increment;
- bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
- int pct_max = s->parameters.max_cpu_throttle;
- uint64_t throttle_now = cpu_throttle_get_percentage();
- uint64_t cpu_now, cpu_ideal, throttle_inc;
- /* We have not started throttling yet. Let's start it. */
- if (!cpu_throttle_active()) {
- cpu_throttle_set(pct_initial);
- } else {
- /* Throttling already on, just increase the rate */
- if (!pct_tailslow) {
- throttle_inc = pct_increment;
- } else {
- /* Compute the ideal CPU percentage used by the guest, which may
- * make the dirty rate match the dirty rate threshold. */
- cpu_now = 100 - throttle_now;
- cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
- bytes_dirty_period);
- throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
- }
- cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
- }
- }
- /**
- * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
- *
- * @rs: current RAM state
- * @current_addr: address for the zero page
- *
- * Update the xbzrle cache to reflect a page that's been sent as all 0.
- * The important thing is that a stale (not-yet-0'd) page be replaced
- * by the new data.
- * As a bonus, if the page wasn't in the cache it gets added so that
- * when a small write is made into the 0'd page it gets XBZRLE sent.
- */
- static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
- {
- if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
- return;
- }
- /* We don't care if this fails to allocate a new cache page
- * as long as it updated an old one */
- cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
- ram_counters.dirty_sync_count);
- }
- #define ENCODING_FLAG_XBZRLE 0x1
- /**
- * save_xbzrle_page: compress and send current page
- *
- * Returns: 1 means that we wrote the page
- * 0 means that page is identical to the one already sent
- * -1 means that xbzrle would be longer than normal
- *
- * @rs: current RAM state
- * @current_data: pointer to the address of the page contents
- * @current_addr: addr of the page
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
- */
- static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
- ram_addr_t current_addr, RAMBlock *block,
- ram_addr_t offset, bool last_stage)
- {
- int encoded_len = 0, bytes_xbzrle;
- uint8_t *prev_cached_page;
- if (!cache_is_cached(XBZRLE.cache, current_addr,
- ram_counters.dirty_sync_count)) {
- xbzrle_counters.cache_miss++;
- if (!last_stage) {
- if (cache_insert(XBZRLE.cache, current_addr, *current_data,
- ram_counters.dirty_sync_count) == -1) {
- return -1;
- } else {
- /* update *current_data when the page has been
- inserted into cache */
- *current_data = get_cached_data(XBZRLE.cache, current_addr);
- }
- }
- return -1;
- }
- /*
- * Reaching here means the page has hit the xbzrle cache, no matter what
- * encoding result it is (normal encoding, overflow or skipping the page),
- * count the page as encoded. This is used to calculate the encoding rate.
- *
- * Example: 2 pages (8KB) being encoded, first page encoding generates 2KB,
- * 2nd page turns out to be skipped (i.e. no new bytes written to the
- * page), the overall encoding rate will be 8KB / 2KB = 4, which has the
- * skipped page included. In this way, the encoding rate can tell if the
- * guest page is good for xbzrle encoding.
- */
- xbzrle_counters.pages++;
- prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
- /* save current buffer into memory */
- memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
- /* XBZRLE encoding (if there is no overflow) */
- encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
- TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
- TARGET_PAGE_SIZE);
- /*
- * Update the cache contents, so that it corresponds to the data
- * sent, in all cases except where we skip the page.
- */
- if (!last_stage && encoded_len != 0) {
- memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
- /*
- * In the case where we couldn't compress, ensure that the caller
- * sends the data from the cache, since the guest might have
- * changed the RAM since we copied it.
- */
- *current_data = prev_cached_page;
- }
- if (encoded_len == 0) {
- trace_save_xbzrle_page_skipping();
- return 0;
- } else if (encoded_len == -1) {
- trace_save_xbzrle_page_overflow();
- xbzrle_counters.overflow++;
- xbzrle_counters.bytes += TARGET_PAGE_SIZE;
- return -1;
- }
- /* Send XBZRLE based compressed page */
- bytes_xbzrle = save_page_header(rs, rs->f, block,
- offset | RAM_SAVE_FLAG_XBZRLE);
- qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
- qemu_put_be16(rs->f, encoded_len);
- qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
- bytes_xbzrle += encoded_len + 1 + 2;
- /*
- * Like compressed_size (please see update_compress_thread_counts),
- * the xbzrle encoded bytes don't count the 8 byte header with
- * RAM_SAVE_FLAG_CONTINUE.
- */
- xbzrle_counters.bytes += bytes_xbzrle - 8;
- ram_counters.transferred += bytes_xbzrle;
- return 1;
- }
- /**
- * migration_bitmap_find_dirty: find the next dirty page from start
- *
- * Returns the page offset within memory region of the start of a dirty page
- *
- * @rs: current RAM state
- * @rb: RAMBlock where to search for dirty pages
- * @start: page where we start the search
- */
- static inline
- unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
- unsigned long start)
- {
- unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
- unsigned long *bitmap = rb->bmap;
- unsigned long next;
- if (ramblock_is_ignored(rb)) {
- return size;
- }
- /*
- * When the free page optimization is enabled, we need to check the bitmap
- * to send the non-free pages rather than all the pages in the bulk stage.
- */
- if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
- next = start + 1;
- } else {
- next = find_next_bit(bitmap, size, start);
- }
- return next;
- }
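- /*
- * Test and clear the dirty bit of @page in @rb's bitmap, updating
- * the dirty page count. Returns true if the page was dirty.
- */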
- static inline bool migration_bitmap_clear_dirty(RAMState *rs,
- RAMBlock *rb,
- unsigned long page)
- {
- bool ret;
- qemu_mutex_lock(&rs->bitmap_mutex);
- /*
- * Clear dirty bitmap if needed. This _must_ be called before we
- * send any of the page in the chunk because we need to make sure
- * we can capture further page content changes when we sync dirty
- * log the next time. So as long as we are going to send any of
- * the page in the chunk we clear the remote dirty bitmap for all.
- * Clearing it earlier won't be a problem, but too late will.
- */
- if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
- uint8_t shift = rb->clear_bmap_shift;
- hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
- hwaddr start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size);
- /*
- * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
- * can make things easier sometimes since then start address
- * of the small chunk will always be 64 pages aligned so the
- * bitmap will always be aligned to unsigned long. We should
- * even be able to remove this restriction but I'm simply
- * keeping it.
- */
- assert(shift >= 6);
- trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
- memory_region_clear_dirty_bitmap(rb->mr, start, size);
- }
- ret = test_and_clear_bit(page, rb->bmap);
- if (ret) {
- rs->migration_dirty_pages--;
- }
- qemu_mutex_unlock(&rs->bitmap_mutex);
- return ret;
- }
- /* Called with RCU critical section */
- static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
- {
- uint64_t new_dirty_pages =
- cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
- rs->migration_dirty_pages += new_dirty_pages;
- rs->num_dirty_pages_period += new_dirty_pages;
- }
- /**
- * ram_pagesize_summary: calculate all the pagesizes of a VM
- *
- * Returns a summary bitmap of the page sizes of all RAMBlocks
- *
- * For VMs with just normal pages this is equivalent to the host page
- * size. If it's got some huge pages then it's the OR of all the
- * different page sizes.
- */
- uint64_t ram_pagesize_summary(void)
- {
- RAMBlock *block;
- uint64_t summary = 0;
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- summary |= block->page_size;
- }
- return summary;
- }
- uint64_t ram_get_total_transferred_pages(void)
- {
- return ram_counters.normal + ram_counters.duplicate +
- compression_counters.pages + xbzrle_counters.pages;
- }
- static void migration_update_rates(RAMState *rs, int64_t end_time)
- {
- uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
- double compressed_size;
- /* calculate period counters */
- ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
- / (end_time - rs->time_last_bitmap_sync);
- if (!page_count) {
- return;
- }
- if (migrate_use_xbzrle()) {
- double encoded_size, unencoded_size;
- xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
- rs->xbzrle_cache_miss_prev) / page_count;
- rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
- unencoded_size = (xbzrle_counters.pages - rs->xbzrle_pages_prev) *
- TARGET_PAGE_SIZE;
- encoded_size = xbzrle_counters.bytes - rs->xbzrle_bytes_prev;
- if (xbzrle_counters.pages == rs->xbzrle_pages_prev || !encoded_size) {
- xbzrle_counters.encoding_rate = 0;
- } else {
- xbzrle_counters.encoding_rate = unencoded_size / encoded_size;
- }
- rs->xbzrle_pages_prev = xbzrle_counters.pages;
- rs->xbzrle_bytes_prev = xbzrle_counters.bytes;
- }
- if (migrate_use_compression()) {
- compression_counters.busy_rate = (double)(compression_counters.busy -
- rs->compress_thread_busy_prev) / page_count;
- rs->compress_thread_busy_prev = compression_counters.busy;
- compressed_size = compression_counters.compressed_size -
- rs->compressed_size_prev;
- if (compressed_size) {
- double uncompressed_size = (compression_counters.pages -
- rs->compress_pages_prev) * TARGET_PAGE_SIZE;
- /* Compression-Ratio = Uncompressed-size / Compressed-size */
- compression_counters.compression_rate =
- uncompressed_size / compressed_size;
- rs->compress_pages_prev = compression_counters.pages;
- rs->compressed_size_prev = compression_counters.compressed_size;
- }
- }
- }
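- /*
- * Start or increase auto-converge throttling when, for two
- * consecutive sync periods, the guest dirtied more bytes than the
- * configured percentage of what was transferred in that period.
- */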
- static void migration_trigger_throttle(RAMState *rs)
- {
- MigrationState *s = migrate_get_current();
- uint64_t threshold = s->parameters.throttle_trigger_threshold;
- uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
- uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
- uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
- /* During block migration the auto-converge logic incorrectly detects
- * that ram migration makes no progress. Avoid this by disabling the
- * throttling logic during the bulk phase of block migration. */
- if (migrate_auto_converge() && !blk_mig_bulk_active()) {
- /* The following detection logic can be refined later. For now:
- Check to see if the ratio between dirtied bytes and the approx.
- amount of bytes that just got transferred since the last time
- we were in this routine reaches the threshold. If that happens
- twice, start or increase throttling. */
- if ((bytes_dirty_period > bytes_dirty_threshold) &&
- (++rs->dirty_rate_high_cnt >= 2)) {
- trace_migration_throttle();
- rs->dirty_rate_high_cnt = 0;
- mig_throttle_guest_down(bytes_dirty_period,
- bytes_dirty_threshold);
- }
- }
- }
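- /*
- * Sync the dirty log into the migration bitmaps of all RAMBlocks
- * and, at most once per second, update the period counters and
- * decide whether throttling is needed.
- */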
- static void migration_bitmap_sync(RAMState *rs)
- {
- RAMBlock *block;
- int64_t end_time;
- ram_counters.dirty_sync_count++;
- if (!rs->time_last_bitmap_sync) {
- rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
- }
- trace_migration_bitmap_sync_start();
- memory_global_dirty_log_sync();
- qemu_mutex_lock(&rs->bitmap_mutex);
- WITH_RCU_READ_LOCK_GUARD() {
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- ramblock_sync_dirty_bitmap(rs, block);
- }
- ram_counters.remaining = ram_bytes_remaining();
- }
- qemu_mutex_unlock(&rs->bitmap_mutex);
- memory_global_after_dirty_log_sync();
- trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
- end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
- /* more than 1 second = 1000 milliseconds */
- if (end_time > rs->time_last_bitmap_sync + 1000) {
- migration_trigger_throttle(rs);
- migration_update_rates(rs, end_time);
- rs->target_page_count_prev = rs->target_page_count;
- /* reset period counters */
- rs->time_last_bitmap_sync = end_time;
- rs->num_dirty_pages_period = 0;
- rs->bytes_xfer_prev = ram_counters.transferred;
- }
- if (migrate_use_events()) {
- qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
- }
- }
- static void migration_bitmap_sync_precopy(RAMState *rs)
- {
- Error *local_err = NULL;
- /*
- * The current notifier usage is just an optimization to migration, so we
- * don't stop the normal migration process in the error case.
- */
- if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
- error_report_err(local_err);
- local_err = NULL;
- }
- migration_bitmap_sync(rs);
- if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
- error_report_err(local_err);
- }
- }
- /**
- * save_zero_page_to_file: send the zero page to the file
- *
- * Returns the size of data written to the file, 0 means the page is not
- * a zero page
- *
- * @rs: current RAM state
- * @file: the file where the data is saved
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- */
- static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
- RAMBlock *block, ram_addr_t offset)
- {
- uint8_t *p = block->host + offset;
- int len = 0;
- if (is_zero_range(p, TARGET_PAGE_SIZE)) {
- len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
- qemu_put_byte(file, 0);
- len += 1;
- }
- return len;
- }
- /**
- * save_zero_page: send the zero page to the stream
- *
- * Returns the number of pages written.
- *
- * @rs: current RAM state
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- */
- static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
- {
- int len = save_zero_page_to_file(rs, rs->f, block, offset);
- if (len) {
- ram_counters.duplicate++;
- ram_counters.transferred += len;
- return 1;
- }
- return -1;
- }
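- /*
- * Discard pages that have already been sent, but only when
- * release-ram is enabled and we are in postcopy.
- */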
- static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
- {
- if (!migrate_release_ram() || !migration_in_postcopy()) {
- return;
- }
- ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS);
- }
- /*
- * @pages: the number of pages written by the control path,
- * < 0 - error
- * > 0 - number of pages written
- *
- * Return true if the page has been saved, otherwise false is returned.
- */
- static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
- int *pages)
- {
- uint64_t bytes_xmit = 0;
- int ret;
- *pages = -1;
- ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
- &bytes_xmit);
- if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
- return false;
- }
- if (bytes_xmit) {
- ram_counters.transferred += bytes_xmit;
- *pages = 1;
- }
- if (ret == RAM_SAVE_CONTROL_DELAYED) {
- return true;
- }
- if (bytes_xmit > 0) {
- ram_counters.normal++;
- } else if (bytes_xmit == 0) {
- ram_counters.duplicate++;
- }
- return true;
- }
- /*
- * directly send the page to the stream
- *
- * Returns the number of pages written.
- *
- * @rs: current RAM state
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- * @buf: the page to be sent
- * @async: send the page asynchronously
- */
- static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
- uint8_t *buf, bool async)
- {
- ram_counters.transferred += save_page_header(rs, rs->f, block,
- offset | RAM_SAVE_FLAG_PAGE);
- if (async) {
- qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
- migrate_release_ram() &&
- migration_in_postcopy());
- } else {
- qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
- }
- ram_counters.transferred += TARGET_PAGE_SIZE;
- ram_counters.normal++;
- return 1;
- }
- /**
- * ram_save_page: send the given page to the stream
- *
- * Returns the number of pages written.
- * < 0 - error
- * >=0 - Number of pages written - this might legally be 0
- * if xbzrle noticed the page was the same.
- *
- * @rs: current RAM state
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
- */
- static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
- {
- int pages = -1;
- uint8_t *p;
- bool send_async = true;
- RAMBlock *block = pss->block;
- ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
- ram_addr_t current_addr = block->offset + offset;
- p = block->host + offset;
- trace_ram_save_page(block->idstr, (uint64_t)offset, p);
- XBZRLE_cache_lock();
- if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
- migrate_use_xbzrle()) {
- pages = save_xbzrle_page(rs, &p, current_addr, block,
- offset, last_stage);
- if (!last_stage) {
- /* Can't send this cached data async, since the cache page
- * might get updated before it gets to the wire
- */
- send_async = false;
- }
- }
- /* XBZRLE overflow or normal page */
- if (pages == -1) {
- pages = save_normal_page(rs, block, offset, p, send_async);
- }
- XBZRLE_cache_unlock();
- return pages;
- }
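- /*
- * Queue the page on a multifd channel. Returns 1 on success or -1
- * if the page could not be queued.
- */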
- static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
- ram_addr_t offset)
- {
- if (multifd_queue_page(rs->f, block, offset) < 0) {
- return -1;
- }
- ram_counters.normal++;
- return 1;
- }
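- /*
- * Compress one page into @f, or send it as a zero page when it is
- * all zeroes. Returns true if it was sent as a zero page.
- */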
- static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
- ram_addr_t offset, uint8_t *source_buf)
- {
- RAMState *rs = ram_state;
- uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
- bool zero_page = false;
- int ret;
- if (save_zero_page_to_file(rs, f, block, offset)) {
- zero_page = true;
- goto exit;
- }
- save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
- /*
- * copy it to an internal buffer to avoid it being modified by the
- * VM so that we can catch any error during compression and
- * decompression
- */
- memcpy(source_buf, p, TARGET_PAGE_SIZE);
- ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
- if (ret < 0) {
- qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
- error_report("compressed data failed!");
- return false;
- }
- exit:
- ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
- return zero_page;
- }
- static void
- update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
- {
- ram_counters.transferred += bytes_xmit;
- if (param->zero_page) {
- ram_counters.duplicate++;
- return;
- }
- /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
- compression_counters.compressed_size += bytes_xmit - 8;
- compression_counters.pages++;
- }
- static bool save_page_use_compression(RAMState *rs);
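- /*
- * Wait until every compression thread has finished its current
- * request, then flush each thread's buffered output into the
- * migration stream.
- */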
- static void flush_compressed_data(RAMState *rs)
- {
- int idx, len, thread_count;
- if (!save_page_use_compression(rs)) {
- return;
- }
- thread_count = migrate_compress_threads();
- qemu_mutex_lock(&comp_done_lock);
- for (idx = 0; idx < thread_count; idx++) {
- while (!comp_param[idx].done) {
- qemu_cond_wait(&comp_done_cond, &comp_done_lock);
- }
- }
- qemu_mutex_unlock(&comp_done_lock);
- for (idx = 0; idx < thread_count; idx++) {
- qemu_mutex_lock(&comp_param[idx].mutex);
- if (!comp_param[idx].quit) {
- len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
- /*
- * it's safe to fetch zero_page without holding comp_done_lock
- * as there is no further request submitted to the thread,
- * i.e., the thread should be waiting for a request at this point.
- */
- update_compress_thread_counts(&comp_param[idx], len);
- }
- qemu_mutex_unlock(&comp_param[idx].mutex);
- }
- }
- static inline void set_compress_params(CompressParam *param, RAMBlock *block,
- ram_addr_t offset)
- {
- param->block = block;
- param->offset = offset;
- }
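- /*
- * Hand the page to an idle compression thread, flushing that
- * thread's previous output first. If none is idle and
- * 'compress-wait-thread' is set, block until one becomes free.
- * Returns 1 if the page was queued, otherwise -1.
- */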
- static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
- ram_addr_t offset)
- {
- int idx, thread_count, bytes_xmit = -1, pages = -1;
- bool wait = migrate_compress_wait_thread();
- thread_count = migrate_compress_threads();
- qemu_mutex_lock(&comp_done_lock);
- retry:
- for (idx = 0; idx < thread_count; idx++) {
- if (comp_param[idx].done) {
- comp_param[idx].done = false;
- bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
- qemu_mutex_lock(&comp_param[idx].mutex);
- set_compress_params(&comp_param[idx], block, offset);
- qemu_cond_signal(&comp_param[idx].cond);
- qemu_mutex_unlock(&comp_param[idx].mutex);
- pages = 1;
- update_compress_thread_counts(&comp_param[idx], bytes_xmit);
- break;
- }
- }
- /*
- * wait for the free thread if the user specifies 'compress-wait-thread',
- * otherwise we will post the page out in the main thread as a normal page.
- */
- if (pages < 0 && wait) {
- qemu_cond_wait(&comp_done_cond, &comp_done_lock);
- goto retry;
- }
- qemu_mutex_unlock(&comp_done_lock);
- return pages;
- }
- /**
- * find_dirty_block: find the next dirty page and update any state
- * associated with the search process.
- *
- * Returns true if a page is found
- *
- * @rs: current RAM state
- * @pss: data about the state of the current dirty page scan
- * @again: set to false if the search has scanned the whole of RAM
- */
- static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
- {
- pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
- if (pss->complete_round && pss->block == rs->last_seen_block &&
- pss->page >= rs->last_page) {
- /*
- * We've been once around the RAM and haven't found anything.
- * Give up.
- */
- *again = false;
- return false;
- }
- if ((((ram_addr_t)pss->page) << TARGET_PAGE_BITS)
- >= pss->block->used_length) {
- /* Didn't find anything in this RAM Block */
- pss->page = 0;
- pss->block = QLIST_NEXT_RCU(pss->block, next);
- if (!pss->block) {
- /*
- * If memory migration starts over, we will meet a dirtied page
- * which may still exist in the compression threads' ring, so we
- * should flush the compressed data to make sure the new page
- * is not overwritten by the old one in the destination.
- *
- * Also, if xbzrle is on, stop using the data compression at this
- * point. In theory, xbzrle can do better than compression.
- */
- flush_compressed_data(rs);
- /* Hit the end of the list */
- pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
- /* Flag that we've looped */
- pss->complete_round = true;
- rs->ram_bulk_stage = false;
- }
- /* Didn't find anything this time, but try again on the new block */
- *again = true;
- return false;
- } else {
- /* Can go around again, but... */
- *again = true;
- /* We've found something so probably don't need to */
- return true;
- }
- }
- /**
- * unqueue_page: gets a page of the queue
- *
- * Helper for 'get_queued_page' - gets a page off the queue
- *
- * Returns the block of the page (or NULL if none available)
- *
- * @rs: current RAM state
- * @offset: used to return the offset within the RAMBlock
- */
- static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
- {
- RAMBlock *block = NULL;
- if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
- return NULL;
- }
- QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
- if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
- struct RAMSrcPageRequest *entry =
- QSIMPLEQ_FIRST(&rs->src_page_requests);
- block = entry->rb;
- *offset = entry->offset;
- if (entry->len > TARGET_PAGE_SIZE) {
- entry->len -= TARGET_PAGE_SIZE;
- entry->offset += TARGET_PAGE_SIZE;
- } else {
- memory_region_unref(block->mr);
- QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
- g_free(entry);
- migration_consume_urgent_request();
- }
- }
- return block;
- }
- /**
- * get_queued_page: unqueue a page from the postcopy requests
- *
- * Skips pages that are already sent (!dirty)
- *
- * Returns true if a queued page is found
- *
- * @rs: current RAM state
- * @pss: data about the state of the current dirty page scan
- */
- static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
- {
- RAMBlock *block;
- ram_addr_t offset;
- bool dirty;
- do {
- block = unqueue_page(rs, &offset);
- /*
- * We're sending this page, and since it's postcopy nothing else
- * will dirty it, and we must make sure it doesn't get sent again
- * even if this queue request was received after the background
- * search already sent it.
- */
- if (block) {
- unsigned long page;
- page = offset >> TARGET_PAGE_BITS;
- dirty = test_bit(page, block->bmap);
- if (!dirty) {
- trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
- page);
- } else {
- trace_get_queued_page(block->idstr, (uint64_t)offset, page);
- }
- }
- } while (block && !dirty);
- if (block) {
- /*
- * As soon as we start servicing pages out of order, then we have
- * to kill the bulk stage, since the bulk stage assumes
- * in (migration_bitmap_find_and_reset_dirty) that every page is
- * dirty, that's no longer true.
- */
- rs->ram_bulk_stage = false;
- /*
- * We want the background search to continue from the queued page
- * since the guest is likely to want other pages near to the page
- * it just requested.
- */
- pss->block = block;
- pss->page = offset >> TARGET_PAGE_BITS;
- /*
- * This unqueued page would break the "one round" check, even if
- * it is really rare.
- */
- pss->complete_round = false;
- }
- return !!block;
- }
- /**
- * migration_page_queue_free: drop any remaining pages in the ram
- * request queue
- *
- * It should be empty at the end anyway, but in error cases there may
- * be some left. In case there are any pages left, we drop them.
- *
- */
- static void migration_page_queue_free(RAMState *rs)
- {
- struct RAMSrcPageRequest *mspr, *next_mspr;
- /* This queue generally should be empty - but in the case of a failed
- * migration might have some droppings in.
- */
- RCU_READ_LOCK_GUARD();
- QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
- memory_region_unref(mspr->rb->mr);
- QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
- g_free(mspr);
- }
- }
- /**
- * ram_save_queue_pages: queue the page for transmission
- *
- * A request from postcopy destination for example.
- *
- * Returns zero on success or negative on error
- *
- * @rbname: Name of the RAMBlock of the request. NULL means the
- * same as the last one.
- * @start: starting address from the start of the RAMBlock
- * @len: length (in bytes) to send
- */
- int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
- {
- RAMBlock *ramblock;
- RAMState *rs = ram_state;
- ram_counters.postcopy_requests++;
- RCU_READ_LOCK_GUARD();
- if (!rbname) {
- /* Reuse last RAMBlock */
- ramblock = rs->last_req_rb;
- if (!ramblock) {
- /*
- * Shouldn't happen, we can't reuse the last RAMBlock if
- * it's the 1st request.
- */
- error_report("ram_save_queue_pages no previous block");
- return -1;
- }
- } else {
- ramblock = qemu_ram_block_by_name(rbname);
- if (!ramblock) {
- /* We shouldn't be asked for a non-existent RAMBlock */
- error_report("ram_save_queue_pages no block '%s'", rbname);
- return -1;
- }
- rs->last_req_rb = ramblock;
- }
- trace_ram_save_queue_pages(ramblock->idstr, start, len);
- if (start + len > ramblock->used_length) {
- error_report("%s request overrun start=" RAM_ADDR_FMT " len="
- RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
- __func__, start, len, ramblock->used_length);
- return -1;
- }
- struct RAMSrcPageRequest *new_entry =
- g_malloc0(sizeof(struct RAMSrcPageRequest));
- new_entry->rb = ramblock;
- new_entry->offset = start;
- new_entry->len = len;
- memory_region_ref(ramblock->mr);
- qemu_mutex_lock(&rs->src_page_req_mutex);
- QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
- migration_make_urgent_request();
- qemu_mutex_unlock(&rs->src_page_req_mutex);
- return 0;
- }
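- /*
- * Illustrative usage sketch (not part of the original code): a typical
- * caller is the return path handling a postcopy page request from the
- * destination. Assuming a block named "pc.ram" (a common but
- * hypothetical example here), a fault at guest offset 0x200000 would
- * be queued roughly as:
- *
- *     if (ram_save_queue_pages("pc.ram", 0x200000, TARGET_PAGE_SIZE)) {
- *         // error handling, e.g. fail the migration
- *     }
- *
- * get_queued_page() later dequeues it ahead of the background scan.
- */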
- static bool save_page_use_compression(RAMState *rs)
- {
- if (!migrate_use_compression()) {
- return false;
- }
- /*
- * If xbzrle is on, stop using data compression after the first
- * round of migration even if compression is enabled. In theory,
- * xbzrle can do better than compression.
- */
- if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
- return true;
- }
- return false;
- }
- /*
- * try to compress the page before posting it out, return true if the page
- * has been properly handled by compression, otherwise needs other
- * paths to handle it
- */
- static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
- {
- if (!save_page_use_compression(rs)) {
- return false;
- }
- /*
- * When starting the process of a new block, the first page of
- * the block should be sent out before other pages in the same
- * block, and all the pages in the last block should have been
- * sent out. Keeping this order is important, because the 'cont'
- * flag is used to avoid resending the block name.
- *
- * We post the first page as a normal page as compression will
- * take a lot of CPU resources.
- */
- if (block != rs->last_sent_block) {
- flush_compressed_data(rs);
- return false;
- }
- if (compress_page_with_multi_thread(rs, block, offset) > 0) {
- return true;
- }
- compression_counters.busy++;
- return false;
- }
- /**
- * ram_save_target_page: save one target page
- *
- * Returns the number of pages written
- *
- * @rs: current RAM state
- * @pss: data about the page we want to send
- * @last_stage: if we are at the completion stage
- */
- static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
- bool last_stage)
- {
- RAMBlock *block = pss->block;
- ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
- int res;
- if (control_save_page(rs, block, offset, &res)) {
- return res;
- }
- if (save_compress_page(rs, block, offset)) {
- return 1;
- }
- res = save_zero_page(rs, block, offset);
- if (res > 0) {
- /* Must let xbzrle know, otherwise a previous (now 0'd) cached
- * page would be stale
- */
- if (!save_page_use_compression(rs)) {
- XBZRLE_cache_lock();
- xbzrle_cache_zero_page(rs, block->offset + offset);
- XBZRLE_cache_unlock();
- }
- ram_release_pages(block->idstr, offset, res);
- return res;
- }
- /*
- * Do not use multifd for:
- * 1. Compression as the first page in the new block should be posted out
- * before sending the compressed page
- * 2. In postcopy as one whole host page should be placed
- */
- if (!save_page_use_compression(rs) && migrate_use_multifd()
- && !migration_in_postcopy()) {
- return ram_save_multifd_page(rs, block, offset);
- }
- return ram_save_page(rs, pss, last_stage);
- }
- /**
- * ram_save_host_page: save a whole host page
- *
- * Starting at the page in @pss, send pages up to the end of the
- * current host page. It's valid for the starting page to point into
- * the middle of a host page, in which case the remainder of the
- * host page is sent.
- * Only dirty target pages are sent. Note that the host page size may
- * be a huge page for this block.
- * The saving stops at the boundary of the used_length of the block
- * if the RAMBlock isn't a multiple of the host page size.
- *
- * Returns the number of pages written or negative on error
- *
- * @rs: current RAM state
- * @pss: data about the page we want to send
- * @last_stage: if we are at the completion stage
- */
- static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
- bool last_stage)
- {
- int tmppages, pages = 0;
- size_t pagesize_bits =
- qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
- if (ramblock_is_ignored(pss->block)) {
- error_report("block %s should not be migrated !", pss->block->idstr);
- return 0;
- }
- do {
- /* Check if the page is dirty and send it if it is */
- if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
- pss->page++;
- continue;
- }
- tmppages = ram_save_target_page(rs, pss, last_stage);
- if (tmppages < 0) {
- return tmppages;
- }
- pages += tmppages;
- pss->page++;
- /* Allow rate limiting to happen in the middle of huge pages */
- migration_rate_limit();
- } while ((pss->page & (pagesize_bits - 1)) &&
- offset_in_ramblock(pss->block,
- ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
- /* The offset we leave with is the last one we looked at */
- pss->page--;
- return pages;
- }
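- /*
- * Worked example (illustrative, not from the original source): for a
- * RAMBlock backed by 2 MiB huge pages with 4 KiB target pages,
- * pagesize_bits is 512. If pss->page enters at target page 3 within a
- * host page, the loop above sends pages 3..511 (skipping clean ones)
- * and stops when pss->page & 511 wraps to 0, i.e. at the host-page
- * boundary, leaving pss->page on the last page examined.
- */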
- /**
- * ram_find_and_save_block: finds a dirty page and sends it to f
- *
- * Called within an RCU critical section.
- *
- * Returns the number of pages written where zero means no dirty pages,
- * or negative on error
- *
- * @rs: current RAM state
- * @last_stage: if we are at the completion stage
- *
- * On systems where host-page-size > target-page-size it will send all the
- * pages in a host page that are dirty.
- */
- static int ram_find_and_save_block(RAMState *rs, bool last_stage)
- {
- PageSearchStatus pss;
- int pages = 0;
- bool again, found;
- /* No dirty page as there is zero RAM */
- if (!ram_bytes_total()) {
- return pages;
- }
- pss.block = rs->last_seen_block;
- pss.page = rs->last_page;
- pss.complete_round = false;
- if (!pss.block) {
- pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
- }
- do {
- again = true;
- found = get_queued_page(rs, &pss);
- if (!found) {
- /* priority queue empty, so just search for something dirty */
- found = find_dirty_block(rs, &pss, &again);
- }
- if (found) {
- pages = ram_save_host_page(rs, &pss, last_stage);
- }
- } while (!pages && again);
- rs->last_seen_block = pss.block;
- rs->last_page = pss.page;
- return pages;
- }
- void acct_update_position(QEMUFile *f, size_t size, bool zero)
- {
- uint64_t pages = size / TARGET_PAGE_SIZE;
- if (zero) {
- ram_counters.duplicate += pages;
- } else {
- ram_counters.normal += pages;
- ram_counters.transferred += size;
- qemu_update_position(f, size);
- }
- }
- static uint64_t ram_bytes_total_common(bool count_ignored)
- {
- RAMBlock *block;
- uint64_t total = 0;
- RCU_READ_LOCK_GUARD();
- if (count_ignored) {
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
- total += block->used_length;
- }
- } else {
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- total += block->used_length;
- }
- }
- return total;
- }
- uint64_t ram_bytes_total(void)
- {
- return ram_bytes_total_common(false);
- }
- static void xbzrle_load_setup(void)
- {
- XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
- }
- static void xbzrle_load_cleanup(void)
- {
- g_free(XBZRLE.decoded_buf);
- XBZRLE.decoded_buf = NULL;
- }
- static void ram_state_cleanup(RAMState **rsp)
- {
- if (*rsp) {
- migration_page_queue_free(*rsp);
- qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
- qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
- g_free(*rsp);
- *rsp = NULL;
- }
- }
- static void xbzrle_cleanup(void)
- {
- XBZRLE_cache_lock();
- if (XBZRLE.cache) {
- cache_fini(XBZRLE.cache);
- g_free(XBZRLE.encoded_buf);
- g_free(XBZRLE.current_buf);
- g_free(XBZRLE.zero_target_page);
- XBZRLE.cache = NULL;
- XBZRLE.encoded_buf = NULL;
- XBZRLE.current_buf = NULL;
- XBZRLE.zero_target_page = NULL;
- }
- XBZRLE_cache_unlock();
- }
- static void ram_save_cleanup(void *opaque)
- {
- RAMState **rsp = opaque;
- RAMBlock *block;
- /* The caller must hold the iothread lock or be in a BH, so there
- * is no writing race against the migration bitmap
- */
- memory_global_dirty_log_stop();
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- g_free(block->clear_bmap);
- block->clear_bmap = NULL;
- g_free(block->bmap);
- block->bmap = NULL;
- }
- xbzrle_cleanup();
- compress_threads_save_cleanup();
- ram_state_cleanup(rsp);
- }
- static void ram_state_reset(RAMState *rs)
- {
- rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
- rs->last_page = 0;
- rs->last_version = ram_list.version;
- rs->ram_bulk_stage = true;
- rs->fpo_enabled = false;
- }
- #define MAX_WAIT 50 /* ms, half buffered_file limit */
- /*
- * 'expected' is the value you expect the bitmap mostly to be full
- * of; it won't bother printing lines that are all this value.
- */
- void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
- unsigned long pages)
- {
- int64_t cur;
- int64_t linelen = 128;
- char linebuf[129];
- for (cur = 0; cur < pages; cur += linelen) {
- int64_t curb;
- bool found = false;
- /*
- * Last line; catch the case where the line length
- * is longer than remaining ram
- */
- if (cur + linelen > pages) {
- linelen = pages - cur;
- }
- for (curb = 0; curb < linelen; curb++) {
- bool thisbit = test_bit(cur + curb, todump);
- linebuf[curb] = thisbit ? '1' : '.';
- found = found || (thisbit != expected);
- }
- if (found) {
- linebuf[curb] = '\0';
- fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
- }
- }
- }
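- /*
- * Example output (illustrative): with expected == true, a line is only
- * printed when it contains at least one clean page, e.g.
- *     0x00000080 : 1111....1111 (128 characters per line)
- * where '1' marks a dirty page, '.' a clean one, and the leading value
- * is the index of the first page on the line.
- */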
- /* **** functions for postcopy ***** */
- void ram_postcopy_migrated_memory_release(MigrationState *ms)
- {
- struct RAMBlock *block;
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- unsigned long *bitmap = block->bmap;
- unsigned long range = block->used_length >> TARGET_PAGE_BITS;
- unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
- while (run_start < range) {
- unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
- ram_discard_range(block->idstr,
- ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
- ((ram_addr_t)(run_end - run_start))
- << TARGET_PAGE_BITS);
- run_start = find_next_zero_bit(bitmap, range, run_end + 1);
- }
- }
- }
- /**
- * postcopy_send_discard_bm_ram: discard a RAMBlock
- *
- * Returns zero on success
- *
- * Callback from postcopy_each_ram_send_discard for each RAMBlock
- *
- * @ms: current migration state
- * @block: RAMBlock to discard
- */
- static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
- {
- unsigned long end = block->used_length >> TARGET_PAGE_BITS;
- unsigned long current;
- unsigned long *bitmap = block->bmap;
- for (current = 0; current < end; ) {
- unsigned long one = find_next_bit(bitmap, end, current);
- unsigned long zero, discard_length;
- if (one >= end) {
- break;
- }
- zero = find_next_zero_bit(bitmap, end, one + 1);
- if (zero >= end) {
- discard_length = end - one;
- } else {
- discard_length = zero - one;
- }
- postcopy_discard_send_range(ms, one, discard_length);
- current = one + discard_length;
- }
- return 0;
- }
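- /*
- * Worked example (illustrative): for an 8-page block whose bitmap has
- * pages 2-4 and 7 dirty, the loop above emits two calls:
- * postcopy_discard_send_range(ms, 2, 3) and
- * postcopy_discard_send_range(ms, 7, 1).
- */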
- /**
- * postcopy_each_ram_send_discard: discard all RAMBlocks
- *
- * Returns 0 for success or negative for error
- *
- * Utility for the outgoing postcopy code.
- * Calls postcopy_send_discard_bm_ram for each RAMBlock
- * passing it bitmap indexes and name.
- * (qemu_ram_foreach_block ends up passing unscaled lengths
- * which would mean postcopy code would have to deal with target page)
- *
- * @ms: current migration state
- */
- static int postcopy_each_ram_send_discard(MigrationState *ms)
- {
- struct RAMBlock *block;
- int ret;
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- postcopy_discard_send_init(ms, block->idstr);
- /*
- * Postcopy sends chunks of bitmap over the wire, but it
- * just needs indexes at this point, which avoids it having
- * target-page-specific code.
- */
- ret = postcopy_send_discard_bm_ram(ms, block);
- postcopy_discard_send_finish(ms);
- if (ret) {
- return ret;
- }
- }
- return 0;
- }
- /**
- * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
- *
- * Helper for postcopy_chunk_hostpages; canonicalizes the dirty
- * bitmap so that no host page is left partially dirty.
- *
- * Postcopy requires that all target pages in a hostpage are dirty or
- * clean, not a mix. This function canonicalizes the bitmaps.
- *
- * @ms: current migration state
- * @block: block that contains the page we want to canonicalize
- */
- static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
- {
- RAMState *rs = ram_state;
- unsigned long *bitmap = block->bmap;
- unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
- unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
- unsigned long run_start;
- if (block->page_size == TARGET_PAGE_SIZE) {
- /* Easy case - TPS==HPS for a non-huge page RAMBlock */
- return;
- }
- /* Find a dirty page */
- run_start = find_next_bit(bitmap, pages, 0);
- while (run_start < pages) {
- /*
- * If the start of this run of pages is in the middle of a host
- * page, then we need to fixup this host page.
- */
- if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
- /* Find the end of this run */
- run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
- /*
- * If the end isn't at the start of a host page, then the
- * run doesn't finish at the end of a host page
- * and we need to discard.
- */
- }
- if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
- unsigned long page;
- unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
- host_ratio);
- run_start = QEMU_ALIGN_UP(run_start, host_ratio);
- /* Clean up the bitmap */
- for (page = fixup_start_addr;
- page < fixup_start_addr + host_ratio; page++) {
- /*
- * Remark them as dirty, updating the count for any pages
- * that weren't previously dirty.
- */
- rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
- }
- }
- /* Find the next dirty page for the next iteration */
- run_start = find_next_bit(bitmap, pages, run_start);
- }
- }
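- /*
- * Worked example (illustrative): with 2 MiB host pages and 4 KiB
- * target pages, host_ratio is 512. If only target pages 510..513 are
- * dirty, the run starts mid host page 0 and ends mid host page 1, so
- * the pass above re-marks pages 0..511 and 512..1023 fully dirty:
- * postcopy can only discard and re-request whole host pages.
- */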
- /**
- * postcopy_chunk_hostpages: discard any partially sent host page
- *
- * Utility for the outgoing postcopy code.
- *
- * Discard any partially sent host-page size chunks, mark any partially
- * dirty host-page size chunks as all dirty. In this case the host-page
- * is the host-page for the particular RAMBlock, i.e. it might be a huge page
- *
- * Returns zero on success
- *
- * @ms: current migration state
- * @block: block we want to work with
- */
- static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
- {
- postcopy_discard_send_init(ms, block->idstr);
- /*
- * Ensure that all partially dirty host pages are made fully dirty.
- */
- postcopy_chunk_hostpages_pass(ms, block);
- postcopy_discard_send_finish(ms);
- return 0;
- }
- /**
- * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
- *
- * Returns zero on success
- *
- * Transmit the set of pages to be discarded after precopy to the
- * target; these are pages that:
- * a) have been previously transmitted but are now dirty again
- * b) have never been transmitted; this ensures that any pages on
- * the destination that have been mapped by background tasks get
- * discarded (transparent huge pages are the specific concern)
- * Hopefully this is pretty sparse
- *
- * @ms: current migration state
- */
- int ram_postcopy_send_discard_bitmap(MigrationState *ms)
- {
- RAMState *rs = ram_state;
- RAMBlock *block;
- int ret;
- RCU_READ_LOCK_GUARD();
- /* This should be our last sync, the src is now paused */
- migration_bitmap_sync(rs);
- /* Easiest way to make sure we don't resume in the middle of a host-page */
- rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
- rs->last_page = 0;
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- /* Deal with TPS != HPS and huge pages */
- ret = postcopy_chunk_hostpages(ms, block);
- if (ret) {
- return ret;
- }
- #ifdef DEBUG_POSTCOPY
- ram_debug_dump_bitmap(block->bmap, true,
- block->used_length >> TARGET_PAGE_BITS);
- #endif
- }
- trace_ram_postcopy_send_discard_bitmap();
- return postcopy_each_ram_send_discard(ms);
- }
- /**
- * ram_discard_range: discard dirtied pages at the beginning of postcopy
- *
- * Returns zero on success
- *
- * @rbname: name of the RAMBlock of the request. NULL means the
- * same as the last one.
- * @start: starting offset within the RAMBlock, in bytes
- * @length: length to discard, in bytes
- */
- int ram_discard_range(const char *rbname, uint64_t start, size_t length)
- {
- trace_ram_discard_range(rbname, start, length);
- RCU_READ_LOCK_GUARD();
- RAMBlock *rb = qemu_ram_block_by_name(rbname);
- if (!rb) {
- error_report("ram_discard_range: Failed to find block '%s'", rbname);
- return -1;
- }
- /*
- * On the source VM, we don't need to update the received bitmap since
- * we don't even have one.
- */
- if (rb->receivedmap) {
- bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
- length >> qemu_target_page_bits());
- }
- return ram_block_discard_range(rb, start, length);
- }
- /*
- * For every allocation, we will try not to crash the VM if the
- * allocation fails.
- */
- static int xbzrle_init(void)
- {
- Error *local_err = NULL;
- if (!migrate_use_xbzrle()) {
- return 0;
- }
- XBZRLE_cache_lock();
- XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
- if (!XBZRLE.zero_target_page) {
- error_report("%s: Error allocating zero page", __func__);
- goto err_out;
- }
- XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
- TARGET_PAGE_SIZE, &local_err);
- if (!XBZRLE.cache) {
- error_report_err(local_err);
- goto free_zero_page;
- }
- XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
- if (!XBZRLE.encoded_buf) {
- error_report("%s: Error allocating encoded_buf", __func__);
- goto free_cache;
- }
- XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
- if (!XBZRLE.current_buf) {
- error_report("%s: Error allocating current_buf", __func__);
- goto free_encoded_buf;
- }
- /* We are all good */
- XBZRLE_cache_unlock();
- return 0;
- free_encoded_buf:
- g_free(XBZRLE.encoded_buf);
- XBZRLE.encoded_buf = NULL;
- free_cache:
- cache_fini(XBZRLE.cache);
- XBZRLE.cache = NULL;
- free_zero_page:
- g_free(XBZRLE.zero_target_page);
- XBZRLE.zero_target_page = NULL;
- err_out:
- XBZRLE_cache_unlock();
- return -ENOMEM;
- }
- static int ram_state_init(RAMState **rsp)
- {
- *rsp = g_try_new0(RAMState, 1);
- if (!*rsp) {
- error_report("%s: Init ramstate fail", __func__);
- return -1;
- }
- qemu_mutex_init(&(*rsp)->bitmap_mutex);
- qemu_mutex_init(&(*rsp)->src_page_req_mutex);
- QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
- /*
- * Count the total number of pages used by ram blocks not including any
- * gaps due to alignment or unplugs.
- * This must match with the initial values of dirty bitmap.
- */
- (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
- ram_state_reset(*rsp);
- return 0;
- }
- static void ram_list_init_bitmaps(void)
- {
- MigrationState *ms = migrate_get_current();
- RAMBlock *block;
- unsigned long pages;
- uint8_t shift;
- /* Skip setting bitmap if there is no RAM */
- if (ram_bytes_total()) {
- shift = ms->clear_bitmap_shift;
- if (shift > CLEAR_BITMAP_SHIFT_MAX) {
- error_report("clear_bitmap_shift (%u) too big, using "
- "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
- shift = CLEAR_BITMAP_SHIFT_MAX;
- } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
- error_report("clear_bitmap_shift (%u) too small, using "
- "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
- shift = CLEAR_BITMAP_SHIFT_MIN;
- }
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- pages = block->max_length >> TARGET_PAGE_BITS;
- /*
- * The initial dirty bitmap for migration must be set with all
- * ones to make sure we'll migrate every guest RAM page to the
- * destination.
- * Here we set RAMBlock.bmap all to 1 because when restarting a
- * new migration after a failed one, ram_list.
- * dirty_memory[DIRTY_MEMORY_MIGRATION] may not cover the whole
- * guest memory.
- */
- block->bmap = bitmap_new(pages);
- bitmap_set(block->bmap, 0, pages);
- block->clear_bmap_shift = shift;
- block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
- }
- }
- }
- static void ram_init_bitmaps(RAMState *rs)
- {
- /* For memory_global_dirty_log_start below. */
- qemu_mutex_lock_iothread();
- qemu_mutex_lock_ramlist();
- WITH_RCU_READ_LOCK_GUARD() {
- ram_list_init_bitmaps();
- memory_global_dirty_log_start();
- migration_bitmap_sync_precopy(rs);
- }
- qemu_mutex_unlock_ramlist();
- qemu_mutex_unlock_iothread();
- }
- static int ram_init_all(RAMState **rsp)
- {
- if (ram_state_init(rsp)) {
- return -1;
- }
- if (xbzrle_init()) {
- ram_state_cleanup(rsp);
- return -1;
- }
- ram_init_bitmaps(*rsp);
- return 0;
- }
- static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
- {
- RAMBlock *block;
- uint64_t pages = 0;
- /*
- * Postcopy is not using xbzrle/compression, so no need for that.
- * Also, since the source is already halted, we don't need to care
- * about dirty page logging either.
- */
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- pages += bitmap_count_one(block->bmap,
- block->used_length >> TARGET_PAGE_BITS);
- }
- /* This may not be aligned with current bitmaps. Recalculate. */
- rs->migration_dirty_pages = pages;
- rs->last_seen_block = NULL;
- rs->last_sent_block = NULL;
- rs->last_page = 0;
- rs->last_version = ram_list.version;
- /*
- * Disable the bulk stage, otherwise we'll resend the whole RAM no
- * matter what we have sent.
- */
- rs->ram_bulk_stage = false;
- /* Update RAMState cache of output QEMUFile */
- rs->f = out;
- trace_ram_state_resume_prepare(pages);
- }
- /*
- * This function clears bits of the free pages reported by the caller from the
- * migration dirty bitmap. @addr is the host address corresponding to the
- * start of the contiguous guest free pages, and @len is the total bytes of
- * those pages.
- */
- void qemu_guest_free_page_hint(void *addr, size_t len)
- {
- RAMBlock *block;
- ram_addr_t offset;
- size_t used_len, start, npages;
- MigrationState *s = migrate_get_current();
- /* This function is currently expected to be used during live migration */
- if (!migration_is_setup_or_active(s->state)) {
- return;
- }
- for (; len > 0; len -= used_len, addr += used_len) {
- block = qemu_ram_block_from_host(addr, false, &offset);
- if (unlikely(!block || offset >= block->used_length)) {
- /*
- * The implementation might not support RAMBlock resize during
- * live migration, but it could happen in theory with future
- * updates. So we add a check here to capture that case.
- */
- error_report_once("%s unexpected error", __func__);
- return;
- }
- if (len <= block->used_length - offset) {
- used_len = len;
- } else {
- used_len = block->used_length - offset;
- }
- start = offset >> TARGET_PAGE_BITS;
- npages = used_len >> TARGET_PAGE_BITS;
- qemu_mutex_lock(&ram_state->bitmap_mutex);
- ram_state->migration_dirty_pages -=
- bitmap_count_one_with_offset(block->bmap, start, npages);
- bitmap_clear(block->bmap, start, npages);
- qemu_mutex_unlock(&ram_state->bitmap_mutex);
- }
- }
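- /*
- * Usage sketch (illustrative): the expected caller is the
- * virtio-balloon free page hinting path. For a hinted region it would
- * do something like
- *
- *     qemu_guest_free_page_hint(host_addr, 16 * TARGET_PAGE_SIZE);
- *
- * which clears 16 bits in the dirty bitmap so those pages are not
- * transferred; host_addr is assumed page aligned here, and the loop
- * above already copes with a range spanning several RAMBlocks.
- */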
- /*
- * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
- * a long-running RCU critical section. When RCU reclaims in the code
- * start to become numerous, it will be necessary to reduce the
- * granularity of these critical sections.
- */
- /**
- * ram_save_setup: Setup RAM for migration
- *
- * Returns zero to indicate success and negative for error
- *
- * @f: QEMUFile where to send the data
- * @opaque: RAMState pointer
- */
- static int ram_save_setup(QEMUFile *f, void *opaque)
- {
- RAMState **rsp = opaque;
- RAMBlock *block;
- if (compress_threads_save_setup()) {
- return -1;
- }
- /* migration has already set up the bitmap, reuse it. */
- if (!migration_in_colo_state()) {
- if (ram_init_all(rsp) != 0) {
- compress_threads_save_cleanup();
- return -1;
- }
- }
- (*rsp)->f = f;
- WITH_RCU_READ_LOCK_GUARD() {
- qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
- RAMBLOCK_FOREACH_MIGRATABLE(block) {
- qemu_put_byte(f, strlen(block->idstr));
- qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
- qemu_put_be64(f, block->used_length);
- if (migrate_postcopy_ram() && block->page_size !=
- qemu_host_page_size) {
- qemu_put_be64(f, block->page_size);
- }
- if (migrate_ignore_shared()) {
- qemu_put_be64(f, block->mr->addr);
- }
- }
- }
- ram_control_before_iterate(f, RAM_CONTROL_SETUP);
- ram_control_after_iterate(f, RAM_CONTROL_SETUP);
- multifd_send_sync_main(f);
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- qemu_fflush(f);
- return 0;
- }
- /**
- * ram_save_iterate: iterative stage for migration
- *
- * Returns zero to indicate success and negative for error
- *
- * @f: QEMUFile where to send the data
- * @opaque: RAMState pointer
- */
- static int ram_save_iterate(QEMUFile *f, void *opaque)
- {
- RAMState **temp = opaque;
- RAMState *rs = *temp;
- int ret = 0;
- int i;
- int64_t t0;
- int done = 0;
- if (blk_mig_bulk_active()) {
- /* Avoid transferring RAM during the bulk phase of block migration as
- * the bulk phase will usually take a long time and transferring
- * RAM updates during that time is pointless. */
- goto out;
- }
- WITH_RCU_READ_LOCK_GUARD() {
- if (ram_list.version != rs->last_version) {
- ram_state_reset(rs);
- }
- /* Read version before ram_list.blocks */
- smp_rmb();
- ram_control_before_iterate(f, RAM_CONTROL_ROUND);
- t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
- i = 0;
- while ((ret = qemu_file_rate_limit(f)) == 0 ||
- !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
- int pages;
- if (qemu_file_get_error(f)) {
- break;
- }
- pages = ram_find_and_save_block(rs, false);
- /* no more pages to send */
- if (pages == 0) {
- done = 1;
- break;
- }
- if (pages < 0) {
- qemu_file_set_error(f, pages);
- break;
- }
- rs->target_page_count += pages;
- /*
- * During postcopy, it is necessary to make sure one whole host
- * page is sent in one chunk.
- */
- if (migrate_postcopy_ram()) {
- flush_compressed_data(rs);
- }
- /*
- * We want to check on the first iteration, just in case it was the
- * first time round and we had to sync the dirty bitmap.
- * qemu_clock_get_ns() is a bit expensive, so we only check once
- * every 64 iterations.
- */
- if ((i & 63) == 0) {
- uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
- 1000000;
- if (t1 > MAX_WAIT) {
- trace_ram_save_iterate_big_wait(t1, i);
- break;
- }
- }
- i++;
- }
- }
- /*
- * Must occur before EOS (or any QEMUFile operation)
- * because of RDMA protocol.
- */
- ram_control_after_iterate(f, RAM_CONTROL_ROUND);
- out:
- if (ret >= 0
- && migration_is_setup_or_active(migrate_get_current()->state)) {
- multifd_send_sync_main(rs->f);
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- qemu_fflush(f);
- ram_counters.transferred += 8;
- ret = qemu_file_get_error(f);
- }
- if (ret < 0) {
- return ret;
- }
- return done;
- }
- /**
- * ram_save_complete: function called to send the remaining amount of ram
- *
- * Returns zero to indicate success or negative on error
- *
- * Called with iothread lock
- *
- * @f: QEMUFile where to send the data
- * @opaque: RAMState pointer
- */
- static int ram_save_complete(QEMUFile *f, void *opaque)
- {
- RAMState **temp = opaque;
- RAMState *rs = *temp;
- int ret = 0;
- WITH_RCU_READ_LOCK_GUARD() {
- if (!migration_in_postcopy()) {
- migration_bitmap_sync_precopy(rs);
- }
- ram_control_before_iterate(f, RAM_CONTROL_FINISH);
- /* try transferring iterative blocks of memory */
- /* flush all remaining blocks regardless of rate limiting */
- while (true) {
- int pages;
- pages = ram_find_and_save_block(rs, !migration_in_colo_state());
- /* no more blocks to send */
- if (pages == 0) {
- break;
- }
- if (pages < 0) {
- ret = pages;
- break;
- }
- }
- flush_compressed_data(rs);
- ram_control_after_iterate(f, RAM_CONTROL_FINISH);
- }
- if (ret >= 0) {
- multifd_send_sync_main(rs->f);
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- qemu_fflush(f);
- }
- return ret;
- }
- static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
- uint64_t *res_precopy_only,
- uint64_t *res_compatible,
- uint64_t *res_postcopy_only)
- {
- RAMState **temp = opaque;
- RAMState *rs = *temp;
- uint64_t remaining_size;
- remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
- if (!migration_in_postcopy() &&
- remaining_size < max_size) {
- qemu_mutex_lock_iothread();
- WITH_RCU_READ_LOCK_GUARD() {
- migration_bitmap_sync_precopy(rs);
- }
- qemu_mutex_unlock_iothread();
- remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
- }
- if (migrate_postcopy_ram()) {
- /* We can do postcopy, and all the data is postcopiable */
- *res_compatible += remaining_size;
- } else {
- *res_precopy_only += remaining_size;
- }
- }
- static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
- {
- unsigned int xh_len;
- int xh_flags;
- uint8_t *loaded_data;
- /* extract RLE header */
- xh_flags = qemu_get_byte(f);
- xh_len = qemu_get_be16(f);
- if (xh_flags != ENCODING_FLAG_XBZRLE) {
- error_report("Failed to load XBZRLE page - wrong compression!");
- return -1;
- }
- if (xh_len > TARGET_PAGE_SIZE) {
- error_report("Failed to load XBZRLE page - len overflow!");
- return -1;
- }
- loaded_data = XBZRLE.decoded_buf;
- /* load data and decode */
- /* it can change loaded_data to point to an internal buffer */
- qemu_get_buffer_in_place(f, &loaded_data, xh_len);
- /* decode RLE */
- if (xbzrle_decode_buffer(loaded_data, xh_len, host,
- TARGET_PAGE_SIZE) == -1) {
- error_report("Failed to load XBZRLE page - decode error!");
- return -1;
- }
- return 0;
- }
- /**
- * ram_block_from_stream: read a RAMBlock id from the migration stream
- *
- * Must be called from within an RCU critical section.
- *
- * Returns a pointer from within the RCU-protected ram_list.
- *
- * @f: QEMUFile where to read the data from
- * @flags: Page flags (mostly to see if it's a continuation of previous block)
- */
- static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
- {
- static RAMBlock *block = NULL;
- char id[256];
- uint8_t len;
- if (flags & RAM_SAVE_FLAG_CONTINUE) {
- if (!block) {
- error_report("Ack, bad migration stream!");
- return NULL;
- }
- return block;
- }
- len = qemu_get_byte(f);
- qemu_get_buffer(f, (uint8_t *)id, len);
- id[len] = 0;
- block = qemu_ram_block_by_name(id);
- if (!block) {
- error_report("Can't find block %s", id);
- return NULL;
- }
- if (ramblock_is_ignored(block)) {
- error_report("block %s should not be migrated !", id);
- return NULL;
- }
- return block;
- }
- static inline void *host_from_ram_block_offset(RAMBlock *block,
- ram_addr_t offset)
- {
- if (!offset_in_ramblock(block, offset)) {
- return NULL;
- }
- return block->host + offset;
- }
- static inline void *colo_cache_from_block_offset(RAMBlock *block,
- ram_addr_t offset, bool record_bitmap)
- {
- if (!offset_in_ramblock(block, offset)) {
- return NULL;
- }
- if (!block->colo_cache) {
- error_report("%s: colo_cache is NULL in block :%s",
- __func__, block->idstr);
- return NULL;
- }
- /*
- * During a COLO checkpoint, we need a bitmap of these migrated pages.
- * It helps us decide which pages in the RAM cache should be flushed
- * into the VM's RAM later.
- */
- if (record_bitmap &&
- !test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
- ram_state->migration_dirty_pages++;
- }
- return block->colo_cache + offset;
- }
- /**
- * ram_handle_compressed: handle the zero page case
- *
- * If a page (or a whole RDMA chunk) has been
- * determined to be zero, then zap it.
- *
- * @host: host address for the zero page
- * @ch: what the page is filled from. We only support zero
- * @size: size of the zero page
- */
- void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
- {
- if (ch != 0 || !is_zero_range(host, size)) {
- memset(host, ch, size);
- }
- }
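- /*
- * Illustrative note: ram_handle_compressed(host, 0, TARGET_PAGE_SIZE)
- * is a no-op when the page is already zero-filled; the is_zero_range()
- * check above avoids dirtying pages (and allocating backing memory)
- * with a redundant memset.
- */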
- /* return the size after decompression, or a negative value on error */
- static int
- qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
- const uint8_t *source, size_t source_len)
- {
- int err;
- err = inflateReset(stream);
- if (err != Z_OK) {
- return -1;
- }
- stream->avail_in = source_len;
- stream->next_in = (uint8_t *)source;
- stream->avail_out = dest_len;
- stream->next_out = dest;
- err = inflate(stream, Z_NO_FLUSH);
- if (err != Z_STREAM_END) {
- return -1;
- }
- return stream->total_out;
- }
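- /*
- * Minimal usage sketch (illustrative): decompressing one page from a
- * 'len'-byte compbuf into a page-sized buffer, assuming 'stream' was
- * initialized once with inflateInit():
- *
- *     ret = qemu_uncompress_data(&stream, page, TARGET_PAGE_SIZE,
- *                                compbuf, len);
- *
- * ret is the decompressed size (TARGET_PAGE_SIZE on success) or -1 if
- * the stream did not end cleanly; the inflateReset() above keeps the
- * stream reusable across pages.
- */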
- static void *do_data_decompress(void *opaque)
- {
- DecompressParam *param = opaque;
- unsigned long pagesize;
- uint8_t *des;
- int len, ret;
- qemu_mutex_lock(&param->mutex);
- while (!param->quit) {
- if (param->des) {
- des = param->des;
- len = param->len;
- param->des = 0;
- qemu_mutex_unlock(&param->mutex);
- pagesize = TARGET_PAGE_SIZE;
- ret = qemu_uncompress_data(&param->stream, des, pagesize,
- param->compbuf, len);
- if (ret < 0 && migrate_get_current()->decompress_error_check) {
- error_report("decompress data failed");
- qemu_file_set_error(decomp_file, ret);
- }
- qemu_mutex_lock(&decomp_done_lock);
- param->done = true;
- qemu_cond_signal(&decomp_done_cond);
- qemu_mutex_unlock(&decomp_done_lock);
- qemu_mutex_lock(&param->mutex);
- } else {
- qemu_cond_wait(&param->cond, &param->mutex);
- }
- }
- qemu_mutex_unlock(&param->mutex);
- return NULL;
- }
- static int wait_for_decompress_done(void)
- {
- int idx, thread_count;
- if (!migrate_use_compression()) {
- return 0;
- }
- thread_count = migrate_decompress_threads();
- qemu_mutex_lock(&decomp_done_lock);
- for (idx = 0; idx < thread_count; idx++) {
- while (!decomp_param[idx].done) {
- qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
- }
- }
- qemu_mutex_unlock(&decomp_done_lock);
- return qemu_file_get_error(decomp_file);
- }
- static void compress_threads_load_cleanup(void)
- {
- int i, thread_count;
- if (!migrate_use_compression()) {
- return;
- }
- thread_count = migrate_decompress_threads();
- for (i = 0; i < thread_count; i++) {
- /*
- * we use it as an indicator of whether the thread is
- * properly initialized or not
- */
- if (!decomp_param[i].compbuf) {
- break;
- }
- qemu_mutex_lock(&decomp_param[i].mutex);
- decomp_param[i].quit = true;
- qemu_cond_signal(&decomp_param[i].cond);
- qemu_mutex_unlock(&decomp_param[i].mutex);
- }
- for (i = 0; i < thread_count; i++) {
- if (!decomp_param[i].compbuf) {
- break;
- }
- qemu_thread_join(decompress_threads + i);
- qemu_mutex_destroy(&decomp_param[i].mutex);
- qemu_cond_destroy(&decomp_param[i].cond);
- inflateEnd(&decomp_param[i].stream);
- g_free(decomp_param[i].compbuf);
- decomp_param[i].compbuf = NULL;
- }
- g_free(decompress_threads);
- g_free(decomp_param);
- decompress_threads = NULL;
- decomp_param = NULL;
- decomp_file = NULL;
- }
- static int compress_threads_load_setup(QEMUFile *f)
- {
- int i, thread_count;
- if (!migrate_use_compression()) {
- return 0;
- }
- thread_count = migrate_decompress_threads();
- decompress_threads = g_new0(QemuThread, thread_count);
- decomp_param = g_new0(DecompressParam, thread_count);
- qemu_mutex_init(&decomp_done_lock);
- qemu_cond_init(&decomp_done_cond);
- decomp_file = f;
- for (i = 0; i < thread_count; i++) {
- if (inflateInit(&decomp_param[i].stream) != Z_OK) {
- goto exit;
- }
- decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
- qemu_mutex_init(&decomp_param[i].mutex);
- qemu_cond_init(&decomp_param[i].cond);
- decomp_param[i].done = true;
- decomp_param[i].quit = false;
- qemu_thread_create(decompress_threads + i, "decompress",
- do_data_decompress, decomp_param + i,
- QEMU_THREAD_JOINABLE);
- }
- return 0;
- exit:
- compress_threads_load_cleanup();
- return -1;
- }
- static void decompress_data_with_multi_threads(QEMUFile *f,
- void *host, int len)
- {
- int idx, thread_count;
- thread_count = migrate_decompress_threads();
- qemu_mutex_lock(&decomp_done_lock);
- while (true) {
- for (idx = 0; idx < thread_count; idx++) {
- if (decomp_param[idx].done) {
- decomp_param[idx].done = false;
- qemu_mutex_lock(&decomp_param[idx].mutex);
- qemu_get_buffer(f, decomp_param[idx].compbuf, len);
- decomp_param[idx].des = host;
- decomp_param[idx].len = len;
- qemu_cond_signal(&decomp_param[idx].cond);
- qemu_mutex_unlock(&decomp_param[idx].mutex);
- break;
- }
- }
- if (idx < thread_count) {
- break;
- } else {
- qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
- }
- }
- qemu_mutex_unlock(&decomp_done_lock);
- }
- /*
- * COLO cache: this is for the secondary VM. We cache the whole
- * memory of the secondary VM; the global lock must be held to
- * call this helper.
- */
- int colo_init_ram_cache(void)
- {
- RAMBlock *block;
- WITH_RCU_READ_LOCK_GUARD() {
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- block->colo_cache = qemu_anon_ram_alloc(block->used_length,
- NULL,
- false);
- if (!block->colo_cache) {
- error_report("%s: Can't alloc memory for COLO cache of block %s,"
- "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
- block->used_length);
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- if (block->colo_cache) {
- qemu_anon_ram_free(block->colo_cache, block->used_length);
- block->colo_cache = NULL;
- }
- }
- return -errno;
- }
- }
- }
- /*
- * Record the dirty pages sent by the PVM; we use this dirty bitmap
- * to decide which pages in the cache should be flushed into the
- * SVM's RAM. Here we use the same name 'ram_bitmap' as for migration.
- */
- if (ram_bytes_total()) {
- RAMBlock *block;
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
- block->bmap = bitmap_new(pages);
- }
- }
- ram_state_init(&ram_state);
- return 0;
- }
- /* TODO: duplicated with ram_init_bitmaps */
- void colo_incoming_start_dirty_log(void)
- {
- RAMBlock *block = NULL;
- /* For memory_global_dirty_log_start below. */
- qemu_mutex_lock_iothread();
- qemu_mutex_lock_ramlist();
- memory_global_dirty_log_sync();
- WITH_RCU_READ_LOCK_GUARD() {
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- ramblock_sync_dirty_bitmap(ram_state, block);
- /* Discard this dirty bitmap record */
- bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
- }
- memory_global_dirty_log_start();
- }
- ram_state->migration_dirty_pages = 0;
- qemu_mutex_unlock_ramlist();
- qemu_mutex_unlock_iothread();
- }
- /* The global lock must be held to call this helper */
- void colo_release_ram_cache(void)
- {
- RAMBlock *block;
- memory_global_dirty_log_stop();
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- g_free(block->bmap);
- block->bmap = NULL;
- }
- WITH_RCU_READ_LOCK_GUARD() {
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- if (block->colo_cache) {
- qemu_anon_ram_free(block->colo_cache, block->used_length);
- block->colo_cache = NULL;
- }
- }
- }
- ram_state_cleanup(&ram_state);
- }
- /**
- * ram_load_setup: Setup RAM for migration incoming side
- *
- * Returns zero to indicate success and negative for error
- *
- * @f: QEMUFile where to receive the data
- * @opaque: RAMState pointer
- */
- static int ram_load_setup(QEMUFile *f, void *opaque)
- {
- if (compress_threads_load_setup(f)) {
- return -1;
- }
- xbzrle_load_setup();
- ramblock_recv_map_init();
- return 0;
- }
- static int ram_load_cleanup(void *opaque)
- {
- RAMBlock *rb;
- RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
- qemu_ram_block_writeback(rb);
- }
- xbzrle_load_cleanup();
- compress_threads_load_cleanup();
- RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
- g_free(rb->receivedmap);
- rb->receivedmap = NULL;
- }
- return 0;
- }
- /**
- * ram_postcopy_incoming_init: allocate postcopy data structures
- *
- * Returns 0 for success and negative on error
- *
- * @mis: current migration incoming state
- *
- * Allocate data structures etc needed by incoming migration with
- * postcopy-ram. postcopy-ram's similarly named
- * postcopy_ram_incoming_init does the work.
- */
- int ram_postcopy_incoming_init(MigrationIncomingState *mis)
- {
- return postcopy_ram_incoming_init(mis);
- }
- /**
- * ram_load_postcopy: load a page in postcopy case
- *
- * Returns 0 for success or -errno in case of error
- *
- * Called in postcopy mode by ram_load().
- * rcu_read_lock is taken prior to this being called.
- *
- * @f: QEMUFile to receive the data from
- */
- static int ram_load_postcopy(QEMUFile *f)
- {
- int flags = 0, ret = 0;
- bool place_needed = false;
- bool matches_target_page_size = false;
- MigrationIncomingState *mis = migration_incoming_get_current();
- /* Temporary page that is later 'placed' */
- void *postcopy_host_page = mis->postcopy_tmp_page;
- void *this_host = NULL;
- bool all_zero = true;
- int target_pages = 0;
- while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
- ram_addr_t addr;
- void *host = NULL;
- void *page_buffer = NULL;
- void *place_source = NULL;
- RAMBlock *block = NULL;
- uint8_t ch;
- int len;
- addr = qemu_get_be64(f);
- /*
- * If the QEMUFile hit an error, we should stop here; "addr"
- * may be invalid
- */
- ret = qemu_file_get_error(f);
- if (ret) {
- break;
- }
- flags = addr & ~TARGET_PAGE_MASK;
- addr &= TARGET_PAGE_MASK;
- trace_ram_load_postcopy_loop((uint64_t)addr, flags);
- if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
- RAM_SAVE_FLAG_COMPRESS_PAGE)) {
- block = ram_block_from_stream(f, flags);
- host = host_from_ram_block_offset(block, addr);
- if (!host) {
- error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
- ret = -EINVAL;
- break;
- }
- target_pages++;
- matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
- /*
- * Postcopy requires that we place whole host pages atomically;
- * these may be huge pages for RAMBlocks that are backed by
- * hugetlbfs.
- * To make it atomic, the data is read into a temporary page
- * that's moved into place later.
- * The migration protocol uses, possibly smaller, target pages;
- * however, the source ensures it always sends all the components
- * of a host page in one chunk.
- */
- page_buffer = postcopy_host_page +
- ((uintptr_t)host & (block->page_size - 1));
- if (target_pages == 1) {
- this_host = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
- block->page_size);
- } else {
- /* not the 1st TP within the HP */
- if (QEMU_ALIGN_DOWN((uintptr_t)host, block->page_size) !=
- (uintptr_t)this_host) {
- error_report("Non-same host page %p/%p",
- host, this_host);
- ret = -EINVAL;
- break;
- }
- }
- /*
- * If it's the last part of a host page then we place the host
- * page
- */
- if (target_pages == (block->page_size / TARGET_PAGE_SIZE)) {
- place_needed = true;
- }
- place_source = postcopy_host_page;
- }
- switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
- case RAM_SAVE_FLAG_ZERO:
- ch = qemu_get_byte(f);
- /*
- * We can skip setting page_buffer when this is a zero page
- * and (block->page_size == TARGET_PAGE_SIZE).
- */
- if (ch || !matches_target_page_size) {
- memset(page_buffer, ch, TARGET_PAGE_SIZE);
- }
- if (ch) {
- all_zero = false;
- }
- break;
- case RAM_SAVE_FLAG_PAGE:
- all_zero = false;
- if (!matches_target_page_size) {
- /* For huge pages, we always use temporary buffer */
- qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
- } else {
- /*
- * For small pages that match the target page size, we
- * avoid the qemu_file copy. Instead we directly use
- * the buffer of QEMUFile to place the page. Note: we
- * cannot do any QEMUFile operation before using that
- * buffer to make sure the buffer is valid when
- * placing the page.
- */
- qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
- TARGET_PAGE_SIZE);
- }
- break;
- case RAM_SAVE_FLAG_COMPRESS_PAGE:
- all_zero = false;
- len = qemu_get_be32(f);
- if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
- error_report("Invalid compressed data length: %d", len);
- ret = -EINVAL;
- break;
- }
- decompress_data_with_multi_threads(f, page_buffer, len);
- break;
- case RAM_SAVE_FLAG_EOS:
- /* normal exit */
- multifd_recv_sync_main();
- break;
- default:
- error_report("Unknown combination of migration flags: %#x"
- " (postcopy mode)", flags);
- ret = -EINVAL;
- break;
- }
- /* Got the whole host page, wait for decompress before placing. */
- if (place_needed) {
- ret |= wait_for_decompress_done();
- }
- /* Check for any possible file errors */
- if (!ret && qemu_file_get_error(f)) {
- ret = qemu_file_get_error(f);
- }
- if (!ret && place_needed) {
- /* This gets called at the last target page in the host page */
- void *place_dest = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
- block->page_size);
- if (all_zero) {
- ret = postcopy_place_page_zero(mis, place_dest,
- block);
- } else {
- ret = postcopy_place_page(mis, place_dest,
- place_source, block);
- }
- place_needed = false;
- target_pages = 0;
- /* Assume we have a zero page until we detect something different */
- all_zero = true;
- }
- }
- return ret;
- }
- static bool postcopy_is_advised(void)
- {
- PostcopyState ps = postcopy_state_get();
- return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
- }
- static bool postcopy_is_running(void)
- {
- PostcopyState ps = postcopy_state_get();
- return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
- }
- /*
- * Flush content of RAM cache into SVM's memory.
- * Only flush the pages dirtied by the PVM, the SVM, or both.
- */
- void colo_flush_ram_cache(void)
- {
- RAMBlock *block = NULL;
- void *dst_host;
- void *src_host;
- unsigned long offset = 0;
- memory_global_dirty_log_sync();
- WITH_RCU_READ_LOCK_GUARD() {
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- ramblock_sync_dirty_bitmap(ram_state, block);
- }
- }
- trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
- WITH_RCU_READ_LOCK_GUARD() {
- block = QLIST_FIRST_RCU(&ram_list.blocks);
- while (block) {
- offset = migration_bitmap_find_dirty(ram_state, block, offset);
- if (((ram_addr_t)offset) << TARGET_PAGE_BITS
- >= block->used_length) {
- offset = 0;
- block = QLIST_NEXT_RCU(block, next);
- } else {
- migration_bitmap_clear_dirty(ram_state, block, offset);
- dst_host = block->host
- + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
- src_host = block->colo_cache
- + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
- memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
- }
- }
- }
- trace_colo_flush_ram_cache_end();
- }
- /**
- * ram_load_precopy: load pages in precopy case
- *
- * Returns 0 for success or -errno in case of error
- *
- * Called in precopy mode by ram_load().
- * rcu_read_lock is taken prior to this being called.
- *
- * @f: QEMUFile to receive the data from
- */
- static int ram_load_precopy(QEMUFile *f)
- {
- int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
- /* ADVISE comes earlier; it shows the source has the postcopy capability enabled */
- bool postcopy_advised = postcopy_is_advised();
- if (!migrate_use_compression()) {
- invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
- }
- while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
- ram_addr_t addr, total_ram_bytes;
- void *host = NULL, *host_bak = NULL;
- uint8_t ch;
- /*
- * Yield periodically to let the main loop run, but an iteration
- * of the main loop is expensive, so only do it once in a while
- */
- if ((i & 32767) == 0 && qemu_in_coroutine()) {
- aio_co_schedule(qemu_get_current_aio_context(),
- qemu_coroutine_self());
- qemu_coroutine_yield();
- }
- i++;
- addr = qemu_get_be64(f);
- flags = addr & ~TARGET_PAGE_MASK;
- addr &= TARGET_PAGE_MASK;
- if (flags & invalid_flags) {
- if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
- error_report("Received an unexpected compressed page");
- }
- ret = -EINVAL;
- break;
- }
- if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
- RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
- RAMBlock *block = ram_block_from_stream(f, flags);
- host = host_from_ram_block_offset(block, addr);
- /*
- * After going into the COLO stage, we should not load pages
- * into the SVM's memory directly; we put them into colo_cache first.
- * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
- * Previously, we copied all this memory in the COLO preparation
- * stage, during which the VM had to be stopped, a time-consuming
- * process. Here we optimize it with a trick: back up every page
- * during the migration process while COLO is enabled. This affects
- * the speed of the migration, but it clearly reduces the downtime
- * spent backing up all the SVM's memory in the COLO preparation
- * stage.
- */
- if (migration_incoming_colo_enabled()) {
- if (migration_incoming_in_colo_state()) {
- /* In COLO stage, put all pages into cache temporarily */
- host = colo_cache_from_block_offset(block, addr, true);
- } else {
- /*
- * In the migration stage but before the COLO stage,
- * put all pages into both the cache and the SVM's memory.
- */
- host_bak = colo_cache_from_block_offset(block, addr, false);
- }
- }
- if (!host) {
- error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
- ret = -EINVAL;
- break;
- }
- if (!migration_incoming_in_colo_state()) {
- ramblock_recv_bitmap_set(block, host);
- }
- trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
- }
- switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
- case RAM_SAVE_FLAG_MEM_SIZE:
- /* Synchronize RAM block list */
- total_ram_bytes = addr;
- while (!ret && total_ram_bytes) {
- RAMBlock *block;
- char id[256];
- ram_addr_t length;
- len = qemu_get_byte(f);
- qemu_get_buffer(f, (uint8_t *)id, len);
- id[len] = 0;
- length = qemu_get_be64(f);
- block = qemu_ram_block_by_name(id);
- if (block && !qemu_ram_is_migratable(block)) {
- error_report("block %s should not be migrated !", id);
- ret = -EINVAL;
- } else if (block) {
- if (length != block->used_length) {
- Error *local_err = NULL;
- ret = qemu_ram_resize(block, length,
- &local_err);
- if (local_err) {
- error_report_err(local_err);
- }
- }
- /* For postcopy we need to check hugepage sizes match */
- if (postcopy_advised &&
- block->page_size != qemu_host_page_size) {
- uint64_t remote_page_size = qemu_get_be64(f);
- if (remote_page_size != block->page_size) {
- error_report("Mismatched RAM page size %s "
- "(local) %zd != %" PRId64,
- id, block->page_size,
- remote_page_size);
- ret = -EINVAL;
- }
- }
- if (migrate_ignore_shared()) {
- hwaddr addr = qemu_get_be64(f);
- if (ramblock_is_ignored(block) &&
- block->mr->addr != addr) {
- error_report("Mismatched GPAs for block %s "
- "%" PRId64 "!= %" PRId64,
- id, (uint64_t)addr,
- (uint64_t)block->mr->addr);
- ret = -EINVAL;
- }
- }
- ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
- block->idstr);
- } else {
- error_report("Unknown ramblock \"%s\", cannot "
- "accept migration", id);
- ret = -EINVAL;
- }
- total_ram_bytes -= length;
- }
- break;
- case RAM_SAVE_FLAG_ZERO:
- ch = qemu_get_byte(f);
- ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
- break;
- case RAM_SAVE_FLAG_PAGE:
- qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
- break;
- case RAM_SAVE_FLAG_COMPRESS_PAGE:
- len = qemu_get_be32(f);
- if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
- error_report("Invalid compressed data length: %d", len);
- ret = -EINVAL;
- break;
- }
- decompress_data_with_multi_threads(f, host, len);
- break;
- case RAM_SAVE_FLAG_XBZRLE:
- if (load_xbzrle(f, addr, host) < 0) {
- error_report("Failed to decompress XBZRLE page at "
- RAM_ADDR_FMT, addr);
- ret = -EINVAL;
- break;
- }
- break;
- case RAM_SAVE_FLAG_EOS:
- /* normal exit */
- multifd_recv_sync_main();
- break;
- default:
- if (flags & RAM_SAVE_FLAG_HOOK) {
- ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
- } else {
- error_report("Unknown combination of migration flags: %#x",
- flags);
- ret = -EINVAL;
- }
- }
- if (!ret) {
- ret = qemu_file_get_error(f);
- }
- if (!ret && host_bak) {
- memcpy(host_bak, host, TARGET_PAGE_SIZE);
- }
- }
- ret |= wait_for_decompress_done();
- return ret;
- }
- static int ram_load(QEMUFile *f, void *opaque, int version_id)
- {
- int ret = 0;
- static uint64_t seq_iter;
- /*
- * If the system is running in postcopy mode, page inserts to host
- * memory must be atomic
- */
- bool postcopy_running = postcopy_is_running();
- seq_iter++;
- if (version_id != 4) {
- return -EINVAL;
- }
- /*
- * This RCU critical section can be very long running.
- * When RCU reclaims in the code start to become numerous,
- * it will be necessary to reduce the granularity of this
- * critical section.
- */
- WITH_RCU_READ_LOCK_GUARD() {
- if (postcopy_running) {
- ret = ram_load_postcopy(f);
- } else {
- ret = ram_load_precopy(f);
- }
- }
- trace_ram_load_complete(ret, seq_iter);
- return ret;
- }
- static bool ram_has_postcopy(void *opaque)
- {
- RAMBlock *rb;
- RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
- if (ramblock_is_pmem(rb)) {
- info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
- "is not supported now!", rb->idstr, rb->host);
- return false;
- }
- }
- return migrate_postcopy_ram();
- }
- /* Sync all the dirty bitmaps with the destination VM. */
- static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
- {
- RAMBlock *block;
- QEMUFile *file = s->to_dst_file;
- int ramblock_count = 0;
- trace_ram_dirty_bitmap_sync_start();
- RAMBLOCK_FOREACH_NOT_IGNORED(block) {
- qemu_savevm_send_recv_bitmap(file, block->idstr);
- trace_ram_dirty_bitmap_request(block->idstr);
- ramblock_count++;
- }
- trace_ram_dirty_bitmap_sync_wait();
- /* Wait until all the ramblocks' dirty bitmaps are synced */
- while (ramblock_count--) {
- qemu_sem_wait(&s->rp_state.rp_sem);
- }
- trace_ram_dirty_bitmap_sync_complete();
- return 0;
- }
- static void ram_dirty_bitmap_reload_notify(MigrationState *s)
- {
- qemu_sem_post(&s->rp_state.rp_sem);
- }
- /*
- * Read the received bitmap, revert it as the initial dirty bitmap.
- * This is only used when postcopy migration is paused and we want
- * to resume from a middle point.
- */
- int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
- {
- int ret = -EINVAL;
- QEMUFile *file = s->rp_state.from_dst_file;
- unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
- uint64_t local_size = DIV_ROUND_UP(nbits, 8);
- uint64_t size, end_mark;
- trace_ram_dirty_bitmap_reload_begin(block->idstr);
- if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
- error_report("%s: incorrect state %s", __func__,
- MigrationStatus_str(s->state));
- return -EINVAL;
- }
- /*
- * Note: see comments in ramblock_recv_bitmap_send() on why we
- * need the endianness conversion, and the paddings.
- */
- local_size = ROUND_UP(local_size, 8);
- /* Add paddings */
- le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
- size = qemu_get_be64(file);
- /* The size of the bitmap should match our ramblock */
- if (size != local_size) {
- error_report("%s: ramblock '%s' bitmap size mismatch "
- "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
- block->idstr, size, local_size);
- ret = -EINVAL;
- goto out;
- }
- size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
- end_mark = qemu_get_be64(file);
- ret = qemu_file_get_error(file);
- if (ret || size != local_size) {
- error_report("%s: read bitmap failed for ramblock '%s': %d"
- " (size 0x%"PRIx64", got: 0x%"PRIx64")",
- __func__, block->idstr, ret, local_size, size);
- ret = -EIO;
- goto out;
- }
- if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
- error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
- __func__, block->idstr, end_mark);
- ret = -EINVAL;
- goto out;
- }
- /*
- * Endianness conversion. We are during postcopy (though paused).
- * The dirty bitmap won't change. We can directly modify it.
- */
- bitmap_from_le(block->bmap, le_bitmap, nbits);
- /*
- * What we received is "received bitmap". Revert it as the initial
- * dirty bitmap for this ramblock.
- */
- bitmap_complement(block->bmap, block->bmap, nbits);
- trace_ram_dirty_bitmap_reload_complete(block->idstr);
- /*
- * We succeeded in syncing the bitmap for the current ramblock. If
- * this is the last one to sync, we need to notify the main send
- * thread.
- */
- ram_dirty_bitmap_reload_notify(s);
- ret = 0;
- out:
- g_free(le_bitmap);
- return ret;
- }
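- /*
- * Worked example (illustrative): if the destination reports a received
- * bitmap of 1 1 0 1 (pages 0, 1 and 3 already received), the
- * complement above yields an initial dirty bitmap of 0 0 1 0, so only
- * page 2 is re-sent after the postcopy recovery.
- */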
- static int ram_resume_prepare(MigrationState *s, void *opaque)
- {
- RAMState *rs = *(RAMState **)opaque;
- int ret;
- ret = ram_dirty_bitmap_sync_all(s, rs);
- if (ret) {
- return ret;
- }
- ram_state_resume_prepare(rs, s->to_dst_file);
- return 0;
- }
- static SaveVMHandlers savevm_ram_handlers = {
- .save_setup = ram_save_setup,
- .save_live_iterate = ram_save_iterate,
- .save_live_complete_postcopy = ram_save_complete,
- .save_live_complete_precopy = ram_save_complete,
- .has_postcopy = ram_has_postcopy,
- .save_live_pending = ram_save_pending,
- .load_state = ram_load,
- .save_cleanup = ram_save_cleanup,
- .load_setup = ram_load_setup,
- .load_cleanup = ram_load_cleanup,
- .resume_prepare = ram_resume_prepare,
- };
- void ram_mig_init(void)
- {
- qemu_mutex_init(&XBZRLE.lock);
- register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
- }
|