block.c

/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "monitor/monitor.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "qmp-commands.h"
#include "qemu/timer.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
} BdrvRequestFlags;

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors);
static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
        double elapsed_time, uint64_t *wait);
static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
        bool is_write, int64_t *wait);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* The device to use for VM snapshots */
static BlockDriverState *bs_snapshots;

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif
/* throttling disk I/O limits */
void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    while (qemu_co_queue_next(&bs->throttled_reqs));

    if (bs->block_timer) {
        qemu_del_timer(bs->block_timer);
        qemu_free_timer(bs->block_timer);
        bs->block_timer = NULL;
    }

    bs->slice_start = 0;
    bs->slice_end   = 0;
    bs->slice_time  = 0;
    memset(&bs->io_base, 0, sizeof(bs->io_base));
}

static void bdrv_block_timer(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_queue_next(&bs->throttled_reqs);
}

void bdrv_io_limits_enable(BlockDriverState *bs)
{
    qemu_co_queue_init(&bs->throttled_reqs);
    bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
    bs->io_limits_enabled = true;
}

bool bdrv_io_limits_enabled(BlockDriverState *bs)
{
    BlockIOLimit *io_limits = &bs->io_limits;
    return io_limits->bps[BLOCK_IO_LIMIT_READ]
        || io_limits->bps[BLOCK_IO_LIMIT_WRITE]
        || io_limits->bps[BLOCK_IO_LIMIT_TOTAL]
        || io_limits->iops[BLOCK_IO_LIMIT_READ]
        || io_limits->iops[BLOCK_IO_LIMIT_WRITE]
        || io_limits->iops[BLOCK_IO_LIMIT_TOTAL];
}

static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     bool is_write, int nb_sectors)
{
    int64_t wait_time = -1;

    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
        qemu_co_queue_wait(&bs->throttled_reqs);
    }

    /* Requests are serviced in FIFO order to preserve each request's
     * timing. Throttled requests are not dequeued until the current request
     * is allowed to be serviced, so if the current request still exceeds
     * the limits it is re-inserted at the head of the queue, and all
     * requests following it remain in the throttled_reqs queue.
     */
    while (bdrv_exceed_io_limits(bs, nb_sectors, is_write, &wait_time)) {
        qemu_mod_timer(bs->block_timer,
                       wait_time + qemu_get_clock_ns(vm_clock));
        qemu_co_queue_wait_insert_head(&bs->throttled_reqs);
    }

    qemu_co_queue_next(&bs->throttled_reqs);
}
/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}
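
/* Illustrative examples (not part of the original source):
 * path_has_protocol("nbd:localhost:10809") returns non-zero, because "nbd"
 * precedes the first ':' with no '/' before it, while
 * path_has_protocol("/images/disk.qcow2") returns 0 because a '/' is found
 * first. On Windows, drive names like "c:\disk.img" are explicitly excluded
 * so they are not mistaken for a protocol prefix.
 */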
int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
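
/* Example (illustrative, not in the original source): combining a relative
 * backing file name with the path of the image that references it:
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "snap.qcow2");
 *     // dest == "/images/snap.qcow2"
 *     path_combine(dest, sizeof(dest), "/images/base.qcow2", "/abs/x.raw");
 *     // dest == "/abs/x.raw" (absolute filenames are copied unchanged)
 */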
void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}
void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name)
{
    BlockDriverState *bs;

    bs = g_malloc0(sizeof(BlockDriverState));
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);

    return bs;
}

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv)
{
    static const char *whitelist[] = {
        CONFIG_BDRV_WHITELIST
    };
    const char **p;

    if (!whitelist[0])
        return 1;               /* no whitelist, anything goes */

    for (p = whitelist; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QEMUOptionParameter *options;
    int ret;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    CreateCo *cco = opaque;
    assert(cco->drv);

    cco->ret = cco->drv->bdrv_create(cco->filename, cco->options);
}
int bdrv_create(BlockDriver *drv, const char* filename,
                QEMUOptionParameter *options)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .options = options,
        .ret = NOT_DONE,
    };

    if (!drv->bdrv_create) {
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;

out:
    g_free(cco.filename);
    return ret;
}

int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        return -ENOENT;
    }

    return bdrv_create(drv, filename, options);
}
/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
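
/* Example (illustrative, not in the original source): callers typically pass
 * a PATH_MAX-sized buffer and check the return value, mirroring the usage
 * in bdrv_open() below:
 *
 *     char tmp[PATH_MAX + 1];
 *     int ret = get_tmp_filename(tmp, sizeof(tmp));
 *     if (ret < 0) {
 *         // ret is a negative errno value, e.g. -EOVERFLOW
 *     }
 */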
/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename)) {
        return bdrv_find_format("file");
    }
    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}
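
/* Illustrative resolution order (not in the original source):
 *   "/dev/cdrom"          -> host device probe wins first
 *   "nbd:localhost:10809" -> protocol prefix "nbd" matches a protocol driver
 *   "disk.qcow2"          -> no protocol prefix, falls back to "file"
 * Which drivers are actually registered depends on the build configuration.
 */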
static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}
/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = length >> BDRV_SECTOR_BITS;
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
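
/* Summary of the mode-to-flag mapping above (illustrative):
 *
 *   mode           BDRV_O_NOCACHE   BDRV_O_CACHE_WB   BDRV_O_NO_FLUSH
 *   off/none            yes              yes               no
 *   directsync          yes              no                no
 *   writeback           no               yes               no
 *   unsafe              no               yes               yes
 *   writethrough        no               no                no
 */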
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (bs->is_temporary) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}
/*
 * Common part for opening disk images and files
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
    const char *filename,
    int flags, BlockDriver *drv)
{
    int ret, open_flags;

    assert(drv != NULL);
    assert(bs->file == NULL);

    trace_bdrv_open_common(bs, filename, flags, drv->format_name);

    bs->open_flags = flags;
    bs->buffer_alignment = 512;

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if ((flags & BDRV_O_RDWR) && (flags & BDRV_O_COPY_ON_READ)) {
        bdrv_enable_copy_on_read(bs);
    }

    pstrcpy(bs->filename, sizeof(bs->filename), filename);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
        return -ENOTSUP;
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);
    open_flags = bdrv_open_flags(bs, flags);

    bs->read_only = !(open_flags & BDRV_O_RDWR);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        if (file != NULL) {
            bdrv_swap(file, bs);
            ret = 0;
        } else {
            ret = drv->bdrv_file_open(bs, filename, open_flags);
        }
    } else {
        assert(file != NULL);
        bs->file = file;
        ret = drv->bdrv_open(bs, open_flags);
    }

    if (ret < 0) {
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        goto free_and_fail;
    }

#ifndef _WIN32
    if (bs->is_temporary) {
        unlink(filename);
    }
#endif
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}
/*
 * Opens a file using a protocol (file, host_device, nbd, ...)
 */
int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
{
    BlockDriverState *bs;
    BlockDriver *drv;
    int ret;

    drv = bdrv_find_protocol(filename);
    if (!drv) {
        return -ENOENT;
    }

    bs = bdrv_new("");
    ret = bdrv_open_common(bs, NULL, filename, flags, drv);
    if (ret < 0) {
        bdrv_delete(bs);
        return ret;
    }

    bs->growable = 1;
    *pbs = bs;
    return 0;
}
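
/* Example (illustrative, not in the original source): opening an image
 * through the protocol layer only, with no format driver stacked on top:
 *
 *     BlockDriverState *file_bs;
 *     int ret = bdrv_file_open(&file_bs, "/images/disk.raw", BDRV_O_RDWR);
 *     if (ret < 0) {
 *         // e.g. -ENOENT if no protocol driver matches the filename
 *     }
 */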
int bdrv_open_backing_file(BlockDriverState *bs)
{
    char backing_filename[PATH_MAX];
    int back_flags, ret;
    BlockDriver *back_drv = NULL;

    if (bs->backing_hd != NULL) {
        return 0;
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (bs->backing_file[0] == '\0') {
        return 0;
    }

    bs->backing_hd = bdrv_new("");
    bdrv_get_full_backing_filename(bs, backing_filename,
                                   sizeof(backing_filename));

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    /* backing files always opened read-only */
    back_flags = bs->open_flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT);

    ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
    if (ret < 0) {
        bdrv_delete(bs->backing_hd);
        bs->backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        return ret;
    }
    return 0;
}
/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 */
int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
              BlockDriver *drv)
{
    int ret;
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char tmp_filename[PATH_MAX + 1];
    BlockDriverState *file = NULL;

    if (flags & BDRV_O_SNAPSHOT) {
        BlockDriverState *bs1;
        int64_t total_size;
        int is_protocol = 0;
        BlockDriver *bdrv_qcow2;
        QEMUOptionParameter *options;
        char backing_filename[PATH_MAX];

        /* if snapshot, we create a temporary backing file and open it
           instead of opening 'filename' directly */

        /* if there is a backing file, use it */
        bs1 = bdrv_new("");
        ret = bdrv_open(bs1, filename, 0, drv);
        if (ret < 0) {
            bdrv_delete(bs1);
            return ret;
        }
        total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;

        if (bs1->drv && bs1->drv->protocol_name)
            is_protocol = 1;

        bdrv_delete(bs1);

        ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
        if (ret < 0) {
            return ret;
        }

        /* Real path is meaningless for protocols */
        if (is_protocol)
            snprintf(backing_filename, sizeof(backing_filename),
                     "%s", filename);
        else if (!realpath(filename, backing_filename))
            return -errno;

        bdrv_qcow2 = bdrv_find_format("qcow2");
        options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);

        set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
        set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
        if (drv) {
            set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
                drv->format_name);
        }

        ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
        free_option_parameters(options);
        if (ret < 0) {
            return ret;
        }

        filename = tmp_filename;
        drv = bdrv_qcow2;
        bs->is_temporary = 1;
    }

    /* Open image file without format layer */
    if (flags & BDRV_O_RDWR) {
        flags |= BDRV_O_ALLOW_RDWR;
    }

    ret = bdrv_file_open(&file, filename, bdrv_open_flags(bs, flags));
    if (ret < 0) {
        return ret;
    }

    /* Find the right image format driver */
    if (!drv) {
        ret = find_image_format(file, filename, &drv);
    }

    if (!drv) {
        goto unlink_and_fail;
    }

    /* Open the image */
    ret = bdrv_open_common(bs, file, filename, flags, drv);
    if (ret < 0) {
        goto unlink_and_fail;
    }

    if (bs->file != file) {
        bdrv_delete(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        ret = bdrv_open_backing_file(bs);
        if (ret < 0) {
            bdrv_close(bs);
            return ret;
        }
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    }

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_enable(bs);
    }

    return 0;

unlink_and_fail:
    if (file != NULL) {
        bdrv_delete(file);
    }
    if (bs->is_temporary) {
        unlink(filename);
    }
    return ret;
}
typedef struct BlockReopenQueueEntry {
     bool prepared;
     BDRVReopenState state;
     QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue that has had
 * QSIMPLEQ_INIT already performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, flags);
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}
/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}
/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
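
/* Example (illustrative, not in the original source): queueing two devices
 * so they are reopened as one transaction; either both end up with their
 * new flags or neither does. The queue itself is freed by
 * bdrv_reopen_multiple():
 *
 *     Error *err = NULL;
 *     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs_a, flags_a);
 *     queue = bdrv_reopen_queue(queue, bs_b, flags_b);
 *     if (bdrv_reopen_multiple(queue, &err) < 0) {
 *         // err describes the first device that failed to prepare
 *     }
 */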
/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver's .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state.
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }

    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_set(errp, QERR_OPEN_FILE_FAILED,
                          reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}
/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);

    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags         = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}
void bdrv_close(BlockDriverState *bs)
{
    bdrv_flush(bs);
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all();
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs == bs_snapshots) {
            bs_snapshots = NULL;
        }
        if (bs->backing_hd) {
            bdrv_delete(bs->backing_hd);
            bs->backing_hd = NULL;
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
#ifdef _WIN32
        if (bs->is_temporary) {
            unlink(bs->filename);
        }
#endif
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;

        if (bs->file != NULL) {
            bdrv_delete(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_close(bs);
    }
}
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices -- for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    BlockDriverState *bs;
    bool busy;

    do {
        busy = qemu_aio_wait();

        /* FIXME: We do not have timer support here, so this is effectively
         * a busy wait.
         */
        QTAILQ_FOREACH(bs, &bdrv_states, list) {
            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
                qemu_co_queue_restart_all(&bs->throttled_reqs);
                busy = true;
            }
        }
    } while (busy);

    /* If requests are still pending there is a bug somewhere */
    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        assert(QLIST_EMPTY(&bs->tracked_requests));
        assert(qemu_co_queue_empty(&bs->throttled_reqs));
    }
}
/* make a BlockDriverState anonymous by removing it from the bdrv_states list.
   Also, NULL terminate the device_name to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
    if (bs->device_name[0] != '\0') {
        QTAILQ_REMOVE(&bdrv_states, bs, list);
    }
    bs->device_name[0] = '\0';
}

static void bdrv_rebind(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_rebind) {
        bs->drv->bdrv_rebind(bs);
    }
}
static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
                                     BlockDriverState *bs_src)
{
    /* move some fields that need to stay attached to the device */
    bs_dest->open_flags         = bs_src->open_flags;

    /* dev info */
    bs_dest->dev_ops            = bs_src->dev_ops;
    bs_dest->dev_opaque         = bs_src->dev_opaque;
    bs_dest->dev                = bs_src->dev;
    bs_dest->buffer_alignment   = bs_src->buffer_alignment;
    bs_dest->copy_on_read       = bs_src->copy_on_read;
    bs_dest->enable_write_cache = bs_src->enable_write_cache;

    /* i/o timing parameters */
    bs_dest->slice_time         = bs_src->slice_time;
    bs_dest->slice_start        = bs_src->slice_start;
    bs_dest->slice_end          = bs_src->slice_end;
    bs_dest->io_limits          = bs_src->io_limits;
    bs_dest->io_base            = bs_src->io_base;
    bs_dest->throttled_reqs     = bs_src->throttled_reqs;
    bs_dest->block_timer        = bs_src->block_timer;
    bs_dest->io_limits_enabled  = bs_src->io_limits_enabled;

    /* r/w error */
    bs_dest->on_read_error      = bs_src->on_read_error;
    bs_dest->on_write_error     = bs_src->on_write_error;

    /* i/o status */
    bs_dest->iostatus_enabled   = bs_src->iostatus_enabled;
    bs_dest->iostatus           = bs_src->iostatus;

    /* dirty bitmap */
    bs_dest->dirty_bitmap       = bs_src->dirty_bitmap;

    /* job */
    bs_dest->in_use             = bs_src->in_use;
    bs_dest->job                = bs_src->job;

    /* keep the same entry in bdrv_states */
    pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
            bs_src->device_name);
    bs_dest->list = bs_src->list;
}
/*
 * Swap bs contents for two image chains while they are live,
 * while keeping required fields on the BlockDriverState that is
 * actually attached to a device.
 *
 * This will modify the BlockDriverState fields, and swap contents
 * between bs_new and bs_old. Both bs_new and bs_old are modified.
 *
 * bs_new is required to be anonymous.
 *
 * This function does not create any image files.
 */
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
    BlockDriverState tmp;

    /* bs_new must be anonymous and shouldn't have anything fancy enabled */
    assert(bs_new->device_name[0] == '\0');
    assert(bs_new->dirty_bitmap == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->dev == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    tmp = *bs_new;
    *bs_new = *bs_old;
    *bs_old = tmp;

    /* there are some fields that should not be swapped, move them back */
    bdrv_move_feature_fields(&tmp, bs_old);
    bdrv_move_feature_fields(bs_old, bs_new);
    bdrv_move_feature_fields(bs_new, &tmp);

    /* bs_new shouldn't be in bdrv_states even after the swap!  */
    assert(bs_new->device_name[0] == '\0');

    /* Check a few fields that should remain attached to the device */
    assert(bs_new->dev == NULL);
    assert(bs_new->job == NULL);
    assert(bs_new->in_use == 0);
    assert(bs_new->io_limits_enabled == false);
    assert(bs_new->block_timer == NULL);

    bdrv_rebind(bs_new);
    bdrv_rebind(bs_old);
}
  1156. /*
  1157. * Add new bs contents at the top of an image chain while the chain is
  1158. * live, while keeping required fields on the top layer.
  1159. *
  1160. * This will modify the BlockDriverState fields, and swap contents
  1161. * between bs_new and bs_top. Both bs_new and bs_top are modified.
  1162. *
  1163. * bs_new is required to be anonymous.
  1164. *
  1165. * This function does not create any image files.
  1166. */
  1167. void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
  1168. {
  1169. bdrv_swap(bs_new, bs_top);
  1170. /* The contents of 'tmp' will become bs_top, as we are
  1171. * swapping bs_new and bs_top contents. */
  1172. bs_top->backing_hd = bs_new;
  1173. bs_top->open_flags &= ~BDRV_O_NO_BACKING;
  1174. pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
  1175. bs_new->filename);
  1176. pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
  1177. bs_new->drv ? bs_new->drv->format_name : "");
  1178. }
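
/*
 * Usage sketch (illustrative only; error handling elided, flags may vary):
 * this is the shape of a live external snapshot.  "overlay.qcow2" must
 * already exist with the current top image as its backing file:
 *
 *     BlockDriverState *bs_new = bdrv_new("");   // anonymous, as required
 *     ret = bdrv_open(bs_new, "overlay.qcow2",
 *                     BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_BACKING, drv);
 *     if (ret < 0) { ... bdrv_delete(bs_new); bail out ... }
 *     bdrv_append(bs_new, bs_top);
 *     // bs_top keeps its device attachment; new writes go to the overlay
 */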
void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->dev);
    assert(!bs->job);
    assert(!bs->in_use);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);

    assert(bs != bs_snapshots);
    g_free(bs);
}

int bdrv_attach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (bs->dev) {
        return -EBUSY;
    }
    bs->dev = dev;
    bdrv_iostatus_reset(bs);
    return 0;
}

/* TODO qdevified devices don't use this, remove when devices are qdevified */
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
{
    if (bdrv_attach_dev(bs, dev) < 0) {
        abort();
    }
}

void bdrv_detach_dev(BlockDriverState *bs, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(bs->dev == dev);
    bs->dev = NULL;
    bs->dev_ops = NULL;
    bs->dev_opaque = NULL;
    bs->buffer_alignment = 512;
}

/* TODO change to return DeviceState * when all users are qdevified */
void *bdrv_get_attached_dev(BlockDriverState *bs)
{
    return bs->dev;
}

void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque)
{
    bs->dev_ops = ops;
    bs->dev_opaque = opaque;
    if (bdrv_dev_has_removable_media(bs) && bs == bs_snapshots) {
        bs_snapshots = NULL;
    }
}
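
/*
 * Example (sketch; the my_* names are hypothetical): a device model with a
 * removable-media tray registers its callbacks once when the drive is
 * attached:
 *
 *     static const BlockDevOps my_cd_block_ops = {
 *         .change_media_cb  = my_cd_change_media_cb,
 *         .eject_request_cb = my_cd_eject_request_cb,
 *         .is_tray_open     = my_cd_is_tray_open,
 *         .is_medium_locked = my_cd_is_medium_locked,
 *     };
 *
 *     bdrv_set_dev_ops(bs, &my_cd_block_ops, my_cd_state);
 *
 * The bdrv_dev_*() helpers below then route tray and medium queries through
 * these callbacks.
 */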
void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
                               enum MonitorEvent ev,
                               BlockErrorAction action, bool is_read)
{
    QObject *data;
    const char *action_str;

    switch (action) {
    case BDRV_ACTION_REPORT:
        action_str = "report";
        break;
    case BDRV_ACTION_IGNORE:
        action_str = "ignore";
        break;
    case BDRV_ACTION_STOP:
        action_str = "stop";
        break;
    default:
        abort();
    }

    data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
                              bdrv->device_name,
                              action_str,
                              is_read ? "read" : "write");
    monitor_protocol_event(ev, data);

    qobject_decref(data);
}

static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
{
    QObject *data;

    data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
                              bdrv_get_device_name(bs), ejected);
    monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);

    qobject_decref(data);
}

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
    if (bs->dev_ops && bs->dev_ops->change_media_cb) {
        bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
        bs->dev_ops->change_media_cb(bs->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            bdrv_emit_qmp_eject_event(bs, true);
        }
        if (load) {
            /* tray close */
            bdrv_emit_qmp_eject_event(bs, false);
        }
    }
}

bool bdrv_dev_has_removable_media(BlockDriverState *bs)
{
    return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
}

void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
{
    if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
        bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
    }
}

bool bdrv_dev_is_tray_open(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_tray_open) {
        return bs->dev_ops->is_tray_open(bs->dev_opaque);
    }
    return false;
}

static void bdrv_dev_resize_cb(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->resize_cb) {
        bs->dev_ops->resize_cb(bs->dev_opaque);
    }
}

bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
{
    if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
        return bs->dev_ops->is_medium_locked(bs->dev_opaque);
    }
    return false;
}

/*
 * Run consistency checks on an image
 *
 * Returns 0 if the check could be completed (it doesn't mean that the image is
 * free of errors) or -errno when an internal error occurred. The results of the
 * check are stored in res.
 */
int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
{
    if (bs->drv->bdrv_check == NULL) {
        return -ENOTSUP;
    }

    memset(res, 0, sizeof(*res));
    return bs->drv->bdrv_check(bs, res, fix);
}
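
/*
 * Example (sketch, mirroring what 'qemu-img check' does; assumes the
 * BDRV_FIX_* BdrvCheckMode flags and the corruptions/leaks counters of
 * BdrvCheckResult):
 *
 *     BdrvCheckResult result;
 *     int ret = bdrv_check(bs, &result, 0);               // report only
 *     if (ret < 0) {
 *         ... -ENOTSUP: this format has no checker ...
 *     } else if (result.corruptions || result.leaks) {
 *         ret = bdrv_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
 *     }
 */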
#define COMMIT_BUF_SECTORS 2048

/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    int64_t sector, total_sectors;
    int n, ro, open_flags;
    int ret = 0;
    uint8_t *buf;
    char filename[PATH_MAX];

    if (!drv)
        return -ENOMEDIUM;

    if (!bs->backing_hd) {
        return -ENOTSUP;
    }

    if (bdrv_in_use(bs) || bdrv_in_use(bs->backing_hd)) {
        return -EBUSY;
    }

    ro = bs->backing_hd->read_only;
    /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
    pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
    open_flags = bs->backing_hd->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);

    for (sector = 0; sector < total_sectors; sector += n) {
        if (bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
            if (bdrv_read(bs, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }

            if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
                ret = -EIO;
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        bdrv_flush(bs);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    if (bs->backing_hd)
        bdrv_flush(bs->backing_hd);

ro_cleanup:
    g_free(buf);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}

int bdrv_commit_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        int ret = bdrv_commit(bs);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

struct BdrvTrackedRequest {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    bool is_write;
    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */
};

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
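
/*
 * Worked example: with 64 KB clusters, c = 65536 / 512 = 128 sectors, so a
 * request covering [sector 130, 10 sectors] rounds to
 *
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)          = 128
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 10, 128) = 128
 *
 * i.e. exactly the one cluster [128, 256) that contains the request.
 */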
static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t sector_num, int nb_sectors) {
    /*        aaaa   bbbb */
    if (sector_num >= req->sector_num + req->nb_sectors) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->sector_num >= sector_num + nb_sectors) {
        return false;
    }
    return true;
}

static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    BdrvTrackedRequest *req;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    bool retry;

    /* If we touch the same cluster it counts as an overlap.  This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster.  For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (tracked_request_overlaps(req, cluster_sector_num,
                                         cluster_nb_sectors)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests.  This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/*
 * Return values:
 * 0        - success
 * -EINVAL  - backing format specified, but no file
 * -ENOSPC  - can't update the backing file because no space is left in the
 *            image file header
 * -ENOTSUP - format driver doesn't support changing the backing file
 */
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    BlockDriver *drv = bs->drv;
    int ret;

    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    if (drv->bdrv_change_backing_file != NULL) {
        ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
    } else {
        ret = -ENOTSUP;
    }

    if (ret == 0) {
        pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
        pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
    }
    return ret;
}
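
/*
 * Example (sketch): repointing an overlay at a new backing image, roughly
 * what 'qemu-img rebase -u' does:
 *
 *     ret = bdrv_change_backing_file(bs, "new-base.qcow2", "qcow2");
 *     if (ret == -ENOTSUP) {
 *         ... the format driver cannot rewrite its header (e.g. raw) ...
 *     }
 *
 * Passing NULL for both arguments records the image as having no backing
 * file (hence the "?:" fallbacks above).
 */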
/*
 * Finds the image layer in the chain that has 'bs' as its backing file.
 *
 * active is the current topmost image.
 *
 * Returns NULL if bs is not found in active's image chain,
 * or if active == bs.
 */
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs)
{
    BlockDriverState *overlay = NULL;
    BlockDriverState *intermediate;

    assert(active != NULL);
    assert(bs != NULL);

    /* if bs is the same as active, then by definition it has no overlay
     */
    if (active == bs) {
        return NULL;
    }

    intermediate = active;
    while (intermediate->backing_hd) {
        if (intermediate->backing_hd == bs) {
            overlay = intermediate;
            break;
        }
        intermediate = intermediate->backing_hd;
    }

    return overlay;
}

typedef struct BlkIntermediateStates {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
} BlkIntermediateStates;

/*
 * Drops images above 'base' up to and including 'top', and sets the image
 * above 'top' to have base as its backing file.
 *
 * Requires that the overlay to 'top' is opened r/w, so that the backing file
 * information in 'bs' can be properly updated.
 *
 * E.g., this will convert the following chain:
 * bottom <- base <- intermediate <- top <- active
 *
 * to
 *
 * bottom <- base <- active
 *
 * It is allowed for bottom==base, in which case it converts:
 *
 * base <- intermediate <- top <- active
 *
 * to
 *
 * base <- active
 *
 * Error conditions:
 *  if active == top, that is considered an error
 *
 */
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base)
{
    BlockDriverState *intermediate;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *new_top_bs = NULL;
    BlkIntermediateStates *intermediate_state, *next;
    int ret = -EIO;

    QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
    QSIMPLEQ_INIT(&states_to_delete);

    if (!top->drv || !base->drv) {
        goto exit;
    }

    new_top_bs = bdrv_find_overlay(active, top);

    if (new_top_bs == NULL) {
        /* we could not find the image above 'top', this is an error */
        goto exit;
    }

    /* special case of new_top_bs->backing_hd already pointing to base - nothing
     * to do, no intermediate images */
    if (new_top_bs->backing_hd == base) {
        ret = 0;
        goto exit;
    }

    intermediate = top;

    /* now we will go down through the list, and add each BDS we find
     * into our deletion queue, until we hit the 'base'
     */
    while (intermediate) {
        intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
        intermediate_state->bs = intermediate;
        QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);

        if (intermediate->backing_hd == base) {
            base_bs = intermediate->backing_hd;
            break;
        }
        intermediate = intermediate->backing_hd;
    }
    if (base_bs == NULL) {
        /* something went wrong, we did not end at the base. safely
         * unravel everything, and exit with error */
        goto exit;
    }

    /* success - we can delete the intermediate states, and link top->base */
    ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
                                   base_bs->drv ? base_bs->drv->format_name : "");
    if (ret) {
        goto exit;
    }
    new_top_bs->backing_hd = base_bs;

    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        /* so that bdrv_close() does not recursively close the chain */
        intermediate_state->bs->backing_hd = NULL;
        bdrv_delete(intermediate_state->bs);
    }
    ret = 0;

exit:
    QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
        g_free(intermediate_state);
    }
    return ret;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    int64_t len;

    if (!bdrv_is_inserted(bs))
        return -ENOMEDIUM;

    if (bs->growable)
        return 0;

    len = bdrv_getlength(bs);

    if (offset < 0)
        return -EIO;

    if ((offset > len) || (len - offset < size))
        return -EIO;

    return 0;
}

static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors)
{
    return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
                                   nb_sectors * BDRV_SECTOR_SIZE);
}

typedef struct RwCo {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
                                     rwco->nb_sectors, rwco->qiov, 0);
    } else {
        rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
                                      rwco->nb_sectors, rwco->qiov, 0);
    }
}

/*
 * Process a synchronous request using coroutines
 */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
    };
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .qiov = &qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);
    /**
     * In the synchronous call context the vcpu is blocked, so the throttling
     * timer can never fire; I/O throttling therefore has to be disabled here
     * if it has been enabled.
     */
    if (bs->io_limits_enabled) {
        fprintf(stderr, "Disabling I/O throttling on '%s' due "
                        "to synchronous I/O.\n", bdrv_get_device_name(bs));
        bdrv_io_limits_disable(bs);
    }
    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }
    return rwco.ret;
}

/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
}
/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors)
{
    bool enabled;
    int ret;

    enabled = bs->io_limits_enabled;
    bs->io_limits_enabled = false;
    /* issue the caller's request, not a fixed one-sector read of sector 0 */
    ret = bdrv_read(bs, sector_num, buf, nb_sectors);
    bs->io_limits_enabled = enabled;
    return ret;
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true);
}

int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first read to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* read the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(buf, tmp_buf, count);
    }
    return count1;
}

int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count1)
{
    uint8_t tmp_buf[BDRV_SECTOR_SIZE];
    int len, nb_sectors, count;
    int64_t sector_num;
    int ret;

    count = count1;
    /* first write to align to sector start */
    len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
    if (len > count)
        len = count;
    sector_num = offset >> BDRV_SECTOR_BITS;
    if (len > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        count -= len;
        if (count == 0)
            return count1;
        sector_num++;
        buf += len;
    }

    /* write the sectors "in place" */
    nb_sectors = count >> BDRV_SECTOR_BITS;
    if (nb_sectors > 0) {
        if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
            return ret;
        sector_num += nb_sectors;
        len = nb_sectors << BDRV_SECTOR_BITS;
        buf += len;
        count -= len;
    }

    /* add data from the last sector */
    if (count > 0) {
        if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
        memcpy(tmp_buf, buf, count);
        if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
            return ret;
    }
    return count1;
}
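
/*
 * Worked example of the head/body/tail split used by bdrv_pread() and
 * bdrv_pwrite(): a request at offset 1000 for 2000 bytes (512-byte sectors)
 * becomes
 *
 *     head: partial sector 1, bytes 1000..1023 through tmp_buf
 *           (len = (512 - 1000) & 511 = 24; a read-modify-write for pwrite)
 *     body: direct transfer of sectors 2..4 (nb_sectors = 1976 >> 9 = 3)
 *     tail: partial sector 5 for the remaining 440 bytes, again via tmp_buf
 *
 * 24 + 3 * 512 + 440 = 2000 bytes total.
 */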
/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(bs, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    /* No flush needed for cache modes that already do it */
    if (bs->enable_write_cache) {
        bdrv_flush(bs);
    }

    return 0;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_sector_num;
    int cluster_nb_sectors;
    size_t skip_bytes;
    int ret;

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.
     */
    bdrv_round_to_clusters(bs, sector_num, nb_sectors,
                           &cluster_sector_num, &cluster_nb_sectors);

    trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
                                   cluster_sector_num, cluster_nb_sectors);

    iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
    qemu_iovec_init_external(&bounce_qiov, &iov, 1);

    ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
                             &bounce_qiov);
    if (ret < 0) {
        goto err;
    }

    if (drv->bdrv_co_write_zeroes &&
        buffer_is_zero(bounce_buffer, iov.iov_len)) {
        ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
                                      cluster_nb_sectors);
    } else {
        /* This does not change the data on the disk, it is not necessary
         * to flush even in cache=writethrough mode.
         */
        ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
                                  &bounce_qiov);
    }

    if (ret < 0) {
        /* It might be okay to ignore write errors for guest requests.  If this
         * is a deliberate copy-on-read then we don't want to ignore the error.
         * Simply report it in all cases.
         */
        goto err;
    }

    skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
                        nb_sectors * BDRV_SECTOR_SIZE);

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Handle a read request in coroutine context
 */
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk read I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, false, nb_sectors);
    }

    if (bs->copy_on_read) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }
    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight++;
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, false);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int pnum;

        ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != nb_sectors) {
            ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
            goto out;
        }
    }

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    tracked_request_end(&req);

    if (flags & BDRV_REQ_COPY_ON_READ) {
        bs->copy_on_read_in_flight--;
    }

    return ret;
}

int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);

    return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
                            BDRV_REQ_COPY_ON_READ);
}

static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector qiov;
    struct iovec iov;
    int ret;

    /* TODO Emulate only part of misaligned requests instead of letting block
     * drivers return -ENOTSUP and emulate everything */

    /* First try the efficient write zeroes operation */
    if (drv->bdrv_co_write_zeroes) {
        ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
        if (ret != -ENOTSUP) {
            return ret;
        }
    }

    /* Fall back to bounce buffer if write zeroes is unsupported */
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);
    memset(iov.iov_base, 0, iov.iov_len);
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);

    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * Handle a write request in coroutine context
 */
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int ret;

    if (!bs->drv) {
        return -ENOMEDIUM;
    }
    if (bs->read_only) {
        return -EACCES;
    }
    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    }

    /* throttling disk write I/O */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_intercept(bs, true, nb_sectors);
    }

    if (bs->copy_on_read_in_flight) {
        wait_for_overlapping_requests(bs, sector_num, nb_sectors);
    }

    tracked_request_begin(&req, bs, sector_num, nb_sectors, true);

    if (flags & BDRV_REQ_ZERO_WRITE) {
        ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
    } else {
        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
    }

    if (ret == 0 && !bs->enable_write_cache) {
        ret = bdrv_co_flush(bs);
    }

    if (bs->dirty_bitmap) {
        bdrv_set_dirty(bs, sector_num, nb_sectors);
    }

    if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
        bs->wr_highest_sector = sector_num + nb_sectors - 1;
    }

    tracked_request_end(&req);

    return ret;
}

int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov)
{
    trace_bdrv_co_writev(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
}

int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors)
{
    trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);

    return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
                             BDRV_REQ_ZERO_WRITE);
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 */
int bdrv_truncate(BlockDriverState *bs, int64_t offset)
{
    BlockDriver *drv = bs->drv;
    int ret;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_truncate)
        return -ENOTSUP;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_in_use(bs))
        return -EBUSY;
    ret = drv->bdrv_truncate(bs, offset);
    if (ret == 0) {
        ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
        bdrv_dev_resize_cb(bs);
    }
    return ret;
}
/**
 * Length of an allocated file in bytes. Sparse files are counted by actual
 * allocated space. Return < 0 if error or unknown.
 */
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (drv->bdrv_get_allocated_file_size) {
        return drv->bdrv_get_allocated_file_size(bs);
    }
    if (bs->file) {
        return bdrv_get_allocated_file_size(bs->file);
    }
    return -ENOTSUP;
}
/**
 * Length of a file in bytes. Return < 0 if error or unknown.
 */
int64_t bdrv_getlength(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;

    if (bs->growable || bdrv_dev_has_removable_media(bs)) {
        if (drv->bdrv_getlength) {
            return drv->bdrv_getlength(bs);
        }
    }
    return bs->total_sectors * BDRV_SECTOR_SIZE;
}

/* return 0 as number of sectors if no device present or error */
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
    int64_t length;
    length = bdrv_getlength(bs);
    if (length < 0)
        length = 0;
    else
        length = length >> BDRV_SECTOR_BITS;
    *nb_sectors_ptr = length;
}

/* throttling disk io limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        BlockIOLimit *io_limits)
{
    bs->io_limits = *io_limits;
    bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
}
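
/*
 * Example (sketch): capping a drive at 10 MB/s aggregate bandwidth and 100
 * read IOPS, roughly what the block_set_io_throttle monitor command does:
 *
 *     BlockIOLimit limits;
 *     memset(&limits, 0, sizeof(limits));
 *     limits.bps[BLOCK_IO_LIMIT_TOTAL] = 10 * 1024 * 1024;
 *     limits.iops[BLOCK_IO_LIMIT_READ] = 100;
 *     bdrv_set_io_limits(bs, &limits);
 *
 * bdrv_io_limits_enabled() reports true once any limit is non-zero, which is
 * what flips bs->io_limits_enabled here.
 */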
void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error)
{
    bs->on_read_error = on_read_error;
    bs->on_write_error = on_write_error;
}

BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read)
{
    return is_read ? bs->on_read_error : bs->on_write_error;
}

BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error)
{
    BlockdevOnError on_err = is_read ? bs->on_read_error : bs->on_write_error;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BDRV_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BDRV_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BDRV_ACTION_IGNORE;
    default:
        abort();
    }
}
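
/*
 * Example: with on_write_error = BLOCKDEV_ON_ERROR_ENOSPC (traditionally the
 * default -drive werror= policy), a write failing with ENOSPC returns
 * BDRV_ACTION_STOP so the VM pauses and the admin can grow the underlying
 * storage, while any other errno (e.g. EIO) returns BDRV_ACTION_REPORT and
 * the error is forwarded to the guest.
 */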
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error)
{
    assert(error >= 0);
    bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read);
    if (action == BDRV_ACTION_STOP) {
        vm_stop(RUN_STATE_IO_ERROR);
        bdrv_iostatus_set_err(bs, error);
    }
}

int bdrv_is_read_only(BlockDriverState *bs)
{
    return bs->read_only;
}

int bdrv_is_sg(BlockDriverState *bs)
{
    return bs->sg;
}

int bdrv_enable_write_cache(BlockDriverState *bs)
{
    return bs->enable_write_cache;
}

void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce)
{
    bs->enable_write_cache = wce;

    /* so a reopen() will preserve wce */
    if (wce) {
        bs->open_flags |= BDRV_O_CACHE_WB;
    } else {
        bs->open_flags &= ~BDRV_O_CACHE_WB;
    }
}

int bdrv_is_encrypted(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return 1;
    return bs->encrypted;
}

int bdrv_key_required(BlockDriverState *bs)
{
    BlockDriverState *backing_hd = bs->backing_hd;

    if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
        return 1;
    return (bs->encrypted && !bs->valid_key);
}

int bdrv_set_key(BlockDriverState *bs, const char *key)
{
    int ret;
    if (bs->backing_hd && bs->backing_hd->encrypted) {
        ret = bdrv_set_key(bs->backing_hd, key);
        if (ret < 0)
            return ret;
        if (!bs->encrypted)
            return 0;
    }
    if (!bs->encrypted) {
        return -EINVAL;
    } else if (!bs->drv || !bs->drv->bdrv_set_key) {
        return -ENOMEDIUM;
    }
    ret = bs->drv->bdrv_set_key(bs, key);
    if (ret < 0) {
        bs->valid_key = 0;
    } else if (!bs->valid_key) {
        bs->valid_key = 1;
        /* call the change callback now, we skipped it on open */
        bdrv_dev_change_media_cb(bs, true);
    }
    return ret;
}

const char *bdrv_get_format_name(BlockDriverState *bs)
{
    return bs->drv ? bs->drv->format_name : NULL;
}

void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque)
{
    BlockDriver *drv;

    QLIST_FOREACH(drv, &bdrv_drivers, list) {
        it(opaque, drv->format_name);
    }
}

BlockDriverState *bdrv_find(const char *name)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        if (!strcmp(name, bs->device_name)) {
            return bs;
        }
    }
    return NULL;
}

BlockDriverState *bdrv_next(BlockDriverState *bs)
{
    if (!bs) {
        return QTAILQ_FIRST(&bdrv_states);
    }
    return QTAILQ_NEXT(bs, list);
}

void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        it(opaque, bs);
    }
}

const char *bdrv_get_device_name(BlockDriverState *bs)
{
    return bs->device_name;
}

int bdrv_get_flags(BlockDriverState *bs)
{
    return bs->open_flags;
}

void bdrv_flush_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_flush(bs);
    }
}

int bdrv_has_zero_init(BlockDriverState *bs)
{
    assert(bs->drv);

    if (bs->drv->bdrv_has_zero_init) {
        return bs->drv->bdrv_has_zero_init(bs);
    }

    return 1;
}

typedef struct BdrvCoIsAllocatedData {
    BlockDriverState *bs;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int ret;
    bool done;
} BdrvCoIsAllocatedData;

/*
 * Returns true iff the specified sector is present in the disk image. Drivers
 * not implementing the functionality are assumed to not support backing files,
 * hence all their sectors are reported as allocated.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 */
int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, int *pnum)
{
    int64_t n;

    if (sector_num >= bs->total_sectors) {
        *pnum = 0;
        return 0;
    }

    n = bs->total_sectors - sector_num;
    if (n < nb_sectors) {
        nb_sectors = n;
    }

    if (!bs->drv->bdrv_co_is_allocated) {
        *pnum = nb_sectors;
        return 1;
    }

    return bs->drv->bdrv_co_is_allocated(bs, sector_num, nb_sectors, pnum);
}

/* Coroutine wrapper for bdrv_is_allocated() */
static void coroutine_fn bdrv_is_allocated_co_entry(void *opaque)
{
    BdrvCoIsAllocatedData *data = opaque;
    BlockDriverState *bs = data->bs;

    data->ret = bdrv_co_is_allocated(bs, data->sector_num, data->nb_sectors,
                                     data->pnum);
    data->done = true;
}

/*
 * Synchronous wrapper around bdrv_co_is_allocated().
 *
 * See bdrv_co_is_allocated() for details.
 */
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                      int *pnum)
{
    Coroutine *co;
    BdrvCoIsAllocatedData data = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .pnum = pnum,
        .done = false,
    };

    co = qemu_coroutine_create(bdrv_is_allocated_co_entry);
    qemu_coroutine_enter(co, &data);
    while (!data.done) {
        qemu_aio_wait();
    }
    return data.ret;
}
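
/*
 * Example (sketch): scanning an image in contiguous runs, the same pattern
 * bdrv_commit() uses above.  CHUNK is a caller-chosen bound (hypothetical),
 * needed because nb_sectors and *pnum are plain ints:
 *
 *     int64_t total = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
 *     int64_t sector = 0;
 *     int n;
 *     while (sector < total) {
 *         if (bdrv_is_allocated(bs, sector, CHUNK, &n)) {
 *             ... sectors [sector, sector + n) are present in this image ...
 *         }
 *         sector += n;
 *     }
 */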
/*
 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return true if the given sector is allocated in any image between
 * BASE and TOP (inclusive).  BASE can be NULL to check if the given
 * sector is allocated in any image of the chain.  Return false otherwise.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
                                            BlockDriverState *base,
                                            int64_t sector_num,
                                            int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n = nb_sectors;

    intermediate = top;
    while (intermediate && intermediate != base) {
        int pnum_inter;
        ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
                                   &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 1;
        }

        /*
         * [sector_num, nb_sectors] is unallocated on top but intermediate
         * might have
         *
         * [sector_num+x, nb_sectors] allocated.
         */
        if (n > pnum_inter &&
            (intermediate == top ||
             sector_num + pnum_inter < intermediate->total_sectors)) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    *pnum = n;
    return 0;
}
BlockInfo *bdrv_query_info(BlockDriverState *bs)
{
    BlockInfo *info = g_malloc0(sizeof(*info));
    info->device = g_strdup(bs->device_name);
    info->type = g_strdup("unknown");
    info->locked = bdrv_dev_is_medium_locked(bs);
    info->removable = bdrv_dev_has_removable_media(bs);

    if (bdrv_dev_has_removable_media(bs)) {
        info->has_tray_open = true;
        info->tray_open = bdrv_dev_is_tray_open(bs);
    }

    if (bdrv_iostatus_is_enabled(bs)) {
        info->has_io_status = true;
        info->io_status = bs->iostatus;
    }

    if (bs->dirty_bitmap) {
        info->has_dirty = true;
        info->dirty = g_malloc0(sizeof(*info->dirty));
        info->dirty->count = bdrv_get_dirty_count(bs) * BDRV_SECTOR_SIZE;
        info->dirty->granularity =
            ((int64_t) BDRV_SECTOR_SIZE << hbitmap_granularity(bs->dirty_bitmap));
    }

    if (bs->drv) {
        info->has_inserted = true;
        info->inserted = g_malloc0(sizeof(*info->inserted));
        info->inserted->file = g_strdup(bs->filename);
        info->inserted->ro = bs->read_only;
        info->inserted->drv = g_strdup(bs->drv->format_name);
        info->inserted->encrypted = bs->encrypted;
        info->inserted->encryption_key_missing = bdrv_key_required(bs);

        if (bs->backing_file[0]) {
            info->inserted->has_backing_file = true;
            info->inserted->backing_file = g_strdup(bs->backing_file);
        }

        info->inserted->backing_file_depth = bdrv_get_backing_file_depth(bs);

        if (bs->io_limits_enabled) {
            info->inserted->bps =
                bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
            info->inserted->bps_rd =
                bs->io_limits.bps[BLOCK_IO_LIMIT_READ];
            info->inserted->bps_wr =
                bs->io_limits.bps[BLOCK_IO_LIMIT_WRITE];
            info->inserted->iops =
                bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
            info->inserted->iops_rd =
                bs->io_limits.iops[BLOCK_IO_LIMIT_READ];
            info->inserted->iops_wr =
                bs->io_limits.iops[BLOCK_IO_LIMIT_WRITE];
        }
    }

    return info;
}

BlockInfoList *qmp_query_block(Error **errp)
{
    BlockInfoList *head = NULL, **p_next = &head;
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        BlockInfoList *info = g_malloc0(sizeof(*info));
        info->value = bdrv_query_info(bs);

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

BlockStats *bdrv_query_stats(const BlockDriverState *bs)
{
    BlockStats *s;

    s = g_malloc0(sizeof(*s));

    if (bs->device_name[0]) {
        s->has_device = true;
        s->device = g_strdup(bs->device_name);
    }

    s->stats = g_malloc0(sizeof(*s->stats));
    s->stats->rd_bytes = bs->nr_bytes[BDRV_ACCT_READ];
    s->stats->wr_bytes = bs->nr_bytes[BDRV_ACCT_WRITE];
    s->stats->rd_operations = bs->nr_ops[BDRV_ACCT_READ];
    s->stats->wr_operations = bs->nr_ops[BDRV_ACCT_WRITE];
    s->stats->wr_highest_offset = bs->wr_highest_sector * BDRV_SECTOR_SIZE;
    s->stats->flush_operations = bs->nr_ops[BDRV_ACCT_FLUSH];
    s->stats->wr_total_time_ns = bs->total_time_ns[BDRV_ACCT_WRITE];
    s->stats->rd_total_time_ns = bs->total_time_ns[BDRV_ACCT_READ];
    s->stats->flush_total_time_ns = bs->total_time_ns[BDRV_ACCT_FLUSH];

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_stats(bs->file);
    }

    return s;
}

BlockStatsList *qmp_query_blockstats(Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        BlockStatsList *info = g_malloc0(sizeof(*info));
        info->value = bdrv_query_stats(bs);

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}

const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
{
    if (bs->backing_hd && bs->backing_hd->encrypted)
        return bs->backing_file;
    else if (bs->encrypted)
        return bs->filename;
    else
        return NULL;
}

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size)
{
    pstrcpy(filename, filename_size, bs->backing_file);
}

int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_write_compressed)
        return -ENOTSUP;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    assert(!bs->dirty_bitmap);

    return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}

int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (!drv->bdrv_get_info)
        return -ENOTSUP;
    memset(bdi, 0, sizeof(*bdi));
    return drv->bdrv_get_info(bs, bdi);
}

int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_save_vmstate)
        return drv->bdrv_save_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_save_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_load_vmstate)
        return drv->bdrv_load_vmstate(bs, buf, pos, size);
    if (bs->file)
        return bdrv_load_vmstate(bs->file, buf, pos, size);
    return -ENOTSUP;
}

void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
    BlockDriver *drv = bs->drv;

    if (!drv || !drv->bdrv_debug_event) {
        return;
    }

    drv->bdrv_debug_event(bs, event);
}

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_breakpoint) {
        return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
    }

    return -ENOTSUP;
}

int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_resume) {
        return bs->drv->bdrv_debug_resume(bs, tag);
    }

    return -ENOTSUP;
}

bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
    while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
        bs = bs->file;
    }

    if (bs && bs->drv && bs->drv->bdrv_debug_is_suspended) {
        return bs->drv->bdrv_debug_is_suspended(bs, tag);
    }

    return false;
}

/**************************************************************/
/* handling of snapshots */

int bdrv_can_snapshot(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;
    if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    if (!drv->bdrv_snapshot_create) {
        if (bs->file != NULL) {
            return bdrv_can_snapshot(bs->file);
        }
        return 0;
    }

    return 1;
}

int bdrv_is_snapshot(BlockDriverState *bs)
{
    return !!(bs->open_flags & BDRV_O_SNAPSHOT);
}

BlockDriverState *bdrv_snapshots(void)
{
    BlockDriverState *bs;

    if (bs_snapshots) {
        return bs_snapshots;
    }

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        if (bdrv_can_snapshot(bs)) {
            bs_snapshots = bs;
            return bs;
        }
    }
    return NULL;
}

int bdrv_snapshot_create(BlockDriverState *bs,
                         QEMUSnapshotInfo *sn_info)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_create)
        return drv->bdrv_snapshot_create(bs, sn_info);
    if (bs->file)
        return bdrv_snapshot_create(bs->file, sn_info);
    return -ENOTSUP;
}

int bdrv_snapshot_goto(BlockDriverState *bs,
                       const char *snapshot_id)
{
    BlockDriver *drv = bs->drv;
    int ret, open_ret;

    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_goto)
        return drv->bdrv_snapshot_goto(bs, snapshot_id);

    if (bs->file) {
        drv->bdrv_close(bs);
        ret = bdrv_snapshot_goto(bs->file, snapshot_id);
        open_ret = drv->bdrv_open(bs, bs->open_flags);
        if (open_ret < 0) {
            bdrv_delete(bs->file);
            bs->drv = NULL;
            return open_ret;
        }
        return ret;
    }

    return -ENOTSUP;
}

int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_delete)
        return drv->bdrv_snapshot_delete(bs, snapshot_id);
    if (bs->file)
        return bdrv_snapshot_delete(bs->file, snapshot_id);
    return -ENOTSUP;
}

int bdrv_snapshot_list(BlockDriverState *bs,
                       QEMUSnapshotInfo **psn_info)
{
    BlockDriver *drv = bs->drv;
    if (!drv)
        return -ENOMEDIUM;
    if (drv->bdrv_snapshot_list)
        return drv->bdrv_snapshot_list(bs, psn_info);
    if (bs->file)
        return bdrv_snapshot_list(bs->file, psn_info);
    return -ENOTSUP;
}

int bdrv_snapshot_load_tmp(BlockDriverState *bs,
        const char *snapshot_name)
{
    BlockDriver *drv = bs->drv;
    if (!drv) {
        return -ENOMEDIUM;
    }
    if (!bs->read_only) {
        return -EINVAL;
    }
    if (drv->bdrv_snapshot_load_tmp) {
        return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
    }
    return -ENOTSUP;
}
/* backing_file can be relative, absolute, or a protocol.  If it is
 * relative, it must be relative to the chain.  So, passing in bs->filename
 * from a BDS as backing_file should not be done, as that may be relative to
 * the CWD rather than the chain. */
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
        const char *backing_file)
{
    char *filename_full = NULL;
    char *backing_file_full = NULL;
    char *filename_tmp = NULL;
    int is_protocol = 0;
    BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;

    if (!bs || !bs->drv || !backing_file) {
        return NULL;
    }

    filename_full = g_malloc(PATH_MAX);
    backing_file_full = g_malloc(PATH_MAX);
    filename_tmp = g_malloc(PATH_MAX);

    is_protocol = path_has_protocol(backing_file);

    for (curr_bs = bs; curr_bs->backing_hd; curr_bs = curr_bs->backing_hd) {

        /* If either of the filename paths is actually a protocol, then
         * compare unmodified paths; otherwise make paths relative */
        if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
            if (strcmp(backing_file, curr_bs->backing_file) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        } else {
            /* If not an absolute filename path, make it relative to the current
             * image's filename path */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         backing_file);

            /* We are going to compare absolute pathnames */
            if (!realpath(filename_tmp, filename_full)) {
                continue;
            }

            /* We need to make sure the backing filename we are comparing against
             * is relative to the current image filename (or absolute) */
            path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
                         curr_bs->backing_file);

            if (!realpath(filename_tmp, backing_file_full)) {
                continue;
            }

            if (strcmp(backing_file_full, filename_full) == 0) {
                retval = curr_bs->backing_hd;
                break;
            }
        }
    }

    g_free(filename_full);
    g_free(backing_file_full);
    g_free(filename_tmp);
    return retval;
}

int bdrv_get_backing_file_depth(BlockDriverState *bs)
{
    if (!bs->drv) {
        return 0;
    }

    if (!bs->backing_hd) {
        return 0;
    }

    return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
}

BlockDriverState *bdrv_find_base(BlockDriverState *bs)
{
    BlockDriverState *curr_bs = NULL;

    if (!bs) {
        return NULL;
    }

    curr_bs = bs;

    while (curr_bs->backing_hd) {
        curr_bs = curr_bs->backing_hd;
    }
    return curr_bs;
}

#define NB_SUFFIXES 4

char *get_human_readable_size(char *buf, int buf_size, int64_t size)
{
    static const char suffixes[NB_SUFFIXES] = "KMGT";
    int64_t base;
    int i;

    if (size <= 999) {
        snprintf(buf, buf_size, "%" PRId64, size);
    } else {
        base = 1024;
        for (i = 0; i < NB_SUFFIXES; i++) {
            if (size < (10 * base)) {
                snprintf(buf, buf_size, "%0.1f%c",
                         (double)size / base,
                         suffixes[i]);
                break;
            } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
                snprintf(buf, buf_size, "%" PRId64 "%c",
                         ((size + (base >> 1)) / base),
                         suffixes[i]);
                break;
            }
            base = base * 1024;
        }
    }
    return buf;
}
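
/* Worked examples (illustrative, not in the original source):
 *   size = 512        -> "512"   (values <= 999 are printed verbatim)
 *   size = 1536       -> "1.5K"  (1536 < 10 * 1024, one decimal digit)
 *   size = 999424     -> "976K"  ((999424 + 512) / 1024, rounded to nearest)
 *   size = 2147483648 -> "2.0G"  (base has been multiplied up to 1024^3)
 */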

char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
{
    char buf1[128], date_buf[128], clock_buf[128];
    struct tm tm;
    time_t ti;
    int64_t secs;

    if (!sn) {
        snprintf(buf, buf_size,
                 "%-10s%-20s%7s%20s%15s",
                 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
    } else {
        ti = sn->date_sec;
        localtime_r(&ti, &tm);
        strftime(date_buf, sizeof(date_buf),
                 "%Y-%m-%d %H:%M:%S", &tm);
        secs = sn->vm_clock_nsec / 1000000000;
        snprintf(clock_buf, sizeof(clock_buf),
                 "%02d:%02d:%02d.%03d",
                 (int)(secs / 3600),
                 (int)((secs / 60) % 60),
                 (int)(secs % 60),
                 (int)((sn->vm_clock_nsec / 1000000) % 1000));
        snprintf(buf, buf_size,
                 "%-10s%-20s%7s%20s%15s",
                 sn->id_str, sn->name,
                 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
                 date_buf,
                 clock_buf);
    }
    return buf;
}

/**************************************************************/
/* async I/Os */

BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, false);
}

BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *qiov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);

    return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors,
                                 cb, opaque, true);
}

typedef struct MultiwriteCB {
    int error;
    int num_requests;
    int num_callbacks;
    struct {
        BlockDriverCompletionFunc *cb;
        void *opaque;
        QEMUIOVector *free_qiov;
    } callbacks[];
} MultiwriteCB;

static void multiwrite_user_cb(MultiwriteCB *mcb)
{
    int i;

    for (i = 0; i < mcb->num_callbacks; i++) {
        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
        if (mcb->callbacks[i].free_qiov) {
            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
        }
        g_free(mcb->callbacks[i].free_qiov);
    }
}

static void multiwrite_cb(void *opaque, int ret)
{
    MultiwriteCB *mcb = opaque;

    trace_multiwrite_cb(mcb, ret);

    if (ret < 0 && !mcb->error) {
        mcb->error = ret;
    }

    mcb->num_requests--;
    if (mcb->num_requests == 0) {
        multiwrite_user_cb(mcb);
        g_free(mcb);
    }
}

static int multiwrite_req_compare(const void *a, const void *b)
{
    const BlockRequest *req1 = a, *req2 = b;

    /*
     * Note that we can't simply subtract req2->sector from req1->sector
     * here as that could overflow the return value.
     */
    if (req1->sector > req2->sector) {
        return 1;
    } else if (req1->sector < req2->sector) {
        return -1;
    } else {
        return 0;
    }
}

/*
 * Takes a bunch of requests and tries to merge them. Returns the number of
 * requests that remain after merging.
 */
static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs, MultiwriteCB *mcb)
{
    int i, outidx;

    // Sort requests by start sector
    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);

    // Check if adjacent requests touch the same clusters. If so, combine them,
    // filling up gaps with zero sectors.
    outidx = 0;
    for (i = 1; i < num_reqs; i++) {
        int merge = 0;
        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;

        // Handle exactly sequential writes and overlapping writes.
        if (reqs[i].sector <= oldreq_last) {
            merge = 1;
        }

        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
            merge = 0;
        }

        if (merge) {
            size_t size;
            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
            qemu_iovec_init(qiov,
                reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);

            // Add the first request to the merged one. If the requests are
            // overlapping, drop the last sectors of the first request.
            size = (reqs[i].sector - reqs[outidx].sector) << 9;
            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);

            // We should not need to add any zeros between the two requests
            assert(reqs[i].sector <= oldreq_last);

            // Add the second request
            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);

            reqs[outidx].nb_sectors = qiov->size >> 9;
            reqs[outidx].qiov = qiov;

            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
        } else {
            outidx++;
            reqs[outidx].sector     = reqs[i].sector;
            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
            reqs[outidx].qiov       = reqs[i].qiov;
        }
    }

    return outidx + 1;
}
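
/* Merge example (illustrative, not in the original source): two sorted
 * requests, one covering sectors [0, 8) and one covering [8, 16), satisfy
 * reqs[i].sector <= oldreq_last (8 <= 8), so they collapse into a single
 * request for [0, 16) whose qiov is the concatenation of both vectors; a
 * request starting at sector 9 instead would leave a gap and stay separate.
 */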

/*
 * Submit multiple AIO write requests at once.
 *
 * On success, the function returns 0 and all requests in the reqs array have
 * been submitted. On error, this function returns -1 and each individual
 * request may or may not have been submitted yet. In particular, this means
 * that the callback will be called for some of the requests but not for
 * others. The caller must check the error field of the BlockRequest to know
 * which callbacks to wait for (if error != 0, no callback will be called).
 *
 * The implementation may modify the contents of the reqs array, e.g. to merge
 * requests. However, the fields opaque and error are left unmodified as they
 * are used to signal failure for a single request to the caller.
 */
int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
    MultiwriteCB *mcb;
    int i;

    /* don't submit writes if we don't have a medium */
    if (bs->drv == NULL) {
        for (i = 0; i < num_reqs; i++) {
            reqs[i].error = -ENOMEDIUM;
        }
        return -1;
    }

    if (num_reqs == 0) {
        return 0;
    }

    // Create MultiwriteCB structure
    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
    mcb->num_requests = 0;
    mcb->num_callbacks = num_reqs;

    for (i = 0; i < num_reqs; i++) {
        mcb->callbacks[i].cb = reqs[i].cb;
        mcb->callbacks[i].opaque = reqs[i].opaque;
    }

    // Check for mergeable requests
    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);

    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);

    /* Run the aio requests. */
    mcb->num_requests = num_reqs;
    for (i = 0; i < num_reqs; i++) {
        bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
                        reqs[i].nb_sectors, multiwrite_cb, mcb);
    }

    return 0;
}
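
/* Caller-side sketch (hypothetical, not in the original source): fill a
 * BlockRequest array and honor the error-field contract described above.
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_cb, .opaque = ctx0 },
 *         { .sector = 16, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_cb, .opaque = ctx1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // only wait for callbacks of requests with reqs[i].error == 0
 *     }
 *
 * my_cb, ctx0/ctx1 and the qiovs are assumed names for illustration.
 */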

void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
    acb->aiocb_info->cancel(acb);
}

/* block I/O throttling */
static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
                 bool is_write, double elapsed_time, uint64_t *wait)
{
    uint64_t bps_limit = 0;
    double   bytes_limit, bytes_base, bytes_res;
    double   slice_time, wait_time;

    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
        bps_limit = bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
    } else if (bs->io_limits.bps[is_write]) {
        bps_limit = bs->io_limits.bps[is_write];
    } else {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    slice_time = bs->slice_end - bs->slice_start;
    slice_time /= (NANOSECONDS_PER_SECOND);
    bytes_limit = bps_limit * slice_time;
    bytes_base  = bs->nr_bytes[is_write] - bs->io_base.bytes[is_write];
    if (bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL]) {
        bytes_base += bs->nr_bytes[!is_write] - bs->io_base.bytes[!is_write];
    }

    /* bytes_base: the bytes of data which have been read/written; it is
     * obtained from the history statistics.
     * bytes_res: the remaining bytes of data which need to be read/written.
     * (bytes_base + bytes_res) / bps_limit: used to calculate
     * the total time for completing reading/writing all data.
     */
    bytes_res = (unsigned) nb_sectors * BDRV_SECTOR_SIZE;

    if (bytes_base + bytes_res <= bytes_limit) {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    /* Calc approx time to dispatch */
    wait_time = (bytes_base + bytes_res) / bps_limit - elapsed_time;

    /* When the I/O rate at runtime exceeds the limit, bs->slice_end needs
     * to be extended so that the current statistics can be kept until the
     * timer fires; the extension factor below was tuned experimentally.
     */
    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
    if (wait) {
        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
    }

    return true;
}
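
/* Worked example (illustrative, not in the original source): with
 * bps_limit = 1048576 (1 MiB/s) and a 0.1 s slice, bytes_limit is
 * 104857.6.  If 98304 bytes were already counted in this slice and the
 * next request adds 16384 bytes (32 sectors), 98304 + 16384 = 114688
 * exceeds the limit, so the request must wait roughly
 * (98304 + 16384) / 1048576 - elapsed_time seconds before dispatch.
 */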

static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write,
                             double elapsed_time, uint64_t *wait)
{
    uint64_t iops_limit = 0;
    double   ios_limit, ios_base;
    double   slice_time, wait_time;

    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
        iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL];
    } else if (bs->io_limits.iops[is_write]) {
        iops_limit = bs->io_limits.iops[is_write];
    } else {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    slice_time = bs->slice_end - bs->slice_start;
    slice_time /= (NANOSECONDS_PER_SECOND);
    ios_limit = iops_limit * slice_time;
    ios_base  = bs->nr_ops[is_write] - bs->io_base.ios[is_write];
    if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) {
        ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write];
    }

    if (ios_base + 1 <= ios_limit) {
        if (wait) {
            *wait = 0;
        }

        return false;
    }

    /* Calc approx time to dispatch */
    wait_time = (ios_base + 1) / iops_limit;
    if (wait_time > elapsed_time) {
        wait_time = wait_time - elapsed_time;
    } else {
        wait_time = 0;
    }

    bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10;
    bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME;
    if (wait) {
        *wait = wait_time * BLOCK_IO_SLICE_TIME * 10;
    }

    return true;
}

static bool bdrv_exceed_io_limits(BlockDriverState *bs, int nb_sectors,
                           bool is_write, int64_t *wait)
{
    int64_t  now, max_wait;
    uint64_t bps_wait = 0, iops_wait = 0;
    double   elapsed_time;
    int      bps_ret, iops_ret;

    now = qemu_get_clock_ns(vm_clock);
    if ((bs->slice_start < now)
        && (bs->slice_end > now)) {
        bs->slice_end = now + bs->slice_time;
    } else {
        bs->slice_time  = 5 * BLOCK_IO_SLICE_TIME;
        bs->slice_start = now;
        bs->slice_end   = now + bs->slice_time;

        bs->io_base.bytes[is_write]  = bs->nr_bytes[is_write];
        bs->io_base.bytes[!is_write] = bs->nr_bytes[!is_write];

        bs->io_base.ios[is_write]  = bs->nr_ops[is_write];
        bs->io_base.ios[!is_write] = bs->nr_ops[!is_write];
    }

    elapsed_time = now - bs->slice_start;
    elapsed_time /= (NANOSECONDS_PER_SECOND);

    bps_ret  = bdrv_exceed_bps_limits(bs, nb_sectors,
                                      is_write, elapsed_time, &bps_wait);
    iops_ret = bdrv_exceed_iops_limits(bs, is_write,
                                       elapsed_time, &iops_wait);
    if (bps_ret || iops_ret) {
        max_wait = bps_wait > iops_wait ? bps_wait : iops_wait;
        if (wait) {
            *wait = max_wait;
        }

        now = qemu_get_clock_ns(vm_clock);
        if (bs->slice_end < now + max_wait) {
            bs->slice_end = now + max_wait;
        }

        return true;
    }

    if (wait) {
        *wait = 0;
    }

    return false;
}

/**************************************************************/
/* async block device emulation */

typedef struct BlockDriverAIOCBSync {
    BlockDriverAIOCB common;
    QEMUBH *bh;
    int ret;
    /* vector translation state */
    QEMUIOVector *qiov;
    uint8_t *bounce;
    int is_write;
} BlockDriverAIOCBSync;

static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBSync *acb =
        container_of(blockacb, BlockDriverAIOCBSync, common);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static const AIOCBInfo bdrv_em_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBSync),
    .cancel     = bdrv_aio_cancel_em,
};

static void bdrv_aio_bh_cb(void *opaque)
{
    BlockDriverAIOCBSync *acb = opaque;

    if (!acb->is_write) {
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_aio_release(acb);
}

static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov,
                                            int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque,
                                            int is_write)
{
    BlockDriverAIOCBSync *acb;

    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
        acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
    } else {
        acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
    }

    qemu_bh_schedule(acb->bh);

    return &acb->common;
}

static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}

typedef struct BlockDriverAIOCBCoroutine {
    BlockDriverAIOCB common;
    BlockRequest req;
    bool is_write;
    bool *done;
    QEMUBH *bh;
} BlockDriverAIOCBCoroutine;

static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
    .cancel     = bdrv_aio_co_cancel_em,
};

static void bdrv_co_em_bh(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;

    acb->common.cb(acb->common.opaque, acb->req.error);

    if (acb->done) {
        *acb->done = true;
    }

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);
}

/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    if (!acb->is_write) {
        acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    } else {
        acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
            acb->req.nb_sectors, acb->req.qiov, 0);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->req.qiov = qiov;
    acb->is_write = is_write;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_co_do_rw);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    trace_bdrv_aio_flush(bs, opaque);

    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
    BlockDriverAIOCBCoroutine *acb = opaque;
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockDriverAIOCBCoroutine *acb;

    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);

    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
    acb->req.sector = sector_num;
    acb->req.nb_sectors = nb_sectors;
    acb->done = NULL;

    co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
    qemu_coroutine_enter(co, acb);

    return &acb->common;
}

void bdrv_init(void)
{
    module_call_init(MODULE_INIT_BLOCK);
}

void bdrv_init_with_whitelist(void)
{
    use_bdrv_whitelist = 1;
    bdrv_init();
}

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *acb;

    acb = g_slice_alloc(aiocb_info->aiocb_size);
    acb->aiocb_info = aiocb_info;
    acb->bs = bs;
    acb->cb = cb;
    acb->opaque = opaque;
    return acb;
}

void qemu_aio_release(void *p)
{
    BlockDriverAIOCB *acb = p;
    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
}
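
/* Usage sketch (illustrative, not in the original source): a driver embeds
 * BlockDriverAIOCB as the first member of its own ACB type, describes it
 * with an AIOCBInfo, and allocates/frees it through this pair, e.g.
 *
 *     typedef struct MyAIOCB {
 *         BlockDriverAIOCB common;
 *         int my_state;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *         .cancel     = my_cancel_fn,
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     ... later, once completed or cancelled ...
 *     qemu_aio_release(acb);
 *
 * MyAIOCB, my_state and my_cancel_fn are hypothetical names.
 */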

/**************************************************************/
/* Coroutine block device emulation */

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}

static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
                                      int nb_sectors, QEMUIOVector *iov,
                                      bool is_write)
{
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockDriverAIOCB *acb;

    if (is_write) {
        acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
                                       bdrv_co_io_em_complete, &co);
    } else {
        acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
                                      bdrv_co_io_em_complete, &co);
    }

    trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
    if (!acb) {
        return -EIO;
    }
    qemu_coroutine_yield();

    return co.ret;
}

static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
}

static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov)
{
    return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
}

static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_flush(rwco->bs);
}

int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
{
    int ret;

    if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
        return 0;
    }

    /* Write back cached data to the OS even with cache=unsafe */
    if (bs->drv->bdrv_co_flush_to_os) {
        ret = bs->drv->bdrv_co_flush_to_os(bs);
        if (ret < 0) {
            return ret;
        }
    }

    /* But don't actually force it to the disk with cache=unsafe */
    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        goto flush_parent;
    }

    if (bs->drv->bdrv_co_flush_to_disk) {
        ret = bs->drv->bdrv_co_flush_to_disk(bs);
    } else if (bs->drv->bdrv_aio_flush) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
    } else {
        /*
         * Some block drivers always operate in either writethrough or unsafe
         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
         * know how the server works (because the behaviour is hardcoded or
         * depends on server-side configuration), so we can't ensure that
         * everything is safe on disk. Returning an error doesn't work because
         * that would break guests even if the server operates in writethrough
         * mode.
         *
         * Let's hope the user knows what he's doing.
         */
        ret = 0;
    }

    if (ret < 0) {
        return ret;
    }

    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
     * in the case of cache=unsafe, so there are no useless flushes.
     */
flush_parent:
    return bdrv_co_flush(bs->file);
}
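
/* Flush ordering, summarized (illustrative note, not in the original
 * source): for a format driver over a protocol file, a single
 * bdrv_co_flush(bs) first writes driver-cached data back to the OS, then
 * (unless BDRV_O_NO_FLUSH is set) forces it to disk via
 * bdrv_co_flush_to_disk or bdrv_aio_flush, and finally recurses into
 * bs->file so the protocol layer is flushed exactly once as well.
 */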

void bdrv_invalidate_cache(BlockDriverState *bs)
{
    if (bs->drv && bs->drv->bdrv_invalidate_cache) {
        bs->drv->bdrv_invalidate_cache(bs);
    }
}

void bdrv_invalidate_cache_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bdrv_invalidate_cache(bs);
    }
}

void bdrv_clear_incoming_migration_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, list) {
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
    }
}

int bdrv_flush(BlockDriverState *bs)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}

static void coroutine_fn bdrv_discard_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}

int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors)
{
    if (!bs->drv) {
        return -ENOMEDIUM;
    } else if (bdrv_check_request(bs, sector_num, nb_sectors)) {
        return -EIO;
    } else if (bs->read_only) {
        return -EROFS;
    }

    if (bs->dirty_bitmap) {
        bdrv_reset_dirty(bs, sector_num, nb_sectors);
    }

    if (bs->drv->bdrv_co_discard) {
        return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
    } else if (bs->drv->bdrv_aio_discard) {
        BlockDriverAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
                                        bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            return -EIO;
        } else {
            qemu_coroutine_yield();
            return co.ret;
        }
    } else {
        return 0;
    }
}

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
    Coroutine *co;
    RwCo rwco = {
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .ret = NOT_DONE,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    return rwco.ret;
}

/**************************************************************/
/* removable device support */

/**
 * Return TRUE if the media is present
 */
int bdrv_is_inserted(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (!drv) {
        return 0;
    }
    if (!drv->bdrv_is_inserted) {
        return 1;
    }
    return drv->bdrv_is_inserted(bs);
}

/**
 * Return whether the media changed since the last call to this
 * function, or -ENOTSUP if we don't know.  Most drivers don't know.
 */
int bdrv_media_changed(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_media_changed) {
        return drv->bdrv_media_changed(bs);
    }
    return -ENOTSUP;
}

/**
 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
 */
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_eject) {
        drv->bdrv_eject(bs, eject_flag);
    }

    if (bs->device_name[0] != '\0') {
        bdrv_emit_qmp_eject_event(bs, eject_flag);
    }
}

/**
 * Lock or unlock the media (if it is locked, the user won't be able
 * to eject it manually).
 */
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
    BlockDriver *drv = bs->drv;

    trace_bdrv_lock_medium(bs, locked);

    if (drv && drv->bdrv_lock_medium) {
        drv->bdrv_lock_medium(bs, locked);
    }
}

/* needed for generic scsi interface */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_ioctl) {
        return drv->bdrv_ioctl(bs, req, buf);
    }
    return -ENOTSUP;
}

BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriver *drv = bs->drv;

    if (drv && drv->bdrv_aio_ioctl) {
        return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
    }
    return NULL;
}

void bdrv_set_buffer_alignment(BlockDriverState *bs, int align)
{
    bs->buffer_alignment = align;
}

void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    return qemu_memalign((bs && bs->buffer_alignment) ?
                         bs->buffer_alignment : 512, size);
}

/*
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) {
            return false;
        }
    }

    return true;
}

void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity)
{
    int64_t bitmap_size;

    assert((granularity & (granularity - 1)) == 0);

    if (granularity) {
        granularity >>= BDRV_SECTOR_BITS;
        assert(!bs->dirty_bitmap);
        bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
        bs->dirty_bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
    } else {
        if (bs->dirty_bitmap) {
            hbitmap_free(bs->dirty_bitmap);
            bs->dirty_bitmap = NULL;
        }
    }
}
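
/* Worked example (illustrative, not in the original source): with
 * granularity = 65536 bytes, the shift by BDRV_SECTOR_BITS (9) yields 128
 * sectors, and ffs(128) - 1 = 7, so the hbitmap tracks dirtiness at a
 * granularity of 2^7 = 128 sectors, i.e. one bit per 64 KiB of the image.
 */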

int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
{
    if (bs->dirty_bitmap) {
        return hbitmap_get(bs->dirty_bitmap, sector);
    } else {
        return 0;
    }
}

void bdrv_dirty_iter_init(BlockDriverState *bs, HBitmapIter *hbi)
{
    hbitmap_iter_init(hbi, bs->dirty_bitmap, 0);
}

void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
                    int nr_sectors)
{
    hbitmap_set(bs->dirty_bitmap, cur_sector, nr_sectors);
}

void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
                      int nr_sectors)
{
    hbitmap_reset(bs->dirty_bitmap, cur_sector, nr_sectors);
}

int64_t bdrv_get_dirty_count(BlockDriverState *bs)
{
    if (bs->dirty_bitmap) {
        return hbitmap_count(bs->dirty_bitmap);
    } else {
        return 0;
    }
}

void bdrv_set_in_use(BlockDriverState *bs, int in_use)
{
    assert(bs->in_use != in_use);
    bs->in_use = in_use;
}

int bdrv_in_use(BlockDriverState *bs)
{
    return bs->in_use;
}

void bdrv_iostatus_enable(BlockDriverState *bs)
{
    bs->iostatus_enabled = true;
    bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs)
{
    return (bs->iostatus_enabled &&
           (bs->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
            bs->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
            bs->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

void bdrv_iostatus_disable(BlockDriverState *bs)
{
    bs->iostatus_enabled = false;
}

void bdrv_iostatus_reset(BlockDriverState *bs)
{
    if (bdrv_iostatus_is_enabled(bs)) {
        bs->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (bs->job) {
            block_job_iostatus_reset(bs->job);
        }
    }
}

void bdrv_iostatus_set_err(BlockDriverState *bs, int error)
{
    assert(bdrv_iostatus_is_enabled(bs));
    if (bs->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        bs->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                         BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

void
bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
        enum BlockAcctType type)
{
    assert(type < BDRV_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = get_clock();
    cookie->type = type;
}

void
bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
{
    assert(cookie->type < BDRV_MAX_IOTYPE);

    bs->nr_bytes[cookie->type] += cookie->bytes;
    bs->nr_ops[cookie->type]++;
    bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
}
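
/* Usage sketch (illustrative, not in the original source): a device model
 * brackets each request with the accounting pair so that bytes, ops and
 * latency are attributed to the right I/O type, e.g.
 *
 *     BlockAcctCookie cookie;
 *     bdrv_acct_start(bs, &cookie, nb_sectors * BDRV_SECTOR_SIZE,
 *                     BDRV_ACCT_READ);
 *     ... issue the read, then from its completion path ...
 *     bdrv_acct_done(bs, &cookie);
 *
 * The cookie must stay alive until bdrv_acct_done() runs.
 */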

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags, Error **errp)
{
    QEMUOptionParameter *param = NULL, *create_options = NULL;
    QEMUOptionParameter *backing_fmt, *backing_file, *size;
    BlockDriverState *bs = NULL;
    BlockDriver *drv, *proto_drv;
    BlockDriver *backing_drv = NULL;
    int ret = 0;

    /* Find driver and parse its options */
    drv = bdrv_find_format(fmt);
    if (!drv) {
        error_setg(errp, "Unknown file format '%s'", fmt);
        return;
    }

    proto_drv = bdrv_find_protocol(filename);
    if (!proto_drv) {
        error_setg(errp, "Unknown protocol '%s'", filename);
        return;
    }

    create_options = append_option_parameters(create_options,
                                              drv->create_options);
    create_options = append_option_parameters(create_options,
                                              proto_drv->create_options);

    /* Create parameter list with default values */
    param = parse_option_parameters("", create_options, param);

    set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);

    /* Parse -o options */
    if (options) {
        param = parse_option_parameters(options, create_options, param);
        if (param == NULL) {
            error_setg(errp, "Invalid options for file format '%s'.", fmt);
            goto out;
        }
    }

    if (base_filename) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
                                 base_filename)) {
            error_setg(errp, "Backing file not supported for file format '%s'",
                       fmt);
            goto out;
        }
    }

    if (base_fmt) {
        if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
            error_setg(errp, "Backing file format not supported for file "
                             "format '%s'", fmt);
            goto out;
        }
    }

    backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
    if (backing_file && backing_file->value.s) {
        if (!strcmp(filename, backing_file->value.s)) {
            error_setg(errp, "Error: Trying to create an image with the "
                             "same filename as the backing file");
            goto out;
        }
    }

    backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
    if (backing_fmt && backing_fmt->value.s) {
        backing_drv = bdrv_find_format(backing_fmt->value.s);
        if (!backing_drv) {
            error_setg(errp, "Unknown backing file format '%s'",
                       backing_fmt->value.s);
            goto out;
        }
    }

    // The size for the image must always be specified, with one exception:
    // If we are using a backing file, we can obtain the size from there
    size = get_option_parameter(param, BLOCK_OPT_SIZE);
    if (size && size->value.n == -1) {
        if (backing_file && backing_file->value.s) {
            /* Note: this inner 'size' shadows the option parameter above */
            uint64_t size;
            char buf[32];
            int back_flags;

            /* backing files always opened read-only */
            back_flags =
                flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

            bs = bdrv_new("");

            ret = bdrv_open(bs, backing_file->value.s, back_flags, backing_drv);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Could not open '%s'",
                                 backing_file->value.s);
                goto out;
            }
            bdrv_get_geometry(bs, &size);
            size *= 512;

            snprintf(buf, sizeof(buf), "%" PRId64, size);
            set_option_parameter(param, BLOCK_OPT_SIZE, buf);
        } else {
            error_setg(errp, "Image creation needs a size parameter");
            goto out;
        }
    }

    printf("Formatting '%s', fmt=%s ", filename, fmt);
    print_option_parameters(param);
    puts("");

    ret = bdrv_create(drv, filename, param);

    if (ret < 0) {
        if (ret == -ENOTSUP) {
            error_setg(errp, "Formatting or formatting option not supported "
                             "for file format '%s'", fmt);
        } else if (ret == -EFBIG) {
            error_setg(errp, "The image size is too large for file format '%s'",
                       fmt);
        } else {
            error_setg(errp, "%s: error while creating %s: %s", filename, fmt,
                       strerror(-ret));
        }
    }

out:
    free_option_parameters(create_options);
    free_option_parameters(param);

    if (bs) {
        bdrv_delete(bs);
    }
}