blockdev.c 117 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858
  1. /*
  2. * QEMU host block devices
  3. *
  4. * Copyright (c) 2003-2008 Fabrice Bellard
  5. *
  6. * This work is licensed under the terms of the GNU GPL, version 2 or
  7. * later. See the COPYING file in the top-level directory.
  8. *
  9. * This file incorporates work covered by the following copyright and
  10. * permission notice:
  11. *
  12. * Copyright (c) 2003-2008 Fabrice Bellard
  13. *
  14. * Permission is hereby granted, free of charge, to any person obtaining a copy
  15. * of this software and associated documentation files (the "Software"), to deal
  16. * in the Software without restriction, including without limitation the rights
  17. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  18. * copies of the Software, and to permit persons to whom the Software is
  19. * furnished to do so, subject to the following conditions:
  20. *
  21. * The above copyright notice and this permission notice shall be included in
  22. * all copies or substantial portions of the Software.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  25. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  26. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  27. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  28. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  29. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  30. * THE SOFTWARE.
  31. */
  32. #include "qemu/osdep.h"
  33. #include "sysemu/block-backend.h"
  34. #include "sysemu/blockdev.h"
  35. #include "hw/block/block.h"
  36. #include "block/blockjob.h"
  37. #include "block/qdict.h"
  38. #include "block/throttle-groups.h"
  39. #include "monitor/monitor.h"
  40. #include "qemu/error-report.h"
  41. #include "qemu/option.h"
  42. #include "qemu/qemu-print.h"
  43. #include "qemu/config-file.h"
  44. #include "qapi/qapi-commands-block.h"
  45. #include "qapi/qapi-commands-transaction.h"
  46. #include "qapi/qapi-visit-block-core.h"
  47. #include "qapi/qmp/qdict.h"
  48. #include "qapi/qmp/qnum.h"
  49. #include "qapi/qmp/qstring.h"
  50. #include "qapi/error.h"
  51. #include "qapi/qmp/qerror.h"
  52. #include "qapi/qmp/qlist.h"
  53. #include "qapi/qobject-output-visitor.h"
  54. #include "sysemu/sysemu.h"
  55. #include "sysemu/iothread.h"
  56. #include "block/block_int.h"
  57. #include "block/trace.h"
  58. #include "sysemu/runstate.h"
  59. #include "sysemu/replay.h"
  60. #include "qemu/cutils.h"
  61. #include "qemu/help_option.h"
  62. #include "qemu/main-loop.h"
  63. #include "qemu/throttle-options.h"
  64. /* Protected by BQL */
  65. QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
  66. QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);
  67. void bdrv_set_monitor_owned(BlockDriverState *bs)
  68. {
  69. GLOBAL_STATE_CODE();
  70. QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
  71. }
  72. static const char *const if_name[IF_COUNT] = {
  73. [IF_NONE] = "none",
  74. [IF_IDE] = "ide",
  75. [IF_SCSI] = "scsi",
  76. [IF_FLOPPY] = "floppy",
  77. [IF_PFLASH] = "pflash",
  78. [IF_MTD] = "mtd",
  79. [IF_SD] = "sd",
  80. [IF_VIRTIO] = "virtio",
  81. [IF_XEN] = "xen",
  82. };
  83. static int if_max_devs[IF_COUNT] = {
  84. /*
  85. * Do not change these numbers! They govern how drive option
  86. * index maps to unit and bus. That mapping is ABI.
  87. *
  88. * All controllers used to implement if=T drives need to support
  89. * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
  90. * Otherwise, some index values map to "impossible" bus, unit
  91. * values.
  92. *
  93. * For instance, if you change [IF_SCSI] to 255, -drive
  94. * if=scsi,index=12 no longer means bus=1,unit=5, but
  95. * bus=0,unit=12. With an lsi53c895a controller (7 units max),
  96. * the drive can't be set up. Regression.
  97. */
  98. [IF_IDE] = 2,
  99. [IF_SCSI] = 7,
  100. };
  101. /**
  102. * Boards may call this to offer board-by-board overrides
  103. * of the default, global values.
  104. */
  105. void override_max_devs(BlockInterfaceType type, int max_devs)
  106. {
  107. BlockBackend *blk;
  108. DriveInfo *dinfo;
  109. GLOBAL_STATE_CODE();
  110. if (max_devs <= 0) {
  111. return;
  112. }
  113. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  114. dinfo = blk_legacy_dinfo(blk);
  115. if (dinfo->type == type) {
  116. fprintf(stderr, "Cannot override units-per-bus property of"
  117. " the %s interface, because a drive of that type has"
  118. " already been added.\n", if_name[type]);
  119. g_assert_not_reached();
  120. }
  121. }
  122. if_max_devs[type] = max_devs;
  123. }
  124. /*
  125. * We automatically delete the drive when a device using it gets
  126. * unplugged. Questionable feature, but we can't just drop it.
  127. * Device models call blockdev_mark_auto_del() to schedule the
  128. * automatic deletion, and generic qdev code calls blockdev_auto_del()
  129. * when deletion is actually safe.
  130. */
  131. void blockdev_mark_auto_del(BlockBackend *blk)
  132. {
  133. DriveInfo *dinfo = blk_legacy_dinfo(blk);
  134. BlockJob *job;
  135. GLOBAL_STATE_CODE();
  136. if (!dinfo) {
  137. return;
  138. }
  139. for (job = block_job_next(NULL); job; job = block_job_next(job)) {
  140. if (block_job_has_bdrv(job, blk_bs(blk))) {
  141. AioContext *aio_context = job->job.aio_context;
  142. aio_context_acquire(aio_context);
  143. job_cancel(&job->job, false);
  144. aio_context_release(aio_context);
  145. }
  146. }
  147. dinfo->auto_del = 1;
  148. }
  149. void blockdev_auto_del(BlockBackend *blk)
  150. {
  151. DriveInfo *dinfo = blk_legacy_dinfo(blk);
  152. GLOBAL_STATE_CODE();
  153. if (dinfo && dinfo->auto_del) {
  154. monitor_remove_blk(blk);
  155. blk_unref(blk);
  156. }
  157. }
  158. static int drive_index_to_bus_id(BlockInterfaceType type, int index)
  159. {
  160. int max_devs = if_max_devs[type];
  161. return max_devs ? index / max_devs : 0;
  162. }
  163. static int drive_index_to_unit_id(BlockInterfaceType type, int index)
  164. {
  165. int max_devs = if_max_devs[type];
  166. return max_devs ? index % max_devs : index;
  167. }
  168. QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
  169. const char *optstr)
  170. {
  171. QemuOpts *opts;
  172. GLOBAL_STATE_CODE();
  173. opts = qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
  174. if (!opts) {
  175. return NULL;
  176. }
  177. if (type != IF_DEFAULT) {
  178. qemu_opt_set(opts, "if", if_name[type], &error_abort);
  179. }
  180. if (index >= 0) {
  181. qemu_opt_set_number(opts, "index", index, &error_abort);
  182. }
  183. if (file)
  184. qemu_opt_set(opts, "file", file, &error_abort);
  185. return opts;
  186. }
  187. DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
  188. {
  189. BlockBackend *blk;
  190. DriveInfo *dinfo;
  191. GLOBAL_STATE_CODE();
  192. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  193. dinfo = blk_legacy_dinfo(blk);
  194. if (dinfo && dinfo->type == type
  195. && dinfo->bus == bus && dinfo->unit == unit) {
  196. return dinfo;
  197. }
  198. }
  199. return NULL;
  200. }
  201. /*
  202. * Check board claimed all -drive that are meant to be claimed.
  203. * Fatal error if any remain unclaimed.
  204. */
  205. void drive_check_orphaned(void)
  206. {
  207. BlockBackend *blk;
  208. DriveInfo *dinfo;
  209. Location loc;
  210. bool orphans = false;
  211. GLOBAL_STATE_CODE();
  212. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  213. dinfo = blk_legacy_dinfo(blk);
  214. /*
  215. * Ignore default drives, because we create certain default
  216. * drives unconditionally, then leave them unclaimed. Not the
  217. * users fault.
  218. * Ignore IF_VIRTIO, because it gets desugared into -device,
  219. * so we can leave failing to -device.
  220. * Ignore IF_NONE, because leaving unclaimed IF_NONE remains
  221. * available for device_add is a feature.
  222. */
  223. if (dinfo->is_default || dinfo->type == IF_VIRTIO
  224. || dinfo->type == IF_NONE) {
  225. continue;
  226. }
  227. if (!blk_get_attached_dev(blk)) {
  228. loc_push_none(&loc);
  229. qemu_opts_loc_restore(dinfo->opts);
  230. error_report("machine type does not support"
  231. " if=%s,bus=%d,unit=%d",
  232. if_name[dinfo->type], dinfo->bus, dinfo->unit);
  233. loc_pop(&loc);
  234. orphans = true;
  235. }
  236. }
  237. if (orphans) {
  238. exit(1);
  239. }
  240. }
  241. DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
  242. {
  243. GLOBAL_STATE_CODE();
  244. return drive_get(type,
  245. drive_index_to_bus_id(type, index),
  246. drive_index_to_unit_id(type, index));
  247. }
  248. int drive_get_max_bus(BlockInterfaceType type)
  249. {
  250. int max_bus;
  251. BlockBackend *blk;
  252. DriveInfo *dinfo;
  253. GLOBAL_STATE_CODE();
  254. max_bus = -1;
  255. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  256. dinfo = blk_legacy_dinfo(blk);
  257. if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
  258. max_bus = dinfo->bus;
  259. }
  260. }
  261. return max_bus;
  262. }
  263. static void bdrv_format_print(void *opaque, const char *name)
  264. {
  265. qemu_printf(" %s", name);
  266. }
  267. typedef struct {
  268. QEMUBH *bh;
  269. BlockDriverState *bs;
  270. } BDRVPutRefBH;
  271. static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
  272. {
  273. if (!strcmp(buf, "ignore")) {
  274. return BLOCKDEV_ON_ERROR_IGNORE;
  275. } else if (!is_read && !strcmp(buf, "enospc")) {
  276. return BLOCKDEV_ON_ERROR_ENOSPC;
  277. } else if (!strcmp(buf, "stop")) {
  278. return BLOCKDEV_ON_ERROR_STOP;
  279. } else if (!strcmp(buf, "report")) {
  280. return BLOCKDEV_ON_ERROR_REPORT;
  281. } else {
  282. error_setg(errp, "'%s' invalid %s error action",
  283. buf, is_read ? "read" : "write");
  284. return -1;
  285. }
  286. }
  287. static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
  288. Error **errp)
  289. {
  290. const QListEntry *entry;
  291. for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
  292. switch (qobject_type(entry->value)) {
  293. case QTYPE_QSTRING: {
  294. unsigned long long length;
  295. const char *str = qstring_get_str(qobject_to(QString,
  296. entry->value));
  297. if (parse_uint_full(str, &length, 10) == 0 &&
  298. length > 0 && length <= UINT_MAX) {
  299. block_acct_add_interval(stats, (unsigned) length);
  300. } else {
  301. error_setg(errp, "Invalid interval length: %s", str);
  302. return false;
  303. }
  304. break;
  305. }
  306. case QTYPE_QNUM: {
  307. int64_t length = qnum_get_int(qobject_to(QNum, entry->value));
  308. if (length > 0 && length <= UINT_MAX) {
  309. block_acct_add_interval(stats, (unsigned) length);
  310. } else {
  311. error_setg(errp, "Invalid interval length: %" PRId64, length);
  312. return false;
  313. }
  314. break;
  315. }
  316. default:
  317. error_setg(errp, "The specification of stats-intervals is invalid");
  318. return false;
  319. }
  320. }
  321. return true;
  322. }
  323. typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;
  324. /* All parameters but @opts are optional and may be set to NULL. */
  325. static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
  326. const char **throttling_group, ThrottleConfig *throttle_cfg,
  327. BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
  328. {
  329. Error *local_error = NULL;
  330. const char *aio;
  331. if (bdrv_flags) {
  332. if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
  333. *bdrv_flags |= BDRV_O_COPY_ON_READ;
  334. }
  335. if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
  336. if (bdrv_parse_aio(aio, bdrv_flags) < 0) {
  337. error_setg(errp, "invalid aio option");
  338. return;
  339. }
  340. }
  341. }
  342. /* disk I/O throttling */
  343. if (throttling_group) {
  344. *throttling_group = qemu_opt_get(opts, "throttling.group");
  345. }
  346. if (throttle_cfg) {
  347. throttle_config_init(throttle_cfg);
  348. throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
  349. qemu_opt_get_number(opts, "throttling.bps-total", 0);
  350. throttle_cfg->buckets[THROTTLE_BPS_READ].avg =
  351. qemu_opt_get_number(opts, "throttling.bps-read", 0);
  352. throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
  353. qemu_opt_get_number(opts, "throttling.bps-write", 0);
  354. throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
  355. qemu_opt_get_number(opts, "throttling.iops-total", 0);
  356. throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
  357. qemu_opt_get_number(opts, "throttling.iops-read", 0);
  358. throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
  359. qemu_opt_get_number(opts, "throttling.iops-write", 0);
  360. throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
  361. qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
  362. throttle_cfg->buckets[THROTTLE_BPS_READ].max =
  363. qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
  364. throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
  365. qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
  366. throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
  367. qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
  368. throttle_cfg->buckets[THROTTLE_OPS_READ].max =
  369. qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
  370. throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
  371. qemu_opt_get_number(opts, "throttling.iops-write-max", 0);
  372. throttle_cfg->buckets[THROTTLE_BPS_TOTAL].burst_length =
  373. qemu_opt_get_number(opts, "throttling.bps-total-max-length", 1);
  374. throttle_cfg->buckets[THROTTLE_BPS_READ].burst_length =
  375. qemu_opt_get_number(opts, "throttling.bps-read-max-length", 1);
  376. throttle_cfg->buckets[THROTTLE_BPS_WRITE].burst_length =
  377. qemu_opt_get_number(opts, "throttling.bps-write-max-length", 1);
  378. throttle_cfg->buckets[THROTTLE_OPS_TOTAL].burst_length =
  379. qemu_opt_get_number(opts, "throttling.iops-total-max-length", 1);
  380. throttle_cfg->buckets[THROTTLE_OPS_READ].burst_length =
  381. qemu_opt_get_number(opts, "throttling.iops-read-max-length", 1);
  382. throttle_cfg->buckets[THROTTLE_OPS_WRITE].burst_length =
  383. qemu_opt_get_number(opts, "throttling.iops-write-max-length", 1);
  384. throttle_cfg->op_size =
  385. qemu_opt_get_number(opts, "throttling.iops-size", 0);
  386. if (!throttle_is_valid(throttle_cfg, errp)) {
  387. return;
  388. }
  389. }
  390. if (detect_zeroes) {
  391. *detect_zeroes =
  392. qapi_enum_parse(&BlockdevDetectZeroesOptions_lookup,
  393. qemu_opt_get(opts, "detect-zeroes"),
  394. BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
  395. &local_error);
  396. if (local_error) {
  397. error_propagate(errp, local_error);
  398. return;
  399. }
  400. }
  401. }
  402. /* Takes the ownership of bs_opts */
  403. static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
  404. Error **errp)
  405. {
  406. const char *buf;
  407. int bdrv_flags = 0;
  408. int on_read_error, on_write_error;
  409. bool account_invalid, account_failed;
  410. bool writethrough, read_only;
  411. BlockBackend *blk;
  412. BlockDriverState *bs;
  413. ThrottleConfig cfg;
  414. int snapshot = 0;
  415. Error *error = NULL;
  416. QemuOpts *opts;
  417. QDict *interval_dict = NULL;
  418. QList *interval_list = NULL;
  419. const char *id;
  420. BlockdevDetectZeroesOptions detect_zeroes =
  421. BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
  422. const char *throttling_group = NULL;
  423. /* Check common options by copying from bs_opts to opts, all other options
  424. * stay in bs_opts for processing by bdrv_open(). */
  425. id = qdict_get_try_str(bs_opts, "id");
  426. opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, errp);
  427. if (!opts) {
  428. goto err_no_opts;
  429. }
  430. if (!qemu_opts_absorb_qdict(opts, bs_opts, errp)) {
  431. goto early_err;
  432. }
  433. if (id) {
  434. qdict_del(bs_opts, "id");
  435. }
  436. /* extract parameters */
  437. snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
  438. account_invalid = qemu_opt_get_bool(opts, "stats-account-invalid", true);
  439. account_failed = qemu_opt_get_bool(opts, "stats-account-failed", true);
  440. writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);
  441. id = qemu_opts_id(opts);
  442. qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
  443. qdict_array_split(interval_dict, &interval_list);
  444. if (qdict_size(interval_dict) != 0) {
  445. error_setg(errp, "Invalid option stats-intervals.%s",
  446. qdict_first(interval_dict)->key);
  447. goto early_err;
  448. }
  449. extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
  450. &detect_zeroes, &error);
  451. if (error) {
  452. error_propagate(errp, error);
  453. goto early_err;
  454. }
  455. if ((buf = qemu_opt_get(opts, "format")) != NULL) {
  456. if (is_help_option(buf)) {
  457. qemu_printf("Supported formats:");
  458. bdrv_iterate_format(bdrv_format_print, NULL, false);
  459. qemu_printf("\nSupported formats (read-only):");
  460. bdrv_iterate_format(bdrv_format_print, NULL, true);
  461. qemu_printf("\n");
  462. goto early_err;
  463. }
  464. if (qdict_haskey(bs_opts, "driver")) {
  465. error_setg(errp, "Cannot specify both 'driver' and 'format'");
  466. goto early_err;
  467. }
  468. qdict_put_str(bs_opts, "driver", buf);
  469. }
  470. on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
  471. if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
  472. on_write_error = parse_block_error_action(buf, 0, &error);
  473. if (error) {
  474. error_propagate(errp, error);
  475. goto early_err;
  476. }
  477. }
  478. on_read_error = BLOCKDEV_ON_ERROR_REPORT;
  479. if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
  480. on_read_error = parse_block_error_action(buf, 1, &error);
  481. if (error) {
  482. error_propagate(errp, error);
  483. goto early_err;
  484. }
  485. }
  486. if (snapshot) {
  487. bdrv_flags |= BDRV_O_SNAPSHOT;
  488. }
  489. read_only = qemu_opt_get_bool(opts, BDRV_OPT_READ_ONLY, false);
  490. /* init */
  491. if ((!file || !*file) && !qdict_size(bs_opts)) {
  492. BlockBackendRootState *blk_rs;
  493. blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
  494. blk_rs = blk_get_root_state(blk);
  495. blk_rs->open_flags = bdrv_flags | (read_only ? 0 : BDRV_O_RDWR);
  496. blk_rs->detect_zeroes = detect_zeroes;
  497. qobject_unref(bs_opts);
  498. } else {
  499. if (file && !*file) {
  500. file = NULL;
  501. }
  502. /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
  503. * with other callers) rather than what we want as the real defaults.
  504. * Apply the defaults here instead. */
  505. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
  506. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
  507. qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY,
  508. read_only ? "on" : "off");
  509. qdict_set_default_str(bs_opts, BDRV_OPT_AUTO_READ_ONLY, "on");
  510. assert((bdrv_flags & BDRV_O_CACHE_MASK) == 0);
  511. if (runstate_check(RUN_STATE_INMIGRATE)) {
  512. bdrv_flags |= BDRV_O_INACTIVE;
  513. }
  514. blk = blk_new_open(file, NULL, bs_opts, bdrv_flags, errp);
  515. if (!blk) {
  516. goto err_no_bs_opts;
  517. }
  518. bs = blk_bs(blk);
  519. bs->detect_zeroes = detect_zeroes;
  520. block_acct_setup(blk_get_stats(blk), account_invalid, account_failed);
  521. if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
  522. blk_unref(blk);
  523. blk = NULL;
  524. goto err_no_bs_opts;
  525. }
  526. }
  527. /* disk I/O throttling */
  528. if (throttle_enabled(&cfg)) {
  529. if (!throttling_group) {
  530. throttling_group = id;
  531. }
  532. blk_io_limits_enable(blk, throttling_group);
  533. blk_set_io_limits(blk, &cfg);
  534. }
  535. blk_set_enable_write_cache(blk, !writethrough);
  536. blk_set_on_error(blk, on_read_error, on_write_error);
  537. if (!monitor_add_blk(blk, id, errp)) {
  538. blk_unref(blk);
  539. blk = NULL;
  540. goto err_no_bs_opts;
  541. }
  542. err_no_bs_opts:
  543. qemu_opts_del(opts);
  544. qobject_unref(interval_dict);
  545. qobject_unref(interval_list);
  546. return blk;
  547. early_err:
  548. qemu_opts_del(opts);
  549. qobject_unref(interval_dict);
  550. qobject_unref(interval_list);
  551. err_no_opts:
  552. qobject_unref(bs_opts);
  553. return NULL;
  554. }
  555. /* Takes the ownership of bs_opts */
  556. BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
  557. {
  558. int bdrv_flags = 0;
  559. GLOBAL_STATE_CODE();
  560. /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
  561. * with other callers) rather than what we want as the real defaults.
  562. * Apply the defaults here instead. */
  563. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
  564. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
  565. qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY, "off");
  566. if (runstate_check(RUN_STATE_INMIGRATE)) {
  567. bdrv_flags |= BDRV_O_INACTIVE;
  568. }
  569. return bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp);
  570. }
  571. void blockdev_close_all_bdrv_states(void)
  572. {
  573. BlockDriverState *bs, *next_bs;
  574. GLOBAL_STATE_CODE();
  575. QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
  576. AioContext *ctx = bdrv_get_aio_context(bs);
  577. aio_context_acquire(ctx);
  578. bdrv_unref(bs);
  579. aio_context_release(ctx);
  580. }
  581. }
  582. /* Iterates over the list of monitor-owned BlockDriverStates */
  583. BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs)
  584. {
  585. GLOBAL_STATE_CODE();
  586. return bs ? QTAILQ_NEXT(bs, monitor_list)
  587. : QTAILQ_FIRST(&monitor_bdrv_states);
  588. }
  589. static bool qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
  590. Error **errp)
  591. {
  592. const char *value;
  593. value = qemu_opt_get(opts, from);
  594. if (value) {
  595. if (qemu_opt_find(opts, to)) {
  596. error_setg(errp, "'%s' and its alias '%s' can't be used at the "
  597. "same time", to, from);
  598. return false;
  599. }
  600. }
  601. /* rename all items in opts */
  602. while ((value = qemu_opt_get(opts, from))) {
  603. qemu_opt_set(opts, to, value, &error_abort);
  604. qemu_opt_unset(opts, from);
  605. }
  606. return true;
  607. }
  608. QemuOptsList qemu_legacy_drive_opts = {
  609. .name = "drive",
  610. .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
  611. .desc = {
  612. {
  613. .name = "bus",
  614. .type = QEMU_OPT_NUMBER,
  615. .help = "bus number",
  616. },{
  617. .name = "unit",
  618. .type = QEMU_OPT_NUMBER,
  619. .help = "unit number (i.e. lun for scsi)",
  620. },{
  621. .name = "index",
  622. .type = QEMU_OPT_NUMBER,
  623. .help = "index number",
  624. },{
  625. .name = "media",
  626. .type = QEMU_OPT_STRING,
  627. .help = "media type (disk, cdrom)",
  628. },{
  629. .name = "if",
  630. .type = QEMU_OPT_STRING,
  631. .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
  632. },{
  633. .name = "file",
  634. .type = QEMU_OPT_STRING,
  635. .help = "file name",
  636. },
  637. /* Options that are passed on, but have special semantics with -drive */
  638. {
  639. .name = BDRV_OPT_READ_ONLY,
  640. .type = QEMU_OPT_BOOL,
  641. .help = "open drive file as read-only",
  642. },{
  643. .name = "rerror",
  644. .type = QEMU_OPT_STRING,
  645. .help = "read error action",
  646. },{
  647. .name = "werror",
  648. .type = QEMU_OPT_STRING,
  649. .help = "write error action",
  650. },{
  651. .name = "copy-on-read",
  652. .type = QEMU_OPT_BOOL,
  653. .help = "copy read data from backing file into image file",
  654. },
  655. { /* end of list */ }
  656. },
  657. };
  658. DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type,
  659. Error **errp)
  660. {
  661. const char *value;
  662. BlockBackend *blk;
  663. DriveInfo *dinfo = NULL;
  664. QDict *bs_opts;
  665. QemuOpts *legacy_opts;
  666. DriveMediaType media = MEDIA_DISK;
  667. BlockInterfaceType type;
  668. int max_devs, bus_id, unit_id, index;
  669. const char *werror, *rerror;
  670. bool read_only = false;
  671. bool copy_on_read;
  672. const char *filename;
  673. int i;
  674. GLOBAL_STATE_CODE();
  675. /* Change legacy command line options into QMP ones */
  676. static const struct {
  677. const char *from;
  678. const char *to;
  679. } opt_renames[] = {
  680. { "iops", "throttling.iops-total" },
  681. { "iops_rd", "throttling.iops-read" },
  682. { "iops_wr", "throttling.iops-write" },
  683. { "bps", "throttling.bps-total" },
  684. { "bps_rd", "throttling.bps-read" },
  685. { "bps_wr", "throttling.bps-write" },
  686. { "iops_max", "throttling.iops-total-max" },
  687. { "iops_rd_max", "throttling.iops-read-max" },
  688. { "iops_wr_max", "throttling.iops-write-max" },
  689. { "bps_max", "throttling.bps-total-max" },
  690. { "bps_rd_max", "throttling.bps-read-max" },
  691. { "bps_wr_max", "throttling.bps-write-max" },
  692. { "iops_size", "throttling.iops-size" },
  693. { "group", "throttling.group" },
  694. { "readonly", BDRV_OPT_READ_ONLY },
  695. };
  696. for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
  697. if (!qemu_opt_rename(all_opts, opt_renames[i].from,
  698. opt_renames[i].to, errp)) {
  699. return NULL;
  700. }
  701. }
  702. value = qemu_opt_get(all_opts, "cache");
  703. if (value) {
  704. int flags = 0;
  705. bool writethrough;
  706. if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) {
  707. error_setg(errp, "invalid cache option");
  708. return NULL;
  709. }
  710. /* Specific options take precedence */
  711. if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
  712. qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
  713. !writethrough, &error_abort);
  714. }
  715. if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
  716. qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
  717. !!(flags & BDRV_O_NOCACHE), &error_abort);
  718. }
  719. if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
  720. qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
  721. !!(flags & BDRV_O_NO_FLUSH), &error_abort);
  722. }
  723. qemu_opt_unset(all_opts, "cache");
  724. }
  725. /* Get a QDict for processing the options */
  726. bs_opts = qdict_new();
  727. qemu_opts_to_qdict(all_opts, bs_opts);
  728. legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
  729. &error_abort);
  730. if (!qemu_opts_absorb_qdict(legacy_opts, bs_opts, errp)) {
  731. goto fail;
  732. }
  733. /* Media type */
  734. value = qemu_opt_get(legacy_opts, "media");
  735. if (value) {
  736. if (!strcmp(value, "disk")) {
  737. media = MEDIA_DISK;
  738. } else if (!strcmp(value, "cdrom")) {
  739. media = MEDIA_CDROM;
  740. read_only = true;
  741. } else {
  742. error_setg(errp, "'%s' invalid media", value);
  743. goto fail;
  744. }
  745. }
  746. /* copy-on-read is disabled with a warning for read-only devices */
  747. read_only |= qemu_opt_get_bool(legacy_opts, BDRV_OPT_READ_ONLY, false);
  748. copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);
  749. if (read_only && copy_on_read) {
  750. warn_report("disabling copy-on-read on read-only drive");
  751. copy_on_read = false;
  752. }
  753. qdict_put_str(bs_opts, BDRV_OPT_READ_ONLY, read_only ? "on" : "off");
  754. qdict_put_str(bs_opts, "copy-on-read", copy_on_read ? "on" : "off");
  755. /* Controller type */
  756. value = qemu_opt_get(legacy_opts, "if");
  757. if (value) {
  758. for (type = 0;
  759. type < IF_COUNT && strcmp(value, if_name[type]);
  760. type++) {
  761. }
  762. if (type == IF_COUNT) {
  763. error_setg(errp, "unsupported bus type '%s'", value);
  764. goto fail;
  765. }
  766. } else {
  767. type = block_default_type;
  768. }
  769. /* Device address specified by bus/unit or index.
  770. * If none was specified, try to find the first free one. */
  771. bus_id = qemu_opt_get_number(legacy_opts, "bus", 0);
  772. unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
  773. index = qemu_opt_get_number(legacy_opts, "index", -1);
  774. max_devs = if_max_devs[type];
  775. if (index != -1) {
  776. if (bus_id != 0 || unit_id != -1) {
  777. error_setg(errp, "index cannot be used with bus and unit");
  778. goto fail;
  779. }
  780. bus_id = drive_index_to_bus_id(type, index);
  781. unit_id = drive_index_to_unit_id(type, index);
  782. }
  783. if (unit_id == -1) {
  784. unit_id = 0;
  785. while (drive_get(type, bus_id, unit_id) != NULL) {
  786. unit_id++;
  787. if (max_devs && unit_id >= max_devs) {
  788. unit_id -= max_devs;
  789. bus_id++;
  790. }
  791. }
  792. }
  793. if (max_devs && unit_id >= max_devs) {
  794. error_setg(errp, "unit %d too big (max is %d)", unit_id, max_devs - 1);
  795. goto fail;
  796. }
  797. if (drive_get(type, bus_id, unit_id) != NULL) {
  798. error_setg(errp, "drive with bus=%d, unit=%d (index=%d) exists",
  799. bus_id, unit_id, index);
  800. goto fail;
  801. }
  802. /* no id supplied -> create one */
  803. if (qemu_opts_id(all_opts) == NULL) {
  804. char *new_id;
  805. const char *mediastr = "";
  806. if (type == IF_IDE || type == IF_SCSI) {
  807. mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
  808. }
  809. if (max_devs) {
  810. new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
  811. mediastr, unit_id);
  812. } else {
  813. new_id = g_strdup_printf("%s%s%i", if_name[type],
  814. mediastr, unit_id);
  815. }
  816. qdict_put_str(bs_opts, "id", new_id);
  817. g_free(new_id);
  818. }
  819. /* Add virtio block device */
  820. if (type == IF_VIRTIO) {
  821. QemuOpts *devopts;
  822. devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
  823. &error_abort);
  824. qemu_opt_set(devopts, "driver", "virtio-blk", &error_abort);
  825. qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
  826. &error_abort);
  827. }
  828. filename = qemu_opt_get(legacy_opts, "file");
  829. /* Check werror/rerror compatibility with if=... */
  830. werror = qemu_opt_get(legacy_opts, "werror");
  831. if (werror != NULL) {
  832. if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
  833. type != IF_NONE) {
  834. error_setg(errp, "werror is not supported by this bus type");
  835. goto fail;
  836. }
  837. qdict_put_str(bs_opts, "werror", werror);
  838. }
  839. rerror = qemu_opt_get(legacy_opts, "rerror");
  840. if (rerror != NULL) {
  841. if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
  842. type != IF_NONE) {
  843. error_setg(errp, "rerror is not supported by this bus type");
  844. goto fail;
  845. }
  846. qdict_put_str(bs_opts, "rerror", rerror);
  847. }
  848. /* Actual block device init: Functionality shared with blockdev-add */
  849. blk = blockdev_init(filename, bs_opts, errp);
  850. bs_opts = NULL;
  851. if (!blk) {
  852. goto fail;
  853. }
  854. /* Create legacy DriveInfo */
  855. dinfo = g_malloc0(sizeof(*dinfo));
  856. dinfo->opts = all_opts;
  857. dinfo->type = type;
  858. dinfo->bus = bus_id;
  859. dinfo->unit = unit_id;
  860. blk_set_legacy_dinfo(blk, dinfo);
  861. switch(type) {
  862. case IF_IDE:
  863. case IF_SCSI:
  864. case IF_XEN:
  865. case IF_NONE:
  866. dinfo->media_cd = media == MEDIA_CDROM;
  867. break;
  868. default:
  869. break;
  870. }
  871. fail:
  872. qemu_opts_del(legacy_opts);
  873. qobject_unref(bs_opts);
  874. return dinfo;
  875. }
  876. static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
  877. {
  878. BlockDriverState *bs;
  879. bs = bdrv_lookup_bs(name, name, errp);
  880. if (bs == NULL) {
  881. return NULL;
  882. }
  883. if (!bdrv_is_root_node(bs)) {
  884. error_setg(errp, "Need a root block node");
  885. return NULL;
  886. }
  887. if (!bdrv_is_inserted(bs)) {
  888. error_setg(errp, "Device has no medium");
  889. return NULL;
  890. }
  891. return bs;
  892. }
  893. static void blockdev_do_action(TransactionAction *action, Error **errp)
  894. {
  895. TransactionActionList list;
  896. list.value = action;
  897. list.next = NULL;
  898. qmp_transaction(&list, false, NULL, errp);
  899. }
  900. void qmp_blockdev_snapshot_sync(bool has_device, const char *device,
  901. bool has_node_name, const char *node_name,
  902. const char *snapshot_file,
  903. bool has_snapshot_node_name,
  904. const char *snapshot_node_name,
  905. bool has_format, const char *format,
  906. bool has_mode, NewImageMode mode, Error **errp)
  907. {
  908. BlockdevSnapshotSync snapshot = {
  909. .has_device = has_device,
  910. .device = (char *) device,
  911. .has_node_name = has_node_name,
  912. .node_name = (char *) node_name,
  913. .snapshot_file = (char *) snapshot_file,
  914. .has_snapshot_node_name = has_snapshot_node_name,
  915. .snapshot_node_name = (char *) snapshot_node_name,
  916. .has_format = has_format,
  917. .format = (char *) format,
  918. .has_mode = has_mode,
  919. .mode = mode,
  920. };
  921. TransactionAction action = {
  922. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
  923. .u.blockdev_snapshot_sync.data = &snapshot,
  924. };
  925. blockdev_do_action(&action, errp);
  926. }
  927. void qmp_blockdev_snapshot(const char *node, const char *overlay,
  928. Error **errp)
  929. {
  930. BlockdevSnapshot snapshot_data = {
  931. .node = (char *) node,
  932. .overlay = (char *) overlay
  933. };
  934. TransactionAction action = {
  935. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
  936. .u.blockdev_snapshot.data = &snapshot_data,
  937. };
  938. blockdev_do_action(&action, errp);
  939. }
  940. void qmp_blockdev_snapshot_internal_sync(const char *device,
  941. const char *name,
  942. Error **errp)
  943. {
  944. BlockdevSnapshotInternal snapshot = {
  945. .device = (char *) device,
  946. .name = (char *) name
  947. };
  948. TransactionAction action = {
  949. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
  950. .u.blockdev_snapshot_internal_sync.data = &snapshot,
  951. };
  952. blockdev_do_action(&action, errp);
  953. }
  954. SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
  955. bool has_id,
  956. const char *id,
  957. bool has_name,
  958. const char *name,
  959. Error **errp)
  960. {
  961. BlockDriverState *bs;
  962. AioContext *aio_context;
  963. QEMUSnapshotInfo sn;
  964. Error *local_err = NULL;
  965. SnapshotInfo *info = NULL;
  966. int ret;
  967. bs = qmp_get_root_bs(device, errp);
  968. if (!bs) {
  969. return NULL;
  970. }
  971. aio_context = bdrv_get_aio_context(bs);
  972. aio_context_acquire(aio_context);
  973. if (!has_id) {
  974. id = NULL;
  975. }
  976. if (!has_name) {
  977. name = NULL;
  978. }
  979. if (!id && !name) {
  980. error_setg(errp, "Name or id must be provided");
  981. goto out_aio_context;
  982. }
  983. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
  984. goto out_aio_context;
  985. }
  986. ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
  987. if (local_err) {
  988. error_propagate(errp, local_err);
  989. goto out_aio_context;
  990. }
  991. if (!ret) {
  992. error_setg(errp,
  993. "Snapshot with id '%s' and name '%s' does not exist on "
  994. "device '%s'",
  995. STR_OR_NULL(id), STR_OR_NULL(name), device);
  996. goto out_aio_context;
  997. }
  998. bdrv_snapshot_delete(bs, id, name, &local_err);
  999. if (local_err) {
  1000. error_propagate(errp, local_err);
  1001. goto out_aio_context;
  1002. }
  1003. aio_context_release(aio_context);
  1004. info = g_new0(SnapshotInfo, 1);
  1005. info->id = g_strdup(sn.id_str);
  1006. info->name = g_strdup(sn.name);
  1007. info->date_nsec = sn.date_nsec;
  1008. info->date_sec = sn.date_sec;
  1009. info->vm_state_size = sn.vm_state_size;
  1010. info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
  1011. info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;
  1012. if (sn.icount != -1ULL) {
  1013. info->icount = sn.icount;
  1014. info->has_icount = true;
  1015. }
  1016. return info;
  1017. out_aio_context:
  1018. aio_context_release(aio_context);
  1019. return NULL;
  1020. }
  1021. /* New and old BlockDriverState structs for atomic group operations */
  1022. typedef struct BlkActionState BlkActionState;
  1023. /**
  1024. * BlkActionOps:
  1025. * Table of operations that define an Action.
  1026. *
  1027. * @instance_size: Size of state struct, in bytes.
  1028. * @prepare: Prepare the work, must NOT be NULL.
  1029. * @commit: Commit the changes, can be NULL.
  1030. * @abort: Abort the changes on fail, can be NULL.
  1031. * @clean: Clean up resources after all transaction actions have called
  1032. * commit() or abort(). Can be NULL.
  1033. *
  1034. * Only prepare() may fail. In a single transaction, only one of commit() or
  1035. * abort() will be called. clean() will always be called if it is present.
  1036. *
  1037. * Always run under BQL.
  1038. */
  1039. typedef struct BlkActionOps {
  1040. size_t instance_size;
  1041. void (*prepare)(BlkActionState *common, Error **errp);
  1042. void (*commit)(BlkActionState *common);
  1043. void (*abort)(BlkActionState *common);
  1044. void (*clean)(BlkActionState *common);
  1045. } BlkActionOps;
  1046. /**
  1047. * BlkActionState:
  1048. * Describes one Action's state within a Transaction.
  1049. *
  1050. * @action: QAPI-defined enum identifying which Action to perform.
  1051. * @ops: Table of ActionOps this Action can perform.
  1052. * @block_job_txn: Transaction which this action belongs to.
  1053. * @entry: List membership for all Actions in this Transaction.
  1054. *
  1055. * This structure must be arranged as first member in a subclassed type,
  1056. * assuming that the compiler will also arrange it to the same offsets as the
  1057. * base class.
  1058. */
  1059. struct BlkActionState {
  1060. TransactionAction *action;
  1061. const BlkActionOps *ops;
  1062. JobTxn *block_job_txn;
  1063. TransactionProperties *txn_props;
  1064. QTAILQ_ENTRY(BlkActionState) entry;
  1065. };
  1066. /* internal snapshot private data */
  1067. typedef struct InternalSnapshotState {
  1068. BlkActionState common;
  1069. BlockDriverState *bs;
  1070. QEMUSnapshotInfo sn;
  1071. bool created;
  1072. } InternalSnapshotState;
  1073. static int action_check_completion_mode(BlkActionState *s, Error **errp)
  1074. {
  1075. if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
  1076. error_setg(errp,
  1077. "Action '%s' does not support Transaction property "
  1078. "completion-mode = %s",
  1079. TransactionActionKind_str(s->action->type),
  1080. ActionCompletionMode_str(s->txn_props->completion_mode));
  1081. return -1;
  1082. }
  1083. return 0;
  1084. }
  1085. static void internal_snapshot_prepare(BlkActionState *common,
  1086. Error **errp)
  1087. {
  1088. Error *local_err = NULL;
  1089. const char *device;
  1090. const char *name;
  1091. BlockDriverState *bs;
  1092. QEMUSnapshotInfo old_sn, *sn;
  1093. bool ret;
  1094. int64_t rt;
  1095. BlockdevSnapshotInternal *internal;
  1096. InternalSnapshotState *state;
  1097. AioContext *aio_context;
  1098. int ret1;
  1099. g_assert(common->action->type ==
  1100. TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC);
  1101. internal = common->action->u.blockdev_snapshot_internal_sync.data;
  1102. state = DO_UPCAST(InternalSnapshotState, common, common);
  1103. /* 1. parse input */
  1104. device = internal->device;
  1105. name = internal->name;
  1106. /* 2. check for validation */
  1107. if (action_check_completion_mode(common, errp) < 0) {
  1108. return;
  1109. }
  1110. bs = qmp_get_root_bs(device, errp);
  1111. if (!bs) {
  1112. return;
  1113. }
  1114. aio_context = bdrv_get_aio_context(bs);
  1115. aio_context_acquire(aio_context);
  1116. state->bs = bs;
  1117. /* Paired with .clean() */
  1118. bdrv_drained_begin(bs);
  1119. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
  1120. goto out;
  1121. }
  1122. if (bdrv_is_read_only(bs)) {
  1123. error_setg(errp, "Device '%s' is read only", device);
  1124. goto out;
  1125. }
  1126. if (!bdrv_can_snapshot(bs)) {
  1127. error_setg(errp, "Block format '%s' used by device '%s' "
  1128. "does not support internal snapshots",
  1129. bs->drv->format_name, device);
  1130. goto out;
  1131. }
  1132. if (!strlen(name)) {
  1133. error_setg(errp, "Name is empty");
  1134. goto out;
  1135. }
  1136. /* check whether a snapshot with name exist */
  1137. ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
  1138. &local_err);
  1139. if (local_err) {
  1140. error_propagate(errp, local_err);
  1141. goto out;
  1142. } else if (ret) {
  1143. error_setg(errp,
  1144. "Snapshot with name '%s' already exists on device '%s'",
  1145. name, device);
  1146. goto out;
  1147. }
  1148. /* 3. take the snapshot */
  1149. sn = &state->sn;
  1150. pstrcpy(sn->name, sizeof(sn->name), name);
  1151. rt = g_get_real_time();
  1152. sn->date_sec = rt / G_USEC_PER_SEC;
  1153. sn->date_nsec = (rt % G_USEC_PER_SEC) * 1000;
  1154. sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
  1155. if (replay_mode != REPLAY_MODE_NONE) {
  1156. sn->icount = replay_get_current_icount();
  1157. } else {
  1158. sn->icount = -1ULL;
  1159. }
  1160. ret1 = bdrv_snapshot_create(bs, sn);
  1161. if (ret1 < 0) {
  1162. error_setg_errno(errp, -ret1,
  1163. "Failed to create snapshot '%s' on device '%s'",
  1164. name, device);
  1165. goto out;
  1166. }
  1167. /* 4. succeed, mark a snapshot is created */
  1168. state->created = true;
  1169. out:
  1170. aio_context_release(aio_context);
  1171. }
  1172. static void internal_snapshot_abort(BlkActionState *common)
  1173. {
  1174. InternalSnapshotState *state =
  1175. DO_UPCAST(InternalSnapshotState, common, common);
  1176. BlockDriverState *bs = state->bs;
  1177. QEMUSnapshotInfo *sn = &state->sn;
  1178. AioContext *aio_context;
  1179. Error *local_error = NULL;
  1180. if (!state->created) {
  1181. return;
  1182. }
  1183. aio_context = bdrv_get_aio_context(state->bs);
  1184. aio_context_acquire(aio_context);
  1185. if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
  1186. error_reportf_err(local_error,
  1187. "Failed to delete snapshot with id '%s' and "
  1188. "name '%s' on device '%s' in abort: ",
  1189. sn->id_str, sn->name,
  1190. bdrv_get_device_name(bs));
  1191. }
  1192. aio_context_release(aio_context);
  1193. }
  1194. static void internal_snapshot_clean(BlkActionState *common)
  1195. {
  1196. InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
  1197. common, common);
  1198. AioContext *aio_context;
  1199. if (!state->bs) {
  1200. return;
  1201. }
  1202. aio_context = bdrv_get_aio_context(state->bs);
  1203. aio_context_acquire(aio_context);
  1204. bdrv_drained_end(state->bs);
  1205. aio_context_release(aio_context);
  1206. }
  1207. /* external snapshot private data */
  1208. typedef struct ExternalSnapshotState {
  1209. BlkActionState common;
  1210. BlockDriverState *old_bs;
  1211. BlockDriverState *new_bs;
  1212. bool overlay_appended;
  1213. } ExternalSnapshotState;
  1214. static void external_snapshot_prepare(BlkActionState *common,
  1215. Error **errp)
  1216. {
  1217. int ret;
  1218. int flags = 0;
  1219. QDict *options = NULL;
  1220. Error *local_err = NULL;
  1221. /* Device and node name of the image to generate the snapshot from */
  1222. const char *device;
  1223. const char *node_name;
  1224. /* Reference to the new image (for 'blockdev-snapshot') */
  1225. const char *snapshot_ref;
  1226. /* File name of the new image (for 'blockdev-snapshot-sync') */
  1227. const char *new_image_file;
  1228. ExternalSnapshotState *state =
  1229. DO_UPCAST(ExternalSnapshotState, common, common);
  1230. TransactionAction *action = common->action;
  1231. AioContext *aio_context;
  1232. uint64_t perm, shared;
  1233. /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
  1234. * purpose but a different set of parameters */
  1235. switch (action->type) {
  1236. case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
  1237. {
  1238. BlockdevSnapshot *s = action->u.blockdev_snapshot.data;
  1239. device = s->node;
  1240. node_name = s->node;
  1241. new_image_file = NULL;
  1242. snapshot_ref = s->overlay;
  1243. }
  1244. break;
  1245. case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
  1246. {
  1247. BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
  1248. device = s->has_device ? s->device : NULL;
  1249. node_name = s->has_node_name ? s->node_name : NULL;
  1250. new_image_file = s->snapshot_file;
  1251. snapshot_ref = NULL;
  1252. }
  1253. break;
  1254. default:
  1255. g_assert_not_reached();
  1256. }
  1257. /* start processing */
  1258. if (action_check_completion_mode(common, errp) < 0) {
  1259. return;
  1260. }
  1261. state->old_bs = bdrv_lookup_bs(device, node_name, errp);
  1262. if (!state->old_bs) {
  1263. return;
  1264. }
  1265. aio_context = bdrv_get_aio_context(state->old_bs);
  1266. aio_context_acquire(aio_context);
  1267. /* Paired with .clean() */
  1268. bdrv_drained_begin(state->old_bs);
  1269. if (!bdrv_is_inserted(state->old_bs)) {
  1270. error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
  1271. goto out;
  1272. }
  1273. if (bdrv_op_is_blocked(state->old_bs,
  1274. BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
  1275. goto out;
  1276. }
  1277. if (!bdrv_is_read_only(state->old_bs)) {
  1278. if (bdrv_flush(state->old_bs)) {
  1279. error_setg(errp, QERR_IO_ERROR);
  1280. goto out;
  1281. }
  1282. }
  1283. if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
  1284. BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
  1285. const char *format = s->has_format ? s->format : "qcow2";
  1286. enum NewImageMode mode;
  1287. const char *snapshot_node_name =
  1288. s->has_snapshot_node_name ? s->snapshot_node_name : NULL;
  1289. if (node_name && !snapshot_node_name) {
  1290. error_setg(errp, "New overlay node-name missing");
  1291. goto out;
  1292. }
  1293. if (snapshot_node_name &&
  1294. bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
  1295. error_setg(errp, "New overlay node-name already in use");
  1296. goto out;
  1297. }
  1298. flags = state->old_bs->open_flags;
  1299. flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_COPY_ON_READ);
  1300. flags |= BDRV_O_NO_BACKING;
  1301. /* create new image w/backing file */
  1302. mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
  1303. if (mode != NEW_IMAGE_MODE_EXISTING) {
  1304. int64_t size = bdrv_getlength(state->old_bs);
  1305. if (size < 0) {
  1306. error_setg_errno(errp, -size, "bdrv_getlength failed");
  1307. goto out;
  1308. }
  1309. bdrv_refresh_filename(state->old_bs);
  1310. bdrv_img_create(new_image_file, format,
  1311. state->old_bs->filename,
  1312. state->old_bs->drv->format_name,
  1313. NULL, size, flags, false, &local_err);
  1314. if (local_err) {
  1315. error_propagate(errp, local_err);
  1316. goto out;
  1317. }
  1318. }
  1319. options = qdict_new();
  1320. if (snapshot_node_name) {
  1321. qdict_put_str(options, "node-name", snapshot_node_name);
  1322. }
  1323. qdict_put_str(options, "driver", format);
  1324. }
  1325. state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags,
  1326. errp);
  1327. /* We will manually add the backing_hd field to the bs later */
  1328. if (!state->new_bs) {
  1329. goto out;
  1330. }
  1331. /*
  1332. * Allow attaching a backing file to an overlay that's already in use only
  1333. * if the parents don't assume that they are already seeing a valid image.
  1334. * (Specifically, allow it as a mirror target, which is write-only access.)
  1335. */
  1336. bdrv_get_cumulative_perm(state->new_bs, &perm, &shared);
  1337. if (perm & BLK_PERM_CONSISTENT_READ) {
  1338. error_setg(errp, "The overlay is already in use");
  1339. goto out;
  1340. }
  1341. if (state->new_bs->drv->is_filter) {
  1342. error_setg(errp, "Filters cannot be used as overlays");
  1343. goto out;
  1344. }
  1345. if (bdrv_cow_child(state->new_bs)) {
  1346. error_setg(errp, "The overlay already has a backing image");
  1347. goto out;
  1348. }
  1349. if (!state->new_bs->drv->supports_backing) {
  1350. error_setg(errp, "The overlay does not support backing images");
  1351. goto out;
  1352. }
  1353. ret = bdrv_append(state->new_bs, state->old_bs, errp);
  1354. if (ret < 0) {
  1355. goto out;
  1356. }
  1357. state->overlay_appended = true;
  1358. out:
  1359. aio_context_release(aio_context);
  1360. }
  1361. static void external_snapshot_commit(BlkActionState *common)
  1362. {
  1363. ExternalSnapshotState *state =
  1364. DO_UPCAST(ExternalSnapshotState, common, common);
  1365. AioContext *aio_context;
  1366. aio_context = bdrv_get_aio_context(state->old_bs);
  1367. aio_context_acquire(aio_context);
  1368. /* We don't need (or want) to use the transactional
  1369. * bdrv_reopen_multiple() across all the entries at once, because we
  1370. * don't want to abort all of them if one of them fails the reopen */
  1371. if (!qatomic_read(&state->old_bs->copy_on_read)) {
  1372. bdrv_reopen_set_read_only(state->old_bs, true, NULL);
  1373. }
  1374. aio_context_release(aio_context);
  1375. }
  1376. static void external_snapshot_abort(BlkActionState *common)
  1377. {
  1378. ExternalSnapshotState *state =
  1379. DO_UPCAST(ExternalSnapshotState, common, common);
  1380. if (state->new_bs) {
  1381. if (state->overlay_appended) {
  1382. AioContext *aio_context;
  1383. AioContext *tmp_context;
  1384. int ret;
  1385. aio_context = bdrv_get_aio_context(state->old_bs);
  1386. aio_context_acquire(aio_context);
  1387. bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd()
  1388. close state->old_bs; we need it */
  1389. bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);
  1390. /*
  1391. * The call to bdrv_set_backing_hd() above returns state->old_bs to
  1392. * the main AioContext. As we're still going to be using it, return
  1393. * it to the AioContext it was before.
  1394. */
  1395. tmp_context = bdrv_get_aio_context(state->old_bs);
  1396. if (aio_context != tmp_context) {
  1397. aio_context_release(aio_context);
  1398. aio_context_acquire(tmp_context);
  1399. ret = bdrv_try_set_aio_context(state->old_bs,
  1400. aio_context, NULL);
  1401. assert(ret == 0);
  1402. aio_context_release(tmp_context);
  1403. aio_context_acquire(aio_context);
  1404. }
  1405. bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
  1406. bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */
  1407. aio_context_release(aio_context);
  1408. }
  1409. }
  1410. }
  1411. static void external_snapshot_clean(BlkActionState *common)
  1412. {
  1413. ExternalSnapshotState *state =
  1414. DO_UPCAST(ExternalSnapshotState, common, common);
  1415. AioContext *aio_context;
  1416. if (!state->old_bs) {
  1417. return;
  1418. }
  1419. aio_context = bdrv_get_aio_context(state->old_bs);
  1420. aio_context_acquire(aio_context);
  1421. bdrv_drained_end(state->old_bs);
  1422. bdrv_unref(state->new_bs);
  1423. aio_context_release(aio_context);
  1424. }
  1425. typedef struct DriveBackupState {
  1426. BlkActionState common;
  1427. BlockDriverState *bs;
  1428. BlockJob *job;
  1429. } DriveBackupState;
  1430. static BlockJob *do_backup_common(BackupCommon *backup,
  1431. BlockDriverState *bs,
  1432. BlockDriverState *target_bs,
  1433. AioContext *aio_context,
  1434. JobTxn *txn, Error **errp);
  1435. static void drive_backup_prepare(BlkActionState *common, Error **errp)
  1436. {
  1437. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1438. DriveBackup *backup;
  1439. BlockDriverState *bs;
  1440. BlockDriverState *target_bs;
  1441. BlockDriverState *source = NULL;
  1442. AioContext *aio_context;
  1443. AioContext *old_context;
  1444. QDict *options;
  1445. Error *local_err = NULL;
  1446. int flags;
  1447. int64_t size;
  1448. bool set_backing_hd = false;
  1449. int ret;
  1450. assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
  1451. backup = common->action->u.drive_backup.data;
  1452. if (!backup->has_mode) {
  1453. backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
  1454. }
  1455. bs = bdrv_lookup_bs(backup->device, backup->device, errp);
  1456. if (!bs) {
  1457. return;
  1458. }
  1459. if (!bs->drv) {
  1460. error_setg(errp, "Device has no medium");
  1461. return;
  1462. }
  1463. aio_context = bdrv_get_aio_context(bs);
  1464. aio_context_acquire(aio_context);
  1465. state->bs = bs;
  1466. /* Paired with .clean() */
  1467. bdrv_drained_begin(bs);
  1468. if (!backup->has_format) {
  1469. backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ?
  1470. NULL : (char *) bs->drv->format_name;
  1471. }
  1472. /* Early check to avoid creating target */
  1473. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
  1474. goto out;
  1475. }
  1476. flags = bs->open_flags | BDRV_O_RDWR;
  1477. /*
  1478. * See if we have a backing HD we can use to create our new image
  1479. * on top of.
  1480. */
  1481. if (backup->sync == MIRROR_SYNC_MODE_TOP) {
  1482. /*
  1483. * Backup will not replace the source by the target, so none
  1484. * of the filters skipped here will be removed (in contrast to
  1485. * mirror). Therefore, we can skip all of them when looking
  1486. * for the first COW relationship.
  1487. */
  1488. source = bdrv_cow_bs(bdrv_skip_filters(bs));
  1489. if (!source) {
  1490. backup->sync = MIRROR_SYNC_MODE_FULL;
  1491. }
  1492. }
  1493. if (backup->sync == MIRROR_SYNC_MODE_NONE) {
  1494. source = bs;
  1495. flags |= BDRV_O_NO_BACKING;
  1496. set_backing_hd = true;
  1497. }
  1498. size = bdrv_getlength(bs);
  1499. if (size < 0) {
  1500. error_setg_errno(errp, -size, "bdrv_getlength failed");
  1501. goto out;
  1502. }
  1503. if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
  1504. assert(backup->format);
  1505. if (source) {
  1506. /* Implicit filters should not appear in the filename */
  1507. BlockDriverState *explicit_backing =
  1508. bdrv_skip_implicit_filters(source);
  1509. bdrv_refresh_filename(explicit_backing);
  1510. bdrv_img_create(backup->target, backup->format,
  1511. explicit_backing->filename,
  1512. explicit_backing->drv->format_name, NULL,
  1513. size, flags, false, &local_err);
  1514. } else {
  1515. bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL,
  1516. size, flags, false, &local_err);
  1517. }
  1518. }
  1519. if (local_err) {
  1520. error_propagate(errp, local_err);
  1521. goto out;
  1522. }
  1523. options = qdict_new();
  1524. qdict_put_str(options, "discard", "unmap");
  1525. qdict_put_str(options, "detect-zeroes", "unmap");
  1526. if (backup->format) {
  1527. qdict_put_str(options, "driver", backup->format);
  1528. }
  1529. target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
  1530. if (!target_bs) {
  1531. goto out;
  1532. }
  1533. /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
  1534. old_context = bdrv_get_aio_context(target_bs);
  1535. aio_context_release(aio_context);
  1536. aio_context_acquire(old_context);
  1537. ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
  1538. if (ret < 0) {
  1539. bdrv_unref(target_bs);
  1540. aio_context_release(old_context);
  1541. return;
  1542. }
  1543. aio_context_release(old_context);
  1544. aio_context_acquire(aio_context);
  1545. if (set_backing_hd) {
  1546. if (bdrv_set_backing_hd(target_bs, source, errp) < 0) {
  1547. goto unref;
  1548. }
  1549. }
  1550. state->job = do_backup_common(qapi_DriveBackup_base(backup),
  1551. bs, target_bs, aio_context,
  1552. common->block_job_txn, errp);
  1553. unref:
  1554. bdrv_unref(target_bs);
  1555. out:
  1556. aio_context_release(aio_context);
  1557. }
  1558. static void drive_backup_commit(BlkActionState *common)
  1559. {
  1560. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1561. AioContext *aio_context;
  1562. aio_context = bdrv_get_aio_context(state->bs);
  1563. aio_context_acquire(aio_context);
  1564. assert(state->job);
  1565. job_start(&state->job->job);
  1566. aio_context_release(aio_context);
  1567. }
  1568. static void drive_backup_abort(BlkActionState *common)
  1569. {
  1570. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1571. if (state->job) {
  1572. AioContext *aio_context;
  1573. aio_context = bdrv_get_aio_context(state->bs);
  1574. aio_context_acquire(aio_context);
  1575. job_cancel_sync(&state->job->job, true);
  1576. aio_context_release(aio_context);
  1577. }
  1578. }
  1579. static void drive_backup_clean(BlkActionState *common)
  1580. {
  1581. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1582. AioContext *aio_context;
  1583. if (!state->bs) {
  1584. return;
  1585. }
  1586. aio_context = bdrv_get_aio_context(state->bs);
  1587. aio_context_acquire(aio_context);
  1588. bdrv_drained_end(state->bs);
  1589. aio_context_release(aio_context);
  1590. }
  1591. typedef struct BlockdevBackupState {
  1592. BlkActionState common;
  1593. BlockDriverState *bs;
  1594. BlockJob *job;
  1595. } BlockdevBackupState;
  1596. static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
  1597. {
  1598. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1599. BlockdevBackup *backup;
  1600. BlockDriverState *bs;
  1601. BlockDriverState *target_bs;
  1602. AioContext *aio_context;
  1603. AioContext *old_context;
  1604. int ret;
  1605. assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
  1606. backup = common->action->u.blockdev_backup.data;
  1607. bs = bdrv_lookup_bs(backup->device, backup->device, errp);
  1608. if (!bs) {
  1609. return;
  1610. }
  1611. target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
  1612. if (!target_bs) {
  1613. return;
  1614. }
  1615. /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
  1616. aio_context = bdrv_get_aio_context(bs);
  1617. old_context = bdrv_get_aio_context(target_bs);
  1618. aio_context_acquire(old_context);
  1619. ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
  1620. if (ret < 0) {
  1621. aio_context_release(old_context);
  1622. return;
  1623. }
  1624. aio_context_release(old_context);
  1625. aio_context_acquire(aio_context);
  1626. state->bs = bs;
  1627. /* Paired with .clean() */
  1628. bdrv_drained_begin(state->bs);
  1629. state->job = do_backup_common(qapi_BlockdevBackup_base(backup),
  1630. bs, target_bs, aio_context,
  1631. common->block_job_txn, errp);
  1632. aio_context_release(aio_context);
  1633. }
  1634. static void blockdev_backup_commit(BlkActionState *common)
  1635. {
  1636. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1637. AioContext *aio_context;
  1638. aio_context = bdrv_get_aio_context(state->bs);
  1639. aio_context_acquire(aio_context);
  1640. assert(state->job);
  1641. job_start(&state->job->job);
  1642. aio_context_release(aio_context);
  1643. }
  1644. static void blockdev_backup_abort(BlkActionState *common)
  1645. {
  1646. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1647. if (state->job) {
  1648. AioContext *aio_context;
  1649. aio_context = bdrv_get_aio_context(state->bs);
  1650. aio_context_acquire(aio_context);
  1651. job_cancel_sync(&state->job->job, true);
  1652. aio_context_release(aio_context);
  1653. }
  1654. }
  1655. static void blockdev_backup_clean(BlkActionState *common)
  1656. {
  1657. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1658. AioContext *aio_context;
  1659. if (!state->bs) {
  1660. return;
  1661. }
  1662. aio_context = bdrv_get_aio_context(state->bs);
  1663. aio_context_acquire(aio_context);
  1664. bdrv_drained_end(state->bs);
  1665. aio_context_release(aio_context);
  1666. }
  1667. typedef struct BlockDirtyBitmapState {
  1668. BlkActionState common;
  1669. BdrvDirtyBitmap *bitmap;
  1670. BlockDriverState *bs;
  1671. HBitmap *backup;
  1672. bool prepared;
  1673. bool was_enabled;
  1674. } BlockDirtyBitmapState;
  1675. static void block_dirty_bitmap_add_prepare(BlkActionState *common,
  1676. Error **errp)
  1677. {
  1678. Error *local_err = NULL;
  1679. BlockDirtyBitmapAdd *action;
  1680. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1681. common, common);
  1682. if (action_check_completion_mode(common, errp) < 0) {
  1683. return;
  1684. }
  1685. action = common->action->u.block_dirty_bitmap_add.data;
  1686. /* AIO context taken and released within qmp_block_dirty_bitmap_add */
  1687. qmp_block_dirty_bitmap_add(action->node, action->name,
  1688. action->has_granularity, action->granularity,
  1689. action->has_persistent, action->persistent,
  1690. action->has_disabled, action->disabled,
  1691. &local_err);
  1692. if (!local_err) {
  1693. state->prepared = true;
  1694. } else {
  1695. error_propagate(errp, local_err);
  1696. }
  1697. }
  1698. static void block_dirty_bitmap_add_abort(BlkActionState *common)
  1699. {
  1700. BlockDirtyBitmapAdd *action;
  1701. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1702. common, common);
  1703. action = common->action->u.block_dirty_bitmap_add.data;
  1704. /* Should not be able to fail: IF the bitmap was added via .prepare(),
  1705. * then the node reference and bitmap name must have been valid.
  1706. */
  1707. if (state->prepared) {
  1708. qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort);
  1709. }
  1710. }
  1711. static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
  1712. Error **errp)
  1713. {
  1714. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1715. common, common);
  1716. BlockDirtyBitmap *action;
  1717. if (action_check_completion_mode(common, errp) < 0) {
  1718. return;
  1719. }
  1720. action = common->action->u.block_dirty_bitmap_clear.data;
  1721. state->bitmap = block_dirty_bitmap_lookup(action->node,
  1722. action->name,
  1723. &state->bs,
  1724. errp);
  1725. if (!state->bitmap) {
  1726. return;
  1727. }
  1728. if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_DEFAULT, errp)) {
  1729. return;
  1730. }
  1731. bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
  1732. }
  1733. static void block_dirty_bitmap_restore(BlkActionState *common)
  1734. {
  1735. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1736. common, common);
  1737. if (state->backup) {
  1738. bdrv_restore_dirty_bitmap(state->bitmap, state->backup);
  1739. }
  1740. }
  1741. static void block_dirty_bitmap_free_backup(BlkActionState *common)
  1742. {
  1743. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1744. common, common);
  1745. hbitmap_free(state->backup);
  1746. }
  1747. static void block_dirty_bitmap_enable_prepare(BlkActionState *common,
  1748. Error **errp)
  1749. {
  1750. BlockDirtyBitmap *action;
  1751. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1752. common, common);
  1753. if (action_check_completion_mode(common, errp) < 0) {
  1754. return;
  1755. }
  1756. action = common->action->u.block_dirty_bitmap_enable.data;
  1757. state->bitmap = block_dirty_bitmap_lookup(action->node,
  1758. action->name,
  1759. NULL,
  1760. errp);
  1761. if (!state->bitmap) {
  1762. return;
  1763. }
  1764. if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_ALLOW_RO, errp)) {
  1765. return;
  1766. }
  1767. state->was_enabled = bdrv_dirty_bitmap_enabled(state->bitmap);
  1768. bdrv_enable_dirty_bitmap(state->bitmap);
  1769. }
  1770. static void block_dirty_bitmap_enable_abort(BlkActionState *common)
  1771. {
  1772. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1773. common, common);
  1774. if (!state->was_enabled) {
  1775. bdrv_disable_dirty_bitmap(state->bitmap);
  1776. }
  1777. }
  1778. static void block_dirty_bitmap_disable_prepare(BlkActionState *common,
  1779. Error **errp)
  1780. {
  1781. BlockDirtyBitmap *action;
  1782. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1783. common, common);
  1784. if (action_check_completion_mode(common, errp) < 0) {
  1785. return;
  1786. }
  1787. action = common->action->u.block_dirty_bitmap_disable.data;
  1788. state->bitmap = block_dirty_bitmap_lookup(action->node,
  1789. action->name,
  1790. NULL,
  1791. errp);
  1792. if (!state->bitmap) {
  1793. return;
  1794. }
  1795. if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_ALLOW_RO, errp)) {
  1796. return;
  1797. }
  1798. state->was_enabled = bdrv_dirty_bitmap_enabled(state->bitmap);
  1799. bdrv_disable_dirty_bitmap(state->bitmap);
  1800. }
  1801. static void block_dirty_bitmap_disable_abort(BlkActionState *common)
  1802. {
  1803. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1804. common, common);
  1805. if (state->was_enabled) {
  1806. bdrv_enable_dirty_bitmap(state->bitmap);
  1807. }
  1808. }
  1809. static void block_dirty_bitmap_merge_prepare(BlkActionState *common,
  1810. Error **errp)
  1811. {
  1812. BlockDirtyBitmapMerge *action;
  1813. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1814. common, common);
  1815. if (action_check_completion_mode(common, errp) < 0) {
  1816. return;
  1817. }
  1818. action = common->action->u.block_dirty_bitmap_merge.data;
  1819. state->bitmap = block_dirty_bitmap_merge(action->node, action->target,
  1820. action->bitmaps, &state->backup,
  1821. errp);
  1822. }
  1823. static void block_dirty_bitmap_remove_prepare(BlkActionState *common,
  1824. Error **errp)
  1825. {
  1826. BlockDirtyBitmap *action;
  1827. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1828. common, common);
  1829. if (action_check_completion_mode(common, errp) < 0) {
  1830. return;
  1831. }
  1832. action = common->action->u.block_dirty_bitmap_remove.data;
  1833. state->bitmap = block_dirty_bitmap_remove(action->node, action->name,
  1834. false, &state->bs, errp);
  1835. if (state->bitmap) {
  1836. bdrv_dirty_bitmap_skip_store(state->bitmap, true);
  1837. bdrv_dirty_bitmap_set_busy(state->bitmap, true);
  1838. }
  1839. }
  1840. static void block_dirty_bitmap_remove_abort(BlkActionState *common)
  1841. {
  1842. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1843. common, common);
  1844. if (state->bitmap) {
  1845. bdrv_dirty_bitmap_skip_store(state->bitmap, false);
  1846. bdrv_dirty_bitmap_set_busy(state->bitmap, false);
  1847. }
  1848. }
  1849. static void block_dirty_bitmap_remove_commit(BlkActionState *common)
  1850. {
  1851. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1852. common, common);
  1853. bdrv_dirty_bitmap_set_busy(state->bitmap, false);
  1854. bdrv_release_dirty_bitmap(state->bitmap);
  1855. }
  1856. static void abort_prepare(BlkActionState *common, Error **errp)
  1857. {
  1858. error_setg(errp, "Transaction aborted using Abort action");
  1859. }
  1860. static void abort_commit(BlkActionState *common)
  1861. {
  1862. g_assert_not_reached(); /* this action never succeeds */
  1863. }
  1864. static const BlkActionOps actions[] = {
  1865. [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = {
  1866. .instance_size = sizeof(ExternalSnapshotState),
  1867. .prepare = external_snapshot_prepare,
  1868. .commit = external_snapshot_commit,
  1869. .abort = external_snapshot_abort,
  1870. .clean = external_snapshot_clean,
  1871. },
  1872. [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = {
  1873. .instance_size = sizeof(ExternalSnapshotState),
  1874. .prepare = external_snapshot_prepare,
  1875. .commit = external_snapshot_commit,
  1876. .abort = external_snapshot_abort,
  1877. .clean = external_snapshot_clean,
  1878. },
  1879. [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = {
  1880. .instance_size = sizeof(DriveBackupState),
  1881. .prepare = drive_backup_prepare,
  1882. .commit = drive_backup_commit,
  1883. .abort = drive_backup_abort,
  1884. .clean = drive_backup_clean,
  1885. },
  1886. [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = {
  1887. .instance_size = sizeof(BlockdevBackupState),
  1888. .prepare = blockdev_backup_prepare,
  1889. .commit = blockdev_backup_commit,
  1890. .abort = blockdev_backup_abort,
  1891. .clean = blockdev_backup_clean,
  1892. },
  1893. [TRANSACTION_ACTION_KIND_ABORT] = {
  1894. .instance_size = sizeof(BlkActionState),
  1895. .prepare = abort_prepare,
  1896. .commit = abort_commit,
  1897. },
  1898. [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = {
  1899. .instance_size = sizeof(InternalSnapshotState),
  1900. .prepare = internal_snapshot_prepare,
  1901. .abort = internal_snapshot_abort,
  1902. .clean = internal_snapshot_clean,
  1903. },
  1904. [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = {
  1905. .instance_size = sizeof(BlockDirtyBitmapState),
  1906. .prepare = block_dirty_bitmap_add_prepare,
  1907. .abort = block_dirty_bitmap_add_abort,
  1908. },
  1909. [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = {
  1910. .instance_size = sizeof(BlockDirtyBitmapState),
  1911. .prepare = block_dirty_bitmap_clear_prepare,
  1912. .commit = block_dirty_bitmap_free_backup,
  1913. .abort = block_dirty_bitmap_restore,
  1914. },
  1915. [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ENABLE] = {
  1916. .instance_size = sizeof(BlockDirtyBitmapState),
  1917. .prepare = block_dirty_bitmap_enable_prepare,
  1918. .abort = block_dirty_bitmap_enable_abort,
  1919. },
  1920. [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_DISABLE] = {
  1921. .instance_size = sizeof(BlockDirtyBitmapState),
  1922. .prepare = block_dirty_bitmap_disable_prepare,
  1923. .abort = block_dirty_bitmap_disable_abort,
  1924. },
  1925. [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_MERGE] = {
  1926. .instance_size = sizeof(BlockDirtyBitmapState),
  1927. .prepare = block_dirty_bitmap_merge_prepare,
  1928. .commit = block_dirty_bitmap_free_backup,
  1929. .abort = block_dirty_bitmap_restore,
  1930. },
  1931. [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_REMOVE] = {
  1932. .instance_size = sizeof(BlockDirtyBitmapState),
  1933. .prepare = block_dirty_bitmap_remove_prepare,
  1934. .commit = block_dirty_bitmap_remove_commit,
  1935. .abort = block_dirty_bitmap_remove_abort,
  1936. },
  1937. /* Where are transactions for MIRROR, COMMIT and STREAM?
  1938. * Although these blockjobs use transaction callbacks like the backup job,
  1939. * these jobs do not necessarily adhere to transaction semantics.
  1940. * These jobs may not fully undo all of their actions on abort, nor do they
  1941. * necessarily work in transactions with more than one job in them.
  1942. */
  1943. };
  1944. /**
  1945. * Allocate a TransactionProperties structure if necessary, and fill
  1946. * that structure with desired defaults if they are unset.
  1947. */
  1948. static TransactionProperties *get_transaction_properties(
  1949. TransactionProperties *props)
  1950. {
  1951. if (!props) {
  1952. props = g_new0(TransactionProperties, 1);
  1953. }
  1954. if (!props->has_completion_mode) {
  1955. props->has_completion_mode = true;
  1956. props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL;
  1957. }
  1958. return props;
  1959. }
  1960. /*
  1961. * 'Atomic' group operations. The operations are performed as a set, and if
  1962. * any fail then we roll back all operations in the group.
  1963. *
  1964. * Always run under BQL.
  1965. */
  1966. void qmp_transaction(TransactionActionList *dev_list,
  1967. bool has_props,
  1968. struct TransactionProperties *props,
  1969. Error **errp)
  1970. {
  1971. TransactionActionList *dev_entry = dev_list;
  1972. JobTxn *block_job_txn = NULL;
  1973. BlkActionState *state, *next;
  1974. Error *local_err = NULL;
  1975. GLOBAL_STATE_CODE();
  1976. QTAILQ_HEAD(, BlkActionState) snap_bdrv_states;
  1977. QTAILQ_INIT(&snap_bdrv_states);
  1978. /* Does this transaction get canceled as a group on failure?
  1979. * If not, we don't really need to make a JobTxn.
  1980. */
  1981. props = get_transaction_properties(props);
  1982. if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
  1983. block_job_txn = job_txn_new();
  1984. }
  1985. /* drain all i/o before any operations */
  1986. bdrv_drain_all();
  1987. /* We don't do anything in this loop that commits us to the operations */
  1988. while (NULL != dev_entry) {
  1989. TransactionAction *dev_info = NULL;
  1990. const BlkActionOps *ops;
  1991. dev_info = dev_entry->value;
  1992. dev_entry = dev_entry->next;
  1993. assert(dev_info->type < ARRAY_SIZE(actions));
  1994. ops = &actions[dev_info->type];
  1995. assert(ops->instance_size > 0);
  1996. state = g_malloc0(ops->instance_size);
  1997. state->ops = ops;
  1998. state->action = dev_info;
  1999. state->block_job_txn = block_job_txn;
  2000. state->txn_props = props;
  2001. QTAILQ_INSERT_TAIL(&snap_bdrv_states, state, entry);
  2002. state->ops->prepare(state, &local_err);
  2003. if (local_err) {
  2004. error_propagate(errp, local_err);
  2005. goto delete_and_fail;
  2006. }
  2007. }
  2008. QTAILQ_FOREACH(state, &snap_bdrv_states, entry) {
  2009. if (state->ops->commit) {
  2010. state->ops->commit(state);
  2011. }
  2012. }
  2013. /* success */
  2014. goto exit;
  2015. delete_and_fail:
  2016. /* failure, and it is all-or-none; roll back all operations */
  2017. QTAILQ_FOREACH_REVERSE(state, &snap_bdrv_states, entry) {
  2018. if (state->ops->abort) {
  2019. state->ops->abort(state);
  2020. }
  2021. }
  2022. exit:
  2023. QTAILQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) {
  2024. if (state->ops->clean) {
  2025. state->ops->clean(state);
  2026. }
  2027. g_free(state);
  2028. }
  2029. if (!has_props) {
  2030. qapi_free_TransactionProperties(props);
  2031. }
  2032. job_txn_unref(block_job_txn);
  2033. }
  2034. BlockDirtyBitmapSha256 *qmp_x_debug_block_dirty_bitmap_sha256(const char *node,
  2035. const char *name,
  2036. Error **errp)
  2037. {
  2038. BdrvDirtyBitmap *bitmap;
  2039. BlockDriverState *bs;
  2040. BlockDirtyBitmapSha256 *ret = NULL;
  2041. char *sha256;
  2042. bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
  2043. if (!bitmap || !bs) {
  2044. return NULL;
  2045. }
  2046. sha256 = bdrv_dirty_bitmap_sha256(bitmap, errp);
  2047. if (sha256 == NULL) {
  2048. return NULL;
  2049. }
  2050. ret = g_new(BlockDirtyBitmapSha256, 1);
  2051. ret->sha256 = sha256;
  2052. return ret;
  2053. }
  2054. void coroutine_fn qmp_block_resize(bool has_device, const char *device,
  2055. bool has_node_name, const char *node_name,
  2056. int64_t size, Error **errp)
  2057. {
  2058. Error *local_err = NULL;
  2059. BlockBackend *blk;
  2060. BlockDriverState *bs;
  2061. AioContext *old_ctx;
  2062. bs = bdrv_lookup_bs(has_device ? device : NULL,
  2063. has_node_name ? node_name : NULL,
  2064. &local_err);
  2065. if (local_err) {
  2066. error_propagate(errp, local_err);
  2067. return;
  2068. }
  2069. if (size < 0) {
  2070. error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
  2071. return;
  2072. }
  2073. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
  2074. error_setg(errp, QERR_DEVICE_IN_USE, device);
  2075. return;
  2076. }
  2077. blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
  2078. if (!blk) {
  2079. return;
  2080. }
  2081. bdrv_co_lock(bs);
  2082. bdrv_drained_begin(bs);
  2083. bdrv_co_unlock(bs);
  2084. old_ctx = bdrv_co_enter(bs);
  2085. blk_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp);
  2086. bdrv_co_leave(bs, old_ctx);
  2087. bdrv_co_lock(bs);
  2088. bdrv_drained_end(bs);
  2089. blk_unref(blk);
  2090. bdrv_co_unlock(bs);
  2091. }
  2092. void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
  2093. bool has_base, const char *base,
  2094. bool has_base_node, const char *base_node,
  2095. bool has_backing_file, const char *backing_file,
  2096. bool has_bottom, const char *bottom,
  2097. bool has_speed, int64_t speed,
  2098. bool has_on_error, BlockdevOnError on_error,
  2099. bool has_filter_node_name, const char *filter_node_name,
  2100. bool has_auto_finalize, bool auto_finalize,
  2101. bool has_auto_dismiss, bool auto_dismiss,
  2102. Error **errp)
  2103. {
  2104. BlockDriverState *bs, *iter, *iter_end;
  2105. BlockDriverState *base_bs = NULL;
  2106. BlockDriverState *bottom_bs = NULL;
  2107. AioContext *aio_context;
  2108. Error *local_err = NULL;
  2109. int job_flags = JOB_DEFAULT;
  2110. if (has_base && has_base_node) {
  2111. error_setg(errp, "'base' and 'base-node' cannot be specified "
  2112. "at the same time");
  2113. return;
  2114. }
  2115. if (has_base && has_bottom) {
  2116. error_setg(errp, "'base' and 'bottom' cannot be specified "
  2117. "at the same time");
  2118. return;
  2119. }
  2120. if (has_bottom && has_base_node) {
  2121. error_setg(errp, "'bottom' and 'base-node' cannot be specified "
  2122. "at the same time");
  2123. return;
  2124. }
  2125. if (!has_on_error) {
  2126. on_error = BLOCKDEV_ON_ERROR_REPORT;
  2127. }
  2128. bs = bdrv_lookup_bs(device, device, errp);
  2129. if (!bs) {
  2130. return;
  2131. }
  2132. aio_context = bdrv_get_aio_context(bs);
  2133. aio_context_acquire(aio_context);
  2134. if (has_base) {
  2135. base_bs = bdrv_find_backing_image(bs, base);
  2136. if (base_bs == NULL) {
  2137. error_setg(errp, "Can't find '%s' in the backing chain", base);
  2138. goto out;
  2139. }
  2140. assert(bdrv_get_aio_context(base_bs) == aio_context);
  2141. }
  2142. if (has_base_node) {
  2143. base_bs = bdrv_lookup_bs(NULL, base_node, errp);
  2144. if (!base_bs) {
  2145. goto out;
  2146. }
  2147. if (bs == base_bs || !bdrv_chain_contains(bs, base_bs)) {
  2148. error_setg(errp, "Node '%s' is not a backing image of '%s'",
  2149. base_node, device);
  2150. goto out;
  2151. }
  2152. assert(bdrv_get_aio_context(base_bs) == aio_context);
  2153. bdrv_refresh_filename(base_bs);
  2154. }
  2155. if (has_bottom) {
  2156. bottom_bs = bdrv_lookup_bs(NULL, bottom, errp);
  2157. if (!bottom_bs) {
  2158. goto out;
  2159. }
  2160. if (!bottom_bs->drv) {
  2161. error_setg(errp, "Node '%s' is not open", bottom);
  2162. goto out;
  2163. }
  2164. if (bottom_bs->drv->is_filter) {
  2165. error_setg(errp, "Node '%s' is a filter, use a non-filter node "
  2166. "as 'bottom'", bottom);
  2167. goto out;
  2168. }
  2169. if (!bdrv_chain_contains(bs, bottom_bs)) {
  2170. error_setg(errp, "Node '%s' is not in a chain starting from '%s'",
  2171. bottom, device);
  2172. goto out;
  2173. }
  2174. assert(bdrv_get_aio_context(bottom_bs) == aio_context);
  2175. }
  2176. /*
  2177. * Check for op blockers in the whole chain between bs and base (or bottom)
  2178. */
  2179. iter_end = has_bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
  2180. for (iter = bs; iter && iter != iter_end;
  2181. iter = bdrv_filter_or_cow_bs(iter))
  2182. {
  2183. if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
  2184. goto out;
  2185. }
  2186. }
  2187. /* if we are streaming the entire chain, the result will have no backing
  2188. * file, and specifying one is therefore an error */
  2189. if (base_bs == NULL && has_backing_file) {
  2190. error_setg(errp, "backing file specified, but streaming the "
  2191. "entire chain");
  2192. goto out;
  2193. }
  2194. if (has_auto_finalize && !auto_finalize) {
  2195. job_flags |= JOB_MANUAL_FINALIZE;
  2196. }
  2197. if (has_auto_dismiss && !auto_dismiss) {
  2198. job_flags |= JOB_MANUAL_DISMISS;
  2199. }
  2200. stream_start(has_job_id ? job_id : NULL, bs, base_bs, backing_file,
  2201. bottom_bs, job_flags, has_speed ? speed : 0, on_error,
  2202. filter_node_name, &local_err);
  2203. if (local_err) {
  2204. error_propagate(errp, local_err);
  2205. goto out;
  2206. }
  2207. trace_qmp_block_stream(bs);
  2208. out:
  2209. aio_context_release(aio_context);
  2210. }
  2211. void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
  2212. bool has_base_node, const char *base_node,
  2213. bool has_base, const char *base,
  2214. bool has_top_node, const char *top_node,
  2215. bool has_top, const char *top,
  2216. bool has_backing_file, const char *backing_file,
  2217. bool has_speed, int64_t speed,
  2218. bool has_on_error, BlockdevOnError on_error,
  2219. bool has_filter_node_name, const char *filter_node_name,
  2220. bool has_auto_finalize, bool auto_finalize,
  2221. bool has_auto_dismiss, bool auto_dismiss,
  2222. Error **errp)
  2223. {
  2224. BlockDriverState *bs;
  2225. BlockDriverState *iter;
  2226. BlockDriverState *base_bs, *top_bs;
  2227. AioContext *aio_context;
  2228. Error *local_err = NULL;
  2229. int job_flags = JOB_DEFAULT;
  2230. uint64_t top_perm, top_shared;
  2231. if (!has_speed) {
  2232. speed = 0;
  2233. }
  2234. if (!has_on_error) {
  2235. on_error = BLOCKDEV_ON_ERROR_REPORT;
  2236. }
  2237. if (!has_filter_node_name) {
  2238. filter_node_name = NULL;
  2239. }
  2240. if (has_auto_finalize && !auto_finalize) {
  2241. job_flags |= JOB_MANUAL_FINALIZE;
  2242. }
  2243. if (has_auto_dismiss && !auto_dismiss) {
  2244. job_flags |= JOB_MANUAL_DISMISS;
  2245. }
  2246. /* Important Note:
  2247. * libvirt relies on the DeviceNotFound error class in order to probe for
  2248. * live commit feature versions; for this to work, we must make sure to
  2249. * perform the device lookup before any generic errors that may occur in a
  2250. * scenario in which all optional arguments are omitted. */
  2251. bs = qmp_get_root_bs(device, &local_err);
  2252. if (!bs) {
  2253. bs = bdrv_lookup_bs(device, device, NULL);
  2254. if (!bs) {
  2255. error_free(local_err);
  2256. error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
  2257. "Device '%s' not found", device);
  2258. } else {
  2259. error_propagate(errp, local_err);
  2260. }
  2261. return;
  2262. }
  2263. aio_context = bdrv_get_aio_context(bs);
  2264. aio_context_acquire(aio_context);
  2265. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) {
  2266. goto out;
  2267. }
  2268. /* default top_bs is the active layer */
  2269. top_bs = bs;
  2270. if (has_top_node && has_top) {
  2271. error_setg(errp, "'top-node' and 'top' are mutually exclusive");
  2272. goto out;
  2273. } else if (has_top_node) {
  2274. top_bs = bdrv_lookup_bs(NULL, top_node, errp);
  2275. if (top_bs == NULL) {
  2276. goto out;
  2277. }
  2278. if (!bdrv_chain_contains(bs, top_bs)) {
  2279. error_setg(errp, "'%s' is not in this backing file chain",
  2280. top_node);
  2281. goto out;
  2282. }
  2283. } else if (has_top && top) {
  2284. /* This strcmp() is just a shortcut, there is no need to
  2285. * refresh @bs's filename. If it mismatches,
  2286. * bdrv_find_backing_image() will do the refresh and may still
  2287. * return @bs. */
  2288. if (strcmp(bs->filename, top) != 0) {
  2289. top_bs = bdrv_find_backing_image(bs, top);
  2290. }
  2291. }
  2292. if (top_bs == NULL) {
  2293. error_setg(errp, "Top image file %s not found", top ? top : "NULL");
  2294. goto out;
  2295. }
  2296. assert(bdrv_get_aio_context(top_bs) == aio_context);
  2297. if (has_base_node && has_base) {
  2298. error_setg(errp, "'base-node' and 'base' are mutually exclusive");
  2299. goto out;
  2300. } else if (has_base_node) {
  2301. base_bs = bdrv_lookup_bs(NULL, base_node, errp);
  2302. if (base_bs == NULL) {
  2303. goto out;
  2304. }
  2305. if (!bdrv_chain_contains(top_bs, base_bs)) {
  2306. error_setg(errp, "'%s' is not in this backing file chain",
  2307. base_node);
  2308. goto out;
  2309. }
  2310. } else if (has_base && base) {
  2311. base_bs = bdrv_find_backing_image(top_bs, base);
  2312. if (base_bs == NULL) {
  2313. error_setg(errp, "Can't find '%s' in the backing chain", base);
  2314. goto out;
  2315. }
  2316. } else {
  2317. base_bs = bdrv_find_base(top_bs);
  2318. if (base_bs == NULL) {
  2319. error_setg(errp, "There is no backimg image");
  2320. goto out;
  2321. }
  2322. }
  2323. assert(bdrv_get_aio_context(base_bs) == aio_context);
  2324. for (iter = top_bs; iter != bdrv_filter_or_cow_bs(base_bs);
  2325. iter = bdrv_filter_or_cow_bs(iter))
  2326. {
  2327. if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
  2328. goto out;
  2329. }
  2330. }
  2331. /* Do not allow attempts to commit an image into itself */
  2332. if (top_bs == base_bs) {
  2333. error_setg(errp, "cannot commit an image into itself");
  2334. goto out;
  2335. }
  2336. /*
  2337. * Active commit is required if and only if someone has taken a
  2338. * WRITE permission on the top node. Historically, we have always
  2339. * used active commit for top nodes, so continue that practice
  2340. * lest we possibly break clients that rely on this behavior, e.g.
  2341. * to later attach this node to a writing parent.
  2342. * (Active commit is never really wrong.)
  2343. */
  2344. bdrv_get_cumulative_perm(top_bs, &top_perm, &top_shared);
  2345. if (top_perm & BLK_PERM_WRITE ||
  2346. bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs))
  2347. {
  2348. if (has_backing_file) {
  2349. if (bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs)) {
  2350. error_setg(errp, "'backing-file' specified,"
  2351. " but 'top' is the active layer");
  2352. } else {
  2353. error_setg(errp, "'backing-file' specified, but 'top' has a "
  2354. "writer on it");
  2355. }
  2356. goto out;
  2357. }
  2358. if (!has_job_id) {
  2359. /*
  2360. * Emulate here what block_job_create() does, because it
  2361. * is possible that @bs != @top_bs (the block job should
  2362. * be named after @bs, even if @top_bs is the actual
  2363. * source)
  2364. */
  2365. job_id = bdrv_get_device_name(bs);
  2366. }
  2367. commit_active_start(job_id, top_bs, base_bs, job_flags, speed, on_error,
  2368. filter_node_name, NULL, NULL, false, &local_err);
  2369. } else {
  2370. BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
  2371. if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
  2372. goto out;
  2373. }
  2374. commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, job_flags,
  2375. speed, on_error, has_backing_file ? backing_file : NULL,
  2376. filter_node_name, &local_err);
  2377. }
  2378. if (local_err != NULL) {
  2379. error_propagate(errp, local_err);
  2380. goto out;
  2381. }
  2382. out:
  2383. aio_context_release(aio_context);
  2384. }
  2385. /* Common QMP interface for drive-backup and blockdev-backup */
  2386. static BlockJob *do_backup_common(BackupCommon *backup,
  2387. BlockDriverState *bs,
  2388. BlockDriverState *target_bs,
  2389. AioContext *aio_context,
  2390. JobTxn *txn, Error **errp)
  2391. {
  2392. BlockJob *job = NULL;
  2393. BdrvDirtyBitmap *bmap = NULL;
  2394. BackupPerf perf = { .max_workers = 64 };
  2395. int job_flags = JOB_DEFAULT;
  2396. if (!backup->has_speed) {
  2397. backup->speed = 0;
  2398. }
  2399. if (!backup->has_on_source_error) {
  2400. backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT;
  2401. }
  2402. if (!backup->has_on_target_error) {
  2403. backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
  2404. }
  2405. if (!backup->has_job_id) {
  2406. backup->job_id = NULL;
  2407. }
  2408. if (!backup->has_auto_finalize) {
  2409. backup->auto_finalize = true;
  2410. }
  2411. if (!backup->has_auto_dismiss) {
  2412. backup->auto_dismiss = true;
  2413. }
  2414. if (!backup->has_compress) {
  2415. backup->compress = false;
  2416. }
  2417. if (backup->x_perf) {
  2418. if (backup->x_perf->has_use_copy_range) {
  2419. perf.use_copy_range = backup->x_perf->use_copy_range;
  2420. }
  2421. if (backup->x_perf->has_max_workers) {
  2422. perf.max_workers = backup->x_perf->max_workers;
  2423. }
  2424. if (backup->x_perf->has_max_chunk) {
  2425. perf.max_chunk = backup->x_perf->max_chunk;
  2426. }
  2427. }
  2428. if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
  2429. (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
  2430. /* done before desugaring 'incremental' to print the right message */
  2431. if (!backup->has_bitmap) {
  2432. error_setg(errp, "must provide a valid bitmap name for "
  2433. "'%s' sync mode", MirrorSyncMode_str(backup->sync));
  2434. return NULL;
  2435. }
  2436. }
  2437. if (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL) {
  2438. if (backup->has_bitmap_mode &&
  2439. backup->bitmap_mode != BITMAP_SYNC_MODE_ON_SUCCESS) {
  2440. error_setg(errp, "Bitmap sync mode must be '%s' "
  2441. "when using sync mode '%s'",
  2442. BitmapSyncMode_str(BITMAP_SYNC_MODE_ON_SUCCESS),
  2443. MirrorSyncMode_str(backup->sync));
  2444. return NULL;
  2445. }
  2446. backup->has_bitmap_mode = true;
  2447. backup->sync = MIRROR_SYNC_MODE_BITMAP;
  2448. backup->bitmap_mode = BITMAP_SYNC_MODE_ON_SUCCESS;
  2449. }
  2450. if (backup->has_bitmap) {
  2451. bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
  2452. if (!bmap) {
  2453. error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
  2454. return NULL;
  2455. }
  2456. if (!backup->has_bitmap_mode) {
  2457. error_setg(errp, "Bitmap sync mode must be given "
  2458. "when providing a bitmap");
  2459. return NULL;
  2460. }
  2461. if (bdrv_dirty_bitmap_check(bmap, BDRV_BITMAP_ALLOW_RO, errp)) {
  2462. return NULL;
  2463. }
  2464. /* This does not produce a useful bitmap artifact: */
  2465. if (backup->sync == MIRROR_SYNC_MODE_NONE) {
  2466. error_setg(errp, "sync mode '%s' does not produce meaningful bitmap"
  2467. " outputs", MirrorSyncMode_str(backup->sync));
  2468. return NULL;
  2469. }
  2470. /* If the bitmap isn't used for input or output, this is useless: */
  2471. if (backup->bitmap_mode == BITMAP_SYNC_MODE_NEVER &&
  2472. backup->sync != MIRROR_SYNC_MODE_BITMAP) {
  2473. error_setg(errp, "Bitmap sync mode '%s' has no meaningful effect"
  2474. " when combined with sync mode '%s'",
  2475. BitmapSyncMode_str(backup->bitmap_mode),
  2476. MirrorSyncMode_str(backup->sync));
  2477. return NULL;
  2478. }
  2479. }
  2480. if (!backup->has_bitmap && backup->has_bitmap_mode) {
  2481. error_setg(errp, "Cannot specify bitmap sync mode without a bitmap");
  2482. return NULL;
  2483. }
  2484. if (!backup->auto_finalize) {
  2485. job_flags |= JOB_MANUAL_FINALIZE;
  2486. }
  2487. if (!backup->auto_dismiss) {
  2488. job_flags |= JOB_MANUAL_DISMISS;
  2489. }
  2490. job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
  2491. backup->sync, bmap, backup->bitmap_mode,
  2492. backup->compress,
  2493. backup->filter_node_name,
  2494. &perf,
  2495. backup->on_source_error,
  2496. backup->on_target_error,
  2497. job_flags, NULL, NULL, txn, errp);
  2498. return job;
  2499. }
  2500. void qmp_drive_backup(DriveBackup *backup, Error **errp)
  2501. {
  2502. TransactionAction action = {
  2503. .type = TRANSACTION_ACTION_KIND_DRIVE_BACKUP,
  2504. .u.drive_backup.data = backup,
  2505. };
  2506. blockdev_do_action(&action, errp);
  2507. }
  2508. BlockDeviceInfoList *qmp_query_named_block_nodes(bool has_flat,
  2509. bool flat,
  2510. Error **errp)
  2511. {
  2512. bool return_flat = has_flat && flat;
  2513. return bdrv_named_nodes_list(return_flat, errp);
  2514. }
  2515. XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
  2516. {
  2517. return bdrv_get_xdbg_block_graph(errp);
  2518. }
  2519. void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp)
  2520. {
  2521. TransactionAction action = {
  2522. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP,
  2523. .u.blockdev_backup.data = backup,
  2524. };
  2525. blockdev_do_action(&action, errp);
  2526. }
  2527. /* Parameter check and block job starting for drive mirroring.
  2528. * Caller should hold @device and @target's aio context (must be the same).
  2529. **/
  2530. static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
  2531. BlockDriverState *target,
  2532. bool has_replaces, const char *replaces,
  2533. enum MirrorSyncMode sync,
  2534. BlockMirrorBackingMode backing_mode,
  2535. bool zero_target,
  2536. bool has_speed, int64_t speed,
  2537. bool has_granularity, uint32_t granularity,
  2538. bool has_buf_size, int64_t buf_size,
  2539. bool has_on_source_error,
  2540. BlockdevOnError on_source_error,
  2541. bool has_on_target_error,
  2542. BlockdevOnError on_target_error,
  2543. bool has_unmap, bool unmap,
  2544. bool has_filter_node_name,
  2545. const char *filter_node_name,
  2546. bool has_copy_mode, MirrorCopyMode copy_mode,
  2547. bool has_auto_finalize, bool auto_finalize,
  2548. bool has_auto_dismiss, bool auto_dismiss,
  2549. Error **errp)
  2550. {
  2551. BlockDriverState *unfiltered_bs;
  2552. int job_flags = JOB_DEFAULT;
  2553. if (!has_speed) {
  2554. speed = 0;
  2555. }
  2556. if (!has_on_source_error) {
  2557. on_source_error = BLOCKDEV_ON_ERROR_REPORT;
  2558. }
  2559. if (!has_on_target_error) {
  2560. on_target_error = BLOCKDEV_ON_ERROR_REPORT;
  2561. }
  2562. if (!has_granularity) {
  2563. granularity = 0;
  2564. }
  2565. if (!has_buf_size) {
  2566. buf_size = 0;
  2567. }
  2568. if (!has_unmap) {
  2569. unmap = true;
  2570. }
  2571. if (!has_filter_node_name) {
  2572. filter_node_name = NULL;
  2573. }
  2574. if (!has_copy_mode) {
  2575. copy_mode = MIRROR_COPY_MODE_BACKGROUND;
  2576. }
  2577. if (has_auto_finalize && !auto_finalize) {
  2578. job_flags |= JOB_MANUAL_FINALIZE;
  2579. }
  2580. if (has_auto_dismiss && !auto_dismiss) {
  2581. job_flags |= JOB_MANUAL_DISMISS;
  2582. }
  2583. if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
  2584. error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
  2585. "a value in range [512B, 64MB]");
  2586. return;
  2587. }
  2588. if (granularity & (granularity - 1)) {
  2589. error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
  2590. "a power of 2");
  2591. return;
  2592. }
  2593. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
  2594. return;
  2595. }
  2596. if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) {
  2597. return;
  2598. }
  2599. if (!bdrv_backing_chain_next(bs) && sync == MIRROR_SYNC_MODE_TOP) {
  2600. sync = MIRROR_SYNC_MODE_FULL;
  2601. }
  2602. if (!has_replaces) {
  2603. /* We want to mirror from @bs, but keep implicit filters on top */
  2604. unfiltered_bs = bdrv_skip_implicit_filters(bs);
  2605. if (unfiltered_bs != bs) {
  2606. replaces = unfiltered_bs->node_name;
  2607. has_replaces = true;
  2608. }
  2609. }
  2610. if (has_replaces) {
  2611. BlockDriverState *to_replace_bs;
  2612. AioContext *replace_aio_context;
  2613. int64_t bs_size, replace_size;
  2614. bs_size = bdrv_getlength(bs);
  2615. if (bs_size < 0) {
  2616. error_setg_errno(errp, -bs_size, "Failed to query device's size");
  2617. return;
  2618. }
  2619. to_replace_bs = check_to_replace_node(bs, replaces, errp);
  2620. if (!to_replace_bs) {
  2621. return;
  2622. }
  2623. replace_aio_context = bdrv_get_aio_context(to_replace_bs);
  2624. aio_context_acquire(replace_aio_context);
  2625. replace_size = bdrv_getlength(to_replace_bs);
  2626. aio_context_release(replace_aio_context);
  2627. if (replace_size < 0) {
  2628. error_setg_errno(errp, -replace_size,
  2629. "Failed to query the replacement node's size");
  2630. return;
  2631. }
  2632. if (bs_size != replace_size) {
  2633. error_setg(errp, "cannot replace image with a mirror image of "
  2634. "different size");
  2635. return;
  2636. }
  2637. }
  2638. /* pass the node name to replace to mirror start since it's loose coupling
  2639. * and will allow to check whether the node still exist at mirror completion
  2640. */
  2641. mirror_start(job_id, bs, target,
  2642. has_replaces ? replaces : NULL, job_flags,
  2643. speed, granularity, buf_size, sync, backing_mode, zero_target,
  2644. on_source_error, on_target_error, unmap, filter_node_name,
  2645. copy_mode, errp);
  2646. }
void qmp_drive_mirror(DriveMirror *arg, Error **errp)
{
    BlockDriverState *bs;
    BlockDriverState *target_backing_bs, *target_bs;
    AioContext *aio_context;
    AioContext *old_context;
    BlockMirrorBackingMode backing_mode;
    Error *local_err = NULL;
    QDict *options = NULL;
    int flags;
    int64_t size;
    const char *format = arg->format;
    bool zero_target;
    int ret;

    bs = qmp_get_root_bs(arg->device, errp);
    if (!bs) {
        return;
    }

    /* Early check to avoid creating target */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (!arg->has_mode) {
        arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
    }

    if (!arg->has_format) {
        format = (arg->mode == NEW_IMAGE_MODE_EXISTING
                  ? NULL : bs->drv->format_name);
    }

    flags = bs->open_flags | BDRV_O_RDWR;
    target_backing_bs = bdrv_cow_bs(bdrv_skip_filters(bs));
    if (!target_backing_bs && arg->sync == MIRROR_SYNC_MODE_TOP) {
        arg->sync = MIRROR_SYNC_MODE_FULL;
    }
    if (arg->sync == MIRROR_SYNC_MODE_NONE) {
        target_backing_bs = bs;
    }

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "bdrv_getlength failed");
        goto out;
    }

    if (arg->has_replaces) {
        if (!arg->has_node_name) {
            error_setg(errp, "a node-name must be provided when replacing a"
                             " named node of the graph");
            goto out;
        }
    }

    if (arg->mode == NEW_IMAGE_MODE_ABSOLUTE_PATHS) {
        backing_mode = MIRROR_SOURCE_BACKING_CHAIN;
    } else {
        backing_mode = MIRROR_OPEN_BACKING_CHAIN;
    }

    /* Don't open backing image in create() */
    flags |= BDRV_O_NO_BACKING;

    if ((arg->sync == MIRROR_SYNC_MODE_FULL || !target_backing_bs)
        && arg->mode != NEW_IMAGE_MODE_EXISTING)
    {
        /* create new image w/o backing file */
        assert(format);
        bdrv_img_create(arg->target, format,
                        NULL, NULL, NULL, size, flags, false, &local_err);
    } else {
        /* Implicit filters should not appear in the filename */
        BlockDriverState *explicit_backing =
            bdrv_skip_implicit_filters(target_backing_bs);

        switch (arg->mode) {
        case NEW_IMAGE_MODE_EXISTING:
            break;
        case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
            /* create new image with backing file */
            bdrv_refresh_filename(explicit_backing);
            bdrv_img_create(arg->target, format,
                            explicit_backing->filename,
                            explicit_backing->drv->format_name,
                            NULL, size, flags, false, &local_err);
            break;
        default:
            abort();
        }
    }

    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    options = qdict_new();
    if (arg->has_node_name) {
        qdict_put_str(options, "node-name", arg->node_name);
    }
    if (format) {
        qdict_put_str(options, "driver", format);
    }

    /* Mirroring takes care of copy-on-write using the source's backing
     * file.
     */
    target_bs = bdrv_open(arg->target, NULL, options, flags, errp);
    if (!target_bs) {
        goto out;
    }

    zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL &&
                   (arg->mode == NEW_IMAGE_MODE_EXISTING ||
                    !bdrv_has_zero_init(target_bs)));

    /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
    old_context = bdrv_get_aio_context(target_bs);
    aio_context_release(aio_context);
    aio_context_acquire(old_context);

    ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
    if (ret < 0) {
        bdrv_unref(target_bs);
        aio_context_release(old_context);
        return;
    }

    aio_context_release(old_context);
    aio_context_acquire(aio_context);

    blockdev_mirror_common(arg->has_job_id ? arg->job_id : NULL, bs, target_bs,
                           arg->has_replaces, arg->replaces, arg->sync,
                           backing_mode, zero_target,
                           arg->has_speed, arg->speed,
                           arg->has_granularity, arg->granularity,
                           arg->has_buf_size, arg->buf_size,
                           arg->has_on_source_error, arg->on_source_error,
                           arg->has_on_target_error, arg->on_target_error,
                           arg->has_unmap, arg->unmap,
                           false, NULL,
                           arg->has_copy_mode, arg->copy_mode,
                           arg->has_auto_finalize, arg->auto_finalize,
                           arg->has_auto_dismiss, arg->auto_dismiss,
                           errp);
    bdrv_unref(target_bs);
out:
    aio_context_release(aio_context);
}
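
/*
 * QMP handler for blockdev-mirror.  Unlike drive-mirror, the target must
 * already exist as a named node; this handler only looks it up, moves it
 * into the source's AioContext and delegates to blockdev_mirror_common().
 *
 * Illustrative QMP invocation (node names are hypothetical):
 *   { "execute": "blockdev-mirror",
 *     "arguments": { "device": "source-node", "target": "target-node",
 *                    "sync": "full" } }
 */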
void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
                         const char *device, const char *target,
                         bool has_replaces, const char *replaces,
                         MirrorSyncMode sync,
                         bool has_speed, int64_t speed,
                         bool has_granularity, uint32_t granularity,
                         bool has_buf_size, int64_t buf_size,
                         bool has_on_source_error,
                         BlockdevOnError on_source_error,
                         bool has_on_target_error,
                         BlockdevOnError on_target_error,
                         bool has_filter_node_name,
                         const char *filter_node_name,
                         bool has_copy_mode, MirrorCopyMode copy_mode,
                         bool has_auto_finalize, bool auto_finalize,
                         bool has_auto_dismiss, bool auto_dismiss,
                         Error **errp)
{
    BlockDriverState *bs;
    BlockDriverState *target_bs;
    AioContext *aio_context;
    AioContext *old_context;
    BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN;
    bool zero_target;
    int ret;

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return;
    }

    target_bs = bdrv_lookup_bs(target, target, errp);
    if (!target_bs) {
        return;
    }

    zero_target = (sync == MIRROR_SYNC_MODE_FULL);

    /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
    old_context = bdrv_get_aio_context(target_bs);
    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(old_context);

    ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);

    aio_context_release(old_context);
    aio_context_acquire(aio_context);

    if (ret < 0) {
        goto out;
    }

    blockdev_mirror_common(has_job_id ? job_id : NULL, bs, target_bs,
                           has_replaces, replaces, sync, backing_mode,
                           zero_target, has_speed, speed,
                           has_granularity, granularity,
                           has_buf_size, buf_size,
                           has_on_source_error, on_source_error,
                           has_on_target_error, on_target_error,
                           true, true,
                           has_filter_node_name, filter_node_name,
                           has_copy_mode, copy_mode,
                           has_auto_finalize, auto_finalize,
                           has_auto_dismiss, auto_dismiss,
                           errp);
out:
    aio_context_release(aio_context);
}

/* Get a block job using its ID and acquire its AioContext */
static BlockJob *find_block_job(const char *id, AioContext **aio_context,
                                Error **errp)
{
    BlockJob *job;

    assert(id != NULL);

    *aio_context = NULL;

    job = block_job_get(id);

    if (!job) {
        error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
                  "Block job '%s' not found", id);
        return NULL;
    }

    *aio_context = block_job_get_aio_context(job);
    aio_context_acquire(*aio_context);

    return job;
}
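
/*
 * QMP handlers for the block-job-* commands.  Each one looks up the job by
 * ID via find_block_job() (which also acquires the job's AioContext),
 * performs the requested state change and releases the context again.
 */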
void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{
    AioContext *aio_context;
    BlockJob *job = find_block_job(device, &aio_context, errp);

    if (!job) {
        return;
    }

    block_job_set_speed(job, speed, errp);
    aio_context_release(aio_context);
}

void qmp_block_job_cancel(const char *device,
                          bool has_force, bool force, Error **errp)
{
    AioContext *aio_context;
    BlockJob *job = find_block_job(device, &aio_context, errp);

    if (!job) {
        return;
    }

    if (!has_force) {
        force = false;
    }

    if (job_user_paused(&job->job) && !force) {
        error_setg(errp, "The block job for device '%s' is currently paused",
                   device);
        goto out;
    }

    trace_qmp_block_job_cancel(job);
    job_user_cancel(&job->job, force, errp);
out:
    aio_context_release(aio_context);
}

void qmp_block_job_pause(const char *device, Error **errp)
{
    AioContext *aio_context;
    BlockJob *job = find_block_job(device, &aio_context, errp);

    if (!job) {
        return;
    }

    trace_qmp_block_job_pause(job);
    job_user_pause(&job->job, errp);
    aio_context_release(aio_context);
}

void qmp_block_job_resume(const char *device, Error **errp)
{
    AioContext *aio_context;
    BlockJob *job = find_block_job(device, &aio_context, errp);

    if (!job) {
        return;
    }

    trace_qmp_block_job_resume(job);
    job_user_resume(&job->job, errp);
    aio_context_release(aio_context);
}

void qmp_block_job_complete(const char *device, Error **errp)
{
    AioContext *aio_context;
    BlockJob *job = find_block_job(device, &aio_context, errp);

    if (!job) {
        return;
    }

    trace_qmp_block_job_complete(job);
    job_complete(&job->job, errp);
    aio_context_release(aio_context);
}

void qmp_block_job_finalize(const char *id, Error **errp)
{
    AioContext *aio_context;
    BlockJob *job = find_block_job(id, &aio_context, errp);

    if (!job) {
        return;
    }

    trace_qmp_block_job_finalize(job);
    job_ref(&job->job);
    job_finalize(&job->job, errp);

    /*
     * Job's context might have changed via job_finalize (and job_txn_apply
     * automatically acquires the new one), so make sure we release the correct
     * one.
     */
    aio_context = block_job_get_aio_context(job);
    job_unref(&job->job);
    aio_context_release(aio_context);
}

void qmp_block_job_dismiss(const char *id, Error **errp)
{
    AioContext *aio_context;
    BlockJob *bjob = find_block_job(id, &aio_context, errp);
    Job *job;

    if (!bjob) {
        return;
    }

    trace_qmp_block_job_dismiss(bjob);
    job = &bjob->job;
    job_dismiss(&job, errp);
    aio_context_release(aio_context);
}
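
/*
 * QMP handler for change-backing-file.  Rewrites the backing file string
 * recorded in the image header of @image_node_name, temporarily reopening
 * the node read-write if necessary; the node must be part of @device's
 * backing chain.
 */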
void qmp_change_backing_file(const char *device,
                             const char *image_node_name,
                             const char *backing_file,
                             Error **errp)
{
    BlockDriverState *bs = NULL;
    AioContext *aio_context;
    BlockDriverState *image_bs = NULL;
    Error *local_err = NULL;
    bool ro;
    int ret;

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (!image_bs) {
        error_setg(errp, "image file not found");
        goto out;
    }

    if (bdrv_find_base(image_bs) == image_bs) {
        error_setg(errp, "not allowing backing file change on an image "
                         "without a backing file");
        goto out;
    }

    /* even though we are not necessarily operating on bs, we need it to
     * determine if block ops are currently prohibited on the chain */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
        goto out;
    }

    /* final sanity check */
    if (!bdrv_chain_contains(bs, image_bs)) {
        error_setg(errp, "'%s' and image file are not in the same chain",
                   device);
        goto out;
    }

    /* if not r/w, reopen to make r/w */
    ro = bdrv_is_read_only(image_bs);

    if (ro) {
        if (bdrv_reopen_set_read_only(image_bs, false, errp) != 0) {
            goto out;
        }
    }

    ret = bdrv_change_backing_file(image_bs, backing_file,
                                   image_bs->drv ? image_bs->drv->format_name : "",
                                   false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not change backing file to '%s'",
                         backing_file);
        /* don't exit here, so we can try to restore open flags if
         * appropriate */
    }

    if (ro) {
        bdrv_reopen_set_read_only(image_bs, true, errp);
    }

out:
    aio_context_release(aio_context);
}
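
/*
 * QMP handler for blockdev-add.  Converts the BlockdevOptions QAPI object
 * into a flattened QDict, requires a node-name for the root node and
 * creates the node tree with bds_tree_init().
 *
 * Illustrative QMP invocation (values are hypothetical):
 *   { "execute": "blockdev-add",
 *     "arguments": { "driver": "qcow2", "node-name": "disk0",
 *                    "file": { "driver": "file",
 *                              "filename": "/tmp/disk.qcow2" } } }
 */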
void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
{
    BlockDriverState *bs;
    QObject *obj;
    Visitor *v = qobject_output_visitor_new(&obj);
    QDict *qdict;

    visit_type_BlockdevOptions(v, NULL, &options, &error_abort);
    visit_complete(v, &obj);
    qdict = qobject_to(QDict, obj);

    qdict_flatten(qdict);

    if (!qdict_get_try_str(qdict, "node-name")) {
        error_setg(errp, "'node-name' must be specified for the root node");
        goto fail;
    }

    bs = bds_tree_init(qdict, errp);
    if (!bs) {
        goto fail;
    }

    bdrv_set_monitor_owned(bs);

fail:
    visit_free(v);
}
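
/*
 * QMP handler for blockdev-reopen.  Builds a single reopen queue out of the
 * given list of BlockdevOptions (draining each affected subtree first) and
 * then applies all reopen operations in one go via bdrv_reopen_multiple().
 */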
void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
{
    BlockReopenQueue *queue = NULL;
    GSList *drained = NULL;
    GSList *p;

    /* Add each one of the BDS that we want to reopen to the queue */
    for (; reopen_list != NULL; reopen_list = reopen_list->next) {
        BlockdevOptions *options = reopen_list->value;
        BlockDriverState *bs;
        AioContext *ctx;
        QObject *obj;
        Visitor *v;
        QDict *qdict;

        /* Check for the selected node name */
        if (!options->has_node_name) {
            error_setg(errp, "node-name not specified");
            goto fail;
        }

        bs = bdrv_find_node(options->node_name);
        if (!bs) {
            error_setg(errp, "Failed to find node with node-name='%s'",
                       options->node_name);
            goto fail;
        }

        /* Put all options in a QDict and flatten it */
        v = qobject_output_visitor_new(&obj);
        visit_type_BlockdevOptions(v, NULL, &options, &error_abort);
        visit_complete(v, &obj);
        visit_free(v);

        qdict = qobject_to(QDict, obj);

        qdict_flatten(qdict);

        ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);

        bdrv_subtree_drained_begin(bs);
        queue = bdrv_reopen_queue(queue, bs, qdict, false);
        drained = g_slist_prepend(drained, bs);

        aio_context_release(ctx);
    }

    /* Perform the reopen operation */
    bdrv_reopen_multiple(queue, errp);
    queue = NULL;

fail:
    bdrv_reopen_queue_free(queue);
    for (p = drained; p; p = p->next) {
        BlockDriverState *bs = p->data;
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        bdrv_subtree_drained_end(bs);
        aio_context_release(ctx);
    }
    g_slist_free(drained);
}
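
/*
 * QMP handler for blockdev-del.  Only nodes that are owned by the monitor
 * (i.e. were created with blockdev-add) and are no longer referenced
 * elsewhere may be deleted.
 */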
void qmp_blockdev_del(const char *node_name, Error **errp)
{
    AioContext *aio_context;
    BlockDriverState *bs;

    GLOBAL_STATE_CODE();

    bs = bdrv_find_node(node_name);
    if (!bs) {
        error_setg(errp, "Failed to find node with node-name='%s'", node_name);
        return;
    }
    if (bdrv_has_blk(bs)) {
        error_setg(errp, "Node %s is in use", node_name);
        return;
    }
    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) {
        goto out;
    }

    if (!QTAILQ_IN_USE(bs, monitor_list)) {
        error_setg(errp, "Node %s is not owned by the monitor",
                   bs->node_name);
        goto out;
    }

    if (bs->refcnt > 1) {
        error_setg(errp, "Block device %s is in use",
                   bdrv_get_device_or_node_name(bs));
        goto out;
    }

    QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
    bdrv_unref(bs);

out:
    aio_context_release(aio_context);
}
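
/* Find the child of @parent_bs whose BdrvChild name matches @child_name */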
static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs,
                                  const char *child_name)
{
    BdrvChild *child;

    QLIST_FOREACH(child, &parent_bs->children, next) {
        if (strcmp(child->name, child_name) == 0) {
            return child;
        }
    }

    return NULL;
}
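
/*
 * QMP handler for the experimental x-blockdev-change command: removes the
 * given child from @parent, or inserts @node as a new child; exactly one of
 * the two optional arguments must be given.
 */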
void qmp_x_blockdev_change(const char *parent, bool has_child,
                           const char *child, bool has_node,
                           const char *node, Error **errp)
{
    BlockDriverState *parent_bs, *new_bs = NULL;
    BdrvChild *p_child;

    parent_bs = bdrv_lookup_bs(parent, parent, errp);
    if (!parent_bs) {
        return;
    }

    if (has_child == has_node) {
        if (has_child) {
            error_setg(errp, "The parameters child and node are in conflict");
        } else {
            error_setg(errp, "Either child or node must be specified");
        }
        return;
    }

    if (has_child) {
        p_child = bdrv_find_child(parent_bs, child);
        if (!p_child) {
            error_setg(errp, "Node '%s' does not have child '%s'",
                       parent, child);
            return;
        }
        bdrv_del_child(parent_bs, p_child, errp);
    }

    if (has_node) {
        new_bs = bdrv_find_node(node);
        if (!new_bs) {
            error_setg(errp, "Node '%s' not found", node);
            return;
        }
        bdrv_add_child(parent_bs, new_bs, errp);
    }
}
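
/*
 * QMP handler for query-block-jobs: returns a BlockJobInfo entry for every
 * non-internal block job, querying each job under its own AioContext.
 */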
BlockJobInfoList *qmp_query_block_jobs(Error **errp)
{
    BlockJobInfoList *head = NULL, **tail = &head;
    BlockJob *job;

    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
        BlockJobInfo *value;
        AioContext *aio_context;

        if (block_job_is_internal(job)) {
            continue;
        }
        aio_context = block_job_get_aio_context(job);
        aio_context_acquire(aio_context);
        value = block_job_query(job, errp);
        aio_context_release(aio_context);
        if (!value) {
            qapi_free_BlockJobInfoList(head);
            return NULL;
        }
        QAPI_LIST_APPEND(tail, value);
    }

    return head;
}
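
/*
 * QMP handler for the experimental x-blockdev-set-iothread command: moves a
 * node to the AioContext of the given IOThread, or back to the main loop
 * context if "iothread" is null.  Refused if the node is attached to a
 * BlockBackend, unless force=true is given.
 */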
void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
                                 bool has_force, bool force, Error **errp)
{
    AioContext *old_context;
    AioContext *new_context;
    BlockDriverState *bs;

    bs = bdrv_find_node(node_name);
    if (!bs) {
        error_setg(errp, "Failed to find node with node-name='%s'", node_name);
        return;
    }

    /* Protects against accidents. */
    if (!(has_force && force) && bdrv_has_blk(bs)) {
        error_setg(errp, "Node %s is associated with a BlockBackend and could "
                         "be in use (use force=true to override this check)",
                   node_name);
        return;
    }

    if (iothread->type == QTYPE_QSTRING) {
        IOThread *obj = iothread_by_id(iothread->u.s);
        if (!obj) {
            error_setg(errp, "Cannot find iothread %s", iothread->u.s);
            return;
        }

        new_context = iothread_get_aio_context(obj);
    } else {
        new_context = qemu_get_aio_context();
    }

    old_context = bdrv_get_aio_context(bs);
    aio_context_acquire(old_context);

    bdrv_try_set_aio_context(bs, new_context, errp);

    aio_context_release(old_context);
}
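
/* Common -drive options, with their type information and help text */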
QemuOptsList qemu_common_drive_opts = {
    .name = "drive",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
    .desc = {
        {
            .name = "snapshot",
            .type = QEMU_OPT_BOOL,
            .help = "enable/disable snapshot mode",
        },{
            .name = "aio",
            .type = QEMU_OPT_STRING,
            .help = "host AIO implementation (threads, native, io_uring)",
        },{
            .name = BDRV_OPT_CACHE_WB,
            .type = QEMU_OPT_BOOL,
            .help = "Enable writeback mode",
        },{
            .name = "format",
            .type = QEMU_OPT_STRING,
            .help = "disk format (raw, qcow2, ...)",
        },{
            .name = "rerror",
            .type = QEMU_OPT_STRING,
            .help = "read error action",
        },{
            .name = "werror",
            .type = QEMU_OPT_STRING,
            .help = "write error action",
        },{
            .name = BDRV_OPT_READ_ONLY,
            .type = QEMU_OPT_BOOL,
            .help = "open drive file as read-only",
        },

        THROTTLE_OPTS,

        {
            .name = "throttling.group",
            .type = QEMU_OPT_STRING,
            .help = "name of the block throttling group",
        },{
            .name = "copy-on-read",
            .type = QEMU_OPT_BOOL,
            .help = "copy read data from backing file into image file",
        },{
            .name = "detect-zeroes",
            .type = QEMU_OPT_STRING,
            .help = "try to optimize zero writes (off, on, unmap)",
        },{
            .name = "stats-account-invalid",
            .type = QEMU_OPT_BOOL,
            .help = "whether to account for invalid I/O operations "
                    "in the statistics",
        },{
            .name = "stats-account-failed",
            .type = QEMU_OPT_BOOL,
            .help = "whether to account for failed I/O operations "
                    "in the statistics",
        },
        { /* end of list */ }
    },
};

QemuOptsList qemu_drive_opts = {
    .name = "drive",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head),
    .desc = {
        /*
         * no elements => accept any params
         * validation will happen later
         */
        { /* end of list */ }
    },
};