blockdev.c 114 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
7377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813
  1. /*
  2. * QEMU host block devices
  3. *
  4. * Copyright (c) 2003-2008 Fabrice Bellard
  5. *
  6. * This work is licensed under the terms of the GNU GPL, version 2 or
  7. * later. See the COPYING file in the top-level directory.
  8. *
  9. * This file incorporates work covered by the following copyright and
  10. * permission notice:
  11. *
  12. * Copyright (c) 2003-2008 Fabrice Bellard
  13. *
  14. * Permission is hereby granted, free of charge, to any person obtaining a copy
  15. * of this software and associated documentation files (the "Software"), to deal
  16. * in the Software without restriction, including without limitation the rights
  17. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  18. * copies of the Software, and to permit persons to whom the Software is
  19. * furnished to do so, subject to the following conditions:
  20. *
  21. * The above copyright notice and this permission notice shall be included in
  22. * all copies or substantial portions of the Software.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  25. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  26. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  27. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  28. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  29. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  30. * THE SOFTWARE.
  31. */
  32. #include "qemu/osdep.h"
  33. #include "sysemu/block-backend.h"
  34. #include "sysemu/blockdev.h"
  35. #include "hw/block/block.h"
  36. #include "block/blockjob.h"
  37. #include "block/dirty-bitmap.h"
  38. #include "block/qdict.h"
  39. #include "block/throttle-groups.h"
  40. #include "monitor/monitor.h"
  41. #include "qemu/error-report.h"
  42. #include "qemu/option.h"
  43. #include "qemu/qemu-print.h"
  44. #include "qemu/config-file.h"
  45. #include "qapi/qapi-commands-block.h"
  46. #include "qapi/qapi-commands-transaction.h"
  47. #include "qapi/qapi-visit-block-core.h"
  48. #include "qapi/qmp/qdict.h"
  49. #include "qapi/qmp/qnum.h"
  50. #include "qapi/qmp/qstring.h"
  51. #include "qapi/error.h"
  52. #include "qapi/qmp/qerror.h"
  53. #include "qapi/qmp/qlist.h"
  54. #include "qapi/qobject-output-visitor.h"
  55. #include "sysemu/sysemu.h"
  56. #include "sysemu/iothread.h"
  57. #include "block/block_int.h"
  58. #include "block/trace.h"
  59. #include "sysemu/runstate.h"
  60. #include "sysemu/replay.h"
  61. #include "qemu/cutils.h"
  62. #include "qemu/help_option.h"
  63. #include "qemu/main-loop.h"
  64. #include "qemu/throttle-options.h"
/* Protected by BQL */
QTAILQ_HEAD(, BlockDriverState) monitor_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(monitor_bdrv_states);

/*
 * Record @bs as owned by the monitor by appending it to
 * monitor_bdrv_states.  Caller must hold the BQL (asserted via
 * GLOBAL_STATE_CODE()).
 */
void bdrv_set_monitor_owned(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list);
}
/* Canonical -drive if=... option values, indexed by BlockInterfaceType */
static const char *const if_name[IF_COUNT] = {
    [IF_NONE] = "none",
    [IF_IDE] = "ide",
    [IF_SCSI] = "scsi",
    [IF_FLOPPY] = "floppy",
    [IF_PFLASH] = "pflash",
    [IF_MTD] = "mtd",
    [IF_SD] = "sd",
    [IF_VIRTIO] = "virtio",
    [IF_XEN] = "xen",
};

/*
 * Units per bus for each interface type; 0 (the default for entries
 * not listed) means a single bus with unlimited units.  May be
 * adjusted per board via override_max_devs().
 */
static int if_max_devs[IF_COUNT] = {
    /*
     * Do not change these numbers! They govern how drive option
     * index maps to unit and bus. That mapping is ABI.
     *
     * All controllers used to implement if=T drives need to support
     * if_max_devs[T] units, for any T with if_max_devs[T] != 0.
     * Otherwise, some index values map to "impossible" bus, unit
     * values.
     *
     * For instance, if you change [IF_SCSI] to 255, -drive
     * if=scsi,index=12 no longer means bus=1,unit=5, but
     * bus=0,unit=12. With an lsi53c895a controller (7 units max),
     * the drive can't be set up. Regression.
     */
    [IF_IDE] = 2,
    [IF_SCSI] = 7,
};
  102. /**
  103. * Boards may call this to offer board-by-board overrides
  104. * of the default, global values.
  105. */
  106. void override_max_devs(BlockInterfaceType type, int max_devs)
  107. {
  108. BlockBackend *blk;
  109. DriveInfo *dinfo;
  110. GLOBAL_STATE_CODE();
  111. if (max_devs <= 0) {
  112. return;
  113. }
  114. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  115. dinfo = blk_legacy_dinfo(blk);
  116. if (dinfo->type == type) {
  117. fprintf(stderr, "Cannot override units-per-bus property of"
  118. " the %s interface, because a drive of that type has"
  119. " already been added.\n", if_name[type]);
  120. g_assert_not_reached();
  121. }
  122. }
  123. if_max_devs[type] = max_devs;
  124. }
/*
 * We automatically delete the drive when a device using it gets
 * unplugged. Questionable feature, but we can't just drop it.
 * Device models call blockdev_mark_auto_del() to schedule the
 * automatic deletion, and generic qdev code calls blockdev_auto_del()
 * when deletion is actually safe.
 */
void blockdev_mark_auto_del(BlockBackend *blk)
{
    /* No-op for backends without a legacy DriveInfo (-blockdev) */
    DriveInfo *dinfo = blk_legacy_dinfo(blk);
    BlockJob *job;

    GLOBAL_STATE_CODE();

    if (!dinfo) {
        return;
    }

    /* Hold the job lock while walking and cancelling jobs */
    JOB_LOCK_GUARD();

    for (job = block_job_next_locked(NULL); job;
         job = block_job_next_locked(job)) {
        /* Cancel (non-forced) every job still using this backend's BDS */
        if (block_job_has_bdrv(job, blk_bs(blk))) {
            job_cancel_locked(&job->job, false);
        }
    }

    /* Actual deletion happens later, in blockdev_auto_del() */
    dinfo->auto_del = 1;
}
  149. void blockdev_auto_del(BlockBackend *blk)
  150. {
  151. DriveInfo *dinfo = blk_legacy_dinfo(blk);
  152. GLOBAL_STATE_CODE();
  153. if (dinfo && dinfo->auto_del) {
  154. monitor_remove_blk(blk);
  155. blk_unref(blk);
  156. }
  157. }
  158. static int drive_index_to_bus_id(BlockInterfaceType type, int index)
  159. {
  160. int max_devs = if_max_devs[type];
  161. return max_devs ? index / max_devs : 0;
  162. }
  163. static int drive_index_to_unit_id(BlockInterfaceType type, int index)
  164. {
  165. int max_devs = if_max_devs[type];
  166. return max_devs ? index % max_devs : index;
  167. }
  168. QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file,
  169. const char *optstr)
  170. {
  171. QemuOpts *opts;
  172. GLOBAL_STATE_CODE();
  173. opts = qemu_opts_parse_noisily(qemu_find_opts("drive"), optstr, false);
  174. if (!opts) {
  175. return NULL;
  176. }
  177. if (type != IF_DEFAULT) {
  178. qemu_opt_set(opts, "if", if_name[type], &error_abort);
  179. }
  180. if (index >= 0) {
  181. qemu_opt_set_number(opts, "index", index, &error_abort);
  182. }
  183. if (file)
  184. qemu_opt_set(opts, "file", file, &error_abort);
  185. return opts;
  186. }
  187. DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
  188. {
  189. BlockBackend *blk;
  190. DriveInfo *dinfo;
  191. GLOBAL_STATE_CODE();
  192. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  193. dinfo = blk_legacy_dinfo(blk);
  194. if (dinfo && dinfo->type == type
  195. && dinfo->bus == bus && dinfo->unit == unit) {
  196. return dinfo;
  197. }
  198. }
  199. return NULL;
  200. }
/*
 * Check board claimed all -drive that are meant to be claimed.
 * Fatal error if any remain unclaimed.
 */
void drive_check_orphaned(void)
{
    BlockBackend *blk;
    DriveInfo *dinfo;
    Location loc;
    bool orphans = false;

    GLOBAL_STATE_CODE();

    for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
        dinfo = blk_legacy_dinfo(blk);
        /*
         * Ignore default drives, because we create certain default
         * drives unconditionally, then leave them unclaimed. Not the
         * users fault.
         * Ignore IF_VIRTIO, because it gets desugared into -device,
         * so we can leave failing to -device.
         * Ignore IF_NONE, because leaving unclaimed IF_NONE remains
         * available for device_add is a feature.
         */
        if (dinfo->is_default || dinfo->type == IF_VIRTIO
            || dinfo->type == IF_NONE) {
            continue;
        }
        if (!blk_get_attached_dev(blk)) {
            /* Point the error at the offending -drive option's location,
             * then restore the previous location afterwards. */
            loc_push_none(&loc);
            qemu_opts_loc_restore(dinfo->opts);
            error_report("machine type does not support"
                         " if=%s,bus=%d,unit=%d",
                         if_name[dinfo->type], dinfo->bus, dinfo->unit);
            loc_pop(&loc);
            orphans = true;
        }
    }

    /* Report all orphans before exiting, so the user sees every problem */
    if (orphans) {
        exit(1);
    }
}
  241. DriveInfo *drive_get_by_index(BlockInterfaceType type, int index)
  242. {
  243. GLOBAL_STATE_CODE();
  244. return drive_get(type,
  245. drive_index_to_bus_id(type, index),
  246. drive_index_to_unit_id(type, index));
  247. }
  248. int drive_get_max_bus(BlockInterfaceType type)
  249. {
  250. int max_bus;
  251. BlockBackend *blk;
  252. DriveInfo *dinfo;
  253. GLOBAL_STATE_CODE();
  254. max_bus = -1;
  255. for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
  256. dinfo = blk_legacy_dinfo(blk);
  257. if (dinfo && dinfo->type == type && dinfo->bus > max_bus) {
  258. max_bus = dinfo->bus;
  259. }
  260. }
  261. return max_bus;
  262. }
/* bdrv_iterate_format() callback: print one format name, space-separated. */
static void bdrv_format_print(void *opaque, const char *name)
{
    qemu_printf(" %s", name);
}
/*
 * Bottom-half context pairing a QEMUBH with the BlockDriverState whose
 * reference it is to drop.
 * NOTE(review): no users visible in this chunk — confirm it is still
 * referenced elsewhere in the file before removing.
 */
typedef struct {
    QEMUBH *bh;
    BlockDriverState *bs;
} BDRVPutRefBH;
  271. static int parse_block_error_action(const char *buf, bool is_read, Error **errp)
  272. {
  273. if (!strcmp(buf, "ignore")) {
  274. return BLOCKDEV_ON_ERROR_IGNORE;
  275. } else if (!is_read && !strcmp(buf, "enospc")) {
  276. return BLOCKDEV_ON_ERROR_ENOSPC;
  277. } else if (!strcmp(buf, "stop")) {
  278. return BLOCKDEV_ON_ERROR_STOP;
  279. } else if (!strcmp(buf, "report")) {
  280. return BLOCKDEV_ON_ERROR_REPORT;
  281. } else {
  282. error_setg(errp, "'%s' invalid %s error action",
  283. buf, is_read ? "read" : "write");
  284. return -1;
  285. }
  286. }
  287. static bool parse_stats_intervals(BlockAcctStats *stats, QList *intervals,
  288. Error **errp)
  289. {
  290. const QListEntry *entry;
  291. for (entry = qlist_first(intervals); entry; entry = qlist_next(entry)) {
  292. switch (qobject_type(entry->value)) {
  293. case QTYPE_QSTRING: {
  294. unsigned long long length;
  295. const char *str = qstring_get_str(qobject_to(QString,
  296. entry->value));
  297. if (parse_uint_full(str, &length, 10) == 0 &&
  298. length > 0 && length <= UINT_MAX) {
  299. block_acct_add_interval(stats, (unsigned) length);
  300. } else {
  301. error_setg(errp, "Invalid interval length: %s", str);
  302. return false;
  303. }
  304. break;
  305. }
  306. case QTYPE_QNUM: {
  307. int64_t length = qnum_get_int(qobject_to(QNum, entry->value));
  308. if (length > 0 && length <= UINT_MAX) {
  309. block_acct_add_interval(stats, (unsigned) length);
  310. } else {
  311. error_setg(errp, "Invalid interval length: %" PRId64, length);
  312. return false;
  313. }
  314. break;
  315. }
  316. default:
  317. error_setg(errp, "The specification of stats-intervals is invalid");
  318. return false;
  319. }
  320. }
  321. return true;
  322. }
/* Media kind selected by the legacy -drive media=... option. */
typedef enum { MEDIA_DISK, MEDIA_CDROM } DriveMediaType;
/* All parameters but @opts are optional and may be set to NULL. */
static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags,
    const char **throttling_group, ThrottleConfig *throttle_cfg,
    BlockdevDetectZeroesOptions *detect_zeroes, Error **errp)
{
    Error *local_error = NULL;
    const char *aio;

    /* Flags: copy-on-read and the aio backend selection */
    if (bdrv_flags) {
        if (qemu_opt_get_bool(opts, "copy-on-read", false)) {
            *bdrv_flags |= BDRV_O_COPY_ON_READ;
        }

        if ((aio = qemu_opt_get(opts, "aio")) != NULL) {
            if (bdrv_parse_aio(aio, bdrv_flags) < 0) {
                error_setg(errp, "invalid aio option");
                return;
            }
        }
    }

    /* disk I/O throttling */
    if (throttling_group) {
        *throttling_group = qemu_opt_get(opts, "throttling.group");
    }

    if (throttle_cfg) {
        throttle_config_init(throttle_cfg);
        /* Average (sustained) rates; 0 means unlimited */
        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].avg =
            qemu_opt_get_number(opts, "throttling.bps-total", 0);
        throttle_cfg->buckets[THROTTLE_BPS_READ].avg  =
            qemu_opt_get_number(opts, "throttling.bps-read", 0);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].avg =
            qemu_opt_get_number(opts, "throttling.bps-write", 0);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].avg =
            qemu_opt_get_number(opts, "throttling.iops-total", 0);
        throttle_cfg->buckets[THROTTLE_OPS_READ].avg =
            qemu_opt_get_number(opts, "throttling.iops-read", 0);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].avg =
            qemu_opt_get_number(opts, "throttling.iops-write", 0);

        /* Burst (peak) rates; 0 means no burst allowance */
        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].max =
            qemu_opt_get_number(opts, "throttling.bps-total-max", 0);
        throttle_cfg->buckets[THROTTLE_BPS_READ].max  =
            qemu_opt_get_number(opts, "throttling.bps-read-max", 0);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].max =
            qemu_opt_get_number(opts, "throttling.bps-write-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].max =
            qemu_opt_get_number(opts, "throttling.iops-total-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_READ].max =
            qemu_opt_get_number(opts, "throttling.iops-read-max", 0);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].max =
            qemu_opt_get_number(opts, "throttling.iops-write-max", 0);

        /* Burst durations in seconds; default 1 */
        throttle_cfg->buckets[THROTTLE_BPS_TOTAL].burst_length =
            qemu_opt_get_number(opts, "throttling.bps-total-max-length", 1);
        throttle_cfg->buckets[THROTTLE_BPS_READ].burst_length  =
            qemu_opt_get_number(opts, "throttling.bps-read-max-length", 1);
        throttle_cfg->buckets[THROTTLE_BPS_WRITE].burst_length =
            qemu_opt_get_number(opts, "throttling.bps-write-max-length", 1);
        throttle_cfg->buckets[THROTTLE_OPS_TOTAL].burst_length =
            qemu_opt_get_number(opts, "throttling.iops-total-max-length", 1);
        throttle_cfg->buckets[THROTTLE_OPS_READ].burst_length =
            qemu_opt_get_number(opts, "throttling.iops-read-max-length", 1);
        throttle_cfg->buckets[THROTTLE_OPS_WRITE].burst_length =
            qemu_opt_get_number(opts, "throttling.iops-write-max-length", 1);
        throttle_cfg->op_size =
            qemu_opt_get_number(opts, "throttling.iops-size", 0);

        if (!throttle_is_valid(throttle_cfg, errp)) {
            return;
        }
    }

    /* detect-zeroes defaults to "off" when the option is absent */
    if (detect_zeroes) {
        *detect_zeroes =
            qapi_enum_parse(&BlockdevDetectZeroesOptions_lookup,
                            qemu_opt_get(opts, "detect-zeroes"),
                            BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF,
                            &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    }
}
  402. static OnOffAuto account_get_opt(QemuOpts *opts, const char *name)
  403. {
  404. if (!qemu_opt_find(opts, name)) {
  405. return ON_OFF_AUTO_AUTO;
  406. }
  407. if (qemu_opt_get_bool(opts, name, true)) {
  408. return ON_OFF_AUTO_ON;
  409. }
  410. return ON_OFF_AUTO_OFF;
  411. }
  412. /* Takes the ownership of bs_opts */
  413. static BlockBackend *blockdev_init(const char *file, QDict *bs_opts,
  414. Error **errp)
  415. {
  416. const char *buf;
  417. int bdrv_flags = 0;
  418. int on_read_error, on_write_error;
  419. OnOffAuto account_invalid, account_failed;
  420. bool writethrough, read_only;
  421. BlockBackend *blk;
  422. BlockDriverState *bs;
  423. ThrottleConfig cfg;
  424. int snapshot = 0;
  425. Error *error = NULL;
  426. QemuOpts *opts;
  427. QDict *interval_dict = NULL;
  428. QList *interval_list = NULL;
  429. const char *id;
  430. BlockdevDetectZeroesOptions detect_zeroes =
  431. BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF;
  432. const char *throttling_group = NULL;
  433. /* Check common options by copying from bs_opts to opts, all other options
  434. * stay in bs_opts for processing by bdrv_open(). */
  435. id = qdict_get_try_str(bs_opts, "id");
  436. opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, errp);
  437. if (!opts) {
  438. goto err_no_opts;
  439. }
  440. if (!qemu_opts_absorb_qdict(opts, bs_opts, errp)) {
  441. goto early_err;
  442. }
  443. if (id) {
  444. qdict_del(bs_opts, "id");
  445. }
  446. /* extract parameters */
  447. snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
  448. account_invalid = account_get_opt(opts, "stats-account-invalid");
  449. account_failed = account_get_opt(opts, "stats-account-failed");
  450. writethrough = !qemu_opt_get_bool(opts, BDRV_OPT_CACHE_WB, true);
  451. id = qemu_opts_id(opts);
  452. qdict_extract_subqdict(bs_opts, &interval_dict, "stats-intervals.");
  453. qdict_array_split(interval_dict, &interval_list);
  454. if (qdict_size(interval_dict) != 0) {
  455. error_setg(errp, "Invalid option stats-intervals.%s",
  456. qdict_first(interval_dict)->key);
  457. goto early_err;
  458. }
  459. extract_common_blockdev_options(opts, &bdrv_flags, &throttling_group, &cfg,
  460. &detect_zeroes, &error);
  461. if (error) {
  462. error_propagate(errp, error);
  463. goto early_err;
  464. }
  465. if ((buf = qemu_opt_get(opts, "format")) != NULL) {
  466. if (is_help_option(buf)) {
  467. qemu_printf("Supported formats:");
  468. bdrv_iterate_format(bdrv_format_print, NULL, false);
  469. qemu_printf("\nSupported formats (read-only):");
  470. bdrv_iterate_format(bdrv_format_print, NULL, true);
  471. qemu_printf("\n");
  472. goto early_err;
  473. }
  474. if (qdict_haskey(bs_opts, "driver")) {
  475. error_setg(errp, "Cannot specify both 'driver' and 'format'");
  476. goto early_err;
  477. }
  478. qdict_put_str(bs_opts, "driver", buf);
  479. }
  480. on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
  481. if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
  482. on_write_error = parse_block_error_action(buf, 0, &error);
  483. if (error) {
  484. error_propagate(errp, error);
  485. goto early_err;
  486. }
  487. }
  488. on_read_error = BLOCKDEV_ON_ERROR_REPORT;
  489. if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
  490. on_read_error = parse_block_error_action(buf, 1, &error);
  491. if (error) {
  492. error_propagate(errp, error);
  493. goto early_err;
  494. }
  495. }
  496. if (snapshot) {
  497. bdrv_flags |= BDRV_O_SNAPSHOT;
  498. }
  499. read_only = qemu_opt_get_bool(opts, BDRV_OPT_READ_ONLY, false);
  500. /* init */
  501. if ((!file || !*file) && !qdict_size(bs_opts)) {
  502. BlockBackendRootState *blk_rs;
  503. blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
  504. blk_rs = blk_get_root_state(blk);
  505. blk_rs->open_flags = bdrv_flags | (read_only ? 0 : BDRV_O_RDWR);
  506. blk_rs->detect_zeroes = detect_zeroes;
  507. qobject_unref(bs_opts);
  508. } else {
  509. if (file && !*file) {
  510. file = NULL;
  511. }
  512. /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
  513. * with other callers) rather than what we want as the real defaults.
  514. * Apply the defaults here instead. */
  515. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
  516. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
  517. qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY,
  518. read_only ? "on" : "off");
  519. qdict_set_default_str(bs_opts, BDRV_OPT_AUTO_READ_ONLY, "on");
  520. assert((bdrv_flags & BDRV_O_CACHE_MASK) == 0);
  521. if (runstate_check(RUN_STATE_INMIGRATE)) {
  522. bdrv_flags |= BDRV_O_INACTIVE;
  523. }
  524. blk = blk_new_open(file, NULL, bs_opts, bdrv_flags, errp);
  525. if (!blk) {
  526. goto err_no_bs_opts;
  527. }
  528. bs = blk_bs(blk);
  529. bs->detect_zeroes = detect_zeroes;
  530. block_acct_setup(blk_get_stats(blk), account_invalid, account_failed);
  531. if (!parse_stats_intervals(blk_get_stats(blk), interval_list, errp)) {
  532. blk_unref(blk);
  533. blk = NULL;
  534. goto err_no_bs_opts;
  535. }
  536. }
  537. /* disk I/O throttling */
  538. if (throttle_enabled(&cfg)) {
  539. if (!throttling_group) {
  540. throttling_group = id;
  541. }
  542. blk_io_limits_enable(blk, throttling_group);
  543. blk_set_io_limits(blk, &cfg);
  544. }
  545. blk_set_enable_write_cache(blk, !writethrough);
  546. blk_set_on_error(blk, on_read_error, on_write_error);
  547. if (!monitor_add_blk(blk, id, errp)) {
  548. blk_unref(blk);
  549. blk = NULL;
  550. goto err_no_bs_opts;
  551. }
  552. err_no_bs_opts:
  553. qemu_opts_del(opts);
  554. qobject_unref(interval_dict);
  555. qobject_unref(interval_list);
  556. return blk;
  557. early_err:
  558. qemu_opts_del(opts);
  559. qobject_unref(interval_dict);
  560. qobject_unref(interval_list);
  561. err_no_opts:
  562. qobject_unref(bs_opts);
  563. return NULL;
  564. }
  565. /* Takes the ownership of bs_opts */
  566. BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp)
  567. {
  568. int bdrv_flags = 0;
  569. GLOBAL_STATE_CODE();
  570. /* bdrv_open() defaults to the values in bdrv_flags (for compatibility
  571. * with other callers) rather than what we want as the real defaults.
  572. * Apply the defaults here instead. */
  573. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_DIRECT, "off");
  574. qdict_set_default_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH, "off");
  575. qdict_set_default_str(bs_opts, BDRV_OPT_READ_ONLY, "off");
  576. if (runstate_check(RUN_STATE_INMIGRATE)) {
  577. bdrv_flags |= BDRV_O_INACTIVE;
  578. }
  579. return bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp);
  580. }
  581. void blockdev_close_all_bdrv_states(void)
  582. {
  583. BlockDriverState *bs, *next_bs;
  584. GLOBAL_STATE_CODE();
  585. QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) {
  586. AioContext *ctx = bdrv_get_aio_context(bs);
  587. aio_context_acquire(ctx);
  588. bdrv_unref(bs);
  589. aio_context_release(ctx);
  590. }
  591. }
  592. /* Iterates over the list of monitor-owned BlockDriverStates */
  593. BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs)
  594. {
  595. GLOBAL_STATE_CODE();
  596. return bs ? QTAILQ_NEXT(bs, monitor_list)
  597. : QTAILQ_FIRST(&monitor_bdrv_states);
  598. }
  599. static bool qemu_opt_rename(QemuOpts *opts, const char *from, const char *to,
  600. Error **errp)
  601. {
  602. const char *value;
  603. value = qemu_opt_get(opts, from);
  604. if (value) {
  605. if (qemu_opt_find(opts, to)) {
  606. error_setg(errp, "'%s' and its alias '%s' can't be used at the "
  607. "same time", to, from);
  608. return false;
  609. }
  610. }
  611. /* rename all items in opts */
  612. while ((value = qemu_opt_get(opts, from))) {
  613. qemu_opt_set(opts, to, value, &error_abort);
  614. qemu_opt_unset(opts, from);
  615. }
  616. return true;
  617. }
/*
 * Legacy -drive options that are either consumed here (bus/unit/index,
 * media, if, file) or passed on with special -drive semantics.  Anything
 * not listed below stays in the options dict and is handled by the
 * generic blockdev code.
 */
QemuOptsList qemu_legacy_drive_opts = {
    .name = "drive",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_legacy_drive_opts.head),
    .desc = {
        {
            .name = "bus",
            .type = QEMU_OPT_NUMBER,
            .help = "bus number",
        },{
            .name = "unit",
            .type = QEMU_OPT_NUMBER,
            .help = "unit number (i.e. lun for scsi)",
        },{
            .name = "index",
            .type = QEMU_OPT_NUMBER,
            .help = "index number",
        },{
            .name = "media",
            .type = QEMU_OPT_STRING,
            .help = "media type (disk, cdrom)",
        },{
            .name = "if",
            .type = QEMU_OPT_STRING,
            .help = "interface (ide, scsi, sd, mtd, floppy, pflash, virtio)",
        },{
            .name = "file",
            .type = QEMU_OPT_STRING,
            .help = "file name",
        },

        /* Options that are passed on, but have special semantics with -drive */
        {
            .name = BDRV_OPT_READ_ONLY,
            .type = QEMU_OPT_BOOL,
            .help = "open drive file as read-only",
        },{
            .name = "rerror",
            .type = QEMU_OPT_STRING,
            .help = "read error action",
        },{
            .name = "werror",
            .type = QEMU_OPT_STRING,
            .help = "write error action",
        },{
            .name = "copy-on-read",
            .type = QEMU_OPT_BOOL,
            .help = "copy read data from backing file into image file",
        },

        { /* end of list */ }
    },
};
/*
 * Create a block device from legacy -drive options.
 *
 * Translates the legacy option names in @all_opts to their QMP
 * equivalents, resolves the interface type and bus/unit address, and
 * hands the remaining options to blockdev_init().  Returns a DriveInfo
 * describing the drive on success; on failure returns NULL and sets
 * @errp.  On success the returned DriveInfo takes over @all_opts
 * (dinfo->opts).
 */
DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type,
                     Error **errp)
{
    const char *value;
    BlockBackend *blk;
    DriveInfo *dinfo = NULL;
    QDict *bs_opts;
    QemuOpts *legacy_opts;
    DriveMediaType media = MEDIA_DISK;
    BlockInterfaceType type;
    int max_devs, bus_id, unit_id, index;
    const char *werror, *rerror;
    bool read_only = false;
    bool copy_on_read;
    const char *filename;
    int i;

    GLOBAL_STATE_CODE();

    /* Change legacy command line options into QMP ones */
    static const struct {
        const char *from;
        const char *to;
    } opt_renames[] = {
        { "iops",           "throttling.iops-total" },
        { "iops_rd",        "throttling.iops-read" },
        { "iops_wr",        "throttling.iops-write" },

        { "bps",            "throttling.bps-total" },
        { "bps_rd",         "throttling.bps-read" },
        { "bps_wr",         "throttling.bps-write" },

        { "iops_max",       "throttling.iops-total-max" },
        { "iops_rd_max",    "throttling.iops-read-max" },
        { "iops_wr_max",    "throttling.iops-write-max" },

        { "bps_max",        "throttling.bps-total-max" },
        { "bps_rd_max",     "throttling.bps-read-max" },
        { "bps_wr_max",     "throttling.bps-write-max" },

        { "iops_size",      "throttling.iops-size" },

        { "group",          "throttling.group" },

        { "readonly",       BDRV_OPT_READ_ONLY },
    };

    for (i = 0; i < ARRAY_SIZE(opt_renames); i++) {
        if (!qemu_opt_rename(all_opts, opt_renames[i].from,
                             opt_renames[i].to, errp)) {
            return NULL;
        }
    }

    /* Expand the combined "cache" shorthand into the individual cache
     * options, unless they were also given explicitly. */
    value = qemu_opt_get(all_opts, "cache");
    if (value) {
        int flags = 0;
        bool writethrough;

        if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) {
            error_setg(errp, "invalid cache option");
            return NULL;
        }

        /* Specific options take precedence */
        if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_WB)) {
            qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_WB,
                              !writethrough, &error_abort);
        }
        if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_DIRECT)) {
            qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_DIRECT,
                              !!(flags & BDRV_O_NOCACHE), &error_abort);
        }
        if (!qemu_opt_get(all_opts, BDRV_OPT_CACHE_NO_FLUSH)) {
            qemu_opt_set_bool(all_opts, BDRV_OPT_CACHE_NO_FLUSH,
                              !!(flags & BDRV_O_NO_FLUSH), &error_abort);
        }
        qemu_opt_unset(all_opts, "cache");
    }

    /* Get a QDict for processing the options */
    bs_opts = qdict_new();
    qemu_opts_to_qdict(all_opts, bs_opts);

    /* Pull the legacy-only options out of bs_opts; everything left over
     * is for blockdev_init()/bdrv_open(). */
    legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0,
                                   &error_abort);
    if (!qemu_opts_absorb_qdict(legacy_opts, bs_opts, errp)) {
        goto fail;
    }

    /* Media type */
    value = qemu_opt_get(legacy_opts, "media");
    if (value) {
        if (!strcmp(value, "disk")) {
            media = MEDIA_DISK;
        } else if (!strcmp(value, "cdrom")) {
            media = MEDIA_CDROM;
            read_only = true;
        } else {
            error_setg(errp, "'%s' invalid media", value);
            goto fail;
        }
    }

    /* copy-on-read is disabled with a warning for read-only devices */
    read_only |= qemu_opt_get_bool(legacy_opts, BDRV_OPT_READ_ONLY, false);
    copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false);
    if (read_only && copy_on_read) {
        warn_report("disabling copy-on-read on read-only drive");
        copy_on_read = false;
    }

    qdict_put_str(bs_opts, BDRV_OPT_READ_ONLY, read_only ? "on" : "off");
    qdict_put_str(bs_opts, "copy-on-read", copy_on_read ? "on" : "off");

    /* Controller type */
    value = qemu_opt_get(legacy_opts, "if");
    if (value) {
        /* linear scan of if_name[] for the matching interface name */
        for (type = 0;
             type < IF_COUNT && strcmp(value, if_name[type]);
             type++) {
        }
        if (type == IF_COUNT) {
            error_setg(errp, "unsupported bus type '%s'", value);
            goto fail;
        }
    } else {
        type = block_default_type;
    }

    /* Device address specified by bus/unit or index.
     * If none was specified, try to find the first free one. */
    bus_id  = qemu_opt_get_number(legacy_opts, "bus", 0);
    unit_id = qemu_opt_get_number(legacy_opts, "unit", -1);
    index   = qemu_opt_get_number(legacy_opts, "index", -1);

    max_devs = if_max_devs[type];

    if (index != -1) {
        if (bus_id != 0 || unit_id != -1) {
            error_setg(errp, "index cannot be used with bus and unit");
            goto fail;
        }
        bus_id = drive_index_to_bus_id(type, index);
        unit_id = drive_index_to_unit_id(type, index);
    }

    /* No unit given: scan for the first free (bus, unit) slot, wrapping
     * the unit number onto the next bus when the bus is full. */
    if (unit_id == -1) {
       unit_id = 0;
       while (drive_get(type, bus_id, unit_id) != NULL) {
           unit_id++;
           if (max_devs && unit_id >= max_devs) {
               unit_id -= max_devs;
               bus_id++;
           }
       }
    }

    if (max_devs && unit_id >= max_devs) {
        error_setg(errp, "unit %d too big (max is %d)", unit_id, max_devs - 1);
        goto fail;
    }

    if (drive_get(type, bus_id, unit_id) != NULL) {
        error_setg(errp, "drive with bus=%d, unit=%d (index=%d) exists",
                   bus_id, unit_id, index);
        goto fail;
    }

    /* no id supplied -> create one */
    if (qemu_opts_id(all_opts) == NULL) {
        char *new_id;
        const char *mediastr = "";
        if (type == IF_IDE || type == IF_SCSI) {
            mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
        }
        if (max_devs) {
            new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id,
                                     mediastr, unit_id);
        } else {
            new_id = g_strdup_printf("%s%s%i", if_name[type],
                                     mediastr, unit_id);
        }
        qdict_put_str(bs_opts, "id", new_id);
        g_free(new_id);
    }

    /* Add virtio block device */
    if (type == IF_VIRTIO) {
        QemuOpts *devopts;
        devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0,
                                   &error_abort);
        qemu_opt_set(devopts, "driver", "virtio-blk", &error_abort);
        qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id"),
                     &error_abort);
    }

    filename = qemu_opt_get(legacy_opts, "file");

    /* Check werror/rerror compatibility with if=... */
    werror = qemu_opt_get(legacy_opts, "werror");
    if (werror != NULL) {
        if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
            type != IF_NONE) {
            error_setg(errp, "werror is not supported by this bus type");
            goto fail;
        }
        qdict_put_str(bs_opts, "werror", werror);
    }
    rerror = qemu_opt_get(legacy_opts, "rerror");
    if (rerror != NULL) {
        if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
            type != IF_NONE) {
            error_setg(errp, "rerror is not supported by this bus type");
            goto fail;
        }
        qdict_put_str(bs_opts, "rerror", rerror);
    }

    /* Actual block device init: Functionality shared with blockdev-add */
    blk = blockdev_init(filename, bs_opts, errp);
    bs_opts = NULL;   /* blockdev_init() took ownership */
    if (!blk) {
        goto fail;
    }

    /* Create legacy DriveInfo */
    dinfo = g_malloc0(sizeof(*dinfo));
    dinfo->opts = all_opts;

    dinfo->type = type;
    dinfo->bus = bus_id;
    dinfo->unit = unit_id;

    blk_set_legacy_dinfo(blk, dinfo);

    switch(type) {
    case IF_IDE:
    case IF_SCSI:
    case IF_XEN:
    case IF_NONE:
        dinfo->media_cd = media == MEDIA_CDROM;
        break;
    default:
        break;
    }

/* Shared cleanup: on success dinfo is set and bs_opts is NULL, so this
 * only releases the temporaries; on error it returns NULL. */
fail:
    qemu_opts_del(legacy_opts);
    qobject_unref(bs_opts);
    return dinfo;
}
  886. static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
  887. {
  888. BlockDriverState *bs;
  889. AioContext *aio_context;
  890. bs = bdrv_lookup_bs(name, name, errp);
  891. if (bs == NULL) {
  892. return NULL;
  893. }
  894. if (!bdrv_is_root_node(bs)) {
  895. error_setg(errp, "Need a root block node");
  896. return NULL;
  897. }
  898. aio_context = bdrv_get_aio_context(bs);
  899. aio_context_acquire(aio_context);
  900. if (!bdrv_is_inserted(bs)) {
  901. error_setg(errp, "Device has no medium");
  902. bs = NULL;
  903. }
  904. aio_context_release(aio_context);
  905. return bs;
  906. }
  907. static void blockdev_do_action(TransactionAction *action, Error **errp)
  908. {
  909. TransactionActionList list;
  910. list.value = action;
  911. list.next = NULL;
  912. qmp_transaction(&list, NULL, errp);
  913. }
  914. void qmp_blockdev_snapshot_sync(const char *device, const char *node_name,
  915. const char *snapshot_file,
  916. const char *snapshot_node_name,
  917. const char *format,
  918. bool has_mode, NewImageMode mode, Error **errp)
  919. {
  920. BlockdevSnapshotSync snapshot = {
  921. .device = (char *) device,
  922. .node_name = (char *) node_name,
  923. .snapshot_file = (char *) snapshot_file,
  924. .snapshot_node_name = (char *) snapshot_node_name,
  925. .format = (char *) format,
  926. .has_mode = has_mode,
  927. .mode = mode,
  928. };
  929. TransactionAction action = {
  930. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC,
  931. .u.blockdev_snapshot_sync.data = &snapshot,
  932. };
  933. blockdev_do_action(&action, errp);
  934. }
  935. void qmp_blockdev_snapshot(const char *node, const char *overlay,
  936. Error **errp)
  937. {
  938. BlockdevSnapshot snapshot_data = {
  939. .node = (char *) node,
  940. .overlay = (char *) overlay
  941. };
  942. TransactionAction action = {
  943. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT,
  944. .u.blockdev_snapshot.data = &snapshot_data,
  945. };
  946. blockdev_do_action(&action, errp);
  947. }
  948. void qmp_blockdev_snapshot_internal_sync(const char *device,
  949. const char *name,
  950. Error **errp)
  951. {
  952. BlockdevSnapshotInternal snapshot = {
  953. .device = (char *) device,
  954. .name = (char *) name
  955. };
  956. TransactionAction action = {
  957. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
  958. .u.blockdev_snapshot_internal_sync.data = &snapshot,
  959. };
  960. blockdev_do_action(&action, errp);
  961. }
/*
 * Delete an internal snapshot identified by @id and/or @name on the root
 * node of @device.  At least one of @id/@name must be given.  Returns a
 * newly allocated SnapshotInfo describing the deleted snapshot (caller
 * frees), or NULL with @errp set on failure.
 */
SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
                                                         const char *id,
                                                         const char *name,
                                                         Error **errp)
{
    BlockDriverState *bs;
    AioContext *aio_context;
    QEMUSnapshotInfo sn;
    Error *local_err = NULL;
    SnapshotInfo *info = NULL;
    int ret;

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return NULL;
    }

    /* All checks and the deletion itself run with the node's context held */
    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (!id && !name) {
        error_setg(errp, "Name or id must be provided");
        goto out_aio_context;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) {
        goto out_aio_context;
    }

    /* Look the snapshot up first so we can report its metadata afterwards */
    ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out_aio_context;
    }
    if (!ret) {
        error_setg(errp,
                   "Snapshot with id '%s' and name '%s' does not exist on "
                   "device '%s'",
                   STR_OR_NULL(id), STR_OR_NULL(name), device);
        goto out_aio_context;
    }

    bdrv_snapshot_delete(bs, id, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out_aio_context;
    }

    aio_context_release(aio_context);

    /* Build the result from the metadata captured before deletion */
    info = g_new0(SnapshotInfo, 1);
    info->id = g_strdup(sn.id_str);
    info->name = g_strdup(sn.name);
    info->date_nsec = sn.date_nsec;
    info->date_sec = sn.date_sec;
    info->vm_state_size = sn.vm_state_size;
    info->vm_clock_nsec = sn.vm_clock_nsec % 1000000000;
    info->vm_clock_sec = sn.vm_clock_nsec / 1000000000;
    if (sn.icount != -1ULL) {
        info->icount = sn.icount;
        info->has_icount = true;
    }

    return info;

out_aio_context:
    aio_context_release(aio_context);
    return NULL;
}
/* New and old BlockDriverState structs for atomic group operations */

typedef struct BlkActionState BlkActionState;

/**
 * BlkActionOps:
 * Table of operations that define an Action.
 *
 * @instance_size: Size of state struct, in bytes.
 * @prepare: Prepare the work, must NOT be NULL.
 * @commit: Commit the changes, can be NULL.
 * @abort: Abort the changes on fail, can be NULL.
 * @clean: Clean up resources after all transaction actions have called
 *         commit() or abort(). Can be NULL.
 *
 * Only prepare() may fail. In a single transaction, only one of commit() or
 * abort() will be called. clean() will always be called if it is present.
 *
 * Each callback receives the BlkActionState embedded at the start of the
 * action's own state struct (recovered via DO_UPCAST in the callbacks).
 *
 * Always run under BQL.
 */
typedef struct BlkActionOps {
    size_t instance_size;
    void (*prepare)(BlkActionState *common, Error **errp);
    void (*commit)(BlkActionState *common);
    void (*abort)(BlkActionState *common);
    void (*clean)(BlkActionState *common);
} BlkActionOps;
/**
 * BlkActionState:
 * Describes one Action's state within a Transaction.
 *
 * @action: QAPI-defined enum identifying which Action to perform.
 * @ops: Table of ActionOps this Action can perform.
 * @block_job_txn: Transaction which this action belongs to.
 * @txn_props: Properties of the enclosing transaction (e.g. the
 *             completion-mode checked by action_check_completion_mode()).
 * @entry: List membership for all Actions in this Transaction.
 *
 * This structure must be arranged as first member in a subclassed type,
 * assuming that the compiler will also arrange it to the same offsets as the
 * base class.
 */
struct BlkActionState {
    TransactionAction *action;
    const BlkActionOps *ops;
    JobTxn *block_job_txn;
    TransactionProperties *txn_props;
    QTAILQ_ENTRY(BlkActionState) entry;
};
/* internal snapshot private data */
typedef struct InternalSnapshotState {
    BlkActionState common;  /* must stay first: base-class embedding */
    BlockDriverState *bs;   /* node the snapshot is taken on; set by prepare() */
    QEMUSnapshotInfo sn;    /* snapshot metadata filled in by prepare() */
    bool created;           /* true once bdrv_snapshot_create() succeeded */
} InternalSnapshotState;
  1073. static int action_check_completion_mode(BlkActionState *s, Error **errp)
  1074. {
  1075. if (s->txn_props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
  1076. error_setg(errp,
  1077. "Action '%s' does not support Transaction property "
  1078. "completion-mode = %s",
  1079. TransactionActionKind_str(s->action->type),
  1080. ActionCompletionMode_str(s->txn_props->completion_mode));
  1081. return -1;
  1082. }
  1083. return 0;
  1084. }
/*
 * Prepare phase of the blockdev-snapshot-internal-sync action: validate
 * the request and create the internal snapshot.  On success state->created
 * is set so that .abort() knows there is a snapshot to delete; the node is
 * kept drained until .clean() runs.
 */
static void internal_snapshot_prepare(BlkActionState *common,
                                      Error **errp)
{
    Error *local_err = NULL;
    const char *device;
    const char *name;
    BlockDriverState *bs;
    QEMUSnapshotInfo old_sn, *sn;
    bool ret;
    int64_t rt;
    BlockdevSnapshotInternal *internal;
    InternalSnapshotState *state;
    AioContext *aio_context;
    int ret1;

    g_assert(common->action->type ==
             TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC);
    internal = common->action->u.blockdev_snapshot_internal_sync.data;
    state = DO_UPCAST(InternalSnapshotState, common, common);

    /* 1. parse input */
    device = internal->device;
    name = internal->name;

    /* 2. check for validation */
    if (action_check_completion_mode(common, errp) < 0) {
        return;
    }

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    state->bs = bs;

    /* Paired with .clean() */
    bdrv_drained_begin(bs);

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
        goto out;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Device '%s' is read only", device);
        goto out;
    }

    if (!bdrv_can_snapshot(bs)) {
        error_setg(errp, "Block format '%s' used by device '%s' "
                   "does not support internal snapshots",
                   bs->drv->format_name, device);
        goto out;
    }

    if (!strlen(name)) {
        error_setg(errp, "Name is empty");
        goto out;
    }

    /* check whether a snapshot with name exist */
    ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
                                            &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    } else if (ret) {
        error_setg(errp,
                   "Snapshot with name '%s' already exists on device '%s'",
                   name, device);
        goto out;
    }

    /* 3. take the snapshot */
    sn = &state->sn;
    pstrcpy(sn->name, sizeof(sn->name), name);
    /* split the microsecond wall-clock time into seconds + nanoseconds */
    rt = g_get_real_time();
    sn->date_sec = rt / G_USEC_PER_SEC;
    sn->date_nsec = (rt % G_USEC_PER_SEC) * 1000;
    sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (replay_mode != REPLAY_MODE_NONE) {
        sn->icount = replay_get_current_icount();
    } else {
        /* -1ULL marks "no icount available" (cf. has_icount reporting) */
        sn->icount = -1ULL;
    }

    ret1 = bdrv_snapshot_create(bs, sn);
    if (ret1 < 0) {
        error_setg_errno(errp, -ret1,
                         "Failed to create snapshot '%s' on device '%s'",
                         name, device);
        goto out;
    }

    /* 4. succeed, mark a snapshot is created */
    state->created = true;

out:
    aio_context_release(aio_context);
}
  1172. static void internal_snapshot_abort(BlkActionState *common)
  1173. {
  1174. InternalSnapshotState *state =
  1175. DO_UPCAST(InternalSnapshotState, common, common);
  1176. BlockDriverState *bs = state->bs;
  1177. QEMUSnapshotInfo *sn = &state->sn;
  1178. AioContext *aio_context;
  1179. Error *local_error = NULL;
  1180. if (!state->created) {
  1181. return;
  1182. }
  1183. aio_context = bdrv_get_aio_context(state->bs);
  1184. aio_context_acquire(aio_context);
  1185. if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
  1186. error_reportf_err(local_error,
  1187. "Failed to delete snapshot with id '%s' and "
  1188. "name '%s' on device '%s' in abort: ",
  1189. sn->id_str, sn->name,
  1190. bdrv_get_device_name(bs));
  1191. }
  1192. aio_context_release(aio_context);
  1193. }
  1194. static void internal_snapshot_clean(BlkActionState *common)
  1195. {
  1196. InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
  1197. common, common);
  1198. AioContext *aio_context;
  1199. if (!state->bs) {
  1200. return;
  1201. }
  1202. aio_context = bdrv_get_aio_context(state->bs);
  1203. aio_context_acquire(aio_context);
  1204. bdrv_drained_end(state->bs);
  1205. aio_context_release(aio_context);
  1206. }
/* external snapshot private data */
typedef struct ExternalSnapshotState {
    BlkActionState common;      /* must stay first: base-class embedding */
    BlockDriverState *old_bs;   /* node being snapshotted */
    BlockDriverState *new_bs;   /* new overlay node */
    bool overlay_appended;      /* true once bdrv_append() succeeded */
} ExternalSnapshotState;
/*
 * Prepare phase shared by 'blockdev-snapshot' and
 * 'blockdev-snapshot-sync': validate the source node, for the -sync
 * variant optionally create the overlay image file, open/look up the
 * overlay, and append it on top of the source.  The source node stays
 * drained until .clean() runs; .abort() undoes the append.
 */
static void external_snapshot_prepare(BlkActionState *common,
                                      Error **errp)
{
    int ret;
    int flags = 0;
    QDict *options = NULL;
    Error *local_err = NULL;
    /* Device and node name of the image to generate the snapshot from */
    const char *device;
    const char *node_name;
    /* Reference to the new image (for 'blockdev-snapshot') */
    const char *snapshot_ref;
    /* File name of the new image (for 'blockdev-snapshot-sync') */
    const char *new_image_file;
    ExternalSnapshotState *state =
        DO_UPCAST(ExternalSnapshotState, common, common);
    TransactionAction *action = common->action;
    AioContext *aio_context;
    uint64_t perm, shared;

    /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
     * purpose but a different set of parameters */
    switch (action->type) {
    case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT:
        {
            BlockdevSnapshot *s = action->u.blockdev_snapshot.data;
            device = s->node;
            node_name = s->node;
            new_image_file = NULL;
            snapshot_ref = s->overlay;
        }
        break;
    case TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC:
        {
            BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
            device = s->device;
            node_name = s->node_name;
            new_image_file = s->snapshot_file;
            snapshot_ref = NULL;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* start processing */
    if (action_check_completion_mode(common, errp) < 0) {
        return;
    }

    state->old_bs = bdrv_lookup_bs(device, node_name, errp);
    if (!state->old_bs) {
        return;
    }

    aio_context = bdrv_get_aio_context(state->old_bs);
    aio_context_acquire(aio_context);

    /* Paired with .clean() */
    bdrv_drained_begin(state->old_bs);

    if (!bdrv_is_inserted(state->old_bs)) {
        error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
        goto out;
    }

    if (bdrv_op_is_blocked(state->old_bs,
                           BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
        goto out;
    }

    /* Flush pending writes so the overlay starts from consistent data */
    if (!bdrv_is_read_only(state->old_bs)) {
        if (bdrv_flush(state->old_bs)) {
            error_setg(errp, QERR_IO_ERROR);
            goto out;
        }
    }

    if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
        BlockdevSnapshotSync *s = action->u.blockdev_snapshot_sync.data;
        const char *format = s->format ?: "qcow2";
        enum NewImageMode mode;
        const char *snapshot_node_name = s->snapshot_node_name;

        if (node_name && !snapshot_node_name) {
            error_setg(errp, "New overlay node-name missing");
            goto out;
        }

        if (snapshot_node_name &&
            bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
            error_setg(errp, "New overlay node-name already in use");
            goto out;
        }

        flags = state->old_bs->open_flags;
        flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_COPY_ON_READ);
        flags |= BDRV_O_NO_BACKING;

        /* create new image w/backing file */
        mode = s->has_mode ? s->mode : NEW_IMAGE_MODE_ABSOLUTE_PATHS;
        if (mode != NEW_IMAGE_MODE_EXISTING) {
            int64_t size = bdrv_getlength(state->old_bs);
            if (size < 0) {
                error_setg_errno(errp, -size, "bdrv_getlength failed");
                goto out;
            }
            bdrv_refresh_filename(state->old_bs);

            /* Image creation may block; drop the context lock around it */
            aio_context_release(aio_context);
            bdrv_img_create(new_image_file, format,
                            state->old_bs->filename,
                            state->old_bs->drv->format_name,
                            NULL, size, flags, false, &local_err);
            aio_context_acquire(aio_context);

            if (local_err) {
                error_propagate(errp, local_err);
                goto out;
            }
        }

        options = qdict_new();
        if (snapshot_node_name) {
            qdict_put_str(options, "node-name", snapshot_node_name);
        }
        qdict_put_str(options, "driver", format);
    }

    /* -sync opens the freshly created file; plain snapshot resolves the
     * existing node reference (exactly one of the two is non-NULL here) */
    state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags,
                              errp);

    /* We will manually add the backing_hd field to the bs later */
    if (!state->new_bs) {
        goto out;
    }

    /*
     * Allow attaching a backing file to an overlay that's already in use only
     * if the parents don't assume that they are already seeing a valid image.
     * (Specifically, allow it as a mirror target, which is write-only access.)
     */
    bdrv_get_cumulative_perm(state->new_bs, &perm, &shared);
    if (perm & BLK_PERM_CONSISTENT_READ) {
        error_setg(errp, "The overlay is already in use");
        goto out;
    }

    if (state->new_bs->drv->is_filter) {
        error_setg(errp, "Filters cannot be used as overlays");
        goto out;
    }

    if (bdrv_cow_child(state->new_bs)) {
        error_setg(errp, "The overlay already has a backing image");
        goto out;
    }

    if (!state->new_bs->drv->supports_backing) {
        error_setg(errp, "The overlay does not support backing images");
        goto out;
    }

    ret = bdrv_append(state->new_bs, state->old_bs, errp);
    if (ret < 0) {
        goto out;
    }
    state->overlay_appended = true;

out:
    aio_context_release(aio_context);
}
  1362. static void external_snapshot_commit(BlkActionState *common)
  1363. {
  1364. ExternalSnapshotState *state =
  1365. DO_UPCAST(ExternalSnapshotState, common, common);
  1366. AioContext *aio_context;
  1367. aio_context = bdrv_get_aio_context(state->old_bs);
  1368. aio_context_acquire(aio_context);
  1369. /* We don't need (or want) to use the transactional
  1370. * bdrv_reopen_multiple() across all the entries at once, because we
  1371. * don't want to abort all of them if one of them fails the reopen */
  1372. if (!qatomic_read(&state->old_bs->copy_on_read)) {
  1373. bdrv_reopen_set_read_only(state->old_bs, true, NULL);
  1374. }
  1375. aio_context_release(aio_context);
  1376. }
/*
 * Abort phase: if prepare() appended the overlay, detach it again and
 * put the old node back in place (including restoring its AioContext).
 */
static void external_snapshot_abort(BlkActionState *common)
{
    ExternalSnapshotState *state =
        DO_UPCAST(ExternalSnapshotState, common, common);
    if (state->new_bs) {
        if (state->overlay_appended) {
            AioContext *aio_context;
            AioContext *tmp_context;
            int ret;

            aio_context = bdrv_get_aio_context(state->old_bs);
            aio_context_acquire(aio_context);

            bdrv_ref(state->old_bs);   /* we can't let bdrv_set_backing_hd()
                                          close state->old_bs; we need it */
            bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);

            /*
             * The call to bdrv_set_backing_hd() above returns state->old_bs to
             * the main AioContext. As we're still going to be using it, return
             * it to the AioContext it was before.
             */
            tmp_context = bdrv_get_aio_context(state->old_bs);
            if (aio_context != tmp_context) {
                aio_context_release(aio_context);
                aio_context_acquire(tmp_context);

                ret = bdrv_try_change_aio_context(state->old_bs,
                                                  aio_context, NULL, NULL);
                assert(ret == 0);

                aio_context_release(tmp_context);
                aio_context_acquire(aio_context);
            }

            bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
            bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */

            aio_context_release(aio_context);
        }
    }
}
  1412. static void external_snapshot_clean(BlkActionState *common)
  1413. {
  1414. ExternalSnapshotState *state =
  1415. DO_UPCAST(ExternalSnapshotState, common, common);
  1416. AioContext *aio_context;
  1417. if (!state->old_bs) {
  1418. return;
  1419. }
  1420. aio_context = bdrv_get_aio_context(state->old_bs);
  1421. aio_context_acquire(aio_context);
  1422. bdrv_drained_end(state->old_bs);
  1423. bdrv_unref(state->new_bs);
  1424. aio_context_release(aio_context);
  1425. }
/* Per-action state for a drive-backup transaction entry. */
typedef struct DriveBackupState {
    BlkActionState common;
    BlockDriverState *bs;   /* source node; kept drained between .prepare()
                               and .clean() */
    BlockJob *job;          /* backup job created in .prepare(), started in
                               .commit(), cancelled in .abort() */
} DriveBackupState;
/* Forward declaration: shared job-creation helper used by both
 * drive_backup_prepare() and blockdev_backup_prepare(). */
static BlockJob *do_backup_common(BackupCommon *backup,
                                  BlockDriverState *bs,
                                  BlockDriverState *target_bs,
                                  AioContext *aio_context,
                                  JobTxn *txn, Error **errp);
/*
 * Transaction .prepare() for drive-backup: validate the source node, create
 * and open the target image (unless mode is "existing"), move the target
 * into the source's AioContext and create — but do not start — the backup
 * job.  Pairs with drive_backup_commit() / drive_backup_abort() /
 * drive_backup_clean().
 */
static void drive_backup_prepare(BlkActionState *common, Error **errp)
{
    DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
    DriveBackup *backup;
    BlockDriverState *bs;
    BlockDriverState *target_bs;
    BlockDriverState *source = NULL;
    AioContext *aio_context;
    AioContext *old_context;
    const char *format;
    QDict *options;
    Error *local_err = NULL;
    int flags;
    int64_t size;
    bool set_backing_hd = false;
    int ret;

    assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
    backup = common->action->u.drive_backup.data;

    if (!backup->has_mode) {
        backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
    }

    bs = bdrv_lookup_bs(backup->device, backup->device, errp);
    if (!bs) {
        return;
    }

    if (!bs->drv) {
        error_setg(errp, "Device has no medium");
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    /* Setting state->bs enables the drained-end in .clean() even if we
     * fail below. */
    state->bs = bs;
    /* Paired with .clean() */
    bdrv_drained_begin(bs);

    /* Default the target format to the source's format unless the caller
     * reuses an existing image. */
    format = backup->format;
    if (!format && backup->mode != NEW_IMAGE_MODE_EXISTING) {
        format = bs->drv->format_name;
    }

    /* Early check to avoid creating target */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        goto out;
    }

    flags = bs->open_flags | BDRV_O_RDWR;

    /*
     * See if we have a backing HD we can use to create our new image
     * on top of.
     */
    if (backup->sync == MIRROR_SYNC_MODE_TOP) {
        /*
         * Backup will not replace the source by the target, so none
         * of the filters skipped here will be removed (in contrast to
         * mirror). Therefore, we can skip all of them when looking
         * for the first COW relationship.
         */
        source = bdrv_cow_bs(bdrv_skip_filters(bs));
        if (!source) {
            /* No backing chain: "top" degenerates to a full backup */
            backup->sync = MIRROR_SYNC_MODE_FULL;
        }
    }
    if (backup->sync == MIRROR_SYNC_MODE_NONE) {
        source = bs;
        flags |= BDRV_O_NO_BACKING;
        set_backing_hd = true;
    }

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "bdrv_getlength failed");
        goto out;
    }

    if (backup->mode != NEW_IMAGE_MODE_EXISTING) {
        assert(format);
        if (source) {
            /* Implicit filters should not appear in the filename */
            BlockDriverState *explicit_backing =
                bdrv_skip_implicit_filters(source);

            bdrv_refresh_filename(explicit_backing);
            bdrv_img_create(backup->target, format,
                            explicit_backing->filename,
                            explicit_backing->drv->format_name, NULL,
                            size, flags, false, &local_err);
        } else {
            bdrv_img_create(backup->target, format, NULL, NULL, NULL,
                            size, flags, false, &local_err);
        }
    }

    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    options = qdict_new();
    qdict_put_str(options, "discard", "unmap");
    qdict_put_str(options, "detect-zeroes", "unmap");
    if (format) {
        qdict_put_str(options, "driver", format);
    }

    target_bs = bdrv_open(backup->target, NULL, options, flags, errp);
    if (!target_bs) {
        goto out;
    }

    /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
    old_context = bdrv_get_aio_context(target_bs);
    aio_context_release(aio_context);
    aio_context_acquire(old_context);

    ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
    if (ret < 0) {
        bdrv_unref(target_bs);
        aio_context_release(old_context);
        return;
    }

    aio_context_release(old_context);
    aio_context_acquire(aio_context);

    if (set_backing_hd) {
        if (bdrv_set_backing_hd(target_bs, source, errp) < 0) {
            goto unref;
        }
    }

    /* Job creation; .commit() starts it, .abort() cancels it. */
    state->job = do_backup_common(qapi_DriveBackup_base(backup),
                                  bs, target_bs, aio_context,
                                  common->block_job_txn, errp);

unref:
    bdrv_unref(target_bs);
out:
    aio_context_release(aio_context);
}
  1560. static void drive_backup_commit(BlkActionState *common)
  1561. {
  1562. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1563. AioContext *aio_context;
  1564. aio_context = bdrv_get_aio_context(state->bs);
  1565. aio_context_acquire(aio_context);
  1566. assert(state->job);
  1567. job_start(&state->job->job);
  1568. aio_context_release(aio_context);
  1569. }
  1570. static void drive_backup_abort(BlkActionState *common)
  1571. {
  1572. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1573. if (state->job) {
  1574. job_cancel_sync(&state->job->job, true);
  1575. }
  1576. }
  1577. static void drive_backup_clean(BlkActionState *common)
  1578. {
  1579. DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
  1580. AioContext *aio_context;
  1581. if (!state->bs) {
  1582. return;
  1583. }
  1584. aio_context = bdrv_get_aio_context(state->bs);
  1585. aio_context_acquire(aio_context);
  1586. bdrv_drained_end(state->bs);
  1587. aio_context_release(aio_context);
  1588. }
/* Per-action state for a blockdev-backup transaction entry. */
typedef struct BlockdevBackupState {
    BlkActionState common;
    BlockDriverState *bs;   /* source node; kept drained between .prepare()
                               and .clean() */
    BlockJob *job;          /* backup job created in .prepare(), started in
                               .commit(), cancelled in .abort() */
} BlockdevBackupState;
/*
 * Transaction .prepare() for blockdev-backup: source and target both exist
 * as named nodes already; move the target into the source's AioContext and
 * create — but do not start — the backup job.
 */
static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
{
    BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
    BlockdevBackup *backup;
    BlockDriverState *bs;
    BlockDriverState *target_bs;
    AioContext *aio_context;
    AioContext *old_context;
    int ret;

    assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
    backup = common->action->u.blockdev_backup.data;

    bs = bdrv_lookup_bs(backup->device, backup->device, errp);
    if (!bs) {
        return;
    }

    target_bs = bdrv_lookup_bs(backup->target, backup->target, errp);
    if (!target_bs) {
        return;
    }

    /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
    aio_context = bdrv_get_aio_context(bs);
    old_context = bdrv_get_aio_context(target_bs);
    aio_context_acquire(old_context);

    ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
    if (ret < 0) {
        aio_context_release(old_context);
        return;
    }

    aio_context_release(old_context);
    aio_context_acquire(aio_context);
    state->bs = bs;

    /* Paired with .clean() */
    bdrv_drained_begin(state->bs);

    /* Job creation; .commit() starts it, .abort() cancels it. */
    state->job = do_backup_common(qapi_BlockdevBackup_base(backup),
                                  bs, target_bs, aio_context,
                                  common->block_job_txn, errp);

    aio_context_release(aio_context);
}
  1632. static void blockdev_backup_commit(BlkActionState *common)
  1633. {
  1634. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1635. AioContext *aio_context;
  1636. aio_context = bdrv_get_aio_context(state->bs);
  1637. aio_context_acquire(aio_context);
  1638. assert(state->job);
  1639. job_start(&state->job->job);
  1640. aio_context_release(aio_context);
  1641. }
  1642. static void blockdev_backup_abort(BlkActionState *common)
  1643. {
  1644. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1645. if (state->job) {
  1646. job_cancel_sync(&state->job->job, true);
  1647. }
  1648. }
  1649. static void blockdev_backup_clean(BlkActionState *common)
  1650. {
  1651. BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
  1652. AioContext *aio_context;
  1653. if (!state->bs) {
  1654. return;
  1655. }
  1656. aio_context = bdrv_get_aio_context(state->bs);
  1657. aio_context_acquire(aio_context);
  1658. bdrv_drained_end(state->bs);
  1659. aio_context_release(aio_context);
  1660. }
/* Shared state for the block-dirty-bitmap-* transaction actions. */
typedef struct BlockDirtyBitmapState {
    BlkActionState common;
    BdrvDirtyBitmap *bitmap;    /* bitmap looked up / produced by .prepare() */
    BlockDriverState *bs;       /* owning node, where the action needs it */
    HBitmap *backup;            /* pre-action contents, restored by .abort() */
    bool prepared;              /* add: bitmap was successfully created */
    bool was_enabled;           /* enable/disable: state before the action */
} BlockDirtyBitmapState;
  1669. static void block_dirty_bitmap_add_prepare(BlkActionState *common,
  1670. Error **errp)
  1671. {
  1672. Error *local_err = NULL;
  1673. BlockDirtyBitmapAdd *action;
  1674. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1675. common, common);
  1676. if (action_check_completion_mode(common, errp) < 0) {
  1677. return;
  1678. }
  1679. action = common->action->u.block_dirty_bitmap_add.data;
  1680. /* AIO context taken and released within qmp_block_dirty_bitmap_add */
  1681. qmp_block_dirty_bitmap_add(action->node, action->name,
  1682. action->has_granularity, action->granularity,
  1683. action->has_persistent, action->persistent,
  1684. action->has_disabled, action->disabled,
  1685. &local_err);
  1686. if (!local_err) {
  1687. state->prepared = true;
  1688. } else {
  1689. error_propagate(errp, local_err);
  1690. }
  1691. }
  1692. static void block_dirty_bitmap_add_abort(BlkActionState *common)
  1693. {
  1694. BlockDirtyBitmapAdd *action;
  1695. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1696. common, common);
  1697. action = common->action->u.block_dirty_bitmap_add.data;
  1698. /* Should not be able to fail: IF the bitmap was added via .prepare(),
  1699. * then the node reference and bitmap name must have been valid.
  1700. */
  1701. if (state->prepared) {
  1702. qmp_block_dirty_bitmap_remove(action->node, action->name, &error_abort);
  1703. }
  1704. }
  1705. static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
  1706. Error **errp)
  1707. {
  1708. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1709. common, common);
  1710. BlockDirtyBitmap *action;
  1711. if (action_check_completion_mode(common, errp) < 0) {
  1712. return;
  1713. }
  1714. action = common->action->u.block_dirty_bitmap_clear.data;
  1715. state->bitmap = block_dirty_bitmap_lookup(action->node,
  1716. action->name,
  1717. &state->bs,
  1718. errp);
  1719. if (!state->bitmap) {
  1720. return;
  1721. }
  1722. if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_DEFAULT, errp)) {
  1723. return;
  1724. }
  1725. bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
  1726. }
  1727. static void block_dirty_bitmap_restore(BlkActionState *common)
  1728. {
  1729. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1730. common, common);
  1731. if (state->backup) {
  1732. bdrv_restore_dirty_bitmap(state->bitmap, state->backup);
  1733. }
  1734. }
/* Shared .commit() helper: discard the backup saved in .prepare().
 * NOTE(review): called unconditionally on commit — assumes hbitmap_free()
 * tolerates a NULL state->backup; confirm against the hbitmap
 * implementation. */
static void block_dirty_bitmap_free_backup(BlkActionState *common)
{
    BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
                                             common, common);

    hbitmap_free(state->backup);
}
  1741. static void block_dirty_bitmap_enable_prepare(BlkActionState *common,
  1742. Error **errp)
  1743. {
  1744. BlockDirtyBitmap *action;
  1745. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1746. common, common);
  1747. if (action_check_completion_mode(common, errp) < 0) {
  1748. return;
  1749. }
  1750. action = common->action->u.block_dirty_bitmap_enable.data;
  1751. state->bitmap = block_dirty_bitmap_lookup(action->node,
  1752. action->name,
  1753. NULL,
  1754. errp);
  1755. if (!state->bitmap) {
  1756. return;
  1757. }
  1758. if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_ALLOW_RO, errp)) {
  1759. return;
  1760. }
  1761. state->was_enabled = bdrv_dirty_bitmap_enabled(state->bitmap);
  1762. bdrv_enable_dirty_bitmap(state->bitmap);
  1763. }
  1764. static void block_dirty_bitmap_enable_abort(BlkActionState *common)
  1765. {
  1766. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1767. common, common);
  1768. if (!state->was_enabled) {
  1769. bdrv_disable_dirty_bitmap(state->bitmap);
  1770. }
  1771. }
  1772. static void block_dirty_bitmap_disable_prepare(BlkActionState *common,
  1773. Error **errp)
  1774. {
  1775. BlockDirtyBitmap *action;
  1776. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1777. common, common);
  1778. if (action_check_completion_mode(common, errp) < 0) {
  1779. return;
  1780. }
  1781. action = common->action->u.block_dirty_bitmap_disable.data;
  1782. state->bitmap = block_dirty_bitmap_lookup(action->node,
  1783. action->name,
  1784. NULL,
  1785. errp);
  1786. if (!state->bitmap) {
  1787. return;
  1788. }
  1789. if (bdrv_dirty_bitmap_check(state->bitmap, BDRV_BITMAP_ALLOW_RO, errp)) {
  1790. return;
  1791. }
  1792. state->was_enabled = bdrv_dirty_bitmap_enabled(state->bitmap);
  1793. bdrv_disable_dirty_bitmap(state->bitmap);
  1794. }
  1795. static void block_dirty_bitmap_disable_abort(BlkActionState *common)
  1796. {
  1797. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1798. common, common);
  1799. if (state->was_enabled) {
  1800. bdrv_enable_dirty_bitmap(state->bitmap);
  1801. }
  1802. }
  1803. static void block_dirty_bitmap_merge_prepare(BlkActionState *common,
  1804. Error **errp)
  1805. {
  1806. BlockDirtyBitmapMerge *action;
  1807. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1808. common, common);
  1809. if (action_check_completion_mode(common, errp) < 0) {
  1810. return;
  1811. }
  1812. action = common->action->u.block_dirty_bitmap_merge.data;
  1813. state->bitmap = block_dirty_bitmap_merge(action->node, action->target,
  1814. action->bitmaps, &state->backup,
  1815. errp);
  1816. }
  1817. static void block_dirty_bitmap_remove_prepare(BlkActionState *common,
  1818. Error **errp)
  1819. {
  1820. BlockDirtyBitmap *action;
  1821. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1822. common, common);
  1823. if (action_check_completion_mode(common, errp) < 0) {
  1824. return;
  1825. }
  1826. action = common->action->u.block_dirty_bitmap_remove.data;
  1827. state->bitmap = block_dirty_bitmap_remove(action->node, action->name,
  1828. false, &state->bs, errp);
  1829. if (state->bitmap) {
  1830. bdrv_dirty_bitmap_skip_store(state->bitmap, true);
  1831. bdrv_dirty_bitmap_set_busy(state->bitmap, true);
  1832. }
  1833. }
  1834. static void block_dirty_bitmap_remove_abort(BlkActionState *common)
  1835. {
  1836. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1837. common, common);
  1838. if (state->bitmap) {
  1839. bdrv_dirty_bitmap_skip_store(state->bitmap, false);
  1840. bdrv_dirty_bitmap_set_busy(state->bitmap, false);
  1841. }
  1842. }
  1843. static void block_dirty_bitmap_remove_commit(BlkActionState *common)
  1844. {
  1845. BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
  1846. common, common);
  1847. bdrv_dirty_bitmap_set_busy(state->bitmap, false);
  1848. bdrv_release_dirty_bitmap(state->bitmap);
  1849. }
/* 'abort' action: always fails in .prepare(), forcing the whole
 * transaction to roll back. */
static void abort_prepare(BlkActionState *common, Error **errp)
{
    error_setg(errp, "Transaction aborted using Abort action");
}
/* Unreachable: abort_prepare() always sets an error, so .commit() can
 * never be invoked for this action. */
static void abort_commit(BlkActionState *common)
{
    g_assert_not_reached(); /* this action never succeeds */
}
/*
 * Dispatch table for transaction actions, indexed by
 * TransactionActionKind.  Each entry provides the per-action state size
 * and the .prepare/.commit/.abort/.clean callbacks driven by
 * qmp_transaction().  Callbacks other than .prepare are optional.
 */
static const BlkActionOps actions[] = {
    [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT] = {
        .instance_size = sizeof(ExternalSnapshotState),
        .prepare  = external_snapshot_prepare,
        .commit   = external_snapshot_commit,
        .abort = external_snapshot_abort,
        .clean = external_snapshot_clean,
    },
    [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC] = {
        .instance_size = sizeof(ExternalSnapshotState),
        .prepare  = external_snapshot_prepare,
        .commit   = external_snapshot_commit,
        .abort = external_snapshot_abort,
        .clean = external_snapshot_clean,
    },
    [TRANSACTION_ACTION_KIND_DRIVE_BACKUP] = {
        .instance_size = sizeof(DriveBackupState),
        .prepare = drive_backup_prepare,
        .commit = drive_backup_commit,
        .abort = drive_backup_abort,
        .clean = drive_backup_clean,
    },
    [TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP] = {
        .instance_size = sizeof(BlockdevBackupState),
        .prepare = blockdev_backup_prepare,
        .commit = blockdev_backup_commit,
        .abort = blockdev_backup_abort,
        .clean = blockdev_backup_clean,
    },
    [TRANSACTION_ACTION_KIND_ABORT] = {
        .instance_size = sizeof(BlkActionState),
        .prepare = abort_prepare,
        .commit = abort_commit,
    },
    [TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC] = {
        .instance_size = sizeof(InternalSnapshotState),
        .prepare  = internal_snapshot_prepare,
        .abort = internal_snapshot_abort,
        .clean = internal_snapshot_clean,
    },
    [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ADD] = {
        .instance_size = sizeof(BlockDirtyBitmapState),
        .prepare = block_dirty_bitmap_add_prepare,
        .abort = block_dirty_bitmap_add_abort,
    },
    [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_CLEAR] = {
        .instance_size = sizeof(BlockDirtyBitmapState),
        .prepare = block_dirty_bitmap_clear_prepare,
        .commit = block_dirty_bitmap_free_backup,
        .abort = block_dirty_bitmap_restore,
    },
    [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_ENABLE] = {
        .instance_size = sizeof(BlockDirtyBitmapState),
        .prepare = block_dirty_bitmap_enable_prepare,
        .abort = block_dirty_bitmap_enable_abort,
    },
    [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_DISABLE] = {
        .instance_size = sizeof(BlockDirtyBitmapState),
        .prepare = block_dirty_bitmap_disable_prepare,
        .abort = block_dirty_bitmap_disable_abort,
    },
    [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_MERGE] = {
        .instance_size = sizeof(BlockDirtyBitmapState),
        .prepare = block_dirty_bitmap_merge_prepare,
        .commit = block_dirty_bitmap_free_backup,
        .abort = block_dirty_bitmap_restore,
    },
    [TRANSACTION_ACTION_KIND_BLOCK_DIRTY_BITMAP_REMOVE] = {
        .instance_size = sizeof(BlockDirtyBitmapState),
        .prepare = block_dirty_bitmap_remove_prepare,
        .commit = block_dirty_bitmap_remove_commit,
        .abort = block_dirty_bitmap_remove_abort,
    },
    /* Where are transactions for MIRROR, COMMIT and STREAM?
     * Although these blockjobs use transaction callbacks like the backup job,
     * these jobs do not necessarily adhere to transaction semantics.
     * These jobs may not fully undo all of their actions on abort, nor do they
     * necessarily work in transactions with more than one job in them.
     */
};
  1938. /**
  1939. * Allocate a TransactionProperties structure if necessary, and fill
  1940. * that structure with desired defaults if they are unset.
  1941. */
  1942. static TransactionProperties *get_transaction_properties(
  1943. TransactionProperties *props)
  1944. {
  1945. if (!props) {
  1946. props = g_new0(TransactionProperties, 1);
  1947. }
  1948. if (!props->has_completion_mode) {
  1949. props->has_completion_mode = true;
  1950. props->completion_mode = ACTION_COMPLETION_MODE_INDIVIDUAL;
  1951. }
  1952. return props;
  1953. }
/*
 * 'Atomic' group operations. The operations are performed as a set, and if
 * any fail then we roll back all operations in the group.
 *
 * Always run under BQL.
 */
void qmp_transaction(TransactionActionList *dev_list,
                     struct TransactionProperties *props,
                     Error **errp)
{
    TransactionActionList *dev_entry = dev_list;
    bool has_props = !!props;    /* caller-owned props must not be freed */
    JobTxn *block_job_txn = NULL;
    BlkActionState *state, *next;
    Error *local_err = NULL;

    GLOBAL_STATE_CODE();

    QTAILQ_HEAD(, BlkActionState) snap_bdrv_states;
    QTAILQ_INIT(&snap_bdrv_states);

    /* Does this transaction get canceled as a group on failure?
     * If not, we don't really need to make a JobTxn.
     */
    props = get_transaction_properties(props);
    if (props->completion_mode != ACTION_COMPLETION_MODE_INDIVIDUAL) {
        block_job_txn = job_txn_new();
    }

    /* drain all i/o before any operations */
    bdrv_drain_all();

    /* We don't do anything in this loop that commits us to the operations */
    while (NULL != dev_entry) {
        TransactionAction *dev_info = NULL;
        const BlkActionOps *ops;

        dev_info = dev_entry->value;
        dev_entry = dev_entry->next;

        assert(dev_info->type < ARRAY_SIZE(actions));

        ops = &actions[dev_info->type];
        assert(ops->instance_size > 0);

        /* Per-action state; freed in the cleanup loop below */
        state = g_malloc0(ops->instance_size);
        state->ops = ops;
        state->action = dev_info;
        state->block_job_txn = block_job_txn;
        state->txn_props = props;
        QTAILQ_INSERT_TAIL(&snap_bdrv_states, state, entry);

        state->ops->prepare(state, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto delete_and_fail;
        }
    }

    QTAILQ_FOREACH(state, &snap_bdrv_states, entry) {
        if (state->ops->commit) {
            state->ops->commit(state);
        }
    }

    /* success */
    goto exit;

delete_and_fail:
    /* failure, and it is all-or-none; roll back all operations */
    QTAILQ_FOREACH_REVERSE(state, &snap_bdrv_states, entry) {
        if (state->ops->abort) {
            state->ops->abort(state);
        }
    }
exit:
    /* .clean() runs for every prepared action, on success and on failure */
    QTAILQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) {
        if (state->ops->clean) {
            state->ops->clean(state);
        }
        g_free(state);
    }
    if (!has_props) {
        qapi_free_TransactionProperties(props);
    }
    job_txn_unref(block_job_txn);
}
  2028. BlockDirtyBitmapSha256 *qmp_x_debug_block_dirty_bitmap_sha256(const char *node,
  2029. const char *name,
  2030. Error **errp)
  2031. {
  2032. BdrvDirtyBitmap *bitmap;
  2033. BlockDriverState *bs;
  2034. BlockDirtyBitmapSha256 *ret = NULL;
  2035. char *sha256;
  2036. bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp);
  2037. if (!bitmap || !bs) {
  2038. return NULL;
  2039. }
  2040. sha256 = bdrv_dirty_bitmap_sha256(bitmap, errp);
  2041. if (sha256 == NULL) {
  2042. return NULL;
  2043. }
  2044. ret = g_new(BlockDirtyBitmapSha256, 1);
  2045. ret->sha256 = sha256;
  2046. return ret;
  2047. }
/*
 * QMP block_resize (coroutine context): truncate a node to @size bytes via
 * a temporary BlockBackend holding BLK_PERM_RESIZE.
 */
void coroutine_fn qmp_block_resize(const char *device, const char *node_name,
                                   int64_t size, Error **errp)
{
    Error *local_err = NULL;
    BlockBackend *blk;
    BlockDriverState *bs;
    AioContext *old_ctx;

    bs = bdrv_lookup_bs(device, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* NOTE(review): the message asks for "a >0 size" but the check permits
     * size == 0 — confirm whether a zero-size resize is intended. */
    if (size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "size", "a >0 size");
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
        error_setg(errp, QERR_DEVICE_IN_USE, device);
        return;
    }

    blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
    if (!blk) {
        return;
    }

    /* Quiesce in-flight I/O on the node for the duration of the truncate. */
    bdrv_co_lock(bs);
    bdrv_drained_begin(bs);
    bdrv_co_unlock(bs);

    old_ctx = bdrv_co_enter(bs);
    blk_co_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp);
    bdrv_co_leave(bs, old_ctx);

    bdrv_co_lock(bs);
    bdrv_drained_end(bs);
    blk_unref(blk);
    bdrv_co_unlock(bs);
}
/*
 * QMP block-stream: start a stream job that copies data from the backing
 * chain into @device's top image.  At most one of @base, @base_node and
 * @bottom may be given; they select where the copied chain ends.
 */
void qmp_block_stream(const char *job_id, const char *device,
                      const char *base,
                      const char *base_node,
                      const char *backing_file,
                      const char *bottom,
                      bool has_speed, int64_t speed,
                      bool has_on_error, BlockdevOnError on_error,
                      const char *filter_node_name,
                      bool has_auto_finalize, bool auto_finalize,
                      bool has_auto_dismiss, bool auto_dismiss,
                      Error **errp)
{
    BlockDriverState *bs, *iter, *iter_end;
    BlockDriverState *base_bs = NULL;
    BlockDriverState *bottom_bs = NULL;
    AioContext *aio_context;
    Error *local_err = NULL;
    int job_flags = JOB_DEFAULT;

    /* 'base', 'base-node' and 'bottom' are mutually exclusive */
    if (base && base_node) {
        error_setg(errp, "'base' and 'base-node' cannot be specified "
                   "at the same time");
        return;
    }

    if (base && bottom) {
        error_setg(errp, "'base' and 'bottom' cannot be specified "
                   "at the same time");
        return;
    }

    if (bottom && base_node) {
        error_setg(errp, "'bottom' and 'base-node' cannot be specified "
                   "at the same time");
        return;
    }

    if (!has_on_error) {
        on_error = BLOCKDEV_ON_ERROR_REPORT;
    }

    bs = bdrv_lookup_bs(device, device, errp);
    if (!bs) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (base) {
        base_bs = bdrv_find_backing_image(bs, base);
        if (base_bs == NULL) {
            error_setg(errp, "Can't find '%s' in the backing chain", base);
            goto out;
        }
        assert(bdrv_get_aio_context(base_bs) == aio_context);
    }

    if (base_node) {
        base_bs = bdrv_lookup_bs(NULL, base_node, errp);
        if (!base_bs) {
            goto out;
        }
        if (bs == base_bs || !bdrv_chain_contains(bs, base_bs)) {
            error_setg(errp, "Node '%s' is not a backing image of '%s'",
                       base_node, device);
            goto out;
        }
        assert(bdrv_get_aio_context(base_bs) == aio_context);
        bdrv_refresh_filename(base_bs);
    }

    if (bottom) {
        bottom_bs = bdrv_lookup_bs(NULL, bottom, errp);
        if (!bottom_bs) {
            goto out;
        }
        if (!bottom_bs->drv) {
            error_setg(errp, "Node '%s' is not open", bottom);
            goto out;
        }
        if (bottom_bs->drv->is_filter) {
            error_setg(errp, "Node '%s' is a filter, use a non-filter node "
                       "as 'bottom'", bottom);
            goto out;
        }
        if (!bdrv_chain_contains(bs, bottom_bs)) {
            error_setg(errp, "Node '%s' is not in a chain starting from '%s'",
                       bottom, device);
            goto out;
        }
        assert(bdrv_get_aio_context(bottom_bs) == aio_context);
    }

    /*
     * Check for op blockers in the whole chain between bs and base (or bottom)
     */
    iter_end = bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
    for (iter = bs; iter && iter != iter_end;
         iter = bdrv_filter_or_cow_bs(iter))
    {
        if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
            goto out;
        }
    }

    /* if we are streaming the entire chain, the result will have no backing
     * file, and specifying one is therefore an error */
    if (!base_bs && backing_file) {
        error_setg(errp, "backing file specified, but streaming the "
                   "entire chain");
        goto out;
    }

    if (has_auto_finalize && !auto_finalize) {
        job_flags |= JOB_MANUAL_FINALIZE;
    }
    if (has_auto_dismiss && !auto_dismiss) {
        job_flags |= JOB_MANUAL_DISMISS;
    }

    stream_start(job_id, bs, base_bs, backing_file,
                 bottom_bs, job_flags, has_speed ? speed : 0, on_error,
                 filter_node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    trace_qmp_block_stream(bs);

out:
    aio_context_release(aio_context);
}
  2202. void qmp_block_commit(const char *job_id, const char *device,
  2203. const char *base_node,
  2204. const char *base,
  2205. const char *top_node,
  2206. const char *top,
  2207. const char *backing_file,
  2208. bool has_speed, int64_t speed,
  2209. bool has_on_error, BlockdevOnError on_error,
  2210. const char *filter_node_name,
  2211. bool has_auto_finalize, bool auto_finalize,
  2212. bool has_auto_dismiss, bool auto_dismiss,
  2213. Error **errp)
  2214. {
  2215. BlockDriverState *bs;
  2216. BlockDriverState *iter;
  2217. BlockDriverState *base_bs, *top_bs;
  2218. AioContext *aio_context;
  2219. Error *local_err = NULL;
  2220. int job_flags = JOB_DEFAULT;
  2221. uint64_t top_perm, top_shared;
  2222. if (!has_speed) {
  2223. speed = 0;
  2224. }
  2225. if (!has_on_error) {
  2226. on_error = BLOCKDEV_ON_ERROR_REPORT;
  2227. }
  2228. if (has_auto_finalize && !auto_finalize) {
  2229. job_flags |= JOB_MANUAL_FINALIZE;
  2230. }
  2231. if (has_auto_dismiss && !auto_dismiss) {
  2232. job_flags |= JOB_MANUAL_DISMISS;
  2233. }
  2234. /* Important Note:
  2235. * libvirt relies on the DeviceNotFound error class in order to probe for
  2236. * live commit feature versions; for this to work, we must make sure to
  2237. * perform the device lookup before any generic errors that may occur in a
  2238. * scenario in which all optional arguments are omitted. */
  2239. bs = qmp_get_root_bs(device, &local_err);
  2240. if (!bs) {
  2241. bs = bdrv_lookup_bs(device, device, NULL);
  2242. if (!bs) {
  2243. error_free(local_err);
  2244. error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND,
  2245. "Device '%s' not found", device);
  2246. } else {
  2247. error_propagate(errp, local_err);
  2248. }
  2249. return;
  2250. }
  2251. aio_context = bdrv_get_aio_context(bs);
  2252. aio_context_acquire(aio_context);
  2253. if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) {
  2254. goto out;
  2255. }
  2256. /* default top_bs is the active layer */
  2257. top_bs = bs;
  2258. if (top_node && top) {
  2259. error_setg(errp, "'top-node' and 'top' are mutually exclusive");
  2260. goto out;
  2261. } else if (top_node) {
  2262. top_bs = bdrv_lookup_bs(NULL, top_node, errp);
  2263. if (top_bs == NULL) {
  2264. goto out;
  2265. }
  2266. if (!bdrv_chain_contains(bs, top_bs)) {
  2267. error_setg(errp, "'%s' is not in this backing file chain",
  2268. top_node);
  2269. goto out;
  2270. }
  2271. } else if (top) {
  2272. /* This strcmp() is just a shortcut, there is no need to
  2273. * refresh @bs's filename. If it mismatches,
  2274. * bdrv_find_backing_image() will do the refresh and may still
  2275. * return @bs. */
  2276. if (strcmp(bs->filename, top) != 0) {
  2277. top_bs = bdrv_find_backing_image(bs, top);
  2278. }
  2279. }
  2280. if (top_bs == NULL) {
  2281. error_setg(errp, "Top image file %s not found", top ? top : "NULL");
  2282. goto out;
  2283. }
  2284. assert(bdrv_get_aio_context(top_bs) == aio_context);
  2285. if (base_node && base) {
  2286. error_setg(errp, "'base-node' and 'base' are mutually exclusive");
  2287. goto out;
  2288. } else if (base_node) {
  2289. base_bs = bdrv_lookup_bs(NULL, base_node, errp);
  2290. if (base_bs == NULL) {
  2291. goto out;
  2292. }
  2293. if (!bdrv_chain_contains(top_bs, base_bs)) {
  2294. error_setg(errp, "'%s' is not in this backing file chain",
  2295. base_node);
  2296. goto out;
  2297. }
  2298. } else if (base) {
  2299. base_bs = bdrv_find_backing_image(top_bs, base);
  2300. if (base_bs == NULL) {
  2301. error_setg(errp, "Can't find '%s' in the backing chain", base);
  2302. goto out;
  2303. }
  2304. } else {
  2305. base_bs = bdrv_find_base(top_bs);
  2306. if (base_bs == NULL) {
  2307. error_setg(errp, "There is no backimg image");
  2308. goto out;
  2309. }
  2310. }
  2311. assert(bdrv_get_aio_context(base_bs) == aio_context);
  2312. for (iter = top_bs; iter != bdrv_filter_or_cow_bs(base_bs);
  2313. iter = bdrv_filter_or_cow_bs(iter))
  2314. {
  2315. if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
  2316. goto out;
  2317. }
  2318. }
  2319. /* Do not allow attempts to commit an image into itself */
  2320. if (top_bs == base_bs) {
  2321. error_setg(errp, "cannot commit an image into itself");
  2322. goto out;
  2323. }
  2324. /*
  2325. * Active commit is required if and only if someone has taken a
  2326. * WRITE permission on the top node. Historically, we have always
  2327. * used active commit for top nodes, so continue that practice
  2328. * lest we possibly break clients that rely on this behavior, e.g.
  2329. * to later attach this node to a writing parent.
  2330. * (Active commit is never really wrong.)
  2331. */
  2332. bdrv_get_cumulative_perm(top_bs, &top_perm, &top_shared);
  2333. if (top_perm & BLK_PERM_WRITE ||
  2334. bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs))
  2335. {
  2336. if (backing_file) {
  2337. if (bdrv_skip_filters(top_bs) == bdrv_skip_filters(bs)) {
  2338. error_setg(errp, "'backing-file' specified,"
  2339. " but 'top' is the active layer");
  2340. } else {
  2341. error_setg(errp, "'backing-file' specified, but 'top' has a "
  2342. "writer on it");
  2343. }
  2344. goto out;
  2345. }
  2346. if (!job_id) {
  2347. /*
  2348. * Emulate here what block_job_create() does, because it
  2349. * is possible that @bs != @top_bs (the block job should
  2350. * be named after @bs, even if @top_bs is the actual
  2351. * source)
  2352. */
  2353. job_id = bdrv_get_device_name(bs);
  2354. }
  2355. commit_active_start(job_id, top_bs, base_bs, job_flags, speed, on_error,
  2356. filter_node_name, NULL, NULL, false, &local_err);
  2357. } else {
  2358. BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
  2359. if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
  2360. goto out;
  2361. }
  2362. commit_start(job_id, bs, base_bs, top_bs, job_flags,
  2363. speed, on_error, backing_file,
  2364. filter_node_name, &local_err);
  2365. }
  2366. if (local_err != NULL) {
  2367. error_propagate(errp, local_err);
  2368. goto out;
  2369. }
  2370. out:
  2371. aio_context_release(aio_context);
  2372. }
/*
 * Common QMP interface for drive-backup and blockdev-backup.
 *
 * Validates @backup (the shared BackupCommon arguments), applies defaults
 * by mutating the struct in place, desugars the deprecated 'incremental'
 * sync mode into 'bitmap' + 'on-success', and creates (but does not start)
 * the backup job.  Returns the job, or NULL with @errp set.
 *
 * NOTE(review): caller presumably holds the AioContext of @bs — confirm
 * against the callers of this helper.
 */
static BlockJob *do_backup_common(BackupCommon *backup,
                                  BlockDriverState *bs,
                                  BlockDriverState *target_bs,
                                  AioContext *aio_context,
                                  JobTxn *txn, Error **errp)
{
    BlockJob *job = NULL;
    BdrvDirtyBitmap *bmap = NULL;
    /* Default performance tuning; may be overridden by x-perf below. */
    BackupPerf perf = { .max_workers = 64 };
    int job_flags = JOB_DEFAULT;

    /* Fill in defaults for any optional arguments the client omitted. */
    if (!backup->has_speed) {
        backup->speed = 0;
    }
    if (!backup->has_on_source_error) {
        backup->on_source_error = BLOCKDEV_ON_ERROR_REPORT;
    }
    if (!backup->has_on_target_error) {
        backup->on_target_error = BLOCKDEV_ON_ERROR_REPORT;
    }
    if (!backup->has_auto_finalize) {
        backup->auto_finalize = true;
    }
    if (!backup->has_auto_dismiss) {
        backup->auto_dismiss = true;
    }
    if (!backup->has_compress) {
        backup->compress = false;
    }

    /* Copy any client-specified performance knobs over the defaults. */
    if (backup->x_perf) {
        if (backup->x_perf->has_use_copy_range) {
            perf.use_copy_range = backup->x_perf->use_copy_range;
        }
        if (backup->x_perf->has_max_workers) {
            perf.max_workers = backup->x_perf->max_workers;
        }
        if (backup->x_perf->has_max_chunk) {
            perf.max_chunk = backup->x_perf->max_chunk;
        }
    }

    if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) ||
        (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) {
        /* done before desugaring 'incremental' to print the right message */
        if (!backup->bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                       "'%s' sync mode", MirrorSyncMode_str(backup->sync));
            return NULL;
        }
    }

    /* Desugar 'incremental' into 'bitmap' with on-success semantics. */
    if (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (backup->has_bitmap_mode &&
            backup->bitmap_mode != BITMAP_SYNC_MODE_ON_SUCCESS) {
            error_setg(errp, "Bitmap sync mode must be '%s' "
                       "when using sync mode '%s'",
                       BitmapSyncMode_str(BITMAP_SYNC_MODE_ON_SUCCESS),
                       MirrorSyncMode_str(backup->sync));
            return NULL;
        }
        backup->has_bitmap_mode = true;
        backup->sync = MIRROR_SYNC_MODE_BITMAP;
        backup->bitmap_mode = BITMAP_SYNC_MODE_ON_SUCCESS;
    }

    /* Resolve and sanity-check the bitmap/bitmap-mode combination. */
    if (backup->bitmap) {
        bmap = bdrv_find_dirty_bitmap(bs, backup->bitmap);
        if (!bmap) {
            error_setg(errp, "Bitmap '%s' could not be found", backup->bitmap);
            return NULL;
        }
        if (!backup->has_bitmap_mode) {
            error_setg(errp, "Bitmap sync mode must be given "
                       "when providing a bitmap");
            return NULL;
        }
        if (bdrv_dirty_bitmap_check(bmap, BDRV_BITMAP_ALLOW_RO, errp)) {
            return NULL;
        }

        /* This does not produce a useful bitmap artifact: */
        if (backup->sync == MIRROR_SYNC_MODE_NONE) {
            error_setg(errp, "sync mode '%s' does not produce meaningful bitmap"
                       " outputs", MirrorSyncMode_str(backup->sync));
            return NULL;
        }

        /* If the bitmap isn't used for input or output, this is useless: */
        if (backup->bitmap_mode == BITMAP_SYNC_MODE_NEVER &&
            backup->sync != MIRROR_SYNC_MODE_BITMAP) {
            error_setg(errp, "Bitmap sync mode '%s' has no meaningful effect"
                       " when combined with sync mode '%s'",
                       BitmapSyncMode_str(backup->bitmap_mode),
                       MirrorSyncMode_str(backup->sync));
            return NULL;
        }
    }

    if (!backup->bitmap && backup->has_bitmap_mode) {
        error_setg(errp, "Cannot specify bitmap sync mode without a bitmap");
        return NULL;
    }

    if (!backup->auto_finalize) {
        job_flags |= JOB_MANUAL_FINALIZE;
    }
    if (!backup->auto_dismiss) {
        job_flags |= JOB_MANUAL_DISMISS;
    }

    job = backup_job_create(backup->job_id, bs, target_bs, backup->speed,
                            backup->sync, bmap, backup->bitmap_mode,
                            backup->compress,
                            backup->filter_node_name,
                            &perf,
                            backup->on_source_error,
                            backup->on_target_error,
                            job_flags, NULL, NULL, txn, errp);
    return job;
}
  2485. void qmp_drive_backup(DriveBackup *backup, Error **errp)
  2486. {
  2487. TransactionAction action = {
  2488. .type = TRANSACTION_ACTION_KIND_DRIVE_BACKUP,
  2489. .u.drive_backup.data = backup,
  2490. };
  2491. blockdev_do_action(&action, errp);
  2492. }
  2493. BlockDeviceInfoList *qmp_query_named_block_nodes(bool has_flat,
  2494. bool flat,
  2495. Error **errp)
  2496. {
  2497. bool return_flat = has_flat && flat;
  2498. return bdrv_named_nodes_list(return_flat, errp);
  2499. }
  2500. XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
  2501. {
  2502. return bdrv_get_xdbg_block_graph(errp);
  2503. }
  2504. void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp)
  2505. {
  2506. TransactionAction action = {
  2507. .type = TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP,
  2508. .u.blockdev_backup.data = backup,
  2509. };
  2510. blockdev_do_action(&action, errp);
  2511. }
/*
 * Parameter check and block job starting for drive mirroring.
 *
 * Applies defaults for all optional arguments, validates the granularity,
 * checks op blockers on source and target, resolves the node to be
 * replaced at job completion, and finally starts the mirror job.
 *
 * Caller should hold @device and @target's aio context (must be the same).
 */
static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
                                   BlockDriverState *target,
                                   const char *replaces,
                                   enum MirrorSyncMode sync,
                                   BlockMirrorBackingMode backing_mode,
                                   bool zero_target,
                                   bool has_speed, int64_t speed,
                                   bool has_granularity, uint32_t granularity,
                                   bool has_buf_size, int64_t buf_size,
                                   bool has_on_source_error,
                                   BlockdevOnError on_source_error,
                                   bool has_on_target_error,
                                   BlockdevOnError on_target_error,
                                   bool has_unmap, bool unmap,
                                   const char *filter_node_name,
                                   bool has_copy_mode, MirrorCopyMode copy_mode,
                                   bool has_auto_finalize, bool auto_finalize,
                                   bool has_auto_dismiss, bool auto_dismiss,
                                   Error **errp)
{
    BlockDriverState *unfiltered_bs;
    int job_flags = JOB_DEFAULT;

    /* Fill in defaults for any optional arguments the client omitted. */
    if (!has_speed) {
        speed = 0;
    }
    if (!has_on_source_error) {
        on_source_error = BLOCKDEV_ON_ERROR_REPORT;
    }
    if (!has_on_target_error) {
        on_target_error = BLOCKDEV_ON_ERROR_REPORT;
    }
    if (!has_granularity) {
        granularity = 0;
    }
    if (!has_buf_size) {
        buf_size = 0;
    }
    if (!has_unmap) {
        unmap = true;
    }
    if (!has_copy_mode) {
        copy_mode = MIRROR_COPY_MODE_BACKGROUND;
    }
    if (has_auto_finalize && !auto_finalize) {
        job_flags |= JOB_MANUAL_FINALIZE;
    }
    if (has_auto_dismiss && !auto_dismiss) {
        job_flags |= JOB_MANUAL_DISMISS;
    }

    /* Granularity must be 0 (auto) or a power of two in [512B, 64MB]. */
    if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
                   "a value in range [512B, 64MB]");
        return;
    }
    if (granularity & (granularity - 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
                   "a power of 2");
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
        return;
    }
    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) {
        return;
    }

    /* 'top' sync degenerates to 'full' when there is no backing chain. */
    if (!bdrv_backing_chain_next(bs) && sync == MIRROR_SYNC_MODE_TOP) {
        sync = MIRROR_SYNC_MODE_FULL;
    }

    if (!replaces) {
        /* We want to mirror from @bs, but keep implicit filters on top */
        unfiltered_bs = bdrv_skip_implicit_filters(bs);
        if (unfiltered_bs != bs) {
            replaces = unfiltered_bs->node_name;
        }
    }

    if (replaces) {
        BlockDriverState *to_replace_bs;
        AioContext *replace_aio_context;
        int64_t bs_size, replace_size;

        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size, "Failed to query device's size");
            return;
        }

        to_replace_bs = check_to_replace_node(bs, replaces, errp);
        if (!to_replace_bs) {
            return;
        }

        /* Query the replacement's size under its own AioContext. */
        replace_aio_context = bdrv_get_aio_context(to_replace_bs);
        aio_context_acquire(replace_aio_context);
        replace_size = bdrv_getlength(to_replace_bs);
        aio_context_release(replace_aio_context);

        if (replace_size < 0) {
            error_setg_errno(errp, -replace_size,
                             "Failed to query the replacement node's size");
            return;
        }
        if (bs_size != replace_size) {
            error_setg(errp, "cannot replace image with a mirror image of "
                             "different size");
            return;
        }
    }

    /* pass the node name to replace to mirror start since it's loose coupling
     * and will allow to check whether the node still exist at mirror completion
     */
    mirror_start(job_id, bs, target,
                 replaces, job_flags,
                 speed, granularity, buf_size, sync, backing_mode, zero_target,
                 on_source_error, on_target_error, unmap, filter_node_name,
                 copy_mode, errp);
}
/*
 * QMP 'drive-mirror': create (or open) the target image for @arg->device,
 * move it into the source's AioContext, and start a mirror job via
 * blockdev_mirror_common().
 *
 * Unless mode is 'existing', the target image file is created here first
 * (with or without a backing file, depending on the sync mode).
 */
void qmp_drive_mirror(DriveMirror *arg, Error **errp)
{
    BlockDriverState *bs;
    BlockDriverState *target_backing_bs, *target_bs;
    AioContext *aio_context;
    AioContext *old_context;
    BlockMirrorBackingMode backing_mode;
    Error *local_err = NULL;
    QDict *options = NULL;
    int flags;
    int64_t size;
    const char *format = arg->format;
    bool zero_target;
    int ret;

    bs = qmp_get_root_bs(arg->device, errp);
    if (!bs) {
        return;
    }

    /* Early check to avoid creating target */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (!arg->has_mode) {
        arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS;
    }

    /* Default target format: same as the source, unless reusing an image. */
    if (!arg->format) {
        format = (arg->mode == NEW_IMAGE_MODE_EXISTING
                  ? NULL : bs->drv->format_name);
    }

    flags = bs->open_flags | BDRV_O_RDWR;
    target_backing_bs = bdrv_cow_bs(bdrv_skip_filters(bs));
    /* 'top' sync degenerates to 'full' when there is no backing file. */
    if (!target_backing_bs && arg->sync == MIRROR_SYNC_MODE_TOP) {
        arg->sync = MIRROR_SYNC_MODE_FULL;
    }
    if (arg->sync == MIRROR_SYNC_MODE_NONE) {
        target_backing_bs = bs;
    }

    size = bdrv_getlength(bs);
    if (size < 0) {
        error_setg_errno(errp, -size, "bdrv_getlength failed");
        goto out;
    }

    if (arg->replaces) {
        if (!arg->node_name) {
            error_setg(errp, "a node-name must be provided when replacing a"
                             " named node of the graph");
            goto out;
        }
    }

    if (arg->mode == NEW_IMAGE_MODE_ABSOLUTE_PATHS) {
        backing_mode = MIRROR_SOURCE_BACKING_CHAIN;
    } else {
        backing_mode = MIRROR_OPEN_BACKING_CHAIN;
    }

    /* Don't open backing image in create() */
    flags |= BDRV_O_NO_BACKING;

    if ((arg->sync == MIRROR_SYNC_MODE_FULL || !target_backing_bs)
        && arg->mode != NEW_IMAGE_MODE_EXISTING)
    {
        /* create new image w/o backing file */
        assert(format);
        bdrv_img_create(arg->target, format,
                        NULL, NULL, NULL, size, flags, false, &local_err);
    } else {
        /* Implicit filters should not appear in the filename */
        BlockDriverState *explicit_backing =
            bdrv_skip_implicit_filters(target_backing_bs);

        switch (arg->mode) {
        case NEW_IMAGE_MODE_EXISTING:
            break;
        case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
            /* create new image with backing file */
            bdrv_refresh_filename(explicit_backing);
            bdrv_img_create(arg->target, format,
                            explicit_backing->filename,
                            explicit_backing->drv->format_name,
                            NULL, size, flags, false, &local_err);
            break;
        default:
            /* Unreachable: the QAPI enum has no other values here. */
            abort();
        }
    }

    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    options = qdict_new();
    if (arg->node_name) {
        qdict_put_str(options, "node-name", arg->node_name);
    }
    if (format) {
        qdict_put_str(options, "driver", format);
    }

    /* Mirroring takes care of copy-on-write using the source's backing
     * file.
     */
    target_bs = bdrv_open(arg->target, NULL, options, flags, errp);
    if (!target_bs) {
        goto out;
    }

    /* Only pre-zero the target when full sync could read actual zeroes. */
    zero_target = (arg->sync == MIRROR_SYNC_MODE_FULL &&
                   (arg->mode == NEW_IMAGE_MODE_EXISTING ||
                    !bdrv_has_zero_init(target_bs)));

    /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
    old_context = bdrv_get_aio_context(target_bs);
    aio_context_release(aio_context);
    aio_context_acquire(old_context);

    ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
    if (ret < 0) {
        bdrv_unref(target_bs);
        aio_context_release(old_context);
        return;
    }

    aio_context_release(old_context);
    aio_context_acquire(aio_context);

    blockdev_mirror_common(arg->job_id, bs, target_bs,
                           arg->replaces, arg->sync,
                           backing_mode, zero_target,
                           arg->has_speed, arg->speed,
                           arg->has_granularity, arg->granularity,
                           arg->has_buf_size, arg->buf_size,
                           arg->has_on_source_error, arg->on_source_error,
                           arg->has_on_target_error, arg->on_target_error,
                           arg->has_unmap, arg->unmap,
                           NULL,
                           arg->has_copy_mode, arg->copy_mode,
                           arg->has_auto_finalize, arg->auto_finalize,
                           arg->has_auto_dismiss, arg->auto_dismiss,
                           errp);
    /* Drop our reference; the job holds its own if it started. */
    bdrv_unref(target_bs);
out:
    aio_context_release(aio_context);
}
/*
 * QMP 'blockdev-mirror': mirror @device onto an already-existing node
 * @target.  Unlike drive-mirror, no image is created here; the target is
 * simply moved into the source's AioContext and the common mirror helper
 * is invoked (with unmap forced on and no backing-chain manipulation).
 */
void qmp_blockdev_mirror(const char *job_id,
                         const char *device, const char *target,
                         const char *replaces,
                         MirrorSyncMode sync,
                         bool has_speed, int64_t speed,
                         bool has_granularity, uint32_t granularity,
                         bool has_buf_size, int64_t buf_size,
                         bool has_on_source_error,
                         BlockdevOnError on_source_error,
                         bool has_on_target_error,
                         BlockdevOnError on_target_error,
                         const char *filter_node_name,
                         bool has_copy_mode, MirrorCopyMode copy_mode,
                         bool has_auto_finalize, bool auto_finalize,
                         bool has_auto_dismiss, bool auto_dismiss,
                         Error **errp)
{
    BlockDriverState *bs;
    BlockDriverState *target_bs;
    AioContext *aio_context;
    AioContext *old_context;
    BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN;
    bool zero_target;
    int ret;

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return;
    }

    target_bs = bdrv_lookup_bs(target, target, errp);
    if (!target_bs) {
        return;
    }

    zero_target = (sync == MIRROR_SYNC_MODE_FULL);

    /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
    old_context = bdrv_get_aio_context(target_bs);
    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(old_context);

    ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);

    aio_context_release(old_context);
    aio_context_acquire(aio_context);

    if (ret < 0) {
        goto out;
    }

    blockdev_mirror_common(job_id, bs, target_bs,
                           replaces, sync, backing_mode,
                           zero_target, has_speed, speed,
                           has_granularity, granularity,
                           has_buf_size, buf_size,
                           has_on_source_error, on_source_error,
                           has_on_target_error, on_target_error,
                           true, true, filter_node_name,
                           has_copy_mode, copy_mode,
                           has_auto_finalize, auto_finalize,
                           has_auto_dismiss, auto_dismiss,
                           errp);
out:
    aio_context_release(aio_context);
}
  2820. /*
  2821. * Get a block job using its ID. Called with job_mutex held.
  2822. */
  2823. static BlockJob *find_block_job_locked(const char *id, Error **errp)
  2824. {
  2825. BlockJob *job;
  2826. assert(id != NULL);
  2827. job = block_job_get_locked(id);
  2828. if (!job) {
  2829. error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
  2830. "Block job '%s' not found", id);
  2831. return NULL;
  2832. }
  2833. return job;
  2834. }
  2835. void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
  2836. {
  2837. BlockJob *job;
  2838. JOB_LOCK_GUARD();
  2839. job = find_block_job_locked(device, errp);
  2840. if (!job) {
  2841. return;
  2842. }
  2843. block_job_set_speed_locked(job, speed, errp);
  2844. }
  2845. void qmp_block_job_cancel(const char *device,
  2846. bool has_force, bool force, Error **errp)
  2847. {
  2848. BlockJob *job;
  2849. JOB_LOCK_GUARD();
  2850. job = find_block_job_locked(device, errp);
  2851. if (!job) {
  2852. return;
  2853. }
  2854. if (!has_force) {
  2855. force = false;
  2856. }
  2857. if (job_user_paused_locked(&job->job) && !force) {
  2858. error_setg(errp, "The block job for device '%s' is currently paused",
  2859. device);
  2860. return;
  2861. }
  2862. trace_qmp_block_job_cancel(job);
  2863. job_user_cancel_locked(&job->job, force, errp);
  2864. }
  2865. void qmp_block_job_pause(const char *device, Error **errp)
  2866. {
  2867. BlockJob *job;
  2868. JOB_LOCK_GUARD();
  2869. job = find_block_job_locked(device, errp);
  2870. if (!job) {
  2871. return;
  2872. }
  2873. trace_qmp_block_job_pause(job);
  2874. job_user_pause_locked(&job->job, errp);
  2875. }
  2876. void qmp_block_job_resume(const char *device, Error **errp)
  2877. {
  2878. BlockJob *job;
  2879. JOB_LOCK_GUARD();
  2880. job = find_block_job_locked(device, errp);
  2881. if (!job) {
  2882. return;
  2883. }
  2884. trace_qmp_block_job_resume(job);
  2885. job_user_resume_locked(&job->job, errp);
  2886. }
  2887. void qmp_block_job_complete(const char *device, Error **errp)
  2888. {
  2889. BlockJob *job;
  2890. JOB_LOCK_GUARD();
  2891. job = find_block_job_locked(device, errp);
  2892. if (!job) {
  2893. return;
  2894. }
  2895. trace_qmp_block_job_complete(job);
  2896. job_complete_locked(&job->job, errp);
  2897. }
/* QMP 'block-job-finalize': finalize a job that used manual-finalize. */
void qmp_block_job_finalize(const char *id, Error **errp)
{
    BlockJob *job;

    JOB_LOCK_GUARD();
    job = find_block_job_locked(id, errp);

    if (!job) {
        return;
    }

    trace_qmp_block_job_finalize(job);

    /*
     * Hold an extra reference across the finalize call so @job stays
     * valid; NOTE(review): presumably job_finalize_locked() can drop the
     * job lock or dismiss the job internally — confirm against job.c.
     */
    job_ref_locked(&job->job);
    job_finalize_locked(&job->job, errp);
    job_unref_locked(&job->job);
}
  2911. void qmp_block_job_dismiss(const char *id, Error **errp)
  2912. {
  2913. BlockJob *bjob;
  2914. Job *job;
  2915. JOB_LOCK_GUARD();
  2916. bjob = find_block_job_locked(id, errp);
  2917. if (!bjob) {
  2918. return;
  2919. }
  2920. trace_qmp_block_job_dismiss(bjob);
  2921. job = &bjob->job;
  2922. job_dismiss_locked(&job, errp);
  2923. }
/*
 * QMP 'change-backing-file': rewrite the backing-file string recorded in
 * the image header of @image_node_name (which must be in @device's chain).
 *
 * If the image is read-only it is temporarily reopened read-write for the
 * header update and switched back afterwards.
 */
void qmp_change_backing_file(const char *device,
                             const char *image_node_name,
                             const char *backing_file,
                             Error **errp)
{
    BlockDriverState *bs = NULL;
    AioContext *aio_context;
    BlockDriverState *image_bs = NULL;
    Error *local_err = NULL;
    bool ro;
    int ret;

    bs = qmp_get_root_bs(device, errp);
    if (!bs) {
        return;
    }

    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (!image_bs) {
        error_setg(errp, "image file not found");
        goto out;
    }

    /* The base of the chain has nothing below it to point at. */
    if (bdrv_find_base(image_bs) == image_bs) {
        error_setg(errp, "not allowing backing file change on an image "
                         "without a backing file");
        goto out;
    }

    /* even though we are not necessarily operating on bs, we need it to
     * determine if block ops are currently prohibited on the chain */
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
        goto out;
    }

    /* final sanity check */
    if (!bdrv_chain_contains(bs, image_bs)) {
        error_setg(errp, "'%s' and image file are not in the same chain",
                   device);
        goto out;
    }

    /* if not r/w, reopen to make r/w */
    ro = bdrv_is_read_only(image_bs);

    if (ro) {
        if (bdrv_reopen_set_read_only(image_bs, false, errp) != 0) {
            goto out;
        }
    }

    ret = bdrv_change_backing_file(image_bs, backing_file,
                                   image_bs->drv ? image_bs->drv->format_name : "",
                                   false);

    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not change backing file to '%s'",
                         backing_file);
        /* don't exit here, so we can try to restore open flags if
         * appropriate */
    }

    /* Restore the original read-only state even if the update failed. */
    if (ro) {
        bdrv_reopen_set_read_only(image_bs, true, errp);
    }

out:
    aio_context_release(aio_context);
}
/*
 * QMP 'blockdev-add': create a new monitor-owned root node from @options.
 *
 * The QAPI options are serialized into a flattened QDict (the form the
 * block layer's open code expects) before the node tree is built.
 */
void qmp_blockdev_add(BlockdevOptions *options, Error **errp)
{
    BlockDriverState *bs;
    QObject *obj;
    Visitor *v = qobject_output_visitor_new(&obj);
    QDict *qdict;

    /* Convert the QAPI struct back into a QDict for bds_tree_init(). */
    visit_type_BlockdevOptions(v, NULL, &options, &error_abort);
    visit_complete(v, &obj);
    qdict = qobject_to(QDict, obj);

    qdict_flatten(qdict);

    if (!qdict_get_try_str(qdict, "node-name")) {
        error_setg(errp, "'node-name' must be specified for the root node");
        goto fail;
    }

    bs = bds_tree_init(qdict, errp);
    if (!bs) {
        goto fail;
    }

    /* Track the node so qmp_blockdev_del() can later remove it. */
    bdrv_set_monitor_owned(bs);

fail:
    /* Freeing the visitor also releases @obj/@qdict. */
    visit_free(v);
}
/*
 * QMP 'blockdev-reopen': atomically reopen a set of nodes with new
 * options.  All nodes are queued first, then reopened in one multi-node
 * transaction; on any queueing error the partially built queue is freed
 * without reopening anything.
 */
void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp)
{
    BlockReopenQueue *queue = NULL;

    /* Add each one of the BDS that we want to reopen to the queue */
    for (; reopen_list != NULL; reopen_list = reopen_list->next) {
        BlockdevOptions *options = reopen_list->value;
        BlockDriverState *bs;
        AioContext *ctx;
        QObject *obj;
        Visitor *v;
        QDict *qdict;

        /* Check for the selected node name */
        if (!options->node_name) {
            error_setg(errp, "node-name not specified");
            goto fail;
        }

        bs = bdrv_find_node(options->node_name);
        if (!bs) {
            error_setg(errp, "Failed to find node with node-name='%s'",
                       options->node_name);
            goto fail;
        }

        /* Put all options in a QDict and flatten it */
        v = qobject_output_visitor_new(&obj);
        visit_type_BlockdevOptions(v, NULL, &options, &error_abort);
        visit_complete(v, &obj);
        visit_free(v);

        qdict = qobject_to(QDict, obj);

        qdict_flatten(qdict);

        /* Queueing must happen under the node's own AioContext. */
        ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);

        queue = bdrv_reopen_queue(queue, bs, qdict, false);

        aio_context_release(ctx);
    }

    /* Perform the reopen operation */
    bdrv_reopen_multiple(queue, errp);
    /* bdrv_reopen_multiple() consumed the queue; don't free it again. */
    queue = NULL;

fail:
    bdrv_reopen_queue_free(queue);
}
/*
 * QMP 'blockdev-del': remove a monitor-owned node created with
 * blockdev-add.  Fails if the node is attached to a BlockBackend,
 * op-blocked, not monitor-owned, or still referenced elsewhere.
 */
void qmp_blockdev_del(const char *node_name, Error **errp)
{
    AioContext *aio_context;
    BlockDriverState *bs;

    GLOBAL_STATE_CODE();

    bs = bdrv_find_node(node_name);
    if (!bs) {
        error_setg(errp, "Failed to find node with node-name='%s'", node_name);
        return;
    }
    if (bdrv_has_blk(bs)) {
        error_setg(errp, "Node %s is in use", node_name);
        return;
    }
    aio_context = bdrv_get_aio_context(bs);
    aio_context_acquire(aio_context);

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) {
        goto out;
    }

    /* Only nodes added via blockdev-add are on the monitor list. */
    if (!QTAILQ_IN_USE(bs, monitor_list)) {
        error_setg(errp, "Node %s is not owned by the monitor",
                   bs->node_name);
        goto out;
    }

    /* refcnt > 1 means some other user still holds a reference. */
    if (bs->refcnt > 1) {
        error_setg(errp, "Block device %s is in use",
                   bdrv_get_device_or_node_name(bs));
        goto out;
    }

    QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list);
    bdrv_unref(bs);

out:
    aio_context_release(aio_context);
}
  3084. static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs,
  3085. const char *child_name)
  3086. {
  3087. BdrvChild *child;
  3088. QLIST_FOREACH(child, &parent_bs->children, next) {
  3089. if (strcmp(child->name, child_name) == 0) {
  3090. return child;
  3091. }
  3092. }
  3093. return NULL;
  3094. }
  3095. void qmp_x_blockdev_change(const char *parent, const char *child,
  3096. const char *node, Error **errp)
  3097. {
  3098. BlockDriverState *parent_bs, *new_bs = NULL;
  3099. BdrvChild *p_child;
  3100. parent_bs = bdrv_lookup_bs(parent, parent, errp);
  3101. if (!parent_bs) {
  3102. return;
  3103. }
  3104. if (!child == !node) {
  3105. if (child) {
  3106. error_setg(errp, "The parameters child and node are in conflict");
  3107. } else {
  3108. error_setg(errp, "Either child or node must be specified");
  3109. }
  3110. return;
  3111. }
  3112. if (child) {
  3113. p_child = bdrv_find_child(parent_bs, child);
  3114. if (!p_child) {
  3115. error_setg(errp, "Node '%s' does not have child '%s'",
  3116. parent, child);
  3117. return;
  3118. }
  3119. bdrv_del_child(parent_bs, p_child, errp);
  3120. }
  3121. if (node) {
  3122. new_bs = bdrv_find_node(node);
  3123. if (!new_bs) {
  3124. error_setg(errp, "Node '%s' not found", node);
  3125. return;
  3126. }
  3127. bdrv_add_child(parent_bs, new_bs, errp);
  3128. }
  3129. }
  3130. BlockJobInfoList *qmp_query_block_jobs(Error **errp)
  3131. {
  3132. BlockJobInfoList *head = NULL, **tail = &head;
  3133. BlockJob *job;
  3134. JOB_LOCK_GUARD();
  3135. for (job = block_job_next_locked(NULL); job;
  3136. job = block_job_next_locked(job)) {
  3137. BlockJobInfo *value;
  3138. if (block_job_is_internal(job)) {
  3139. continue;
  3140. }
  3141. value = block_job_query_locked(job, errp);
  3142. if (!value) {
  3143. qapi_free_BlockJobInfoList(head);
  3144. return NULL;
  3145. }
  3146. QAPI_LIST_APPEND(tail, value);
  3147. }
  3148. return head;
  3149. }
/*
 * QMP 'x-blockdev-set-iothread': move node @node_name into the AioContext
 * of the given iothread (or back to the main context when @iothread is
 * null).  Refused for nodes attached to a BlockBackend unless @force.
 */
void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
                                 bool has_force, bool force, Error **errp)
{
    AioContext *old_context;
    AioContext *new_context;
    BlockDriverState *bs;

    bs = bdrv_find_node(node_name);
    if (!bs) {
        error_setg(errp, "Failed to find node with node-name='%s'", node_name);
        return;
    }

    /* Protects against accidents. */
    if (!(has_force && force) && bdrv_has_blk(bs)) {
        error_setg(errp, "Node %s is associated with a BlockBackend and could "
                         "be in use (use force=true to override this check)",
                   node_name);
        return;
    }

    /* A string selects a named iothread; JSON null means the main loop. */
    if (iothread->type == QTYPE_QSTRING) {
        IOThread *obj = iothread_by_id(iothread->u.s);
        if (!obj) {
            error_setg(errp, "Cannot find iothread %s", iothread->u.s);
            return;
        }

        new_context = iothread_get_aio_context(obj);
    } else {
        new_context = qemu_get_aio_context();
    }

    old_context = bdrv_get_aio_context(bs);
    aio_context_acquire(old_context);

    bdrv_try_change_aio_context(bs, new_context, NULL, errp);

    aio_context_release(old_context);
}
/*
 * Option schema shared by the -drive variants: the block-layer options
 * (cache mode, format, error actions, throttling, statistics tuning)
 * that are validated against this descriptor table.
 */
QemuOptsList qemu_common_drive_opts = {
    .name = "drive",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
    .desc = {
        {
            .name = "snapshot",
            .type = QEMU_OPT_BOOL,
            .help = "enable/disable snapshot mode",
        },{
            .name = "aio",
            .type = QEMU_OPT_STRING,
            .help = "host AIO implementation (threads, native, io_uring)",
        },{
            .name = BDRV_OPT_CACHE_WB,
            .type = QEMU_OPT_BOOL,
            .help = "Enable writeback mode",
        },{
            .name = "format",
            .type = QEMU_OPT_STRING,
            .help = "disk format (raw, qcow2, ...)",
        },{
            .name = "rerror",
            .type = QEMU_OPT_STRING,
            .help = "read error action",
        },{
            .name = "werror",
            .type = QEMU_OPT_STRING,
            .help = "write error action",
        },{
            .name = BDRV_OPT_READ_ONLY,
            .type = QEMU_OPT_BOOL,
            .help = "open drive file as read-only",
        },
        /*
         * Presumably expands to the descriptors for the throttling.*
         * rate-limit options — defined elsewhere; confirm against the
         * throttle options header.
         */
        THROTTLE_OPTS,

        {
            .name = "throttling.group",
            .type = QEMU_OPT_STRING,
            .help = "name of the block throttling group",
        },{
            .name = "copy-on-read",
            .type = QEMU_OPT_BOOL,
            .help = "copy read data from backing file into image file",
        },{
            .name = "detect-zeroes",
            .type = QEMU_OPT_STRING,
            .help = "try to optimize zero writes (off, on, unmap)",
        },{
            .name = "stats-account-invalid",
            .type = QEMU_OPT_BOOL,
            .help = "whether to account for invalid I/O operations "
                    "in the statistics",
        },{
            .name = "stats-account-failed",
            .type = QEMU_OPT_BOOL,
            .help = "whether to account for failed I/O operations "
                    "in the statistics",
        },
        { /* end of list */ }
    },
};
/*
 * Catch-all -drive option list: deliberately has no descriptors, so
 * option parsing accepts arbitrary keys here and defers validation to
 * a later stage.
 */
QemuOptsList qemu_drive_opts = {
    .name = "drive",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_drive_opts.head),
    .desc = {
        /*
         * no elements => accept any params
         * validation will happen later
         */
        { /* end of list */ }
    },
};