file-posix.c

  1. /*
  2. * Block driver for RAW files (posix)
  3. *
  4. * Copyright (c) 2006 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "qapi/error.h"
  26. #include "qemu/cutils.h"
  27. #include "qemu/error-report.h"
  28. #include "block/block_int.h"
  29. #include "qemu/module.h"
  30. #include "qemu/option.h"
  31. #include "qemu/units.h"
  32. #include "qemu/memalign.h"
  33. #include "trace.h"
  34. #include "block/thread-pool.h"
  35. #include "qemu/iov.h"
  36. #include "block/raw-aio.h"
  37. #include "qapi/qmp/qdict.h"
  38. #include "qapi/qmp/qstring.h"
  39. #include "scsi/pr-manager.h"
  40. #include "scsi/constants.h"
  41. #if defined(__APPLE__) && (__MACH__)
  42. #include <sys/ioctl.h>
  43. #if defined(HAVE_HOST_BLOCK_DEVICE)
  44. #include <paths.h>
  45. #include <sys/param.h>
  46. #include <sys/mount.h>
  47. #include <IOKit/IOKitLib.h>
  48. #include <IOKit/IOBSD.h>
  49. #include <IOKit/storage/IOMediaBSDClient.h>
  50. #include <IOKit/storage/IOMedia.h>
  51. #include <IOKit/storage/IOCDMedia.h>
  52. //#include <IOKit/storage/IOCDTypes.h>
  53. #include <IOKit/storage/IODVDMedia.h>
  54. #include <CoreFoundation/CoreFoundation.h>
  55. #endif /* defined(HAVE_HOST_BLOCK_DEVICE) */
  56. #endif
  57. #ifdef __sun__
  58. #define _POSIX_PTHREAD_SEMANTICS 1
  59. #include <sys/dkio.h>
  60. #endif
  61. #ifdef __linux__
  62. #include <sys/ioctl.h>
  63. #include <sys/param.h>
  64. #include <sys/syscall.h>
  65. #include <sys/vfs.h>
  66. #include <linux/cdrom.h>
  67. #include <linux/fd.h>
  68. #include <linux/fs.h>
  69. #include <linux/hdreg.h>
  70. #include <linux/magic.h>
  71. #include <scsi/sg.h>
  72. #ifdef __s390__
  73. #include <asm/dasd.h>
  74. #endif
  75. #ifndef FS_NOCOW_FL
  76. #define FS_NOCOW_FL 0x00800000 /* Do not cow file */
  77. #endif
  78. #endif
  79. #if defined(CONFIG_FALLOCATE_PUNCH_HOLE) || defined(CONFIG_FALLOCATE_ZERO_RANGE)
  80. #include <linux/falloc.h>
  81. #endif
  82. #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
  83. #include <sys/disk.h>
  84. #include <sys/cdio.h>
  85. #endif
  86. #ifdef __OpenBSD__
  87. #include <sys/ioctl.h>
  88. #include <sys/disklabel.h>
  89. #include <sys/dkio.h>
  90. #endif
  91. #ifdef __NetBSD__
  92. #include <sys/ioctl.h>
  93. #include <sys/disklabel.h>
  94. #include <sys/dkio.h>
  95. #include <sys/disk.h>
  96. #endif
  97. #ifdef __DragonFly__
  98. #include <sys/ioctl.h>
  99. #include <sys/diskslice.h>
  100. #endif
  101. /* OS X does not have O_DSYNC */
  102. #ifndef O_DSYNC
  103. #ifdef O_SYNC
  104. #define O_DSYNC O_SYNC
  105. #elif defined(O_FSYNC)
  106. #define O_DSYNC O_FSYNC
  107. #endif
  108. #endif
  109. /* Approximate O_DIRECT with O_DSYNC if O_DIRECT isn't available */
  110. #ifndef O_DIRECT
  111. #define O_DIRECT O_DSYNC
  112. #endif
  113. #define FTYPE_FILE 0
  114. #define FTYPE_CD 1
  115. #define MAX_BLOCKSIZE 4096
  116. /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes,
  117. * leaving a few more bytes for its future use. */
  118. #define RAW_LOCK_PERM_BASE 100
  119. #define RAW_LOCK_SHARED_BASE 200
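/*
 * Layout used by the locking helpers below: a process that uses permission
 * bit i locks byte RAW_LOCK_PERM_BASE + i, and a process that does NOT share
 * permission bit i locks byte RAW_LOCK_SHARED_BASE + i. Conflicts are then
 * detected by test-locking the opposite byte (see raw_apply_lock_bytes() and
 * raw_check_lock_bytes()).
 */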
  120. typedef struct BDRVRawState {
  121. int fd;
  122. bool use_lock;
  123. int type;
  124. int open_flags;
  125. size_t buf_align;
  126. /* The current permissions. */
  127. uint64_t perm;
  128. uint64_t shared_perm;
  129. /* The permission bits whose corresponding bytes are already locked in
  130. * s->fd. */
  131. uint64_t locked_perm;
  132. uint64_t locked_shared_perm;
  133. uint64_t aio_max_batch;
  134. int perm_change_fd;
  135. int perm_change_flags;
  136. BDRVReopenState *reopen_state;
  137. bool has_discard:1;
  138. bool has_write_zeroes:1;
  139. bool discard_zeroes:1;
  140. bool use_linux_aio:1;
  141. bool use_linux_io_uring:1;
  142. int page_cache_inconsistent; /* errno from fdatasync failure */
  143. bool has_fallocate;
  144. bool needs_alignment;
  145. bool force_alignment;
  146. bool drop_cache;
  147. bool check_cache_dropped;
  148. struct {
  149. uint64_t discard_nb_ok;
  150. uint64_t discard_nb_failed;
  151. uint64_t discard_bytes_ok;
  152. } stats;
  153. PRManager *pr_mgr;
  154. } BDRVRawState;
  155. typedef struct BDRVRawReopenState {
  156. int open_flags;
  157. bool drop_cache;
  158. bool check_cache_dropped;
  159. } BDRVRawReopenState;
  160. static int fd_open(BlockDriverState *bs)
  161. {
  162. BDRVRawState *s = bs->opaque;
  163. /* this is just to ensure s->fd is sane (it's called by I/O ops) */
  164. if (s->fd >= 0) {
  165. return 0;
  166. }
  167. return -EIO;
  168. }
  169. static int64_t raw_getlength(BlockDriverState *bs);
  170. typedef struct RawPosixAIOData {
  171. BlockDriverState *bs;
  172. int aio_type;
  173. int aio_fildes;
  174. off_t aio_offset;
  175. uint64_t aio_nbytes;
  176. union {
  177. struct {
  178. struct iovec *iov;
  179. int niov;
  180. } io;
  181. struct {
  182. uint64_t cmd;
  183. void *buf;
  184. } ioctl;
  185. struct {
  186. int aio_fd2;
  187. off_t aio_offset2;
  188. } copy_range;
  189. struct {
  190. PreallocMode prealloc;
  191. Error **errp;
  192. } truncate;
  193. };
  194. } RawPosixAIOData;
  195. #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  196. static int cdrom_reopen(BlockDriverState *bs);
  197. #endif
  198. /*
  199. * Elide EAGAIN and EACCES details when failing to lock, as this
  200. * indicates that the specified file region is already locked by
  201. * another process, which is considered a common scenario.
  202. */
  203. #define raw_lock_error_setg_errno(errp, err, fmt, ...) \
  204. do { \
  205. if ((err) == EAGAIN || (err) == EACCES) { \
  206. error_setg((errp), (fmt), ## __VA_ARGS__); \
  207. } else { \
  208. error_setg_errno((errp), (err), (fmt), ## __VA_ARGS__); \
  209. } \
  210. } while (0)
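/*
 * Typical use, as in raw_apply_lock_bytes() below:
 *
 *     raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d", off);
 */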
  211. #if defined(__NetBSD__)
  212. static int raw_normalize_devicepath(const char **filename, Error **errp)
  213. {
  214. static char namebuf[PATH_MAX];
  215. const char *dp, *fname;
  216. struct stat sb;
  217. fname = *filename;
  218. dp = strrchr(fname, '/');
  219. if (lstat(fname, &sb) < 0) {
  220. error_setg_file_open(errp, errno, fname);
  221. return -errno;
  222. }
  223. if (!S_ISBLK(sb.st_mode)) {
  224. return 0;
  225. }
  226. if (dp == NULL) {
  227. snprintf(namebuf, PATH_MAX, "r%s", fname);
  228. } else {
  229. snprintf(namebuf, PATH_MAX, "%.*s/r%s",
  230. (int)(dp - fname), fname, dp + 1);
  231. }
  232. *filename = namebuf;
  233. warn_report("%s is a block device, using %s", fname, *filename);
  234. return 0;
  235. }
  236. #else
  237. static int raw_normalize_devicepath(const char **filename, Error **errp)
  238. {
  239. return 0;
  240. }
  241. #endif
  242. /*
  243. * Get logical block size via ioctl. On success store it in @sector_size_p.
  244. */
  245. static int probe_logical_blocksize(int fd, unsigned int *sector_size_p)
  246. {
  247. unsigned int sector_size;
  248. bool success = false;
  249. int i;
  250. errno = ENOTSUP;
  251. static const unsigned long ioctl_list[] = {
  252. #ifdef BLKSSZGET
  253. BLKSSZGET,
  254. #endif
  255. #ifdef DKIOCGETBLOCKSIZE
  256. DKIOCGETBLOCKSIZE,
  257. #endif
  258. #ifdef DIOCGSECTORSIZE
  259. DIOCGSECTORSIZE,
  260. #endif
  261. };
  262. /* Try a few ioctls to get the right size */
  263. for (i = 0; i < (int)ARRAY_SIZE(ioctl_list); i++) {
  264. if (ioctl(fd, ioctl_list[i], &sector_size) >= 0) {
  265. *sector_size_p = sector_size;
  266. success = true;
  267. }
  268. }
  269. return success ? 0 : -errno;
  270. }
  271. /**
  272. * Get physical block size of @fd.
  273. * On success, store it in @blk_size and return 0.
  274. * On failure, return -errno.
  275. */
  276. static int probe_physical_blocksize(int fd, unsigned int *blk_size)
  277. {
  278. #ifdef BLKPBSZGET
  279. if (ioctl(fd, BLKPBSZGET, blk_size) < 0) {
  280. return -errno;
  281. }
  282. return 0;
  283. #else
  284. return -ENOTSUP;
  285. #endif
  286. }
  287. /*
  288. * Returns true if no alignment restrictions are necessary even for files
  289. * opened with O_DIRECT.
  290. *
  291. * raw_probe_alignment() probes the required alignment and assumes that 1 means
  292. * the probing failed, so it falls back to a safe default of 4k. This can be
  293. * avoided if we know that byte alignment is okay for the file.
  294. */
  295. static bool dio_byte_aligned(int fd)
  296. {
  297. #ifdef __linux__
  298. struct statfs buf;
  299. int ret;
  300. ret = fstatfs(fd, &buf);
  301. if (ret == 0 && buf.f_type == NFS_SUPER_MAGIC) {
  302. return true;
  303. }
  304. #endif
  305. return false;
  306. }
  307. static bool raw_needs_alignment(BlockDriverState *bs)
  308. {
  309. BDRVRawState *s = bs->opaque;
  310. if ((bs->open_flags & BDRV_O_NOCACHE) != 0 && !dio_byte_aligned(s->fd)) {
  311. return true;
  312. }
  313. return s->force_alignment;
  314. }
  315. /* Check if read is allowed with given memory buffer and length.
  316. *
  317. * This function is used to check O_DIRECT memory buffer and request alignment.
  318. */
  319. static bool raw_is_io_aligned(int fd, void *buf, size_t len)
  320. {
  321. ssize_t ret = pread(fd, buf, len, 0);
  322. if (ret >= 0) {
  323. return true;
  324. }
  325. #ifdef __linux__
  326. /* The Linux kernel returns EINVAL for misaligned O_DIRECT reads. Ignore
  327. * other errors (e.g. real I/O error), which could happen on a failed
  328. * drive, since we only care about probing alignment.
  329. */
  330. if (errno != EINVAL) {
  331. return true;
  332. }
  333. #endif
  334. return false;
  335. }
  336. static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
  337. {
  338. BDRVRawState *s = bs->opaque;
  339. char *buf;
  340. size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
  341. size_t alignments[] = {1, 512, 1024, 2048, 4096};
  342. /* For SCSI generic devices the alignment is not really used.
  343. With buffered I/O, we don't have any restrictions. */
  344. if (bdrv_is_sg(bs) || !s->needs_alignment) {
  345. bs->bl.request_alignment = 1;
  346. s->buf_align = 1;
  347. return;
  348. }
  349. bs->bl.request_alignment = 0;
  350. s->buf_align = 0;
  351. /* Let's try to use the logical blocksize for the alignment. */
  352. if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
  353. bs->bl.request_alignment = 0;
  354. }
  355. #ifdef __linux__
  356. /*
  357. * The XFS ioctl definitions are shipped in extra packages that might
  358. * not always be available. Since we just need the XFS_IOC_DIOINFO ioctl
  359. * here, we simply use our own definition instead:
  360. */
  361. struct xfs_dioattr {
  362. uint32_t d_mem;
  363. uint32_t d_miniosz;
  364. uint32_t d_maxiosz;
  365. } da;
  366. if (ioctl(fd, _IOR('X', 30, struct xfs_dioattr), &da) >= 0) {
  367. bs->bl.request_alignment = da.d_miniosz;
  368. /* The kernel returns wrong information for d_mem */
  369. /* s->buf_align = da.d_mem; */
  370. }
  371. #endif
  372. /*
  373. * If we could not get the sizes so far, we can only guess them. First try
  374. * to detect request alignment, since it is more likely to succeed. Then
  375. * try to detect buf_align, which cannot be detected in some cases (e.g.
  376. * Gluster). If buf_align cannot be detected, we fall back to the value of
  377. * request_alignment.
  378. */
  379. if (!bs->bl.request_alignment) {
  380. int i;
  381. size_t align;
  382. buf = qemu_memalign(max_align, max_align);
  383. for (i = 0; i < ARRAY_SIZE(alignments); i++) {
  384. align = alignments[i];
  385. if (raw_is_io_aligned(fd, buf, align)) {
  386. /* Fallback to safe value. */
  387. bs->bl.request_alignment = (align != 1) ? align : max_align;
  388. break;
  389. }
  390. }
  391. qemu_vfree(buf);
  392. }
  393. if (!s->buf_align) {
  394. int i;
  395. size_t align;
  396. buf = qemu_memalign(max_align, 2 * max_align);
  397. for (i = 0; i < ARRAY_SIZE(alignments); i++) {
  398. align = alignments[i];
  399. if (raw_is_io_aligned(fd, buf + align, max_align)) {
  400. /* Fallback to request_alignment. */
  401. s->buf_align = (align != 1) ? align : bs->bl.request_alignment;
  402. break;
  403. }
  404. }
  405. qemu_vfree(buf);
  406. }
  407. if (!s->buf_align || !bs->bl.request_alignment) {
  408. error_setg(errp, "Could not find working O_DIRECT alignment");
  409. error_append_hint(errp, "Try cache.direct=off\n");
  410. }
  411. }
  412. static int check_hdev_writable(int fd)
  413. {
  414. #if defined(BLKROGET)
  415. /* Linux block devices can be configured "read-only" using blockdev(8).
  416. * This is independent of device node permissions and therefore open(2)
  417. * with O_RDWR succeeds. Actual writes fail with EPERM.
  418. *
  419. * bdrv_open() is supposed to fail if the disk is read-only. Explicitly
  420. * check for read-only block devices so that Linux block devices behave
  421. * properly.
  422. */
  423. struct stat st;
  424. int readonly = 0;
  425. if (fstat(fd, &st)) {
  426. return -errno;
  427. }
  428. if (!S_ISBLK(st.st_mode)) {
  429. return 0;
  430. }
  431. if (ioctl(fd, BLKROGET, &readonly) < 0) {
  432. return -errno;
  433. }
  434. if (readonly) {
  435. return -EACCES;
  436. }
  437. #endif /* defined(BLKROGET) */
  438. return 0;
  439. }
  440. static void raw_parse_flags(int bdrv_flags, int *open_flags, bool has_writers)
  441. {
  442. bool read_write = false;
  443. assert(open_flags != NULL);
  444. *open_flags |= O_BINARY;
  445. *open_flags &= ~O_ACCMODE;
  446. if (bdrv_flags & BDRV_O_AUTO_RDONLY) {
  447. read_write = has_writers;
  448. } else if (bdrv_flags & BDRV_O_RDWR) {
  449. read_write = true;
  450. }
  451. if (read_write) {
  452. *open_flags |= O_RDWR;
  453. } else {
  454. *open_flags |= O_RDONLY;
  455. }
  456. /* Use O_DSYNC for write-through caching, no flags for write-back caching,
  457. * and O_DIRECT for no caching. */
  458. if ((bdrv_flags & BDRV_O_NOCACHE)) {
  459. *open_flags |= O_DIRECT;
  460. }
  461. }
  462. static void raw_parse_filename(const char *filename, QDict *options,
  463. Error **errp)
  464. {
  465. bdrv_parse_filename_strip_prefix(filename, "file:", options);
  466. }
  467. static QemuOptsList raw_runtime_opts = {
  468. .name = "raw",
  469. .head = QTAILQ_HEAD_INITIALIZER(raw_runtime_opts.head),
  470. .desc = {
  471. {
  472. .name = "filename",
  473. .type = QEMU_OPT_STRING,
  474. .help = "File name of the image",
  475. },
  476. {
  477. .name = "aio",
  478. .type = QEMU_OPT_STRING,
  479. .help = "host AIO implementation (threads, native, io_uring)",
  480. },
  481. {
  482. .name = "aio-max-batch",
  483. .type = QEMU_OPT_NUMBER,
  484. .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)",
  485. },
  486. {
  487. .name = "locking",
  488. .type = QEMU_OPT_STRING,
  489. .help = "file locking mode (on/off/auto, default: auto)",
  490. },
  491. {
  492. .name = "pr-manager",
  493. .type = QEMU_OPT_STRING,
  494. .help = "id of persistent reservation manager object (default: none)",
  495. },
  496. #if defined(__linux__)
  497. {
  498. .name = "drop-cache",
  499. .type = QEMU_OPT_BOOL,
  500. .help = "invalidate page cache during live migration (default: on)",
  501. },
  502. #endif
  503. {
  504. .name = "x-check-cache-dropped",
  505. .type = QEMU_OPT_BOOL,
  506. .help = "check that page cache was dropped on live migration (default: off)"
  507. },
  508. { /* end of list */ }
  509. },
  510. };
  511. static const char *const mutable_opts[] = { "x-check-cache-dropped", NULL };
  512. static int raw_open_common(BlockDriverState *bs, QDict *options,
  513. int bdrv_flags, int open_flags,
  514. bool device, Error **errp)
  515. {
  516. BDRVRawState *s = bs->opaque;
  517. QemuOpts *opts;
  518. Error *local_err = NULL;
  519. const char *filename = NULL;
  520. const char *str;
  521. BlockdevAioOptions aio, aio_default;
  522. int fd, ret;
  523. struct stat st;
  524. OnOffAuto locking;
  525. opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
  526. if (!qemu_opts_absorb_qdict(opts, options, errp)) {
  527. ret = -EINVAL;
  528. goto fail;
  529. }
  530. filename = qemu_opt_get(opts, "filename");
  531. ret = raw_normalize_devicepath(&filename, errp);
  532. if (ret != 0) {
  533. goto fail;
  534. }
  535. if (bdrv_flags & BDRV_O_NATIVE_AIO) {
  536. aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE;
  537. #ifdef CONFIG_LINUX_IO_URING
  538. } else if (bdrv_flags & BDRV_O_IO_URING) {
  539. aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING;
  540. #endif
  541. } else {
  542. aio_default = BLOCKDEV_AIO_OPTIONS_THREADS;
  543. }
  544. aio = qapi_enum_parse(&BlockdevAioOptions_lookup,
  545. qemu_opt_get(opts, "aio"),
  546. aio_default, &local_err);
  547. if (local_err) {
  548. error_propagate(errp, local_err);
  549. ret = -EINVAL;
  550. goto fail;
  551. }
  552. s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE);
  553. #ifdef CONFIG_LINUX_IO_URING
  554. s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING);
  555. #endif
  556. s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0);
  557. locking = qapi_enum_parse(&OnOffAuto_lookup,
  558. qemu_opt_get(opts, "locking"),
  559. ON_OFF_AUTO_AUTO, &local_err);
  560. if (local_err) {
  561. error_propagate(errp, local_err);
  562. ret = -EINVAL;
  563. goto fail;
  564. }
  565. switch (locking) {
  566. case ON_OFF_AUTO_ON:
  567. s->use_lock = true;
  568. if (!qemu_has_ofd_lock()) {
  569. warn_report("File lock requested but OFD locking syscall is "
  570. "unavailable, falling back to POSIX file locks");
  571. error_printf("Due to the implementation, locks can be lost "
  572. "unexpectedly.\n");
  573. }
  574. break;
  575. case ON_OFF_AUTO_OFF:
  576. s->use_lock = false;
  577. break;
  578. case ON_OFF_AUTO_AUTO:
  579. s->use_lock = qemu_has_ofd_lock();
  580. break;
  581. default:
  582. abort();
  583. }
  584. str = qemu_opt_get(opts, "pr-manager");
  585. if (str) {
  586. s->pr_mgr = pr_manager_lookup(str, &local_err);
  587. if (local_err) {
  588. error_propagate(errp, local_err);
  589. ret = -EINVAL;
  590. goto fail;
  591. }
  592. }
  593. s->drop_cache = qemu_opt_get_bool(opts, "drop-cache", true);
  594. s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped",
  595. false);
  596. s->open_flags = open_flags;
  597. raw_parse_flags(bdrv_flags, &s->open_flags, false);
  598. s->fd = -1;
  599. fd = qemu_open(filename, s->open_flags, errp);
  600. ret = fd < 0 ? -errno : 0;
  601. if (ret < 0) {
  602. if (ret == -EROFS) {
  603. ret = -EACCES;
  604. }
  605. goto fail;
  606. }
  607. s->fd = fd;
  608. /* Check s->open_flags rather than bdrv_flags due to auto-read-only */
  609. if (s->open_flags & O_RDWR) {
  610. ret = check_hdev_writable(s->fd);
  611. if (ret < 0) {
  612. error_setg_errno(errp, -ret, "The device is not writable");
  613. goto fail;
  614. }
  615. }
  616. s->perm = 0;
  617. s->shared_perm = BLK_PERM_ALL;
  618. #ifdef CONFIG_LINUX_AIO
  619. /* Currently Linux does AIO only for files opened with O_DIRECT */
  620. if (s->use_linux_aio) {
  621. if (!(s->open_flags & O_DIRECT)) {
  622. error_setg(errp, "aio=native was specified, but it requires "
  623. "cache.direct=on, which was not specified.");
  624. ret = -EINVAL;
  625. goto fail;
  626. }
  627. if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) {
  628. error_prepend(errp, "Unable to use native AIO: ");
  629. goto fail;
  630. }
  631. }
  632. #else
  633. if (s->use_linux_aio) {
  634. error_setg(errp, "aio=native was specified, but is not supported "
  635. "in this build.");
  636. ret = -EINVAL;
  637. goto fail;
  638. }
  639. #endif /* !defined(CONFIG_LINUX_AIO) */
  640. #ifdef CONFIG_LINUX_IO_URING
  641. if (s->use_linux_io_uring) {
  642. if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) {
  643. error_prepend(errp, "Unable to use io_uring: ");
  644. goto fail;
  645. }
  646. }
  647. #else
  648. if (s->use_linux_io_uring) {
  649. error_setg(errp, "aio=io_uring was specified, but is not supported "
  650. "in this build.");
  651. ret = -EINVAL;
  652. goto fail;
  653. }
  654. #endif /* !defined(CONFIG_LINUX_IO_URING) */
  655. s->has_discard = true;
  656. s->has_write_zeroes = true;
  657. if (fstat(s->fd, &st) < 0) {
  658. ret = -errno;
  659. error_setg_errno(errp, errno, "Could not stat file");
  660. goto fail;
  661. }
  662. if (!device) {
  663. if (!S_ISREG(st.st_mode)) {
  664. error_setg(errp, "'%s' driver requires '%s' to be a regular file",
  665. bs->drv->format_name, bs->filename);
  666. ret = -EINVAL;
  667. goto fail;
  668. } else {
  669. s->discard_zeroes = true;
  670. s->has_fallocate = true;
  671. }
  672. } else {
  673. if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
  674. error_setg(errp, "'%s' driver requires '%s' to be either "
  675. "a character or block device",
  676. bs->drv->format_name, bs->filename);
  677. ret = -EINVAL;
  678. goto fail;
  679. }
  680. }
  681. if (S_ISBLK(st.st_mode)) {
  682. #ifdef BLKDISCARDZEROES
  683. unsigned int arg;
  684. if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) {
  685. s->discard_zeroes = true;
  686. }
  687. #endif
  688. #ifdef __linux__
  689. /* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
  690. * not rely on the contents of discarded blocks unless using O_DIRECT.
  691. * Same for BLKZEROOUT.
  692. */
  693. if (!(bs->open_flags & BDRV_O_NOCACHE)) {
  694. s->discard_zeroes = false;
  695. s->has_write_zeroes = false;
  696. }
  697. #endif
  698. }
  699. #ifdef __FreeBSD__
  700. if (S_ISCHR(st.st_mode)) {
  701. /*
  702. * The file is a char device (disk), which on FreeBSD isn't behind
  703. * a pager, so force all requests to be aligned. This is needed
  704. * so QEMU makes sure all IO operations on the device are aligned
  705. * to sector size, or else FreeBSD will reject them with EINVAL.
  706. */
  707. s->force_alignment = true;
  708. }
  709. #endif
  710. s->needs_alignment = raw_needs_alignment(bs);
  711. bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
  712. if (S_ISREG(st.st_mode)) {
  713. /* When extending regular files, we get zeros from the OS */
  714. bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
  715. }
  716. ret = 0;
  717. fail:
  718. if (ret < 0 && s->fd != -1) {
  719. qemu_close(s->fd);
  720. }
  721. if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) {
  722. unlink(filename);
  723. }
  724. qemu_opts_del(opts);
  725. return ret;
  726. }
  727. static int raw_open(BlockDriverState *bs, QDict *options, int flags,
  728. Error **errp)
  729. {
  730. BDRVRawState *s = bs->opaque;
  731. s->type = FTYPE_FILE;
  732. return raw_open_common(bs, options, flags, 0, false, errp);
  733. }
  734. typedef enum {
  735. RAW_PL_PREPARE,
  736. RAW_PL_COMMIT,
  737. RAW_PL_ABORT,
  738. } RawPermLockOp;
  739. #define PERM_FOREACH(i) \
  740. for ((i) = 0; (1ULL << (i)) <= BLK_PERM_ALL; i++)
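/*
 * PERM_FOREACH(i) iterates i over every permission bit position, up to and
 * including the highest bit set in BLK_PERM_ALL.
 */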
  741. /* Lock bytes indicated by @perm_lock_bits and @shared_perm_lock_bits in the
  742. * file; if @unlock == true, also unlock the unneeded bytes.
  743. * @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
  744. */
  745. static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
  746. uint64_t perm_lock_bits,
  747. uint64_t shared_perm_lock_bits,
  748. bool unlock, Error **errp)
  749. {
  750. int ret;
  751. int i;
  752. uint64_t locked_perm, locked_shared_perm;
  753. if (s) {
  754. locked_perm = s->locked_perm;
  755. locked_shared_perm = s->locked_shared_perm;
  756. } else {
  757. /*
  758. * We don't have the previous bits, just lock/unlock for each of the
  759. * requested bits.
  760. */
  761. if (unlock) {
  762. locked_perm = BLK_PERM_ALL;
  763. locked_shared_perm = BLK_PERM_ALL;
  764. } else {
  765. locked_perm = 0;
  766. locked_shared_perm = 0;
  767. }
  768. }
  769. PERM_FOREACH(i) {
  770. int off = RAW_LOCK_PERM_BASE + i;
  771. uint64_t bit = (1ULL << i);
  772. if ((perm_lock_bits & bit) && !(locked_perm & bit)) {
  773. ret = qemu_lock_fd(fd, off, 1, false);
  774. if (ret) {
  775. raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
  776. off);
  777. return ret;
  778. } else if (s) {
  779. s->locked_perm |= bit;
  780. }
  781. } else if (unlock && (locked_perm & bit) && !(perm_lock_bits & bit)) {
  782. ret = qemu_unlock_fd(fd, off, 1);
  783. if (ret) {
  784. error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
  785. return ret;
  786. } else if (s) {
  787. s->locked_perm &= ~bit;
  788. }
  789. }
  790. }
  791. PERM_FOREACH(i) {
  792. int off = RAW_LOCK_SHARED_BASE + i;
  793. uint64_t bit = (1ULL << i);
  794. if ((shared_perm_lock_bits & bit) && !(locked_shared_perm & bit)) {
  795. ret = qemu_lock_fd(fd, off, 1, false);
  796. if (ret) {
  797. raw_lock_error_setg_errno(errp, -ret, "Failed to lock byte %d",
  798. off);
  799. return ret;
  800. } else if (s) {
  801. s->locked_shared_perm |= bit;
  802. }
  803. } else if (unlock && (locked_shared_perm & bit) &&
  804. !(shared_perm_lock_bits & bit)) {
  805. ret = qemu_unlock_fd(fd, off, 1);
  806. if (ret) {
  807. error_setg_errno(errp, -ret, "Failed to unlock byte %d", off);
  808. return ret;
  809. } else if (s) {
  810. s->locked_shared_perm &= ~bit;
  811. }
  812. }
  813. }
  814. return 0;
  815. }
  816. /* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. */
  817. static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
  818. Error **errp)
  819. {
  820. int ret;
  821. int i;
  822. PERM_FOREACH(i) {
  823. int off = RAW_LOCK_SHARED_BASE + i;
  824. uint64_t p = 1ULL << i;
  825. if (perm & p) {
  826. ret = qemu_lock_fd_test(fd, off, 1, true);
  827. if (ret) {
  828. char *perm_name = bdrv_perm_names(p);
  829. raw_lock_error_setg_errno(errp, -ret,
  830. "Failed to get \"%s\" lock",
  831. perm_name);
  832. g_free(perm_name);
  833. return ret;
  834. }
  835. }
  836. }
  837. PERM_FOREACH(i) {
  838. int off = RAW_LOCK_PERM_BASE + i;
  839. uint64_t p = 1ULL << i;
  840. if (!(shared_perm & p)) {
  841. ret = qemu_lock_fd_test(fd, off, 1, true);
  842. if (ret) {
  843. char *perm_name = bdrv_perm_names(p);
  844. raw_lock_error_setg_errno(errp, -ret,
  845. "Failed to get shared \"%s\" lock",
  846. perm_name);
  847. g_free(perm_name);
  848. return ret;
  849. }
  850. }
  851. }
  852. return 0;
  853. }
  854. static int raw_handle_perm_lock(BlockDriverState *bs,
  855. RawPermLockOp op,
  856. uint64_t new_perm, uint64_t new_shared,
  857. Error **errp)
  858. {
  859. BDRVRawState *s = bs->opaque;
  860. int ret = 0;
  861. Error *local_err = NULL;
  862. if (!s->use_lock) {
  863. return 0;
  864. }
  865. if (bdrv_get_flags(bs) & BDRV_O_INACTIVE) {
  866. return 0;
  867. }
  868. switch (op) {
  869. case RAW_PL_PREPARE:
  870. if ((s->perm | new_perm) == s->perm &&
  871. (s->shared_perm & new_shared) == s->shared_perm)
  872. {
  873. /*
  874. * We are only going to unlock bytes, which should not fail. If it fails
  875. * due to some fs-dependent, permission-unrelated reason (which occurs
  876. * sometimes on NFS and leads to an abort in bdrv_replace_child), we
  877. * can't prevent such errors by any check here. And we ignore them
  878. * anyway in ABORT and COMMIT.
  879. */
  880. return 0;
  881. }
  882. ret = raw_apply_lock_bytes(s, s->fd, s->perm | new_perm,
  883. ~s->shared_perm | ~new_shared,
  884. false, errp);
  885. if (!ret) {
  886. ret = raw_check_lock_bytes(s->fd, new_perm, new_shared, errp);
  887. if (!ret) {
  888. return 0;
  889. }
  890. error_append_hint(errp,
  891. "Is another process using the image [%s]?\n",
  892. bs->filename);
  893. }
  894. /* fall through to unlock bytes. */
  895. case RAW_PL_ABORT:
  896. raw_apply_lock_bytes(s, s->fd, s->perm, ~s->shared_perm,
  897. true, &local_err);
  898. if (local_err) {
  899. /* Theoretically the above call only unlocks bytes and it cannot
  900. * fail. Something weird happened, report it.
  901. */
  902. warn_report_err(local_err);
  903. }
  904. break;
  905. case RAW_PL_COMMIT:
  906. raw_apply_lock_bytes(s, s->fd, new_perm, ~new_shared,
  907. true, &local_err);
  908. if (local_err) {
  909. /* Theoretically the above call only unlocks bytes and it cannot
  910. * fail. Something weird happened, report it.
  911. */
  912. warn_report_err(local_err);
  913. }
  914. break;
  915. }
  916. return ret;
  917. }
  918. static int raw_reconfigure_getfd(BlockDriverState *bs, int flags,
  919. int *open_flags, uint64_t perm, bool force_dup,
  920. Error **errp)
  921. {
  922. BDRVRawState *s = bs->opaque;
  923. int fd = -1;
  924. int ret;
  925. bool has_writers = perm &
  926. (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_RESIZE);
  927. int fcntl_flags = O_APPEND | O_NONBLOCK;
  928. #ifdef O_NOATIME
  929. fcntl_flags |= O_NOATIME;
  930. #endif
  931. *open_flags = 0;
  932. if (s->type == FTYPE_CD) {
  933. *open_flags |= O_NONBLOCK;
  934. }
  935. raw_parse_flags(flags, open_flags, has_writers);
  936. #ifdef O_ASYNC
  937. /* Not all operating systems have O_ASYNC, and those that don't
  938. * will not let us track the state into rs->open_flags (typically
  939. * you achieve the same effect with an ioctl, for example I_SETSIG
  940. * on Solaris). But we do not use O_ASYNC, so that's fine.
  941. */
  942. assert((s->open_flags & O_ASYNC) == 0);
  943. #endif
  944. if (!force_dup && *open_flags == s->open_flags) {
  945. /* We're lucky, the existing fd is fine */
  946. return s->fd;
  947. }
  948. if ((*open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) {
  949. /* dup the original fd */
  950. fd = qemu_dup(s->fd);
  951. if (fd >= 0) {
  952. ret = fcntl_setfl(fd, *open_flags);
  953. if (ret) {
  954. qemu_close(fd);
  955. fd = -1;
  956. }
  957. }
  958. }
  959. /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */
  960. if (fd == -1) {
  961. const char *normalized_filename = bs->filename;
  962. ret = raw_normalize_devicepath(&normalized_filename, errp);
  963. if (ret >= 0) {
  964. fd = qemu_open(normalized_filename, *open_flags, errp);
  965. if (fd == -1) {
  966. return -1;
  967. }
  968. }
  969. }
  970. if (fd != -1 && (*open_flags & O_RDWR)) {
  971. ret = check_hdev_writable(fd);
  972. if (ret < 0) {
  973. qemu_close(fd);
  974. error_setg_errno(errp, -ret, "The device is not writable");
  975. return -1;
  976. }
  977. }
  978. return fd;
  979. }
  980. static int raw_reopen_prepare(BDRVReopenState *state,
  981. BlockReopenQueue *queue, Error **errp)
  982. {
  983. BDRVRawState *s;
  984. BDRVRawReopenState *rs;
  985. QemuOpts *opts;
  986. int ret;
  987. assert(state != NULL);
  988. assert(state->bs != NULL);
  989. s = state->bs->opaque;
  990. state->opaque = g_new0(BDRVRawReopenState, 1);
  991. rs = state->opaque;
  992. /* Handle options changes */
  993. opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
  994. if (!qemu_opts_absorb_qdict(opts, state->options, errp)) {
  995. ret = -EINVAL;
  996. goto out;
  997. }
  998. rs->drop_cache = qemu_opt_get_bool_del(opts, "drop-cache", true);
  999. rs->check_cache_dropped =
  1000. qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false);
  1001. /* This driver's reopen function doesn't currently allow changing
  1002. * other options, so let's put them back in the original QDict and
  1003. * bdrv_reopen_prepare() will detect changes and complain. */
  1004. qemu_opts_to_qdict(opts, state->options);
  1005. /*
  1006. * As part of reopen prepare we also want to create a new fd via
  1007. * raw_reconfigure_getfd(). But that needs the updated "perm", while in
  1008. * bdrv_reopen_multiple() the .bdrv_reopen_prepare() callback is called
  1009. * prior to the permission update. Happily, the permission update is always
  1010. * a part (a separate stage) of bdrv_reopen_multiple(), so we can rely on
  1011. * this fact and reconfigure the fd in raw_check_perm().
  1012. */
  1013. s->reopen_state = state;
  1014. ret = 0;
  1015. out:
  1016. qemu_opts_del(opts);
  1017. return ret;
  1018. }
  1019. static void raw_reopen_commit(BDRVReopenState *state)
  1020. {
  1021. BDRVRawReopenState *rs = state->opaque;
  1022. BDRVRawState *s = state->bs->opaque;
  1023. s->drop_cache = rs->drop_cache;
  1024. s->check_cache_dropped = rs->check_cache_dropped;
  1025. s->open_flags = rs->open_flags;
  1026. g_free(state->opaque);
  1027. state->opaque = NULL;
  1028. assert(s->reopen_state == state);
  1029. s->reopen_state = NULL;
  1030. }
  1031. static void raw_reopen_abort(BDRVReopenState *state)
  1032. {
  1033. BDRVRawReopenState *rs = state->opaque;
  1034. BDRVRawState *s = state->bs->opaque;
  1035. /* nothing to do if NULL, we didn't get far enough */
  1036. if (rs == NULL) {
  1037. return;
  1038. }
  1039. g_free(state->opaque);
  1040. state->opaque = NULL;
  1041. assert(s->reopen_state == state);
  1042. s->reopen_state = NULL;
  1043. }
  1044. static int hdev_get_max_hw_transfer(int fd, struct stat *st)
  1045. {
  1046. #ifdef BLKSECTGET
  1047. if (S_ISBLK(st->st_mode)) {
  1048. unsigned short max_sectors = 0;
  1049. if (ioctl(fd, BLKSECTGET, &max_sectors) == 0) {
  1050. return max_sectors * 512;
  1051. }
  1052. } else {
  1053. int max_bytes = 0;
  1054. if (ioctl(fd, BLKSECTGET, &max_bytes) == 0) {
  1055. return max_bytes;
  1056. }
  1057. }
  1058. return -errno;
  1059. #else
  1060. return -ENOSYS;
  1061. #endif
  1062. }
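/*
 * In hdev_get_max_hw_transfer() the BLKSECTGET result is interpreted
 * differently per device type: as a count of 512-byte sectors for block
 * devices (hence the "* 512"), and as a byte count for SCSI generic
 * character devices.
 */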
  1063. static int hdev_get_max_segments(int fd, struct stat *st)
  1064. {
  1065. #ifdef CONFIG_LINUX
  1066. char buf[32];
  1067. const char *end;
  1068. char *sysfspath = NULL;
  1069. int ret;
  1070. int sysfd = -1;
  1071. long max_segments;
  1072. if (S_ISCHR(st->st_mode)) {
  1073. if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) {
  1074. return ret;
  1075. }
  1076. return -ENOTSUP;
  1077. }
  1078. if (!S_ISBLK(st->st_mode)) {
  1079. return -ENOTSUP;
  1080. }
  1081. sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments",
  1082. major(st->st_rdev), minor(st->st_rdev));
  1083. sysfd = open(sysfspath, O_RDONLY);
  1084. if (sysfd == -1) {
  1085. ret = -errno;
  1086. goto out;
  1087. }
  1088. do {
  1089. ret = read(sysfd, buf, sizeof(buf) - 1);
  1090. } while (ret == -1 && errno == EINTR);
  1091. if (ret < 0) {
  1092. ret = -errno;
  1093. goto out;
  1094. } else if (ret == 0) {
  1095. ret = -EIO;
  1096. goto out;
  1097. }
  1098. buf[ret] = 0;
  1099. /* The file ends with '\n'; pass 'end' to accept that. */
  1100. ret = qemu_strtol(buf, &end, 10, &max_segments);
  1101. if (ret == 0 && end && *end == '\n') {
  1102. ret = max_segments;
  1103. }
  1104. out:
  1105. if (sysfd != -1) {
  1106. close(sysfd);
  1107. }
  1108. g_free(sysfspath);
  1109. return ret;
  1110. #else
  1111. return -ENOTSUP;
  1112. #endif
  1113. }
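/*
 * The max_segments value read from sysfs above (or from SG_GET_SG_TABLESIZE
 * for SCSI generic devices) is used by raw_refresh_limits() below to cap
 * bs->bl.max_hw_iov.
 */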
  1114. static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
  1115. {
  1116. BDRVRawState *s = bs->opaque;
  1117. struct stat st;
  1118. s->needs_alignment = raw_needs_alignment(bs);
  1119. raw_probe_alignment(bs, s->fd, errp);
  1120. bs->bl.min_mem_alignment = s->buf_align;
  1121. bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());
  1122. /*
  1123. * Maximum transfers are best effort, so it is okay to ignore any
  1124. * errors. That said, based on the man page errors in fstat would be
  1125. * very much unexpected; the only possible case seems to be ENOMEM.
  1126. */
  1127. if (fstat(s->fd, &st)) {
  1128. return;
  1129. }
  1130. #if defined(__APPLE__) && (__MACH__)
  1131. struct statfs buf;
  1132. if (!fstatfs(s->fd, &buf)) {
  1133. bs->bl.opt_transfer = buf.f_iosize;
  1134. bs->bl.pdiscard_alignment = buf.f_bsize;
  1135. }
  1136. #endif
  1137. if (bs->sg || S_ISBLK(st.st_mode)) {
  1138. int ret = hdev_get_max_hw_transfer(s->fd, &st);
  1139. if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) {
  1140. bs->bl.max_hw_transfer = ret;
  1141. }
  1142. ret = hdev_get_max_segments(s->fd, &st);
  1143. if (ret > 0) {
  1144. bs->bl.max_hw_iov = ret;
  1145. }
  1146. }
  1147. }
  1148. static int check_for_dasd(int fd)
  1149. {
  1150. #ifdef BIODASDINFO2
  1151. struct dasd_information2_t info = {0};
  1152. return ioctl(fd, BIODASDINFO2, &info);
  1153. #else
  1154. return -1;
  1155. #endif
  1156. }
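/*
 * check_for_dasd() returns 0 only when the BIODASDINFO2 ioctl succeeds,
 * i.e. on s390 DASD devices; the callers below use it to restrict blocksize
 * and geometry probing to DASD.
 */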
  1157. /**
  1158. * Try to get @bs's logical and physical block size.
  1159. * On success, store them in @bsz and return zero.
  1160. * On failure, return negative errno.
  1161. */
  1162. static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
  1163. {
  1164. BDRVRawState *s = bs->opaque;
  1165. int ret;
  1166. /* If DASD, get blocksizes */
  1167. if (check_for_dasd(s->fd) < 0) {
  1168. return -ENOTSUP;
  1169. }
  1170. ret = probe_logical_blocksize(s->fd, &bsz->log);
  1171. if (ret < 0) {
  1172. return ret;
  1173. }
  1174. return probe_physical_blocksize(s->fd, &bsz->phys);
  1175. }
  1176. /**
  1177. * Try to get @bs's geometry: cyls, heads, sectors.
  1178. * On success, store them in @geo and return 0.
  1179. * On failure return -errno.
  1180. * (Allows block driver to assign default geometry values that guest sees)
  1181. */
  1182. #ifdef __linux__
  1183. static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
  1184. {
  1185. BDRVRawState *s = bs->opaque;
  1186. struct hd_geometry ioctl_geo = {0};
  1187. /* If DASD, get its geometry */
  1188. if (check_for_dasd(s->fd) < 0) {
  1189. return -ENOTSUP;
  1190. }
  1191. if (ioctl(s->fd, HDIO_GETGEO, &ioctl_geo) < 0) {
  1192. return -errno;
  1193. }
  1194. /* HDIO_GETGEO may return success even though geo contains zeros
  1195. (e.g. certain multipath setups) */
  1196. if (!ioctl_geo.heads || !ioctl_geo.sectors || !ioctl_geo.cylinders) {
  1197. return -ENOTSUP;
  1198. }
  1199. /* Do not return a geometry for partition */
  1200. if (ioctl_geo.start != 0) {
  1201. return -ENOTSUP;
  1202. }
  1203. geo->heads = ioctl_geo.heads;
  1204. geo->sectors = ioctl_geo.sectors;
  1205. geo->cylinders = ioctl_geo.cylinders;
  1206. return 0;
  1207. }
  1208. #else /* __linux__ */
  1209. static int hdev_probe_geometry(BlockDriverState *bs, HDGeometry *geo)
  1210. {
  1211. return -ENOTSUP;
  1212. }
  1213. #endif
  1214. #if defined(__linux__)
  1215. static int handle_aiocb_ioctl(void *opaque)
  1216. {
  1217. RawPosixAIOData *aiocb = opaque;
  1218. int ret;
  1219. do {
  1220. ret = ioctl(aiocb->aio_fildes, aiocb->ioctl.cmd, aiocb->ioctl.buf);
  1221. } while (ret == -1 && errno == EINTR);
  1222. if (ret == -1) {
  1223. return -errno;
  1224. }
  1225. return 0;
  1226. }
  1227. #endif /* linux */
  1228. static int handle_aiocb_flush(void *opaque)
  1229. {
  1230. RawPosixAIOData *aiocb = opaque;
  1231. BDRVRawState *s = aiocb->bs->opaque;
  1232. int ret;
  1233. if (s->page_cache_inconsistent) {
  1234. return -s->page_cache_inconsistent;
  1235. }
  1236. ret = qemu_fdatasync(aiocb->aio_fildes);
  1237. if (ret == -1) {
  1238. trace_file_flush_fdatasync_failed(errno);
  1239. /* There is no clear definition of the semantics of a failing fsync(),
  1240. * so we may have to assume the worst. The sad truth is that this
  1241. * assumption is correct for Linux. Some pages are now probably marked
  1242. * clean in the page cache even though they are inconsistent with the
  1243. * on-disk contents. The next fdatasync() call would succeed, but no
  1244. * further writeback attempt will be made. We can't get back to a state
  1245. * in which we know what is on disk (we would have to rewrite
  1246. * everything that was touched since the last fdatasync() at least), so
  1247. * make bdrv_flush() fail permanently. Given that the behaviour isn't
  1248. * really defined, I have little hope that other OSes are doing better.
  1249. *
  1250. * Obviously, this doesn't affect O_DIRECT, which bypasses the page
  1251. * cache. */
  1252. if ((s->open_flags & O_DIRECT) == 0) {
  1253. s->page_cache_inconsistent = errno;
  1254. }
  1255. return -errno;
  1256. }
  1257. return 0;
  1258. }
#ifdef CONFIG_PREADV

static bool preadv_present = true;

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

static bool preadv_present = false;

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif
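/*
 * Issue the whole vectored request with a single preadv()/pwritev() call,
 * retrying on EINTR. Returns the byte count on success or -errno.
 */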
  1284. static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb)
  1285. {
  1286. ssize_t len;
  1287. do {
  1288. if (aiocb->aio_type & QEMU_AIO_WRITE)
  1289. len = qemu_pwritev(aiocb->aio_fildes,
  1290. aiocb->io.iov,
  1291. aiocb->io.niov,
  1292. aiocb->aio_offset);
  1293. else
  1294. len = qemu_preadv(aiocb->aio_fildes,
  1295. aiocb->io.iov,
  1296. aiocb->io.niov,
  1297. aiocb->aio_offset);
  1298. } while (len == -1 && errno == EINTR);
  1299. if (len == -1) {
  1300. return -errno;
  1301. }
  1302. return len;
  1303. }
/*
 * Reads/writes the data to/from a given linear buffer.
 *
 * Returns the number of bytes handled or -errno in case of an error. Short
 * reads are only returned if the end of the file is reached.
 */
  1310. static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf)
  1311. {
  1312. ssize_t offset = 0;
  1313. ssize_t len;
  1314. while (offset < aiocb->aio_nbytes) {
  1315. if (aiocb->aio_type & QEMU_AIO_WRITE) {
  1316. len = pwrite(aiocb->aio_fildes,
  1317. (const char *)buf + offset,
  1318. aiocb->aio_nbytes - offset,
  1319. aiocb->aio_offset + offset);
  1320. } else {
  1321. len = pread(aiocb->aio_fildes,
  1322. buf + offset,
  1323. aiocb->aio_nbytes - offset,
  1324. aiocb->aio_offset + offset);
  1325. }
  1326. if (len == -1 && errno == EINTR) {
  1327. continue;
  1328. } else if (len == -1 && errno == EINVAL &&
  1329. (aiocb->bs->open_flags & BDRV_O_NOCACHE) &&
  1330. !(aiocb->aio_type & QEMU_AIO_WRITE) &&
  1331. offset > 0) {
  1332. /* O_DIRECT pread() may fail with EINVAL when offset is unaligned
  1333. * after a short read. Assume that O_DIRECT short reads only occur
  1334. * at EOF. Therefore this is a short read, not an I/O error.
  1335. */
  1336. break;
  1337. } else if (len == -1) {
  1338. offset = -errno;
  1339. break;
  1340. } else if (len == 0) {
  1341. break;
  1342. }
  1343. offset += len;
  1344. }
  1345. return offset;
  1346. }
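/*
 * Thread pool worker for read/write requests: a single aligned buffer is
 * handled with plain pread()/pwrite(), an aligned vector with
 * preadv()/pwritev() when available, and anything else is bounced through
 * one contiguous aligned buffer. Short reads are zero-padded; short writes
 * fail with -EINVAL.
 */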
  1347. static int handle_aiocb_rw(void *opaque)
  1348. {
  1349. RawPosixAIOData *aiocb = opaque;
  1350. ssize_t nbytes;
  1351. char *buf;
  1352. if (!(aiocb->aio_type & QEMU_AIO_MISALIGNED)) {
  1353. /*
  1354. * If there is just a single buffer, and it is properly aligned
  1355. * we can just use plain pread/pwrite without any problems.
  1356. */
  1357. if (aiocb->io.niov == 1) {
  1358. nbytes = handle_aiocb_rw_linear(aiocb, aiocb->io.iov->iov_base);
  1359. goto out;
  1360. }
  1361. /*
  1362. * We have more than one iovec, and all are properly aligned.
  1363. *
  1364. * Try preadv/pwritev first and fall back to linearizing the
  1365. * buffer if it's not supported.
  1366. */
  1367. if (preadv_present) {
  1368. nbytes = handle_aiocb_rw_vector(aiocb);
  1369. if (nbytes == aiocb->aio_nbytes ||
  1370. (nbytes < 0 && nbytes != -ENOSYS)) {
  1371. goto out;
  1372. }
  1373. preadv_present = false;
  1374. }
/*
 * XXX(hch): short read/write. No easy way to handle the remainder
 * using these interfaces. For now retry using plain
 * pread/pwrite?
 */
  1380. }
  1381. /*
  1382. * Ok, we have to do it the hard way, copy all segments into
  1383. * a single aligned buffer.
  1384. */
  1385. buf = qemu_try_blockalign(aiocb->bs, aiocb->aio_nbytes);
  1386. if (buf == NULL) {
  1387. nbytes = -ENOMEM;
  1388. goto out;
  1389. }
  1390. if (aiocb->aio_type & QEMU_AIO_WRITE) {
  1391. char *p = buf;
  1392. int i;
  1393. for (i = 0; i < aiocb->io.niov; ++i) {
  1394. memcpy(p, aiocb->io.iov[i].iov_base, aiocb->io.iov[i].iov_len);
  1395. p += aiocb->io.iov[i].iov_len;
  1396. }
  1397. assert(p - buf == aiocb->aio_nbytes);
  1398. }
  1399. nbytes = handle_aiocb_rw_linear(aiocb, buf);
  1400. if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
  1401. char *p = buf;
  1402. size_t count = aiocb->aio_nbytes, copy;
  1403. int i;
  1404. for (i = 0; i < aiocb->io.niov && count; ++i) {
  1405. copy = count;
  1406. if (copy > aiocb->io.iov[i].iov_len) {
  1407. copy = aiocb->io.iov[i].iov_len;
  1408. }
  1409. memcpy(aiocb->io.iov[i].iov_base, p, copy);
  1410. assert(count >= copy);
  1411. p += copy;
  1412. count -= copy;
  1413. }
  1414. assert(count == 0);
  1415. }
  1416. qemu_vfree(buf);
  1417. out:
  1418. if (nbytes == aiocb->aio_nbytes) {
  1419. return 0;
  1420. } else if (nbytes >= 0 && nbytes < aiocb->aio_nbytes) {
  1421. if (aiocb->aio_type & QEMU_AIO_WRITE) {
  1422. return -EINVAL;
  1423. } else {
  1424. iov_memset(aiocb->io.iov, aiocb->io.niov, nbytes,
  1425. 0, aiocb->aio_nbytes - nbytes);
  1426. return 0;
  1427. }
  1428. } else {
  1429. assert(nbytes < 0);
  1430. return nbytes;
  1431. }
  1432. }
#if defined(CONFIG_FALLOCATE) || defined(BLKZEROOUT) || defined(BLKDISCARD)
static int translate_err(int err)
{
    if (err == -ENODEV || err == -ENOSYS || err == -EOPNOTSUPP ||
        err == -ENOTTY) {
        err = -ENOTSUP;
    }
    return err;
}
#endif

#ifdef CONFIG_FALLOCATE
static int do_fallocate(int fd, int mode, off_t offset, off_t len)
{
    do {
        if (fallocate(fd, mode, offset, len) == 0) {
            return 0;
        }
    } while (errno == EINTR);
    return translate_err(-errno);
}
#endif
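/*
 * Write zeroes to a host block device with the BLKZEROOUT ioctl, unless a
 * slow kernel fallback must be avoided. Returns 0, -ENOTSUP, or -errno.
 */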
  1454. static ssize_t handle_aiocb_write_zeroes_block(RawPosixAIOData *aiocb)
  1455. {
  1456. int ret = -ENOTSUP;
  1457. BDRVRawState *s = aiocb->bs->opaque;
  1458. if (!s->has_write_zeroes) {
  1459. return -ENOTSUP;
  1460. }
  1461. #ifdef BLKZEROOUT
  1462. /* The BLKZEROOUT implementation in the kernel doesn't set
  1463. * BLKDEV_ZERO_NOFALLBACK, so we can't call this if we have to avoid slow
  1464. * fallbacks. */
  1465. if (!(aiocb->aio_type & QEMU_AIO_NO_FALLBACK)) {
  1466. do {
  1467. uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
  1468. if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) {
  1469. return 0;
  1470. }
  1471. } while (errno == EINTR);
  1472. ret = translate_err(-errno);
  1473. if (ret == -ENOTSUP) {
  1474. s->has_write_zeroes = false;
  1475. }
  1476. }
  1477. #endif
  1478. return ret;
  1479. }
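/*
 * Write zeroes without necessarily unmapping: try BLKZEROOUT for block
 * devices, then FALLOC_FL_ZERO_RANGE, then punching a hole and reallocating
 * it, and finally a plain fallocate() when merely extending the file.
 * Returns -ENOTSUP once every applicable method has been ruled out.
 */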
  1480. static int handle_aiocb_write_zeroes(void *opaque)
  1481. {
  1482. RawPosixAIOData *aiocb = opaque;
  1483. #ifdef CONFIG_FALLOCATE
  1484. BDRVRawState *s = aiocb->bs->opaque;
  1485. int64_t len;
  1486. #endif
  1487. if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
  1488. return handle_aiocb_write_zeroes_block(aiocb);
  1489. }
  1490. #ifdef CONFIG_FALLOCATE_ZERO_RANGE
  1491. if (s->has_write_zeroes) {
  1492. int ret = do_fallocate(s->fd, FALLOC_FL_ZERO_RANGE,
  1493. aiocb->aio_offset, aiocb->aio_nbytes);
  1494. if (ret == -ENOTSUP) {
  1495. s->has_write_zeroes = false;
  1496. } else if (ret == 0 || ret != -EINVAL) {
  1497. return ret;
  1498. }
  1499. /*
  1500. * Note: Some file systems do not like unaligned byte ranges, and
  1501. * return EINVAL in such a case, though they should not do it according
  1502. * to the man-page of fallocate(). Thus we simply ignore this return
  1503. * value and try the other fallbacks instead.
  1504. */
  1505. }
  1506. #endif
  1507. #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
  1508. if (s->has_discard && s->has_fallocate) {
  1509. int ret = do_fallocate(s->fd,
  1510. FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
  1511. aiocb->aio_offset, aiocb->aio_nbytes);
  1512. if (ret == 0) {
  1513. ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
  1514. if (ret == 0 || ret != -ENOTSUP) {
  1515. return ret;
  1516. }
  1517. s->has_fallocate = false;
  1518. } else if (ret == -EINVAL) {
  1519. /*
  1520. * Some file systems like older versions of GPFS do not like un-
  1521. * aligned byte ranges, and return EINVAL in such a case, though
  1522. * they should not do it according to the man-page of fallocate().
  1523. * Warn about the bad filesystem and try the final fallback instead.
  1524. */
  1525. warn_report_once("Your file system is misbehaving: "
  1526. "fallocate(FALLOC_FL_PUNCH_HOLE) returned EINVAL. "
  1527. "Please report this bug to your file system "
  1528. "vendor.");
  1529. } else if (ret != -ENOTSUP) {
  1530. return ret;
  1531. } else {
  1532. s->has_discard = false;
  1533. }
  1534. }
  1535. #endif
  1536. #ifdef CONFIG_FALLOCATE
  1537. /* Last resort: we are trying to extend the file with zeroed data. This
  1538. * can be done via fallocate(fd, 0) */
  1539. len = bdrv_getlength(aiocb->bs);
  1540. if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
  1541. int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
  1542. if (ret == 0 || ret != -ENOTSUP) {
  1543. return ret;
  1544. }
  1545. s->has_fallocate = false;
  1546. }
  1547. #endif
  1548. return -ENOTSUP;
  1549. }
  1550. static int handle_aiocb_write_zeroes_unmap(void *opaque)
  1551. {
  1552. RawPosixAIOData *aiocb = opaque;
  1553. BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque;
  1554. /* First try to write zeros and unmap at the same time */
  1555. #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
  1556. int ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
  1557. aiocb->aio_offset, aiocb->aio_nbytes);
  1558. switch (ret) {
  1559. case -ENOTSUP:
  1560. case -EINVAL:
  1561. case -EBUSY:
  1562. break;
  1563. default:
  1564. return ret;
  1565. }
  1566. #endif
  1567. /* If we couldn't manage to unmap while guaranteed that the area reads as
  1568. * all-zero afterwards, just write zeroes without unmapping */
  1569. return handle_aiocb_write_zeroes(aiocb);
  1570. }
#ifndef HAVE_COPY_FILE_RANGE
static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
                             off_t *out_off, size_t len, unsigned int flags)
{
#ifdef __NR_copy_file_range
    return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
                   out_off, len, flags);
#else
    errno = ENOSYS;
    return -1;
#endif
}
#endif
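/*
 * Copy data between two open files with copy_file_range(), looping until the
 * request is complete. Zero progress and ENOSYS are mapped to -ENOSPC and
 * -ENOTSUP respectively so the caller can fall back to bounce-buffer I/O.
 */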
  1584. static int handle_aiocb_copy_range(void *opaque)
  1585. {
  1586. RawPosixAIOData *aiocb = opaque;
  1587. uint64_t bytes = aiocb->aio_nbytes;
  1588. off_t in_off = aiocb->aio_offset;
  1589. off_t out_off = aiocb->copy_range.aio_offset2;
  1590. while (bytes) {
  1591. ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off,
  1592. aiocb->copy_range.aio_fd2, &out_off,
  1593. bytes, 0);
  1594. trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off,
  1595. aiocb->copy_range.aio_fd2, out_off, bytes,
  1596. 0, ret);
  1597. if (ret == 0) {
  1598. /* No progress (e.g. when beyond EOF), let the caller fall back to
  1599. * buffer I/O. */
  1600. return -ENOSPC;
  1601. }
  1602. if (ret < 0) {
  1603. switch (errno) {
  1604. case ENOSYS:
  1605. return -ENOTSUP;
  1606. case EINTR:
  1607. continue;
  1608. default:
  1609. return -errno;
  1610. }
  1611. }
  1612. bytes -= ret;
  1613. }
  1614. return 0;
  1615. }
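/*
 * Discard a byte range, using BLKDISCARD for host block devices and
 * FALLOC_FL_PUNCH_HOLE (or F_PUNCHHOLE on macOS) for regular files.
 * A -ENOTSUP result disables further discard attempts on this image.
 */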
  1616. static int handle_aiocb_discard(void *opaque)
  1617. {
  1618. RawPosixAIOData *aiocb = opaque;
  1619. int ret = -ENOTSUP;
  1620. BDRVRawState *s = aiocb->bs->opaque;
  1621. if (!s->has_discard) {
  1622. return -ENOTSUP;
  1623. }
  1624. if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
  1625. #ifdef BLKDISCARD
  1626. do {
  1627. uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
  1628. if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) {
  1629. return 0;
  1630. }
  1631. } while (errno == EINTR);
  1632. ret = translate_err(-errno);
  1633. #endif
  1634. } else {
  1635. #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
  1636. ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
  1637. aiocb->aio_offset, aiocb->aio_nbytes);
  1638. ret = translate_err(ret);
  1639. #elif defined(__APPLE__) && (__MACH__)
  1640. fpunchhole_t fpunchhole;
  1641. fpunchhole.fp_flags = 0;
  1642. fpunchhole.reserved = 0;
  1643. fpunchhole.fp_offset = aiocb->aio_offset;
  1644. fpunchhole.fp_length = aiocb->aio_nbytes;
  1645. if (fcntl(s->fd, F_PUNCHHOLE, &fpunchhole) == -1) {
  1646. ret = errno == ENODEV ? -ENOTSUP : -errno;
  1647. } else {
  1648. ret = 0;
  1649. }
  1650. #endif
  1651. }
  1652. if (ret == -ENOTSUP) {
  1653. s->has_discard = false;
  1654. }
  1655. return ret;
  1656. }
/*
 * Help alignment probing by allocating the first block.
 *
 * When reading with direct I/O from an unallocated area on Gluster backed by
 * XFS, reading succeeds regardless of request length. In this case we fall
 * back to the safe alignment, which is not optimal. Allocating the first
 * block avoids this fallback.
 *
 * fd may be opened with O_DIRECT, but we don't know the buffer alignment or
 * request alignment, so we use safe values.
 *
 * Returns: 0 on success, -errno on failure. Since this is an optimization,
 * the caller may ignore failures.
 */
  1671. static int allocate_first_block(int fd, size_t max_size)
  1672. {
  1673. size_t write_size = (max_size < MAX_BLOCKSIZE)
  1674. ? BDRV_SECTOR_SIZE
  1675. : MAX_BLOCKSIZE;
  1676. size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
  1677. void *buf;
  1678. ssize_t n;
  1679. int ret;
  1680. buf = qemu_memalign(max_align, write_size);
  1681. memset(buf, 0, write_size);
  1682. do {
  1683. n = pwrite(fd, buf, write_size, 0);
  1684. } while (n == -1 && errno == EINTR);
  1685. ret = (n == -1) ? -errno : 0;
  1686. qemu_vfree(buf);
  1687. return ret;
  1688. }
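/*
 * Thread pool worker that resizes a regular file, honouring the requested
 * preallocation mode (off, falloc, full). On failure the previous length is
 * restored where possible and a detailed error is set via errp.
 */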
  1689. static int handle_aiocb_truncate(void *opaque)
  1690. {
  1691. RawPosixAIOData *aiocb = opaque;
  1692. int result = 0;
  1693. int64_t current_length = 0;
  1694. char *buf = NULL;
  1695. struct stat st;
  1696. int fd = aiocb->aio_fildes;
  1697. int64_t offset = aiocb->aio_offset;
  1698. PreallocMode prealloc = aiocb->truncate.prealloc;
  1699. Error **errp = aiocb->truncate.errp;
  1700. if (fstat(fd, &st) < 0) {
  1701. result = -errno;
  1702. error_setg_errno(errp, -result, "Could not stat file");
  1703. return result;
  1704. }
  1705. current_length = st.st_size;
  1706. if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
  1707. error_setg(errp, "Cannot use preallocation for shrinking files");
  1708. return -ENOTSUP;
  1709. }
  1710. switch (prealloc) {
  1711. #ifdef CONFIG_POSIX_FALLOCATE
  1712. case PREALLOC_MODE_FALLOC:
  1713. /*
  1714. * Truncating before posix_fallocate() makes it about twice slower on
  1715. * file systems that do not support fallocate(), trying to check if a
  1716. * block is allocated before allocating it, so don't do that here.
  1717. */
  1718. if (offset != current_length) {
  1719. result = -posix_fallocate(fd, current_length,
  1720. offset - current_length);
  1721. if (result != 0) {
  1722. /* posix_fallocate() doesn't set errno. */
  1723. error_setg_errno(errp, -result,
  1724. "Could not preallocate new data");
  1725. } else if (current_length == 0) {
/*
 * posix_fallocate() uses fallocate() if the filesystem
 * supports it, or falls back to manually writing zeroes. If
 * fallocate() was used, unaligned reads from the fallocated
 * area in raw_probe_alignment() will succeed, hence we need to
 * allocate the first block.
 *
 * Optimize future alignment probing; ignore failures.
 */
  1735. allocate_first_block(fd, offset);
  1736. }
  1737. } else {
  1738. result = 0;
  1739. }
  1740. goto out;
  1741. #endif
  1742. case PREALLOC_MODE_FULL:
  1743. {
  1744. int64_t num = 0, left = offset - current_length;
  1745. off_t seek_result;
  1746. /*
  1747. * Knowing the final size from the beginning could allow the file
  1748. * system driver to do less allocations and possibly avoid
  1749. * fragmentation of the file.
  1750. */
  1751. if (ftruncate(fd, offset) != 0) {
  1752. result = -errno;
  1753. error_setg_errno(errp, -result, "Could not resize file");
  1754. goto out;
  1755. }
  1756. buf = g_malloc0(65536);
  1757. seek_result = lseek(fd, current_length, SEEK_SET);
  1758. if (seek_result < 0) {
  1759. result = -errno;
  1760. error_setg_errno(errp, -result,
  1761. "Failed to seek to the old end of file");
  1762. goto out;
  1763. }
  1764. while (left > 0) {
  1765. num = MIN(left, 65536);
  1766. result = write(fd, buf, num);
  1767. if (result < 0) {
  1768. if (errno == EINTR) {
  1769. continue;
  1770. }
  1771. result = -errno;
  1772. error_setg_errno(errp, -result,
  1773. "Could not write zeros for preallocation");
  1774. goto out;
  1775. }
  1776. left -= result;
  1777. }
  1778. if (result >= 0) {
  1779. result = fsync(fd);
  1780. if (result < 0) {
  1781. result = -errno;
  1782. error_setg_errno(errp, -result,
  1783. "Could not flush file to disk");
  1784. goto out;
  1785. }
  1786. }
  1787. goto out;
  1788. }
  1789. case PREALLOC_MODE_OFF:
  1790. if (ftruncate(fd, offset) != 0) {
  1791. result = -errno;
  1792. error_setg_errno(errp, -result, "Could not resize file");
  1793. } else if (current_length == 0 && offset > current_length) {
  1794. /* Optimize future alignment probing; ignore failures. */
  1795. allocate_first_block(fd, offset);
  1796. }
  1797. return result;
  1798. default:
  1799. result = -ENOTSUP;
  1800. error_setg(errp, "Unsupported preallocation mode: %s",
  1801. PreallocMode_str(prealloc));
  1802. return result;
  1803. }
  1804. out:
  1805. if (result < 0) {
  1806. if (ftruncate(fd, current_length) < 0) {
  1807. error_report("Failed to restore old file length: %s",
  1808. strerror(errno));
  1809. }
  1810. }
  1811. g_free(buf);
  1812. return result;
  1813. }
  1814. static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs,
  1815. ThreadPoolFunc func, void *arg)
  1816. {
  1817. /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
  1818. ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
  1819. return thread_pool_submit_co(pool, func, arg);
  1820. }
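/*
 * Common read/write path: aligned requests are submitted through io_uring or
 * Linux AIO when enabled; misaligned or remaining requests go to the thread
 * pool, which emulates vectored and unaligned I/O in handle_aiocb_rw().
 */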
  1821. static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
  1822. uint64_t bytes, QEMUIOVector *qiov, int type)
  1823. {
  1824. BDRVRawState *s = bs->opaque;
  1825. RawPosixAIOData acb;
  1826. if (fd_open(bs) < 0)
  1827. return -EIO;
/*
 * When using O_DIRECT, the request must be aligned to be able to use
 * either the libaio or the io_uring interface. If not, fall back to the
 * regular thread pool read/write code, which emulates this for us when we
 * set QEMU_AIO_MISALIGNED.
 */
  1834. if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) {
  1835. type |= QEMU_AIO_MISALIGNED;
  1836. #ifdef CONFIG_LINUX_IO_URING
  1837. } else if (s->use_linux_io_uring) {
  1838. LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
  1839. assert(qiov->size == bytes);
  1840. return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
  1841. #endif
  1842. #ifdef CONFIG_LINUX_AIO
  1843. } else if (s->use_linux_aio) {
  1844. LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
  1845. assert(qiov->size == bytes);
  1846. return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
  1847. s->aio_max_batch);
  1848. #endif
  1849. }
  1850. acb = (RawPosixAIOData) {
  1851. .bs = bs,
  1852. .aio_fildes = s->fd,
  1853. .aio_type = type,
  1854. .aio_offset = offset,
  1855. .aio_nbytes = bytes,
  1856. .io = {
  1857. .iov = qiov->iov,
  1858. .niov = qiov->niov,
  1859. },
  1860. };
  1861. assert(qiov->size == bytes);
  1862. return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
  1863. }
  1864. static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
  1865. int64_t bytes, QEMUIOVector *qiov,
  1866. BdrvRequestFlags flags)
  1867. {
  1868. return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ);
  1869. }
  1870. static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
  1871. int64_t bytes, QEMUIOVector *qiov,
  1872. BdrvRequestFlags flags)
  1873. {
  1874. assert(flags == 0);
  1875. return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE);
  1876. }
  1877. static void raw_aio_plug(BlockDriverState *bs)
  1878. {
  1879. BDRVRawState __attribute__((unused)) *s = bs->opaque;
  1880. #ifdef CONFIG_LINUX_AIO
  1881. if (s->use_linux_aio) {
  1882. LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
  1883. laio_io_plug(bs, aio);
  1884. }
  1885. #endif
  1886. #ifdef CONFIG_LINUX_IO_URING
  1887. if (s->use_linux_io_uring) {
  1888. LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
  1889. luring_io_plug(bs, aio);
  1890. }
  1891. #endif
  1892. }
  1893. static void raw_aio_unplug(BlockDriverState *bs)
  1894. {
  1895. BDRVRawState __attribute__((unused)) *s = bs->opaque;
  1896. #ifdef CONFIG_LINUX_AIO
  1897. if (s->use_linux_aio) {
  1898. LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
  1899. laio_io_unplug(bs, aio, s->aio_max_batch);
  1900. }
  1901. #endif
  1902. #ifdef CONFIG_LINUX_IO_URING
  1903. if (s->use_linux_io_uring) {
  1904. LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
  1905. luring_io_unplug(bs, aio);
  1906. }
  1907. #endif
  1908. }
  1909. static int raw_co_flush_to_disk(BlockDriverState *bs)
  1910. {
  1911. BDRVRawState *s = bs->opaque;
  1912. RawPosixAIOData acb;
  1913. int ret;
  1914. ret = fd_open(bs);
  1915. if (ret < 0) {
  1916. return ret;
  1917. }
  1918. acb = (RawPosixAIOData) {
  1919. .bs = bs,
  1920. .aio_fildes = s->fd,
  1921. .aio_type = QEMU_AIO_FLUSH,
  1922. };
  1923. #ifdef CONFIG_LINUX_IO_URING
  1924. if (s->use_linux_io_uring) {
  1925. LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
  1926. return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH);
  1927. }
  1928. #endif
  1929. return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
  1930. }
  1931. static void raw_aio_attach_aio_context(BlockDriverState *bs,
  1932. AioContext *new_context)
  1933. {
  1934. BDRVRawState __attribute__((unused)) *s = bs->opaque;
  1935. #ifdef CONFIG_LINUX_AIO
  1936. if (s->use_linux_aio) {
  1937. Error *local_err = NULL;
  1938. if (!aio_setup_linux_aio(new_context, &local_err)) {
  1939. error_reportf_err(local_err, "Unable to use native AIO, "
  1940. "falling back to thread pool: ");
  1941. s->use_linux_aio = false;
  1942. }
  1943. }
  1944. #endif
  1945. #ifdef CONFIG_LINUX_IO_URING
  1946. if (s->use_linux_io_uring) {
  1947. Error *local_err = NULL;
  1948. if (!aio_setup_linux_io_uring(new_context, &local_err)) {
  1949. error_reportf_err(local_err, "Unable to use linux io_uring, "
  1950. "falling back to thread pool: ");
  1951. s->use_linux_io_uring = false;
  1952. }
  1953. }
  1954. #endif
  1955. }
  1956. static void raw_close(BlockDriverState *bs)
  1957. {
  1958. BDRVRawState *s = bs->opaque;
  1959. if (s->fd >= 0) {
  1960. qemu_close(s->fd);
  1961. s->fd = -1;
  1962. }
  1963. }
  1964. /**
  1965. * Truncates the given regular file @fd to @offset and, when growing, fills the
  1966. * new space according to @prealloc.
  1967. *
  1968. * Returns: 0 on success, -errno on failure.
  1969. */
  1970. static int coroutine_fn
  1971. raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
  1972. PreallocMode prealloc, Error **errp)
  1973. {
  1974. RawPosixAIOData acb;
  1975. acb = (RawPosixAIOData) {
  1976. .bs = bs,
  1977. .aio_fildes = fd,
  1978. .aio_type = QEMU_AIO_TRUNCATE,
  1979. .aio_offset = offset,
  1980. .truncate = {
  1981. .prealloc = prealloc,
  1982. .errp = errp,
  1983. },
  1984. };
  1985. return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb);
  1986. }
  1987. static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
  1988. bool exact, PreallocMode prealloc,
  1989. BdrvRequestFlags flags, Error **errp)
  1990. {
  1991. BDRVRawState *s = bs->opaque;
  1992. struct stat st;
  1993. int ret;
  1994. if (fstat(s->fd, &st)) {
  1995. ret = -errno;
  1996. error_setg_errno(errp, -ret, "Failed to fstat() the file");
  1997. return ret;
  1998. }
  1999. if (S_ISREG(st.st_mode)) {
  2000. /* Always resizes to the exact @offset */
  2001. return raw_regular_truncate(bs, s->fd, offset, prealloc, errp);
  2002. }
  2003. if (prealloc != PREALLOC_MODE_OFF) {
  2004. error_setg(errp, "Preallocation mode '%s' unsupported for this "
  2005. "non-regular file", PreallocMode_str(prealloc));
  2006. return -ENOTSUP;
  2007. }
  2008. if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
  2009. int64_t cur_length = raw_getlength(bs);
  2010. if (offset != cur_length && exact) {
  2011. error_setg(errp, "Cannot resize device files");
  2012. return -ENOTSUP;
  2013. } else if (offset > cur_length) {
  2014. error_setg(errp, "Cannot grow device files");
  2015. return -EINVAL;
  2016. }
  2017. } else {
  2018. error_setg(errp, "Resizing this file is not supported");
  2019. return -ENOTSUP;
  2020. }
  2021. return 0;
  2022. }
  2023. #ifdef __OpenBSD__
  2024. static int64_t raw_getlength(BlockDriverState *bs)
  2025. {
  2026. BDRVRawState *s = bs->opaque;
  2027. int fd = s->fd;
  2028. struct stat st;
  2029. if (fstat(fd, &st))
  2030. return -errno;
  2031. if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
  2032. struct disklabel dl;
  2033. if (ioctl(fd, DIOCGDINFO, &dl))
  2034. return -errno;
  2035. return (uint64_t)dl.d_secsize *
  2036. dl.d_partitions[DISKPART(st.st_rdev)].p_size;
  2037. } else
  2038. return st.st_size;
  2039. }
  2040. #elif defined(__NetBSD__)
  2041. static int64_t raw_getlength(BlockDriverState *bs)
  2042. {
  2043. BDRVRawState *s = bs->opaque;
  2044. int fd = s->fd;
  2045. struct stat st;
  2046. if (fstat(fd, &st))
  2047. return -errno;
  2048. if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
  2049. struct dkwedge_info dkw;
  2050. if (ioctl(fd, DIOCGWEDGEINFO, &dkw) != -1) {
  2051. return dkw.dkw_size * 512;
  2052. } else {
  2053. struct disklabel dl;
  2054. if (ioctl(fd, DIOCGDINFO, &dl))
  2055. return -errno;
  2056. return (uint64_t)dl.d_secsize *
  2057. dl.d_partitions[DISKPART(st.st_rdev)].p_size;
  2058. }
  2059. } else
  2060. return st.st_size;
  2061. }
  2062. #elif defined(__sun__)
  2063. static int64_t raw_getlength(BlockDriverState *bs)
  2064. {
  2065. BDRVRawState *s = bs->opaque;
  2066. struct dk_minfo minfo;
  2067. int ret;
  2068. int64_t size;
  2069. ret = fd_open(bs);
  2070. if (ret < 0) {
  2071. return ret;
  2072. }
  2073. /*
  2074. * Use the DKIOCGMEDIAINFO ioctl to read the size.
  2075. */
  2076. ret = ioctl(s->fd, DKIOCGMEDIAINFO, &minfo);
  2077. if (ret != -1) {
  2078. return minfo.dki_lbsize * minfo.dki_capacity;
  2079. }
  2080. /*
  2081. * There are reports that lseek on some devices fails, but
  2082. * irc discussion said that contingency on contingency was overkill.
  2083. */
  2084. size = lseek(s->fd, 0, SEEK_END);
  2085. if (size < 0) {
  2086. return -errno;
  2087. }
  2088. return size;
  2089. }
  2090. #elif defined(CONFIG_BSD)
  2091. static int64_t raw_getlength(BlockDriverState *bs)
  2092. {
  2093. BDRVRawState *s = bs->opaque;
  2094. int fd = s->fd;
  2095. int64_t size;
  2096. struct stat sb;
  2097. #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
  2098. int reopened = 0;
  2099. #endif
  2100. int ret;
  2101. ret = fd_open(bs);
  2102. if (ret < 0)
  2103. return ret;
  2104. #if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
  2105. again:
  2106. #endif
  2107. if (!fstat(fd, &sb) && (S_IFCHR & sb.st_mode)) {
  2108. size = 0;
  2109. #ifdef DIOCGMEDIASIZE
  2110. if (ioctl(fd, DIOCGMEDIASIZE, (off_t *)&size)) {
  2111. size = 0;
  2112. }
  2113. #endif
  2114. #ifdef DIOCGPART
  2115. if (size == 0) {
  2116. struct partinfo pi;
  2117. if (ioctl(fd, DIOCGPART, &pi) == 0) {
  2118. size = pi.media_size;
  2119. }
  2120. }
  2121. #endif
  2122. #if defined(DKIOCGETBLOCKCOUNT) && defined(DKIOCGETBLOCKSIZE)
  2123. if (size == 0) {
  2124. uint64_t sectors = 0;
  2125. uint32_t sector_size = 0;
  2126. if (ioctl(fd, DKIOCGETBLOCKCOUNT, &sectors) == 0
  2127. && ioctl(fd, DKIOCGETBLOCKSIZE, &sector_size) == 0) {
  2128. size = sectors * sector_size;
  2129. }
  2130. }
  2131. #endif
  2132. if (size == 0) {
  2133. size = lseek(fd, 0LL, SEEK_END);
  2134. }
  2135. if (size < 0) {
  2136. return -errno;
  2137. }
  2138. #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  2139. switch(s->type) {
  2140. case FTYPE_CD:
  2141. /* XXX FreeBSD acd returns UINT_MAX sectors for an empty drive */
  2142. if (size == 2048LL * (unsigned)-1)
  2143. size = 0;
  2144. /* XXX no disc? maybe we need to reopen... */
  2145. if (size <= 0 && !reopened && cdrom_reopen(bs) >= 0) {
  2146. reopened = 1;
  2147. goto again;
  2148. }
  2149. }
  2150. #endif
  2151. } else {
  2152. size = lseek(fd, 0, SEEK_END);
  2153. if (size < 0) {
  2154. return -errno;
  2155. }
  2156. }
  2157. return size;
  2158. }
  2159. #else
  2160. static int64_t raw_getlength(BlockDriverState *bs)
  2161. {
  2162. BDRVRawState *s = bs->opaque;
  2163. int ret;
  2164. int64_t size;
  2165. ret = fd_open(bs);
  2166. if (ret < 0) {
  2167. return ret;
  2168. }
  2169. size = lseek(s->fd, 0, SEEK_END);
  2170. if (size < 0) {
  2171. return -errno;
  2172. }
  2173. return size;
  2174. }
  2175. #endif
  2176. static int64_t raw_get_allocated_file_size(BlockDriverState *bs)
  2177. {
  2178. struct stat st;
  2179. BDRVRawState *s = bs->opaque;
  2180. if (fstat(s->fd, &st) < 0) {
  2181. return -errno;
  2182. }
  2183. return (int64_t)st.st_blocks * 512;
  2184. }
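/*
 * Create a new raw image file: create and lock the file, truncate it to
 * zero, optionally set the btrfs NOCOW flag and an extent size hint, then
 * resize it to the requested size with the selected preallocation mode.
 */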
  2185. static int coroutine_fn
  2186. raw_co_create(BlockdevCreateOptions *options, Error **errp)
  2187. {
  2188. BlockdevCreateOptionsFile *file_opts;
  2189. Error *local_err = NULL;
  2190. int fd;
  2191. uint64_t perm, shared;
  2192. int result = 0;
  2193. /* Validate options and set default values */
  2194. assert(options->driver == BLOCKDEV_DRIVER_FILE);
  2195. file_opts = &options->u.file;
  2196. if (!file_opts->has_nocow) {
  2197. file_opts->nocow = false;
  2198. }
  2199. if (!file_opts->has_preallocation) {
  2200. file_opts->preallocation = PREALLOC_MODE_OFF;
  2201. }
  2202. if (!file_opts->has_extent_size_hint) {
  2203. file_opts->extent_size_hint = 1 * MiB;
  2204. }
  2205. if (file_opts->extent_size_hint > UINT32_MAX) {
  2206. result = -EINVAL;
  2207. error_setg(errp, "Extent size hint is too large");
  2208. goto out;
  2209. }
  2210. /* Create file */
  2211. fd = qemu_create(file_opts->filename, O_RDWR | O_BINARY, 0644, errp);
  2212. if (fd < 0) {
  2213. result = -errno;
  2214. goto out;
  2215. }
  2216. /* Take permissions: We want to discard everything, so we need
  2217. * BLK_PERM_WRITE; and truncation to the desired size requires
  2218. * BLK_PERM_RESIZE.
  2219. * On the other hand, we cannot share the RESIZE permission
  2220. * because we promise that after this function, the file has the
  2221. * size given in the options. If someone else were to resize it
  2222. * concurrently, we could not guarantee that.
  2223. * Note that after this function, we can no longer guarantee that
  2224. * the file is not touched by a third party, so it may be resized
  2225. * then. */
  2226. perm = BLK_PERM_WRITE | BLK_PERM_RESIZE;
  2227. shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
  2228. /* Step one: Take locks */
  2229. result = raw_apply_lock_bytes(NULL, fd, perm, ~shared, false, errp);
  2230. if (result < 0) {
  2231. goto out_close;
  2232. }
  2233. /* Step two: Check that nobody else has taken conflicting locks */
  2234. result = raw_check_lock_bytes(fd, perm, shared, errp);
  2235. if (result < 0) {
  2236. error_append_hint(errp,
  2237. "Is another process using the image [%s]?\n",
  2238. file_opts->filename);
  2239. goto out_unlock;
  2240. }
  2241. /* Clear the file by truncating it to 0 */
  2242. result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp);
  2243. if (result < 0) {
  2244. goto out_unlock;
  2245. }
  2246. if (file_opts->nocow) {
  2247. #ifdef __linux__
/* Set the NOCOW flag to work around performance issues on file systems
 * like btrfs. This is an optimisation; the FS_IOC_SETFLAGS ioctl return
 * value is ignored since a failure here should not block the remaining
 * work.
 */
  2253. int attr;
  2254. if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
  2255. attr |= FS_NOCOW_FL;
  2256. ioctl(fd, FS_IOC_SETFLAGS, &attr);
  2257. }
  2258. #endif
  2259. }
  2260. #ifdef FS_IOC_FSSETXATTR
  2261. /*
  2262. * Try to set the extent size hint. Failure is not fatal, and a warning is
  2263. * only printed if the option was explicitly specified.
  2264. */
  2265. {
  2266. struct fsxattr attr;
  2267. result = ioctl(fd, FS_IOC_FSGETXATTR, &attr);
  2268. if (result == 0) {
  2269. attr.fsx_xflags |= FS_XFLAG_EXTSIZE;
  2270. attr.fsx_extsize = file_opts->extent_size_hint;
  2271. result = ioctl(fd, FS_IOC_FSSETXATTR, &attr);
  2272. }
  2273. if (result < 0 && file_opts->has_extent_size_hint &&
  2274. file_opts->extent_size_hint)
  2275. {
  2276. warn_report("Failed to set extent size hint: %s",
  2277. strerror(errno));
  2278. }
  2279. }
  2280. #endif
  2281. /* Resize and potentially preallocate the file to the desired
  2282. * final size */
  2283. result = raw_regular_truncate(NULL, fd, file_opts->size,
  2284. file_opts->preallocation, errp);
  2285. if (result < 0) {
  2286. goto out_unlock;
  2287. }
  2288. out_unlock:
  2289. raw_apply_lock_bytes(NULL, fd, 0, 0, true, &local_err);
  2290. if (local_err) {
/* The above call should not fail, and if it does, that does
 * not mean the whole creation operation has failed. So
 * report it to the user for their convenience, but do not report
 * it to the caller. */
  2295. warn_report_err(local_err);
  2296. }
  2297. out_close:
  2298. if (qemu_close(fd) != 0 && result == 0) {
  2299. result = -errno;
  2300. error_setg_errno(errp, -result, "Could not close the new file");
  2301. }
  2302. out:
  2303. return result;
  2304. }
  2305. static int coroutine_fn raw_co_create_opts(BlockDriver *drv,
  2306. const char *filename,
  2307. QemuOpts *opts,
  2308. Error **errp)
  2309. {
  2310. BlockdevCreateOptions options;
  2311. int64_t total_size = 0;
  2312. int64_t extent_size_hint = 0;
  2313. bool has_extent_size_hint = false;
  2314. bool nocow = false;
  2315. PreallocMode prealloc;
  2316. char *buf = NULL;
  2317. Error *local_err = NULL;
  2318. /* Skip file: protocol prefix */
  2319. strstart(filename, "file:", &filename);
  2320. /* Read out options */
  2321. total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
  2322. BDRV_SECTOR_SIZE);
  2323. if (qemu_opt_get(opts, BLOCK_OPT_EXTENT_SIZE_HINT)) {
  2324. has_extent_size_hint = true;
  2325. extent_size_hint =
  2326. qemu_opt_get_size_del(opts, BLOCK_OPT_EXTENT_SIZE_HINT, -1);
  2327. }
  2328. nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
  2329. buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
  2330. prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
  2331. PREALLOC_MODE_OFF, &local_err);
  2332. g_free(buf);
  2333. if (local_err) {
  2334. error_propagate(errp, local_err);
  2335. return -EINVAL;
  2336. }
  2337. options = (BlockdevCreateOptions) {
  2338. .driver = BLOCKDEV_DRIVER_FILE,
  2339. .u.file = {
  2340. .filename = (char *) filename,
  2341. .size = total_size,
  2342. .has_preallocation = true,
  2343. .preallocation = prealloc,
  2344. .has_nocow = true,
  2345. .nocow = nocow,
  2346. .has_extent_size_hint = has_extent_size_hint,
  2347. .extent_size_hint = extent_size_hint,
  2348. },
  2349. };
  2350. return raw_co_create(&options, errp);
  2351. }
  2352. static int coroutine_fn raw_co_delete_file(BlockDriverState *bs,
  2353. Error **errp)
  2354. {
  2355. struct stat st;
  2356. int ret;
  2357. if (!(stat(bs->filename, &st) == 0) || !S_ISREG(st.st_mode)) {
  2358. error_setg_errno(errp, ENOENT, "%s is not a regular file",
  2359. bs->filename);
  2360. return -ENOENT;
  2361. }
  2362. ret = unlink(bs->filename);
  2363. if (ret < 0) {
  2364. ret = -errno;
  2365. error_setg_errno(errp, -ret, "Error when deleting file %s",
  2366. bs->filename);
  2367. }
  2368. return ret;
  2369. }
  2370. /*
  2371. * Find allocation range in @bs around offset @start.
  2372. * May change underlying file descriptor's file offset.
  2373. * If @start is not in a hole, store @start in @data, and the
  2374. * beginning of the next hole in @hole, and return 0.
  2375. * If @start is in a non-trailing hole, store @start in @hole and the
  2376. * beginning of the next non-hole in @data, and return 0.
  2377. * If @start is in a trailing hole or beyond EOF, return -ENXIO.
  2378. * If we can't find out, return a negative errno other than -ENXIO.
  2379. */
  2380. static int find_allocation(BlockDriverState *bs, off_t start,
  2381. off_t *data, off_t *hole)
  2382. {
  2383. #if defined SEEK_HOLE && defined SEEK_DATA
  2384. BDRVRawState *s = bs->opaque;
  2385. off_t offs;
  2386. /*
  2387. * SEEK_DATA cases:
  2388. * D1. offs == start: start is in data
  2389. * D2. offs > start: start is in a hole, next data at offs
  2390. * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
  2391. * or start is beyond EOF
  2392. * If the latter happens, the file has been truncated behind
  2393. * our back since we opened it. All bets are off then.
  2394. * Treating like a trailing hole is simplest.
  2395. * D4. offs < 0, errno != ENXIO: we learned nothing
  2396. */
  2397. offs = lseek(s->fd, start, SEEK_DATA);
  2398. if (offs < 0) {
  2399. return -errno; /* D3 or D4 */
  2400. }
  2401. if (offs < start) {
  2402. /* This is not a valid return by lseek(). We are safe to just return
  2403. * -EIO in this case, and we'll treat it like D4. */
  2404. return -EIO;
  2405. }
  2406. if (offs > start) {
  2407. /* D2: in hole, next data at offs */
  2408. *hole = start;
  2409. *data = offs;
  2410. return 0;
  2411. }
  2412. /* D1: in data, end not yet known */
  2413. /*
  2414. * SEEK_HOLE cases:
  2415. * H1. offs == start: start is in a hole
  2416. * If this happens here, a hole has been dug behind our back
  2417. * since the previous lseek().
  2418. * H2. offs > start: either start is in data, next hole at offs,
  2419. * or start is in trailing hole, EOF at offs
  2420. * Linux treats trailing holes like any other hole: offs ==
  2421. * start. Solaris seeks to EOF instead: offs > start (blech).
  2422. * If that happens here, a hole has been dug behind our back
  2423. * since the previous lseek().
  2424. * H3. offs < 0, errno = ENXIO: start is beyond EOF
  2425. * If this happens, the file has been truncated behind our
  2426. * back since we opened it. Treat it like a trailing hole.
  2427. * H4. offs < 0, errno != ENXIO: we learned nothing
  2428. * Pretend we know nothing at all, i.e. "forget" about D1.
  2429. */
  2430. offs = lseek(s->fd, start, SEEK_HOLE);
  2431. if (offs < 0) {
  2432. return -errno; /* D1 and (H3 or H4) */
  2433. }
  2434. if (offs < start) {
  2435. /* This is not a valid return by lseek(). We are safe to just return
  2436. * -EIO in this case, and we'll treat it like H4. */
  2437. return -EIO;
  2438. }
  2439. if (offs > start) {
  2440. /*
  2441. * D1 and H2: either in data, next hole at offs, or it was in
  2442. * data but is now in a trailing hole. In the latter case,
  2443. * all bets are off. Treating it as if it there was data all
  2444. * the way to EOF is safe, so simply do that.
  2445. */
  2446. *data = start;
  2447. *hole = offs;
  2448. return 0;
  2449. }
  2450. /* D1 and H1 */
  2451. return -EBUSY;
  2452. #else
  2453. return -ENOTSUP;
  2454. #endif
  2455. }
  2456. /*
  2457. * Returns the allocation status of the specified offset.
  2458. *
  2459. * The block layer guarantees 'offset' and 'bytes' are within bounds.
  2460. *
  2461. * 'pnum' is set to the number of bytes (including and immediately following
  2462. * the specified offset) that are known to be in the same
  2463. * allocated/unallocated state.
  2464. *
  2465. * 'bytes' is a soft cap for 'pnum'. If the information is free, 'pnum' may
  2466. * well exceed it.
  2467. */
  2468. static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
  2469. bool want_zero,
  2470. int64_t offset,
  2471. int64_t bytes, int64_t *pnum,
  2472. int64_t *map,
  2473. BlockDriverState **file)
  2474. {
  2475. off_t data = 0, hole = 0;
  2476. int ret;
  2477. assert(QEMU_IS_ALIGNED(offset | bytes, bs->bl.request_alignment));
  2478. ret = fd_open(bs);
  2479. if (ret < 0) {
  2480. return ret;
  2481. }
  2482. if (!want_zero) {
  2483. *pnum = bytes;
  2484. *map = offset;
  2485. *file = bs;
  2486. return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
  2487. }
  2488. ret = find_allocation(bs, offset, &data, &hole);
  2489. if (ret == -ENXIO) {
  2490. /* Trailing hole */
  2491. *pnum = bytes;
  2492. ret = BDRV_BLOCK_ZERO;
  2493. } else if (ret < 0) {
  2494. /* No info available, so pretend there are no holes */
  2495. *pnum = bytes;
  2496. ret = BDRV_BLOCK_DATA;
  2497. } else if (data == offset) {
  2498. /* On a data extent, compute bytes to the end of the extent,
  2499. * possibly including a partial sector at EOF. */
  2500. *pnum = hole - offset;
  2501. /*
  2502. * We are not allowed to return partial sectors, though, so
  2503. * round up if necessary.
  2504. */
  2505. if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) {
  2506. int64_t file_length = raw_getlength(bs);
  2507. if (file_length > 0) {
  2508. /* Ignore errors, this is just a safeguard */
  2509. assert(hole == file_length);
  2510. }
  2511. *pnum = ROUND_UP(*pnum, bs->bl.request_alignment);
  2512. }
  2513. ret = BDRV_BLOCK_DATA;
  2514. } else {
  2515. /* On a hole, compute bytes to the beginning of the next extent. */
  2516. assert(hole == offset);
  2517. *pnum = data - offset;
  2518. ret = BDRV_BLOCK_ZERO;
  2519. }
  2520. *map = offset;
  2521. *file = bs;
  2522. return ret | BDRV_BLOCK_OFFSET_VALID;
  2523. }
  2524. #if defined(__linux__)
  2525. /* Verify that the file is not in the page cache */
  2526. static void check_cache_dropped(BlockDriverState *bs, Error **errp)
  2527. {
  2528. const size_t window_size = 128 * 1024 * 1024;
  2529. BDRVRawState *s = bs->opaque;
  2530. void *window = NULL;
  2531. size_t length = 0;
  2532. unsigned char *vec;
  2533. size_t page_size;
  2534. off_t offset;
  2535. off_t end;
  2536. /* mincore(2) page status information requires 1 byte per page */
  2537. page_size = sysconf(_SC_PAGESIZE);
  2538. vec = g_malloc(DIV_ROUND_UP(window_size, page_size));
  2539. end = raw_getlength(bs);
  2540. for (offset = 0; offset < end; offset += window_size) {
  2541. void *new_window;
  2542. size_t new_length;
  2543. size_t vec_end;
  2544. size_t i;
  2545. int ret;
  2546. /* Unmap previous window if size has changed */
  2547. new_length = MIN(end - offset, window_size);
  2548. if (new_length != length) {
  2549. munmap(window, length);
  2550. window = NULL;
  2551. length = 0;
  2552. }
  2553. new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE,
  2554. s->fd, offset);
  2555. if (new_window == MAP_FAILED) {
  2556. error_setg_errno(errp, errno, "mmap failed");
  2557. break;
  2558. }
  2559. window = new_window;
  2560. length = new_length;
  2561. ret = mincore(window, length, vec);
  2562. if (ret < 0) {
  2563. error_setg_errno(errp, errno, "mincore failed");
  2564. break;
  2565. }
  2566. vec_end = DIV_ROUND_UP(length, page_size);
  2567. for (i = 0; i < vec_end; i++) {
  2568. if (vec[i] & 0x1) {
  2569. break;
  2570. }
  2571. }
  2572. if (i < vec_end) {
  2573. error_setg(errp, "page cache still in use!");
  2574. break;
  2575. }
  2576. }
  2577. if (window) {
  2578. munmap(window, length);
  2579. }
  2580. g_free(vec);
  2581. }
  2582. #endif /* __linux__ */
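/*
 * Invalidate the host page cache for this image: on Linux, flush outstanding
 * writes and then advise the kernel with POSIX_FADV_DONTNEED, optionally
 * verifying via check_cache_dropped(). A no-op with O_DIRECT, when
 * drop-cache is disabled, or on non-Linux hosts.
 */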
  2583. static void coroutine_fn raw_co_invalidate_cache(BlockDriverState *bs,
  2584. Error **errp)
  2585. {
  2586. BDRVRawState *s = bs->opaque;
  2587. int ret;
  2588. ret = fd_open(bs);
  2589. if (ret < 0) {
  2590. error_setg_errno(errp, -ret, "The file descriptor is not open");
  2591. return;
  2592. }
  2593. if (!s->drop_cache) {
  2594. return;
  2595. }
  2596. if (s->open_flags & O_DIRECT) {
  2597. return; /* No host kernel page cache */
  2598. }
  2599. #if defined(__linux__)
  2600. /* This sets the scene for the next syscall... */
  2601. ret = bdrv_co_flush(bs);
  2602. if (ret < 0) {
  2603. error_setg_errno(errp, -ret, "flush failed");
  2604. return;
  2605. }
  2606. /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
  2607. * process. These limitations are okay because we just fsynced the file,
  2608. * we don't use mmap, and the file should not be in use by other processes.
  2609. */
  2610. ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED);
  2611. if (ret != 0) { /* the return value is a positive errno */
  2612. error_setg_errno(errp, ret, "fadvise failed");
  2613. return;
  2614. }
  2615. if (s->check_cache_dropped) {
  2616. check_cache_dropped(bs, errp);
  2617. }
  2618. #else /* __linux__ */
  2619. /* Do nothing. Live migration to a remote host with cache.direct=off is
  2620. * unsupported on other host operating systems. Cache consistency issues
  2621. * may occur but no error is reported here, partly because that's the
  2622. * historical behavior and partly because it's hard to differentiate valid
  2623. * configurations that should not cause errors.
  2624. */
  2625. #endif /* !__linux__ */
  2626. }
  2627. static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret)
  2628. {
  2629. if (ret) {
  2630. s->stats.discard_nb_failed++;
  2631. } else {
  2632. s->stats.discard_nb_ok++;
  2633. s->stats.discard_bytes_ok += nbytes;
  2634. }
  2635. }
  2636. static coroutine_fn int
  2637. raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
  2638. bool blkdev)
  2639. {
  2640. BDRVRawState *s = bs->opaque;
  2641. RawPosixAIOData acb;
  2642. int ret;
  2643. acb = (RawPosixAIOData) {
  2644. .bs = bs,
  2645. .aio_fildes = s->fd,
  2646. .aio_type = QEMU_AIO_DISCARD,
  2647. .aio_offset = offset,
  2648. .aio_nbytes = bytes,
  2649. };
  2650. if (blkdev) {
  2651. acb.aio_type |= QEMU_AIO_BLKDEV;
  2652. }
  2653. ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
  2654. raw_account_discard(s, bytes, ret);
  2655. return ret;
  2656. }
  2657. static coroutine_fn int
  2658. raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
  2659. {
  2660. return raw_do_pdiscard(bs, offset, bytes, false);
  2661. }
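/*
 * Write zeroes via the thread pool, optionally unmapping the range when
 * BDRV_REQ_MAY_UNMAP is set. Requests that grow past EOF are serialised to
 * work around a Linux XFS AIO/fallocate interaction (see the comment in the
 * function body).
 */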
  2662. static int coroutine_fn
  2663. raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
  2664. BdrvRequestFlags flags, bool blkdev)
  2665. {
  2666. BDRVRawState *s = bs->opaque;
  2667. RawPosixAIOData acb;
  2668. ThreadPoolFunc *handler;
  2669. #ifdef CONFIG_FALLOCATE
  2670. if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
  2671. BdrvTrackedRequest *req;
  2672. /*
  2673. * This is a workaround for a bug in the Linux XFS driver,
  2674. * where writes submitted through the AIO interface will be
  2675. * discarded if they happen beyond a concurrently running
  2676. * fallocate() that increases the file length (i.e., both the
  2677. * write and the fallocate() happen beyond the EOF).
  2678. *
  2679. * To work around it, we extend the tracked request for this
  2680. * zero write until INT64_MAX (effectively infinity), and mark
  2681. * it as serializing.
  2682. *
  2683. * We have to enable this workaround for all filesystems and
  2684. * AIO modes (not just XFS with aio=native), because for
  2685. * remote filesystems we do not know the host configuration.
  2686. */
  2687. req = bdrv_co_get_self_request(bs);
  2688. assert(req);
  2689. assert(req->type == BDRV_TRACKED_WRITE);
  2690. assert(req->offset <= offset);
  2691. assert(req->offset + req->bytes >= offset + bytes);
  2692. req->bytes = BDRV_MAX_LENGTH - req->offset;
  2693. bdrv_check_request(req->offset, req->bytes, &error_abort);
  2694. bdrv_make_request_serialising(req, bs->bl.request_alignment);
  2695. }
  2696. #endif
  2697. acb = (RawPosixAIOData) {
  2698. .bs = bs,
  2699. .aio_fildes = s->fd,
  2700. .aio_type = QEMU_AIO_WRITE_ZEROES,
  2701. .aio_offset = offset,
  2702. .aio_nbytes = bytes,
  2703. };
  2704. if (blkdev) {
  2705. acb.aio_type |= QEMU_AIO_BLKDEV;
  2706. }
  2707. if (flags & BDRV_REQ_NO_FALLBACK) {
  2708. acb.aio_type |= QEMU_AIO_NO_FALLBACK;
  2709. }
  2710. if (flags & BDRV_REQ_MAY_UNMAP) {
  2711. acb.aio_type |= QEMU_AIO_DISCARD;
  2712. handler = handle_aiocb_write_zeroes_unmap;
  2713. } else {
  2714. handler = handle_aiocb_write_zeroes;
  2715. }
  2716. return raw_thread_pool_submit(bs, handler, &acb);
  2717. }
  2718. static int coroutine_fn raw_co_pwrite_zeroes(
  2719. BlockDriverState *bs, int64_t offset,
  2720. int64_t bytes, BdrvRequestFlags flags)
  2721. {
  2722. return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false);
  2723. }
  2724. static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
  2725. {
  2726. return 0;
  2727. }
  2728. static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs)
  2729. {
  2730. BDRVRawState *s = bs->opaque;
  2731. return (BlockStatsSpecificFile) {
  2732. .discard_nb_ok = s->stats.discard_nb_ok,
  2733. .discard_nb_failed = s->stats.discard_nb_failed,
  2734. .discard_bytes_ok = s->stats.discard_bytes_ok,
  2735. };
  2736. }
  2737. static BlockStatsSpecific *raw_get_specific_stats(BlockDriverState *bs)
  2738. {
  2739. BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
  2740. stats->driver = BLOCKDEV_DRIVER_FILE;
  2741. stats->u.file = get_blockstats_specific_file(bs);
  2742. return stats;
  2743. }
  2744. #if defined(HAVE_HOST_BLOCK_DEVICE)
  2745. static BlockStatsSpecific *hdev_get_specific_stats(BlockDriverState *bs)
  2746. {
  2747. BlockStatsSpecific *stats = g_new(BlockStatsSpecific, 1);
  2748. stats->driver = BLOCKDEV_DRIVER_HOST_DEVICE;
  2749. stats->u.host_device = get_blockstats_specific_file(bs);
  2750. return stats;
  2751. }
  2752. #endif /* HAVE_HOST_BLOCK_DEVICE */
  2753. static QemuOptsList raw_create_opts = {
  2754. .name = "raw-create-opts",
  2755. .head = QTAILQ_HEAD_INITIALIZER(raw_create_opts.head),
  2756. .desc = {
  2757. {
  2758. .name = BLOCK_OPT_SIZE,
  2759. .type = QEMU_OPT_SIZE,
  2760. .help = "Virtual disk size"
  2761. },
  2762. {
  2763. .name = BLOCK_OPT_NOCOW,
  2764. .type = QEMU_OPT_BOOL,
  2765. .help = "Turn off copy-on-write (valid only on btrfs)"
  2766. },
  2767. {
  2768. .name = BLOCK_OPT_PREALLOC,
  2769. .type = QEMU_OPT_STRING,
  2770. .help = "Preallocation mode (allowed values: off"
  2771. #ifdef CONFIG_POSIX_FALLOCATE
  2772. ", falloc"
  2773. #endif
  2774. ", full)"
  2775. },
  2776. {
  2777. .name = BLOCK_OPT_EXTENT_SIZE_HINT,
  2778. .type = QEMU_OPT_SIZE,
  2779. .help = "Extent size hint for the image file, 0 to disable"
  2780. },
  2781. { /* end of list */ }
  2782. }
  2783. };
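/*
 * First phase of a permission update: reopen the file descriptor if
 * auto-read-only needs a different mode, take the new byte-range locks, and
 * stash the new fd until raw_set_perm() commits or raw_abort_perm_update()
 * rolls back.
 */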
  2784. static int raw_check_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared,
  2785. Error **errp)
  2786. {
  2787. BDRVRawState *s = bs->opaque;
  2788. int input_flags = s->reopen_state ? s->reopen_state->flags : bs->open_flags;
  2789. int open_flags;
  2790. int ret;
  2791. /* We may need a new fd if auto-read-only switches the mode */
  2792. ret = raw_reconfigure_getfd(bs, input_flags, &open_flags, perm,
  2793. false, errp);
  2794. if (ret < 0) {
  2795. return ret;
  2796. } else if (ret != s->fd) {
  2797. Error *local_err = NULL;
  2798. /*
  2799. * Fail already check_perm() if we can't get a working O_DIRECT
  2800. * alignment with the new fd.
  2801. */
  2802. raw_probe_alignment(bs, ret, &local_err);
  2803. if (local_err) {
  2804. error_propagate(errp, local_err);
  2805. return -EINVAL;
  2806. }
  2807. s->perm_change_fd = ret;
  2808. s->perm_change_flags = open_flags;
  2809. }
  2810. /* Prepare permissions on old fd to avoid conflicts between old and new,
  2811. * but keep everything locked that new will need. */
  2812. ret = raw_handle_perm_lock(bs, RAW_PL_PREPARE, perm, shared, errp);
  2813. if (ret < 0) {
  2814. goto fail;
  2815. }
  2816. /* Copy locks to the new fd */
  2817. if (s->perm_change_fd && s->use_lock) {
  2818. ret = raw_apply_lock_bytes(NULL, s->perm_change_fd, perm, ~shared,
  2819. false, errp);
  2820. if (ret < 0) {
  2821. raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
  2822. goto fail;
  2823. }
  2824. }
  2825. return 0;
  2826. fail:
  2827. if (s->perm_change_fd) {
  2828. qemu_close(s->perm_change_fd);
  2829. }
  2830. s->perm_change_fd = 0;
  2831. return ret;
  2832. }
  2833. static void raw_set_perm(BlockDriverState *bs, uint64_t perm, uint64_t shared)
  2834. {
  2835. BDRVRawState *s = bs->opaque;
  2836. /* For reopen, we have already switched to the new fd (.bdrv_set_perm is
  2837. * called after .bdrv_reopen_commit) */
  2838. if (s->perm_change_fd && s->fd != s->perm_change_fd) {
  2839. qemu_close(s->fd);
  2840. s->fd = s->perm_change_fd;
  2841. s->open_flags = s->perm_change_flags;
  2842. }
  2843. s->perm_change_fd = 0;
  2844. raw_handle_perm_lock(bs, RAW_PL_COMMIT, perm, shared, NULL);
  2845. s->perm = perm;
  2846. s->shared_perm = shared;
  2847. }
  2848. static void raw_abort_perm_update(BlockDriverState *bs)
  2849. {
  2850. BDRVRawState *s = bs->opaque;
  2851. /* For reopen, .bdrv_reopen_abort is called afterwards and will close
  2852. * the file descriptor. */
  2853. if (s->perm_change_fd) {
  2854. qemu_close(s->perm_change_fd);
  2855. }
  2856. s->perm_change_fd = 0;
  2857. raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
  2858. }
  2859. static int coroutine_fn raw_co_copy_range_from(
  2860. BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
  2861. BdrvChild *dst, int64_t dst_offset, int64_t bytes,
  2862. BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
  2863. {
  2864. return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
  2865. read_flags, write_flags);
  2866. }
  2867. static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
  2868. BdrvChild *src,
  2869. int64_t src_offset,
  2870. BdrvChild *dst,
  2871. int64_t dst_offset,
  2872. int64_t bytes,
  2873. BdrvRequestFlags read_flags,
  2874. BdrvRequestFlags write_flags)
  2875. {
  2876. RawPosixAIOData acb;
  2877. BDRVRawState *s = bs->opaque;
  2878. BDRVRawState *src_s;
  2879. assert(dst->bs == bs);
  2880. if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) {
  2881. return -ENOTSUP;
  2882. }
  2883. src_s = src->bs->opaque;
  2884. if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) {
  2885. return -EIO;
  2886. }
  2887. acb = (RawPosixAIOData) {
  2888. .bs = bs,
  2889. .aio_type = QEMU_AIO_COPY_RANGE,
  2890. .aio_fildes = src_s->fd,
  2891. .aio_offset = src_offset,
  2892. .aio_nbytes = bytes,
  2893. .copy_range = {
  2894. .aio_fd2 = s->fd,
  2895. .aio_offset2 = dst_offset,
  2896. },
  2897. };
  2898. return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb);
  2899. }
BlockDriver bdrv_file = {
    .format_name = "file",
    .protocol_name = "file",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe = NULL, /* no probe for protocols */
    .bdrv_parse_filename = raw_parse_filename,
    .bdrv_file_open = raw_open,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_close = raw_close,
    .bdrv_co_create = raw_co_create,
    .bdrv_co_create_opts = raw_co_create_opts,
    .bdrv_has_zero_init = bdrv_has_zero_init_1,
    .bdrv_co_block_status = raw_co_block_status,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
    .bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes,
    .bdrv_co_delete_file = raw_co_delete_file,

    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
    .bdrv_co_pdiscard = raw_co_pdiscard,
    .bdrv_co_copy_range_from = raw_co_copy_range_from,
    .bdrv_co_copy_range_to = raw_co_copy_range_to,
    .bdrv_refresh_limits = raw_refresh_limits,
    .bdrv_io_plug = raw_aio_plug,
    .bdrv_io_unplug = raw_aio_unplug,
    .bdrv_attach_aio_context = raw_aio_attach_aio_context,

    .bdrv_co_truncate = raw_co_truncate,
    .bdrv_getlength = raw_getlength,
    .bdrv_get_info = raw_get_info,
    .bdrv_get_allocated_file_size = raw_get_allocated_file_size,
    .bdrv_get_specific_stats = raw_get_specific_stats,
    .bdrv_check_perm = raw_check_perm,
    .bdrv_set_perm = raw_set_perm,
    .bdrv_abort_perm_update = raw_abort_perm_update,
    .create_opts = &raw_create_opts,
    .mutable_opts = mutable_opts,
};

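/*
 * Example usage (illustrative command line; the filename and node-name
 * below are arbitrary placeholders):
 *   -blockdev driver=file,filename=disk.img,node-name=disk0
 * or the equivalent QMP blockdev-add command.
 */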
/***********************************************/
/* host device */

#if defined(HAVE_HOST_BLOCK_DEVICE)

#if defined(__APPLE__) && defined(__MACH__)
static kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
                                CFIndex maxPathSize, int flags);

#if !defined(MAC_OS_VERSION_12_0) \
    || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
#define IOMainPort IOMasterPort
#endif

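/*
 * Ask IOKit for ejectable optical media, preferring DVD media and falling
 * back to CD media.  On success the matched IOKit class name is returned
 * (to be freed with g_free()) and *mediaIterator is left positioned so
 * that GetBSDPath() can fetch the corresponding device node.
 */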
static char *FindEjectableOpticalMedia(io_iterator_t *mediaIterator)
{
    kern_return_t kernResult = KERN_FAILURE;
    mach_port_t mainPort;
    CFMutableDictionaryRef classesToMatch;
    const char *matching_array[] = {kIODVDMediaClass, kIOCDMediaClass};
    char *mediaType = NULL;

    kernResult = IOMainPort(MACH_PORT_NULL, &mainPort);
    if (KERN_SUCCESS != kernResult) {
        printf("IOMainPort returned %d\n", kernResult);
    }

    int index;
    for (index = 0; index < ARRAY_SIZE(matching_array); index++) {
        classesToMatch = IOServiceMatching(matching_array[index]);
        if (classesToMatch == NULL) {
            error_report("IOServiceMatching returned NULL for %s",
                         matching_array[index]);
            continue;
        }
        CFDictionarySetValue(classesToMatch, CFSTR(kIOMediaEjectableKey),
                             kCFBooleanTrue);
        kernResult = IOServiceGetMatchingServices(mainPort, classesToMatch,
                                                  mediaIterator);
        if (kernResult != KERN_SUCCESS) {
            error_report("Note: IOServiceGetMatchingServices returned %d",
                         kernResult);
            continue;
        }

        /* If a match was found, leave the loop */
        if (*mediaIterator != 0) {
            trace_file_FindEjectableOpticalMedia(matching_array[index]);
            mediaType = g_strdup(matching_array[index]);
            break;
        }
    }
    return mediaType;
}

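/*
 * Turn the first media object from the iterator into a /dev path.  With
 * BDRV_O_NOCACHE the "r" prefix is added so the raw (character, unbuffered)
 * device node is used instead of the buffered block node.
 */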
kern_return_t GetBSDPath(io_iterator_t mediaIterator, char *bsdPath,
                         CFIndex maxPathSize, int flags)
{
    io_object_t nextMedia;
    kern_return_t kernResult = KERN_FAILURE;

    *bsdPath = '\0';
    nextMedia = IOIteratorNext(mediaIterator);
    if (nextMedia) {
        CFTypeRef bsdPathAsCFString;

        bsdPathAsCFString = IORegistryEntryCreateCFProperty(nextMedia,
            CFSTR(kIOBSDNameKey), kCFAllocatorDefault, 0);
        if (bsdPathAsCFString) {
            size_t devPathLength;

            strcpy(bsdPath, _PATH_DEV);
            if (flags & BDRV_O_NOCACHE) {
                strcat(bsdPath, "r");
            }
            devPathLength = strlen(bsdPath);
            if (CFStringGetCString(bsdPathAsCFString, bsdPath + devPathLength,
                                   maxPathSize - devPathLength,
                                   kCFStringEncodingASCII)) {
                kernResult = KERN_SUCCESS;
            }
            CFRelease(bsdPathAsCFString);
        }
        IOObjectRelease(nextMedia);
    }
    return kernResult;
}

/* Sets up a real cdrom for use in QEMU */
static bool setup_cdrom(char *bsd_path, Error **errp)
{
    int index, num_of_test_partitions = 2, fd;
    char test_partition[MAXPATHLEN];
    bool partition_found = false;

    /* look for a working partition */
    for (index = 0; index < num_of_test_partitions; index++) {
        snprintf(test_partition, sizeof(test_partition), "%ss%d", bsd_path,
                 index);
        fd = qemu_open(test_partition, O_RDONLY | O_BINARY | O_LARGEFILE, NULL);
        if (fd >= 0) {
            partition_found = true;
            qemu_close(fd);
            break;
        }
    }

    /* if a working partition on the device was not found */
    if (partition_found == false) {
        error_setg(errp, "Failed to find a working partition on disc");
    } else {
        trace_file_setup_cdrom(test_partition);
        pstrcpy(bsd_path, MAXPATHLEN, test_partition);
    }
    return partition_found;
}

/* Prints directions on mounting and unmounting a device */
static void print_unmounting_directions(const char *file_name)
{
    error_report("If device %s is mounted on the desktop, unmount"
                 " it first before using it in QEMU", file_name);
    error_report("Command to unmount device: diskutil unmountDisk %s",
                 file_name);
    error_report("Command to mount device: diskutil mountDisk %s", file_name);
}
#endif /* defined(__APPLE__) && defined(__MACH__) */

static int hdev_probe_device(const char *filename)
{
    struct stat st;

    /* allow a dedicated CD-ROM driver to match with a higher priority */
    if (strstart(filename, "/dev/cdrom", NULL))
        return 50;

    if (stat(filename, &st) >= 0 &&
            (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
        return 100;
    }

    return 0;
}

static void hdev_parse_filename(const char *filename, QDict *options,
                                Error **errp)
{
    bdrv_parse_filename_strip_prefix(filename, "host_device:", options);
}

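/*
 * Detect whether the opened node is a Linux SCSI generic (/dev/sg*)
 * character device by probing the SG_GET_VERSION_NUM and SG_GET_SCSI_ID
 * ioctls; on non-Linux hosts this always reports false.
 */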
static bool hdev_is_sg(BlockDriverState *bs)
{
#if defined(__linux__)
    BDRVRawState *s = bs->opaque;
    struct stat st;
    struct sg_scsi_id scsiid;
    int sg_version;
    int ret;

    if (stat(bs->filename, &st) < 0 || !S_ISCHR(st.st_mode)) {
        return false;
    }

    ret = ioctl(s->fd, SG_GET_VERSION_NUM, &sg_version);
    if (ret < 0) {
        return false;
    }

    ret = ioctl(s->fd, SG_GET_SCSI_ID, &scsiid);
    if (ret >= 0) {
        trace_file_hdev_is_sg(scsiid.scsi_type, sg_version);
        return true;
    }
#endif

    return false;
}

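/*
 * Open a host block/character device.  On macOS a filename of exactly
 * "/dev/cdrom" is first rewritten to the BSD device node of the detected
 * optical drive; after the common open path succeeds, bs->sg is set when
 * the device turns out to be a SCSI generic node.
 */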
static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

#if defined(__APPLE__) && defined(__MACH__)
    /*
     * Caution: while qdict_get_str() is fine, getting non-string types
     * would require more care. When @options come from -blockdev or
     * blockdev_add, its members are typed according to the QAPI
     * schema, but when they come from -drive, they're all QString.
     */
    const char *filename = qdict_get_str(options, "filename");
    char bsd_path[MAXPATHLEN] = "";
    bool error_occurred = false;

    /* If using a real cdrom */
    if (strcmp(filename, "/dev/cdrom") == 0) {
        char *mediaType = NULL;
        kern_return_t ret_val;
        io_iterator_t mediaIterator = 0;

        mediaType = FindEjectableOpticalMedia(&mediaIterator);
        if (mediaType == NULL) {
            error_setg(errp, "Please make sure your CD/DVD is in the optical"
                       " drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        ret_val = GetBSDPath(mediaIterator, bsd_path, sizeof(bsd_path), flags);
        if (ret_val != KERN_SUCCESS) {
            error_setg(errp, "Could not get BSD path for optical drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* If a real optical drive was not found */
        if (bsd_path[0] == '\0') {
            error_setg(errp, "Failed to obtain bsd path for optical drive");
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        /* If using a cdrom disc and finding a partition on the disc failed */
        if (strncmp(mediaType, kIOCDMediaClass, 9) == 0 &&
            setup_cdrom(bsd_path, errp) == false) {
            print_unmounting_directions(bsd_path);
            error_occurred = true;
            goto hdev_open_Mac_error;
        }

        qdict_put_str(options, "filename", bsd_path);

hdev_open_Mac_error:
        g_free(mediaType);
        if (mediaIterator) {
            IOObjectRelease(mediaIterator);
        }
        if (error_occurred) {
            return -ENOENT;
        }
    }
#endif /* defined(__APPLE__) && defined(__MACH__) */

    s->type = FTYPE_FILE;

    ret = raw_open_common(bs, options, flags, 0, true, errp);
    if (ret < 0) {
#if defined(__APPLE__) && defined(__MACH__)
        if (*bsd_path) {
            filename = bsd_path;
        }
        /* if a physical device experienced an error while being opened */
        if (strncmp(filename, "/dev/", 5) == 0) {
            print_unmounting_directions(filename);
        }
#endif /* defined(__APPLE__) && defined(__MACH__) */
        return ret;
    }

    /* Since this does ioctl the device must be already opened */
    bs->sg = hdev_is_sg(bs);

    return ret;
}

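/*
 * Pass-through ioctls for host devices.  SG_IO persistent reservation
 * commands (PERSISTENT RESERVE IN/OUT) are diverted to the configured
 * pr-manager; everything else is executed as a plain ioctl on the thread
 * pool.
 */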
#if defined(__linux__)
static int coroutine_fn
hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
{
    BDRVRawState *s = bs->opaque;
    RawPosixAIOData acb;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        return ret;
    }

    if (req == SG_IO && s->pr_mgr) {
        struct sg_io_hdr *io_hdr = buf;
        if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
            io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
            return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs),
                                      s->fd, io_hdr);
        }
    }

    acb = (RawPosixAIOData) {
        .bs         = bs,
        .aio_type   = QEMU_AIO_IOCTL,
        .aio_fildes = s->fd,
        .aio_offset = 0,
        .ioctl      = {
            .buf        = buf,
            .cmd        = req,
        },
    };

    return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb);
}
#endif /* linux */

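/*
 * Discard and write-zeroes for host devices: identical to the raw_*
 * variants except that blkdev=true is passed down, so the helpers can use
 * the block-device specific paths (on Linux presumably BLKDISCARD /
 * BLKZEROOUT rather than the file-based fallocate calls).  fd_open()
 * guards against a device whose file descriptor is currently unavailable.
 */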
static coroutine_fn int
hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = fd_open(bs);
    if (ret < 0) {
        raw_account_discard(s, bytes, ret);
        return ret;
    }
    return raw_do_pdiscard(bs, offset, bytes, true);
}

static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    int rc;

    rc = fd_open(bs);
    if (rc < 0) {
        return rc;
    }

    return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true);
}

static BlockDriver bdrv_host_device = {
    .format_name = "host_device",
    .protocol_name = "host_device",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device = hdev_probe_device,
    .bdrv_parse_filename = hdev_parse_filename,
    .bdrv_file_open = hdev_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts = &bdrv_create_opts_simple,
    .mutable_opts = mutable_opts,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
    .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes,

    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
    .bdrv_co_pdiscard = hdev_co_pdiscard,
    .bdrv_co_copy_range_from = raw_co_copy_range_from,
    .bdrv_co_copy_range_to = raw_co_copy_range_to,
    .bdrv_refresh_limits = raw_refresh_limits,
    .bdrv_io_plug = raw_aio_plug,
    .bdrv_io_unplug = raw_aio_unplug,
    .bdrv_attach_aio_context = raw_aio_attach_aio_context,

    .bdrv_co_truncate = raw_co_truncate,
    .bdrv_getlength = raw_getlength,
    .bdrv_get_info = raw_get_info,
    .bdrv_get_allocated_file_size = raw_get_allocated_file_size,
    .bdrv_get_specific_stats = hdev_get_specific_stats,
    .bdrv_check_perm = raw_check_perm,
    .bdrv_set_perm = raw_set_perm,
    .bdrv_abort_perm_update = raw_abort_perm_update,
    .bdrv_probe_blocksizes = hdev_probe_blocksizes,
    .bdrv_probe_geometry = hdev_probe_geometry,

    /* generic scsi device */
#ifdef __linux__
    .bdrv_co_ioctl = hdev_co_ioctl,
#endif
};

#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
static void cdrom_parse_filename(const char *filename, QDict *options,
                                 Error **errp)
{
    bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options);
}
#endif

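/*
 * Linux host_cdrom: the device is opened with O_NONBLOCK so that a drive
 * without a disc can still be opened, and probing relies on the
 * CDROM_DRIVE_STATUS ioctl succeeding on the candidate block device.
 */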
#ifdef __linux__
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;

    s->type = FTYPE_CD;

    /* open will not fail even if no CD is inserted, so add O_NONBLOCK */
    return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp);
}

static int cdrom_probe_device(const char *filename)
{
    int fd, ret;
    int prio = 0;
    struct stat st;

    fd = qemu_open(filename, O_RDONLY | O_NONBLOCK, NULL);
    if (fd < 0) {
        goto out;
    }
    ret = fstat(fd, &st);
    if (ret == -1 || !S_ISBLK(st.st_mode)) {
        goto outc;
    }

    /* Attempt to detect via a CDROM specific ioctl */
    ret = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
    if (ret >= 0)
        prio = 100;

outc:
    qemu_close(fd);
out:
    return prio;
}

static bool cdrom_is_inserted(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    ret = ioctl(s->fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
    return ret == CDS_DISC_OK;
}

static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
{
    BDRVRawState *s = bs->opaque;

    if (eject_flag) {
        if (ioctl(s->fd, CDROMEJECT, NULL) < 0)
            perror("CDROMEJECT");
    } else {
        if (ioctl(s->fd, CDROMCLOSETRAY, NULL) < 0)
            perror("CDROMCLOSETRAY");
    }
}

static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
{
    BDRVRawState *s = bs->opaque;

    if (ioctl(s->fd, CDROM_LOCKDOOR, locked) < 0) {
        /*
         * Note: an error can happen if the distribution automatically
         * mounts the CD-ROM
         */
        /* perror("CDROM_LOCKDOOR"); */
    }
}

static BlockDriver bdrv_host_cdrom = {
    .format_name = "host_cdrom",
    .protocol_name = "host_cdrom",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device = cdrom_probe_device,
    .bdrv_parse_filename = cdrom_parse_filename,
    .bdrv_file_open = cdrom_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts = &bdrv_create_opts_simple,
    .mutable_opts = mutable_opts,
    .bdrv_co_invalidate_cache = raw_co_invalidate_cache,

    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
    .bdrv_refresh_limits = raw_refresh_limits,
    .bdrv_io_plug = raw_aio_plug,
    .bdrv_io_unplug = raw_aio_unplug,
    .bdrv_attach_aio_context = raw_aio_attach_aio_context,

    .bdrv_co_truncate = raw_co_truncate,
    .bdrv_getlength = raw_getlength,
    .has_variable_length = true,
    .bdrv_get_allocated_file_size = raw_get_allocated_file_size,

    /* removable device support */
    .bdrv_is_inserted = cdrom_is_inserted,
    .bdrv_eject = cdrom_eject,
    .bdrv_lock_medium = cdrom_lock_medium,

    /* generic scsi device */
    .bdrv_co_ioctl = hdev_co_ioctl,
};
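/*
 * Example usage (illustrative; the device path depends on the host):
 *   -blockdev driver=host_cdrom,filename=/dev/sr0,node-name=cd0
 */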
#endif /* __linux__ */

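/*
 * FreeBSD host_cdrom: no O_NONBLOCK handling is needed, but ejecting or
 * closing the tray reopens the device (see cdrom_reopen()) so the kernel
 * rereads the size of a newly loaded disc, and medium presence is inferred
 * from a non-zero device length.
 */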
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVRawState *s = bs->opaque;
    int ret;

    s->type = FTYPE_CD;

    ret = raw_open_common(bs, options, flags, 0, true, errp);
    if (ret) {
        return ret;
    }

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}

static int cdrom_probe_device(const char *filename)
{
    if (strstart(filename, "/dev/cd", NULL) ||
            strstart(filename, "/dev/acd", NULL))
        return 100;
    return 0;
}

static int cdrom_reopen(BlockDriverState *bs)
{
    BDRVRawState *s = bs->opaque;
    int fd;

    /*
     * Force reread of possibly changed/newly loaded disc,
     * FreeBSD seems to not notice sometimes...
     */
    if (s->fd >= 0)
        qemu_close(s->fd);
    fd = qemu_open(bs->filename, s->open_flags, NULL);
    if (fd < 0) {
        s->fd = -1;
        return -EIO;
    }
    s->fd = fd;

    /* make sure the door isn't locked at this time */
    ioctl(s->fd, CDIOCALLOW);
    return 0;
}

static bool cdrom_is_inserted(BlockDriverState *bs)
{
    return raw_getlength(bs) > 0;
}

static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
{
    BDRVRawState *s = bs->opaque;

    if (s->fd < 0)
        return;

    (void) ioctl(s->fd, CDIOCALLOW);

    if (eject_flag) {
        if (ioctl(s->fd, CDIOCEJECT) < 0)
            perror("CDIOCEJECT");
    } else {
        if (ioctl(s->fd, CDIOCCLOSE) < 0)
            perror("CDIOCCLOSE");
    }

    cdrom_reopen(bs);
}

static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
{
    BDRVRawState *s = bs->opaque;

    if (s->fd < 0)
        return;

    if (ioctl(s->fd, (locked ? CDIOCPREVENT : CDIOCALLOW)) < 0) {
        /*
         * Note: an error can happen if the distribution automatically
         * mounts the CD-ROM
         */
        /* perror("CDROM_LOCKDOOR"); */
    }
}

static BlockDriver bdrv_host_cdrom = {
    .format_name = "host_cdrom",
    .protocol_name = "host_cdrom",
    .instance_size = sizeof(BDRVRawState),
    .bdrv_needs_filename = true,
    .bdrv_probe_device = cdrom_probe_device,
    .bdrv_parse_filename = cdrom_parse_filename,
    .bdrv_file_open = cdrom_open,
    .bdrv_close = raw_close,
    .bdrv_reopen_prepare = raw_reopen_prepare,
    .bdrv_reopen_commit = raw_reopen_commit,
    .bdrv_reopen_abort = raw_reopen_abort,
    .bdrv_co_create_opts = bdrv_co_create_opts_simple,
    .create_opts = &bdrv_create_opts_simple,
    .mutable_opts = mutable_opts,

    .bdrv_co_preadv = raw_co_preadv,
    .bdrv_co_pwritev = raw_co_pwritev,
    .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
    .bdrv_refresh_limits = raw_refresh_limits,
    .bdrv_io_plug = raw_aio_plug,
    .bdrv_io_unplug = raw_aio_unplug,
    .bdrv_attach_aio_context = raw_aio_attach_aio_context,

    .bdrv_co_truncate = raw_co_truncate,
    .bdrv_getlength = raw_getlength,
    .has_variable_length = true,
    .bdrv_get_allocated_file_size = raw_get_allocated_file_size,

    /* removable device support */
    .bdrv_is_inserted = cdrom_is_inserted,
    .bdrv_eject = cdrom_eject,
    .bdrv_lock_medium = cdrom_lock_medium,
};
#endif /* __FreeBSD__ */

#endif /* HAVE_HOST_BLOCK_DEVICE */

static void bdrv_file_init(void)
{
    /*
     * Register all the drivers. Note that order is important, the driver
     * registered last will get probed first.
     */
    bdrv_register(&bdrv_file);
#if defined(HAVE_HOST_BLOCK_DEVICE)
    bdrv_register(&bdrv_host_device);
#ifdef __linux__
    bdrv_register(&bdrv_host_cdrom);
#endif
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    bdrv_register(&bdrv_host_cdrom);
#endif
#endif /* HAVE_HOST_BLOCK_DEVICE */
}

block_init(bdrv_file_init);