/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright Red Hat
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qobject/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"
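
/*
 * Option-string suffix understood by the legacy "nbd:..." filename syntax
 * parsed in nbd_parse_filename(), e.g.
 * "nbd:unix:/tmp/nbd.sock:exportname=myexport".
 */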
#define EN_OPTSTR ":exportname="

#define MAX_NBD_REQUESTS 16
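
/*
 * The NBD protocol identifies in-flight requests by a 64-bit cookie.  A
 * cookie of 0 is used here to mean "no reply pending", so slot indexes
 * into requests[] are offset by one when converted to cookies.
 */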
#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
#define INDEX_TO_COOKIE(index)  ((index) + 1)

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
} NBDClientRequest;
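
/*
 * Connection state machine (a sketch; see nbd_channel_error_locked() and
 * nbd_co_send_request() for the actual transitions):
 *   NBD_CLIENT_CONNECTING_WAIT:   reconnecting after an I/O error; requests
 *                                 wait for the reconnect, bounded by
 *                                 reconnect-delay
 *   NBD_CLIENT_CONNECTING_NOWAIT: still reconnecting, but requests fail fast
 *                                 rather than waiting
 *   NBD_CLIENT_CONNECTED:         negotiation complete, requests flow normally
 *   NBD_CLIENT_QUIT:              fatal error or teardown; no reconnect
 */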
typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;
    CoQueue free_sema;
    unsigned in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket. */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;

static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}

/* Called with s->receive_mutex taken.  */
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
{
    int i;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
            return;
        }
    }
}

/* Called with s->requests_lock held.  */
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
{
    QEMU_LOCK_GUARD(&s->requests_lock);
    nbd_channel_error_locked(s, ret);
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    reconnect_delay_timer_del(s);
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->reconnect_delay_timer);

    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}

static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}

static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);

    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}

static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
    /*
     * Called only after a socket error, so this is not performance sensitive.
     */
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;
}

/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int coroutine_fn GRAPH_RDLOCK
nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}

int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                bool blocking, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;
    IO_CODE();

    assert_bdrv_graph_readable();
    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(s->ioc, true);

    /* successfully connected */
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

    return 0;
}

/* Called with s->requests_lock held.  */
static bool nbd_client_connecting(BDRVNBDState *s)
{
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}

/* Called with s->requests_lock taken.  */
static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
{
    int ret;
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 1);

    trace_nbd_reconnect_attempt(s->bs->in_flight);

    if (blocking && !s->reconnect_delay_timer) {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
            qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
            s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /* Finalize previous connection if any */
    if (s->ioc) {
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
    trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
    qemu_mutex_lock(&s->requests_lock);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}
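
/*
 * Wait until the reply for @cookie has been read off the socket.  Only one
 * coroutine at a time reads reply headers; any other waiter marks itself as
 * receiving and yields until it is woken, either by the current reader when
 * its cookie arrives, or by nbd_co_receive_one_chunk() when the previous
 * reply has been consumed and s->reply.cookie is reset to 0.
 */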
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie,
                                            Error **errp)
{
    int ret;
    uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    while (true) {
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }

        if (s->reply.cookie != 0) {
            /*
             * Some other request is being handled now. It should already be
             * woken by whoever set s->reply.cookie (or never wait in this
             * yield). So, we should not wake it here.
             */
            ind2 = COOKIE_TO_INDEX(s->reply.cookie);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for two reasons:
             * 1. From this function, executing in a parallel coroutine,
             *    when our cookie is received.
             * 2. From nbd_co_receive_one_chunk(), when the previous request
             *    is finished and s->reply.cookie is set to 0.
             * Either way, it is OK to re-take the mutex and go to the next
             * iteration.
             */
            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and cookie is 0. We have to do the dirty work. */
        assert(s->reply.cookie == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, s->info.mode, errp);
        if (ret == 0) {
            ret = -EIO;
            error_setg(errp, "server dropped connection");
        }
        if (ret < 0) {
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) &&
            s->info.mode < NBD_MODE_STRUCTURED) {
            nbd_channel_error(s, -EINVAL);
            error_setg(errp, "unexpected structured reply");
            return -EINVAL;
        }
        ind2 = COOKIE_TO_INDEX(s->reply.cookie);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            error_setg(errp, "unexpected cookie value");
            return -EINVAL;
        }
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}
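
/*
 * Claim a free slot in s->requests[] (waiting on free_sema while all
 * MAX_NBD_REQUESTS slots are in flight), trigger a reconnect attempt if the
 * channel is down, then send the request header and optional write payload
 * under send_mutex so that requests are not interleaved on the socket.
 */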
static int coroutine_fn GRAPH_RDLOCK
nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                    QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        if (nbd_client_connecting(s)) {
            nbd_reconnect_attempt(s);
            qemu_co_queue_restart_all(&s->free_sema);
        }
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            goto err;
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->cookie = INDEX_TO_COOKIE(i);
    request->mode = s->info.mode;

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                              NULL) < 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);

    if (rc < 0) {
        qemu_mutex_lock(&s->requests_lock);
err:
        nbd_channel_error_locked(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
        }
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
        qemu_mutex_unlock(&s->requests_lock);
    }
    return rc;
}
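
/* Each helper reads a big-endian value at *payload and advances the cursor. */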
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}

static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, bool wide,
                                         uint64_t orig_length,
                                         NBDExtent64 *extent, Error **errp)
{
    uint32_t context_id;
    uint32_t count;
    size_t ext_len = wide ? sizeof(*extent) : sizeof(NBDExtent32);
    size_t pay_len = sizeof(context_id) + wide * sizeof(count) + ext_len;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < pay_len) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   s->info.context_id);
        return -EINVAL;
    }

    if (wide) {
        count = payload_advance32(&payload);
        extent->length = payload_advance64(&payload);
        extent->flags = payload_advance64(&payload);
    } else {
        count = 0;
        extent->length = payload_advance32(&payload);
        extent->flags = payload_advance32(&payload);
    }

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. Furthermore, a wide
     * server should have replied with an accurate count (we left
     * count at 0 for a narrow server).  However, it's easy enough to
     * ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (count != wide || chunk->length > pay_len) {
        trace_nbd_parse_blockstatus_compliance("unexpected extent count");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}

/*
 * nbd_parse_error_payload
 * On success, *request_ret is set to the server's error as a negative errno;
 * @errp is set only when the error chunk itself is malformed.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int coroutine_fn
nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
                                   QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
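
/*
 * Upper bound on the size of a structured-reply payload that we are willing
 * to g_new(); the length field is server-controlled, so this keeps a
 * misbehaving server from forcing a large allocation.
 */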
#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t cookie, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    ERRP_GUARD();
    int ret;
    int i = COOKIE_TO_INDEX(cookie);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    ret = nbd_receive_replies(s, cookie, errp);
    if (ret < 0) {
        error_prepend(errp, "Connection closed: ");
        return -EIO;
    }
    assert(s->ioc);

    assert(s->reply.cookie == cookie);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.mode >= NBD_MODE_STRUCTURED);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/*
 * nbd_co_receive_one_chunk
 * Read one reply chunk, wake up any coroutine waiting in
 * nbd_receive_replies(), and record a channel error if needed.
 * Return value is a fatal error code or a normal nbd reply error code.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t cookie, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* Copy the reply out before s->reply is reset for the next receiver */
        *reply = s->reply;
    }
    s->reply.cookie = 0;

    nbd_recv_coroutines_wake(s);

    return ret;
}
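
/*
 * NBDReplyChunkIter
 * Accumulates the outcome of iterating over the chunks of one reply:
 * @ret/@err record the first fatal (channel) error, @request_ret the first
 * per-request error reported by the server.
 */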
typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}

/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)

/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                                      NBDReplyChunkIter *iter,
                                                      uint64_t cookie,
                                                      QEMUIOVector *qiov,
                                                      NBDReply *reply,
                                                      void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || iter->ret < 0) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    qemu_mutex_lock(&s->requests_lock);
    s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

    return false;
}

static int coroutine_fn
nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
                           int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn
nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
                             uint64_t offset, QEMUIOVector *qiov,
                             int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
                            s->info.mode >= NBD_MODE_STRUCTURED,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn
nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
                                 uint64_t length, NBDExtent64 *extent,
                                 int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;
        bool wide;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            wide = chunk->type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT;
            if ((s->info.mode >= NBD_MODE_EXTENDED) != wide) {
                trace_nbd_extended_headers_compliance("block_status");
            }
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(
                    s, &reply.structured, payload, wide,
                    length, extent, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}

static int coroutine_fn GRAPH_RDLOCK
nbd_co_request(BlockDriverState *bs, NBDRequest *request,
               QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
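
    /*
     * Retry the request as long as the failure was a channel error and the
     * client is still in NBD_CLIENT_CONNECTING_WAIT, i.e. a reconnect is
     * expected to succeed within reconnect-delay.
     */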
  1037. do {
  1038. ret = nbd_co_send_request(bs, request, write_qiov);
  1039. if (ret < 0) {
  1040. continue;
  1041. }
  1042. ret = nbd_co_receive_return_code(s, request->cookie,
  1043. &request_ret, &local_err);
  1044. if (local_err) {
  1045. trace_nbd_co_request_fail(request->from, request->len,
  1046. request->cookie, request->flags,
  1047. request->type,
  1048. nbd_cmd_lookup(request->type),
  1049. ret, error_get_pretty(local_err));
  1050. error_free(local_err);
  1051. local_err = NULL;
  1052. }
  1053. } while (ret < 0 && nbd_client_will_reconnect(s));
  1054. return ret ? ret : request_ret;
  1055. }
  1056. static int coroutine_fn GRAPH_RDLOCK
  1057. nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
  1058. QEMUIOVector *qiov, BdrvRequestFlags flags)
  1059. {
  1060. int ret, request_ret;
  1061. Error *local_err = NULL;
  1062. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1063. NBDRequest request = {
  1064. .type = NBD_CMD_READ,
  1065. .from = offset,
  1066. .len = bytes,
  1067. };
  1068. assert(bytes <= NBD_MAX_BUFFER_SIZE);
  1069. if (!bytes) {
  1070. return 0;
  1071. }
  1072. /*
  1073. * Work around the fact that the block layer doesn't do
  1074. * byte-accurate sizing yet - if the read exceeds the server's
  1075. * advertised size because the block layer rounded size up, then
  1076. * truncate the request to the server and tail-pad with zero.
  1077. */
  1078. if (offset >= s->info.size) {
  1079. assert(bytes < BDRV_SECTOR_SIZE);
  1080. qemu_iovec_memset(qiov, 0, 0, bytes);
  1081. return 0;
  1082. }
  1083. if (offset + bytes > s->info.size) {
  1084. uint64_t slop = offset + bytes - s->info.size;
  1085. assert(slop < BDRV_SECTOR_SIZE);
  1086. qemu_iovec_memset(qiov, bytes - slop, 0, slop);
  1087. request.len -= slop;
  1088. }
  1089. do {
  1090. ret = nbd_co_send_request(bs, &request, NULL);
  1091. if (ret < 0) {
  1092. continue;
  1093. }
  1094. ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
  1095. &request_ret, &local_err);
  1096. if (local_err) {
  1097. trace_nbd_co_request_fail(request.from, request.len, request.cookie,
  1098. request.flags, request.type,
  1099. nbd_cmd_lookup(request.type),
  1100. ret, error_get_pretty(local_err));
  1101. error_free(local_err);
  1102. local_err = NULL;
  1103. }
  1104. } while (ret < 0 && nbd_client_will_reconnect(s));
  1105. return ret ? ret : request_ret;
  1106. }
  1107. static int coroutine_fn GRAPH_RDLOCK
  1108. nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
  1109. QEMUIOVector *qiov, BdrvRequestFlags flags)
  1110. {
  1111. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1112. NBDRequest request = {
  1113. .type = NBD_CMD_WRITE,
  1114. .from = offset,
  1115. .len = bytes,
  1116. };
  1117. assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
  1118. if (flags & BDRV_REQ_FUA) {
  1119. assert(s->info.flags & NBD_FLAG_SEND_FUA);
  1120. request.flags |= NBD_CMD_FLAG_FUA;
  1121. }
  1122. assert(bytes <= NBD_MAX_BUFFER_SIZE);
  1123. if (!bytes) {
  1124. return 0;
  1125. }
  1126. return nbd_co_request(bs, &request, qiov);
  1127. }
  1128. static int coroutine_fn GRAPH_RDLOCK
  1129. nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
  1130. BdrvRequestFlags flags)
  1131. {
  1132. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1133. NBDRequest request = {
  1134. .type = NBD_CMD_WRITE_ZEROES,
  1135. .from = offset,
  1136. .len = bytes,
  1137. };
  1138. /* rely on max_pwrite_zeroes */
  1139. assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
  1140. assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
  1141. if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
  1142. return -ENOTSUP;
  1143. }
  1144. if (flags & BDRV_REQ_FUA) {
  1145. assert(s->info.flags & NBD_FLAG_SEND_FUA);
  1146. request.flags |= NBD_CMD_FLAG_FUA;
  1147. }
  1148. if (!(flags & BDRV_REQ_MAY_UNMAP)) {
  1149. request.flags |= NBD_CMD_FLAG_NO_HOLE;
  1150. }
  1151. if (flags & BDRV_REQ_NO_FALLBACK) {
  1152. assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
  1153. request.flags |= NBD_CMD_FLAG_FAST_ZERO;
  1154. }
  1155. if (!bytes) {
  1156. return 0;
  1157. }
  1158. return nbd_co_request(bs, &request, NULL);
  1159. }
  1160. static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs)
  1161. {
  1162. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1163. NBDRequest request = { .type = NBD_CMD_FLUSH };
  1164. if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
  1165. return 0;
  1166. }
  1167. request.from = 0;
  1168. request.len = 0;
  1169. return nbd_co_request(bs, &request, NULL);
  1170. }
  1171. static int coroutine_fn GRAPH_RDLOCK
  1172. nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
  1173. {
  1174. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1175. NBDRequest request = {
  1176. .type = NBD_CMD_TRIM,
  1177. .from = offset,
  1178. .len = bytes,
  1179. };
  1180. /* rely on max_pdiscard */
  1181. assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);
  1182. assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
  1183. if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
  1184. return 0;
  1185. }
  1186. return nbd_co_request(bs, &request, NULL);
  1187. }
  1188. static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
  1189. BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
  1190. int64_t *pnum, int64_t *map, BlockDriverState **file)
  1191. {
  1192. int ret, request_ret;
  1193. NBDExtent64 extent = { 0 };
  1194. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1195. Error *local_err = NULL;
  1196. NBDRequest request = {
  1197. .type = NBD_CMD_BLOCK_STATUS,
  1198. .from = offset,
  1199. .len = MIN(bytes, s->info.size - offset),
  1200. .flags = NBD_CMD_FLAG_REQ_ONE,
  1201. };
  1202. if (!s->info.base_allocation) {
  1203. *pnum = bytes;
  1204. *map = offset;
  1205. *file = bs;
  1206. return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
  1207. }
  1208. if (s->info.mode < NBD_MODE_EXTENDED) {
  1209. request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
  1210. request.len);
  1211. }
  1212. /*
  1213. * Work around the fact that the block layer doesn't do
  1214. * byte-accurate sizing yet - if the status request exceeds the
  1215. * server's advertised size because the block layer rounded size
  1216. * up, we truncated the request to the server (above), or are
  1217. * called on just the hole.
  1218. */
  1219. if (offset >= s->info.size) {
  1220. *pnum = bytes;
  1221. assert(bytes < BDRV_SECTOR_SIZE);
  1222. /* Intentionally don't report offset_valid for the hole */
  1223. return BDRV_BLOCK_ZERO;
  1224. }
  1225. if (s->info.min_block) {
  1226. assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
  1227. }
  1228. do {
  1229. ret = nbd_co_send_request(bs, &request, NULL);
  1230. if (ret < 0) {
  1231. continue;
  1232. }
  1233. ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
  1234. &extent, &request_ret,
  1235. &local_err);
  1236. if (local_err) {
  1237. trace_nbd_co_request_fail(request.from, request.len, request.cookie,
  1238. request.flags, request.type,
  1239. nbd_cmd_lookup(request.type),
  1240. ret, error_get_pretty(local_err));
  1241. error_free(local_err);
  1242. local_err = NULL;
  1243. }
  1244. } while (ret < 0 && nbd_client_will_reconnect(s));
  1245. if (ret < 0 || request_ret < 0) {
  1246. return ret ? ret : request_ret;
  1247. }
  1248. assert(extent.length);
  1249. *pnum = extent.length;
  1250. *map = offset;
  1251. *file = bs;
  1252. return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
  1253. (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
  1254. BDRV_BLOCK_OFFSET_VALID;
  1255. }
  1256. static int nbd_client_reopen_prepare(BDRVReopenState *state,
  1257. BlockReopenQueue *queue, Error **errp)
  1258. {
  1259. BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;
  1260. if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
  1261. error_setg(errp, "Can't reopen read-only NBD mount as read/write");
  1262. return -EACCES;
  1263. }
  1264. return 0;
  1265. }
  1266. static void nbd_yank(void *opaque)
  1267. {
  1268. BlockDriverState *bs = opaque;
  1269. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1270. QEMU_LOCK_GUARD(&s->requests_lock);
  1271. qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
  1272. s->state = NBD_CLIENT_QUIT;
  1273. }
  1274. static void nbd_client_close(BlockDriverState *bs)
  1275. {
  1276. BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
  1277. NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };
  1278. if (s->ioc) {
  1279. nbd_send_request(s->ioc, &request);
  1280. }
  1281. nbd_teardown_connection(bs);
  1282. }
  1283. /*
  1284. * Parse nbd_open options
  1285. */
  1286. static int nbd_parse_uri(const char *filename, QDict *options)
  1287. {
  1288. g_autoptr(GUri) uri = g_uri_parse(filename, G_URI_FLAGS_NONE, NULL);
  1289. g_autoptr(GHashTable) qp = NULL;
  1290. const char *p;
  1291. int qp_n;
  1292. bool is_unix;
  1293. const char *uri_scheme, *uri_query, *uri_server;
  1294. int uri_port;
  1295. if (!uri) {
  1296. return -EINVAL;
  1297. }
  1298. /* transport */
  1299. uri_scheme = g_uri_get_scheme(uri);
  1300. if (!g_strcmp0(uri_scheme, "nbd")) {
  1301. is_unix = false;
  1302. } else if (!g_strcmp0(uri_scheme, "nbd+tcp")) {
  1303. is_unix = false;
  1304. } else if (!g_strcmp0(uri_scheme, "nbd+unix")) {
  1305. is_unix = true;
  1306. } else {
  1307. return -EINVAL;
  1308. }
  1309. p = g_uri_get_path(uri) ?: "";
  1310. if (p[0] == '/') {
  1311. p++;
  1312. }
  1313. if (p[0]) {
  1314. qdict_put_str(options, "export", p);
  1315. }
  1316. uri_query = g_uri_get_query(uri);
  1317. if (uri_query) {
  1318. qp = g_uri_parse_params(uri_query, -1, "&", G_URI_PARAMS_NONE, NULL);
  1319. if (!qp) {
  1320. return -EINVAL;
  1321. }
  1322. qp_n = g_hash_table_size(qp);
  1323. if (qp_n > 1 || (is_unix && !qp_n) || (!is_unix && qp_n)) {
  1324. return -EINVAL;
  1325. }
  1326. }
  1327. uri_server = g_uri_get_host(uri);
  1328. if (uri_server && !uri_server[0]) {
  1329. uri_server = NULL;
  1330. }
  1331. uri_port = g_uri_get_port(uri);
  1332. if (is_unix) {
  1333. /* nbd+unix:///export?socket=path */
  1334. const char *uri_socket = g_hash_table_lookup(qp, "socket");
  1335. if (uri_server || uri_port != -1 || !uri_socket) {
  1336. return -EINVAL;
  1337. }
  1338. qdict_put_str(options, "server.type", "unix");
  1339. qdict_put_str(options, "server.path", uri_socket);
  1340. } else {
  1341. char *port_str;
  1342. /* nbd[+tcp]://host[:port]/export */
  1343. if (!uri_server) {
  1344. return -EINVAL;
  1345. }
  1346. qdict_put_str(options, "server.type", "inet");
  1347. qdict_put_str(options, "server.host", uri_server);
  1348. port_str = g_strdup_printf("%d", uri_port > 0 ? uri_port
  1349. : NBD_DEFAULT_PORT);
  1350. qdict_put_str(options, "server.port", port_str);
  1351. g_free(port_str);
  1352. }
  1353. return 0;
  1354. }
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}
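
/*
 * Parse the legacy pseudo-filename syntax, e.g.:
 *   nbd:host:port[:exportname=name]
 *   nbd:unix:/path/to/socket[:exportname=name]
 * URI-style names (anything containing "://") are delegated to
 * nbd_parse_uri() instead.
 */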
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}
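
/*
 * Translate the legacy path/host/port options into the structured
 * "server." options, refusing ambiguous combinations.  Returns true on
 * success (including when no legacy option is present at all).
 */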
static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }
        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}
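
/*
 * Build a SocketAddress from the "server." sub-options via the QAPI
 * input visitor; the caller owns (and must free) the returned address.
 */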
static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}
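
/*
 * Look up a QCryptoTLSCreds object by ID and verify that it is usable
 * as a client-side endpoint; returns a new reference on success.
 */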
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}

static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "tls-hostname",
            .type = QEMU_OPT_STRING,
            .help = "Override hostname for validating TLS x509 certificate",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        {
            .name = "open-timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "In seconds. If zero, the nbd driver tries the connection "
                    "only once, and fails to open if the connection fails. "
                    "If non-zero, the nbd driver will repeat connection "
                    "attempts until successful or until @open-timeout seconds "
                    "have elapsed. Default 0",
        },
        { /* end of list */ }
    },
};
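
/*
 * Absorb the runtime options above into the driver state.  An
 * illustrative (hypothetical) -blockdev invocation using them,
 * line-wrapped here for readability:
 *   -blockdev driver=nbd,node-name=n0,server.type=inet,
 *       server.host=localhost,server.port=10809,export=disk0
 */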
static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name too long to send to server");
        goto error;
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
        if (!s->tlshostname &&
            s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
            s->tlshostname = g_strdup(s->saddr->u.inet.host);
        }
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        goto error;
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
    s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);

    ret = 0;

 error:
    qemu_opts_del(opts);
    return ret;
}
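
/*
 * Open entry point: set up locks and queues, register the yank
 * instance, process options, and establish the initial connection
 * (optionally retrying until @open-timeout expires).
 */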
static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_mutex_init(&s->requests_lock);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        goto fail;
    }

    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds,
                                        s->tlshostname);

    if (s->open_timeout) {
        nbd_client_connection_enable_retry(s->conn);
        open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                        s->open_timeout * NANOSECONDS_PER_SECOND);
    }

    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ret = nbd_do_establish_connection(bs, true, errp);
    if (ret < 0) {
        goto fail;
    }

    /*
     * The connect attempt is done, so we no longer need this timer.
     * Delete it, because we do not want it to be around when this node
     * is drained or closed.
     */
    open_timer_del(s);

    nbd_client_connection_enable_retry(s->conn);

    return 0;

fail:
    open_timer_del(s);
    nbd_clear_bdrvstate(bs);
    return ret;
}
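
/* Propagate the server's advertised sizing limits into the block layer. */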
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    /*
     * Assume that if the server supports extended headers, it also
     * supports unlimited size zero and trim commands.
     */
    if (s->info.mode >= NBD_MODE_EXTENDED) {
        bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
    }

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}

static void nbd_close(BlockDriverState *bs)
{
    nbd_client_close(bs);
    nbd_clear_bdrvstate(bs);
}

/*
 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
 * to a smaller size with exact=false, there is no reason to fail the
 * operation.
 *
 * Preallocation mode is ignored since it does not seem useful to fail when
 * we never change anything.
 */
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        error_setg(errp, "Cannot resize NBD nodes");
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        error_setg(errp, "Cannot grow NBD nodes");
        return -EINVAL;
    }

    return 0;
}

static int64_t coroutine_fn nbd_co_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}
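
/*
 * Reconstruct a pseudo-filename ("nbd://host:port/export" or
 * "nbd+unix:///export?socket=path") when the address is simple enough
 * to be represented that way.
 */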
static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;
    size_t len = 0;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s", host, port);
    }
    if (len >= sizeof(bs->exact_filename)) {
        /* Name is too long to represent exactly, so leave it empty. */
        bs->exact_filename[0] = '\0';
    }
}

static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is
     * no specification for how "export paths" would work, so NBD does not
     * have directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}

static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "tls-hostname",
    "server.",

    NULL
};
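
/*
 * Stop waiting on a delayed reconnect: downgrade CONNECTING_WAIT to
 * CONNECTING_NOWAIT so queued requests fail fast, and cancel any
 * connection attempt that is still in progress.
 */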
static void nbd_cancel_in_flight(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    reconnect_delay_timer_del(s);

    qemu_mutex_lock(&s->requests_lock);
    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    qemu_mutex_unlock(&s->requests_lock);

    nbd_co_establish_connection_cancel(s->conn);
}

static void nbd_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    BDRVNBDState *s = bs->opaque;

    /* The open_timer is used only during nbd_open() */
    assert(!s->open_timer);

    /*
     * The reconnect_delay_timer is scheduled in I/O paths when the
     * connection is lost, to cancel the reconnection attempt after a
     * given time.  Once this attempt is done (successfully or not),
     * nbd_reconnect_attempt() ensures the timer is deleted before the
     * respective I/O request is resumed.
     * Since the AioContext can only be changed when a node is drained,
     * the reconnect_delay_timer cannot be active here.
     */
    assert(!s->reconnect_delay_timer);
}

static void nbd_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    assert(!s->reconnect_delay_timer);
}

static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_open                  = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_open                  = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_open                  = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_client_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_co_getlength          = nbd_co_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};

static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);