server.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768
  1. /*
  2. * Copyright (C) 2016-2022 Red Hat, Inc.
  3. * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
  4. *
  5. * Network Block Device Server Side
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; under version 2 of the License.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include "qemu/osdep.h"
  20. #include "block/block_int.h"
  21. #include "block/export.h"
  22. #include "block/dirty-bitmap.h"
  23. #include "qapi/error.h"
  24. #include "qemu/queue.h"
  25. #include "trace.h"
  26. #include "nbd-internal.h"
  27. #include "qemu/units.h"
  28. #include "qemu/memalign.h"
  29. #define NBD_META_ID_BASE_ALLOCATION 0
  30. #define NBD_META_ID_ALLOCATION_DEPTH 1
  31. /* Dirty bitmaps use 'NBD_META_ID_DIRTY_BITMAP + i', so keep this id last. */
  32. #define NBD_META_ID_DIRTY_BITMAP 2
/*
 * NBD_MAX_BLOCK_STATUS_EXTENTS: 1 MiB of extents data.  An empirical
 * constant.  If an increase is needed, note that the NBD protocol
 * recommends no larger than 32 MiB, so that the client won't consider
 * the reply as a denial of service attack.
 */
  39. #define NBD_MAX_BLOCK_STATUS_EXTENTS (1 * MiB / 8)
  40. static int system_errno_to_nbd_errno(int err)
  41. {
  42. switch (err) {
  43. case 0:
  44. return NBD_SUCCESS;
  45. case EPERM:
  46. case EROFS:
  47. return NBD_EPERM;
  48. case EIO:
  49. return NBD_EIO;
  50. case ENOMEM:
  51. return NBD_ENOMEM;
  52. #ifdef EDQUOT
  53. case EDQUOT:
  54. #endif
  55. case EFBIG:
  56. case ENOSPC:
  57. return NBD_ENOSPC;
  58. case EOVERFLOW:
  59. return NBD_EOVERFLOW;
  60. case ENOTSUP:
  61. #if ENOTSUP != EOPNOTSUPP
  62. case EOPNOTSUPP:
  63. #endif
  64. return NBD_ENOTSUP;
  65. case ESHUTDOWN:
  66. return NBD_ESHUTDOWN;
  67. case EINVAL:
  68. default:
  69. return NBD_EINVAL;
  70. }
  71. }
/* Definitions for opaque data types */

typedef struct NBDRequestData NBDRequestData;

/* Per-request state tracked while a client request is in flight. */
struct NBDRequestData {
    NBDClient *client;  /* connection this request arrived on */
    uint8_t *data;      /* request payload buffer; ownership handled by
                         * request setup/teardown — not visible in this chunk */
    bool complete;      /* presumably set once the request is fully handled
                         * — confirm against the request loop */
};
/* One export offered by this server. */
struct NBDExport {
    BlockExport common;                 /* generic block-export state */

    char *name;                         /* export name presented to clients */
    char *description;                  /* optional description; may be NULL */
    uint64_t size;                      /* advertised size in bytes */
    uint16_t nbdflags;                  /* NBD_FLAG_* transmission flags sent
                                         * to the client */
    QTAILQ_HEAD(, NBDClient) clients;   /* clients attached to this export */
    QTAILQ_ENTRY(NBDExport) next;       /* link in the global 'exports' list */

    /* NOTE(review): eject notifier wiring is not visible in this chunk;
     * presumably used to stop the export when the medium is ejected. */
    BlockBackend *eject_notifier_blk;
    Notifier eject_notifier;

    bool allocation_depth;              /* qemu:allocation-depth context
                                         * available for this export */
    BdrvDirtyBitmap **export_bitmaps;   /* exported dirty bitmaps */
    size_t nr_export_bitmaps;           /* number of entries in export_bitmaps */
};

/* All exports currently offered by this server. */
static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports);
/* NBDExportMetaContexts represents a list of contexts to be exported,
 * as selected by NBD_OPT_SET_META_CONTEXT.  Also used for
 * NBD_OPT_LIST_META_CONTEXT. */
typedef struct NBDExportMetaContexts {
    NBDExport *exp;        /* export these contexts were negotiated against */
    size_t count;          /* number of negotiated contexts */
    bool base_allocation;  /* export base:allocation context (block status) */
    bool allocation_depth; /* export qemu:allocation-depth */
    bool *bitmaps;         /*
                            * export qemu:dirty-bitmap:<export bitmap name>,
                            * sized by exp->nr_export_bitmaps
                            */
} NBDExportMetaContexts;
/* One client connection to this server. */
struct NBDClient {
    int refcount;               /* reference count for this struct */
    void (*close_fn)(NBDClient *client, bool negotiated); /* teardown hook;
                                 * 'negotiated' reports whether negotiation
                                 * had completed */

    NBDExport *exp;             /* export bound via NBD_OPT_EXPORT_NAME or
                                 * NBD_OPT_GO; NULL during early negotiation */
    QCryptoTLSCreds *tlscreds;  /* server credentials for NBD_OPT_STARTTLS */
    char *tlsauthz;             /* TLS authorization ID; may be NULL */
    QIOChannelSocket *sioc;     /* The underlying data channel */
    QIOChannel *ioc;            /* The current I/O channel which may differ (eg TLS) */

    Coroutine *recv_coroutine;  /* coroutine reading requests — usage not
                                 * visible in this chunk */

    CoMutex send_lock;          /* presumably serializes replies on ioc —
                                 * confirm against the send path */
    Coroutine *send_coroutine;

    bool read_yielding;
    bool quiescing;

    QTAILQ_ENTRY(NBDClient) next;   /* link in exp->clients */
    int nb_requests;                /* number of requests in flight */
    bool closing;

    uint32_t check_align; /* If non-zero, check for aligned client requests */

    bool structured_reply;          /* client negotiated structured replies;
                                     * enables NBD_FLAG_SEND_DF */
    NBDExportMetaContexts export_meta; /* meta contexts negotiated so far */

    uint32_t opt;    /* Current option being negotiated */
    uint32_t optlen; /* remaining length of data in ioc for the option being
                        negotiated now */
};
  130. static void nbd_client_receive_next_request(NBDClient *client);
/* Basic flow for negotiation

   Server         Client
   Negotiate

   or

   Server         Client
   Negotiate #1
                  Option
   Negotiate #2

   ----

   followed by

   Server         Client
                  Request
   Response
                  Request
   Response
                  ...
   ...
                  Request (type == 2)
*/
  150. static inline void set_be_option_rep(NBDOptionReply *rep, uint32_t option,
  151. uint32_t type, uint32_t length)
  152. {
  153. stq_be_p(&rep->magic, NBD_REP_MAGIC);
  154. stl_be_p(&rep->option, option);
  155. stl_be_p(&rep->type, type);
  156. stl_be_p(&rep->length, length);
  157. }
  158. /* Send a reply header, including length, but no payload.
  159. * Return -errno on error, 0 on success. */
  160. static int nbd_negotiate_send_rep_len(NBDClient *client, uint32_t type,
  161. uint32_t len, Error **errp)
  162. {
  163. NBDOptionReply rep;
  164. trace_nbd_negotiate_send_rep_len(client->opt, nbd_opt_lookup(client->opt),
  165. type, nbd_rep_lookup(type), len);
  166. assert(len < NBD_MAX_BUFFER_SIZE);
  167. set_be_option_rep(&rep, client->opt, type, len);
  168. return nbd_write(client->ioc, &rep, sizeof(rep), errp);
  169. }
  170. /* Send a reply header with default 0 length.
  171. * Return -errno on error, 0 on success. */
  172. static int nbd_negotiate_send_rep(NBDClient *client, uint32_t type,
  173. Error **errp)
  174. {
  175. return nbd_negotiate_send_rep_len(client, type, 0, errp);
  176. }
  177. /* Send an error reply.
  178. * Return -errno on error, 0 on success. */
  179. static int G_GNUC_PRINTF(4, 0)
  180. nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type,
  181. Error **errp, const char *fmt, va_list va)
  182. {
  183. ERRP_GUARD();
  184. g_autofree char *msg = NULL;
  185. int ret;
  186. size_t len;
  187. msg = g_strdup_vprintf(fmt, va);
  188. len = strlen(msg);
  189. assert(len < NBD_MAX_STRING_SIZE);
  190. trace_nbd_negotiate_send_rep_err(msg);
  191. ret = nbd_negotiate_send_rep_len(client, type, len, errp);
  192. if (ret < 0) {
  193. return ret;
  194. }
  195. if (nbd_write(client->ioc, msg, len, errp) < 0) {
  196. error_prepend(errp, "write failed (error message): ");
  197. return -EIO;
  198. }
  199. return 0;
  200. }
  201. /*
  202. * Return a malloc'd copy of @name suitable for use in an error reply.
  203. */
  204. static char *
  205. nbd_sanitize_name(const char *name)
  206. {
  207. if (strnlen(name, 80) < 80) {
  208. return g_strdup(name);
  209. }
  210. /* XXX Should we also try to sanitize any control characters? */
  211. return g_strdup_printf("%.80s...", name);
  212. }
  213. /* Send an error reply.
  214. * Return -errno on error, 0 on success. */
  215. static int G_GNUC_PRINTF(4, 5)
  216. nbd_negotiate_send_rep_err(NBDClient *client, uint32_t type,
  217. Error **errp, const char *fmt, ...)
  218. {
  219. va_list va;
  220. int ret;
  221. va_start(va, fmt);
  222. ret = nbd_negotiate_send_rep_verr(client, type, errp, fmt, va);
  223. va_end(va);
  224. return ret;
  225. }
  226. /* Drop remainder of the current option, and send a reply with the
  227. * given error type and message. Return -errno on read or write
  228. * failure; or 0 if connection is still live. */
  229. static int G_GNUC_PRINTF(4, 0)
  230. nbd_opt_vdrop(NBDClient *client, uint32_t type, Error **errp,
  231. const char *fmt, va_list va)
  232. {
  233. int ret = nbd_drop(client->ioc, client->optlen, errp);
  234. client->optlen = 0;
  235. if (!ret) {
  236. ret = nbd_negotiate_send_rep_verr(client, type, errp, fmt, va);
  237. }
  238. return ret;
  239. }
  240. static int G_GNUC_PRINTF(4, 5)
  241. nbd_opt_drop(NBDClient *client, uint32_t type, Error **errp,
  242. const char *fmt, ...)
  243. {
  244. int ret;
  245. va_list va;
  246. va_start(va, fmt);
  247. ret = nbd_opt_vdrop(client, type, errp, fmt, va);
  248. va_end(va);
  249. return ret;
  250. }
  251. static int G_GNUC_PRINTF(3, 4)
  252. nbd_opt_invalid(NBDClient *client, Error **errp, const char *fmt, ...)
  253. {
  254. int ret;
  255. va_list va;
  256. va_start(va, fmt);
  257. ret = nbd_opt_vdrop(client, NBD_REP_ERR_INVALID, errp, fmt, va);
  258. va_end(va);
  259. return ret;
  260. }
/* Read size bytes from the unparsed payload of the current option.
 * If @check_nul, require that no NUL bytes appear in buffer.
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */
static int nbd_opt_read(NBDClient *client, void *buffer, size_t size,
                        bool check_nul, Error **errp)
{
    /* Asking for more than the client advertised for this option is a
     * protocol violation; reject the whole option. */
    if (size > client->optlen) {
        return nbd_opt_invalid(client, errp,
                               "Inconsistent lengths in option %s",
                               nbd_opt_lookup(client->opt));
    }
    /* Account for the bytes before reading, so optlen always reflects
     * what is still unread on the channel even if the read fails. */
    client->optlen -= size;
    if (qio_channel_read_all(client->ioc, buffer, size, errp) < 0) {
        return -EIO;
    }
    /* strnlen() == size proves none of the size bytes was a NUL */
    if (check_nul && strnlen(buffer, size) != size) {
        return nbd_opt_invalid(client, errp,
                               "Unexpected embedded NUL in option %s",
                               nbd_opt_lookup(client->opt));
    }
    return 1;
}
  284. /* Drop size bytes from the unparsed payload of the current option.
  285. * Return -errno on I/O error, 0 if option was completely handled by
  286. * sending a reply about inconsistent lengths, or 1 on success. */
  287. static int nbd_opt_skip(NBDClient *client, size_t size, Error **errp)
  288. {
  289. if (size > client->optlen) {
  290. return nbd_opt_invalid(client, errp,
  291. "Inconsistent lengths in option %s",
  292. nbd_opt_lookup(client->opt));
  293. }
  294. client->optlen -= size;
  295. return nbd_drop(client->ioc, size, errp) < 0 ? -EIO : 1;
  296. }
  297. /* nbd_opt_read_name
  298. *
  299. * Read a string with the format:
  300. * uint32_t len (<= NBD_MAX_STRING_SIZE)
  301. * len bytes string (not 0-terminated)
  302. *
  303. * On success, @name will be allocated.
  304. * If @length is non-null, it will be set to the actual string length.
  305. *
  306. * Return -errno on I/O error, 0 if option was completely handled by
  307. * sending a reply about inconsistent lengths, or 1 on success.
  308. */
  309. static int nbd_opt_read_name(NBDClient *client, char **name, uint32_t *length,
  310. Error **errp)
  311. {
  312. int ret;
  313. uint32_t len;
  314. g_autofree char *local_name = NULL;
  315. *name = NULL;
  316. ret = nbd_opt_read(client, &len, sizeof(len), false, errp);
  317. if (ret <= 0) {
  318. return ret;
  319. }
  320. len = cpu_to_be32(len);
  321. if (len > NBD_MAX_STRING_SIZE) {
  322. return nbd_opt_invalid(client, errp,
  323. "Invalid name length: %" PRIu32, len);
  324. }
  325. local_name = g_malloc(len + 1);
  326. ret = nbd_opt_read(client, local_name, len, true, errp);
  327. if (ret <= 0) {
  328. return ret;
  329. }
  330. local_name[len] = '\0';
  331. if (length) {
  332. *length = len;
  333. }
  334. *name = g_steal_pointer(&local_name);
  335. return 1;
  336. }
/* Send a single NBD_REP_SERVER reply to NBD_OPT_LIST, including payload.
 * Payload layout on the wire:
 *   uint32_t len   length of the export name
 *   len bytes      export name
 *   rest           free-form description
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_send_rep_list(NBDClient *client, NBDExport *exp,
                                       Error **errp)
{
    ERRP_GUARD();
    size_t name_len, desc_len;
    uint32_t len;
    /* Substitute "" so we never pass NULL to strlen()/nbd_write() */
    const char *name = exp->name ? exp->name : "";
    const char *desc = exp->description ? exp->description : "";
    QIOChannel *ioc = client->ioc;
    int ret;

    trace_nbd_negotiate_send_rep_list(name, desc);
    name_len = strlen(name);
    desc_len = strlen(desc);
    assert(name_len <= NBD_MAX_STRING_SIZE && desc_len <= NBD_MAX_STRING_SIZE);
    /* Total payload: the name-length field plus both strings */
    len = name_len + desc_len + sizeof(len);
    ret = nbd_negotiate_send_rep_len(client, NBD_REP_SERVER, len, errp);
    if (ret < 0) {
        return ret;
    }

    len = cpu_to_be32(name_len);
    if (nbd_write(ioc, &len, sizeof(len), errp) < 0) {
        error_prepend(errp, "write failed (name length): ");
        return -EINVAL;
    }

    if (nbd_write(ioc, name, name_len, errp) < 0) {
        error_prepend(errp, "write failed (name buffer): ");
        return -EINVAL;
    }

    if (nbd_write(ioc, desc, desc_len, errp) < 0) {
        error_prepend(errp, "write failed (description buffer): ");
        return -EINVAL;
    }

    return 0;
}
  373. /* Process the NBD_OPT_LIST command, with a potential series of replies.
  374. * Return -errno on error, 0 on success. */
  375. static int nbd_negotiate_handle_list(NBDClient *client, Error **errp)
  376. {
  377. NBDExport *exp;
  378. assert(client->opt == NBD_OPT_LIST);
  379. /* For each export, send a NBD_REP_SERVER reply. */
  380. QTAILQ_FOREACH(exp, &exports, next) {
  381. if (nbd_negotiate_send_rep_list(client, exp, errp)) {
  382. return -EINVAL;
  383. }
  384. }
  385. /* Finish with a NBD_REP_ACK. */
  386. return nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
  387. }
  388. static void nbd_check_meta_export(NBDClient *client)
  389. {
  390. if (client->exp != client->export_meta.exp) {
  391. client->export_meta.count = 0;
  392. }
  393. }
/* Send a reply to NBD_OPT_EXPORT_NAME.
 * Unlike NBD_OPT_GO, this legacy option has no way to return an
 * NBD_REP_* error to the client: any failure here terminates the
 * connection (note that no nbd_negotiate_send_rep* call appears below).
 * Return -errno on error, 0 on success. */
static int nbd_negotiate_handle_export_name(NBDClient *client, bool no_zeroes,
                                            Error **errp)
{
    ERRP_GUARD();
    g_autofree char *name = NULL;
    char buf[NBD_REPLY_EXPORT_NAME_SIZE] = "";
    size_t len;
    int ret;
    uint16_t myflags;

    /* Client sends:
        [20 ..  xx]   export name (length bytes)
       Server replies:
        [ 0 ..   7]   size
        [ 8 ..   9]   export flags
        [10 .. 133]   reserved     (0) [unless no_zeroes]
     */
    trace_nbd_negotiate_handle_export_name();
    if (client->optlen > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "Bad length received");
        return -EINVAL;
    }
    /* The name consumes the entire remaining option payload */
    name = g_malloc(client->optlen + 1);
    if (nbd_read(client->ioc, name, client->optlen, "export name", errp) < 0) {
        return -EIO;
    }
    name[client->optlen] = '\0';
    client->optlen = 0;

    trace_nbd_negotiate_handle_export_name_request(name);

    client->exp = nbd_export_find(name);
    if (!client->exp) {
        error_setg(errp, "export not found");
        return -EINVAL;
    }

    myflags = client->exp->nbdflags;
    if (client->structured_reply) {
        /* With structured replies we can honor DF (do not fragment) */
        myflags |= NBD_FLAG_SEND_DF;
    }
    trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags);
    stq_be_p(buf, client->exp->size);
    stw_be_p(buf + 8, myflags);
    /* Trailing 124 reserved zero bytes are skipped if no_zeroes */
    len = no_zeroes ? 10 : sizeof(buf);
    ret = nbd_write(client->ioc, buf, len, errp);
    if (ret < 0) {
        error_prepend(errp, "write failed: ");
        return ret;
    }

    /* Attach the client to the export; ref is dropped on disconnect */
    QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
    blk_exp_ref(&client->exp->common);
    nbd_check_meta_export(client);

    return 0;
}
  447. /* Send a single NBD_REP_INFO, with a buffer @buf of @length bytes.
  448. * The buffer does NOT include the info type prefix.
  449. * Return -errno on error, 0 if ready to send more. */
  450. static int nbd_negotiate_send_info(NBDClient *client,
  451. uint16_t info, uint32_t length, void *buf,
  452. Error **errp)
  453. {
  454. int rc;
  455. trace_nbd_negotiate_send_info(info, nbd_info_lookup(info), length);
  456. rc = nbd_negotiate_send_rep_len(client, NBD_REP_INFO,
  457. sizeof(info) + length, errp);
  458. if (rc < 0) {
  459. return rc;
  460. }
  461. info = cpu_to_be16(info);
  462. if (nbd_write(client->ioc, &info, sizeof(info), errp) < 0) {
  463. return -EIO;
  464. }
  465. if (nbd_write(client->ioc, buf, length, errp) < 0) {
  466. return -EIO;
  467. }
  468. return 0;
  469. }
  470. /* nbd_reject_length: Handle any unexpected payload.
  471. * @fatal requests that we quit talking to the client, even if we are able
  472. * to successfully send an error reply.
  473. * Return:
  474. * -errno transmission error occurred or @fatal was requested, errp is set
  475. * 0 error message successfully sent to client, errp is not set
  476. */
  477. static int nbd_reject_length(NBDClient *client, bool fatal, Error **errp)
  478. {
  479. int ret;
  480. assert(client->optlen);
  481. ret = nbd_opt_invalid(client, errp, "option '%s' has unexpected length",
  482. nbd_opt_lookup(client->opt));
  483. if (fatal && !ret) {
  484. error_setg(errp, "option '%s' has unexpected length",
  485. nbd_opt_lookup(client->opt));
  486. return -EINVAL;
  487. }
  488. return ret;
  489. }
/* Handle NBD_OPT_INFO and NBD_OPT_GO.
 * Return -errno on error, 0 if ready for next option, and 1 to move
 * into transmission phase. */
static int nbd_negotiate_handle_info(NBDClient *client, Error **errp)
{
    int rc;
    g_autofree char *name = NULL;
    NBDExport *exp;
    uint16_t requests;
    uint16_t request;
    uint32_t namelen = 0;
    bool sendname = false;
    bool blocksize = false;
    uint32_t sizes[3];
    char buf[sizeof(uint64_t) + sizeof(uint16_t)];
    uint32_t check_align = 0;
    uint16_t myflags;

    /* Client sends:
        4 bytes: L, name length (can be 0)
        L bytes: export name
        2 bytes: N, number of requests (can be 0)
        N * 2 bytes: N requests
    */
    rc = nbd_opt_read_name(client, &name, &namelen, errp);
    if (rc <= 0) {
        return rc;
    }
    trace_nbd_negotiate_handle_export_name_request(name);

    rc = nbd_opt_read(client, &requests, sizeof(requests), false, errp);
    if (rc <= 0) {
        return rc;
    }
    requests = be16_to_cpu(requests);
    trace_nbd_negotiate_handle_info_requests(requests);
    while (requests--) {
        rc = nbd_opt_read(client, &request, sizeof(request), false, errp);
        if (rc <= 0) {
            return rc;
        }
        request = be16_to_cpu(request);
        trace_nbd_negotiate_handle_info_request(request,
                                                nbd_info_lookup(request));
        /* We care about NBD_INFO_NAME and NBD_INFO_BLOCK_SIZE;
         * everything else is either a request we don't know or
         * something we send regardless of request */
        switch (request) {
        case NBD_INFO_NAME:
            sendname = true;
            break;
        case NBD_INFO_BLOCK_SIZE:
            blocksize = true;
            break;
        }
    }
    /* Trailing bytes after the request list are a client error */
    if (client->optlen) {
        return nbd_reject_length(client, false, errp);
    }

    exp = nbd_export_find(name);
    if (!exp) {
        /* Sanitize the name before echoing it back in the error */
        g_autofree char *sane_name = nbd_sanitize_name(name);

        return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_UNKNOWN,
                                          errp, "export '%s' not present",
                                          sane_name);
    }

    /* Don't bother sending NBD_INFO_NAME unless client requested it */
    if (sendname) {
        rc = nbd_negotiate_send_info(client, NBD_INFO_NAME, namelen, name,
                                     errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Send NBD_INFO_DESCRIPTION only if available, regardless of
     * client request */
    if (exp->description) {
        size_t len = strlen(exp->description);

        assert(len <= NBD_MAX_STRING_SIZE);
        rc = nbd_negotiate_send_info(client, NBD_INFO_DESCRIPTION,
                                     len, exp->description, errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Send NBD_INFO_BLOCK_SIZE always, but tweak the minimum size
     * according to whether the client requested it, and according to
     * whether this is OPT_INFO or OPT_GO. */
    /* minimum - 1 for back-compat, or actual if client will obey it. */
    if (client->opt == NBD_OPT_INFO || blocksize) {
        check_align = sizes[0] = blk_get_request_alignment(exp->common.blk);
    } else {
        sizes[0] = 1;
    }
    assert(sizes[0] <= NBD_MAX_BUFFER_SIZE);
    /* preferred - Hard-code to 4096 for now.
     * TODO: is blk_bs(blk)->bl.opt_transfer appropriate? */
    sizes[1] = MAX(4096, sizes[0]);
    /* maximum - At most 32M, but smaller as appropriate. */
    sizes[2] = MIN(blk_get_max_transfer(exp->common.blk), NBD_MAX_BUFFER_SIZE);
    trace_nbd_negotiate_handle_info_block_size(sizes[0], sizes[1], sizes[2]);
    sizes[0] = cpu_to_be32(sizes[0]);
    sizes[1] = cpu_to_be32(sizes[1]);
    sizes[2] = cpu_to_be32(sizes[2]);
    rc = nbd_negotiate_send_info(client, NBD_INFO_BLOCK_SIZE,
                                 sizeof(sizes), sizes, errp);
    if (rc < 0) {
        return rc;
    }

    /* Send NBD_INFO_EXPORT always */
    myflags = exp->nbdflags;
    if (client->structured_reply) {
        /* Structured replies let us honor the DF (do not fragment) flag */
        myflags |= NBD_FLAG_SEND_DF;
    }
    trace_nbd_negotiate_new_style_size_flags(exp->size, myflags);
    stq_be_p(buf, exp->size);
    stw_be_p(buf + 8, myflags);
    rc = nbd_negotiate_send_info(client, NBD_INFO_EXPORT,
                                 sizeof(buf), buf, errp);
    if (rc < 0) {
        return rc;
    }

    /*
     * If the client is just asking for NBD_OPT_INFO, but forgot to
     * request block sizes in a situation that would impact
     * performance, then return an error. But for NBD_OPT_GO, we
     * tolerate all clients, regardless of alignments.
     */
    if (client->opt == NBD_OPT_INFO && !blocksize &&
        blk_get_request_alignment(exp->common.blk) > 1) {
        return nbd_negotiate_send_rep_err(client,
                                          NBD_REP_ERR_BLOCK_SIZE_REQD,
                                          errp,
                                          "request NBD_INFO_BLOCK_SIZE to "
                                          "use this export");
    }

    /* Final reply */
    rc = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
    if (rc < 0) {
        return rc;
    }

    if (client->opt == NBD_OPT_GO) {
        /* GO (unlike INFO) attaches the client to the export */
        client->exp = exp;
        client->check_align = check_align;
        QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
        blk_exp_ref(&client->exp->common);
        nbd_check_meta_export(client);
        rc = 1;
    }
    return rc;
}
/* Handle NBD_OPT_STARTTLS. Return NULL to drop connection, or else the
 * new channel for all further (now-encrypted) communication. */
static QIOChannel *nbd_negotiate_handle_starttls(NBDClient *client,
                                                 Error **errp)
{
    QIOChannel *ioc;
    QIOChannelTLS *tioc;
    struct NBDTLSHandshakeData data = { 0 };

    assert(client->opt == NBD_OPT_STARTTLS);

    trace_nbd_negotiate_handle_starttls();
    ioc = client->ioc;
    /* Ack the option first; everything after this is expected to be TLS */
    if (nbd_negotiate_send_rep(client, NBD_REP_ACK, errp) < 0) {
        return NULL;
    }

    tioc = qio_channel_tls_new_server(ioc,
                                      client->tlscreds,
                                      client->tlsauthz,
                                      errp);
    if (!tioc) {
        return NULL;
    }

    qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-server-tls");
    trace_nbd_negotiate_handle_starttls_handshake();
    /* Drive the handshake to completion in a nested main loop; the
     * nbd_tls_handshake callback fills in @data (complete/error). */
    data.loop = g_main_loop_new(g_main_context_default(), FALSE);
    qio_channel_tls_handshake(tioc,
                              nbd_tls_handshake,
                              &data,
                              NULL,
                              NULL);

    if (!data.complete) {
        g_main_loop_run(data.loop);
    }
    g_main_loop_unref(data.loop);
    if (data.error) {
        /* Handshake failed: discard the half-built TLS channel */
        object_unref(OBJECT(tioc));
        error_propagate(errp, data.error);
        return NULL;
    }

    return QIO_CHANNEL(tioc);
}
/* nbd_negotiate_send_meta_context
 *
 * Send one chunk of reply to NBD_OPT_{LIST,SET}_META_CONTEXT
 *
 * For NBD_OPT_LIST_META_CONTEXT @context_id is ignored, 0 is used instead.
 *
 * Returns 0 on success, -EIO if the reply could not be written (errp set).
 */
static int nbd_negotiate_send_meta_context(NBDClient *client,
                                           const char *context,
                                           uint32_t context_id,
                                           Error **errp)
{
    NBDOptionReplyMetaContext opt;
    /* Header + context-id in iov[0], context name (no NUL) in iov[1] */
    struct iovec iov[] = {
        {.iov_base = &opt, .iov_len = sizeof(opt)},
        {.iov_base = (void *)context, .iov_len = strlen(context)}
    };

    assert(iov[1].iov_len <= NBD_MAX_STRING_SIZE);
    if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
        context_id = 0;
    }

    trace_nbd_negotiate_meta_query_reply(context, context_id);
    /* Reply length = payload after the option header + name length */
    set_be_option_rep(&opt.h, client->opt, NBD_REP_META_CONTEXT,
                      sizeof(opt) - sizeof(opt.h) + iov[1].iov_len);
    stl_be_p(&opt.context_id, context_id);

    return qio_channel_writev_all(client->ioc, iov, 2, errp) < 0 ? -EIO : 0;
}
  705. /*
  706. * Return true if @query matches @pattern, or if @query is empty when
  707. * the @client is performing _LIST_.
  708. */
  709. static bool nbd_meta_empty_or_pattern(NBDClient *client, const char *pattern,
  710. const char *query)
  711. {
  712. if (!*query) {
  713. trace_nbd_negotiate_meta_query_parse("empty");
  714. return client->opt == NBD_OPT_LIST_META_CONTEXT;
  715. }
  716. if (strcmp(query, pattern) == 0) {
  717. trace_nbd_negotiate_meta_query_parse(pattern);
  718. return true;
  719. }
  720. trace_nbd_negotiate_meta_query_skip("pattern not matched");
  721. return false;
  722. }
  723. /*
  724. * Return true and adjust @str in place if it begins with @prefix.
  725. */
  726. static bool nbd_strshift(const char **str, const char *prefix)
  727. {
  728. size_t len = strlen(prefix);
  729. if (strncmp(*str, prefix, len) == 0) {
  730. *str += len;
  731. return true;
  732. }
  733. return false;
  734. }
  735. /* nbd_meta_base_query
  736. *
  737. * Handle queries to 'base' namespace. For now, only the base:allocation
  738. * context is available. Return true if @query has been handled.
  739. */
  740. static bool nbd_meta_base_query(NBDClient *client, NBDExportMetaContexts *meta,
  741. const char *query)
  742. {
  743. if (!nbd_strshift(&query, "base:")) {
  744. return false;
  745. }
  746. trace_nbd_negotiate_meta_query_parse("base:");
  747. if (nbd_meta_empty_or_pattern(client, "allocation", query)) {
  748. meta->base_allocation = true;
  749. }
  750. return true;
  751. }
/* nbd_meta_qemu_query
 *
 * Handle queries to 'qemu' namespace. For now, only the qemu:dirty-bitmap:
 * and qemu:allocation-depth contexts are available. Return true if @query
 * has been handled.
 */
static bool nbd_meta_qemu_query(NBDClient *client, NBDExportMetaContexts *meta,
                                const char *query)
{
    size_t i;

    if (!nbd_strshift(&query, "qemu:")) {
        return false;
    }
    trace_nbd_negotiate_meta_query_parse("qemu:");

    if (!*query) {
        /* Bare "qemu:" query: on LIST, select every qemu: context */
        if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
            meta->allocation_depth = meta->exp->allocation_depth;
            if (meta->exp->nr_export_bitmaps) {
                memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
            }
        }
        trace_nbd_negotiate_meta_query_parse("empty");
        return true;
    }

    if (strcmp(query, "allocation-depth") == 0) {
        trace_nbd_negotiate_meta_query_parse("allocation-depth");
        meta->allocation_depth = meta->exp->allocation_depth;
        return true;
    }

    if (nbd_strshift(&query, "dirty-bitmap:")) {
        trace_nbd_negotiate_meta_query_parse("dirty-bitmap:");
        if (!*query) {
            /* Bare "qemu:dirty-bitmap:": on LIST, select all bitmaps */
            if (client->opt == NBD_OPT_LIST_META_CONTEXT &&
                meta->exp->nr_export_bitmaps) {
                memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
            }
            trace_nbd_negotiate_meta_query_parse("empty");
            return true;
        }

        /* Otherwise select the single exported bitmap matching @query */
        for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
            const char *bm_name;

            bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
            if (strcmp(bm_name, query) == 0) {
                meta->bitmaps[i] = true;
                trace_nbd_negotiate_meta_query_parse(query);
                return true;
            }
        }

        /* Unknown bitmap names are silently skipped, not errors */
        trace_nbd_negotiate_meta_query_skip("no dirty-bitmap match");
        return true;
    }

    /* Unknown qemu: sub-namespace: handled (skipped), not an error */
    trace_nbd_negotiate_meta_query_skip("unknown qemu context");
    return true;
}
/* nbd_negotiate_meta_query
 *
 * Parse namespace name and call corresponding function to parse body of the
 * query.
 *
 * The only supported namespaces are 'base' and 'qemu'.
 *
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */
static int nbd_negotiate_meta_query(NBDClient *client,
                                    NBDExportMetaContexts *meta, Error **errp)
{
    int ret;
    g_autofree char *query = NULL;
    uint32_t len;

    /* Query is a 32-bit big-endian length followed by that many bytes */
    ret = nbd_opt_read(client, &len, sizeof(len), false, errp);
    if (ret <= 0) {
        return ret;
    }
    /* Convert from big-endian wire order (byte swap is an involution,
     * so cpu_to_be32 here acts as be32_to_cpu) */
    len = cpu_to_be32(len);

    if (len > NBD_MAX_STRING_SIZE) {
        /* Over-long queries are skipped, not fatal */
        trace_nbd_negotiate_meta_query_skip("length too long");
        return nbd_opt_skip(client, len, errp);
    }

    /* +1 for the NUL terminator we add ourselves */
    query = g_malloc(len + 1);
    ret = nbd_opt_read(client, query, len, true, errp);
    if (ret <= 0) {
        return ret;
    }
    query[len] = '\0';

    if (nbd_meta_base_query(client, meta, query)) {
        return 1;
    }
    if (nbd_meta_qemu_query(client, meta, query)) {
        return 1;
    }

    /* Unknown namespaces are skipped, not errors */
    trace_nbd_negotiate_meta_query_skip("unknown namespace");
    return 1;
}
/* nbd_negotiate_meta_queries
 * Handle NBD_OPT_LIST_META_CONTEXT and NBD_OPT_SET_META_CONTEXT
 *
 * Return -errno on I/O error, or 0 if option was completely handled. */
static int nbd_negotiate_meta_queries(NBDClient *client,
                                      NBDExportMetaContexts *meta, Error **errp)
{
    int ret;
    g_autofree char *export_name = NULL;
    /* Mark unused to work around https://bugs.llvm.org/show_bug.cgi?id=3888 */
    g_autofree G_GNUC_UNUSED bool *bitmaps = NULL;
    NBDExportMetaContexts local_meta = {0};
    uint32_t nb_queries;
    size_t i;
    size_t count = 0;

    /* Per the spec, SET requires structured replies to be negotiated first */
    if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) {
        return nbd_opt_invalid(client, errp,
                               "request option '%s' when structured reply "
                               "is not negotiated",
                               nbd_opt_lookup(client->opt));
    }

    if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
        /* Only change the caller's meta on SET. */
        meta = &local_meta;
    }

    /* Reset any state left over from a previous SET */
    g_free(meta->bitmaps);
    memset(meta, 0, sizeof(*meta));

    ret = nbd_opt_read_name(client, &export_name, NULL, errp);
    if (ret <= 0) {
        return ret;
    }

    meta->exp = nbd_export_find(export_name);
    if (meta->exp == NULL) {
        g_autofree char *sane_name = nbd_sanitize_name(export_name);

        return nbd_opt_drop(client, NBD_REP_ERR_UNKNOWN, errp,
                            "export '%s' not present", sane_name);
    }
    meta->bitmaps = g_new0(bool, meta->exp->nr_export_bitmaps);
    if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
        /* Transfer ownership to the g_autofree local for LIST, since
         * local_meta goes out of scope when we return */
        bitmaps = meta->bitmaps;
    }

    ret = nbd_opt_read(client, &nb_queries, sizeof(nb_queries), false, errp);
    if (ret <= 0) {
        return ret;
    }
    /* Convert from big-endian wire order (byte swap is an involution) */
    nb_queries = cpu_to_be32(nb_queries);
    trace_nbd_negotiate_meta_context(nbd_opt_lookup(client->opt),
                                     export_name, nb_queries);

    if (client->opt == NBD_OPT_LIST_META_CONTEXT && !nb_queries) {
        /* enable all known contexts */
        meta->base_allocation = true;
        meta->allocation_depth = meta->exp->allocation_depth;
        if (meta->exp->nr_export_bitmaps) {
            memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
        }
    } else {
        for (i = 0; i < nb_queries; ++i) {
            ret = nbd_negotiate_meta_query(client, meta, errp);
            if (ret <= 0) {
                return ret;
            }
        }
    }

    /* Send one NBD_REP_META_CONTEXT chunk per selected context */
    if (meta->base_allocation) {
        ret = nbd_negotiate_send_meta_context(client, "base:allocation",
                                              NBD_META_ID_BASE_ALLOCATION,
                                              errp);
        if (ret < 0) {
            return ret;
        }
        count++;
    }

    if (meta->allocation_depth) {
        ret = nbd_negotiate_send_meta_context(client, "qemu:allocation-depth",
                                              NBD_META_ID_ALLOCATION_DEPTH,
                                              errp);
        if (ret < 0) {
            return ret;
        }
        count++;
    }

    for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
        const char *bm_name;
        g_autofree char *context = NULL;

        if (!meta->bitmaps[i]) {
            continue;
        }

        bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
        context = g_strdup_printf("qemu:dirty-bitmap:%s", bm_name);

        /* Context id is the bitmap's index offset by the dirty-bitmap base */
        ret = nbd_negotiate_send_meta_context(client, context,
                                              NBD_META_ID_DIRTY_BITMAP + i,
                                              errp);
        if (ret < 0) {
            return ret;
        }
        count++;
    }

    ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
    if (ret == 0) {
        meta->count = count;
    }

    return ret;
}
/* nbd_negotiate_options
 * Process all NBD_OPT_* client option commands, during fixed newstyle
 * negotiation.
 * Return:
 * -errno on error, errp is set
 * 0 on successful negotiation, errp is not set
 * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect,
 * errp is not set
 */
static int nbd_negotiate_options(NBDClient *client, Error **errp)
{
    uint32_t flags;
    bool fixedNewstyle = false;
    bool no_zeroes = false;

    /* Client sends:
       [ 0 .. 3] client flags

       Then we loop until NBD_OPT_EXPORT_NAME or NBD_OPT_GO:
       [ 0 .. 7] NBD_OPTS_MAGIC
       [ 8 .. 11] NBD option
       [12 .. 15] Data length
       ... Rest of request

       [ 0 .. 7] NBD_OPTS_MAGIC
       [ 8 .. 11] Second NBD option
       [12 .. 15] Data length
       ... Rest of request
    */

    if (nbd_read32(client->ioc, &flags, "flags", errp) < 0) {
        return -EIO;
    }
    trace_nbd_negotiate_options_flags(flags);
    if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) {
        fixedNewstyle = true;
        flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE;
    }
    if (flags & NBD_FLAG_C_NO_ZEROES) {
        no_zeroes = true;
        flags &= ~NBD_FLAG_C_NO_ZEROES;
    }
    /* Any flag we did not strip above is unknown and fatal */
    if (flags != 0) {
        error_setg(errp, "Unknown client flags 0x%" PRIx32 " received", flags);
        return -EINVAL;
    }

    while (1) {
        int ret;
        uint32_t option, length;
        uint64_t magic;

        if (nbd_read64(client->ioc, &magic, "opts magic", errp) < 0) {
            return -EINVAL;
        }
        trace_nbd_negotiate_options_check_magic(magic);
        if (magic != NBD_OPTS_MAGIC) {
            error_setg(errp, "Bad magic received");
            return -EINVAL;
        }

        if (nbd_read32(client->ioc, &option, "option", errp) < 0) {
            return -EINVAL;
        }
        client->opt = option;

        if (nbd_read32(client->ioc, &length, "option length", errp) < 0) {
            return -EINVAL;
        }
        /* The previous option must have consumed all its payload */
        assert(!client->optlen);
        client->optlen = length;

        if (length > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
                       length, NBD_MAX_BUFFER_SIZE);
            return -EINVAL;
        }

        trace_nbd_negotiate_options_check_option(option,
                                                 nbd_opt_lookup(option));
        if (client->tlscreds &&
            client->ioc == (QIOChannel *)client->sioc) {
            /* TLS is configured but the handshake has not happened yet
             * (client->ioc is still the raw socket): only STARTTLS is
             * permitted at this point. */
            QIOChannel *tioc;
            if (!fixedNewstyle) {
                error_setg(errp, "Unsupported option 0x%" PRIx32, option);
                return -EINVAL;
            }
            switch (option) {
            case NBD_OPT_STARTTLS:
                if (length) {
                    /* Unconditionally drop the connection if the client
                     * can't start a TLS negotiation correctly */
                    return nbd_reject_length(client, true, errp);
                }
                tioc = nbd_negotiate_handle_starttls(client, errp);
                if (!tioc) {
                    return -EIO;
                }
                ret = 0;
                /* Swap the raw channel for the encrypted one */
                object_unref(OBJECT(client->ioc));
                client->ioc = QIO_CHANNEL(tioc);
                break;

            case NBD_OPT_EXPORT_NAME:
                /* No way to return an error to client, so drop connection */
                error_setg(errp, "Option 0x%x not permitted before TLS",
                           option);
                return -EINVAL;

            default:
                /* Let the client keep trying, unless they asked to
                 * quit. Always try to give an error back to the
                 * client; but when replying to OPT_ABORT, be aware
                 * that the client may hang up before receiving the
                 * error, in which case we are fine ignoring the
                 * resulting EPIPE. */
                ret = nbd_opt_drop(client, NBD_REP_ERR_TLS_REQD,
                                   option == NBD_OPT_ABORT ? NULL : errp,
                                   "Option 0x%" PRIx32
                                   " not permitted before TLS", option);
                if (option == NBD_OPT_ABORT) {
                    return 1;
                }
                break;
            }
        } else if (fixedNewstyle) {
            switch (option) {
            case NBD_OPT_LIST:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else {
                    ret = nbd_negotiate_handle_list(client, errp);
                }
                break;

            case NBD_OPT_ABORT:
                /* NBD spec says we must try to reply before
                 * disconnecting, but that we must also tolerate
                 * guests that don't wait for our reply. */
                nbd_negotiate_send_rep(client, NBD_REP_ACK, NULL);
                return 1;

            case NBD_OPT_EXPORT_NAME:
                return nbd_negotiate_handle_export_name(client, no_zeroes,
                                                        errp);

            case NBD_OPT_INFO:
            case NBD_OPT_GO:
                ret = nbd_negotiate_handle_info(client, errp);
                if (ret == 1) {
                    /* ret == 1 means export selected; only GO ends
                     * negotiation */
                    assert(option == NBD_OPT_GO);
                    return 0;
                }
                break;

            case NBD_OPT_STARTTLS:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else if (client->tlscreds) {
                    /* Can only get here after a successful handshake */
                    ret = nbd_negotiate_send_rep_err(client,
                                                     NBD_REP_ERR_INVALID, errp,
                                                     "TLS already enabled");
                } else {
                    ret = nbd_negotiate_send_rep_err(client,
                                                     NBD_REP_ERR_POLICY, errp,
                                                     "TLS not configured");
                }
                break;

            case NBD_OPT_STRUCTURED_REPLY:
                if (length) {
                    ret = nbd_reject_length(client, false, errp);
                } else if (client->structured_reply) {
                    ret = nbd_negotiate_send_rep_err(
                        client, NBD_REP_ERR_INVALID, errp,
                        "structured reply already negotiated");
                } else {
                    ret = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
                    client->structured_reply = true;
                }
                break;

            case NBD_OPT_LIST_META_CONTEXT:
            case NBD_OPT_SET_META_CONTEXT:
                ret = nbd_negotiate_meta_queries(client, &client->export_meta,
                                                 errp);
                break;

            default:
                ret = nbd_opt_drop(client, NBD_REP_ERR_UNSUP, errp,
                                   "Unsupported option %" PRIu32 " (%s)",
                                   option, nbd_opt_lookup(option));
                break;
            }
        } else {
            /*
             * If broken new-style we should drop the connection
             * for anything except NBD_OPT_EXPORT_NAME
             */
            switch (option) {
            case NBD_OPT_EXPORT_NAME:
                return nbd_negotiate_handle_export_name(client, no_zeroes,
                                                        errp);

            default:
                error_setg(errp, "Unsupported option %" PRIu32 " (%s)",
                           option, nbd_opt_lookup(option));
                return -EINVAL;
            }
        }
        if (ret < 0) {
            return ret;
        }
    }
}
/* nbd_negotiate
 * Return:
 * -errno on error, errp is set
 * 0 on successful negotiation, errp is not set
 * 1 if client sent NBD_OPT_ABORT, i.e. on valid disconnect,
 * errp is not set
 */
static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp)
{
    ERRP_GUARD();
    char buf[NBD_OLDSTYLE_NEGOTIATE_SIZE] = "";
    int ret;

    /* Old style negotiation header, no room for options
       [ 0 .. 7] passwd ("NBDMAGIC")
       [ 8 .. 15] magic (NBD_CLIENT_MAGIC)
       [16 .. 23] size
       [24 .. 27] export flags (zero-extended)
       [28 .. 151] reserved (0)

       New style negotiation header, client can send options
       [ 0 .. 7] passwd ("NBDMAGIC")
       [ 8 .. 15] magic (NBD_OPTS_MAGIC)
       [16 .. 17] server flags (0)
       ....options sent, ending in NBD_OPT_EXPORT_NAME or NBD_OPT_GO....
    */

    qio_channel_set_blocking(client->ioc, false, NULL);

    trace_nbd_negotiate_begin();
    /* Only the new-style 18-byte header is sent; this server does not
     * speak old-style negotiation. */
    memcpy(buf, "NBDMAGIC", 8);
    stq_be_p(buf + 8, NBD_OPTS_MAGIC);
    stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES);

    if (nbd_write(client->ioc, buf, 18, errp) < 0) {
        error_prepend(errp, "write failed: ");
        return -EINVAL;
    }
    ret = nbd_negotiate_options(client, errp);
    if (ret != 0) {
        if (ret < 0) {
            error_prepend(errp, "option negotiation failed: ");
        }
        return ret;
    }

    /* Attach the channel to the same AioContext as the export */
    if (client->exp && client->exp->common.ctx) {
        qio_channel_attach_aio_context(client->ioc, client->exp->common.ctx);
    }

    /* All option payloads must have been fully consumed by now */
    assert(!client->optlen);
    trace_nbd_negotiate_success();

    return 0;
}
/* nbd_read_eof
 * Tries to read @size bytes from @ioc. This is a local implementation of
 * qio_channel_readv_all_eof. We have it here because we need it to be
 * interruptible and to know when the coroutine is yielding.
 * Returns 1 on success
 * 0 on eof, when no data was read (errp is not set)
 * negative errno on failure (errp is set)
 */
static inline int coroutine_fn
nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp)
{
    bool partial = false;  /* true once at least one byte has been read */

    assert(size);
    while (size > 0) {
        struct iovec iov = { .iov_base = buffer, .iov_len = size };
        ssize_t len;

        len = qio_channel_readv(client->ioc, &iov, 1, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            /* No data yet: yield until the channel is readable again.
             * read_yielding lets nbd_drained_poll() wake us. */
            client->read_yielding = true;
            qio_channel_yield(client->ioc, G_IO_IN);
            client->read_yielding = false;
            if (client->quiescing) {
                /* Interrupted by a drained section */
                return -EAGAIN;
            }
            continue;
        } else if (len < 0) {
            return -EIO;
        } else if (len == 0) {
            if (partial) {
                /* EOF in the middle of a request is an error */
                error_setg(errp,
                           "Unexpected end-of-file before all bytes were read");
                return -EIO;
            } else {
                /* Clean EOF before any data: orderly disconnect */
                return 0;
            }
        }

        partial = true;
        size -= len;
        buffer = (uint8_t *) buffer + len;
    }

    return 1;
}
/* Read and decode one fixed-size NBD request header from the client.
 * Returns 0 on success, -EIO on EOF, other negative errno on read error
 * or -EINVAL on bad magic. */
static int nbd_receive_request(NBDClient *client, NBDRequest *request,
                               Error **errp)
{
    uint8_t buf[NBD_REQUEST_SIZE];
    uint32_t magic;
    int ret;

    ret = nbd_read_eof(client, buf, sizeof(buf), errp);
    if (ret < 0) {
        return ret;
    }
    if (ret == 0) {
        /* Clean EOF where a request was expected: treat as I/O error */
        return -EIO;
    }

    /* Request
       [ 0 .. 3] magic (NBD_REQUEST_MAGIC)
       [ 4 .. 5] flags (NBD_CMD_FLAG_FUA, ...)
       [ 6 .. 7] type (NBD_CMD_READ, ...)
       [ 8 .. 15] handle
       [16 .. 23] from
       [24 .. 27] len
    */

    magic = ldl_be_p(buf);
    request->flags = lduw_be_p(buf + 4);
    request->type = lduw_be_p(buf + 6);
    request->handle = ldq_be_p(buf + 8);
    request->from = ldq_be_p(buf + 16);
    request->len = ldl_be_p(buf + 24);

    trace_nbd_receive_request(magic, request->flags, request->type,
                              request->from, request->len);

    if (magic != NBD_REQUEST_MAGIC) {
        error_setg(errp, "invalid magic (got 0x%" PRIx32 ")", magic);
        return -EINVAL;
    }
    return 0;
}
  1268. #define MAX_NBD_REQUESTS 16
  1269. void nbd_client_get(NBDClient *client)
  1270. {
  1271. client->refcount++;
  1272. }
/* Drop a reference on @client; frees all client state when the last
 * reference goes away. */
void nbd_client_put(NBDClient *client)
{
    if (--client->refcount == 0) {
        /* The last reference should be dropped by client->close,
         * which is called by client_close.
         */
        assert(client->closing);

        qio_channel_detach_aio_context(client->ioc);
        object_unref(OBJECT(client->sioc));
        object_unref(OBJECT(client->ioc));
        if (client->tlscreds) {
            object_unref(OBJECT(client->tlscreds));
        }
        g_free(client->tlsauthz);
        if (client->exp) {
            /* Unlink from the export's client list and drop our ref on it */
            QTAILQ_REMOVE(&client->exp->clients, client, next);
            blk_exp_unref(&client->exp->common);
        }
        g_free(client->export_meta.bitmaps);
        g_free(client);
    }
}
/* Begin shutting down @client: shut the socket so in-flight requests
 * fail, then notify the owner via close_fn. Idempotent.
 * @negotiated is forwarded to close_fn and indicates whether negotiation
 * had completed. */
static void client_close(NBDClient *client, bool negotiated)
{
    if (client->closing) {
        return;
    }

    client->closing = true;

    /* Force requests to finish. They will drop their own references,
     * then we'll close the socket and free the NBDClient.
     */
    qio_channel_shutdown(client->ioc, QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);

    /* Also tell the client, so that they release their reference. */
    if (client->close_fn) {
        client->close_fn(client, negotiated);
    }
}
  1311. static NBDRequestData *nbd_request_get(NBDClient *client)
  1312. {
  1313. NBDRequestData *req;
  1314. assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
  1315. client->nb_requests++;
  1316. req = g_new0(NBDRequestData, 1);
  1317. nbd_client_get(client);
  1318. req->client = client;
  1319. return req;
  1320. }
/* Release a request slot obtained from nbd_request_get(): free its data
 * buffer, kick a pending drain if this was the last in-flight request,
 * schedule reception of the next request, and drop the client ref. */
static void nbd_request_put(NBDRequestData *req)
{
    NBDClient *client = req->client;

    if (req->data) {
        qemu_vfree(req->data);
    }
    g_free(req);

    client->nb_requests--;

    if (client->quiescing && client->nb_requests == 0) {
        /* A drained section is waiting for us to go idle */
        aio_wait_kick();
    }

    nbd_client_receive_next_request(client);

    /* May free @client, so must come last */
    nbd_client_put(client);
}
/* AioContext attach notifier: move all client channels of the export to
 * the new context. Only runs while the export is quiesced, hence the
 * assertions that no requests or coroutines are active. */
static void blk_aio_attached(AioContext *ctx, void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    trace_nbd_blk_aio_attached(exp->name, ctx);

    exp->common.ctx = ctx;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        qio_channel_attach_aio_context(client->ioc, ctx);

        assert(client->nb_requests == 0);
        assert(client->recv_coroutine == NULL);
        assert(client->send_coroutine == NULL);
    }
}
/* AioContext detach notifier: detach all client channels and clear the
 * export's context pointer until blk_aio_attached() runs again. */
static void blk_aio_detach(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);

    QTAILQ_FOREACH(client, &exp->clients, next) {
        qio_channel_detach_aio_context(client->ioc);
    }

    exp->common.ctx = NULL;
}
/* drained_begin callback: mark every client quiescing so that no new
 * requests are accepted while the drained section is active. */
static void nbd_drained_begin(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        client->quiescing = true;
    }
}
/* drained_end callback: clear the quiescing flag and restart request
 * reception for every client of the export. */
static void nbd_drained_end(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        client->quiescing = false;
        nbd_client_receive_next_request(client);
    }
}
/* drained_poll callback: return true while any client still has requests
 * in flight, so the drained section keeps polling until we are idle. */
static bool nbd_drained_poll(void *opaque)
{
    NBDExport *exp = opaque;
    NBDClient *client;

    QTAILQ_FOREACH(client, &exp->clients, next) {
        if (client->nb_requests != 0) {
            /*
             * If there's a coroutine waiting for a request on nbd_read_eof()
             * enter it here so we don't depend on the client to wake it up.
             */
            if (client->recv_coroutine != NULL && client->read_yielding) {
                qio_channel_wake_read(client->ioc);
            }

            return true;
        }
    }

    return false;
}
/* Medium-removal notifier: shut down the export when its backing device
 * goes away. */
static void nbd_eject_notifier(Notifier *n, void *data)
{
    NBDExport *exp = container_of(n, NBDExport, eject_notifier);

    blk_exp_request_shutdown(&exp->common);
}
/* Register @blk so that ejecting its medium shuts down NBD export @exp.
 * Takes a reference on @blk; may be called at most once per export. */
void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk)
{
    NBDExport *nbd_exp = container_of(exp, NBDExport, common);
    assert(exp->drv == &blk_exp_nbd);
    assert(nbd_exp->eject_notifier_blk == NULL);

    blk_ref(blk);
    nbd_exp->eject_notifier_blk = blk;
    nbd_exp->eject_notifier.notify = nbd_eject_notifier;
    blk_add_remove_bs_notifier(blk, &nbd_exp->eject_notifier);
}
/* Device ops installed on the export's BlockBackend so the NBD server
 * participates in the block layer's drain protocol. */
static const BlockDevOps nbd_block_ops = {
    .drained_begin = nbd_drained_begin,
    .drained_end = nbd_drained_end,
    .drained_poll = nbd_drained_poll,
};
/* Create an NBD export from @exp_args: validate names, resolve and mark
 * busy any requested dirty bitmaps, set export flags, and insert the
 * export into the global list.
 * Returns 0 on success, negative errno on failure (errp set). */
static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args,
                             Error **errp)
{
    NBDExport *exp = container_of(blk_exp, NBDExport, common);
    BlockExportOptionsNbd *arg = &exp_args->u.nbd;
    /* Default the export name to the node name if none was given */
    const char *name = arg->name ?: exp_args->node_name;
    BlockBackend *blk = blk_exp->blk;
    int64_t size;
    uint64_t perm, shared_perm;
    bool readonly = !exp_args->writable;
    BlockDirtyBitmapOrStrList *bitmaps;
    size_t i;
    int ret;

    assert(exp_args->type == BLOCK_EXPORT_TYPE_NBD);

    if (!nbd_server_is_running()) {
        error_setg(errp, "NBD server not running");
        return -EINVAL;
    }

    if (strlen(name) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name '%s' too long", name);
        return -EINVAL;
    }

    if (arg->description && strlen(arg->description) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "description '%s' too long", arg->description);
        return -EINVAL;
    }

    if (nbd_export_find(name)) {
        error_setg(errp, "NBD server already has export named '%s'", name);
        return -EEXIST;
    }

    size = blk_getlength(blk);
    if (size < 0) {
        error_setg_errno(errp, -size,
                         "Failed to determine the NBD export's length");
        return size;
    }

    /* Don't allow resize while the NBD server is running, otherwise we don't
     * care what happens with the node. */
    blk_get_perm(blk, &perm, &shared_perm);
    ret = blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_RESIZE, errp);
    if (ret < 0) {
        return ret;
    }

    QTAILQ_INIT(&exp->clients);
    exp->name = g_strdup(name);
    exp->description = g_strdup(arg->description);
    exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH |
                     NBD_FLAG_SEND_FUA | NBD_FLAG_SEND_CACHE);

    if (nbd_server_max_connections() != 1) {
        exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN;
    }
    if (readonly) {
        exp->nbdflags |= NBD_FLAG_READ_ONLY;
    } else {
        /* Write-capable exports additionally advertise trim/zero support */
        exp->nbdflags |= (NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_WRITE_ZEROES |
                          NBD_FLAG_SEND_FAST_ZERO);
    }
    exp->size = QEMU_ALIGN_DOWN(size, BDRV_SECTOR_SIZE);

    /* Count requested bitmaps first so the array can be sized exactly */
    for (bitmaps = arg->bitmaps; bitmaps; bitmaps = bitmaps->next) {
        exp->nr_export_bitmaps++;
    }
    exp->export_bitmaps = g_new0(BdrvDirtyBitmap *, exp->nr_export_bitmaps);
    for (i = 0, bitmaps = arg->bitmaps; bitmaps;
         i++, bitmaps = bitmaps->next)
    {
        const char *bitmap;
        BlockDriverState *bs = blk_bs(blk);
        BdrvDirtyBitmap *bm = NULL;

        switch (bitmaps->value->type) {
        case QTYPE_QSTRING:
            /* Plain name: search this node and its backing/filter chain */
            bitmap = bitmaps->value->u.local;
            while (bs) {
                bm = bdrv_find_dirty_bitmap(bs, bitmap);
                if (bm != NULL) {
                    break;
                }

                bs = bdrv_filter_or_cow_bs(bs);
            }

            if (bm == NULL) {
                ret = -ENOENT;
                error_setg(errp, "Bitmap '%s' is not found",
                           bitmaps->value->u.local);
                goto fail;
            }

            if (readonly && bdrv_is_writable(bs) &&
                bdrv_dirty_bitmap_enabled(bm)) {
                ret = -EINVAL;
                error_setg(errp, "Enabled bitmap '%s' incompatible with "
                           "readonly export", bitmap);
                goto fail;
            }
            break;
        case QTYPE_QDICT:
            /* Explicit node/name pair */
            bitmap = bitmaps->value->u.external.name;
            bm = block_dirty_bitmap_lookup(bitmaps->value->u.external.node,
                                           bitmap, NULL, errp);
            if (!bm) {
                ret = -ENOENT;
                goto fail;
            }
            break;
        default:
            abort();
        }

        assert(bm);

        if (bdrv_dirty_bitmap_check(bm, BDRV_BITMAP_ALLOW_RO, errp)) {
            ret = -EINVAL;
            goto fail;
        }

        exp->export_bitmaps[i] = bm;
        assert(strlen(bitmap) <= BDRV_BITMAP_MAX_NAME_SIZE);
    }

    /* Mark bitmaps busy in a separate loop, to simplify roll-back concerns. */
    for (i = 0; i < exp->nr_export_bitmaps; i++) {
        bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], true);
    }

    exp->allocation_depth = arg->allocation_depth;

    /*
     * We need to inhibit request queuing in the block layer to ensure we can
     * be properly quiesced when entering a drained section, as our coroutines
     * servicing pending requests might enter blk_pread().
     */
    blk_set_disable_request_queuing(blk, true);

    blk_add_aio_context_notifier(blk, blk_aio_attached, blk_aio_detach, exp);

    blk_set_dev_ops(blk, &nbd_block_ops, exp);

    QTAILQ_INSERT_TAIL(&exports, exp, next);

    return 0;

fail:
    /* NOTE(review): the RESIZE shared-perm restriction taken above is not
     * rolled back here — presumably the export core's teardown handles
     * permissions; confirm against blk_exp cleanup paths. */
    g_free(exp->export_bitmaps);
    g_free(exp->name);
    g_free(exp->description);
    return ret;
}
  1546. NBDExport *nbd_export_find(const char *name)
  1547. {
  1548. NBDExport *exp;
  1549. QTAILQ_FOREACH(exp, &exports, next) {
  1550. if (strcmp(name, exp->name) == 0) {
  1551. return exp;
  1552. }
  1553. }
  1554. return NULL;
  1555. }
/* Return the AioContext in which this export's requests are serviced. */
AioContext *
nbd_export_aio_context(NBDExport *exp)
{
    return exp->common.ctx;
}
  1561. static void nbd_export_request_shutdown(BlockExport *blk_exp)
  1562. {
  1563. NBDExport *exp = container_of(blk_exp, NBDExport, common);
  1564. NBDClient *client, *next;
  1565. blk_exp_ref(&exp->common);
  1566. /*
  1567. * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a
  1568. * close mode that stops advertising the export to new clients but
  1569. * still permits existing clients to run to completion? Because of
  1570. * that possibility, nbd_export_close() can be called more than
  1571. * once on an export.
  1572. */
  1573. QTAILQ_FOREACH_SAFE(client, &exp->clients, next, next) {
  1574. client_close(client, true);
  1575. }
  1576. if (exp->name) {
  1577. g_free(exp->name);
  1578. exp->name = NULL;
  1579. QTAILQ_REMOVE(&exports, exp, next);
  1580. }
  1581. blk_exp_unref(&exp->common);
  1582. }
  1583. static void nbd_export_delete(BlockExport *blk_exp)
  1584. {
  1585. size_t i;
  1586. NBDExport *exp = container_of(blk_exp, NBDExport, common);
  1587. assert(exp->name == NULL);
  1588. assert(QTAILQ_EMPTY(&exp->clients));
  1589. g_free(exp->description);
  1590. exp->description = NULL;
  1591. if (exp->common.blk) {
  1592. if (exp->eject_notifier_blk) {
  1593. notifier_remove(&exp->eject_notifier);
  1594. blk_unref(exp->eject_notifier_blk);
  1595. }
  1596. blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached,
  1597. blk_aio_detach, exp);
  1598. blk_set_disable_request_queuing(exp->common.blk, false);
  1599. }
  1600. for (i = 0; i < exp->nr_export_bitmaps; i++) {
  1601. bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], false);
  1602. }
  1603. }
/* NBD's implementation of the generic block-export driver interface. */
const BlockExportDriver blk_exp_nbd = {
    .type               = BLOCK_EXPORT_TYPE_NBD,
    .instance_size      = sizeof(NBDExport),
    .create             = nbd_export_create,
    .delete             = nbd_export_delete,
    .request_shutdown   = nbd_export_request_shutdown,
};
  1611. static int coroutine_fn nbd_co_send_iov(NBDClient *client, struct iovec *iov,
  1612. unsigned niov, Error **errp)
  1613. {
  1614. int ret;
  1615. g_assert(qemu_in_coroutine());
  1616. qemu_co_mutex_lock(&client->send_lock);
  1617. client->send_coroutine = qemu_coroutine_self();
  1618. ret = qio_channel_writev_all(client->ioc, iov, niov, errp) < 0 ? -EIO : 0;
  1619. client->send_coroutine = NULL;
  1620. qemu_co_mutex_unlock(&client->send_lock);
  1621. return ret;
  1622. }
/*
 * Fill in a simple-reply header in wire (big-endian) byte order.
 * NOTE(review): @error is declared uint64_t but the wire field is 32 bits,
 * so stl_be_p() stores only the low 32 bits; callers pass NBD errno values,
 * which fit — confirm if widening was intentional.
 */
static inline void set_be_simple_reply(NBDSimpleReply *reply, uint64_t error,
                                       uint64_t handle)
{
    stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC);
    stl_be_p(&reply->error, error);
    stq_be_p(&reply->handle, handle);
}
  1630. static int nbd_co_send_simple_reply(NBDClient *client,
  1631. uint64_t handle,
  1632. uint32_t error,
  1633. void *data,
  1634. size_t len,
  1635. Error **errp)
  1636. {
  1637. NBDSimpleReply reply;
  1638. int nbd_err = system_errno_to_nbd_errno(error);
  1639. struct iovec iov[] = {
  1640. {.iov_base = &reply, .iov_len = sizeof(reply)},
  1641. {.iov_base = data, .iov_len = len}
  1642. };
  1643. trace_nbd_co_send_simple_reply(handle, nbd_err, nbd_err_lookup(nbd_err),
  1644. len);
  1645. set_be_simple_reply(&reply, nbd_err, handle);
  1646. return nbd_co_send_iov(client, iov, len ? 2 : 1, errp);
  1647. }
/*
 * Fill in a structured-reply chunk header in wire (big-endian) byte order.
 * @length is the size of the payload following the header.
 */
static inline void set_be_chunk(NBDStructuredReplyChunk *chunk, uint16_t flags,
                                uint16_t type, uint64_t handle, uint32_t length)
{
    stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC);
    stw_be_p(&chunk->flags, flags);
    stw_be_p(&chunk->type, type);
    stq_be_p(&chunk->handle, handle);
    stl_be_p(&chunk->length, length);
}
  1657. static int coroutine_fn nbd_co_send_structured_done(NBDClient *client,
  1658. uint64_t handle,
  1659. Error **errp)
  1660. {
  1661. NBDStructuredReplyChunk chunk;
  1662. struct iovec iov[] = {
  1663. {.iov_base = &chunk, .iov_len = sizeof(chunk)},
  1664. };
  1665. trace_nbd_co_send_structured_done(handle);
  1666. set_be_chunk(&chunk, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_NONE, handle, 0);
  1667. return nbd_co_send_iov(client, iov, 1, errp);
  1668. }
  1669. static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
  1670. uint64_t handle,
  1671. uint64_t offset,
  1672. void *data,
  1673. size_t size,
  1674. bool final,
  1675. Error **errp)
  1676. {
  1677. NBDStructuredReadData chunk;
  1678. struct iovec iov[] = {
  1679. {.iov_base = &chunk, .iov_len = sizeof(chunk)},
  1680. {.iov_base = data, .iov_len = size}
  1681. };
  1682. assert(size);
  1683. trace_nbd_co_send_structured_read(handle, offset, data, size);
  1684. set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
  1685. NBD_REPLY_TYPE_OFFSET_DATA, handle,
  1686. sizeof(chunk) - sizeof(chunk.h) + size);
  1687. stq_be_p(&chunk.offset, offset);
  1688. return nbd_co_send_iov(client, iov, 2, errp);
  1689. }
  1690. static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
  1691. uint64_t handle,
  1692. uint32_t error,
  1693. const char *msg,
  1694. Error **errp)
  1695. {
  1696. NBDStructuredError chunk;
  1697. int nbd_err = system_errno_to_nbd_errno(error);
  1698. struct iovec iov[] = {
  1699. {.iov_base = &chunk, .iov_len = sizeof(chunk)},
  1700. {.iov_base = (char *)msg, .iov_len = msg ? strlen(msg) : 0},
  1701. };
  1702. assert(nbd_err);
  1703. trace_nbd_co_send_structured_error(handle, nbd_err,
  1704. nbd_err_lookup(nbd_err), msg ? msg : "");
  1705. set_be_chunk(&chunk.h, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_ERROR, handle,
  1706. sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
  1707. stl_be_p(&chunk.error, nbd_err);
  1708. stw_be_p(&chunk.message_length, iov[1].iov_len);
  1709. return nbd_co_send_iov(client, iov, 1 + !!iov[1].iov_len, errp);
  1710. }
/* Do a sparse read and send the structured reply to the client.
 * Returns -errno if sending fails. blk_co_block_status_above() failure is
 * reported to the client, at which point this function succeeds.
 */
static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
                                                uint64_t handle,
                                                uint64_t offset,
                                                uint8_t *data,
                                                size_t size,
                                                Error **errp)
{
    int ret = 0;
    NBDExport *exp = client->exp;
    size_t progress = 0;

    /* Walk the range, emitting one chunk per block-status extent. */
    while (progress < size) {
        int64_t pnum;
        int status = blk_co_block_status_above(exp->common.blk, NULL,
                                               offset + progress,
                                               size - progress, &pnum, NULL,
                                               NULL);
        bool final;

        if (status < 0) {
            /* Status query failed: report to the client, then succeed. */
            char *msg = g_strdup_printf("unable to check for holes: %s",
                                        strerror(-status));

            ret = nbd_co_send_structured_error(client, handle, -status, msg,
                                               errp);
            g_free(msg);
            return ret;
        }
        assert(pnum && pnum <= size - progress);
        /* Only the last chunk of the reply carries NBD_REPLY_FLAG_DONE. */
        final = progress + pnum == size;
        if (status & BDRV_BLOCK_ZERO) {
            /* Zeroed extent: send an OFFSET_HOLE chunk with no payload. */
            NBDStructuredReadHole chunk;
            struct iovec iov[] = {
                {.iov_base = &chunk, .iov_len = sizeof(chunk)},
            };

            trace_nbd_co_send_structured_read_hole(handle, offset + progress,
                                                   pnum);
            set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
                         NBD_REPLY_TYPE_OFFSET_HOLE,
                         handle, sizeof(chunk) - sizeof(chunk.h));
            stq_be_p(&chunk.offset, offset + progress);
            stl_be_p(&chunk.length, pnum);
            ret = nbd_co_send_iov(client, iov, 1, errp);
        } else {
            /* Data extent: read it and send an OFFSET_DATA chunk. */
            ret = blk_pread(exp->common.blk, offset + progress, pnum,
                            data + progress, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "reading from file failed");
                break;
            }
            ret = nbd_co_send_structured_read(client, handle,
                                              offset + progress,
                                              data + progress, pnum, final,
                                              errp);
        }

        if (ret < 0) {
            break;
        }
        progress += pnum;
    }

    return ret;
}
/*
 * Accumulator for NBD_CMD_BLOCK_STATUS extents; entries are built in host
 * byte order and byte-swapped once, just before transmission.
 */
typedef struct NBDExtentArray {
    NBDExtent *extents;        /* entries; nb_alloc slots allocated */
    unsigned int nb_alloc;     /* allocated capacity */
    unsigned int count;        /* entries in use */
    uint64_t total_length;     /* sum of all entry lengths */
    bool can_add;              /* cleared once an add is refused */
    bool converted_to_be;      /* entries already in big-endian wire order */
} NBDExtentArray;
  1781. static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc)
  1782. {
  1783. NBDExtentArray *ea = g_new0(NBDExtentArray, 1);
  1784. ea->nb_alloc = nb_alloc;
  1785. ea->extents = g_new(NBDExtent, nb_alloc);
  1786. ea->can_add = true;
  1787. return ea;
  1788. }
/* Release an extent array and its backing entry storage. */
static void nbd_extent_array_free(NBDExtentArray *ea)
{
    g_free(ea->extents);
    g_free(ea);
}
/* Enables g_autoptr(NBDExtentArray) for automatic cleanup on scope exit. */
G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free)
  1795. /* Further modifications of the array after conversion are abandoned */
  1796. static void nbd_extent_array_convert_to_be(NBDExtentArray *ea)
  1797. {
  1798. int i;
  1799. assert(!ea->converted_to_be);
  1800. ea->can_add = false;
  1801. ea->converted_to_be = true;
  1802. for (i = 0; i < ea->count; i++) {
  1803. ea->extents[i].flags = cpu_to_be32(ea->extents[i].flags);
  1804. ea->extents[i].length = cpu_to_be32(ea->extents[i].length);
  1805. }
  1806. }
  1807. /*
  1808. * Add extent to NBDExtentArray. If extent can't be added (no available space),
  1809. * return -1.
  1810. * For safety, when returning -1 for the first time, .can_add is set to false,
  1811. * and further calls to nbd_extent_array_add() will crash.
  1812. * (this avoids the situation where a caller ignores failure to add one extent,
  1813. * where adding another extent that would squash into the last array entry
  1814. * would result in an incorrect range reported to the client)
  1815. */
  1816. static int nbd_extent_array_add(NBDExtentArray *ea,
  1817. uint32_t length, uint32_t flags)
  1818. {
  1819. assert(ea->can_add);
  1820. if (!length) {
  1821. return 0;
  1822. }
  1823. /* Extend previous extent if flags are the same */
  1824. if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) {
  1825. uint64_t sum = (uint64_t)length + ea->extents[ea->count - 1].length;
  1826. if (sum <= UINT32_MAX) {
  1827. ea->extents[ea->count - 1].length = sum;
  1828. ea->total_length += length;
  1829. return 0;
  1830. }
  1831. }
  1832. if (ea->count >= ea->nb_alloc) {
  1833. ea->can_add = false;
  1834. return -1;
  1835. }
  1836. ea->total_length += length;
  1837. ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags};
  1838. ea->count++;
  1839. return 0;
  1840. }
  1841. static int coroutine_fn blockstatus_to_extents(BlockBackend *blk,
  1842. uint64_t offset, uint64_t bytes,
  1843. NBDExtentArray *ea)
  1844. {
  1845. while (bytes) {
  1846. uint32_t flags;
  1847. int64_t num;
  1848. int ret = blk_co_block_status_above(blk, NULL, offset, bytes, &num,
  1849. NULL, NULL);
  1850. if (ret < 0) {
  1851. return ret;
  1852. }
  1853. flags = (ret & BDRV_BLOCK_DATA ? 0 : NBD_STATE_HOLE) |
  1854. (ret & BDRV_BLOCK_ZERO ? NBD_STATE_ZERO : 0);
  1855. if (nbd_extent_array_add(ea, num, flags) < 0) {
  1856. return 0;
  1857. }
  1858. offset += num;
  1859. bytes -= num;
  1860. }
  1861. return 0;
  1862. }
  1863. static int coroutine_fn blockalloc_to_extents(BlockBackend *blk,
  1864. uint64_t offset, uint64_t bytes,
  1865. NBDExtentArray *ea)
  1866. {
  1867. while (bytes) {
  1868. int64_t num;
  1869. int ret = blk_co_is_allocated_above(blk, NULL, false, offset, bytes,
  1870. &num);
  1871. if (ret < 0) {
  1872. return ret;
  1873. }
  1874. if (nbd_extent_array_add(ea, num, ret) < 0) {
  1875. return 0;
  1876. }
  1877. offset += num;
  1878. bytes -= num;
  1879. }
  1880. return 0;
  1881. }
  1882. /*
  1883. * nbd_co_send_extents
  1884. *
  1885. * @ea is converted to BE by the function
  1886. * @last controls whether NBD_REPLY_FLAG_DONE is sent.
  1887. */
  1888. static int nbd_co_send_extents(NBDClient *client, uint64_t handle,
  1889. NBDExtentArray *ea,
  1890. bool last, uint32_t context_id, Error **errp)
  1891. {
  1892. NBDStructuredMeta chunk;
  1893. struct iovec iov[] = {
  1894. {.iov_base = &chunk, .iov_len = sizeof(chunk)},
  1895. {.iov_base = ea->extents, .iov_len = ea->count * sizeof(ea->extents[0])}
  1896. };
  1897. nbd_extent_array_convert_to_be(ea);
  1898. trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length,
  1899. last);
  1900. set_be_chunk(&chunk.h, last ? NBD_REPLY_FLAG_DONE : 0,
  1901. NBD_REPLY_TYPE_BLOCK_STATUS,
  1902. handle, sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len);
  1903. stl_be_p(&chunk.context_id, context_id);
  1904. return nbd_co_send_iov(client, iov, 2, errp);
  1905. }
  1906. /* Get block status from the exported device and send it to the client */
  1907. static int
  1908. coroutine_fn nbd_co_send_block_status(NBDClient *client, uint64_t handle,
  1909. BlockBackend *blk, uint64_t offset,
  1910. uint32_t length, bool dont_fragment,
  1911. bool last, uint32_t context_id,
  1912. Error **errp)
  1913. {
  1914. int ret;
  1915. unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
  1916. g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
  1917. if (context_id == NBD_META_ID_BASE_ALLOCATION) {
  1918. ret = blockstatus_to_extents(blk, offset, length, ea);
  1919. } else {
  1920. ret = blockalloc_to_extents(blk, offset, length, ea);
  1921. }
  1922. if (ret < 0) {
  1923. return nbd_co_send_structured_error(
  1924. client, handle, -ret, "can't get block status", errp);
  1925. }
  1926. return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
  1927. }
/*
 * Populate @es from a dirty bitmap: for each dirty area within
 * [offset, offset + length), add a clean extent (flags 0) covering the gap
 * before it, then a dirty extent (NBD_STATE_DIRTY); a trailing clean
 * extent covers the remainder.  Stops early if @es fills up.
 */
static void bitmap_to_extents(BdrvDirtyBitmap *bitmap,
                              uint64_t offset, uint64_t length,
                              NBDExtentArray *es)
{
    int64_t start, dirty_start, dirty_count;
    int64_t end = offset + length;
    bool full = false;

    /* Hold the bitmap lock across the whole iteration. */
    bdrv_dirty_bitmap_lock(bitmap);

    for (start = offset;
         bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end, INT32_MAX,
                                           &dirty_start, &dirty_count);
         start = dirty_start + dirty_count)
    {
        /* Clean gap before the dirty area, then the dirty area itself. */
        if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) ||
            (nbd_extent_array_add(es, dirty_count, NBD_STATE_DIRTY) < 0))
        {
            full = true;
            break;
        }
    }

    if (!full) {
        /* last non dirty extent, nothing to do if array is now full */
        (void) nbd_extent_array_add(es, end - start, 0);
    }

    bdrv_dirty_bitmap_unlock(bitmap);
}
  1955. static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle,
  1956. BdrvDirtyBitmap *bitmap, uint64_t offset,
  1957. uint32_t length, bool dont_fragment, bool last,
  1958. uint32_t context_id, Error **errp)
  1959. {
  1960. unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS;
  1961. g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents);
  1962. bitmap_to_extents(bitmap, offset, length, ea);
  1963. return nbd_co_send_extents(client, handle, ea, last, context_id, errp);
  1964. }
/* nbd_co_receive_request
 * Collect a client request. Return 0 if request looks valid, -EIO to drop
 * connection right away, -EAGAIN to indicate we were interrupted and the
 * channel should be quiesced, and any other negative value to report an error
 * to the client (although the caller may still need to disconnect after
 * reporting the error).
 */
static int nbd_co_receive_request(NBDRequestData *req, NBDRequest *request,
                                  Error **errp)
{
    NBDClient *client = req->client;
    int valid_flags;
    int ret;

    g_assert(qemu_in_coroutine());
    assert(client->recv_coroutine == qemu_coroutine_self());
    ret = nbd_receive_request(client, request, errp);
    if (ret < 0) {
        return ret;
    }

    trace_nbd_co_receive_request_decode_type(request->handle, request->type,
                                             nbd_cmd_lookup(request->type));

    if (request->type != NBD_CMD_WRITE) {
        /* No payload, we are ready to read the next request. */
        req->complete = true;
    }

    if (request->type == NBD_CMD_DISC) {
        /* Special case: we're going to disconnect without a reply,
         * whether or not flags, from, or len are bogus */
        return -EIO;
    }

    if (request->type == NBD_CMD_READ || request->type == NBD_CMD_WRITE ||
        request->type == NBD_CMD_CACHE)
    {
        /* Cap the transfer size before allocating a bounce buffer. */
        if (request->len > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "len (%" PRIu32" ) is larger than max len (%u)",
                       request->len, NBD_MAX_BUFFER_SIZE);
            return -EINVAL;
        }

        if (request->type != NBD_CMD_CACHE) {
            /* READ/WRITE need a bounce buffer; CACHE prefetches without one. */
            req->data = blk_try_blockalign(client->exp->common.blk,
                                           request->len);
            if (req->data == NULL) {
                error_setg(errp, "No memory");
                return -ENOMEM;
            }
        }
    }

    if (request->type == NBD_CMD_WRITE) {
        /* Consume the payload now so the channel stays in sync. */
        if (nbd_read(client->ioc, req->data, request->len, "CMD_WRITE data",
                     errp) < 0)
        {
            return -EIO;
        }
        req->complete = true;

        trace_nbd_co_receive_request_payload_received(request->handle,
                                                      request->len);
    }

    /* Sanity checks. */
    if (client->exp->nbdflags & NBD_FLAG_READ_ONLY &&
        (request->type == NBD_CMD_WRITE ||
         request->type == NBD_CMD_WRITE_ZEROES ||
         request->type == NBD_CMD_TRIM)) {
        error_setg(errp, "Export is read-only");
        return -EROFS;
    }
    if (request->from > client->exp->size ||
        request->len > client->exp->size - request->from) {
        error_setg(errp, "operation past EOF; From: %" PRIu64 ", Len: %" PRIu32
                   ", Size: %" PRIu64, request->from, request->len,
                   client->exp->size);
        return (request->type == NBD_CMD_WRITE ||
                request->type == NBD_CMD_WRITE_ZEROES) ? -ENOSPC : -EINVAL;
    }
    if (client->check_align && !QEMU_IS_ALIGNED(request->from | request->len,
                                                client->check_align)) {
        /*
         * The block layer gracefully handles unaligned requests, but
         * it's still worth tracing client non-compliance
         */
        trace_nbd_co_receive_align_compliance(nbd_cmd_lookup(request->type),
                                              request->from,
                                              request->len,
                                              client->check_align);
    }
    /* Compute the set of flags this command is allowed to carry. */
    valid_flags = NBD_CMD_FLAG_FUA;
    if (request->type == NBD_CMD_READ && client->structured_reply) {
        valid_flags |= NBD_CMD_FLAG_DF;
    } else if (request->type == NBD_CMD_WRITE_ZEROES) {
        valid_flags |= NBD_CMD_FLAG_NO_HOLE | NBD_CMD_FLAG_FAST_ZERO;
    } else if (request->type == NBD_CMD_BLOCK_STATUS) {
        valid_flags |= NBD_CMD_FLAG_REQ_ONE;
    }
    if (request->flags & ~valid_flags) {
        error_setg(errp, "unsupported flags for command %s (got 0x%x)",
                   nbd_cmd_lookup(request->type), request->flags);
        return -EINVAL;
    }

    return 0;
}
  2064. /* Send simple reply without a payload, or a structured error
  2065. * @error_msg is ignored if @ret >= 0
  2066. * Returns 0 if connection is still live, -errno on failure to talk to client
  2067. */
  2068. static coroutine_fn int nbd_send_generic_reply(NBDClient *client,
  2069. uint64_t handle,
  2070. int ret,
  2071. const char *error_msg,
  2072. Error **errp)
  2073. {
  2074. if (client->structured_reply && ret < 0) {
  2075. return nbd_co_send_structured_error(client, handle, -ret, error_msg,
  2076. errp);
  2077. } else {
  2078. return nbd_co_send_simple_reply(client, handle, ret < 0 ? -ret : 0,
  2079. NULL, 0, errp);
  2080. }
  2081. }
  2082. /* Handle NBD_CMD_READ request.
  2083. * Return -errno if sending fails. Other errors are reported directly to the
  2084. * client as an error reply. */
  2085. static coroutine_fn int nbd_do_cmd_read(NBDClient *client, NBDRequest *request,
  2086. uint8_t *data, Error **errp)
  2087. {
  2088. int ret;
  2089. NBDExport *exp = client->exp;
  2090. assert(request->type == NBD_CMD_READ);
  2091. /* XXX: NBD Protocol only documents use of FUA with WRITE */
  2092. if (request->flags & NBD_CMD_FLAG_FUA) {
  2093. ret = blk_co_flush(exp->common.blk);
  2094. if (ret < 0) {
  2095. return nbd_send_generic_reply(client, request->handle, ret,
  2096. "flush failed", errp);
  2097. }
  2098. }
  2099. if (client->structured_reply && !(request->flags & NBD_CMD_FLAG_DF) &&
  2100. request->len)
  2101. {
  2102. return nbd_co_send_sparse_read(client, request->handle, request->from,
  2103. data, request->len, errp);
  2104. }
  2105. ret = blk_pread(exp->common.blk, request->from, request->len, data, 0);
  2106. if (ret < 0) {
  2107. return nbd_send_generic_reply(client, request->handle, ret,
  2108. "reading from file failed", errp);
  2109. }
  2110. if (client->structured_reply) {
  2111. if (request->len) {
  2112. return nbd_co_send_structured_read(client, request->handle,
  2113. request->from, data,
  2114. request->len, true, errp);
  2115. } else {
  2116. return nbd_co_send_structured_done(client, request->handle, errp);
  2117. }
  2118. } else {
  2119. return nbd_co_send_simple_reply(client, request->handle, 0,
  2120. data, request->len, errp);
  2121. }
  2122. }
  2123. /*
  2124. * nbd_do_cmd_cache
  2125. *
  2126. * Handle NBD_CMD_CACHE request.
  2127. * Return -errno if sending fails. Other errors are reported directly to the
  2128. * client as an error reply.
  2129. */
  2130. static coroutine_fn int nbd_do_cmd_cache(NBDClient *client, NBDRequest *request,
  2131. Error **errp)
  2132. {
  2133. int ret;
  2134. NBDExport *exp = client->exp;
  2135. assert(request->type == NBD_CMD_CACHE);
  2136. ret = blk_co_preadv(exp->common.blk, request->from, request->len,
  2137. NULL, BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
  2138. return nbd_send_generic_reply(client, request->handle, ret,
  2139. "caching data failed", errp);
  2140. }
/* Handle NBD request.
 * Return -errno if sending fails. Other errors are reported directly to the
 * client as an error reply. */
static coroutine_fn int nbd_handle_request(NBDClient *client,
                                           NBDRequest *request,
                                           uint8_t *data, Error **errp)
{
    int ret;
    int flags;
    NBDExport *exp = client->exp;
    char *msg;
    size_t i;

    switch (request->type) {
    case NBD_CMD_CACHE:
        return nbd_do_cmd_cache(client, request, errp);

    case NBD_CMD_READ:
        return nbd_do_cmd_read(client, request, data, errp);

    case NBD_CMD_WRITE:
        flags = 0;
        if (request->flags & NBD_CMD_FLAG_FUA) {
            flags |= BDRV_REQ_FUA;
        }
        ret = blk_pwrite(exp->common.blk, request->from, request->len, data,
                         flags);
        return nbd_send_generic_reply(client, request->handle, ret,
                                      "writing to file failed", errp);

    case NBD_CMD_WRITE_ZEROES:
        flags = 0;
        if (request->flags & NBD_CMD_FLAG_FUA) {
            flags |= BDRV_REQ_FUA;
        }
        if (!(request->flags & NBD_CMD_FLAG_NO_HOLE)) {
            /* Client permits holes: let the block layer punch them. */
            flags |= BDRV_REQ_MAY_UNMAP;
        }
        if (request->flags & NBD_CMD_FLAG_FAST_ZERO) {
            /* Fail rather than fall back to slow explicit zero writes. */
            flags |= BDRV_REQ_NO_FALLBACK;
        }
        ret = blk_pwrite_zeroes(exp->common.blk, request->from, request->len,
                                flags);
        return nbd_send_generic_reply(client, request->handle, ret,
                                      "writing to file failed", errp);

    case NBD_CMD_DISC:
        /* unreachable, thanks to special case in nbd_co_receive_request() */
        abort();

    case NBD_CMD_FLUSH:
        ret = blk_co_flush(exp->common.blk);
        return nbd_send_generic_reply(client, request->handle, ret,
                                      "flush failed", errp);

    case NBD_CMD_TRIM:
        ret = blk_co_pdiscard(exp->common.blk, request->from, request->len);
        if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) {
            /* FUA on TRIM: flush so the discard is durable before replying. */
            ret = blk_co_flush(exp->common.blk);
        }
        return nbd_send_generic_reply(client, request->handle, ret,
                                      "discard failed", errp);

    case NBD_CMD_BLOCK_STATUS:
        if (!request->len) {
            return nbd_send_generic_reply(client, request->handle, -EINVAL,
                                          "need non-zero length", errp);
        }
        if (client->export_meta.count) {
            bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
            /* One chunk series per negotiated context; only the last one
             * carries NBD_REPLY_FLAG_DONE (hence the !-- countdown). */
            int contexts_remaining = client->export_meta.count;

            if (client->export_meta.base_allocation) {
                ret = nbd_co_send_block_status(client, request->handle,
                                               exp->common.blk,
                                               request->from,
                                               request->len, dont_fragment,
                                               !--contexts_remaining,
                                               NBD_META_ID_BASE_ALLOCATION,
                                               errp);
                if (ret < 0) {
                    return ret;
                }
            }

            if (client->export_meta.allocation_depth) {
                ret = nbd_co_send_block_status(client, request->handle,
                                               exp->common.blk,
                                               request->from, request->len,
                                               dont_fragment,
                                               !--contexts_remaining,
                                               NBD_META_ID_ALLOCATION_DEPTH,
                                               errp);
                if (ret < 0) {
                    return ret;
                }
            }

            for (i = 0; i < client->exp->nr_export_bitmaps; i++) {
                if (!client->export_meta.bitmaps[i]) {
                    /* Bitmap exists but this client did not select it. */
                    continue;
                }
                ret = nbd_co_send_bitmap(client, request->handle,
                                         client->exp->export_bitmaps[i],
                                         request->from, request->len,
                                         dont_fragment, !--contexts_remaining,
                                         NBD_META_ID_DIRTY_BITMAP + i, errp);
                if (ret < 0) {
                    return ret;
                }
            }

            assert(!contexts_remaining);

            return 0;
        } else {
            return nbd_send_generic_reply(client, request->handle, -EINVAL,
                                          "CMD_BLOCK_STATUS not negotiated",
                                          errp);
        }

    default:
        msg = g_strdup_printf("invalid request type (%" PRIu32 ") received",
                              request->type);
        ret = nbd_send_generic_reply(client, request->handle, -EINVAL, msg,
                                     errp);
        g_free(msg);
        return ret;
    }
}
/*
 * One receive/handle/reply cycle of the request loop.
 * Owns a reference to the NBDClient passed as opaque (dropped on every
 * exit path via nbd_client_put()).
 */
static coroutine_fn void nbd_trip(void *opaque)
{
    NBDClient *client = opaque;
    NBDRequestData *req;
    NBDRequest request = { 0 };    /* GCC thinks it can be used uninitialized */
    int ret;
    Error *local_err = NULL;

    trace_nbd_trip();
    if (client->closing) {
        nbd_client_put(client);
        return;
    }

    if (client->quiescing) {
        /*
         * We're switching between AIO contexts. Don't attempt to receive a new
         * request and kick the main context which may be waiting for us.
         */
        nbd_client_put(client);
        client->recv_coroutine = NULL;
        aio_wait_kick();
        return;
    }

    req = nbd_request_get(client);
    ret = nbd_co_receive_request(req, &request, &local_err);
    client->recv_coroutine = NULL;

    if (client->closing) {
        /*
         * The client may be closed when we are blocked in
         * nbd_co_receive_request()
         */
        goto done;
    }

    if (ret == -EAGAIN) {
        assert(client->quiescing);
        goto done;
    }

    /* Start receiving the next request before replying to this one. */
    nbd_client_receive_next_request(client);

    if (ret == -EIO) {
        goto disconnect;
    }

    /* Cork the channel so the reply goes out in as few packets as possible. */
    qio_channel_set_cork(client->ioc, true);

    if (ret < 0) {
        /* It wasn't -EIO, so, according to nbd_co_receive_request()
         * semantics, we should return the error to the client. */
        Error *export_err = local_err;

        local_err = NULL;
        ret = nbd_send_generic_reply(client, request.handle, -EINVAL,
                                     error_get_pretty(export_err), &local_err);
        error_free(export_err);
    } else {
        ret = nbd_handle_request(client, &request, req->data, &local_err);
    }
    if (ret < 0) {
        error_prepend(&local_err, "Failed to send reply: ");
        goto disconnect;
    }

    /* We must disconnect after NBD_CMD_WRITE if we did not
     * read the payload.
     */
    if (!req->complete) {
        error_setg(&local_err, "Request handling failed in intermediate state");
        goto disconnect;
    }

    qio_channel_set_cork(client->ioc, false);
done:
    nbd_request_put(req);
    nbd_client_put(client);
    return;

disconnect:
    if (local_err) {
        error_reportf_err(local_err, "Disconnect client, due to: ");
    }
    nbd_request_put(req);
    client_close(client, true);
    nbd_client_put(client);
}
  2334. static void nbd_client_receive_next_request(NBDClient *client)
  2335. {
  2336. if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
  2337. !client->quiescing) {
  2338. nbd_client_get(client);
  2339. client->recv_coroutine = qemu_coroutine_create(nbd_trip, client);
  2340. aio_co_schedule(client->exp->common.ctx, client->recv_coroutine);
  2341. }
  2342. }
  2343. static coroutine_fn void nbd_co_client_start(void *opaque)
  2344. {
  2345. NBDClient *client = opaque;
  2346. Error *local_err = NULL;
  2347. qemu_co_mutex_init(&client->send_lock);
  2348. if (nbd_negotiate(client, &local_err)) {
  2349. if (local_err) {
  2350. error_report_err(local_err);
  2351. }
  2352. client_close(client, false);
  2353. return;
  2354. }
  2355. nbd_client_receive_next_request(client);
  2356. }
  2357. /*
  2358. * Create a new client listener using the given channel @sioc.
  2359. * Begin servicing it in a coroutine. When the connection closes, call
  2360. * @close_fn with an indication of whether the client completed negotiation.
  2361. */
  2362. void nbd_client_new(QIOChannelSocket *sioc,
  2363. QCryptoTLSCreds *tlscreds,
  2364. const char *tlsauthz,
  2365. void (*close_fn)(NBDClient *, bool))
  2366. {
  2367. NBDClient *client;
  2368. Coroutine *co;
  2369. client = g_new0(NBDClient, 1);
  2370. client->refcount = 1;
  2371. client->tlscreds = tlscreds;
  2372. if (tlscreds) {
  2373. object_ref(OBJECT(client->tlscreds));
  2374. }
  2375. client->tlsauthz = g_strdup(tlsauthz);
  2376. client->sioc = sioc;
  2377. qio_channel_set_delay(QIO_CHANNEL(sioc), false);
  2378. object_ref(OBJECT(client->sioc));
  2379. client->ioc = QIO_CHANNEL(sioc);
  2380. object_ref(OBJECT(client->ioc));
  2381. client->close_fn = close_fn;
  2382. co = qemu_coroutine_create(nbd_co_client_start, client);
  2383. qemu_coroutine_enter(co);
  2384. }