l2tpv3.c

/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2012-2014 Cisco Systems
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <linux/ip.h>
#include <netdb.h>
#include "net/net.h"
#include "clients.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/sockets.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/memalign.h"

/*
 * The buffer size needs further investigation for optimal numbers and
 * optimal paging behaviour on different systems. This size is chosen
 * to be sufficient to accommodate one packet with some headers.
 */

#define BUFFER_ALIGN sysconf(_SC_PAGESIZE)
#define BUFFER_SIZE 16384
#define IOVSIZE 2
#define MAX_L2TPV3_MSGCNT 64
#define MAX_L2TPV3_IOVCNT (MAX_L2TPV3_MSGCNT * IOVSIZE)

/* Header set to 0x30000 signifies a data packet */

#define L2TPV3_DATA_PACKET 0x30000

/* IANA-assigned IP protocol ID for L2TPv3 */

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 0x73
#endif

typedef struct NetL2TPV3State {
    NetClientState nc;
    int fd;

    /*
     * These are used for xmit - that happens one packet at a time -
     * and for the first sign-of-life packet (easier to parse it once).
     */
    uint8_t *header_buf;
    struct iovec *vec;

    /*
     * These are used for receive - try to "eat" up to
     * MAX_L2TPV3_MSGCNT packets at a time.
     */
    struct mmsghdr *msgvec;

    /*
     * Peer address.
     */
    struct sockaddr_storage *dgram_dst;
    uint32_t dst_size;

    /*
     * L2TPv3 parameters.
     */
    uint64_t rx_cookie;
    uint64_t tx_cookie;
    uint32_t rx_session;
    uint32_t tx_session;
    uint32_t header_size;
    uint32_t counter;

    /*
     * DoS avoidance in error handling.
     */
    bool header_mismatch;

    /*
     * Ring buffer handling.
     */
    int queue_head;
    int queue_tail;
    int queue_depth;

    /*
     * Precomputed offsets.
     */
    uint32_t offset;
    uint32_t cookie_offset;
    uint32_t counter_offset;
    uint32_t session_offset;

    /* Poll control */
    bool read_poll;
    bool write_poll;

    /* Flags */
    bool ipv6;
    bool udp;
    bool has_counter;
    bool pin_counter;
    bool cookie;
    bool cookie_is_64;
} NetL2TPV3State;

static void net_l2tpv3_send(void *opaque);
static void l2tpv3_writable(void *opaque);

static void l2tpv3_update_fd_handler(NetL2TPV3State *s)
{
    qemu_set_fd_handler(s->fd,
                        s->read_poll ? net_l2tpv3_send : NULL,
                        s->write_poll ? l2tpv3_writable : NULL,
                        s);
}

static void l2tpv3_read_poll(NetL2TPV3State *s, bool enable)
{
    if (s->read_poll != enable) {
        s->read_poll = enable;
        l2tpv3_update_fd_handler(s);
    }
}

static void l2tpv3_write_poll(NetL2TPV3State *s, bool enable)
{
    if (s->write_poll != enable) {
        s->write_poll = enable;
        l2tpv3_update_fd_handler(s);
    }
}

static void l2tpv3_writable(void *opaque)
{
    NetL2TPV3State *s = opaque;
    l2tpv3_write_poll(s, false);
    qemu_flush_queued_packets(&s->nc);
}

static void l2tpv3_send_completed(NetClientState *nc, ssize_t len)
{
    NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
    l2tpv3_read_poll(s, true);
}

static void l2tpv3_poll(NetClientState *nc, bool enable)
{
    NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
    l2tpv3_write_poll(s, enable);
    l2tpv3_read_poll(s, enable);
}
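
/*
 * Fill in s->header_buf with the precomputed transmit header: an
 * optional 0x30000 data-packet marker (UDP encapsulation only), the
 * transmit session ID, an optional 32/64-bit cookie, and an optional
 * sequence counter (held at zero when pinned). All fields are stored
 * in network byte order at the offsets computed in net_init_l2tpv3().
 */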
static void l2tpv3_form_header(NetL2TPV3State *s)
{
    uint32_t *counter;

    if (s->udp) {
        stl_be_p((uint32_t *) s->header_buf, L2TPV3_DATA_PACKET);
    }
    stl_be_p(
        (uint32_t *) (s->header_buf + s->session_offset),
        s->tx_session
    );
    if (s->cookie) {
        if (s->cookie_is_64) {
            stq_be_p(
                (uint64_t *) (s->header_buf + s->cookie_offset),
                s->tx_cookie
            );
        } else {
            stl_be_p(
                (uint32_t *) (s->header_buf + s->cookie_offset),
                s->tx_cookie
            );
        }
    }
    if (s->has_counter) {
        counter = (uint32_t *) (s->header_buf + s->counter_offset);
        if (s->pin_counter) {
            *counter = 0;
        } else {
            stl_be_p(counter, ++s->counter);
        }
    }
}
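
/*
 * Transmit path (scatter-gather). In QEMU's net client model,
 * .receive_iov is called with a packet from the peer (the guest NIC)
 * that must be sent out on the host socket. Slot 0 of s->vec is
 * reserved for the L2TPv3 header, hence the iovcnt limit of
 * MAX_L2TPV3_IOVCNT - 1 for the payload.
 */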
static ssize_t net_l2tpv3_receive_dgram_iov(NetClientState *nc,
                                            const struct iovec *iov,
                                            int iovcnt)
{
    NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);

    struct msghdr message;
    int ret;

    if (iovcnt > MAX_L2TPV3_IOVCNT - 1) {
        error_report(
            "iovec too long %d > %d, increase MAX_L2TPV3_IOVCNT",
            iovcnt, MAX_L2TPV3_IOVCNT
        );
        return -1;
    }
    l2tpv3_form_header(s);
    memcpy(s->vec + 1, iov, iovcnt * sizeof(struct iovec));
    s->vec->iov_base = s->header_buf;
    s->vec->iov_len = s->offset;
    message.msg_name = s->dgram_dst;
    message.msg_namelen = s->dst_size;
    message.msg_iov = s->vec;
    message.msg_iovlen = iovcnt + 1;
    message.msg_control = NULL;
    message.msg_controllen = 0;
    message.msg_flags = 0;
    ret = RETRY_ON_EINTR(sendmsg(s->fd, &message, 0));
    if (ret > 0) {
        ret -= s->offset;
    } else if (ret == 0) {
        /* belt and braces - should not occur on DGRAM
         * we should get an error and never a 0 send
         */
        ret = iov_size(iov, iovcnt);
    } else {
        /* signal upper layer that socket buffer is full */
        ret = -errno;
        if (ret == -EAGAIN || ret == -ENOBUFS) {
            l2tpv3_write_poll(s, true);
            ret = 0;
        }
    }
    return ret;
}

static ssize_t net_l2tpv3_receive_dgram(NetClientState *nc,
                                        const uint8_t *buf,
                                        size_t size)
{
    NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);

    struct iovec *vec;
    struct msghdr message;
    ssize_t ret = 0;

    l2tpv3_form_header(s);
    vec = s->vec;
    vec->iov_base = s->header_buf;
    vec->iov_len = s->offset;
    vec++;
    vec->iov_base = (void *) buf;
    vec->iov_len = size;
    message.msg_name = s->dgram_dst;
    message.msg_namelen = s->dst_size;
    message.msg_iov = s->vec;
    message.msg_iovlen = 2;
    message.msg_control = NULL;
    message.msg_controllen = 0;
    message.msg_flags = 0;
    ret = RETRY_ON_EINTR(sendmsg(s->fd, &message, 0));
    if (ret > 0) {
        ret -= s->offset;
    } else if (ret == 0) {
        /* belt and braces - should not occur on DGRAM
         * we should get an error and never a 0 send
         */
        ret = size;
    } else {
        ret = -errno;
        if (ret == -EAGAIN || ret == -ENOBUFS) {
            /* signal upper layer that socket buffer is full */
            l2tpv3_write_poll(s, true);
            ret = 0;
        }
    }
    return ret;
}
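
/*
 * Check the cookie and session ID of a received packet against the
 * configured receive values; returns 0 on match, -1 on mismatch.
 * For raw IPv4 sockets the kernel hands us the IP header as well,
 * so it is skipped first.
 */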
static int l2tpv3_verify_header(NetL2TPV3State *s, uint8_t *buf)
{
    uint32_t *session;
    uint64_t cookie;

    if ((!s->udp) && (!s->ipv6)) {
        buf += sizeof(struct iphdr) /* fix for ipv4 raw */;
    }

    /* we do not do a strict check for "data" packets as per
     * the RFC spec because the pure IP spec does not have
     * that anyway.
     */

    if (s->cookie) {
        if (s->cookie_is_64) {
            cookie = ldq_be_p(buf + s->cookie_offset);
        } else {
            cookie = ldl_be_p(buf + s->cookie_offset) & 0xffffffffULL;
        }
        if (cookie != s->rx_cookie) {
            if (!s->header_mismatch) {
                error_report("unknown cookie id");
            }
            return -1;
        }
    }
    session = (uint32_t *) (buf + s->session_offset);
    if (ldl_be_p(session) != s->rx_session) {
        if (!s->header_mismatch) {
            error_report("session mismatch");
        }
        return -1;
    }
    return 0;
}
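
/*
 * Drain packets received into the s->msgvec ring: verify each header
 * and hand the payload to the peer. Delivery stops early when the
 * peer cannot take more packets (qemu_send_packet_async() returns 0),
 * in which case read polling is suspended until the queued send
 * completes and l2tpv3_send_completed() re-enables it.
 */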
static void net_l2tpv3_process_queue(NetL2TPV3State *s)
{
    int size = 0;
    struct iovec *vec;
    bool bad_read;
    int data_size;
    struct mmsghdr *msgvec;

    /* go into ring mode only if there is a "pending" tail */
    if (s->queue_depth > 0) {
        do {
            msgvec = s->msgvec + s->queue_tail;
            if (msgvec->msg_len > 0) {
                data_size = msgvec->msg_len - s->header_size;
                vec = msgvec->msg_hdr.msg_iov;
                if ((data_size > 0) &&
                    (l2tpv3_verify_header(s, vec->iov_base) == 0)) {
                    vec++;
                    /* Use the legacy delivery for now, we will
                     * switch to using our own ring as a queueing mechanism
                     * at a later date
                     */
                    size = qemu_send_packet_async(
                        &s->nc,
                        vec->iov_base,
                        data_size,
                        l2tpv3_send_completed
                    );
                    if (size == 0) {
                        l2tpv3_read_poll(s, false);
                    }
                    bad_read = false;
                } else {
                    bad_read = true;
                    if (!s->header_mismatch) {
                        /* report error only once */
                        error_report("l2tpv3 header verification failed");
                        s->header_mismatch = true;
                    }
                }
            } else {
                bad_read = true;
            }
            s->queue_tail = (s->queue_tail + 1) % MAX_L2TPV3_MSGCNT;
            s->queue_depth--;
        } while (
            (s->queue_depth > 0) &&
            qemu_can_send_packet(&s->nc) &&
            ((size > 0) || bad_read)
        );
    }
}
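
/*
 * Read handler: pull up to MAX_L2TPV3_MSGCNT datagrams off the socket
 * in one recvmmsg() call, append them to the ring, and process them.
 * When the ring already holds pending packets, the read is clamped to
 * the free slots so the head neither overwrites unprocessed entries
 * nor wraps past the end of the message vector.
 */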
static void net_l2tpv3_send(void *opaque)
{
    NetL2TPV3State *s = opaque;
    int target_count, count;
    struct mmsghdr *msgvec;

    /* go into ring mode only if there is a "pending" tail */
    if (s->queue_depth) {
        /* The ring buffer we use has variable intake -
         * the count of how much we can read varies, so adjust accordingly
         */
        target_count = MAX_L2TPV3_MSGCNT - s->queue_depth;

        /* Ensure we do not overrun the ring when we have
         * a lot of enqueued packets
         */
        if (s->queue_head + target_count > MAX_L2TPV3_MSGCNT) {
            target_count = MAX_L2TPV3_MSGCNT - s->queue_head;
        }
    } else {
        /* we do not have any pending packets - we can use
         * the whole message vector linearly instead of using
         * it as a ring
         */
        s->queue_head = 0;
        s->queue_tail = 0;
        target_count = MAX_L2TPV3_MSGCNT;
    }

    msgvec = s->msgvec + s->queue_head;
    if (target_count > 0) {
        count = RETRY_ON_EINTR(
            recvmmsg(s->fd, msgvec, target_count, MSG_DONTWAIT, NULL)
        );
        if (count < 0) {
            /* Recv error - we still need to flush packets here,
             * (re)set queue head to current position
             */
            count = 0;
        }
        s->queue_head = (s->queue_head + count) % MAX_L2TPV3_MSGCNT;
        s->queue_depth += count;
    }
    net_l2tpv3_process_queue(s);
}
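
/*
 * Free an mmsghdr vector previously allocated by build_l2tpv3_vector(),
 * including every iovec and the buffers those iovecs point to.
 */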
static void destroy_vector(struct mmsghdr *msgvec, int count, int iovcount)
{
    int i, j;
    struct iovec *iov;
    struct mmsghdr *cleanup = msgvec;
    if (cleanup) {
        for (i = 0; i < count; i++) {
            if (cleanup->msg_hdr.msg_iov) {
                iov = cleanup->msg_hdr.msg_iov;
                for (j = 0; j < iovcount; j++) {
                    g_free(iov->iov_base);
                    iov++;
                }
                g_free(cleanup->msg_hdr.msg_iov);
            }
            cleanup++;
        }
        g_free(msgvec);
    }
}
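
/*
 * Allocate the receive vector: count mmsghdr entries, each with a
 * two-element iovec - slot 0 sized for the L2TPv3 header, slot 1 a
 * page-aligned BUFFER_SIZE buffer for the packet payload.
 */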
static struct mmsghdr *build_l2tpv3_vector(NetL2TPV3State *s, int count)
{
    int i;
    struct iovec *iov;
    struct mmsghdr *msgvec, *result;

    msgvec = g_new(struct mmsghdr, count);
    result = msgvec;
    for (i = 0; i < count; i++) {
        msgvec->msg_hdr.msg_name = NULL;
        msgvec->msg_hdr.msg_namelen = 0;
        iov = g_new(struct iovec, IOVSIZE);
        msgvec->msg_hdr.msg_iov = iov;
        iov->iov_base = g_malloc(s->header_size);
        iov->iov_len = s->header_size;
        iov++;
        iov->iov_base = qemu_memalign(BUFFER_ALIGN, BUFFER_SIZE);
        iov->iov_len = BUFFER_SIZE;
        msgvec->msg_hdr.msg_iovlen = 2;
        msgvec->msg_hdr.msg_control = NULL;
        msgvec->msg_hdr.msg_controllen = 0;
        msgvec->msg_hdr.msg_flags = 0;
        msgvec++;
    }
    return result;
}
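
/*
 * .cleanup handler: drop any queued packets, stop polling, close the
 * socket, and release everything allocated in net_init_l2tpv3().
 */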
static void net_l2tpv3_cleanup(NetClientState *nc)
{
    NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
    qemu_purge_queued_packets(nc);
    l2tpv3_read_poll(s, false);
    l2tpv3_write_poll(s, false);
    if (s->fd >= 0) {
        close(s->fd);
    }
    destroy_vector(s->msgvec, MAX_L2TPV3_MSGCNT, IOVSIZE);
    g_free(s->vec);
    g_free(s->header_buf);
    g_free(s->dgram_dst);
}

static NetClientInfo net_l2tpv3_info = {
    .type = NET_CLIENT_DRIVER_L2TPV3,
    .size = sizeof(NetL2TPV3State),
    .receive = net_l2tpv3_receive_dgram,
    .receive_iov = net_l2tpv3_receive_dgram_iov,
    .poll = l2tpv3_poll,
    .cleanup = net_l2tpv3_cleanup,
};
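
/*
 * Initialise the netdev from -netdev l2tpv3,... options. An
 * illustrative invocation (option names follow the NetdevL2TPv3Options
 * fields parsed below; see the QEMU manual for the authoritative
 * syntax):
 *
 *   -netdev l2tpv3,id=vm0,src=192.0.2.1,dst=192.0.2.2,udp=on,
 *           srcport=1701,dstport=1701,txsession=10,rxsession=20,
 *           counter=on
 *   -device virtio-net-pci,netdev=vm0
 */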
int net_init_l2tpv3(const Netdev *netdev,
                    const char *name,
                    NetClientState *peer, Error **errp)
{
    const NetdevL2TPv3Options *l2tpv3;
    NetL2TPV3State *s;
    NetClientState *nc;
    int fd = -1, gairet;
    struct addrinfo hints;
    struct addrinfo *result = NULL;
    char *srcport, *dstport;

    nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name);

    s = DO_UPCAST(NetL2TPV3State, nc, nc);

    s->queue_head = 0;
    s->queue_tail = 0;
    s->header_mismatch = false;

    assert(netdev->type == NET_CLIENT_DRIVER_L2TPV3);
    l2tpv3 = &netdev->u.l2tpv3;

    if (l2tpv3->has_ipv6 && l2tpv3->ipv6) {
        s->ipv6 = l2tpv3->ipv6;
    } else {
        s->ipv6 = false;
    }

    if ((l2tpv3->has_offset) && (l2tpv3->offset > 256)) {
        error_setg(errp, "offset must not exceed 256 bytes");
        goto outerr;
    }

    if (l2tpv3->has_rxcookie || l2tpv3->has_txcookie) {
        if (l2tpv3->has_rxcookie && l2tpv3->has_txcookie) {
            s->cookie = true;
        } else {
            error_setg(errp,
                       "require both 'rxcookie' and 'txcookie' or neither");
            goto outerr;
        }
    } else {
        s->cookie = false;
    }

    if (l2tpv3->has_cookie64 && l2tpv3->cookie64) {
        s->cookie_is_64 = true;
    } else {
        s->cookie_is_64 = false;
    }

    if (l2tpv3->has_udp && l2tpv3->udp) {
        s->udp = true;
        if (!(l2tpv3->srcport && l2tpv3->dstport)) {
            error_setg(errp, "need both src and dst port for udp");
            goto outerr;
        } else {
            srcport = l2tpv3->srcport;
            dstport = l2tpv3->dstport;
        }
    } else {
        s->udp = false;
        srcport = NULL;
        dstport = NULL;
    }

    s->offset = 4;
    s->session_offset = 0;
    s->cookie_offset = 4;
    s->counter_offset = 4;

    s->tx_session = l2tpv3->txsession;
    if (l2tpv3->has_rxsession) {
        s->rx_session = l2tpv3->rxsession;
    } else {
        s->rx_session = s->tx_session;
    }

    if (s->cookie) {
        s->rx_cookie = l2tpv3->rxcookie;
        s->tx_cookie = l2tpv3->txcookie;
        if (s->cookie_is_64 == true) {
            /* 64 bit cookie */
            s->offset += 8;
            s->counter_offset += 8;
        } else {
            /* 32 bit cookie */
            s->offset += 4;
            s->counter_offset += 4;
        }
    }

    memset(&hints, 0, sizeof(hints));

    if (s->ipv6) {
        hints.ai_family = AF_INET6;
    } else {
        hints.ai_family = AF_INET;
    }
    if (s->udp) {
        hints.ai_socktype = SOCK_DGRAM;
        hints.ai_protocol = 0;
        s->offset += 4;
        s->counter_offset += 4;
        s->session_offset += 4;
        s->cookie_offset += 4;
    } else {
        hints.ai_socktype = SOCK_RAW;
        hints.ai_protocol = IPPROTO_L2TP;
    }

    gairet = getaddrinfo(l2tpv3->src, srcport, &hints, &result);

    if ((gairet != 0) || (result == NULL)) {
        error_setg(errp, "could not resolve src, error = %s",
                   gai_strerror(gairet));
        goto outerr;
    }
    fd = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
    if (fd == -1) {
        fd = -errno;
        error_setg(errp, "socket creation failed, errno = %d",
                   -fd);
        goto outerr;
    }
    if (bind(fd, (struct sockaddr *) result->ai_addr, result->ai_addrlen)) {
        error_setg(errp, "could not bind socket err=%i", errno);
        goto outerr;
    }

    freeaddrinfo(result);

    memset(&hints, 0, sizeof(hints));

    if (s->ipv6) {
        hints.ai_family = AF_INET6;
    } else {
        hints.ai_family = AF_INET;
    }
    if (s->udp) {
        hints.ai_socktype = SOCK_DGRAM;
        hints.ai_protocol = 0;
    } else {
        hints.ai_socktype = SOCK_RAW;
        hints.ai_protocol = IPPROTO_L2TP;
    }

    result = NULL;
    gairet = getaddrinfo(l2tpv3->dst, dstport, &hints, &result);
    if ((gairet != 0) || (result == NULL)) {
        error_setg(errp, "could not resolve dst, error = %s",
                   gai_strerror(gairet));
        goto outerr;
    }

    s->dgram_dst = g_new0(struct sockaddr_storage, 1);
    memcpy(s->dgram_dst, result->ai_addr, result->ai_addrlen);
    s->dst_size = result->ai_addrlen;

    freeaddrinfo(result);

    if (l2tpv3->has_counter && l2tpv3->counter) {
        s->has_counter = true;
        s->offset += 4;
    } else {
        s->has_counter = false;
    }

    if (l2tpv3->has_pincounter && l2tpv3->pincounter) {
        s->has_counter = true;  /* pin counter implies that there is counter */
        s->pin_counter = true;
    } else {
        s->pin_counter = false;
    }

    if (l2tpv3->has_offset) {
        /* extra offset */
        s->offset += l2tpv3->offset;
    }

    if ((s->ipv6) || (s->udp)) {
        s->header_size = s->offset;
    } else {
        s->header_size = s->offset + sizeof(struct iphdr);
    }

    s->msgvec = build_l2tpv3_vector(s, MAX_L2TPV3_MSGCNT);
    s->vec = g_new(struct iovec, MAX_L2TPV3_IOVCNT);
    s->header_buf = g_malloc(s->header_size);

    qemu_socket_set_nonblock(fd);

    s->fd = fd;
    s->counter = 0;

    l2tpv3_read_poll(s, true);

    qemu_set_info_str(&s->nc, "l2tpv3: connected");
    return 0;
outerr:
    qemu_del_net_client(nc);
    if (fd >= 0) {
        close(fd);
    }
    if (result) {
        freeaddrinfo(result);
    }
    return -1;
}