l2tpv3.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744
  1. /*
  2. * QEMU System Emulator
  3. *
  4. * Copyright (c) 2003-2008 Fabrice Bellard
  5. * Copyright (c) 2012-2014 Cisco Systems
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy
  8. * of this software and associated documentation files (the "Software"), to deal
  9. * in the Software without restriction, including without limitation the rights
  10. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11. * copies of the Software, and to permit persons to whom the Software is
  12. * furnished to do so, subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in
  15. * all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  23. * THE SOFTWARE.
  24. */
  25. #include "qemu/osdep.h"
  26. #include <linux/ip.h>
  27. #include <netdb.h>
  28. #include "net/net.h"
  29. #include "clients.h"
  30. #include "qemu-common.h"
  31. #include "qemu/error-report.h"
  32. #include "qemu/option.h"
  33. #include "qemu/sockets.h"
  34. #include "qemu/iov.h"
  35. #include "qemu/main-loop.h"
  36. /* The buffer size needs to be investigated for optimum numbers and
  37. * optimum means of paging in on different systems. This size is
  38. * chosen to be sufficient to accommodate one packet with some headers
  39. */
  40. #define BUFFER_ALIGN sysconf(_SC_PAGESIZE)
  41. #define BUFFER_SIZE 2048
  42. #define IOVSIZE 2
  43. #define MAX_L2TPV3_MSGCNT 64
  44. #define MAX_L2TPV3_IOVCNT (MAX_L2TPV3_MSGCNT * IOVSIZE)
  45. /* Header set to 0x30000 signifies a data packet */
  46. #define L2TPV3_DATA_PACKET 0x30000
  47. /* IANA-assigned IP protocol ID for L2TPv3 */
  48. #ifndef IPPROTO_L2TP
  49. #define IPPROTO_L2TP 0x73
  50. #endif
/* Per-client state for the L2TPv3 netdev backend. */
typedef struct NetL2TPV3State {
    NetClientState nc;
    /* tunnel socket (UDP or raw IP), made non-blocking at init */
    int fd;
    /*
     * these are used for xmit - that happens packet a time
     * and for first sign of life packet (easier to parse that once)
     */
    uint8_t *header_buf;
    struct iovec *vec;
    /*
     * these are used for receive - try to "eat" up to
     * MAX_L2TPV3_MSGCNT packets at a time
     */
    struct mmsghdr *msgvec;
    /*
     * peer address
     */
    struct sockaddr_storage *dgram_dst;
    uint32_t dst_size;
    /*
     * L2TPv3 parameters
     */
    uint64_t rx_cookie;
    uint64_t tx_cookie;
    uint32_t rx_session;
    uint32_t tx_session;
    /* full receive header size; includes struct iphdr for raw IPv4 */
    uint32_t header_size;
    /* running tx sequence counter (pre-incremented per packet) */
    uint32_t counter;
    /*
     * DOS avoidance in error handling - report header mismatches once
     */
    bool header_mismatch;
    /*
     * Ring buffer handling
     */
    int queue_head;
    int queue_tail;
    int queue_depth;
    /*
     * Precomputed offsets (relative to start of the L2TPv3 header)
     */
    uint32_t offset;          /* total tx header length in bytes */
    uint32_t cookie_offset;
    uint32_t counter_offset;
    uint32_t session_offset;
    /* Poll Control */
    bool read_poll;
    bool write_poll;
    /* Flags */
    bool ipv6;
    bool udp;
    bool has_counter;
    bool pin_counter;         /* when set, counter field is always sent as 0 */
    bool cookie;
    bool cookie_is_64;
} NetL2TPV3State;
  106. static void net_l2tpv3_send(void *opaque);
  107. static void l2tpv3_writable(void *opaque);
  108. static void l2tpv3_update_fd_handler(NetL2TPV3State *s)
  109. {
  110. qemu_set_fd_handler(s->fd,
  111. s->read_poll ? net_l2tpv3_send : NULL,
  112. s->write_poll ? l2tpv3_writable : NULL,
  113. s);
  114. }
  115. static void l2tpv3_read_poll(NetL2TPV3State *s, bool enable)
  116. {
  117. if (s->read_poll != enable) {
  118. s->read_poll = enable;
  119. l2tpv3_update_fd_handler(s);
  120. }
  121. }
  122. static void l2tpv3_write_poll(NetL2TPV3State *s, bool enable)
  123. {
  124. if (s->write_poll != enable) {
  125. s->write_poll = enable;
  126. l2tpv3_update_fd_handler(s);
  127. }
  128. }
  129. static void l2tpv3_writable(void *opaque)
  130. {
  131. NetL2TPV3State *s = opaque;
  132. l2tpv3_write_poll(s, false);
  133. qemu_flush_queued_packets(&s->nc);
  134. }
  135. static void l2tpv3_send_completed(NetClientState *nc, ssize_t len)
  136. {
  137. NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
  138. l2tpv3_read_poll(s, true);
  139. }
  140. static void l2tpv3_poll(NetClientState *nc, bool enable)
  141. {
  142. NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
  143. l2tpv3_write_poll(s, enable);
  144. l2tpv3_read_poll(s, enable);
  145. }
  146. static void l2tpv3_form_header(NetL2TPV3State *s)
  147. {
  148. uint32_t *counter;
  149. if (s->udp) {
  150. stl_be_p((uint32_t *) s->header_buf, L2TPV3_DATA_PACKET);
  151. }
  152. stl_be_p(
  153. (uint32_t *) (s->header_buf + s->session_offset),
  154. s->tx_session
  155. );
  156. if (s->cookie) {
  157. if (s->cookie_is_64) {
  158. stq_be_p(
  159. (uint64_t *)(s->header_buf + s->cookie_offset),
  160. s->tx_cookie
  161. );
  162. } else {
  163. stl_be_p(
  164. (uint32_t *) (s->header_buf + s->cookie_offset),
  165. s->tx_cookie
  166. );
  167. }
  168. }
  169. if (s->has_counter) {
  170. counter = (uint32_t *)(s->header_buf + s->counter_offset);
  171. if (s->pin_counter) {
  172. *counter = 0;
  173. } else {
  174. stl_be_p(counter, ++s->counter);
  175. }
  176. }
  177. }
  178. static ssize_t net_l2tpv3_receive_dgram_iov(NetClientState *nc,
  179. const struct iovec *iov,
  180. int iovcnt)
  181. {
  182. NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
  183. struct msghdr message;
  184. int ret;
  185. if (iovcnt > MAX_L2TPV3_IOVCNT - 1) {
  186. error_report(
  187. "iovec too long %d > %d, change l2tpv3.h",
  188. iovcnt, MAX_L2TPV3_IOVCNT
  189. );
  190. return -1;
  191. }
  192. l2tpv3_form_header(s);
  193. memcpy(s->vec + 1, iov, iovcnt * sizeof(struct iovec));
  194. s->vec->iov_base = s->header_buf;
  195. s->vec->iov_len = s->offset;
  196. message.msg_name = s->dgram_dst;
  197. message.msg_namelen = s->dst_size;
  198. message.msg_iov = s->vec;
  199. message.msg_iovlen = iovcnt + 1;
  200. message.msg_control = NULL;
  201. message.msg_controllen = 0;
  202. message.msg_flags = 0;
  203. do {
  204. ret = sendmsg(s->fd, &message, 0);
  205. } while ((ret == -1) && (errno == EINTR));
  206. if (ret > 0) {
  207. ret -= s->offset;
  208. } else if (ret == 0) {
  209. /* belt and braces - should not occur on DGRAM
  210. * we should get an error and never a 0 send
  211. */
  212. ret = iov_size(iov, iovcnt);
  213. } else {
  214. /* signal upper layer that socket buffer is full */
  215. ret = -errno;
  216. if (ret == -EAGAIN || ret == -ENOBUFS) {
  217. l2tpv3_write_poll(s, true);
  218. ret = 0;
  219. }
  220. }
  221. return ret;
  222. }
  223. static ssize_t net_l2tpv3_receive_dgram(NetClientState *nc,
  224. const uint8_t *buf,
  225. size_t size)
  226. {
  227. NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
  228. struct iovec *vec;
  229. struct msghdr message;
  230. ssize_t ret = 0;
  231. l2tpv3_form_header(s);
  232. vec = s->vec;
  233. vec->iov_base = s->header_buf;
  234. vec->iov_len = s->offset;
  235. vec++;
  236. vec->iov_base = (void *) buf;
  237. vec->iov_len = size;
  238. message.msg_name = s->dgram_dst;
  239. message.msg_namelen = s->dst_size;
  240. message.msg_iov = s->vec;
  241. message.msg_iovlen = 2;
  242. message.msg_control = NULL;
  243. message.msg_controllen = 0;
  244. message.msg_flags = 0;
  245. do {
  246. ret = sendmsg(s->fd, &message, 0);
  247. } while ((ret == -1) && (errno == EINTR));
  248. if (ret > 0) {
  249. ret -= s->offset;
  250. } else if (ret == 0) {
  251. /* belt and braces - should not occur on DGRAM
  252. * we should get an error and never a 0 send
  253. */
  254. ret = size;
  255. } else {
  256. ret = -errno;
  257. if (ret == -EAGAIN || ret == -ENOBUFS) {
  258. /* signal upper layer that socket buffer is full */
  259. l2tpv3_write_poll(s, true);
  260. ret = 0;
  261. }
  262. }
  263. return ret;
  264. }
  265. static int l2tpv3_verify_header(NetL2TPV3State *s, uint8_t *buf)
  266. {
  267. uint32_t *session;
  268. uint64_t cookie;
  269. if ((!s->udp) && (!s->ipv6)) {
  270. buf += sizeof(struct iphdr) /* fix for ipv4 raw */;
  271. }
  272. /* we do not do a strict check for "data" packets as per
  273. * the RFC spec because the pure IP spec does not have
  274. * that anyway.
  275. */
  276. if (s->cookie) {
  277. if (s->cookie_is_64) {
  278. cookie = ldq_be_p(buf + s->cookie_offset);
  279. } else {
  280. cookie = ldl_be_p(buf + s->cookie_offset) & 0xffffffffULL;
  281. }
  282. if (cookie != s->rx_cookie) {
  283. if (!s->header_mismatch) {
  284. error_report("unknown cookie id");
  285. }
  286. return -1;
  287. }
  288. }
  289. session = (uint32_t *) (buf + s->session_offset);
  290. if (ldl_be_p(session) != s->rx_session) {
  291. if (!s->header_mismatch) {
  292. error_report("session mismatch");
  293. }
  294. return -1;
  295. }
  296. return 0;
  297. }
/* Drain the receive ring: verify each pending datagram's L2TPv3 header
 * and hand its payload to the network core.  Delivery stops when the
 * core cannot accept more packets (qemu_send_packet_async returns 0,
 * in which case read-polling is paused until l2tpv3_send_completed),
 * or when the ring is empty.  Bad packets are dropped and do not stall
 * the loop.
 */
static void net_l2tpv3_process_queue(NetL2TPV3State *s)
{
    int size = 0;
    struct iovec *vec;
    bool bad_read;
    int data_size;
    struct mmsghdr *msgvec;

    /* go into ring mode only if there is a "pending" tail */
    if (s->queue_depth > 0) {
        do {
            msgvec = s->msgvec + s->queue_tail;
            if (msgvec->msg_len > 0) {
                /* msg_len includes our header; the rest is payload */
                data_size = msgvec->msg_len - s->header_size;
                vec = msgvec->msg_hdr.msg_iov;
                if ((data_size > 0) &&
                    (l2tpv3_verify_header(s, vec->iov_base) == 0)) {
                    /* iovec 0 is the header, iovec 1 the payload */
                    vec++;
                    /* Use the legacy delivery for now, we will
                     * switch to using our own ring as a queueing mechanism
                     * at a later date
                     */
                    size = qemu_send_packet_async(
                            &s->nc,
                            vec->iov_base,
                            data_size,
                            l2tpv3_send_completed
                        );
                    if (size == 0) {
                        /* core is congested - stop reading until the
                         * completion callback re-enables polling
                         */
                        l2tpv3_read_poll(s, false);
                    }
                    bad_read = false;
                } else {
                    bad_read = true;
                    if (!s->header_mismatch) {
                        /* report error only once */
                        error_report("l2tpv3 header verification failed");
                        s->header_mismatch = true;
                    }
                }
            } else {
                /* zero-length receive - treat as a bad read and drop */
                bad_read = true;
            }
            s->queue_tail = (s->queue_tail + 1) % MAX_L2TPV3_MSGCNT;
            s->queue_depth--;
        } while (
            (s->queue_depth > 0) &&
             qemu_can_send_packet(&s->nc) &&
            ((size > 0) || bad_read)
        );
    }
}
/* fd read handler: batch-read up to MAX_L2TPV3_MSGCNT datagrams into
 * the message ring with recvmmsg(), then deliver them through
 * net_l2tpv3_process_queue().
 */
static void net_l2tpv3_send(void *opaque)
{
    NetL2TPV3State *s = opaque;
    int target_count, count;
    struct mmsghdr *msgvec;

    /* go into ring mode only if there is a "pending" tail */
    if (s->queue_depth) {
        /* The ring buffer we use has variable intake
         * count of how much we can read varies - adjust accordingly
         */
        target_count = MAX_L2TPV3_MSGCNT - s->queue_depth;
        /* Ensure we do not overrun the ring when we have
         * a lot of enqueued packets
         */
        if (s->queue_head + target_count > MAX_L2TPV3_MSGCNT) {
            /* clamp to the contiguous space up to the ring's end */
            target_count = MAX_L2TPV3_MSGCNT - s->queue_head;
        }
    } else {
        /* we do not have any pending packets - we can use
         * the whole message vector linearly instead of using
         * it as a ring
         */
        s->queue_head = 0;
        s->queue_tail = 0;
        target_count = MAX_L2TPV3_MSGCNT;
    }

    msgvec = s->msgvec + s->queue_head;
    if (target_count > 0) {
        /* non-blocking batch read; retry only on signal interruption */
        do {
            count = recvmmsg(
                s->fd,
                msgvec,
                target_count, MSG_DONTWAIT, NULL);
        } while ((count == -1) && (errno == EINTR));
        if (count < 0) {
            /* Recv error - we still need to flush packets here,
             * (re)set queue head to current position
             */
            count = 0;
        }
        s->queue_head = (s->queue_head + count) % MAX_L2TPV3_MSGCNT;
        s->queue_depth += count;
    }
    net_l2tpv3_process_queue(s);
}
  394. static void destroy_vector(struct mmsghdr *msgvec, int count, int iovcount)
  395. {
  396. int i, j;
  397. struct iovec *iov;
  398. struct mmsghdr *cleanup = msgvec;
  399. if (cleanup) {
  400. for (i = 0; i < count; i++) {
  401. if (cleanup->msg_hdr.msg_iov) {
  402. iov = cleanup->msg_hdr.msg_iov;
  403. for (j = 0; j < iovcount; j++) {
  404. g_free(iov->iov_base);
  405. iov++;
  406. }
  407. g_free(cleanup->msg_hdr.msg_iov);
  408. }
  409. cleanup++;
  410. }
  411. g_free(msgvec);
  412. }
  413. }
  414. static struct mmsghdr *build_l2tpv3_vector(NetL2TPV3State *s, int count)
  415. {
  416. int i;
  417. struct iovec *iov;
  418. struct mmsghdr *msgvec, *result;
  419. msgvec = g_new(struct mmsghdr, count);
  420. result = msgvec;
  421. for (i = 0; i < count ; i++) {
  422. msgvec->msg_hdr.msg_name = NULL;
  423. msgvec->msg_hdr.msg_namelen = 0;
  424. iov = g_new(struct iovec, IOVSIZE);
  425. msgvec->msg_hdr.msg_iov = iov;
  426. iov->iov_base = g_malloc(s->header_size);
  427. iov->iov_len = s->header_size;
  428. iov++ ;
  429. iov->iov_base = qemu_memalign(BUFFER_ALIGN, BUFFER_SIZE);
  430. iov->iov_len = BUFFER_SIZE;
  431. msgvec->msg_hdr.msg_iovlen = 2;
  432. msgvec->msg_hdr.msg_control = NULL;
  433. msgvec->msg_hdr.msg_controllen = 0;
  434. msgvec->msg_hdr.msg_flags = 0;
  435. msgvec++;
  436. }
  437. return result;
  438. }
/* NetClientInfo.cleanup hook: stop polling, close the tunnel socket and
 * release every buffer owned by the state structure.
 */
static void net_l2tpv3_cleanup(NetClientState *nc)
{
    NetL2TPV3State *s = DO_UPCAST(NetL2TPV3State, nc, nc);
    qemu_purge_queued_packets(nc);
    l2tpv3_read_poll(s, false);
    l2tpv3_write_poll(s, false);
    if (s->fd >= 0) {
        close(s->fd);
    }
    destroy_vector(s->msgvec, MAX_L2TPV3_MSGCNT, IOVSIZE);
    g_free(s->vec);
    g_free(s->header_buf);
    g_free(s->dgram_dst);
}
/* Client ops table, wired up by qemu_new_net_client() in net_init_l2tpv3() */
static NetClientInfo net_l2tpv3_info = {
    .type = NET_CLIENT_DRIVER_L2TPV3,
    .size = sizeof(NetL2TPV3State),
    .receive = net_l2tpv3_receive_dgram,
    .receive_iov = net_l2tpv3_receive_dgram_iov,
    .poll = l2tpv3_poll,
    .cleanup = net_l2tpv3_cleanup,
};
  461. int net_init_l2tpv3(const Netdev *netdev,
  462. const char *name,
  463. NetClientState *peer, Error **errp)
  464. {
  465. /* FIXME error_setg(errp, ...) on failure */
  466. const NetdevL2TPv3Options *l2tpv3;
  467. NetL2TPV3State *s;
  468. NetClientState *nc;
  469. int fd = -1, gairet;
  470. struct addrinfo hints;
  471. struct addrinfo *result = NULL;
  472. char *srcport, *dstport;
  473. nc = qemu_new_net_client(&net_l2tpv3_info, peer, "l2tpv3", name);
  474. s = DO_UPCAST(NetL2TPV3State, nc, nc);
  475. s->queue_head = 0;
  476. s->queue_tail = 0;
  477. s->header_mismatch = false;
  478. assert(netdev->type == NET_CLIENT_DRIVER_L2TPV3);
  479. l2tpv3 = &netdev->u.l2tpv3;
  480. if (l2tpv3->has_ipv6 && l2tpv3->ipv6) {
  481. s->ipv6 = l2tpv3->ipv6;
  482. } else {
  483. s->ipv6 = false;
  484. }
  485. if ((l2tpv3->has_offset) && (l2tpv3->offset > 256)) {
  486. error_report("l2tpv3_open : offset must be less than 256 bytes");
  487. goto outerr;
  488. }
  489. if (l2tpv3->has_rxcookie || l2tpv3->has_txcookie) {
  490. if (l2tpv3->has_rxcookie && l2tpv3->has_txcookie) {
  491. s->cookie = true;
  492. } else {
  493. goto outerr;
  494. }
  495. } else {
  496. s->cookie = false;
  497. }
  498. if (l2tpv3->has_cookie64 || l2tpv3->cookie64) {
  499. s->cookie_is_64 = true;
  500. } else {
  501. s->cookie_is_64 = false;
  502. }
  503. if (l2tpv3->has_udp && l2tpv3->udp) {
  504. s->udp = true;
  505. if (!(l2tpv3->has_srcport && l2tpv3->has_dstport)) {
  506. error_report("l2tpv3_open : need both src and dst port for udp");
  507. goto outerr;
  508. } else {
  509. srcport = l2tpv3->srcport;
  510. dstport = l2tpv3->dstport;
  511. }
  512. } else {
  513. s->udp = false;
  514. srcport = NULL;
  515. dstport = NULL;
  516. }
  517. s->offset = 4;
  518. s->session_offset = 0;
  519. s->cookie_offset = 4;
  520. s->counter_offset = 4;
  521. s->tx_session = l2tpv3->txsession;
  522. if (l2tpv3->has_rxsession) {
  523. s->rx_session = l2tpv3->rxsession;
  524. } else {
  525. s->rx_session = s->tx_session;
  526. }
  527. if (s->cookie) {
  528. s->rx_cookie = l2tpv3->rxcookie;
  529. s->tx_cookie = l2tpv3->txcookie;
  530. if (s->cookie_is_64 == true) {
  531. /* 64 bit cookie */
  532. s->offset += 8;
  533. s->counter_offset += 8;
  534. } else {
  535. /* 32 bit cookie */
  536. s->offset += 4;
  537. s->counter_offset += 4;
  538. }
  539. }
  540. memset(&hints, 0, sizeof(hints));
  541. if (s->ipv6) {
  542. hints.ai_family = AF_INET6;
  543. } else {
  544. hints.ai_family = AF_INET;
  545. }
  546. if (s->udp) {
  547. hints.ai_socktype = SOCK_DGRAM;
  548. hints.ai_protocol = 0;
  549. s->offset += 4;
  550. s->counter_offset += 4;
  551. s->session_offset += 4;
  552. s->cookie_offset += 4;
  553. } else {
  554. hints.ai_socktype = SOCK_RAW;
  555. hints.ai_protocol = IPPROTO_L2TP;
  556. }
  557. gairet = getaddrinfo(l2tpv3->src, srcport, &hints, &result);
  558. if ((gairet != 0) || (result == NULL)) {
  559. error_report(
  560. "l2tpv3_open : could not resolve src, errno = %s",
  561. gai_strerror(gairet)
  562. );
  563. goto outerr;
  564. }
  565. fd = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
  566. if (fd == -1) {
  567. fd = -errno;
  568. error_report("l2tpv3_open : socket creation failed, errno = %d", -fd);
  569. goto outerr;
  570. }
  571. if (bind(fd, (struct sockaddr *) result->ai_addr, result->ai_addrlen)) {
  572. error_report("l2tpv3_open : could not bind socket err=%i", errno);
  573. goto outerr;
  574. }
  575. if (result) {
  576. freeaddrinfo(result);
  577. }
  578. memset(&hints, 0, sizeof(hints));
  579. if (s->ipv6) {
  580. hints.ai_family = AF_INET6;
  581. } else {
  582. hints.ai_family = AF_INET;
  583. }
  584. if (s->udp) {
  585. hints.ai_socktype = SOCK_DGRAM;
  586. hints.ai_protocol = 0;
  587. } else {
  588. hints.ai_socktype = SOCK_RAW;
  589. hints.ai_protocol = IPPROTO_L2TP;
  590. }
  591. result = NULL;
  592. gairet = getaddrinfo(l2tpv3->dst, dstport, &hints, &result);
  593. if ((gairet != 0) || (result == NULL)) {
  594. error_report(
  595. "l2tpv3_open : could not resolve dst, error = %s",
  596. gai_strerror(gairet)
  597. );
  598. goto outerr;
  599. }
  600. s->dgram_dst = g_new0(struct sockaddr_storage, 1);
  601. memcpy(s->dgram_dst, result->ai_addr, result->ai_addrlen);
  602. s->dst_size = result->ai_addrlen;
  603. if (result) {
  604. freeaddrinfo(result);
  605. }
  606. if (l2tpv3->has_counter && l2tpv3->counter) {
  607. s->has_counter = true;
  608. s->offset += 4;
  609. } else {
  610. s->has_counter = false;
  611. }
  612. if (l2tpv3->has_pincounter && l2tpv3->pincounter) {
  613. s->has_counter = true; /* pin counter implies that there is counter */
  614. s->pin_counter = true;
  615. } else {
  616. s->pin_counter = false;
  617. }
  618. if (l2tpv3->has_offset) {
  619. /* extra offset */
  620. s->offset += l2tpv3->offset;
  621. }
  622. if ((s->ipv6) || (s->udp)) {
  623. s->header_size = s->offset;
  624. } else {
  625. s->header_size = s->offset + sizeof(struct iphdr);
  626. }
  627. s->msgvec = build_l2tpv3_vector(s, MAX_L2TPV3_MSGCNT);
  628. s->vec = g_new(struct iovec, MAX_L2TPV3_IOVCNT);
  629. s->header_buf = g_malloc(s->header_size);
  630. qemu_set_nonblock(fd);
  631. s->fd = fd;
  632. s->counter = 0;
  633. l2tpv3_read_poll(s, true);
  634. snprintf(s->nc.info_str, sizeof(s->nc.info_str),
  635. "l2tpv3: connected");
  636. return 0;
  637. outerr:
  638. qemu_del_net_client(nc);
  639. if (fd >= 0) {
  640. close(fd);
  641. }
  642. if (result) {
  643. freeaddrinfo(result);
  644. }
  645. return -1;
  646. }