/* xen_nic.c */
  1. /*
  2. * xen paravirt network card backend
  3. *
  4. * (c) Gerd Hoffmann <kraxel@redhat.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; under version 2 of the License.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along
  16. * with this program; if not, see <http://www.gnu.org/licenses/>.
  17. *
  18. * Contributions after 2012-01-13 are licensed under the terms of the
  19. * GNU GPL, version 2 or (at your option) any later version.
  20. */
  21. #include "qemu/osdep.h"
  22. #include <sys/socket.h>
  23. #include <sys/ioctl.h>
  24. #include <sys/wait.h>
  25. #include "net/net.h"
  26. #include "net/checksum.h"
  27. #include "net/util.h"
  28. #include "hw/xen/xen-legacy-backend.h"
  29. #include "hw/xen/interface/io/netif.h"
  30. /* ------------------------------------------------------------- */
/*
 * Per-instance state of one paravirt NIC backend.
 * Allocated by the xen backend core (see xen_netdev_ops.size below).
 */
struct XenNetDev {
    struct XenLegacyDevice xendev; /* must be first */
    char *mac;                     /* MAC address string, read from the "mac" xenstore node */
    int tx_work;                   /* bumped by net_tx_response() when more TX requests
                                    * arrived while draining; makes net_tx_packets() loop again */
    int tx_ring_ref;               /* grant reference of the shared TX ring (from frontend) */
    int rx_ring_ref;               /* grant reference of the shared RX ring (from frontend) */
    struct netif_tx_sring *txs;    /* mapped TX shared-ring page */
    struct netif_rx_sring *rxs;    /* mapped RX shared-ring page */
    netif_tx_back_ring_t tx_ring;  /* backend-private view of the TX ring */
    netif_rx_back_ring_t rx_ring;  /* backend-private view of the RX ring */
    NICConf conf;                  /* qemu NIC configuration (holds macaddr) */
    NICState *nic;                 /* qemu net client created in net_init() */
};
  44. /* ------------------------------------------------------------- */
/*
 * Post one response on the shared TX ring for request @txp with status
 * @st, and notify the frontend over the event channel if the ring
 * macros say a notification is needed.  If the frontend queued further
 * requests in the meantime, bump tx_work so net_tx_packets() makes
 * another pass.
 */
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id = txp->id;
    resp->status = st;

#if 0
    /* extra-info slots would need a NULL response of their own; kept
     * disabled because the feature flags enabling them are never
     * announced in xenstore (see net_tx_packets) */
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    /* fully caught up with the frontend? re-check for late requests */
    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}
  71. static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
  72. {
  73. #if 0
  74. /*
  75. * Hmm, why netback fails everything in the ring?
  76. * Should we do that even when not supporting SG and TSO?
  77. */
  78. RING_IDX cons = netdev->tx_ring.req_cons;
  79. do {
  80. make_tx_response(netif, txp, NETIF_RSP_ERROR);
  81. if (cons >= end) {
  82. break;
  83. }
  84. txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
  85. } while (1);
  86. netdev->tx_ring.req_cons = cons;
  87. netif_schedule_work(netif);
  88. netif_put(netif);
  89. #else
  90. net_tx_response(netdev, txp, NETIF_RSP_ERROR);
  91. #endif
  92. }
/*
 * Drain the TX ring: for each request, map the granted page read-only,
 * hand the frame to the qemu net layer, unmap, and post a response.
 * Outer loop repeats while net_tx_response() flagged late-arriving
 * requests via tx_work.
 */
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;   /* lazily-allocated bounce buffer for checksum fixup */

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            /* copy the request out of the shared ring before using it */
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif
            /* 14 = minimal ethernet header; anything shorter is bogus */
            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            /* packet data must fit inside the single granted page */
            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}
  173. /* ------------------------------------------------------------- */
  174. static void net_rx_response(struct XenNetDev *netdev,
  175. netif_rx_request_t *req, int8_t st,
  176. uint16_t offset, uint16_t size,
  177. uint16_t flags)
  178. {
  179. RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
  180. netif_rx_response_t *resp;
  181. int notify;
  182. resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
  183. resp->offset = offset;
  184. resp->flags = flags;
  185. resp->id = req->id;
  186. resp->status = (int16_t)size;
  187. if (st < 0) {
  188. resp->status = (int16_t)st;
  189. }
  190. xen_pv_printf(&netdev->xendev, 3,
  191. "rx response: idx %d, status %d, flags 0x%x\n",
  192. i, resp->status, resp->flags);
  193. netdev->rx_ring.rsp_prod_pvt = ++i;
  194. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
  195. if (notify) {
  196. xen_pv_send_notify(&netdev->xendev);
  197. }
  198. }
  199. #define NET_IP_ALIGN 2
  200. static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
  201. {
  202. struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
  203. netif_rx_request_t rxreq;
  204. RING_IDX rc, rp;
  205. void *page;
  206. if (netdev->xendev.be_state != XenbusStateConnected) {
  207. return -1;
  208. }
  209. rc = netdev->rx_ring.req_cons;
  210. rp = netdev->rx_ring.sring->req_prod;
  211. xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
  212. if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
  213. return 0;
  214. }
  215. if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
  216. xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
  217. (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
  218. return -1;
  219. }
  220. memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
  221. netdev->rx_ring.req_cons = ++rc;
  222. page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
  223. if (page == NULL) {
  224. xen_pv_printf(&netdev->xendev, 0,
  225. "error: rx gref dereference failed (%d)\n",
  226. rxreq.gref);
  227. net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
  228. return -1;
  229. }
  230. memcpy(page + NET_IP_ALIGN, buf, size);
  231. xen_be_unmap_grant_ref(&netdev->xendev, page);
  232. net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);
  233. return size;
  234. }
  235. /* ------------------------------------------------------------- */
/* qemu net-client callbacks for this backend; only .receive is needed
 * here since the TX direction is driven by event-channel notifications
 * (net_event -> net_tx_packets). */
static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};
  241. static int net_init(struct XenLegacyDevice *xendev)
  242. {
  243. struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
  244. /* read xenstore entries */
  245. if (netdev->mac == NULL) {
  246. netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
  247. }
  248. /* do we have all we need? */
  249. if (netdev->mac == NULL) {
  250. return -1;
  251. }
  252. if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
  253. return -1;
  254. }
  255. netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
  256. "xen", NULL, netdev);
  257. snprintf(qemu_get_queue(netdev->nic)->info_str,
  258. sizeof(qemu_get_queue(netdev->nic)->info_str),
  259. "nic: xenbus vif macaddr=%s", netdev->mac);
  260. /* fill info */
  261. xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
  262. xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);
  263. return 0;
  264. }
  265. static int net_connect(struct XenLegacyDevice *xendev)
  266. {
  267. struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
  268. int rx_copy;
  269. if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
  270. &netdev->tx_ring_ref) == -1) {
  271. return -1;
  272. }
  273. if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
  274. &netdev->rx_ring_ref) == -1) {
  275. return 1;
  276. }
  277. if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
  278. &netdev->xendev.remote_port) == -1) {
  279. return -1;
  280. }
  281. if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
  282. rx_copy = 0;
  283. }
  284. if (rx_copy == 0) {
  285. xen_pv_printf(&netdev->xendev, 0,
  286. "frontend doesn't support rx-copy.\n");
  287. return -1;
  288. }
  289. netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
  290. netdev->tx_ring_ref,
  291. PROT_READ | PROT_WRITE);
  292. if (!netdev->txs) {
  293. return -1;
  294. }
  295. netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
  296. netdev->rx_ring_ref,
  297. PROT_READ | PROT_WRITE);
  298. if (!netdev->rxs) {
  299. xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
  300. netdev->txs = NULL;
  301. return -1;
  302. }
  303. BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
  304. BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
  305. xen_be_bind_evtchn(&netdev->xendev);
  306. xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
  307. "remote port %d, local port %d\n",
  308. netdev->tx_ring_ref, netdev->rx_ring_ref,
  309. netdev->xendev.remote_port, netdev->xendev.local_port);
  310. net_tx_packets(netdev);
  311. return 0;
  312. }
  313. static void net_disconnect(struct XenLegacyDevice *xendev)
  314. {
  315. struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
  316. xen_pv_unbind_evtchn(&netdev->xendev);
  317. if (netdev->txs) {
  318. xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
  319. netdev->txs = NULL;
  320. }
  321. if (netdev->rxs) {
  322. xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
  323. netdev->rxs = NULL;
  324. }
  325. }
  326. static void net_event(struct XenLegacyDevice *xendev)
  327. {
  328. struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
  329. net_tx_packets(netdev);
  330. qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
  331. }
  332. static int net_free(struct XenLegacyDevice *xendev)
  333. {
  334. struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
  335. if (netdev->nic) {
  336. qemu_del_nic(netdev->nic);
  337. netdev->nic = NULL;
  338. }
  339. g_free(netdev->mac);
  340. netdev->mac = NULL;
  341. return 0;
  342. }
  343. /* ------------------------------------------------------------- */
/* operations table registered with the xen legacy-backend core */
struct XenDevOps xen_netdev_ops = {
    .size = sizeof(struct XenNetDev),
    .flags = DEVOPS_FLAG_NEED_GNTDEV,  /* we map grant references directly */
    .init = net_init,                  /* read MAC, create qemu NIC */
    .initialise = net_connect,         /* map rings, bind event channel */
    .event = net_event,                /* event-channel notification */
    .disconnect = net_disconnect,      /* unmap rings, unbind channel */
    .free = net_free,                  /* delete NIC, free MAC string */
};