virtio-net.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764
  1. /*
  2. * Virtio Network Device
  3. *
  4. * Copyright IBM, Corp. 2007
  5. *
  6. * Authors:
  7. * Anthony Liguori <aliguori@us.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu/iov.h"
  14. #include "hw/virtio/virtio.h"
  15. #include "net/net.h"
  16. #include "net/checksum.h"
  17. #include "net/tap.h"
  18. #include "qemu/error-report.h"
  19. #include "qemu/timer.h"
  20. #include "hw/virtio/virtio-net.h"
  21. #include "net/vhost_net.h"
  22. #include "hw/virtio/virtio-bus.h"
  23. #include "qapi/qmp/qjson.h"
  24. #include "qapi-event.h"
  25. #include "hw/virtio/virtio-access.h"
#define VIRTIO_NET_VM_VERSION 11

#define MAC_TABLE_ENTRIES 64
#define MAX_VLAN (1 << 12) /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

/* Pairs a device feature bit with the config-space offset one past the
 * last field that feature makes visible ('end'). */
typedef struct VirtIOFeature {
    uint32_t flags;   /* feature bit mask (1 << VIRTIO_NET_F_*) */
    size_t end;       /* bytes of virtio_net_config covered by the feature */
} VirtIOFeature;

/* Optional config fields in layout order; all-zero entry terminates.
 * NOTE(review): the consumer of this table is outside this chunk —
 * presumably it sizes n->config_size from the negotiated features; verify. */
static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
  48. static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
  49. {
  50. VirtIONet *n = qemu_get_nic_opaque(nc);
  51. return &n->vqs[nc->queue_index];
  52. }
  53. static int vq2q(int queue_index)
  54. {
  55. return queue_index / 2;
  56. }
  57. /* TODO
  58. * - we could suppress RX interrupt if we were so inclined.
  59. */
  60. static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
  61. {
  62. VirtIONet *n = VIRTIO_NET(vdev);
  63. struct virtio_net_config netcfg;
  64. virtio_stw_p(vdev, &netcfg.status, n->status);
  65. virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
  66. memcpy(netcfg.mac, n->mac, ETH_ALEN);
  67. memcpy(config, &netcfg, n->config_size);
  68. }
/* Guest write to the device config space.  The only writable content is
 * the MAC address, and only while VIRTIO_NET_F_CTRL_MAC_ADDR has not been
 * negotiated (with that feature the MAC is set via the control vq). */
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    /* Adopt a changed MAC and refresh the human-readable NIC info string;
     * ignore the write entirely when CTRL_MAC_ADDR was negotiated. */
    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}
  80. static bool virtio_net_started(VirtIONet *n, uint8_t status)
  81. {
  82. VirtIODevice *vdev = VIRTIO_DEVICE(n);
  83. return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
  84. (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
  85. }
/* Announce-timer callback: consume one round of the self-announcement
 * counter, raise the ANNOUNCE status bit and interrupt the guest; the
 * guest acks via the control vq (virtio_net_handle_announce), which
 * re-arms this timer while rounds remain. */
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
/* Bring the vhost backend in or out of service so that it tracks the
 * desired device state.  No-op when the peer has no vhost backend or the
 * backend is already in the wanted state. */
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    /* Only one queue pair is used unless multiqueue was negotiated. */
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    /* Already in the desired state (started iff device running and link up)? */
    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        /* Flag is set before vhost_net_start() and rolled back on failure
         * (falling back to the userspace datapath).  NOTE(review):
         * presumably so code reached during startup already sees
         * vhost_started set — verify. */
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}
/* Device status change: first propagate to the vhost backend, then arm or
 * cancel the per-queue TX timer/bottom-half for every queue that has TX
 * work pending (tx_waiting). */
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        /* Queues past curr_queues — and every queue but 0 in single-queue
         * mode — are treated as stopped regardless of 'status'. */
        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        /* Nothing to schedule or cancel if no TX work is pending. */
        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            /* Userspace datapath active: re-arm whichever TX mechanism
             * this queue uses (timer or bottom half). */
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            /* Stopped (or vhost owns the rings): quiesce userspace TX. */
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}
  165. static void virtio_net_set_link_status(NetClientState *nc)
  166. {
  167. VirtIONet *n = qemu_get_nic_opaque(nc);
  168. VirtIODevice *vdev = VIRTIO_DEVICE(n);
  169. uint16_t old_status = n->status;
  170. if (nc->link_down)
  171. n->status &= ~VIRTIO_NET_S_LINK_UP;
  172. else
  173. n->status |= VIRTIO_NET_S_LINK_UP;
  174. if (n->status != old_status)
  175. virtio_notify_config(vdev);
  176. virtio_net_set_status(vdev, vdev->status);
  177. }
/* Emit a QMP NIC_RX_FILTER_CHANGED event for this NIC, then suppress
 * further events until management re-queries the filter state
 * (virtio_net_query_rxfilter re-enables them). */
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path, &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
  190. static char *mac_strdup_printf(const uint8_t *mac)
  191. {
  192. return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
  193. mac[1], mac[2], mac[3], mac[4], mac[5]);
  194. }
/* Build an intList of every VLAN id enabled in the n->vlans bitmap
 * (one bit per id, 32 ids per word).  Entries are prepended, so the list
 * ends up in descending-id order.  The caller owns the returned list. */
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        /* Skip entirely-empty words quickly via the n->vlans[i] test. */
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
/* QMP query-rx-filter: snapshot this NIC's receive-filter state into a
 * freshly allocated RxFilterInfo (caller owns it), and re-enable the
 * one-shot rx-filter-changed event suppressed by rxfilter_notify(). */
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    /* NOTE(review): n->nobcast is set when the guest *disables* broadcast
     * reception, so this assignment looks inverted (!n->nobcast expected).
     * Verify against the QMP consumers before changing. */
    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    /* Unicast filter entries occupy macs[0 .. first_multi). */
    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    /* Multicast filter entries occupy macs[first_multi .. in_use). */
    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    /* Without CTRL_VLAN the device performs no VLAN filtering at all. */
    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}
/* Device reset: restore the power-on receive-filter defaults, stop any
 * pending self-announcement, and clear the MAC and VLAN filter tables. */
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    /* Restore the statically configured MAC and refresh the info string. */
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
  293. static void peer_test_vnet_hdr(VirtIONet *n)
  294. {
  295. NetClientState *nc = qemu_get_queue(n->nic);
  296. if (!nc->peer) {
  297. return;
  298. }
  299. n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
  300. }
/* Cached result of peer_test_vnet_hdr(). */
static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
  305. static int peer_has_ufo(VirtIONet *n)
  306. {
  307. if (!peer_has_vnet_hdr(n))
  308. return 0;
  309. n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
  310. return n->has_ufo;
  311. }
/* Select the guest-visible header layout: virtio_net_hdr_mrg_rxbuf when
 * mergeable RX buffers are negotiated, plain virtio_net_hdr otherwise.
 * When the peer supports that header length, program it into the peer so
 * host and guest header lengths match. */
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
  328. static int peer_attach(VirtIONet *n, int index)
  329. {
  330. NetClientState *nc = qemu_get_subqueue(n->nic, index);
  331. if (!nc->peer) {
  332. return 0;
  333. }
  334. if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
  335. return 0;
  336. }
  337. return tap_enable(nc->peer);
  338. }
  339. static int peer_detach(VirtIONet *n, int index)
  340. {
  341. NetClientState *nc = qemu_get_subqueue(n->nic, index);
  342. if (!nc->peer) {
  343. return 0;
  344. }
  345. if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
  346. return 0;
  347. }
  348. return tap_disable(nc->peer);
  349. }
  350. static void virtio_net_set_queues(VirtIONet *n)
  351. {
  352. int i;
  353. int r;
  354. for (i = 0; i < n->max_queues; i++) {
  355. if (i < n->curr_queues) {
  356. r = peer_attach(n, i);
  357. assert(!r);
  358. } else {
  359. r = peer_detach(n, i);
  360. assert(!r);
  361. }
  362. }
  363. }
  364. static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
  365. static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
  366. {
  367. VirtIONet *n = VIRTIO_NET(vdev);
  368. NetClientState *nc = qemu_get_queue(n->nic);
  369. features |= (1 << VIRTIO_NET_F_MAC);
  370. if (!peer_has_vnet_hdr(n)) {
  371. features &= ~(0x1 << VIRTIO_NET_F_CSUM);
  372. features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
  373. features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
  374. features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);
  375. features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
  376. features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
  377. features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
  378. features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
  379. }
  380. if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
  381. features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
  382. features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
  383. }
  384. if (!get_vhost_net(nc->peer)) {
  385. return features;
  386. }
  387. return vhost_net_get_features(get_vhost_net(nc->peer), features);
  388. }
  389. static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
  390. {
  391. uint32_t features = 0;
  392. /* Linux kernel 2.6.25. It understood MAC (as everyone must),
  393. * but also these: */
  394. features |= (1 << VIRTIO_NET_F_MAC);
  395. features |= (1 << VIRTIO_NET_F_CSUM);
  396. features |= (1 << VIRTIO_NET_F_HOST_TSO4);
  397. features |= (1 << VIRTIO_NET_F_HOST_TSO6);
  398. features |= (1 << VIRTIO_NET_F_HOST_ECN);
  399. return features;
  400. }
/* Push the currently enabled guest RX offloads (checksum, TSO4/6, ECN,
 * UFO) down to the peer backend. */
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
  410. static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
  411. {
  412. static const uint64_t guest_offloads_mask =
  413. (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
  414. (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
  415. (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
  416. (1ULL << VIRTIO_NET_F_GUEST_ECN) |
  417. (1ULL << VIRTIO_NET_F_GUEST_UFO);
  418. return guest_offloads_mask & features;
  419. }
/* Offloads the guest is allowed to enable at runtime: the offload subset
 * of the features negotiated at feature-negotiation time. */
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
/* Guest feature write: propagate the negotiated bits to the multiqueue
 * and mergeable-RX-buffer configuration, the guest offload state, every
 * vhost backend, and the VLAN filter (without CTRL_VLAN, every VLAN is
 * permitted by setting all filter bits). */
static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}
  449. static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
  450. struct iovec *iov, unsigned int iov_cnt)
  451. {
  452. uint8_t on;
  453. size_t s;
  454. NetClientState *nc = qemu_get_queue(n->nic);
  455. s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
  456. if (s != sizeof(on)) {
  457. return VIRTIO_NET_ERR;
  458. }
  459. if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
  460. n->promisc = on;
  461. } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
  462. n->allmulti = on;
  463. } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
  464. n->alluni = on;
  465. } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
  466. n->nomulti = on;
  467. } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
  468. n->nouni = on;
  469. } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
  470. n->nobcast = on;
  471. } else {
  472. return VIRTIO_NET_ERR;
  473. }
  474. rxfilter_notify(nc);
  475. return VIRTIO_NET_OK;
  476. }
/* VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET: let the guest toggle its RX
 * offloads at runtime.  Requires the CTRL_GUEST_OFFLOADS feature and a
 * vnet-header-capable peer; only offloads that were negotiated at
 * feature time may be enabled. */
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        /* Reject any bit outside the negotiated offload set. */
        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
/* VIRTIO_NET_CTRL_MAC: either set the primary MAC address
 * (VIRTIO_NET_CTRL_MAC_ADDR_SET) or replace the whole MAC filter table
 * (VIRTIO_NET_CTRL_MAC_TABLE_SET).  The table payload is two
 * virtio_net_ctrl_mac groups: unicast entries first, then multicast.
 * A group larger than the table only sets the matching overflow flag. */
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    /* Parse into locals/scratch first; device state is only committed
     * once the whole message has parsed successfully. */
    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    /* --- unicast group: 32-bit count followed by count MACs --- */
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    /* NOTE(review): 'entries * ETH_ALEN' is 32-bit arithmetic on a
     * guest-controlled count and can wrap; the 'entries <=
     * MAC_TABLE_ENTRIES' check below keeps the copy bounded, but a
     * wrapped value makes this size check and the iov_discard_front()
     * below inconsistent — verify. */
    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    /* --- multicast group: second 32-bit count and its MACs --- */
    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    /* The multicast group must consume the rest of the message exactly. */
    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    /* Commit the fully parsed table to the device. */
    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
  585. static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
  586. struct iovec *iov, unsigned int iov_cnt)
  587. {
  588. VirtIODevice *vdev = VIRTIO_DEVICE(n);
  589. uint16_t vid;
  590. size_t s;
  591. NetClientState *nc = qemu_get_queue(n->nic);
  592. s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
  593. vid = virtio_lduw_p(vdev, &vid);
  594. if (s != sizeof(vid)) {
  595. return VIRTIO_NET_ERR;
  596. }
  597. if (vid >= MAX_VLAN)
  598. return VIRTIO_NET_ERR;
  599. if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
  600. n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
  601. else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
  602. n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
  603. else
  604. return VIRTIO_NET_ERR;
  605. rxfilter_notify(nc);
  606. return VIRTIO_NET_OK;
  607. }
/* VIRTIO_NET_CTRL_ANNOUNCE_ACK: the guest acknowledges the announce
 * request.  Clear the status bit and, while announcement rounds remain,
 * re-arm the timer with the per-round self-announce delay. */
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
/* VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET: change the number of active RX/TX
 * queue pairs.  The request is rejected unless it lies inside the spec
 * limits, does not exceed the device maximum, and multiqueue was
 * negotiated. */
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
/* Control virtqueue handler: pop each request, dispatch on ctrl.class to
 * the per-class handler, write the one-byte ack into the guest's status
 * buffer, then push the element and notify.
 *
 * NOTE(review): a request too small for the status byte or the control
 * header calls exit(1) — a guest-triggerable QEMU shutdown; verify this
 * is the intended policy for malformed control messages. */
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        /* Unknown classes fall through with status == VIRTIO_NET_ERR. */
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}
  692. /* RX */
  693. static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
  694. {
  695. VirtIONet *n = VIRTIO_NET(vdev);
  696. int queue_index = vq2q(virtio_get_queue_index(vq));
  697. qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
  698. }
  699. static int virtio_net_can_receive(NetClientState *nc)
  700. {
  701. VirtIONet *n = qemu_get_nic_opaque(nc);
  702. VirtIODevice *vdev = VIRTIO_DEVICE(n);
  703. VirtIONetQueue *q = virtio_net_get_subqueue(nc);
  704. if (!vdev->vm_running) {
  705. return 0;
  706. }
  707. if (nc->queue_index >= n->curr_queues) {
  708. return 0;
  709. }
  710. if (!virtio_queue_ready(q->rx_vq) ||
  711. !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
  712. return 0;
  713. }
  714. return 1;
  715. }
/* Return 1 when the RX virtqueue can accept a packet of 'bufsize' bytes
 * (for mergeable buffers, when at least that many bytes of descriptors
 * are available).  On failure, leave guest notification enabled so we
 * are kicked when buffers arrive; on success, disable it. */
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
/* Byte-swap the multi-byte fields of a virtio-net header between host
 * and guest endianness as needed (no-op when they match). */
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
  743. /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
  744. * it never finds out that the packets don't have valid checksums. This
  745. * causes dhclient to get upset. Fedora's carried a patch for ages to
  746. * fix this with Xen but it hasn't appeared in an upstream release of
  747. * dhclient yet.
  748. *
  749. * To avoid breaking existing guests, we catch udp packets and add
  750. * checksums. This is terrible but it's better than hacking the guest
  751. * kernels.
  752. *
  753. * N.B. if we introduce a zero-copy API, this operation is no longer free so
  754. * we should provide a mechanism to disable it to avoid polluting the host
  755. * cache.
  756. */
  757. static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
  758. uint8_t *buf, size_t size)
  759. {
  760. if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
  761. (size > 27 && size < 1500) && /* normal sized MTU */
  762. (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
  763. (buf[23] == 17) && /* ip.protocol == UDP */
  764. (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
  765. net_checksum_calculate(buf, size);
  766. hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
  767. }
  768. }
  769. static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
  770. const void *buf, size_t size)
  771. {
  772. if (n->has_vnet_hdr) {
  773. /* FIXME this cast is evil */
  774. void *wbuf = (void *)buf;
  775. work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
  776. size - n->host_hdr_len);
  777. virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
  778. iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
  779. } else {
  780. struct virtio_net_hdr hdr = {
  781. .flags = 0,
  782. .gso_type = VIRTIO_NET_HDR_GSO_NONE
  783. };
  784. iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
  785. }
  786. }
  787. static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
  788. {
  789. static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  790. static const uint8_t vlan[] = {0x81, 0x00};
  791. uint8_t *ptr = (uint8_t *)buf;
  792. int i;
  793. if (n->promisc)
  794. return 1;
  795. ptr += n->host_hdr_len;
  796. if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
  797. int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
  798. if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
  799. return 0;
  800. }
  801. if (ptr[0] & 1) { // multicast
  802. if (!memcmp(ptr, bcast, sizeof(bcast))) {
  803. return !n->nobcast;
  804. } else if (n->nomulti) {
  805. return 0;
  806. } else if (n->allmulti || n->mac_table.multi_overflow) {
  807. return 1;
  808. }
  809. for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
  810. if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
  811. return 1;
  812. }
  813. }
  814. } else { // unicast
  815. if (n->nouni) {
  816. return 0;
  817. } else if (n->alluni || n->mac_table.uni_overflow) {
  818. return 1;
  819. } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
  820. return 1;
  821. }
  822. for (i = 0; i < n->mac_table.first_multi; i++) {
  823. if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
  824. return 1;
  825. }
  826. }
  827. }
  828. return 0;
  829. }
/*
 * Net-layer receive callback: copy one packet from the peer into guest rx
 * buffers, splitting it across multiple buffers when mergeable rx buffers
 * are negotiated.  Returns the packet size on success or drop, 0 to ask
 * the peer to queue the packet (no buffers yet), -1 when reception is not
 * possible at all.
 */
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    /* Silently drop filtered-out frames (report them as consumed). */
    if (!receive_filter(n, buf, size))
        return size;

    /* offset: bytes of the source frame consumed; i: buffers used. */
    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        /* elem.in_sg is an in-struct array, so this pointer stays valid
         * once virtqueue_pop() fills elem below. */
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            /* Ran out of buffers mid-packet: fatal, the guest broke the
             * contract checked by virtio_net_has_buffers() above. */
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                /* Remember where num_buffers lives in the guest buffer so
                 * it can be patched once the final count is known. */
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        /* Patch the buffer count into the header written earlier. */
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

/*
 * Completion callback for an asynchronous transmit started by
 * virtio_net_flush_tx(): return the in-flight element to the guest,
 * clear the in-flight marker, re-enable tx notifications and flush
 * anything that queued up in the meantime.
 */
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    /* out_num == 0 marks "no transmit in flight" for flush_tx. */
    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
/* TX */

/*
 * Drain up to n->tx_burst packets from the tx virtqueue and hand them to
 * the peer.  Returns the number of packets sent, or -EBUSY when the peer
 * could not take a packet immediately (virtio_net_tx_complete() resumes
 * the flush once the async send finishes).
 */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    /* A transmit is already in flight; wait for its completion. */
    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (out_sg[0].iov_len < n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            /* Convert header fields to host endianness in place. */
            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            /* Keep the first host_hdr_len bytes, skip the remainder of
             * the guest header, then append the payload iovecs. */
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            /* Peer is busy: park the element and stop flushing until the
             * completion callback fires. */
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        /* Bound the work done per invocation to tx_burst packets. */
        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
/*
 * tx kick handler for the timer-based transmit strategy.  The first kick
 * arms a delay timer (batching subsequent packets); a second kick while
 * the timer is pending means the guest is active, so flush immediately.
 */
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        /* Remember the kick so the flush happens once we restart. */
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        /* Second kick before the timer fired: flush now. */
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        /* First kick: start the batching timer and mute notifications. */
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}
/*
 * tx kick handler for the bottom-half transmit strategy: schedule the
 * tx bh once and ignore further kicks until it has run.
 */
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* A bh is already scheduled or pending restart; nothing to do. */
    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
/*
 * Expiry callback for the tx batching timer: re-enable notifications and
 * flush whatever the guest queued since the arming kick.
 */
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
/*
 * Bottom half for the bh transmit strategy.  Flushes the tx queue and
 * reschedules itself while the guest keeps the ring busy, re-enabling
 * guest notifications only once the queue goes quiet.
 */
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
/*
 * Rebuild the device's virtqueue layout for single- or multi-queue
 * operation: tear down everything beyond the first rx/tx pair, recreate
 * rx/tx pairs for queues 1..max-1, and re-add the control vq last.
 */
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    /* Delete every vq except indices 0 and 1 (the first rx/tx pair),
     * including the previous control vq. */
    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        /* NOTE(review): timer-vs-bh for queues > 0 is keyed off a
         * previously allocated tx_timer, so a freshly zeroed queue always
         * takes the bh path regardless of net_conf.tx — confirm intended. */
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ. Create ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
  1115. static void virtio_net_save(QEMUFile *f, void *opaque)
  1116. {
  1117. VirtIONet *n = opaque;
  1118. VirtIODevice *vdev = VIRTIO_DEVICE(n);
  1119. /* At this point, backend must be stopped, otherwise
  1120. * it might keep writing to memory. */
  1121. assert(!n->vhost_started);
  1122. virtio_save(vdev, f);
  1123. }
/*
 * Serialize virtio-net device state.  The field order below is the
 * migration wire format and must match virtio_net_load_device() exactly.
 */
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    /* VLAN filter bitmap: MAX_VLAN bits = MAX_VLAN >> 3 bytes. */
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    /* Multiqueue state is only present when more than one queue exists. */
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}
  1156. static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
  1157. {
  1158. VirtIONet *n = opaque;
  1159. VirtIODevice *vdev = VIRTIO_DEVICE(n);
  1160. if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
  1161. return -EINVAL;
  1162. return virtio_load(vdev, f, version_id);
  1163. }
/*
 * Deserialize virtio-net device state; must mirror the field order of
 * virtio_net_save_device(), with version gates for fields added over
 * time.  Returns 0 on success, -1 on an incompatible stream.
 */
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        /* promisc/allmulti shrank from be32 to byte in version 8. */
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        /* A vnet_hdr-using source cannot resume on a non-vnet_hdr backend. */
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        /* max_queues must match between source and destination. */
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    /* Re-announce our presence so switches relearn the MAC after resume. */
    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}
  1271. static void virtio_net_cleanup(NetClientState *nc)
  1272. {
  1273. VirtIONet *n = qemu_get_nic_opaque(nc);
  1274. n->nic = NULL;
  1275. }
/* Callbacks virtio-net registers with the net core for each subqueue. */
static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};
/*
 * Ask the vhost backend whether virtqueue idx has a pending guest
 * notification.  Only valid while vhost is running.
 */
static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));

    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}
/*
 * Mask or unmask guest notifications for virtqueue idx in the vhost
 * backend.  Only valid while vhost is running.
 */
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));

    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
  1301. void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
  1302. {
  1303. int i, config_size = 0;
  1304. host_features |= (1 << VIRTIO_NET_F_MAC);
  1305. for (i = 0; feature_sizes[i].flags != 0; i++) {
  1306. if (host_features & feature_sizes[i].flags) {
  1307. config_size = MAX(feature_sizes[i].end, config_size);
  1308. }
  1309. }
  1310. n->config_size = config_size;
  1311. }
  1312. void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
  1313. const char *type)
  1314. {
  1315. /*
  1316. * The name can be NULL, the netclient name will be type.x.
  1317. */
  1318. assert(type != NULL);
  1319. g_free(n->netclient_name);
  1320. g_free(n->netclient_type);
  1321. n->netclient_name = g_strdup(name);
  1322. n->netclient_type = g_strdup(type);
  1323. }
/*
 * Realize the virtio-net device: create virtqueue 0 (rx/tx/ctrl), set up
 * the transmit strategy (timer or bh), create the NIC, probe the peer for
 * vnet header support and register migration state.  Additional queue
 * pairs are created later by virtio_net_set_multiqueue().
 */
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    /* tx= accepts only "timer" or "bh"; anything else warns and falls
     * through to the bh default below. */
    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    /* Probe the backend: with a vnet header the peer handles offloads. */
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
}
/*
 * Unrealize the device: stop the backend, unregister migration state and
 * release all per-device and per-queue resources in the reverse order of
 * their creation.
 */
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        /* Each queue uses either a tx timer or a tx bh, never both. */
        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}
/*
 * QOM instance init: set the default config space size and expose the
 * bootindex property.
 */
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overriden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}
/* User-configurable device properties (tx strategy, batching knobs). */
static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};
/* Wire up device properties and the VirtioDeviceClass callback table. */
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}
/* QOM type registration record for the virtio-net device. */
static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};
/* Register the virtio-net type with QOM at startup. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)