e1000e.c

/*
 * QEMU INTEL 82574 GbE NIC emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "net/net.h"
#include "net/tap.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "e1000_regs.h"

#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"
#include "qapi/error.h"
#include "qom/object.h"

#define TYPE_E1000E "e1000e"
OBJECT_DECLARE_SIMPLE_TYPE(E1000EState, E1000E)

struct E1000EState {
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion mmio;
    MemoryRegion flash;
    MemoryRegion io;
    MemoryRegion msix;

    uint32_t ioaddr;

    uint16_t subsys_ven;
    uint16_t subsys;

    uint16_t subsys_ven_used;
    uint16_t subsys_used;

    bool disable_vnet;

    E1000ECore core;
};
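
/*
 * BAR layout used below: BAR0 is the register MMIO window, BAR1 a dummy
 * flash window, BAR2 the legacy I/O window and BAR3 holds the MSI-X
 * table and PBA.
 */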

#define E1000E_MMIO_IDX     0
#define E1000E_FLASH_IDX    1
#define E1000E_IO_IDX       2
#define E1000E_MSIX_IDX     3

#define E1000E_MMIO_SIZE    (128 * KiB)
#define E1000E_FLASH_SIZE   (128 * KiB)
#define E1000E_IO_SIZE      (32)
#define E1000E_MSIX_SIZE    (16 * KiB)

#define E1000E_MSIX_TABLE   (0x0000)
#define E1000E_MSIX_PBA     (0x2000)
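
/* BAR0 (MMIO) register accesses are forwarded straight to the device core. */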

static uint64_t
e1000e_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000EState *s = opaque;
    return e1000e_core_read(&s->core, addr, size);
}

static void
e1000e_mmio_write(void *opaque, hwaddr addr,
                  uint64_t val, unsigned size)
{
    E1000EState *s = opaque;
    e1000e_core_write(&s->core, addr, val, size);
}
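
/*
 * The legacy I/O BAR exposes an IOADDR/IODATA register pair: the guest
 * latches a register offset into IOADDR and then reads or writes the
 * selected register through IODATA.  Offsets outside the register range
 * (undefined space or the flash region) are rejected with a trace message.
 */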

static bool
e1000e_io_get_reg_index(E1000EState *s, uint32_t *idx)
{
    if (s->ioaddr < 0x1FFFF) {
        *idx = s->ioaddr;
        return true;
    }

    if (s->ioaddr < 0x7FFFF) {
        trace_e1000e_wrn_io_addr_undefined(s->ioaddr);
        return false;
    }

    if (s->ioaddr < 0xFFFFF) {
        trace_e1000e_wrn_io_addr_flash(s->ioaddr);
        return false;
    }

    trace_e1000e_wrn_io_addr_unknown(s->ioaddr);
    return false;
}

static uint64_t
e1000e_io_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000EState *s = opaque;
    uint32_t idx = 0;
    uint64_t val;

    switch (addr) {
    case E1000_IOADDR:
        trace_e1000e_io_read_addr(s->ioaddr);
        return s->ioaddr;
    case E1000_IODATA:
        if (e1000e_io_get_reg_index(s, &idx)) {
            val = e1000e_core_read(&s->core, idx, sizeof(val));
            trace_e1000e_io_read_data(idx, val);
            return val;
        }
        return 0;
    default:
        trace_e1000e_wrn_io_read_unknown(addr);
        return 0;
    }
}

static void
e1000e_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    E1000EState *s = opaque;
    uint32_t idx = 0;

    switch (addr) {
    case E1000_IOADDR:
        trace_e1000e_io_write_addr(val);
        s->ioaddr = (uint32_t) val;
        return;
    case E1000_IODATA:
        if (e1000e_io_get_reg_index(s, &idx)) {
            trace_e1000e_io_write_data(idx, val);
            e1000e_core_write(&s->core, idx, val, sizeof(val));
        }
        return;
    default:
        trace_e1000e_wrn_io_write_unknown(addr);
        return;
    }
}

static const MemoryRegionOps mmio_ops = {
    .read = e1000e_mmio_read,
    .write = e1000e_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static const MemoryRegionOps io_ops = {
    .read = e1000e_io_read,
    .write = e1000e_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
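
/*
 * NetClientState callbacks: thin wrappers that look up the device from
 * the net client and forward to the e1000e core implementation.
 */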

static bool
e1000e_nc_can_receive(NetClientState *nc)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    return e1000e_can_receive(&s->core);
}

static ssize_t
e1000e_nc_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    return e1000e_receive_iov(&s->core, iov, iovcnt);
}

static ssize_t
e1000e_nc_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    return e1000e_receive(&s->core, buf, size);
}

static void
e1000e_set_link_status(NetClientState *nc)
{
    E1000EState *s = qemu_get_nic_opaque(nc);
    e1000e_core_set_link_status(&s->core);
}

static NetClientInfo net_e1000e_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000e_nc_can_receive,
    .receive = e1000e_nc_receive,
    .receive_iov = e1000e_nc_receive_iov,
    .link_status_changed = e1000e_set_link_status,
};

/*
 * EEPROM (NVM) contents documented in Table 36, section 6.1
 * and generally 6.1.2 Software accessed words.
 */
static const uint16_t e1000e_eeprom_template[64] = {
  /*        Address        |    Compat.    | ImVer |   Compat.     */
    0x0000, 0x0000, 0x0000, 0x0420, 0xf746, 0x2010, 0xffff, 0xffff,
  /*      PBA      |ICtrl1 | SSID  | SVID  | DevID |-------|ICtrl2 */
    0x0000, 0x0000, 0x026b, 0x0000, 0x8086, 0x0000, 0x0000, 0x8058,
  /*    NVM words 1,2,3    |-------------------------------|PCI-EID*/
    0x0000, 0x2001, 0x7e7c, 0xffff, 0x1000, 0x00c8, 0x0000, 0x2704,
  /* PCIe Init. Conf 1,2,3 |PCICtrl|PHY|LD1|-------| RevID | LD0,2 */
    0x6cc9, 0x3150, 0x070e, 0x460b, 0x2d84, 0x0100, 0xf000, 0x0706,
  /* FLPAR |FLANADD|LAN-PWR|FlVndr |ICtrl3 |APTSMBA|APTRxEP|APTSMBC*/
    0x6000, 0x0080, 0x0f04, 0x7fff, 0x4f01, 0xc600, 0x0000, 0x20ff,
  /* APTIF | APTMC |APTuCP |LSWFWID|MSWFWID|NC-SIMC|NC-SIC | VPDP  */
    0x0028, 0x0003, 0x0000, 0x0000, 0x0000, 0x0003, 0x0000, 0xffff,
  /*                            SW Section                         */
    0x0100, 0xc000, 0x121c, 0xc007, 0xffff, 0xffff, 0xffff, 0xffff,
  /*                      SW Section       |CHKSUM                 */
    0xffff, 0xffff, 0xffff, 0xffff, 0x0000, 0x0120, 0xffff, 0x0000,
};

static void e1000e_core_realize(E1000EState *s)
{
    s->core.owner = &s->parent_obj;
    s->core.owner_nic = s->nic;
}

static void
e1000e_unuse_msix_vectors(E1000EState *s, int num_vectors)
{
    int i;

    for (i = 0; i < num_vectors; i++) {
        msix_vector_unuse(PCI_DEVICE(s), i);
    }
}

static bool
e1000e_use_msix_vectors(E1000EState *s, int num_vectors)
{
    int i;

    for (i = 0; i < num_vectors; i++) {
        int res = msix_vector_use(PCI_DEVICE(s), i);
        if (res < 0) {
            trace_e1000e_msix_use_vector_fail(i, res);
            e1000e_unuse_msix_vectors(s, i);
            return false;
        }
    }

    return true;
}
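
/*
 * The MSI-X table and PBA both live in the dedicated MSI-X BAR (BAR3),
 * at offsets E1000E_MSIX_TABLE and E1000E_MSIX_PBA respectively; the
 * capability itself is placed at config-space offset 0xA0.  If vector
 * setup fails, MSI-X is torn down again and the device is left with
 * MSI or legacy interrupts only.
 */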

static void
e1000e_init_msix(E1000EState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int res = msix_init(PCI_DEVICE(s), E1000E_MSIX_VEC_NUM,
                        &s->msix,
                        E1000E_MSIX_IDX, E1000E_MSIX_TABLE,
                        &s->msix,
                        E1000E_MSIX_IDX, E1000E_MSIX_PBA,
                        0xA0, NULL);

    if (res < 0) {
        trace_e1000e_msix_init_fail(res);
    } else {
        if (!e1000e_use_msix_vectors(s, E1000E_MSIX_VEC_NUM)) {
            msix_uninit(d, &s->msix, &s->msix);
        }
    }
}

static void
e1000e_cleanup_msix(E1000EState *s)
{
    if (msix_present(PCI_DEVICE(s))) {
        e1000e_unuse_msix_vectors(s, E1000E_MSIX_VEC_NUM);
        msix_uninit(PCI_DEVICE(s), &s->msix, &s->msix);
    }
}
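
/*
 * Virtio-net header ("vnet") offload is only enabled when the user has
 * not set disable_vnet_hdr and every peer queue supports vnet headers;
 * otherwise the core falls back to performing the offloads in software.
 */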

static void
e1000e_init_net_peer(E1000EState *s, PCIDevice *pci_dev, uint8_t *macaddr)
{
    DeviceState *dev = DEVICE(pci_dev);
    NetClientState *nc;
    int i;

    s->nic = qemu_new_nic(&net_e1000e_info, &s->conf,
        object_get_typename(OBJECT(s)), dev->id, s);

    s->core.max_queue_num = s->conf.peers.queues ? s->conf.peers.queues - 1 : 0;

    trace_e1000e_mac_set_permanent(MAC_ARG(macaddr));
    memcpy(s->core.permanent_mac, macaddr, sizeof(s->core.permanent_mac));

    qemu_format_nic_info_str(qemu_get_queue(s->nic), macaddr);

    /* Setup virtio headers */
    if (s->disable_vnet) {
        s->core.has_vnet = false;
        trace_e1000e_cfg_support_virtio(false);
        return;
    } else {
        s->core.has_vnet = true;
    }

    for (i = 0; i < s->conf.peers.queues; i++) {
        nc = qemu_get_subqueue(s->nic, i);
        if (!nc->peer || !qemu_has_vnet_hdr(nc->peer)) {
            s->core.has_vnet = false;
            trace_e1000e_cfg_support_virtio(false);
            return;
        }
    }

    trace_e1000e_cfg_support_virtio(true);

    for (i = 0; i < s->conf.peers.queues; i++) {
        nc = qemu_get_subqueue(s->nic, i);
        qemu_set_vnet_hdr_len(nc->peer, sizeof(struct virtio_net_hdr));
        qemu_using_vnet_hdr(nc->peer, true);
    }
}
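
/*
 * Build the 64-bit PCIe Device Serial Number from the MAC address:
 * the three high MAC bytes, a 0xFFFF filler, then the three low bytes,
 * with mac[0] ending up in the most significant byte.
 */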

static inline uint64_t
e1000e_gen_dsn(uint8_t *mac)
{
    return (uint64_t)(mac[5])        |
           (uint64_t)(mac[4]) << 8   |
           (uint64_t)(mac[3]) << 16  |
           (uint64_t)(0x00FF) << 24  |
           (uint64_t)(0x00FF) << 32  |
           (uint64_t)(mac[2]) << 40  |
           (uint64_t)(mac[1]) << 48  |
           (uint64_t)(mac[0]) << 56;
}
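
/*
 * Add the PCI Power Management capability at the given config-space
 * offset, make the power-state control bits guest writable and mark
 * PME status as write-1-to-clear.
 */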

static int
e1000e_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc)
{
    Error *local_err = NULL;
    int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset,
                                 PCI_PM_SIZEOF, &local_err);

    if (local_err) {
        error_report_err(local_err);
        return ret;
    }

    pci_set_word(pdev->config + offset + PCI_PM_PMC,
                 PCI_PM_CAP_VER_1_1 |
                 pmc);

    pci_set_word(pdev->wmask + offset + PCI_PM_CTRL,
                 PCI_PM_CTRL_STATE_MASK |
                 PCI_PM_CTRL_PME_ENABLE |
                 PCI_PM_CTRL_DATA_SEL_MASK);

    pci_set_word(pdev->w1cmask + offset + PCI_PM_CTRL,
                 PCI_PM_CTRL_PME_STATUS);

    return ret;
}
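
/*
 * Packet reception depends on bus mastering being enabled, so kick the
 * receiver whenever a config write turns PCI_COMMAND_MASTER on.
 */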

static void e1000e_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    E1000EState *s = E1000E(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        e1000e_start_recv(&s->core);
    }
}
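
/*
 * Realize: set up config space (subsystem IDs and capabilities at fixed
 * offsets: PM at 0xC8, MSI at 0xD0, PCIe at 0xE0, AER at 0x100, DSN at
 * 0x140, MSI-X at 0xA0), register the four BARs, create the networking
 * backend and finally hand over to the e1000e core.
 */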

static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    static const uint16_t e1000e_pmrb_offset = 0x0C8;
    static const uint16_t e1000e_pcie_offset = 0x0E0;
    static const uint16_t e1000e_aer_offset = 0x100;
    static const uint16_t e1000e_dsn_offset = 0x140;

    E1000EState *s = E1000E(pci_dev);
    uint8_t *macaddr;
    int ret;

    trace_e1000e_cb_pci_realize();

    pci_dev->config_write = e1000e_write_config;

    pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10;
    pci_dev->config[PCI_INTERRUPT_PIN] = 1;

    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, s->subsys_ven);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, s->subsys);

    s->subsys_ven_used = s->subsys_ven;
    s->subsys_used = s->subsys;

    /* Define IO/MMIO regions */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
                          "e1000e-mmio", E1000E_MMIO_SIZE);
    pci_register_bar(pci_dev, E1000E_MMIO_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);

    /*
     * We provide a dummy implementation for the flash BAR
     * for drivers that may theoretically probe for its presence.
     */
    memory_region_init(&s->flash, OBJECT(s),
                       "e1000e-flash", E1000E_FLASH_SIZE);
    pci_register_bar(pci_dev, E1000E_FLASH_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->flash);

    memory_region_init_io(&s->io, OBJECT(s), &io_ops, s,
                          "e1000e-io", E1000E_IO_SIZE);
    pci_register_bar(pci_dev, E1000E_IO_IDX,
                     PCI_BASE_ADDRESS_SPACE_IO, &s->io);

    memory_region_init(&s->msix, OBJECT(s), "e1000e-msix",
                       E1000E_MSIX_SIZE);
    pci_register_bar(pci_dev, E1000E_MSIX_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix);

    /* Create networking backend */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    macaddr = s->conf.macaddr.a;

    e1000e_init_msix(s);

    if (pcie_endpoint_cap_v1_init(pci_dev, e1000e_pcie_offset) < 0) {
        hw_error("Failed to initialize PCIe capability");
    }

    ret = msi_init(PCI_DEVICE(s), 0xD0, 1, true, false, NULL);
    if (ret) {
        trace_e1000e_msi_init_fail(ret);
    }

    if (e1000e_add_pm_capability(pci_dev, e1000e_pmrb_offset,
                                 PCI_PM_CAP_DSI) < 0) {
        hw_error("Failed to initialize PM capability");
    }

    if (pcie_aer_init(pci_dev, PCI_ERR_VER, e1000e_aer_offset,
                      PCI_ERR_SIZEOF, NULL) < 0) {
        hw_error("Failed to initialize AER capability");
    }

    pcie_dev_ser_num_init(pci_dev, e1000e_dsn_offset,
                          e1000e_gen_dsn(macaddr));

    e1000e_init_net_peer(s, pci_dev, macaddr);

    /* Initialize core */
    e1000e_core_realize(s);

    e1000e_core_pci_realize(&s->core,
                            e1000e_eeprom_template,
                            sizeof(e1000e_eeprom_template),
                            macaddr);
}

static void e1000e_pci_uninit(PCIDevice *pci_dev)
{
    E1000EState *s = E1000E(pci_dev);

    trace_e1000e_cb_pci_uninit();

    e1000e_core_pci_uninit(&s->core);

    pcie_aer_exit(pci_dev);
    pcie_cap_exit(pci_dev);

    qemu_del_nic(s->nic);

    e1000e_cleanup_msix(s);
    msi_uninit(pci_dev);
}

static void e1000e_qdev_reset(DeviceState *dev)
{
    E1000EState *s = E1000E(dev);

    trace_e1000e_cb_qdev_reset();

    e1000e_core_reset(&s->core);
}
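
/*
 * Migration hooks: pre_save lets the core prepare its state for saving;
 * post_load refuses to complete if the source and destination devices
 * were configured with different subsys/subsys_ven properties.
 */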

static int e1000e_pre_save(void *opaque)
{
    E1000EState *s = opaque;

    trace_e1000e_cb_pre_save();

    e1000e_core_pre_save(&s->core);

    return 0;
}

static int e1000e_post_load(void *opaque, int version_id)
{
    E1000EState *s = opaque;

    trace_e1000e_cb_post_load();

    if ((s->subsys != s->subsys_used) ||
        (s->subsys_ven != s->subsys_ven_used)) {
        fprintf(stderr,
            "ERROR: Cannot migrate while device properties "
            "(subsys/subsys_ven) differ");
        return -1;
    }

    return e1000e_core_post_load(&s->core);
}

static const VMStateDescription e1000e_vmstate_tx = {
    .name = "e1000e-tx",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(sum_needed, struct e1000e_tx),
        VMSTATE_UINT8(props.ipcss, struct e1000e_tx),
        VMSTATE_UINT8(props.ipcso, struct e1000e_tx),
        VMSTATE_UINT16(props.ipcse, struct e1000e_tx),
        VMSTATE_UINT8(props.tucss, struct e1000e_tx),
        VMSTATE_UINT8(props.tucso, struct e1000e_tx),
        VMSTATE_UINT16(props.tucse, struct e1000e_tx),
        VMSTATE_UINT8(props.hdr_len, struct e1000e_tx),
        VMSTATE_UINT16(props.mss, struct e1000e_tx),
        VMSTATE_UINT32(props.paylen, struct e1000e_tx),
        VMSTATE_INT8(props.ip, struct e1000e_tx),
        VMSTATE_INT8(props.tcp, struct e1000e_tx),
        VMSTATE_BOOL(props.tse, struct e1000e_tx),
        VMSTATE_BOOL(cptse, struct e1000e_tx),
        VMSTATE_BOOL(skip_cp, struct e1000e_tx),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription e1000e_vmstate_intr_timer = {
    .name = "e1000e-intr-timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(timer, E1000IntrDelayTimer),
        VMSTATE_BOOL(running, E1000IntrDelayTimer),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_E1000E_INTR_DELAY_TIMER(_f, _s)                     \
    VMSTATE_STRUCT(_f, _s, 0,                                       \
                   e1000e_vmstate_intr_timer, E1000IntrDelayTimer)

#define VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(_f, _s, _num)         \
    VMSTATE_STRUCT_ARRAY(_f, _s, _num, 0,                           \
                         e1000e_vmstate_intr_timer, E1000IntrDelayTimer)
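
/*
 * Top-level migration description: PCI/MSI-X state plus the core's
 * register file, PHY and EEPROM contents, interrupt moderation timers
 * and per-queue TX offload context.
 */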

static const VMStateDescription e1000e_vmstate = {
    .name = "e1000e",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = e1000e_pre_save,
    .post_load = e1000e_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000EState),
        VMSTATE_MSIX(parent_obj, E1000EState),

        VMSTATE_UINT32(ioaddr, E1000EState),
        VMSTATE_UINT32(core.rxbuf_min_shift, E1000EState),
        VMSTATE_UINT8(core.rx_desc_len, E1000EState),
        VMSTATE_UINT32_ARRAY(core.rxbuf_sizes, E1000EState,
                             E1000_PSRCTL_BUFFS_PER_DESC),
        VMSTATE_UINT32(core.rx_desc_buf_size, E1000EState),
        VMSTATE_UINT16_ARRAY(core.eeprom, E1000EState, E1000E_EEPROM_SIZE),
        VMSTATE_UINT16_2DARRAY(core.phy, E1000EState,
                               E1000E_PHY_PAGES, E1000E_PHY_PAGE_SIZE),
        VMSTATE_UINT32_ARRAY(core.mac, E1000EState, E1000E_MAC_SIZE),
        VMSTATE_UINT8_ARRAY(core.permanent_mac, E1000EState, ETH_ALEN),

        VMSTATE_UINT32(core.delayed_causes, E1000EState),

        VMSTATE_UINT16(subsys, E1000EState),
        VMSTATE_UINT16(subsys_ven, E1000EState),

        VMSTATE_E1000E_INTR_DELAY_TIMER(core.rdtr, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.radv, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.raid, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.tadv, E1000EState),
        VMSTATE_E1000E_INTR_DELAY_TIMER(core.tidv, E1000EState),

        VMSTATE_E1000E_INTR_DELAY_TIMER(core.itr, E1000EState),
        VMSTATE_BOOL(core.itr_intr_pending, E1000EState),

        VMSTATE_E1000E_INTR_DELAY_TIMER_ARRAY(core.eitr, E1000EState,
                                              E1000E_MSIX_VEC_NUM),
        VMSTATE_BOOL_ARRAY(core.eitr_intr_pending, E1000EState,
                           E1000E_MSIX_VEC_NUM),

        VMSTATE_UINT32(core.itr_guest_value, E1000EState),
        VMSTATE_UINT32_ARRAY(core.eitr_guest_value, E1000EState,
                             E1000E_MSIX_VEC_NUM),

        VMSTATE_UINT16(core.vet, E1000EState),

        VMSTATE_STRUCT_ARRAY(core.tx, E1000EState, E1000E_NUM_QUEUES, 0,
                             e1000e_vmstate_tx, struct e1000e_tx),
        VMSTATE_END_OF_LIST()
    }
};
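
/*
 * The disable_vnet_hdr, subsys_ven and subsys properties use private
 * copies of the generic qdev property infos so that device-specific
 * descriptions can be attached to them in e1000e_class_init().
 */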

static PropertyInfo e1000e_prop_disable_vnet,
                    e1000e_prop_subsys_ven,
                    e1000e_prop_subsys;

static Property e1000e_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000EState, conf),
    DEFINE_PROP_SIGNED("disable_vnet_hdr", E1000EState, disable_vnet, false,
                        e1000e_prop_disable_vnet, bool),
    DEFINE_PROP_SIGNED("subsys_ven", E1000EState, subsys_ven,
                        PCI_VENDOR_ID_INTEL,
                        e1000e_prop_subsys_ven, uint16_t),
    DEFINE_PROP_SIGNED("subsys", E1000EState, subsys, 0,
                        e1000e_prop_subsys, uint16_t),
    DEFINE_PROP_END_OF_LIST(),
};

static void e1000e_class_init(ObjectClass *class, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(class);
    PCIDeviceClass *c = PCI_DEVICE_CLASS(class);

    c->realize = e1000e_pci_realize;
    c->exit = e1000e_pci_uninit;
    c->vendor_id = PCI_VENDOR_ID_INTEL;
    c->device_id = E1000_DEV_ID_82574L;
    c->revision = 0;
    c->romfile = "efi-e1000e.rom";
    c->class_id = PCI_CLASS_NETWORK_ETHERNET;

    dc->desc = "Intel 82574L GbE Controller";
    dc->reset = e1000e_qdev_reset;
    dc->vmsd = &e1000e_vmstate;

    e1000e_prop_disable_vnet = qdev_prop_uint8;
    e1000e_prop_disable_vnet.description = "Do not use virtio headers, "
                                           "perform SW offloads emulation "
                                           "instead";

    e1000e_prop_subsys_ven = qdev_prop_uint16;
    e1000e_prop_subsys_ven.description = "PCI device Subsystem Vendor ID";

    e1000e_prop_subsys = qdev_prop_uint16;
    e1000e_prop_subsys.description = "PCI device Subsystem ID";

    device_class_set_props(dc, e1000e_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static void e1000e_instance_init(Object *obj)
{
    E1000EState *s = E1000E(obj);
    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static const TypeInfo e1000e_info = {
    .name = TYPE_E1000E,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000EState),
    .class_init = e1000e_class_init,
    .instance_init = e1000e_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void e1000e_register_types(void)
{
    type_register_static(&e1000e_info);
}

type_init(e1000e_register_types)