2
0

e1000.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780
  1. /*
  2. * QEMU e1000 emulation
  3. *
  4. * Software developer's manual:
  5. * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
  6. *
  7. * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
  8. * Copyright (c) 2008 Qumranet
  9. * Based on work done by:
  10. * Copyright (c) 2007 Dan Aloni
  11. * Copyright (c) 2004 Antony T Curtis
  12. *
  13. * This library is free software; you can redistribute it and/or
  14. * modify it under the terms of the GNU Lesser General Public
  15. * License as published by the Free Software Foundation; either
  16. * version 2.1 of the License, or (at your option) any later version.
  17. *
  18. * This library is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. * Lesser General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU Lesser General Public
  24. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  25. */
  26. #include "qemu/osdep.h"
  27. #include "hw/net/mii.h"
  28. #include "hw/pci/pci_device.h"
  29. #include "hw/qdev-properties.h"
  30. #include "migration/vmstate.h"
  31. #include "net/eth.h"
  32. #include "net/net.h"
  33. #include "net/checksum.h"
  34. #include "system/system.h"
  35. #include "system/dma.h"
  36. #include "qemu/iov.h"
  37. #include "qemu/module.h"
  38. #include "qemu/range.h"
  39. #include "e1000_common.h"
  40. #include "e1000x_common.h"
  41. #include "trace.h"
  42. #include "qom/object.h"
/* #define E1000_DEBUG */

#ifdef E1000_DEBUG
/* Debug categories; enable one by setting its bit in debugflags below. */
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
/* Default: report TX errors and general messages only. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

/* Print to stderr when the 'what' category is enabled; no-op otherwise. */
#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40      /* I/O BAR size */
#define PNPMMIO_SIZE      0x20000   /* MMIO BAR size */

/* Ethernet header plus one VLAN tag. */
#define MAXIMUM_ETHERNET_HDR_LEN (ETH_HLEN + 4)
  63. /*
  64. * HW models:
  65. * E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
  66. * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
  67. * E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
  68. * Others never tested
  69. */
/*
 * Device state for one emulated e1000 NIC.  Most guest-visible state lives
 * in mac_reg[] (MAC registers, indexed by register offset / 4), phy_reg[]
 * (MII PHY registers) and eeprom_data[] (Microwire EEPROM words).
 */
struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];   /* MAC register file */
    uint16_t phy_reg[0x20];     /* MII PHY register file */
    uint16_t eeprom_data[64];   /* EEPROM contents, 16-bit words */

    uint32_t rxbuf_size;        /* per-descriptor RX buffer size, from RCTL */
    uint32_t rxbuf_min_shift;   /* RDMTS-derived minimum-threshold shift */
    /* In-progress transmit state, accumulated across TX descriptors. */
    struct e1000_tx {
        unsigned char header[256];      /* saved TSO header for resends */
        unsigned char vlan_header[4];   /* VLAN tag to insert */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;                  /* bytes accumulated in data[] */
        unsigned char vlan_needed;      /* insert VLAN tag on send */
        unsigned char sum_needed;       /* TXD POPTS checksum flags */
        bool cptse;                     /* current packet uses TSO */
        e1000x_txd_props props;         /* non-TSO context descriptor */
        e1000x_txd_props tso_props;     /* TSO context descriptor */
        uint16_t tso_frames;            /* segments emitted so far */
        bool busy;                      /* guards re-entry of start_xmit() */
    } tx;

    /* Bit-banged Microwire EEPROM interface state (see set_eecd()). */
    struct {
        uint32_t val_in;    /* shifted in from guest driver */
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

    QEMUTimer *flush_queue_timer;

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_TSO_BIT 3
#define E1000_FLAG_VET_BIT 4
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT)
#define E1000_FLAG_VET (1 << E1000_FLAG_VET_BIT)
    uint32_t compat_flags;
    bool received_tx_tso;
    bool use_tso_for_migration;
    e1000x_txd_props mig_props;
};
typedef struct E1000State_st E1000State;

/* Test a migration-compat flag; relies on 's' being in scope at call site. */
#define chkflag(x)     (s->compat_flags & E1000_FLAG_##x)
/*
 * Class data shared by the concrete e1000 variants; phy_id2 holds the
 * per-device-model MII PHYID2 value installed into phy_reg at reset.
 */
struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
};
typedef struct E1000BaseClass E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

DECLARE_OBJ_CHECKERS(E1000State, E1000BaseClass,
                     E1000, TYPE_E1000_BASE)
  133. static void
  134. e1000_link_up(E1000State *s)
  135. {
  136. e1000x_update_regs_on_link_up(s->mac_reg, s->phy_reg);
  137. /* E1000_STATUS_LU is tested by e1000_can_receive() */
  138. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  139. }
  140. static void
  141. e1000_autoneg_done(E1000State *s)
  142. {
  143. e1000x_update_regs_on_autoneg_done(s->mac_reg, s->phy_reg);
  144. /* E1000_STATUS_LU is tested by e1000_can_receive() */
  145. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  146. }
  147. static bool
  148. have_autoneg(E1000State *s)
  149. {
  150. return (s->phy_reg[MII_BMCR] & MII_BMCR_AUTOEN);
  151. }
  152. static void
  153. set_phy_ctrl(E1000State *s, int index, uint16_t val)
  154. {
  155. /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
  156. s->phy_reg[MII_BMCR] = val & ~(0x3f |
  157. MII_BMCR_RESET |
  158. MII_BMCR_ANRESTART);
  159. /*
  160. * QEMU 1.3 does not support link auto-negotiation emulation, so if we
  161. * migrate during auto negotiation, after migration the link will be
  162. * down.
  163. */
  164. if (have_autoneg(s) && (val & MII_BMCR_ANRESTART)) {
  165. e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
  166. }
  167. }
/*
 * PHY register write dispatch table, indexed by MII register number.
 * Registers without an entry are written directly in set_mdic().
 */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [MII_BMCR] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

/* Per-register access rights used to validate guest MDIC operations. */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
/* Readable/writable capability of each implemented PHY register;
 * a zero entry means the register is unimplemented and MDIC access
 * to it raises E1000_MDIC_ERROR. */
static const char phy_regcap[0x20] = {
    [MII_BMSR]   = PHY_R,   [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [MII_PHYID1] = PHY_R,   [M88E1000_PHY_SPEC_CTRL]     = PHY_RW,
    [MII_BMCR]   = PHY_RW,  [MII_CTRL1000]               = PHY_RW,
    [MII_ANLPAR] = PHY_R,   [MII_STAT1000]               = PHY_R,
    [MII_ANAR]   = PHY_RW,  [M88E1000_RX_ERR_CNTR]       = PHY_R,
    [MII_PHYID2] = PHY_R,   [M88E1000_PHY_SPEC_STATUS]   = PHY_R,
    [MII_ANER]   = PHY_R,
};
/* MII_PHYID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
/* PHY register power-on values copied into phy_reg[] at device reset. */
static const uint16_t phy_reg_init[] = {
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD |
                 MII_BMCR_AUTOEN,

    [MII_BMSR] = MII_BMSR_EXTCAP |
                 MII_BMSR_LINK_ST |   /* link initially up */
                 MII_BMSR_AUTONEG |
                 /* MII_BMSR_AN_COMP: initially NOT completed */
                 MII_BMSR_MFPS |
                 MII_BMSR_EXTSTAT |
                 MII_BMSR_10T_HD |
                 MII_BMSR_10T_FD |
                 MII_BMSR_100TX_HD |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1] = 0x141,
    /* [MII_PHYID2] configured per DevId, from e1000_reset() */
    [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 |
                 MII_ANAR_10FD | MII_ANAR_TX |
                 MII_ANAR_TXFD | MII_ANAR_PAUSE |
                 MII_ANAR_PAUSE_ASYM,
    [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                   MII_ANLPAR_TX | MII_ANLPAR_TXFD,
    [MII_CTRL1000] = MII_CTRL1000_FULL | MII_CTRL1000_PORT |
                     MII_CTRL1000_MASTER,
    [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
                     MII_STAT1000_ROK | MII_STAT1000_LOK,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};
/* MAC register power-on values copied into mac_reg[] at device reset;
 * STATUS reports link up, full duplex, 1000 Mb/s. */
static const uint32_t mac_reg_init[] = {
    [PBA]     = 0x00100030,
    [LEDCTL]  = 0x602,
    [CTRL]    = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS]  = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC]    = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
  226. /* Helper function, *curr == 0 means the value is not set */
  227. static inline void
  228. mit_update_delay(uint32_t *curr, uint32_t value)
  229. {
  230. if (value && (*curr == 0 || value < *curr)) {
  231. *curr = value;
  232. }
  233. }
/*
 * Set ICR/ICS to 'val' and drive the INTx pin accordingly, applying the
 * partial interrupt-mitigation emulation (RADV/TADV/ITR) on rising edges.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    /* Only causes that are both raised and unmasked can assert the pin. */
    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }

        /* Compute the next mitigation delay according to pending
         * interrupts and the current values of RADV (provided
         * RDTR!=0), TADV and ITR.
         * Then rearm the timer.
         */
        mit_delay = 0;
        /* TADV applies only when a descriptor requested IDE and a TX
         * cause is pending; * 4 converts 1024ns units to 256ns units. */
        if (s->mit_ide &&
                (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
            mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
        }
        if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
            mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
        }
        mit_update_delay(&mit_delay, s->mac_reg[ITR]);

        /*
         * According to e1000 SPEC, the Ethernet controller guarantees
         * a maximum observable interrupt rate of 7813 interrupts/sec.
         * Thus if mit_delay < 500 then the delay should be set to the
         * minimum delay possible which is 500.
         */
        mit_delay = (mit_delay < 500) ? 500 : mit_delay;

        s->mit_timer_on = 1;
        /* mit_delay is in 256ns units; convert to nanoseconds. */
        timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  mit_delay * 256);
        s->mit_ide = 0;
    }
    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
  293. static void
  294. e1000_mit_timer(void *opaque)
  295. {
  296. E1000State *s = opaque;
  297. s->mit_timer_on = 0;
  298. /* Call set_interrupt_cause to update the irq level (if necessary). */
  299. set_interrupt_cause(s, 0, s->mac_reg[ICR]);
  300. }
  301. static void
  302. set_ics(E1000State *s, int index, uint32_t val)
  303. {
  304. DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
  305. s->mac_reg[IMS]);
  306. set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
  307. }
  308. static void
  309. e1000_autoneg_timer(void *opaque)
  310. {
  311. E1000State *s = opaque;
  312. if (!qemu_get_queue(s->nic)->link_down) {
  313. e1000_autoneg_done(s);
  314. set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
  315. }
  316. }
  317. static bool e1000_vet_init_need(void *opaque)
  318. {
  319. E1000State *s = opaque;
  320. return chkflag(VET);
  321. }
/*
 * Device reset: stop all timers, reload PHY/MAC registers from their
 * power-on tables, clear in-flight TX state and restore the MAC address.
 */
static void e1000_reset_hold(Object *obj, ResetType type)
{
    E1000State *d = E1000(obj);
    E1000BaseClass *edc = E1000_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_del(d->flush_queue_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    /* Zero first so registers beyond the init tables read as 0. */
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memcpy(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    /* PHYID2 is per device model (82540EM vs 82544GC vs 82545EM). */
    d->phy_reg[MII_PHYID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memcpy(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    /* The init tables assume link up; undo that if the peer is down. */
    if (qemu_get_queue(d->nic)->link_down) {
        e1000x_update_regs_on_link_down(d->mac_reg, d->phy_reg);
    }

    e1000x_reset_mac_addr(d->nic, d->mac_reg, macaddr);

    if (e1000_vet_init_need(d)) {
        d->mac_reg[VET] = ETH_P_VLAN;
    }
}
  348. static void
  349. set_ctrl(E1000State *s, int index, uint32_t val)
  350. {
  351. /* RST is self clearing */
  352. s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
  353. }
  354. static void
  355. e1000_flush_queue_timer(void *opaque)
  356. {
  357. E1000State *s = opaque;
  358. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  359. }
/*
 * Guest write to RCTL: latch the register, derive the RX buffer size and
 * the receive-descriptor minimum-threshold shift, and schedule a delayed
 * flush of queued packets so a driver toggling RX enable does not lose
 * packets queued while RX was off.
 */
static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = e1000x_rxbufsize(val);
    /* RDMTS field selects RDLEN/2, /4 or /8 as the low-watermark. */
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    timer_mod(s->flush_queue_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
/*
 * Guest write to MDIC: perform the encoded PHY register read or write.
 * Only PHY address 1 is implemented; anything else, or access to a
 * register without the required capability, sets E1000_MDIC_ERROR.
 * The (possibly modified) command word is written back with READY set.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* (val ^ data) clears the data field, then the PHY value
             * is merged in. */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* Registers with side effects go through phyreg_writeops. */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
/*
 * Guest read of EECD: reflect the last written control bits and present
 * the current EEPROM output bit (MSB first within each 16-bit word) on DO.
 * When not in a read transaction DO idles high.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    /* bitnum_out >> 4 selects the word, (bitnum_out & 0xf) ^ 0xf selects
     * the bit, counting from the most significant bit down. */
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
/*
 * Guest write to EECD: emulate the bit-banged Microwire EEPROM protocol.
 * The guest clocks bits in on SK rising edges with CS asserted; after a
 * start bit, 3 opcode bits and 6 address bits (9 bits total) a read
 * transaction positions bitnum_out so get_eecd() serves data bits out.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val)) {            /* CS inactive; nothing to do */
        return;
    }
    if (E1000_EECD_CS & (val ^ oldval)) {    /* CS rise edge; reset state */
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
        return;
    }
    if (!(E1000_EECD_SK & val)) {            /* falling edge */
        s->eecd_state.bitnum_out++;
        return;
    }
    /* SK rising edge: shift the DI bit into the input accumulator. */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* Address received; -1 because the next falling edge increments
         * bitnum_out before the first data bit is sampled. */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
/*
 * Guest read of EERD (EEPROM read interface): once START has been
 * written, return DONE plus the addressed EEPROM word in the data field.
 * Out-of-range addresses complete with DONE but no data.
 */
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r);
}
/*
 * Compute the Internet checksum over data[css..n) and store it big-endian
 * at offset sloc.  A non-zero cse caps the checksummed range at cse
 * (inclusive); cse == 0 means "to the end of the packet".
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    /* Only write the checksum if its offset lies inside the data. */
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish_nozero(sum));
    }
}
  471. static inline void
  472. inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
  473. {
  474. if (is_broadcast_ether_addr(arr)) {
  475. e1000x_inc_reg_if_not_full(s->mac_reg, BPTC);
  476. } else if (is_multicast_ether_addr(arr)) {
  477. e1000x_inc_reg_if_not_full(s->mac_reg, MPTC);
  478. }
  479. }
/*
 * Emit one frame: loop it back to our own receive path when the PHY is
 * in loopback mode, otherwise send it to the peer; then update the
 * broadcast/multicast and size-bucket TX statistics (+4 accounts for the
 * FCS, per the size-bucket counters).
 */
static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[MII_BMCR] & MII_BMCR_LOOPBACK) {
        qemu_receive_packet(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
    inc_tx_bcast_or_mcast_count(s, buf);
    e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4);
}
/*
 * Transmit the frame accumulated in s->tx.  For a TSO packet this is one
 * segment: the IP length/ID and TCP sequence/flags are patched per
 * segment before checksumming.  Handles requested IP/TCP-UDP checksum
 * insertion, VLAN tag insertion, and TX statistics.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len;
    unsigned int frames = s->tx.tso_frames, css, sofar;
    struct e1000_tx *tp = &s->tx;
    struct e1000x_txd_props *props = tp->cptse ? &tp->tso_props : &tp->props;

    if (tp->cptse) {
        css = props->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (props->ip) {    /* IPv4 */
            /* Patch total length and bump the IP ID by the segment count. */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     lduw_be_p(tp->data + css + 4) + frames);
        } else {         /* IPv6 */
            stw_be_p(tp->data+css+4, tp->size - css);
        }
        css = props->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", props->tcp, css, len);
        if (props->tcp) {
            sofar = frames * props->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (props->paylen - sofar > props->mss) {
                /* Not the last segment: clear PSH and FIN. */
                tp->data[css + 13] &= ~9;    /* PSH, FIN */
            } else if (frames) {
                e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC);
            }
        } else {    /* UDP */
            stw_be_p(tp->data+css+4, len);
        }
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            void *sp = tp->data + props->tucso;

            phsum = lduw_be_p(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
        putsum(tp->data, tp->size, props->tucso, props->tucss, props->tucse);
    }
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM) {
        putsum(tp->data, tp->size, props->ipcso, props->ipcss, props->ipcse);
    }
    if (tp->vlan_needed) {
        /* Rebuild the frame with the 4-byte VLAN tag inserted after the
         * MAC addresses; tp->vlan directly precedes tp->data in memory,
         * so sending from tp->vlan yields one contiguous tagged frame. */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else {
        e1000_send_packet(s, tp->data, tp->size);
    }

    /* Packet and octet counters; +4 accounts for the FCS. */
    e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
    e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4);
    e1000x_inc_reg_if_not_full(s->mac_reg, GPTC);
    e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4);
}
/*
 * Process one transmit descriptor.  Context descriptors just latch
 * offload parameters.  Data and legacy descriptors append their buffer
 * to the accumulated frame; for TSO the buffer is consumed in MSS-sized
 * chunks, emitting a segment (and re-priming the saved header) whenever
 * header+MSS bytes are complete.  On EOP the pending frame is sent and
 * the per-packet state is reset.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
        if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) {
            e1000x_read_tx_ctx_descr(xp, &tp->tso_props);
            s->use_tso_for_migration = 1;
            tp->tso_frames = 0;
        } else {
            e1000x_read_tx_ctx_descr(xp, &tp->props);
            s->use_tso_for_migration = 0;
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            /* POPTS is only valid on the first descriptor of a packet. */
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->cptse) {
        /* msh = bytes needed for one full segment (header + payload). */
        msh = tp->tso_props.hdr_len + tp->tso_props.mss;
        do {
            bytes = split_size;
            if (tp->size >= msh) {
                goto eop;
            }
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            /* Clamp to the staging buffer so a bogus descriptor cannot
             * overflow tp->data. */
            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            /* Save the protocol headers once they are fully received so
             * they can be replayed at the start of every segment. */
            if (sz >= tp->tso_props.hdr_len
                && tp->size < tp->tso_props.hdr_len) {
                memmove(tp->header, tp->data, tp->tso_props.hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->tso_props.hdr_len);
                tp->size = tp->tso_props.hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

eop:
    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* Don't send a final runt that is nothing but (part of) the header. */
    if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
/*
 * Write descriptor status back to guest memory when the descriptor
 * requested it (RS/RPS): set DD, clear the error bits, and DMA only the
 * 'upper' dword back.  Returns the interrupt cause to accumulate
 * (E1000_ICR_TXDW) or 0 when no writeback was requested.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
  653. static uint64_t tx_desc_base(E1000State *s)
  654. {
  655. uint64_t bah = s->mac_reg[TDBAH];
  656. uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
  657. return (bah << 32) + bal;
  658. }
/*
 * Walk the TX descriptor ring from TDH to TDT, processing and writing
 * back each descriptor, then raise the accumulated interrupt causes.
 * tx.busy guards against re-entry (xmit_seg() can re-enter the device
 * through the loopback receive path).
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    if (s->tx.busy) {
        return;
    }
    s->tx.busy = true;

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* Advance TDH, wrapping at the end of the ring. */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start ||
            tdh_start >= s->mac_reg[TDLEN] / sizeof(desc)) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    s->tx.busy = false;
    set_ics(s, 0, cause);
}
  700. static int
  701. receive_filter(E1000State *s, const void *buf)
  702. {
  703. return (!e1000x_is_vlan_packet(buf, s->mac_reg[VET]) ||
  704. e1000x_rx_vlan_filter(s->mac_reg, PKT_GET_VLAN_HDR(buf))) &&
  705. e1000x_rx_group_filter(s->mac_reg, buf);
  706. }
  707. static void
  708. e1000_set_link_status(NetClientState *nc)
  709. {
  710. E1000State *s = qemu_get_nic_opaque(nc);
  711. uint32_t old_status = s->mac_reg[STATUS];
  712. if (nc->link_down) {
  713. e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg);
  714. } else {
  715. if (have_autoneg(s) &&
  716. !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
  717. e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
  718. } else {
  719. e1000_link_up(s);
  720. }
  721. }
  722. if (s->mac_reg[STATUS] != old_status)
  723. set_ics(s, 0, E1000_ICR_LSC);
  724. }
/*
 * Check whether the receive ring has enough free descriptors to hold
 * total_size bytes at the configured per-buffer size.
 */
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        /* One free descriptor suffices; ring is non-empty iff RDH != RDT. */
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        /* Free region wraps around the end of the ring. */
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
            s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        /* RDH == RDT: ring empty from the device's point of view. */
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}
  742. static bool
  743. e1000_can_receive(NetClientState *nc)
  744. {
  745. E1000State *s = qemu_get_nic_opaque(nc);
  746. return e1000x_rx_ready(&s->parent_obj, s->mac_reg) &&
  747. e1000_has_rxbufs(s, 1) && !timer_pending(s->flush_queue_timer);
  748. }
  749. static uint64_t rx_desc_base(E1000State *s)
  750. {
  751. uint64_t bah = s->mac_reg[RDBAH];
  752. uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
  753. return (bah << 32) + bal;
  754. }
/*
 * Record a receive overrun: bump the "no buffers"/"missed packets"
 * counters and raise the RX Overrun interrupt cause.
 */
static void
e1000_receiver_overrun(E1000State *s, size_t size)
{
    trace_e1000_receiver_overrun(size, s->mac_reg[RDH], s->mac_reg[RDT]);
    e1000x_inc_reg_if_not_full(s->mac_reg, RNBC);
    e1000x_inc_reg_if_not_full(s->mac_reg, MPC);
    set_ics(s, 0, E1000_ICS_RXO);
}
/*
 * Receive one frame (as an iovec) into the guest's RX descriptor ring.
 *
 * Returns the consumed length, 0 to ask the backend to queue the packet
 * (flush timer pending), or -1 when reception is impossible (RX disabled
 * or ring full — the latter also raises an overrun).
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[ETH_ZLEN];
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;
    eth_pkt_types_e pkt_type;

    if (!e1000x_hw_rx_enabled(s->mac_reg)) {
        return -1;
    }

    if (timer_pending(s->flush_queue_timer)) {
        return 0;
    }

    /* Filtering below peeks at the header; make sure we have a
     * contiguous view of at least the Ethernet header. */
    if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(s->mac_reg, size)) {
        return size;
    }

    if (!receive_filter(s, filter_buf)) {
        return size;
    }

    /* VLAN stripping: stash the tag for the descriptor's "special"
     * field, then slide the MAC addresses 4 bytes forward over the tag
     * so the payload handed to the guest has no 802.1Q header. */
    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
        vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            /* Skip any fully-consumed leading iov elements. */
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf));
    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + e1000x_fcs_len(s->mac_reg);
    if (!e1000_has_rxbufs(s, total_size)) {
        e1000_receiver_overrun(s, total_size);
        return -1;
    }
    /* Scatter the frame across as many descriptors as needed. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        /* DD is written last (see below) so the guest never observes a
         * "done" descriptor with stale length/data. */
        desc.status &= ~E1000_RXD_STAT_DD;
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* Copy from the (possibly scattered) iovec into the
                 * guest buffer, walking iov elements as they drain. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        /* Two-stage write-back: whole descriptor without DD first, then
         * the status byte alone with DD (and VP) set. */
        pci_dma_write(d, base, &desc, sizeof(desc));
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        pci_dma_write(d, base + offsetof(struct e1000_rx_desc, status),
                      &desc.status, sizeof(desc.status));

        /* Advance RDH, wrapping at the end of the ring. */
        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start ||
            rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            e1000_receiver_overrun(s, total_size);
            return -1;
        }
    } while (desc_offset < total_size);

    e1000x_update_rx_total_stats(s->mac_reg, pkt_type, size, total_size);

    /* Raise RXT0, plus RXDMT0 if the number of free descriptors has
     * dropped to the minimum-threshold fraction of the ring. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
  890. static ssize_t
  891. e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
  892. {
  893. const struct iovec iov = {
  894. .iov_base = (uint8_t *)buf,
  895. .iov_len = size
  896. };
  897. return e1000_receive_iov(nc, &iov, 1);
  898. }
  899. static uint32_t
  900. mac_readreg(E1000State *s, int index)
  901. {
  902. return s->mac_reg[index];
  903. }
  904. static uint32_t
  905. mac_icr_read(E1000State *s, int index)
  906. {
  907. uint32_t ret = s->mac_reg[ICR];
  908. DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
  909. set_interrupt_cause(s, 0, 0);
  910. return ret;
  911. }
  912. static uint32_t
  913. mac_read_clr4(E1000State *s, int index)
  914. {
  915. uint32_t ret = s->mac_reg[index];
  916. s->mac_reg[index] = 0;
  917. return ret;
  918. }
  919. static uint32_t
  920. mac_read_clr8(E1000State *s, int index)
  921. {
  922. uint32_t ret = s->mac_reg[index];
  923. s->mac_reg[index] = 0;
  924. s->mac_reg[index-1] = 0;
  925. return ret;
  926. }
  927. static void
  928. mac_writereg(E1000State *s, int index, uint32_t val)
  929. {
  930. uint32_t macaddr[2];
  931. s->mac_reg[index] = val;
  932. if (index == RA + 1) {
  933. macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
  934. macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
  935. qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
  936. }
  937. }
  938. static void
  939. set_rdt(E1000State *s, int index, uint32_t val)
  940. {
  941. s->mac_reg[index] = val & 0xffff;
  942. if (e1000_has_rxbufs(s, 1)) {
  943. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  944. }
  945. }
/* Generate a write handler that keeps only the low `num` bits of the
 * written value (for registers narrower than 32 bits). */
#define LOW_BITS_SET_FUNC(num)                             \
    static void                                            \
    set_##num##bit(E1000State *s, int index, uint32_t val) \
    {                                                      \
        s->mac_reg[index] = val & (BIT(num) - 1);          \
    }

LOW_BITS_SET_FUNC(4)
LOW_BITS_SET_FUNC(11)
LOW_BITS_SET_FUNC(13)
LOW_BITS_SET_FUNC(16)
/* TDLEN/RDLEN write: descriptor ring length is 128-byte aligned and
 * capped at 20 bits (mask keeps bits 19:7). */
static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}
/* TCTL/TDT write handler (shared): store the value, keep TDT within its
 * 16-bit range, and kick the transmit path. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
/* ICR write: bits written as 1 clear the corresponding pending causes. */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
/* IMC write: mask off (disable) the given interrupt causes, then
 * re-evaluate the interrupt line. */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}
/* IMS write: enable the given interrupt causes, then re-evaluate the
 * interrupt line (a newly unmasked pending cause fires immediately). */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
#define getreg(x)    [x] = mac_readreg
typedef uint32_t (*readops)(E1000State *, int);

/* Dispatch table for MMIO register reads, indexed by register offset / 4.
 * Entries left NULL are unknown/unimplemented registers. */
static const readops macreg_readops[] = {
    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
    getreg(MANC),     getreg(MDIC),     getreg(SWSM),     getreg(STATUS),
    getreg(TORL),     getreg(TOTL),     getreg(IMS),      getreg(TCTL),
    getreg(RDH),      getreg(RDT),      getreg(VET),      getreg(ICS),
    getreg(TDBAL),    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),
    getreg(TDLEN),    getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
    getreg(TADV),     getreg(ITR),      getreg(FCRUC),    getreg(IPAV),
    getreg(WUC),      getreg(WUS),      getreg(SCC),      getreg(ECOL),
    getreg(MCC),      getreg(LATECOL),  getreg(COLC),     getreg(DC),
    getreg(TNCRS),    getreg(SEQEC),    getreg(CEXTERR),  getreg(RLEC),
    getreg(XONRXC),   getreg(XONTXC),   getreg(XOFFRXC),  getreg(XOFFTXC),
    getreg(RFC),      getreg(RJC),      getreg(RNBC),     getreg(TSCTFC),
    getreg(MGTPRC),   getreg(MGTPDC),   getreg(MGTPTC),   getreg(GORCL),
    getreg(GOTCL),    getreg(RDFH),     getreg(RDFT),     getreg(RDFHS),
    getreg(RDFTS),    getreg(RDFPC),    getreg(TDFH),     getreg(TDFT),
    getreg(TDFHS),    getreg(TDFTS),    getreg(TDFPC),    getreg(AIT),

    /* 64-bit byte counters: reading the high half clears the pair. */
    [TOTH] = mac_read_clr8,      [TORH] = mac_read_clr8,
    [GOTCH] = mac_read_clr8,     [GORCH] = mac_read_clr8,
    /* Read-to-clear 32-bit statistics counters. */
    [PRC64] = mac_read_clr4,     [PRC127] = mac_read_clr4,
    [PRC255] = mac_read_clr4,    [PRC511] = mac_read_clr4,
    [PRC1023] = mac_read_clr4,   [PRC1522] = mac_read_clr4,
    [PTC64] = mac_read_clr4,     [PTC127] = mac_read_clr4,
    [PTC255] = mac_read_clr4,    [PTC511] = mac_read_clr4,
    [PTC1023] = mac_read_clr4,   [PTC1522] = mac_read_clr4,
    [GPRC] = mac_read_clr4,      [GPTC] = mac_read_clr4,
    [TPT] = mac_read_clr4,       [TPR] = mac_read_clr4,
    [RUC] = mac_read_clr4,       [ROC] = mac_read_clr4,
    [BPRC] = mac_read_clr4,      [MPRC] = mac_read_clr4,
    [TSCTC] = mac_read_clr4,     [BPTC] = mac_read_clr4,
    [MPTC] = mac_read_clr4,
    /* Registers with read side effects or backing storage elsewhere. */
    [ICR] = mac_icr_read,        [EECD] = get_eecd,
    [EERD] = flash_eerd_read,

    /* Register ranges served by the plain read handler. */
    [CRCERRS ... MPC] = &mac_readreg,
    [IP6AT ... IP6AT + 3] = &mac_readreg, [IP4AT ... IP4AT + 6] = &mac_readreg,
    [FFLT ... FFLT + 6] = &mac_readreg,
    [RA ... RA + 31] = &mac_readreg,
    [WUPM ... WUPM + 31] = &mac_readreg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = &mac_readreg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_readreg,
    [FFMT ... FFMT + 254] = &mac_readreg,
    [FFVT ... FFVT + 254] = &mac_readreg,
    [PBM ... PBM + 16383] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
#define putreg(x)    [x] = mac_writereg
typedef void (*writeops)(E1000State *, int, uint32_t);

/* Dispatch table for MMIO register writes, indexed by register offset / 4.
 * NULL entries are read-only or unknown registers. */
static const writeops macreg_writeops[] = {
    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),      putreg(FCRUC),
    putreg(IPAV),     putreg(WUC),
    putreg(WUS),

    /* Registers whose writes trigger device activity or need masking. */
    [TDLEN]  = set_dlen,   [RDLEN]  = set_dlen,   [TCTL] = set_tctl,
    [TDT]    = set_tctl,   [MDIC]   = set_mdic,   [ICS]  = set_ics,
    [TDH]    = set_16bit,  [RDH]    = set_16bit,  [RDT]  = set_rdt,
    [IMC]    = set_imc,    [IMS]    = set_ims,    [ICR]  = set_icr,
    [EECD]   = set_eecd,   [RCTL]   = set_rx_control, [CTRL] = set_ctrl,
    [RDTR]   = set_16bit,  [RADV]   = set_16bit,  [TADV] = set_16bit,
    [ITR]    = set_16bit,  [TDFH]   = set_11bit,  [TDFT] = set_11bit,
    [TDFHS]  = set_13bit,  [TDFTS]  = set_13bit,  [TDFPC] = set_13bit,
    [RDFH]   = set_13bit,  [RDFT]   = set_13bit,  [RDFHS] = set_13bit,
    [RDFTS]  = set_13bit,  [RDFPC]  = set_13bit,  [AIT]   = set_16bit,

    /* Register ranges served by the plain/width-masked write handlers. */
    [IP6AT ... IP6AT + 3] = &mac_writereg, [IP4AT ... IP4AT + 6] = &mac_writereg,
    [FFLT ... FFLT + 6] = &set_11bit,
    [RA ... RA + 31] = &mac_writereg,
    [WUPM ... WUPM + 31] = &mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = &mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_writereg,
    [FFMT ... FFMT + 254] = &set_4bit, [FFVT ... FFVT + 254] = &mac_writereg,
    [PBM ... PBM + 16383] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };

/* Encode a compat-flag requirement in the upper bits of an access entry. */
#define markflag(x)    ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
 * f - flag bits (up to 6 possible flags)
 * n - flag needed
 * p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
    /* Registers only exposed when the MAC compat flag is enabled. */
    [IPAV]    = markflag(MAC),    [WUC]     = markflag(MAC),
    [IP6AT]   = markflag(MAC),    [IP4AT]   = markflag(MAC),
    [FFVT]    = markflag(MAC),    [WUPM]    = markflag(MAC),
    [ECOL]    = markflag(MAC),    [MCC]     = markflag(MAC),
    [DC]      = markflag(MAC),    [TNCRS]   = markflag(MAC),
    [RLEC]    = markflag(MAC),    [XONRXC]  = markflag(MAC),
    [XOFFTXC] = markflag(MAC),    [RFC]     = markflag(MAC),
    [TSCTFC]  = markflag(MAC),    [MGTPRC]  = markflag(MAC),
    [WUS]     = markflag(MAC),    [AIT]     = markflag(MAC),
    [FFLT]    = markflag(MAC),    [FFMT]    = markflag(MAC),
    [SCC]     = markflag(MAC),    [FCRUC]   = markflag(MAC),
    [LATECOL] = markflag(MAC),    [COLC]    = markflag(MAC),
    [SEQEC]   = markflag(MAC),    [CEXTERR] = markflag(MAC),
    [XONTXC]  = markflag(MAC),    [XOFFRXC] = markflag(MAC),
    [RJC]     = markflag(MAC),    [RNBC]    = markflag(MAC),
    [MGTPDC]  = markflag(MAC),    [MGTPTC]  = markflag(MAC),
    [RUC]     = markflag(MAC),    [ROC]     = markflag(MAC),
    [GORCL]   = markflag(MAC),    [GORCH]   = markflag(MAC),
    [GOTCL]   = markflag(MAC),    [GOTCH]   = markflag(MAC),
    [BPRC]    = markflag(MAC),    [MPRC]    = markflag(MAC),
    [TSCTC]   = markflag(MAC),    [PRC64]   = markflag(MAC),
    [PRC127]  = markflag(MAC),    [PRC255]  = markflag(MAC),
    [PRC511]  = markflag(MAC),    [PRC1023] = markflag(MAC),
    [PRC1522] = markflag(MAC),    [PTC64]   = markflag(MAC),
    [PTC127]  = markflag(MAC),    [PTC255]  = markflag(MAC),
    [PTC511]  = markflag(MAC),    [PTC1023] = markflag(MAC),
    [PTC1522] = markflag(MAC),    [MPTC]    = markflag(MAC),
    [BPTC]    = markflag(MAC),

    /* FIFO head/tail/count registers are stored but not acted upon. */
    [TDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [PBM]   = markflag(MAC) | MAC_ACCESS_PARTIAL,
};
/*
 * MMIO write dispatcher: map the address to a register index and invoke
 * the matching handler, honoring compat-flag gating from mac_reg_access.
 */
static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    /* Registers are 32-bit; each index covers 4 bytes of the BAR. */
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            macreg_writeops[index](s, index, val);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else if (index < NREADOPS && macreg_readops[index]) {
        /* Readable but not writable: silently drop the write. */
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
               index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}
/*
 * MMIO read dispatcher: map the address to a register index and invoke
 * the matching handler; unknown/disabled registers read as 0.
 */
static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    /* Registers are 32-bit; each index covers 4 bytes of the BAR. */
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            return macreg_readops[index](s, index);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    }
    return 0;
}
/* MMIO BAR callbacks; all accesses are performed as 32-bit operations. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  1166. static uint64_t e1000_io_read(void *opaque, hwaddr addr,
  1167. unsigned size)
  1168. {
  1169. E1000State *s = opaque;
  1170. (void)s;
  1171. return 0;
  1172. }
  1173. static void e1000_io_write(void *opaque, hwaddr addr,
  1174. uint64_t val, unsigned size)
  1175. {
  1176. E1000State *s = opaque;
  1177. (void)s;
  1178. }
/* I/O BAR callbacks (stubs — see e1000_io_read/e1000_io_write). */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* VMState field predicate: true only for version-1 migration streams. */
static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}
/* Migration pre-save hook: resolve state that cannot be migrated as-is. */
static int e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && have_autoneg(s)) {
        s->phy_reg[MII_BMSR] |= MII_BMSR_AN_COMP;
    }

    /* Decide which set of props to migrate in the main structure */
    if (chkflag(TSO) || !s->use_tso_for_migration) {
        /* Either we're migrating with the extra subsection, in which
         * case the mig_props is always 'props' OR
         * we've not got the subsection, but 'props' was the last
         * updated.
         */
        s->mig_props = s->tx.props;
    } else {
        /* We're not using the subsection, and 'tso_props' was
         * the last updated.
         */
        s->mig_props = s->tx.tso_props;
    }
    return 0;
}
/* Migration post-load hook: rebuild derived state on the destination. */
static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* Re-arm interrupt mitigation; fire the timer almost immediately so
     * any cause pending at save time gets delivered. */
    s->mit_ide = 0;
    s->mit_timer_on = true;
    timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (have_autoneg(s) && !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    s->tx.props = s->mig_props;
    if (!s->received_tx_tso) {
        /* We received only one set of offload data (tx.props)
         * and haven't got tx.tso_props.  The best we can do
         * is dupe the data.
         */
        s->tx.tso_props = s->mig_props;
    }
    return 0;
}
/* Subsection post-load: note that the TSO subsection was present, so
 * e1000_post_load() must not duplicate mig_props into tx.tso_props. */
static int e1000_tx_tso_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;

    s->received_tx_tso = true;
    return 0;
}
/* Subsection predicate: migrate the full MAC register file only when
 * the extra_mac_registers compat flag is enabled. */
static bool e1000_full_mac_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MAC);
}
/* Subsection predicate: migrate separate TSO offload state only when
 * the migrate_tso_props compat flag is enabled. */
static bool e1000_tso_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(TSO);
}
/* Interrupt-mitigation registers and IRQ level (migration subsection). */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
/* Entire 0x8000-entry MAC register file (migration subsection, gated on
 * the extra_mac_registers compat flag). */
static const VMStateDescription vmstate_e1000_full_mac_state = {
    .name = "e1000/full_mac_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_full_mac_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
        VMSTATE_END_OF_LIST()
    }
};
/* Separate TSO offload context (migration subsection, gated on the
 * migrate_tso_props compat flag). */
static const VMStateDescription vmstate_e1000_tx_tso_state = {
    .name = "e1000/tx_tso_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_tso_state_needed,
    .post_load = e1000_tx_tso_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(tx.tso_props.ipcss, E1000State),
        VMSTATE_UINT8(tx.tso_props.ipcso, E1000State),
        VMSTATE_UINT16(tx.tso_props.ipcse, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucss, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucso, E1000State),
        VMSTATE_UINT16(tx.tso_props.tucse, E1000State),
        VMSTATE_UINT32(tx.tso_props.paylen, E1000State),
        VMSTATE_UINT8(tx.tso_props.hdr_len, E1000State),
        VMSTATE_UINT16(tx.tso_props.mss, E1000State),
        VMSTATE_INT8(tx.tso_props.ip, E1000State),
        VMSTATE_INT8(tx.tso_props.tcp, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
/* Main migration description. Field order and types define the wire
 * format — do not reorder or retype existing entries. */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(mig_props.ipcss, E1000State),
        VMSTATE_UINT8(mig_props.ipcso, E1000State),
        VMSTATE_UINT16(mig_props.ipcse, E1000State),
        VMSTATE_UINT8(mig_props.tucss, E1000State),
        VMSTATE_UINT8(mig_props.tucso, E1000State),
        VMSTATE_UINT16(mig_props.tucse, E1000State),
        VMSTATE_UINT32(mig_props.paylen, E1000State),
        VMSTATE_UINT8(mig_props.hdr_len, E1000State),
        VMSTATE_UINT16(mig_props.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(mig_props.ip, E1000State),
        VMSTATE_INT8(mig_props.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, E1000_MC_TBL_SIZE),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA,
                                 E1000_VLAN_FILTER_TBL_SIZE),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_e1000_mit_state,
        &vmstate_e1000_full_mac_state,
        &vmstate_e1000_tx_tso_state,
        NULL
    }
};
/*
 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
 * Note: A valid DevId will be inserted during pci_e1000_realize().
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff, 0xffff, 0x0000,
};
/* PCI interface */

/*
 * Create the MMIO and I/O memory regions. MMIO write coalescing is
 * enabled for all register ranges except the excluded ones (registers
 * with write side effects that must be handled synchronously).
 */
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    /* Coalesce each gap between consecutive excluded registers. */
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
/* Device teardown: release timers and the network backend. */
static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_free(d->autoneg_timer);
    timer_free(d->mit_timer);
    timer_free(d->flush_queue_timer);
    qemu_del_nic(d->nic);
}
/* Network backend callbacks for this NIC model. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};
/*
 * PCI config-space write hook: when bus mastering gets enabled, flush
 * packets queued while DMA was impossible.
 */
static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
                               uint32_t val, int len)
{
    E1000State *s = E1000(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
/*
 * Realize the PCI device: set up config space, BARs, EEPROM contents,
 * the network backend, and the device timers.
 */
static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    uint8_t *pci_conf;
    uint8_t *macaddr;

    pci_dev->config_write = e1000_write_config;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    /* BAR 0: register MMIO; BAR 1: (stub) I/O ports. */
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;

    /* Populate the EEPROM image: template + device ID + MAC address. */
    e1000x_core_prepare_eeprom(d->eeprom_data,
                               e1000_eeprom_template,
                               sizeof(e1000_eeprom_template),
                               PCI_DEVICE_GET_CLASS(pci_dev)->device_id,
                               macaddr);

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id,
                          &dev->mem_reentrancy_guard, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
    d->flush_queue_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                        e1000_flush_queue_timer, d);
}
/* User-visible device properties; the compat-flag bits gate optional
 * behavior for migration compatibility with older machine types. */
static const Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("extra_mac_registers", E1000State,
                    compat_flags, E1000_FLAG_MAC_BIT, true),
    DEFINE_PROP_BIT("migrate_tso_props", E1000State,
                    compat_flags, E1000_FLAG_TSO_BIT, true),
    DEFINE_PROP_BIT("init-vet", E1000State,
                    compat_flags, E1000_FLAG_VET_BIT, true),
};
/* Per-variant identification data passed to e1000_class_init as class_data. */
typedef struct E1000Info {
    const char *name;    /* QOM type name of the concrete device */
    uint16_t   device_id; /* PCI device ID */
    uint8_t    revision;  /* PCI revision ID */
    uint16_t   phy_id2;   /* PHY identifier register 2 value */
} E1000Info;
  1490. static void e1000_class_init(ObjectClass *klass, void *data)
  1491. {
  1492. DeviceClass *dc = DEVICE_CLASS(klass);
  1493. ResettableClass *rc = RESETTABLE_CLASS(klass);
  1494. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  1495. E1000BaseClass *e = E1000_CLASS(klass);
  1496. const E1000Info *info = data;
  1497. k->realize = pci_e1000_realize;
  1498. k->exit = pci_e1000_uninit;
  1499. k->romfile = "efi-e1000.rom";
  1500. k->vendor_id = PCI_VENDOR_ID_INTEL;
  1501. k->device_id = info->device_id;
  1502. k->revision = info->revision;
  1503. e->phy_id2 = info->phy_id2;
  1504. k->class_id = PCI_CLASS_NETWORK_ETHERNET;
  1505. rc->phases.hold = e1000_reset_hold;
  1506. set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
  1507. dc->desc = "Intel Gigabit Ethernet";
  1508. dc->vmsd = &vmstate_e1000;
  1509. device_class_set_props(dc, e1000_properties);
  1510. }
  1511. static void e1000_instance_init(Object *obj)
  1512. {
  1513. E1000State *n = E1000(obj);
  1514. device_add_bootindex_property(obj, &n->conf.bootindex,
  1515. "bootindex", "/ethernet-phy@0",
  1516. DEVICE(n));
  1517. }
/*
 * Abstract base type shared by all e1000 variants; concrete types are
 * registered dynamically from e1000_devices[] in e1000_register_types().
 */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true, /* only the named variants below are instantiable */
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
/* The emulated e1000 variants; one QOM type is registered per entry. */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
  1550. static void e1000_register_types(void)
  1551. {
  1552. int i;
  1553. type_register_static(&e1000_base_info);
  1554. for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
  1555. const E1000Info *info = &e1000_devices[i];
  1556. TypeInfo type_info = {};
  1557. type_info.name = info->name;
  1558. type_info.parent = TYPE_E1000_BASE;
  1559. type_info.class_data = (void *)info;
  1560. type_info.class_init = e1000_class_init;
  1561. type_register_static(&type_info);
  1562. }
  1563. }
  1564. type_init(e1000_register_types)