/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/net/mii.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "net/eth.h"
#include "net/net.h"
#include "net/checksum.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/range.h"
#include "e1000_common.h"
#include "e1000x_common.h"
#include "trace.h"
#include "qom/object.h"

/* #define E1000_DEBUG */

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000

#define MAXIMUM_ETHERNET_HDR_LEN (ETH_HLEN + 4)

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
 *  Others never tested
 */

struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char vlan_needed;
        unsigned char sum_needed;
        bool cptse;
        e1000x_txd_props props;
        e1000x_txd_props tso_props;
        uint16_t tso_frames;
        bool busy;
    } tx;

    struct {
        uint32_t val_in;    /* shifted in from guest driver */
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

    QEMUTimer *flush_queue_timer;

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_TSO_BIT 3
#define E1000_FLAG_VET_BIT 4
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT)
#define E1000_FLAG_VET (1 << E1000_FLAG_VET_BIT)
    uint32_t compat_flags;
    bool received_tx_tso;
    bool use_tso_for_migration;
    e1000x_txd_props mig_props;
};
typedef struct E1000State_st E1000State;

#define chkflag(x)     (s->compat_flags & E1000_FLAG_##x)

struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
};
typedef struct E1000BaseClass E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

DECLARE_OBJ_CHECKERS(E1000State, E1000BaseClass,
                     E1000, TYPE_E1000_BASE)

static void
e1000_link_up(E1000State *s)
{
    e1000x_update_regs_on_link_up(s->mac_reg, s->phy_reg);

    /* E1000_STATUS_LU is tested by e1000_can_receive() */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static void
e1000_autoneg_done(E1000State *s)
{
    e1000x_update_regs_on_autoneg_done(s->mac_reg, s->phy_reg);

    /* E1000_STATUS_LU is tested by e1000_can_receive() */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static bool
have_autoneg(E1000State *s)
{
    return chkflag(AUTONEG) && (s->phy_reg[MII_BMCR] & MII_BMCR_AUTOEN);
}

static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
    s->phy_reg[MII_BMCR] = val & ~(0x3f |
                                   MII_BMCR_RESET |
                                   MII_BMCR_ANRESTART);

    /*
     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
     * migrate during auto negotiation, after migration the link will be
     * down.
     */
    if (have_autoneg(s) && (val & MII_BMCR_ANRESTART)) {
        e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
    }
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [MII_BMCR] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [MII_BMSR]   = PHY_R,   [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [MII_PHYID1] = PHY_R,   [M88E1000_PHY_SPEC_CTRL]     = PHY_RW,
    [MII_BMCR]   = PHY_RW,  [MII_CTRL1000]               = PHY_RW,
    [MII_ANLPAR] = PHY_R,   [MII_STAT1000]               = PHY_R,
    [MII_ANAR]   = PHY_RW,  [M88E1000_RX_ERR_CNTR]       = PHY_R,
    [MII_PHYID2] = PHY_R,   [M88E1000_PHY_SPEC_STATUS]   = PHY_R,
    [MII_ANER]   = PHY_R,
};

/* MII_PHYID2 documented in 8254x_GBe_SDM.pdf, p. 250 */
static const uint16_t phy_reg_init[] = {
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD |
                 MII_BMCR_AUTOEN,

    [MII_BMSR] = MII_BMSR_EXTCAP |
                 MII_BMSR_LINK_ST |   /* link initially up */
                 MII_BMSR_AUTONEG |
                 /* MII_BMSR_AN_COMP: initially NOT completed */
                 MII_BMSR_MFPS |
                 MII_BMSR_EXTSTAT |
                 MII_BMSR_10T_HD |
                 MII_BMSR_10T_FD |
                 MII_BMSR_100TX_HD |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1] = 0x141,
    /* [MII_PHYID2] configured per DevId, from e1000_reset() */
    [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 |
                 MII_ANAR_10FD | MII_ANAR_TX |
                 MII_ANAR_TXFD | MII_ANAR_PAUSE |
                 MII_ANAR_PAUSE_ASYM,
    [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                   MII_ANLPAR_TX | MII_ANLPAR_TXFD,
    [MII_CTRL1000] = MII_CTRL1000_FULL | MII_CTRL1000_PORT |
                     MII_CTRL1000_MASTER,
    [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
                     MII_STAT1000_ROK | MII_STAT1000_LOK,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};

static const uint32_t mac_reg_init[] = {
    [PBA]    = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL]   = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
               E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC]   = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
               E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
               E1000_MANC_RMCP_EN,
};

/* Helper function, *curr == 0 means the value is not set */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value && (*curr == 0 || value < *curr)) {
        *curr = value;
    }
}

static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (chkflag(MIT)) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            /*
             * According to e1000 SPEC, the Ethernet controller guarantees
             * a maximum observable interrupt rate of 7813 interrupts/sec.
             * Thus if mit_delay < 500 then the delay should be set to the
             * minimum delay possible which is 500.
             */
            mit_delay = (mit_delay < 500) ? 500 : mit_delay;

            s->mit_timer_on = 1;
            timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      mit_delay * 256);
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}

static void
e1000_mit_timer(void *opaque)
{
    E1000State *s = opaque;

    s->mit_timer_on = 0;
    /* Call set_interrupt_cause to update the irq level (if necessary). */
    set_interrupt_cause(s, 0, s->mac_reg[ICR]);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}

static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    if (!qemu_get_queue(s->nic)->link_down) {
        e1000_autoneg_done(s);
        set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
    }
}

static bool e1000_vet_init_need(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(VET);
}

static void e1000_reset_hold(Object *obj)
{
    E1000State *d = E1000(obj);
    E1000BaseClass *edc = E1000_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_del(d->flush_queue_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memcpy(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    d->phy_reg[MII_PHYID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memcpy(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000x_update_regs_on_link_down(d->mac_reg, d->phy_reg);
    }

    e1000x_reset_mac_addr(d->nic, d->mac_reg, macaddr);

    if (e1000_vet_init_need(d)) {
        d->mac_reg[VET] = ETH_P_VLAN;
    }
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
e1000_flush_queue_timer(void *opaque)
{
    E1000State *s = opaque;

    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = e1000x_rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    timer_mod(s->flush_queue_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
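
/*
 * MDIC is the MDI (PHY management) control register: the guest driver
 * performs PHY register accesses through it.  The low bits carry the
 * data, E1000_MDIC_REG_MASK selects the PHY register, E1000_MDIC_PHY_MASK
 * the PHY address (only PHY #1 is emulated here), and the OP bits select
 * read or write.  Completion is signalled with E1000_MDIC_READY;
 * E1000_MDIC_ERROR flags accesses to unimplemented registers.
 */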
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
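
/*
 * EECD exposes the EEPROM to the guest as a bit-banged Microwire
 * interface: the guest drives CS/DI and clocks SK, and reads data back
 * on DO.  set_eecd() shifts in a 3-bit opcode plus a 6-bit word address;
 * once a read opcode is seen, get_eecd() shifts the addressed 16-bit
 * word out MSB first.
 */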
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val)) {            /* CS inactive; nothing to do */
        return;
    }
    if (E1000_EECD_CS & (val ^ oldval)) {    /* CS rise edge; reset state */
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
        return;
    }
    if (!(E1000_EECD_SK & val)) {            /* falling edge */
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
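
/*
 * EERD is the register-based alternative to bit-banging EECD: the guest
 * writes a word address together with the START bit, then polls until
 * the DONE bit appears alongside the addressed EEPROM word in the data
 * field.
 */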
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r);
}
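
/*
 * Store an Internet checksum into a packet buffer: bytes css..cse are
 * summed (cse == 0 means "up to the end of the packet") and the 16-bit
 * result is written big-endian at offset sloc.
 */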
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish_nozero(sum));
    }
}

static inline void
inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
{
    if (is_broadcast_ether_addr(arr)) {
        e1000x_inc_reg_if_not_full(s->mac_reg, BPTC);
    } else if (is_multicast_ether_addr(arr)) {
        e1000x_inc_reg_if_not_full(s->mac_reg, MPTC);
    }
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[MII_BMCR] & MII_BMCR_LOOPBACK) {
        qemu_receive_packet(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
    inc_tx_bcast_or_mcast_count(s, buf);
    e1000x_increase_size_stats(s->mac_reg, PTCregs, size + 4);
}
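
/*
 * Transmit one segment of the current packet.  For TSO (cptse) segments,
 * the replicated headers are patched up first: the IP total length and
 * IPv4 identification, the TCP sequence number (advanced by mss for each
 * earlier segment) and the PSH/FIN flags on all but the final segment,
 * plus the pseudo-header length folded into the checksum field below.
 */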
static void
xmit_seg(E1000State *s)
{
    uint16_t len;
    unsigned int frames = s->tx.tso_frames, css, sofar;
    struct e1000_tx *tp = &s->tx;
    struct e1000x_txd_props *props = tp->cptse ? &tp->tso_props : &tp->props;

    if (tp->cptse) {
        css = props->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (props->ip) {    /* IPv4 */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     lduw_be_p(tp->data + css + 4) + frames);
        } else {            /* IPv6 */
            stw_be_p(tp->data+css+4, tp->size - css);
        }
        css = props->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", props->tcp, css, len);
        if (props->tcp) {
            sofar = frames * props->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (props->paylen - sofar > props->mss) {
                tp->data[css + 13] &= ~9;    /* PSH, FIN */
            } else if (frames) {
                e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC);
            }
        } else {    /* UDP */
            stw_be_p(tp->data+css+4, len);
        }
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            void *sp = tp->data + props->tucso;

            phsum = lduw_be_p(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
        putsum(tp->data, tp->size, props->tucso, props->tucss, props->tucse);
    }
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM) {
        putsum(tp->data, tp->size, props->ipcso, props->ipcss, props->ipcse);
    }
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else {
        e1000_send_packet(s, tp->data, tp->size);
    }

    e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
    e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size + 4);
    e1000x_inc_reg_if_not_full(s->mac_reg, GPTC);
    e1000x_grow_8reg_if_not_full(s->mac_reg, GOTCL, s->tx.size + 4);
}
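
/*
 * Consume one transmit descriptor.  Context descriptors only load
 * checksum/TSO parameters; data and legacy descriptors append their
 * buffer contents to tp->data, and with TSO active the accumulated
 * bytes are flushed through xmit_seg() one mss-sized segment (plus a
 * copy of the saved header) at a time.
 */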
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
        if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) {
            e1000x_read_tx_ctx_descr(xp, &tp->tso_props);
            s->use_tso_for_migration = 1;
            tp->tso_frames = 0;
        } else {
            e1000x_read_tx_ctx_descr(xp, &tp->props);
            s->use_tso_for_migration = 0;
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->cptse) {
        msh = tp->tso_props.hdr_len + tp->tso_props.mss;
        do {
            bytes = split_size;
            if (tp->size >= msh) {
                goto eop;
            }
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->tso_props.hdr_len
                && tp->size < tp->tso_props.hdr_len) {
                memmove(tp->header, tp->data, tp->tso_props.hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->tso_props.hdr_len);
                tp->size = tp->tso_props.hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

eop:
    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}
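
/*
 * Drain the transmit ring, walking descriptors from TDH to TDT and
 * writing each one back as it completes.  The tx.busy flag makes the
 * loop non-reentrant: a nested call (possible if guest descriptors aim
 * DMA back at the device's own registers) returns immediately.
 */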
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    if (s->tx.busy) {
        return;
    }
    s->tx.busy = true;

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start ||
            tdh_start >= s->mac_reg[TDLEN] / sizeof(desc)) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    s->tx.busy = false;
    set_ics(s, 0, cause);
}
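
/*
 * Receive filtering: frames failing the VLAN filter table (when VLAN
 * filtering is enabled) are dropped; otherwise acceptance follows the
 * promiscuous unicast/multicast bits, broadcast acceptance, and finally
 * the unicast address / multicast table group filter.
 */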
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    uint32_t rctl = s->mac_reg[RCTL];
    int isbcast = is_broadcast_ether_addr(buf);
    int ismcast = is_multicast_ether_addr(buf);

    if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
        e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
        uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci);
        uint32_t vfta =
            ldl_le_p((uint32_t *)(s->mac_reg + VFTA) +
                     ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK));
        if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) {
            return 0;
        }
    }

    if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */
        return 1;
    }

    if (ismcast && (rctl & E1000_RCTL_MPE)) {              /* promiscuous mcast */
        return 1;
    }

    if (isbcast && (rctl & E1000_RCTL_BAM)) {              /* broadcast enabled */
        return 1;
    }

    return e1000x_rx_group_filter(s->mac_reg, buf);
}

static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg);
    } else {
        if (have_autoneg(s) &&
            !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
            e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
        } else {
            e1000_link_up(s);
        }
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
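
/*
 * Check whether the receive ring can absorb total_size bytes.  The
 * descriptors from RDH up to (but excluding) RDT belong to the NIC, so
 * RDH == RDT means no free buffers are available.
 */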
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
            s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}

static bool
e1000_can_receive(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    return e1000x_rx_ready(&s->parent_obj, s->mac_reg) &&
        e1000_has_rxbufs(s, 1) && !timer_pending(s->flush_queue_timer);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}

static void
e1000_receiver_overrun(E1000State *s, size_t size)
{
    trace_e1000_receiver_overrun(size, s->mac_reg[RDH], s->mac_reg[RDT]);
    e1000x_inc_reg_if_not_full(s->mac_reg, RNBC);
    e1000x_inc_reg_if_not_full(s->mac_reg, MPC);
    set_ics(s, 0, E1000_ICS_RXO);
}
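
/*
 * Receive path: the frame is padded to the minimum Ethernet length if
 * necessary, run through the receive filter, optionally has its VLAN tag
 * stripped into the descriptor's "special" field, and is then DMA-copied
 * into guest buffers one rxbuf_size-sized descriptor at a time.
 */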
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[ETH_ZLEN];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;
    eth_pkt_types_e pkt_type;

    if (!e1000x_hw_rx_enabled(s->mac_reg)) {
        return -1;
    }

    if (timer_pending(s->flush_queue_timer)) {
        return 0;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(s->mac_reg, size)) {
        return size;
    }

    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
        vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    pkt_type = get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf));
    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + e1000x_fcs_len(s->mac_reg);
    if (!e1000_has_rxbufs(s, total_size)) {
        e1000_receiver_overrun(s, total_size);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status &= ~E1000_RXD_STAT_DD;
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        pci_dma_write(d, base + offsetof(struct e1000_rx_desc, status),
                      &desc.status, sizeof(desc.status));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start ||
            rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            e1000_receiver_overrun(s, total_size);
            return -1;
        }
    } while (desc_offset < total_size);

    e1000x_update_rx_total_stats(s->mac_reg, pkt_type, size, total_size);

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000_receive_iov(nc, &iov, 1);
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    uint32_t macaddr[2];

    s->mac_reg[index] = val;

    if (index == RA + 1) {
        macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
        macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
        qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
    }
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
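
/*
 * Generate setters for registers whose writable part is only the low
 * "num" bits: e.g. LOW_BITS_SET_FUNC(16) defines set_16bit(), which
 * stores val & 0xffff.
 */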
#define LOW_BITS_SET_FUNC(num)                             \
    static void                                            \
    set_##num##bit(E1000State *s, int index, uint32_t val) \
    {                                                      \
        s->mac_reg[index] = val & (BIT(num) - 1);          \
    }

LOW_BITS_SET_FUNC(4)
LOW_BITS_SET_FUNC(11)
LOW_BITS_SET_FUNC(13)
LOW_BITS_SET_FUNC(16)

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}

#define getreg(x)    [x] = mac_readreg
typedef uint32_t (*readops)(E1000State *, int);
static const readops macreg_readops[] = {
    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
    getreg(MANC),     getreg(MDIC),     getreg(SWSM),     getreg(STATUS),
    getreg(TORL),     getreg(TOTL),     getreg(IMS),      getreg(TCTL),
    getreg(RDH),      getreg(RDT),      getreg(VET),      getreg(ICS),
    getreg(TDBAL),    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),
    getreg(TDLEN),    getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
    getreg(TADV),     getreg(ITR),      getreg(FCRUC),    getreg(IPAV),
    getreg(WUC),      getreg(WUS),      getreg(SCC),      getreg(ECOL),
    getreg(MCC),      getreg(LATECOL),  getreg(COLC),     getreg(DC),
    getreg(TNCRS),    getreg(SEQEC),    getreg(CEXTERR),  getreg(RLEC),
    getreg(XONRXC),   getreg(XONTXC),   getreg(XOFFRXC),  getreg(XOFFTXC),
    getreg(RFC),      getreg(RJC),      getreg(RNBC),     getreg(TSCTFC),
    getreg(MGTPRC),   getreg(MGTPDC),   getreg(MGTPTC),   getreg(GORCL),
    getreg(GOTCL),    getreg(RDFH),     getreg(RDFT),     getreg(RDFHS),
    getreg(RDFTS),    getreg(RDFPC),    getreg(TDFH),     getreg(TDFT),
    getreg(TDFHS),    getreg(TDFTS),    getreg(TDFPC),    getreg(AIT),

    [TOTH]    = mac_read_clr8,    [TORH]    = mac_read_clr8,
    [GOTCH]   = mac_read_clr8,    [GORCH]   = mac_read_clr8,
    [PRC64]   = mac_read_clr4,    [PRC127]  = mac_read_clr4,
    [PRC255]  = mac_read_clr4,    [PRC511]  = mac_read_clr4,
    [PRC1023] = mac_read_clr4,    [PRC1522] = mac_read_clr4,
    [PTC64]   = mac_read_clr4,    [PTC127]  = mac_read_clr4,
    [PTC255]  = mac_read_clr4,    [PTC511]  = mac_read_clr4,
    [PTC1023] = mac_read_clr4,    [PTC1522] = mac_read_clr4,
    [GPRC]    = mac_read_clr4,    [GPTC]    = mac_read_clr4,
    [TPT]     = mac_read_clr4,    [TPR]     = mac_read_clr4,
    [RUC]     = mac_read_clr4,    [ROC]     = mac_read_clr4,
    [BPRC]    = mac_read_clr4,    [MPRC]    = mac_read_clr4,
    [TSCTC]   = mac_read_clr4,    [BPTC]    = mac_read_clr4,
    [MPTC]    = mac_read_clr4,
    [ICR]     = mac_icr_read,     [EECD]    = get_eecd,
    [EERD]    = flash_eerd_read,

    [CRCERRS ... MPC]     = &mac_readreg,
    [IP6AT ... IP6AT + 3] = &mac_readreg,
    [IP4AT ... IP4AT + 6] = &mac_readreg,
    [FFLT ... FFLT + 6]   = &mac_readreg,
    [RA ... RA + 31]      = &mac_readreg,
    [WUPM ... WUPM + 31]  = &mac_readreg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1]            = &mac_readreg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_readreg,
    [FFMT ... FFMT + 254] = &mac_readreg,
    [FFVT ... FFVT + 254] = &mac_readreg,
    [PBM ... PBM + 16383] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)    [x] = mac_writereg
typedef void (*writeops)(E1000State *, int, uint32_t);
static const writeops macreg_writeops[] = {
    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),      putreg(FCRUC),
    putreg(IPAV),     putreg(WUC),      putreg(WUS),

    [TDLEN]  = set_dlen,   [RDLEN]  = set_dlen,       [TCTL]  = set_tctl,
    [TDT]    = set_tctl,   [MDIC]   = set_mdic,       [ICS]   = set_ics,
    [TDH]    = set_16bit,  [RDH]    = set_16bit,      [RDT]   = set_rdt,
    [IMC]    = set_imc,    [IMS]    = set_ims,        [ICR]   = set_icr,
    [EECD]   = set_eecd,   [RCTL]   = set_rx_control, [CTRL]  = set_ctrl,
    [RDTR]   = set_16bit,  [RADV]   = set_16bit,      [TADV]  = set_16bit,
    [ITR]    = set_16bit,  [TDFH]   = set_11bit,      [TDFT]  = set_11bit,
    [TDFHS]  = set_13bit,  [TDFTS]  = set_13bit,      [TDFPC] = set_13bit,
    [RDFH]   = set_13bit,  [RDFT]   = set_13bit,      [RDFHS] = set_13bit,
    [RDFTS]  = set_13bit,  [RDFPC]  = set_13bit,      [AIT]   = set_16bit,

    [IP6AT ... IP6AT + 3] = &mac_writereg,
    [IP4AT ... IP4AT + 6] = &mac_writereg,
    [FFLT ... FFLT + 6]   = &set_11bit,
    [RA ... RA + 31]      = &mac_writereg,
    [WUPM ... WUPM + 31]  = &mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1]            = &mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = &mac_writereg,
    [FFMT ... FFMT + 254] = &set_4bit,
    [FFVT ... FFVT + 254] = &mac_writereg,
    [PBM ... PBM + 16383] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };

#define markflag(x)    ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
 * f - flag bits (up to 6 possible flags)
 * n - flag needed
 * p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
    [RDTR]    = markflag(MIT),    [TADV]    = markflag(MIT),
    [RADV]    = markflag(MIT),    [ITR]     = markflag(MIT),
    [IPAV]    = markflag(MAC),    [WUC]     = markflag(MAC),
    [IP6AT]   = markflag(MAC),    [IP4AT]   = markflag(MAC),
    [FFVT]    = markflag(MAC),    [WUPM]    = markflag(MAC),
    [ECOL]    = markflag(MAC),    [MCC]     = markflag(MAC),
    [DC]      = markflag(MAC),    [TNCRS]   = markflag(MAC),
    [RLEC]    = markflag(MAC),    [XONRXC]  = markflag(MAC),
    [XOFFTXC] = markflag(MAC),    [RFC]     = markflag(MAC),
    [TSCTFC]  = markflag(MAC),    [MGTPRC]  = markflag(MAC),
    [WUS]     = markflag(MAC),    [AIT]     = markflag(MAC),
    [FFLT]    = markflag(MAC),    [FFMT]    = markflag(MAC),
    [SCC]     = markflag(MAC),    [FCRUC]   = markflag(MAC),
    [LATECOL] = markflag(MAC),    [COLC]    = markflag(MAC),
    [SEQEC]   = markflag(MAC),    [CEXTERR] = markflag(MAC),
    [XONTXC]  = markflag(MAC),    [XOFFRXC] = markflag(MAC),
    [RJC]     = markflag(MAC),    [RNBC]    = markflag(MAC),
    [MGTPDC]  = markflag(MAC),    [MGTPTC]  = markflag(MAC),
    [RUC]     = markflag(MAC),    [ROC]     = markflag(MAC),
    [GORCL]   = markflag(MAC),    [GORCH]   = markflag(MAC),
    [GOTCL]   = markflag(MAC),    [GOTCH]   = markflag(MAC),
    [BPRC]    = markflag(MAC),    [MPRC]    = markflag(MAC),
    [TSCTC]   = markflag(MAC),    [PRC64]   = markflag(MAC),
    [PRC127]  = markflag(MAC),    [PRC255]  = markflag(MAC),
    [PRC511]  = markflag(MAC),    [PRC1023] = markflag(MAC),
    [PRC1522] = markflag(MAC),    [PTC64]   = markflag(MAC),
    [PTC127]  = markflag(MAC),    [PTC255]  = markflag(MAC),
    [PTC511]  = markflag(MAC),    [PTC1023] = markflag(MAC),
    [PTC1522] = markflag(MAC),    [MPTC]    = markflag(MAC),
    [BPTC]    = markflag(MAC),

    [TDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [PBM]   = markflag(MAC) | MAC_ACCESS_PARTIAL,
};
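
/*
 * MMIO dispatch: the register index is the dword offset into the 128KB
 * register window ((addr & 0x1ffff) >> 2), looked up in the read/write
 * tables above.  Registers whose mac_reg_access entry requires a compat
 * flag are reachable only when that flag is set in compat_flags.
 */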
static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            macreg_writeops[index](s, index, val);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
               index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}

static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            return macreg_readops[index](s, index);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    }
    return 0;
}

static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}

static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}

static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
  1221. static bool is_version_1(void *opaque, int version_id)
  1222. {
  1223. return version_id == 1;
  1224. }
  1225. static int e1000_pre_save(void *opaque)
  1226. {
  1227. E1000State *s = opaque;
  1228. NetClientState *nc = qemu_get_queue(s->nic);
  1229. /*
  1230. * If link is down and auto-negotiation is supported and ongoing,
  1231. * complete auto-negotiation immediately. This allows us to look
  1232. * at MII_BMSR_AN_COMP to infer link status on load.
  1233. */
  1234. if (nc->link_down && have_autoneg(s)) {
  1235. s->phy_reg[MII_BMSR] |= MII_BMSR_AN_COMP;
  1236. }
  1237. /* Decide which set of props to migrate in the main structure */
  1238. if (chkflag(TSO) || !s->use_tso_for_migration) {
  1239. /* Either we're migrating with the extra subsection, in which
  1240. * case the mig_props is always 'props' OR
  1241. * we've not got the subsection, but 'props' was the last
  1242. * updated.
  1243. */
  1244. s->mig_props = s->tx.props;
  1245. } else {
  1246. /* We're not using the subsection, and 'tso_props' was
  1247. * the last updated.
  1248. */
  1249. s->mig_props = s->tx.tso_props;
  1250. }
  1251. return 0;
  1252. }
  1253. static int e1000_post_load(void *opaque, int version_id)
  1254. {
  1255. E1000State *s = opaque;
  1256. NetClientState *nc = qemu_get_queue(s->nic);
  1257. if (!chkflag(MIT)) {
  1258. s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
  1259. s->mac_reg[TADV] = 0;
  1260. s->mit_irq_level = false;
  1261. }
  1262. s->mit_ide = 0;
  1263. s->mit_timer_on = true;
  1264. timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);
  1265. /* nc.link_down can't be migrated, so infer link_down according
  1266. * to link status bit in mac_reg[STATUS].
  1267. * Alternatively, restart link negotiation if it was in progress. */
  1268. nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
  1269. if (have_autoneg(s) && !(s->phy_reg[MII_BMSR] & MII_BMSR_AN_COMP)) {
  1270. nc->link_down = false;
  1271. timer_mod(s->autoneg_timer,
  1272. qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
  1273. }
  1274. s->tx.props = s->mig_props;
  1275. if (!s->received_tx_tso) {
  1276. /* We received only one set of offload data (tx.props)
  1277. * and haven't got tx.tso_props. The best we can do
  1278. * is dupe the data.
  1279. */
  1280. s->tx.tso_props = s->mig_props;
  1281. }
  1282. return 0;
  1283. }
  1284. static int e1000_tx_tso_post_load(void *opaque, int version_id)
  1285. {
  1286. E1000State *s = opaque;
  1287. s->received_tx_tso = true;
  1288. return 0;
  1289. }
  1290. static bool e1000_mit_state_needed(void *opaque)
  1291. {
  1292. E1000State *s = opaque;
  1293. return chkflag(MIT);
  1294. }
  1295. static bool e1000_full_mac_needed(void *opaque)
  1296. {
  1297. E1000State *s = opaque;
  1298. return chkflag(MAC);
  1299. }
  1300. static bool e1000_tso_state_needed(void *opaque)
  1301. {
  1302. E1000State *s = opaque;
  1303. return chkflag(TSO);
  1304. }

static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_mit_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_e1000_full_mac_state = {
    .name = "e1000/full_mac_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_full_mac_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_e1000_tx_tso_state = {
    .name = "e1000/tx_tso_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_tso_state_needed,
    .post_load = e1000_tx_tso_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(tx.tso_props.ipcss, E1000State),
        VMSTATE_UINT8(tx.tso_props.ipcso, E1000State),
        VMSTATE_UINT16(tx.tso_props.ipcse, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucss, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucso, E1000State),
        VMSTATE_UINT16(tx.tso_props.tucse, E1000State),
        VMSTATE_UINT32(tx.tso_props.paylen, E1000State),
        VMSTATE_UINT8(tx.tso_props.hdr_len, E1000State),
        VMSTATE_UINT16(tx.tso_props.mss, E1000State),
        VMSTATE_INT8(tx.tso_props.ip, E1000State),
        VMSTATE_INT8(tx.tso_props.tcp, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
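
/*
 * Main device state. The VMSTATE_UNUSED placeholders preserve the wire
 * layout of fields dropped since version 1; optional state travels in
 * the subsections listed at the end.
 */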
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(mig_props.ipcss, E1000State),
        VMSTATE_UINT8(mig_props.ipcso, E1000State),
        VMSTATE_UINT16(mig_props.ipcse, E1000State),
        VMSTATE_UINT8(mig_props.tucss, E1000State),
        VMSTATE_UINT8(mig_props.tucso, E1000State),
        VMSTATE_UINT16(mig_props.tucse, E1000State),
        VMSTATE_UINT32(mig_props.paylen, E1000State),
        VMSTATE_UINT8(mig_props.hdr_len, E1000State),
        VMSTATE_UINT16(mig_props.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(mig_props.ip, E1000State),
        VMSTATE_INT8(mig_props.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, E1000_MC_TBL_SIZE),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA,
                                 E1000_VLAN_FILTER_TBL_SIZE),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_e1000_mit_state,
        &vmstate_e1000_full_mac_state,
        &vmstate_e1000_tx_tso_state,
        NULL
    }
};

/*
 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
 * Note: A valid DevId will be inserted during pci_e1000_realize().
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

/* PCI interface */
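
/*
 * Map the register window with MMIO write coalescing enabled everywhere
 * except around the registers in excluded_regs, whose writes have
 * immediate side effects (interrupt, PHY-access and transmit-doorbell
 * registers).
 */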
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++) {
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i + 1] -
                                     excluded_regs[i] - 4);
    }
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d,
                          "e1000-io", IOPORT_SIZE);
}

static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_free(d->autoneg_timer);
    timer_free(d->mit_timer);
    timer_free(d->flush_queue_timer);
    qemu_del_nic(d->nic);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};
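
/*
 * Config-space write hook: when the guest turns on bus mastering the
 * device can DMA again, so deliver any packets queued while it was off.
 */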
static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
                               uint32_t val, int len)
{
    E1000State *s = E1000(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
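
/*
 * Realize: register BAR 0 (MMIO) and BAR 1 (I/O), build the EEPROM
 * image from the template (filling in device id and MAC address),
 * and create the NIC backend and the three device timers.
 */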
static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    uint8_t *pci_conf;
    uint8_t *macaddr;

    pci_dev->config_write = e1000_write_config;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;

    e1000x_core_prepare_eeprom(d->eeprom_data,
                               e1000_eeprom_template,
                               sizeof(e1000_eeprom_template),
                               PCI_DEVICE_GET_CLASS(pci_dev)->device_id,
                               macaddr);

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
    d->flush_queue_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                        e1000_flush_queue_timer, d);
}
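
/*
 * The compat-flag bits below default to on; machine-type compat
 * properties switch them off so that older machine types keep their
 * original behaviour and migration format.
 */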
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_BIT("extra_mac_registers", E1000State,
                    compat_flags, E1000_FLAG_MAC_BIT, true),
    DEFINE_PROP_BIT("migrate_tso_props", E1000State,
                    compat_flags, E1000_FLAG_TSO_BIT, true),
    DEFINE_PROP_BIT("init-vet", E1000State,
                    compat_flags, E1000_FLAG_VET_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
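
/* Per-variant identification data, handed to e1000_class_init() as class_data */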
typedef struct E1000Info {
    const char *name;
    uint16_t   device_id;
    uint8_t    revision;
    uint16_t   phy_id2;
} E1000Info;

static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    E1000BaseClass *e = E1000_CLASS(klass);
    const E1000Info *info = data;

    k->realize = pci_e1000_realize;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = info->device_id;
    k->revision = info->revision;
    e->phy_id2 = info->phy_id2;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    rc->phases.hold = e1000_reset_hold;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->vmsd = &vmstate_e1000;
    device_class_set_props(dc, e1000_properties);
}

static void e1000_instance_init(Object *obj)
{
    E1000State *n = E1000(obj);

    device_add_bootindex_property(obj, &n->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n));
}

static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
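
/* Concrete device models built on the abstract TYPE_E1000_BASE */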
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
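
/*
 * Register the abstract base type, then one concrete QOM type per entry
 * in e1000_devices, each sharing e1000_class_init() with its E1000Info
 * as class_data.
 */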
static void e1000_register_types(void)
{
    int i;

    type_register_static(&e1000_base_info);
    for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
        const E1000Info *info = &e1000_devices[i];
        TypeInfo type_info = {};

        type_info.name = info->name;
        type_info.parent = TYPE_E1000_BASE;
        type_info.class_data = (void *)info;
        type_info.class_init = e1000_class_init;

        type_register(&type_info);
    }
}

type_init(e1000_register_types)