2
0

e1000.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831
  1. /*
  2. * QEMU e1000 emulation
  3. *
  4. * Software developer's manual:
  5. * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
  6. *
  7. * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
  8. * Copyright (c) 2008 Qumranet
  9. * Based on work done by:
  10. * Copyright (c) 2007 Dan Aloni
  11. * Copyright (c) 2004 Antony T Curtis
  12. *
  13. * This library is free software; you can redistribute it and/or
  14. * modify it under the terms of the GNU Lesser General Public
  15. * License as published by the Free Software Foundation; either
  16. * version 2 of the License, or (at your option) any later version.
  17. *
  18. * This library is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. * Lesser General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU Lesser General Public
  24. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  25. */
  26. #include "qemu/osdep.h"
  27. #include "hw/pci/pci.h"
  28. #include "hw/qdev-properties.h"
  29. #include "migration/vmstate.h"
  30. #include "net/net.h"
  31. #include "net/checksum.h"
  32. #include "sysemu/sysemu.h"
  33. #include "sysemu/dma.h"
  34. #include "qemu/iov.h"
  35. #include "qemu/module.h"
  36. #include "qemu/range.h"
  37. #include "e1000x_common.h"
  38. #include "trace.h"
  39. #include "qom/object.h"
  40. static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
  41. /* #define E1000_DEBUG */
  42. #ifdef E1000_DEBUG
  43. enum {
  44. DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
  45. DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
  46. DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
  47. DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
  48. };
  49. #define DBGBIT(x) (1<<DEBUG_##x)
  50. static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
  51. #define DBGOUT(what, fmt, ...) do { \
  52. if (debugflags & DBGBIT(what)) \
  53. fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
  54. } while (0)
  55. #else
  56. #define DBGOUT(what, fmt, ...) do {} while (0)
  57. #endif
  58. #define IOPORT_SIZE 0x40
  59. #define PNPMMIO_SIZE 0x20000
  60. #define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */
  61. #define MAXIMUM_ETHERNET_HDR_LEN (14+4)
  62. /*
  63. * HW models:
  64. * E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
  65. * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
  66. * E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
  67. * Others never tested
  68. */
/*
 * Device state for the emulated Intel e1000-family NIC.
 */
struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;              /* register (MMIO) BAR */
    MemoryRegion io;                /* I/O port BAR */

    uint32_t mac_reg[0x8000];       /* MAC register file */
    uint16_t phy_reg[0x20];         /* PHY (MII) registers, accessed via MDIC */
    uint16_t eeprom_data[64];       /* 16-bit EEPROM words */

    uint32_t rxbuf_size;            /* per-descriptor RX buffer size, from RCTL */
    uint32_t rxbuf_min_shift;       /* derived from RCTL.RDMTS in set_rx_control() */
    /* Transmit state accumulated across the TX descriptors of one packet. */
    struct e1000_tx {
        unsigned char header[256];      /* saved TSO prototype header */
        unsigned char vlan_header[4];   /* 802.1Q tag to insert */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;                  /* bytes accumulated in data[] */
        unsigned char vlan_needed;      /* insert VLAN tag when transmitting */
        unsigned char sum_needed;       /* POPTS checksum-offload bits */
        bool cptse;                     /* current packet uses TSO */
        e1000x_txd_props props;         /* offload context, non-TSO descriptors */
        e1000x_txd_props tso_props;     /* offload context, TSO descriptors */
        uint16_t tso_frames;            /* segments emitted so far for this TSO */
    } tx;

    /* Microwire EEPROM bit-bang state, driven through the EECD register. */
    struct {
        uint32_t val_in;    /* shifted in from guest driver */
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

    QEMUTimer *flush_queue_timer;

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_TSO_BIT 3
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT)
    uint32_t compat_flags;
    bool received_tx_tso;
    bool use_tso_for_migration;
    e1000x_txd_props mig_props;     /* offload context selected for migration */
};
typedef struct E1000State_st E1000State;

/* Requires a local variable `s` of type E1000State*. */
#define chkflag(x)     (s->compat_flags & E1000_FLAG_##x)

/* Per-model class data: only the PHY ID differs between device models. */
struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
};
typedef struct E1000BaseClass E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

DECLARE_OBJ_CHECKERS(E1000State, E1000BaseClass,
                     E1000, TYPE_E1000_BASE)
  133. static void
  134. e1000_link_up(E1000State *s)
  135. {
  136. e1000x_update_regs_on_link_up(s->mac_reg, s->phy_reg);
  137. /* E1000_STATUS_LU is tested by e1000_can_receive() */
  138. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  139. }
  140. static void
  141. e1000_autoneg_done(E1000State *s)
  142. {
  143. e1000x_update_regs_on_autoneg_done(s->mac_reg, s->phy_reg);
  144. /* E1000_STATUS_LU is tested by e1000_can_receive() */
  145. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  146. }
  147. static bool
  148. have_autoneg(E1000State *s)
  149. {
  150. return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
  151. }
  152. static void
  153. set_phy_ctrl(E1000State *s, int index, uint16_t val)
  154. {
  155. /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
  156. s->phy_reg[PHY_CTRL] = val & ~(0x3f |
  157. MII_CR_RESET |
  158. MII_CR_RESTART_AUTO_NEG);
  159. /*
  160. * QEMU 1.3 does not support link auto-negotiation emulation, so if we
  161. * migrate during auto negotiation, after migration the link will be
  162. * down.
  163. */
  164. if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
  165. e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
  166. }
  167. }
/*
 * Dispatch table of PHY register write handlers, indexed by PHY register
 * number; NULL entries fall back to a plain store in set_mdic().
 */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

/* Per-register access rights for the PHY, consulted by set_mdic(). */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS]      = PHY_R,  [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1]         = PHY_R,  [M88E1000_PHY_SPEC_CTRL]     = PHY_RW,
    [PHY_CTRL]        = PHY_RW, [PHY_1000T_CTRL]             = PHY_RW,
    [PHY_LP_ABILITY]  = PHY_R,  [PHY_1000T_STATUS]           = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR]       = PHY_R,
    [PHY_ID2]         = PHY_R,  [M88E1000_PHY_SPEC_STATUS]   = PHY_R,
    [PHY_AUTONEG_EXP] = PHY_R,
};
/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
/* Power-on PHY register defaults (PHY_ID2 is filled in at reset time). */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL]   = MII_CR_SPEED_SELECT_MSB |
                   MII_CR_FULL_DUPLEX |
                   MII_CR_AUTO_NEG_EN,

    [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
                   MII_SR_LINK_STATUS |   /* link initially up */
                   MII_SR_AUTONEG_CAPS |
                   /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
                   MII_SR_PREAMBLE_SUPPRESS |
                   MII_SR_EXTENDED_STATUS |
                   MII_SR_10T_HD_CAPS |
                   MII_SR_10T_FD_CAPS |
                   MII_SR_100X_HD_CAPS |
                   MII_SR_100X_FD_CAPS,

    [PHY_ID1] = 0x141,
    /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,
    [PHY_1000T_CTRL] = 0x0e00,
    [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};
/* Power-on defaults for the MAC registers that start non-zero. */
static const uint32_t mac_reg_init[] = {
    [PBA]     = 0x00100030,
    [LEDCTL]  = 0x602,
    [CTRL]    = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS]  = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC]    = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
  220. /* Helper function, *curr == 0 means the value is not set */
  221. static inline void
  222. mit_update_delay(uint32_t *curr, uint32_t value)
  223. {
  224. if (value && (*curr == 0 || value < *curr)) {
  225. *curr = value;
  226. }
  227. }
/*
 * Update ICR/ICS with @val and (re)compute the level of the legacy
 * interrupt pin, honouring the interrupt-mitigation timer when enabled.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    /* Only causes that are both raised and unmasked can drive the pin. */
    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential raising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (chkflag(MIT)) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                /* *4 converts 1024ns RADV/TADV units to 256ns ITR units. */
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            /*
             * According to e1000 SPEC, the Ethernet controller guarantees
             * a maximum observable interrupt rate of 7813 interrupts/sec.
             * Thus if mit_delay < 500 then the delay should be set to the
             * minimum delay possible which is 500.
             */
            mit_delay = (mit_delay < 500) ? 500 : mit_delay;

            s->mit_timer_on = 1;
            timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      mit_delay * 256);
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
  289. static void
  290. e1000_mit_timer(void *opaque)
  291. {
  292. E1000State *s = opaque;
  293. s->mit_timer_on = 0;
  294. /* Call set_interrupt_cause to update the irq level (if necessary). */
  295. set_interrupt_cause(s, 0, s->mac_reg[ICR]);
  296. }
  297. static void
  298. set_ics(E1000State *s, int index, uint32_t val)
  299. {
  300. DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
  301. s->mac_reg[IMS]);
  302. set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
  303. }
  304. static void
  305. e1000_autoneg_timer(void *opaque)
  306. {
  307. E1000State *s = opaque;
  308. if (!qemu_get_queue(s->nic)->link_down) {
  309. e1000_autoneg_done(s);
  310. set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
  311. }
  312. }
/*
 * Reset handler: restore timers, mitigation state, PHY/MAC registers and
 * the transmit buffer to their power-on defaults.
 */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_del(d->flush_queue_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    /* The init tables only cover a prefix of each array; zero the rest. */
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    /* The PHY ID differs per emulated device model. */
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    /* Reflect the backend's current link state in the registers. */
    if (qemu_get_queue(d->nic)->link_down) {
        e1000x_update_regs_on_link_down(d->mac_reg, d->phy_reg);
    }

    e1000x_reset_mac_addr(d->nic, d->mac_reg, macaddr);
}
  336. static void
  337. set_ctrl(E1000State *s, int index, uint32_t val)
  338. {
  339. /* RST is self clearing */
  340. s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
  341. }
  342. static void
  343. e1000_flush_queue_timer(void *opaque)
  344. {
  345. E1000State *s = opaque;
  346. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  347. }
  348. static void
  349. set_rx_control(E1000State *s, int index, uint32_t val)
  350. {
  351. s->mac_reg[RCTL] = val;
  352. s->rxbuf_size = e1000x_rxbufsize(val);
  353. s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
  354. DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
  355. s->mac_reg[RCTL]);
  356. timer_mod(s->flush_queue_timer,
  357. qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
  358. }
/*
 * MDIC write handler: perform the requested MDIO read or write against
 * the emulated PHY (only PHY address 1 exists), latch the completed
 * command and optionally raise the MDAC interrupt.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* Clear the data field, then merge in the PHY register value. */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* Registers with side effects go through their write handler. */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
/*
 * EECD read handler: emulate the serial EEPROM's data-out (DO) line.
 * During a read transaction the next output bit comes from eeprom_data,
 * word bitnum_out>>4, bits sent MSB-first; outside a read DO reads 1.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
/*
 * EECD write handler: bit-bang interface to the Microwire serial EEPROM.
 * Tracks chip-select (CS) and clock (SK) edges, shifts command/address
 * bits in from DI, and arms the output bit counter once a complete
 * 9-bit sequence (start bit + 3-bit opcode + 6-bit address) arrives.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val)) {            /* CS inactive; nothing to do */
        return;
    }
    if (E1000_EECD_CS & (val ^ oldval)) {    /* CS rise edge; reset state */
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
        return;
    }
    if (!(E1000_EECD_SK & val)) {            /* falling edge */
        s->eecd_state.bitnum_out++;          /* advance to next output bit */
        return;
    }
    /* Rising clock edge: sample DI into the input shift register. */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* Full command received: position the output counter just before
         * the addressed 16-bit word (addr << 4), compensating for the
         * falling-edge increment above, and check for the READ opcode. */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
/*
 * EERD read handler (the register-based EEPROM interface): once a read
 * has been started, return the register with DONE set and the addressed
 * EEPROM word in the data field; out-of-range addresses complete with
 * no data.
 */
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r);
}
/*
 * Compute an Internet checksum over data[css .. n-1] and store it
 * big-endian at data[sloc].  A non-zero @cse gives the inclusive index
 * of the last byte to cover.  Offsets come from guest descriptors.
 * NOTE(review): assumes n > 0 — `n-1` would wrap for n == 0; confirm
 * callers never pass an empty buffer.
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    /* Only write the checksum if its location lies inside the data. */
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish_nozero(sum));
    }
}
  459. static inline void
  460. inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
  461. {
  462. if (!memcmp(arr, bcast, sizeof bcast)) {
  463. e1000x_inc_reg_if_not_full(s->mac_reg, BPTC);
  464. } else if (arr[0] & 1) {
  465. e1000x_inc_reg_if_not_full(s->mac_reg, MPTC);
  466. }
  467. }
  468. static void
  469. e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
  470. {
  471. static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
  472. PTC1023, PTC1522 };
  473. NetClientState *nc = qemu_get_queue(s->nic);
  474. if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
  475. nc->info->receive(nc, buf, size);
  476. } else {
  477. qemu_send_packet(nc, buf, size);
  478. }
  479. inc_tx_bcast_or_mcast_count(s, buf);
  480. e1000x_increase_size_stats(s->mac_reg, PTCregs, size);
  481. }
/*
 * Transmit one segment of the packet accumulated in s->tx: apply TSO
 * header fixups (IP length/id, TCP sequence/flags, pseudo-header) and
 * any requested checksum offloads, optionally insert the VLAN tag, then
 * send the frame and update transmit statistics.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len;
    unsigned int frames = s->tx.tso_frames, css, sofar;
    struct e1000_tx *tp = &s->tx;
    struct e1000x_txd_props *props = tp->cptse ? &tp->tso_props : &tp->props;

    if (tp->cptse) {
        css = props->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (props->ip) {    /* IPv4 */
            /* Patch total length; advance IP identification per segment. */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     lduw_be_p(tp->data + css + 4) + frames);
        } else {         /* IPv6 */
            stw_be_p(tp->data+css+4, tp->size - css);
        }
        css = props->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", props->tcp, css, len);
        if (props->tcp) {
            /* Advance sequence number past the payload already emitted. */
            sofar = frames * props->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (props->paylen - sofar > props->mss) {
                /* Not the final segment: suppress PSH and FIN. */
                tp->data[css + 13] &= ~9;    /* PSH, FIN */
            } else if (frames) {
                e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC);
            }
        } else {    /* UDP */
            stw_be_p(tp->data+css+4, len);
        }
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            void *sp = tp->data + props->tucso;

            phsum = lduw_be_p(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
        putsum(tp->data, tp->size, props->tucso, props->tucss, props->tucse);
    }
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM) {
        putsum(tp->data, tp->size, props->ipcso, props->ipcss, props->ipcse);
    }
    if (tp->vlan_needed) {
        /* Rebuild the frame with the 802.1Q tag after the MAC addresses;
         * tp->vlan immediately precedes tp->data in struct e1000_tx, so
         * the tagged frame starts 4 bytes earlier at tp->vlan. */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else {
        e1000_send_packet(s, tp->data, tp->size);
    }

    e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
    e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size);
    /* Good-packet counters mirror the total counters in this model. */
    s->mac_reg[GPTC] = s->mac_reg[TPT];
    s->mac_reg[GOTCL] = s->mac_reg[TOTL];
    s->mac_reg[GOTCH] = s->mac_reg[TOTH];
}
/*
 * Consume one TX descriptor: context descriptors update the offload
 * context; data and legacy descriptors append their buffer to the packet
 * being assembled in s->tx (segmenting on the fly for TSO) and, on EOP,
 * transmit it and reset the per-packet state.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
        if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) {
            e1000x_read_tx_ctx_descr(xp, &tp->tso_props);
            s->use_tso_for_migration = 1;
            tp->tso_frames = 0;
        } else {
            e1000x_read_tx_ctx_descr(xp, &tp->props);
            s->use_tso_for_migration = 0;
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            /* First descriptor of the packet carries the POPTS bits. */
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        /* Build the 802.1Q tag that xmit_seg() will insert. */
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->cptse) {
        /* TSO: fill the buffer up to header + mss bytes, emitting one
         * segment each time it reaches that size (msh). */
        msh = tp->tso_props.hdr_len + tp->tso_props.mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->tso_props.hdr_len
                && tp->size < tp->tso_props.hdr_len) {
                /* The protocol header just became complete: save a copy. */
                memmove(tp->header, tp->data, tp->tso_props.hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                /* Seed the next segment with the prototype header. */
                memmove(tp->data, tp->header, tp->tso_props.hdr_len);
                tp->size = tp->tso_props.hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* A TSO packet that never got past its header produces nothing. */
    if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
/*
 * If the descriptor requested status reporting (RS/RPS), set the DD bit
 * in its status and DMA the upper half back to guest memory.  Returns
 * the interrupt cause to raise (TXDW), or 0 when no report was asked.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    /* Report "descriptor done"; clear the error/underrun status bits. */
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
  638. static uint64_t tx_desc_base(E1000State *s)
  639. {
  640. uint64_t bah = s->mac_reg[TDBAH];
  641. uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
  642. return (bah << 32) + bal;
  643. }
/*
 * Drain the TX descriptor ring: process descriptors from TDH up to TDT,
 * transmitting completed packets, then raise the accumulated interrupt
 * causes.  Bails out defensively if guest-supplied TDT/TDLEN make the
 * ring wrap back to the starting point.
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* Advance TDH, wrapping at the end of the ring. */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start ||
            tdh_start >= s->mac_reg[TDLEN] / sizeof(desc)) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
/*
 * RX filtering: return nonzero if the frame in @buf should be accepted.
 * Applies the VLAN filter table (when enabled), then the unicast-
 * promiscuous, multicast-promiscuous and broadcast-accept RCTL bits,
 * finally falling back to the group (RA/MTA) filter registers.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    uint32_t rctl = s->mac_reg[RCTL];
    int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1);

    if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
        e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
        /* VLAN id lives right after the two 6-byte MAC addresses + TPID. */
        uint16_t vid = lduw_be_p(buf + 14);
        uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) +
                                 ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */
        return 1;
    }

    if (ismcast && (rctl & E1000_RCTL_MPE)) {          /* promiscuous mcast */
        e1000x_inc_reg_if_not_full(s->mac_reg, MPRC);
        return 1;
    }

    if (isbcast && (rctl & E1000_RCTL_BAM)) {          /* broadcast enabled */
        e1000x_inc_reg_if_not_full(s->mac_reg, BPRC);
        return 1;
    }

    return e1000x_rx_group_filter(s->mac_reg, buf);
}
  706. static void
  707. e1000_set_link_status(NetClientState *nc)
  708. {
  709. E1000State *s = qemu_get_nic_opaque(nc);
  710. uint32_t old_status = s->mac_reg[STATUS];
  711. if (nc->link_down) {
  712. e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg);
  713. } else {
  714. if (have_autoneg(s) &&
  715. !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
  716. e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
  717. } else {
  718. e1000_link_up(s);
  719. }
  720. }
  721. if (s->mac_reg[STATUS] != old_status)
  722. set_ics(s, 0, E1000_ICR_LSC);
  723. }
  724. static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
  725. {
  726. int bufs;
  727. /* Fast-path short packets */
  728. if (total_size <= s->rxbuf_size) {
  729. return s->mac_reg[RDH] != s->mac_reg[RDT];
  730. }
  731. if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
  732. bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
  733. } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
  734. bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
  735. s->mac_reg[RDT] - s->mac_reg[RDH];
  736. } else {
  737. return false;
  738. }
  739. return total_size <= bufs * s->rxbuf_size;
  740. }
  741. static bool
  742. e1000_can_receive(NetClientState *nc)
  743. {
  744. E1000State *s = qemu_get_nic_opaque(nc);
  745. return e1000x_rx_ready(&s->parent_obj, s->mac_reg) &&
  746. e1000_has_rxbufs(s, 1) && !timer_pending(s->flush_queue_timer);
  747. }
  748. static uint64_t rx_desc_base(E1000State *s)
  749. {
  750. uint64_t bah = s->mac_reg[RDBAH];
  751. uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
  752. return (bah << 32) + bal;
  753. }
static void
e1000_receiver_overrun(E1000State *s, size_t size)
{
    /* Account a frame dropped for lack of RX buffers: bump the
     * "receive no buffers" and "missed packets" counters and raise
     * the RX overrun interrupt cause. */
    trace_e1000_receiver_overrun(size, s->mac_reg[RDH], s->mac_reg[RDT]);
    e1000x_inc_reg_if_not_full(s->mac_reg, RNBC);
    e1000x_inc_reg_if_not_full(s->mac_reg, MPC);
    set_ics(s, 0, E1000_ICS_RXO);
}
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    /*
     * Deliver one frame from the net layer into the guest's RX ring.
     * Returns the number of bytes consumed, 0 to have the net layer
     * queue the packet for a later retry, or -1 to drop it.
     */
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!e1000x_hw_rx_enabled(s->mac_reg)) {
        return -1;
    }

    /* A deferred flush is pending: ask the net layer to requeue. */
    if (timer_pending(s->flush_queue_timer)) {
        return 0;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen.
         * Linearize just the header so the filter can inspect it. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(s->mac_reg, size)) {
        return size;
    }

    /* Claim the packet (return size) even when the filter rejects it. */
    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    /* VLAN stripping: remember the tag for the descriptor's "special"
     * field and strip the 4-byte 802.1Q header from the payload by
     * sliding the MAC addresses forward over it. */
    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
        vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            /* Advance iov/iov_ofs past the stripped bytes. */
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + e1000x_fcs_len(s->mac_reg);
    if (!e1000_has_rxbufs(s, total_size)) {
        e1000_receiver_overrun(s, total_size);
        return -1;
    }

    /* Scatter the frame across as many RX descriptors as needed. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* Copy from the (possibly mid-element) iovec into the
                 * guest buffer, one iovec element at a time. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));

        /* Advance the head, wrapping at the end of the ring. */
        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start ||
            rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            e1000_receiver_overrun(s, total_size);
            return -1;
        }
    } while (desc_offset < total_size);

    e1000x_update_rx_total_stats(s->mac_reg, size, total_size);

    /* Raise RXT0, plus RXDMT0 if the number of free descriptors fell
     * below the guest-programmed minimum threshold. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
  893. static ssize_t
  894. e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
  895. {
  896. const struct iovec iov = {
  897. .iov_base = (uint8_t *)buf,
  898. .iov_len = size
  899. };
  900. return e1000_receive_iov(nc, &iov, 1);
  901. }
  902. static uint32_t
  903. mac_readreg(E1000State *s, int index)
  904. {
  905. return s->mac_reg[index];
  906. }
  907. static uint32_t
  908. mac_low4_read(E1000State *s, int index)
  909. {
  910. return s->mac_reg[index] & 0xf;
  911. }
  912. static uint32_t
  913. mac_low11_read(E1000State *s, int index)
  914. {
  915. return s->mac_reg[index] & 0x7ff;
  916. }
  917. static uint32_t
  918. mac_low13_read(E1000State *s, int index)
  919. {
  920. return s->mac_reg[index] & 0x1fff;
  921. }
  922. static uint32_t
  923. mac_low16_read(E1000State *s, int index)
  924. {
  925. return s->mac_reg[index] & 0xffff;
  926. }
  927. static uint32_t
  928. mac_icr_read(E1000State *s, int index)
  929. {
  930. uint32_t ret = s->mac_reg[ICR];
  931. DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
  932. set_interrupt_cause(s, 0, 0);
  933. return ret;
  934. }
  935. static uint32_t
  936. mac_read_clr4(E1000State *s, int index)
  937. {
  938. uint32_t ret = s->mac_reg[index];
  939. s->mac_reg[index] = 0;
  940. return ret;
  941. }
  942. static uint32_t
  943. mac_read_clr8(E1000State *s, int index)
  944. {
  945. uint32_t ret = s->mac_reg[index];
  946. s->mac_reg[index] = 0;
  947. s->mac_reg[index-1] = 0;
  948. return ret;
  949. }
  950. static void
  951. mac_writereg(E1000State *s, int index, uint32_t val)
  952. {
  953. uint32_t macaddr[2];
  954. s->mac_reg[index] = val;
  955. if (index == RA + 1) {
  956. macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
  957. macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
  958. qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
  959. }
  960. }
/* RDT write: the tail moved, so buffers may have been freed; give any
 * queued packets another chance to be delivered. */
static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

/* Generic write handler for registers that are 16 bits wide. */
static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

/* TDLEN/RDLEN write: descriptor ring length, 128-byte aligned and
 * limited to 20 bits. */
static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

/* TCTL (and TDT, see macreg_writeops) write handler: store the value
 * and kick the transmitter. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

/* ICR write: writing 1s clears the corresponding interrupt causes. */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

/* IMC write: mask (disable) the given interrupt causes, then
 * re-evaluate the interrupt line. */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

/* IMS write: unmask (enable) the given interrupt causes, then
 * re-evaluate the interrupt line. */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
#define getreg(x) [x] = mac_readreg

/* Per-register read dispatch table, indexed by (register offset >> 2).
 * NULL entries are treated as unknown/unimplemented registers by
 * e1000_mmio_read(). */
typedef uint32_t (*readops)(E1000State *, int);
static const readops macreg_readops[] = {
    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
    getreg(MANC),     getreg(MDIC),     getreg(SWSM),     getreg(STATUS),
    getreg(TORL),     getreg(TOTL),     getreg(IMS),      getreg(TCTL),
    getreg(RDH),      getreg(RDT),      getreg(VET),      getreg(ICS),
    getreg(TDBAL),    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),
    getreg(TDLEN),    getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
    getreg(TADV),     getreg(ITR),      getreg(FCRUC),    getreg(IPAV),
    getreg(WUC),      getreg(WUS),      getreg(SCC),      getreg(ECOL),
    getreg(MCC),      getreg(LATECOL),  getreg(COLC),     getreg(DC),
    getreg(TNCRS),    getreg(SEQEC),    getreg(CEXTERR),  getreg(RLEC),
    getreg(XONRXC),   getreg(XONTXC),   getreg(XOFFRXC),  getreg(XOFFTXC),
    getreg(RFC),      getreg(RJC),      getreg(RNBC),     getreg(TSCTFC),
    getreg(MGTPRC),   getreg(MGTPDC),   getreg(MGTPTC),   getreg(GORCL),
    getreg(GOTCL),

    /* 64-bit and 32-bit statistics counters are read-to-clear. */
    [TOTH]    = mac_read_clr8,      [TORH]    = mac_read_clr8,
    [GOTCH]   = mac_read_clr8,      [GORCH]   = mac_read_clr8,
    [PRC64]   = mac_read_clr4,      [PRC127]  = mac_read_clr4,
    [PRC255]  = mac_read_clr4,      [PRC511]  = mac_read_clr4,
    [PRC1023] = mac_read_clr4,      [PRC1522] = mac_read_clr4,
    [PTC64]   = mac_read_clr4,      [PTC127]  = mac_read_clr4,
    [PTC255]  = mac_read_clr4,      [PTC511]  = mac_read_clr4,
    [PTC1023] = mac_read_clr4,      [PTC1522] = mac_read_clr4,
    [GPRC]    = mac_read_clr4,      [GPTC]    = mac_read_clr4,
    [TPT]     = mac_read_clr4,      [TPR]     = mac_read_clr4,
    [RUC]     = mac_read_clr4,      [ROC]     = mac_read_clr4,
    [BPRC]    = mac_read_clr4,      [MPRC]    = mac_read_clr4,
    [TSCTC]   = mac_read_clr4,      [BPTC]    = mac_read_clr4,
    [MPTC]    = mac_read_clr4,
    [ICR]     = mac_icr_read,       [EECD]    = get_eecd,
    [EERD]    = flash_eerd_read,

    /* FIFO head/tail/size registers only implement their low bits. */
    [RDFH]    = mac_low13_read,     [RDFT]    = mac_low13_read,
    [RDFHS]   = mac_low13_read,     [RDFTS]   = mac_low13_read,
    [RDFPC]   = mac_low13_read,
    [TDFH]    = mac_low11_read,     [TDFT]    = mac_low11_read,
    [TDFHS]   = mac_low13_read,     [TDFTS]   = mac_low13_read,
    [TDFPC]   = mac_low13_read,
    [AIT]     = mac_low16_read,

    [CRCERRS ... MPC]     = &mac_readreg,
    [IP6AT ... IP6AT + 3] = &mac_readreg,    [IP4AT ... IP4AT + 6] = &mac_readreg,
    [FFLT ... FFLT + 6]   = &mac_low11_read,
    [RA ... RA + 31]      = &mac_readreg,
    [WUPM ... WUPM + 31]  = &mac_readreg,
    [MTA ... MTA + 127]   = &mac_readreg,
    [VFTA ... VFTA + 127] = &mac_readreg,
    [FFMT ... FFMT + 254] = &mac_low4_read,
    [FFVT ... FFVT + 254] = &mac_readreg,
    [PBM ... PBM + 16383] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
#define putreg(x) [x] = mac_writereg

/* Per-register write dispatch table, indexed by (register offset >> 2).
 * NULL entries are treated as unknown registers; an entry present in
 * macreg_readops but not here is read-only (see e1000_mmio_write). */
typedef void (*writeops)(E1000State *, int, uint32_t);
static const writeops macreg_writeops[] = {
    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),      putreg(FCRUC),
    putreg(TDFH),     putreg(TDFT),     putreg(TDFHS),    putreg(TDFTS),
    putreg(TDFPC),    putreg(RDFH),     putreg(RDFT),     putreg(RDFHS),
    putreg(RDFTS),    putreg(RDFPC),    putreg(IPAV),     putreg(WUC),
    putreg(WUS),      putreg(AIT),

    /* Registers whose writes trigger side effects get dedicated
     * handlers.  Note TDT shares set_tctl so a tail move kicks TX. */
    [TDLEN]  = set_dlen,   [RDLEN]  = set_dlen,   [TCTL] = set_tctl,
    [TDT]    = set_tctl,   [MDIC]   = set_mdic,   [ICS]  = set_ics,
    [TDH]    = set_16bit,  [RDH]    = set_16bit,  [RDT]  = set_rdt,
    [IMC]    = set_imc,    [IMS]    = set_ims,    [ICR]  = set_icr,
    [EECD]   = set_eecd,   [RCTL]   = set_rx_control, [CTRL] = set_ctrl,
    [RDTR]   = set_16bit,  [RADV]   = set_16bit,  [TADV] = set_16bit,
    [ITR]    = set_16bit,

    [IP6AT ... IP6AT + 3] = &mac_writereg, [IP4AT ... IP4AT + 6] = &mac_writereg,
    [FFLT ... FFLT + 6]   = &mac_writereg,
    [RA ... RA + 31]      = &mac_writereg,
    [WUPM ... WUPM + 31]  = &mac_writereg,
    [MTA ... MTA + 127]   = &mac_writereg,
    [VFTA ... VFTA + 127] = &mac_writereg,
    [FFMT ... FFMT + 254] = &mac_writereg, [FFVT ... FFVT + 254] = &mac_writereg,
    [PBM ... PBM + 16383] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };

#define markflag(x)    ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
 * f - flag bits (up to 6 possible flags)
 * n - flag needed
 * p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
    /* Mitigation registers: only accessible with the MIT compat flag. */
    [RDTR]    = markflag(MIT),    [TADV]    = markflag(MIT),
    [RADV]    = markflag(MIT),    [ITR]     = markflag(MIT),

    /* Extra MAC registers: only accessible with the MAC compat flag. */
    [IPAV]    = markflag(MAC),    [WUC]     = markflag(MAC),
    [IP6AT]   = markflag(MAC),    [IP4AT]   = markflag(MAC),
    [FFVT]    = markflag(MAC),    [WUPM]    = markflag(MAC),
    [ECOL]    = markflag(MAC),    [MCC]     = markflag(MAC),
    [DC]      = markflag(MAC),    [TNCRS]   = markflag(MAC),
    [RLEC]    = markflag(MAC),    [XONRXC]  = markflag(MAC),
    [XOFFTXC] = markflag(MAC),    [RFC]     = markflag(MAC),
    [TSCTFC]  = markflag(MAC),    [MGTPRC]  = markflag(MAC),
    [WUS]     = markflag(MAC),    [AIT]     = markflag(MAC),
    [FFLT]    = markflag(MAC),    [FFMT]    = markflag(MAC),
    [SCC]     = markflag(MAC),    [FCRUC]   = markflag(MAC),
    [LATECOL] = markflag(MAC),    [COLC]    = markflag(MAC),
    [SEQEC]   = markflag(MAC),    [CEXTERR] = markflag(MAC),
    [XONTXC]  = markflag(MAC),    [XOFFRXC] = markflag(MAC),
    [RJC]     = markflag(MAC),    [RNBC]    = markflag(MAC),
    [MGTPDC]  = markflag(MAC),    [MGTPTC]  = markflag(MAC),
    [RUC]     = markflag(MAC),    [ROC]     = markflag(MAC),
    [GORCL]   = markflag(MAC),    [GORCH]   = markflag(MAC),
    [GOTCL]   = markflag(MAC),    [GOTCH]   = markflag(MAC),
    [BPRC]    = markflag(MAC),    [MPRC]    = markflag(MAC),
    [TSCTC]   = markflag(MAC),    [PRC64]   = markflag(MAC),
    [PRC127]  = markflag(MAC),    [PRC255]  = markflag(MAC),
    [PRC511]  = markflag(MAC),    [PRC1023] = markflag(MAC),
    [PRC1522] = markflag(MAC),    [PTC64]   = markflag(MAC),
    [PTC127]  = markflag(MAC),    [PTC255]  = markflag(MAC),
    [PTC511]  = markflag(MAC),    [PTC1023] = markflag(MAC),
    [PTC1522] = markflag(MAC),    [MPTC]    = markflag(MAC),
    [BPTC]    = markflag(MAC),

    /* FIFO registers are modelled only partially. */
    [TDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [PBM]   = markflag(MAC) | MAC_ACCESS_PARTIAL,
};
  1133. static void
  1134. e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
  1135. unsigned size)
  1136. {
  1137. E1000State *s = opaque;
  1138. unsigned int index = (addr & 0x1ffff) >> 2;
  1139. if (index < NWRITEOPS && macreg_writeops[index]) {
  1140. if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
  1141. || (s->compat_flags & (mac_reg_access[index] >> 2))) {
  1142. if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
  1143. DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
  1144. "It is not fully implemented.\n", index<<2);
  1145. }
  1146. macreg_writeops[index](s, index, val);
  1147. } else { /* "flag needed" bit is set, but the flag is not active */
  1148. DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
  1149. index<<2);
  1150. }
  1151. } else if (index < NREADOPS && macreg_readops[index]) {
  1152. DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
  1153. index<<2, val);
  1154. } else {
  1155. DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
  1156. index<<2, val);
  1157. }
  1158. }
  1159. static uint64_t
  1160. e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
  1161. {
  1162. E1000State *s = opaque;
  1163. unsigned int index = (addr & 0x1ffff) >> 2;
  1164. if (index < NREADOPS && macreg_readops[index]) {
  1165. if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
  1166. || (s->compat_flags & (mac_reg_access[index] >> 2))) {
  1167. if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
  1168. DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
  1169. "It is not fully implemented.\n", index<<2);
  1170. }
  1171. return macreg_readops[index](s, index);
  1172. } else { /* "flag needed" bit is set, but the flag is not active */
  1173. DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
  1174. index<<2);
  1175. }
  1176. } else {
  1177. DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
  1178. }
  1179. return 0;
  1180. }
/* Register-space MMIO: all accesses are performed internally as
 * aligned 4-byte, little-endian operations. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  1190. static uint64_t e1000_io_read(void *opaque, hwaddr addr,
  1191. unsigned size)
  1192. {
  1193. E1000State *s = opaque;
  1194. (void)s;
  1195. return 0;
  1196. }
  1197. static void e1000_io_write(void *opaque, hwaddr addr,
  1198. uint64_t val, unsigned size)
  1199. {
  1200. E1000State *s = opaque;
  1201. (void)s;
  1202. }
/* Stub ops for the (unimplemented) legacy I/O BAR. */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* VMState field predicate: true only when loading a version 1 stream
 * (used to skip fields that were dropped in version 2). */
static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}
  1212. static int e1000_pre_save(void *opaque)
  1213. {
  1214. E1000State *s = opaque;
  1215. NetClientState *nc = qemu_get_queue(s->nic);
  1216. /*
  1217. * If link is down and auto-negotiation is supported and ongoing,
  1218. * complete auto-negotiation immediately. This allows us to look
  1219. * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
  1220. */
  1221. if (nc->link_down && have_autoneg(s)) {
  1222. s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
  1223. }
  1224. /* Decide which set of props to migrate in the main structure */
  1225. if (chkflag(TSO) || !s->use_tso_for_migration) {
  1226. /* Either we're migrating with the extra subsection, in which
  1227. * case the mig_props is always 'props' OR
  1228. * we've not got the subsection, but 'props' was the last
  1229. * updated.
  1230. */
  1231. s->mig_props = s->tx.props;
  1232. } else {
  1233. /* We're not using the subsection, and 'tso_props' was
  1234. * the last updated.
  1235. */
  1236. s->mig_props = s->tx.tso_props;
  1237. }
  1238. return 0;
  1239. }
static int e1000_post_load(void *opaque, int version_id)
{
    /* Rebuild runtime state that is not (or cannot be) migrated directly. */
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* Without the mitigation compat flag the mitigation registers must
     * be zero on the destination. */
    if (!chkflag(MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = true;
    /* Arm the mitigation timer to fire almost immediately so pending
     * interrupt state gets re-evaluated on the destination. */
    timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (have_autoneg(s) &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    s->tx.props = s->mig_props;
    if (!s->received_tx_tso) {
        /* We received only one set of offload data (tx.props)
         * and haven't got tx.tso_props. The best we can do
         * is dupe the data.
         */
        s->tx.tso_props = s->mig_props;
    }
    return 0;
}
/* Record that the tx_tso subsection arrived, so e1000_post_load() will
 * not clone tx.props over tx.tso_props. */
static int e1000_tx_tso_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;

    s->received_tx_tso = true;
    return 0;
}

/* Subsection predicate: migrate mitigation state only when the MIT
 * compat flag is enabled. */
static bool e1000_mit_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MIT);
}

/* Subsection predicate: migrate the full MAC register array only when
 * the MAC compat flag is enabled. */
static bool e1000_full_mac_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MAC);
}

/* Subsection predicate: migrate TSO offload props only when the TSO
 * compat flag is enabled. */
static bool e1000_tso_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(TSO);
}
/* Optional subsection: interrupt-mitigation registers and IRQ level. */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_mit_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};

/* Optional subsection: the complete 0x8000-entry MAC register array
 * (only migrated with the extra_mac_registers compat flag). */
static const VMStateDescription vmstate_e1000_full_mac_state = {
    .name = "e1000/full_mac_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_full_mac_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
        VMSTATE_END_OF_LIST()
    }
};

/* Optional subsection: the TSO offload context (tx.tso_props). */
static const VMStateDescription vmstate_e1000_tx_tso_state = {
    .name = "e1000/tx_tso_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_tso_state_needed,
    .post_load = e1000_tx_tso_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(tx.tso_props.ipcss, E1000State),
        VMSTATE_UINT8(tx.tso_props.ipcso, E1000State),
        VMSTATE_UINT16(tx.tso_props.ipcse, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucss, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucso, E1000State),
        VMSTATE_UINT16(tx.tso_props.tucse, E1000State),
        VMSTATE_UINT32(tx.tso_props.paylen, E1000State),
        VMSTATE_UINT8(tx.tso_props.hdr_len, E1000State),
        VMSTATE_UINT16(tx.tso_props.mss, E1000State),
        VMSTATE_INT8(tx.tso_props.ip, E1000State),
        VMSTATE_INT8(tx.tso_props.tcp, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
/* Main migration description: EEPROM state, offload context, PHY
 * registers and the individually-migrated MAC registers. */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        /* Offload props selected by e1000_pre_save(). */
        VMSTATE_UINT8(mig_props.ipcss, E1000State),
        VMSTATE_UINT8(mig_props.ipcso, E1000State),
        VMSTATE_UINT16(mig_props.ipcse, E1000State),
        VMSTATE_UINT8(mig_props.tucss, E1000State),
        VMSTATE_UINT8(mig_props.tucso, E1000State),
        VMSTATE_UINT16(mig_props.tucse, E1000State),
        VMSTATE_UINT32(mig_props.paylen, E1000State),
        VMSTATE_UINT8(mig_props.hdr_len, E1000State),
        VMSTATE_UINT16(mig_props.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(mig_props.ip, E1000State),
        VMSTATE_INT8(mig_props.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        /* Individually migrated MAC registers (historical format kept
         * for compatibility; the full array may follow as a subsection). */
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_e1000_mit_state,
        &vmstate_e1000_full_mac_state,
        &vmstate_e1000_tx_tso_state,
        NULL
    }
};
/*
 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
 * Note: A valid DevId will be inserted during pci_e1000_realize().
 * The configured MAC address is likewise inserted at realize time by
 * e1000x_core_prepare_eeprom() (see pci_e1000_realize).
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};
/* PCI interface */

static void
e1000_mmio_setup(E1000State *d)
{
    /* Create the MMIO and I/O BAR regions.  MMIO write coalescing is
     * enabled over the whole register space except for the registers
     * listed in excluded_regs, so writes to those are delivered to the
     * device immediately rather than batched. */
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    /* Coalesce each 4-byte-aligned gap between consecutive excluded
     * registers; PNPMMIO_SIZE terminates the list. */
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
  1453. static void
  1454. pci_e1000_uninit(PCIDevice *dev)
  1455. {
  1456. E1000State *d = E1000(dev);
  1457. timer_del(d->autoneg_timer);
  1458. timer_free(d->autoneg_timer);
  1459. timer_del(d->mit_timer);
  1460. timer_free(d->mit_timer);
  1461. timer_del(d->flush_queue_timer);
  1462. timer_free(d->flush_queue_timer);
  1463. qemu_del_nic(d->nic);
  1464. }
/* Callbacks connecting this device model to the QEMU net layer. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};
  1473. static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
  1474. uint32_t val, int len)
  1475. {
  1476. E1000State *s = E1000(pci_dev);
  1477. pci_default_write_config(pci_dev, address, val, len);
  1478. if (range_covers_byte(address, len, PCI_COMMAND) &&
  1479. (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
  1480. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  1481. }
  1482. }
  1483. static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
  1484. {
  1485. DeviceState *dev = DEVICE(pci_dev);
  1486. E1000State *d = E1000(pci_dev);
  1487. uint8_t *pci_conf;
  1488. uint8_t *macaddr;
  1489. pci_dev->config_write = e1000_write_config;
  1490. pci_conf = pci_dev->config;
  1491. /* TODO: RST# value should be 0, PCI spec 6.2.4 */
  1492. pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
  1493. pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
  1494. e1000_mmio_setup(d);
  1495. pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
  1496. pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
  1497. qemu_macaddr_default_if_unset(&d->conf.macaddr);
  1498. macaddr = d->conf.macaddr.a;
  1499. e1000x_core_prepare_eeprom(d->eeprom_data,
  1500. e1000_eeprom_template,
  1501. sizeof(e1000_eeprom_template),
  1502. PCI_DEVICE_GET_CLASS(pci_dev)->device_id,
  1503. macaddr);
  1504. d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
  1505. object_get_typename(OBJECT(d)), dev->id, d);
  1506. qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
  1507. d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
  1508. d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
  1509. d->flush_queue_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
  1510. e1000_flush_queue_timer, d);
  1511. }
  1512. static void qdev_e1000_reset(DeviceState *dev)
  1513. {
  1514. E1000State *d = E1000(dev);
  1515. e1000_reset(d);
  1516. }
/*
 * User-visible qdev properties.  The boolean feature bits live in
 * compat_flags and all default to enabled; NOTE(review): presumably
 * machine compat code clears them for migration compatibility —
 * confirm against the machine type definitions.
 */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_BIT("extra_mac_registers", E1000State,
                    compat_flags, E1000_FLAG_MAC_BIT, true),
    DEFINE_PROP_BIT("migrate_tso_props", E1000State,
                    compat_flags, E1000_FLAG_TSO_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Per-variant identification data, handed to e1000_class_init() as
 * class_data (see e1000_devices[] below).
 */
typedef struct E1000Info {
    const char *name;    /* QOM type name of the concrete device */
    uint16_t device_id;  /* PCI device ID */
    uint8_t revision;    /* PCI revision ID */
    uint16_t phy_id2;    /* value reported in the PHY ID2 register */
} E1000Info;
  1535. static void e1000_class_init(ObjectClass *klass, void *data)
  1536. {
  1537. DeviceClass *dc = DEVICE_CLASS(klass);
  1538. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  1539. E1000BaseClass *e = E1000_CLASS(klass);
  1540. const E1000Info *info = data;
  1541. k->realize = pci_e1000_realize;
  1542. k->exit = pci_e1000_uninit;
  1543. k->romfile = "efi-e1000.rom";
  1544. k->vendor_id = PCI_VENDOR_ID_INTEL;
  1545. k->device_id = info->device_id;
  1546. k->revision = info->revision;
  1547. e->phy_id2 = info->phy_id2;
  1548. k->class_id = PCI_CLASS_NETWORK_ETHERNET;
  1549. set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
  1550. dc->desc = "Intel Gigabit Ethernet";
  1551. dc->reset = qdev_e1000_reset;
  1552. dc->vmsd = &vmstate_e1000;
  1553. device_class_set_props(dc, e1000_properties);
  1554. }
  1555. static void e1000_instance_init(Object *obj)
  1556. {
  1557. E1000State *n = E1000(obj);
  1558. device_add_bootindex_property(obj, &n->conf.bootindex,
  1559. "bootindex", "/ethernet-phy@0",
  1560. DEVICE(n));
  1561. }
/*
 * Abstract QOM base type shared by all variants; the concrete device
 * models are registered from e1000_devices[] in
 * e1000_register_types().
 */
static const TypeInfo e1000_base_info = {
    .name = TYPE_E1000_BASE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size = sizeof(E1000BaseClass),
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
/* One entry per emulated controller variant. */
static const E1000Info e1000_devices[] = {
    {
        /* Default model: 82540EM (the classic "e1000"). */
        .name = "e1000",
        .device_id = E1000_DEV_ID_82540EM,
        .revision = 0x03,
        .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        /* 82544GC with copper PHY. */
        .name = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision = 0x03,
        .phy_id2 = E1000_PHY_ID2_82544x,
    },
    {
        /* 82545EM with copper PHY. */
        .name = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision = 0x03,
        .phy_id2 = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
  1594. static void e1000_register_types(void)
  1595. {
  1596. int i;
  1597. type_register_static(&e1000_base_info);
  1598. for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
  1599. const E1000Info *info = &e1000_devices[i];
  1600. TypeInfo type_info = {};
  1601. type_info.name = info->name;
  1602. type_info.parent = TYPE_E1000_BASE;
  1603. type_info.class_data = (void *)info;
  1604. type_info.class_init = e1000_class_init;
  1605. type_register(&type_info);
  1606. }
  1607. }
  1608. type_init(e1000_register_types)