2
0

e1000.c 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831
  1. /*
  2. * QEMU e1000 emulation
  3. *
  4. * Software developer's manual:
  5. * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
  6. *
  7. * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
  8. * Copyright (c) 2008 Qumranet
  9. * Based on work done by:
  10. * Copyright (c) 2007 Dan Aloni
  11. * Copyright (c) 2004 Antony T Curtis
  12. *
  13. * This library is free software; you can redistribute it and/or
  14. * modify it under the terms of the GNU Lesser General Public
  15. * License as published by the Free Software Foundation; either
  16. * version 2 of the License, or (at your option) any later version.
  17. *
  18. * This library is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. * Lesser General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU Lesser General Public
  24. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  25. */
  26. #include "qemu/osdep.h"
  27. #include "hw/pci/pci.h"
  28. #include "hw/qdev-properties.h"
  29. #include "migration/vmstate.h"
  30. #include "net/net.h"
  31. #include "net/checksum.h"
  32. #include "sysemu/sysemu.h"
  33. #include "sysemu/dma.h"
  34. #include "qemu/iov.h"
  35. #include "qemu/module.h"
  36. #include "qemu/range.h"
  37. #include "e1000x_common.h"
  38. #include "trace.h"
/* Ethernet broadcast destination MAC, used both for RX filtering and for
 * classifying transmitted frames in the broadcast statistics counters. */
static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* #define E1000_DEBUG */

#ifdef E1000_DEBUG
/* One bit per debug category; combine via DBGBIT() into debugflags. */
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

/* Emit to stderr only when the category's bit is set in debugflags. */
#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE  0x40     /* size of the I/O BAR */
#define PNPMMIO_SIZE 0x20000  /* size of the MMIO BAR */
#define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */

/* 14-byte Ethernet header plus an optional 4-byte 802.1Q VLAN tag. */
#define MAXIMUM_ETHERNET_HDR_LEN (14+4)

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
 *  Others never tested
 */
/* Per-device state for the emulated e1000 NIC. */
typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;      /* MMIO register BAR */
    MemoryRegion io;        /* I/O port BAR */

    uint32_t mac_reg[0x8000];   /* MAC register file */
    uint16_t phy_reg[0x20];     /* PHY (MII) register file */
    uint16_t eeprom_data[64];   /* EEPROM contents, 16-bit words */

    uint32_t rxbuf_size;        /* derived from RCTL in set_rx_control() */
    uint32_t rxbuf_min_shift;   /* RDMTS field of RCTL, plus one */

    /* TX state accumulated across descriptors until EOP. */
    struct e1000_tx {
        unsigned char header[256];      /* saved packet header (TSO) */
        unsigned char vlan_header[4];   /* 802.1Q tag to be inserted */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];    /* assembled frame payload */
        uint16_t size;                  /* bytes accumulated in data[] */
        unsigned char vlan_needed;      /* insert vlan_header on send */
        unsigned char sum_needed;       /* E1000_TXD_POPTS_* checksum bits */
        bool cptse;                     /* current packet uses TSO context */
        e1000x_txd_props props;         /* context from non-TSE descriptor */
        e1000x_txd_props tso_props;     /* context from TSE descriptor */
        uint16_t tso_frames;            /* TSO segments emitted so far */
    } tx;

    /* Microwire EEPROM bit-banging state, driven through EECD. */
    struct {
        uint32_t val_in;    /* shifted in from guest driver */
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;   /* Mitigation timer. */
    bool mit_timer_on;      /* Mitigation timer is running. */
    bool mit_irq_level;     /* Tracks interrupt pin level. */
    uint32_t mit_ide;       /* Tracks E1000_TXD_CMD_IDE bit. */

    QEMUTimer *flush_queue_timer;

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_TSO_BIT 3
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
#define E1000_FLAG_TSO (1 << E1000_FLAG_TSO_BIT)
    uint32_t compat_flags;
    bool received_tx_tso;
    bool use_tso_for_migration;     /* selects which txd props to migrate */
    e1000x_txd_props mig_props;
} E1000State;

/* Test a compat flag; expects a local variable `s` of type E1000State *. */
#define chkflag(x) (s->compat_flags & E1000_FLAG_##x)
/* Class-level data: only the PHY ID2 value differs between HW models. */
typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;   /* per-device-id PHY ID2, applied in e1000_reset() */
} E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

/* QOM cast helpers for the e1000 device family. */
#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)
/*
 * Reflect a link-up transition into the MAC/PHY registers and kick the
 * net layer, since packets may have queued up while the link was down.
 */
static void
e1000_link_up(E1000State *s)
{
    e1000x_update_regs_on_link_up(s->mac_reg, s->phy_reg);

    /* E1000_STATUS_LU is tested by e1000_can_receive() */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
/*
 * Complete auto-negotiation: update MAC/PHY registers accordingly and
 * flush any packets queued while negotiation was in progress.
 */
static void
e1000_autoneg_done(E1000State *s)
{
    e1000x_update_regs_on_autoneg_done(s->mac_reg, s->phy_reg);

    /* E1000_STATUS_LU is tested by e1000_can_receive() */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
  148. static bool
  149. have_autoneg(E1000State *s)
  150. {
  151. return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
  152. }
/*
 * Write handler for the PHY control register (PHY_CTRL).  Self-clearing
 * command bits are masked out before the value is stored; a RESTART_AUTO_NEG
 * request kicks off the emulated auto-negotiation timer when enabled.
 */
static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
    s->phy_reg[PHY_CTRL] = val & ~(0x3f |
                                   MII_CR_RESET |
                                   MII_CR_RESTART_AUTO_NEG);

    /*
     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
     * migrate during auto negotiation, after migration the link will be
     * down.
     */
    if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
        e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
    }
}
/* Per-register PHY write handlers; registers without an entry are written
 * directly to phy_reg[] by set_mdic(). */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

/* Access capability of each PHY register, enforced by set_mdic(). */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS]      = PHY_R,     [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1]         = PHY_R,     [M88E1000_PHY_SPEC_CTRL]     = PHY_RW,
    [PHY_CTRL]        = PHY_RW,    [PHY_1000T_CTRL]             = PHY_RW,
    [PHY_LP_ABILITY]  = PHY_R,     [PHY_1000T_STATUS]           = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,    [M88E1000_RX_ERR_CNTR]       = PHY_R,
    [PHY_ID2]         = PHY_R,     [M88E1000_PHY_SPEC_STATUS]   = PHY_R,
    [PHY_AUTONEG_EXP] = PHY_R,
};

/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL]   = MII_CR_SPEED_SELECT_MSB |
                   MII_CR_FULL_DUPLEX |
                   MII_CR_AUTO_NEG_EN,

    [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
                   MII_SR_LINK_STATUS |   /* link initially up */
                   MII_SR_AUTONEG_CAPS |
                   /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
                   MII_SR_PREAMBLE_SUPPRESS |
                   MII_SR_EXTENDED_STATUS |
                   MII_SR_10T_HD_CAPS |
                   MII_SR_10T_FD_CAPS |
                   MII_SR_100X_HD_CAPS |
                   MII_SR_100X_FD_CAPS,

    [PHY_ID1] = 0x141,
    /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,
    [PHY_1000T_CTRL] = 0x0e00,
    [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};

/* MAC register power-on defaults, applied by e1000_reset(). */
static const uint32_t mac_reg_init[] = {
    [PBA]     = 0x00100030,
    [LEDCTL]  = 0x602,
    [CTRL]    = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS]  = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC]    = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
  221. /* Helper function, *curr == 0 means the value is not set */
  222. static inline void
  223. mit_update_delay(uint32_t *curr, uint32_t value)
  224. {
  225. if (value && (*curr == 0 || value < *curr)) {
  226. *curr = value;
  227. }
  228. }
/*
 * Central interrupt-cause update: store @val into ICR/ICS, then raise or
 * lower the PCI interrupt line according to IMS & ICR, applying the
 * partial interrupt-mitigation emulation (RADV/TADV/ITR) on rising edges.
 */
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential rising edge.  We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR).  RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (chkflag(MIT)) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            /*
             * According to e1000 SPEC, the Ethernet controller guarantees
             * a maximum observable interrupt rate of 7813 interrupts/sec.
             * Thus if mit_delay < 500 then the delay should be set to the
             * minimum delay possible which is 500.
             */
            mit_delay = (mit_delay < 500) ? 500 : mit_delay;

            s->mit_timer_on = 1;
            /* mit_delay is in 256ns units; convert to ns for the timer. */
            timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      mit_delay * 256);
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
/*
 * Mitigation window expired: re-evaluate ICR so that any interrupt that
 * was postponed during the window is delivered now.
 */
static void
e1000_mit_timer(void *opaque)
{
    E1000State *s = opaque;

    s->mit_timer_on = 0;
    /* Call set_interrupt_cause to update the irq level (if necessary). */
    set_interrupt_cause(s, 0, s->mac_reg[ICR]);
}
/*
 * Write handler for ICS: OR the new cause bits into the already-pending
 * ones and let set_interrupt_cause() decide whether to raise the IRQ.
 */
static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
        s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
/*
 * Auto-negotiation timer callback: if the backend link is still up,
 * finish negotiation and notify the guest of the link status change.
 */
static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    if (!qemu_get_queue(s->nic)->link_down) {
        e1000_autoneg_done(s);
        set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
    }
}
/*
 * Device reset: stop all timers, clear interrupt-mitigation and TX state,
 * and restore PHY/MAC registers to their power-on defaults.  The PHY ID2
 * value comes from the per-model class data.
 */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_del(d->flush_queue_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    /* Keep the registers consistent with the backend's link state. */
    if (qemu_get_queue(d->nic)->link_down) {
        e1000x_update_regs_on_link_down(d->mac_reg, d->phy_reg);
    }

    e1000x_reset_mac_addr(d->nic, d->mac_reg, macaddr);
}
/* Write handler for CTRL; the reset request bit is never latched. */
static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}
/* Deferred flush of queued RX packets, armed by set_rx_control(). */
static void
e1000_flush_queue_timer(void *opaque)
{
    E1000State *s = opaque;

    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}
/*
 * Write handler for RCTL: cache the derived RX buffer size and minimum
 * descriptor threshold, then schedule a delayed flush of queued packets
 * (gives the guest time to post RX descriptors after enabling RX).
 */
static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = e1000x_rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    timer_mod(s->flush_queue_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}
/*
 * Write handler for MDIC, the MII management interface.  Decodes the PHY
 * address, register and operation from @val, performs the PHY register
 * read/write (honouring phy_regcap access rights and per-register write
 * hooks), and reports completion — optionally raising the MDAC interrupt.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* clear the data field, then merge in the PHY register value */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
/*
 * Read handler for EECD.  Reflects the last guest-written control bits and
 * drives the DO (data-out) line: during a Microwire read, the current output
 * bit is taken from eeprom_data, MSB first within each 16-bit word
 * (hence the "& 0xf) ^ 0xf" bit-index flip); outside a read, DO idles high.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
/*
 * Write handler for EECD: emulate the bit-banged Microwire EEPROM protocol.
 * Chip-select edges reset the shifter; on each rising SK clock edge a DI bit
 * is shifted in, and after 9 bits (start bit + opcode + address) a read
 * command positions bitnum_out at the addressed word.  Falling SK edges
 * advance the output bit counter consumed by get_eecd().
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val)) {            /* CS inactive; nothing to do */
        return;
    }
    if (E1000_EECD_CS & (val ^ oldval)) {    /* CS rise edge; reset state */
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
        return;
    }
    if (!(E1000_EECD_SK & val)) {            /* falling edge */
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* bitnum_out is pre-decremented: the first falling edge bumps it
         * to the first bit of the addressed 16-bit word. */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
  438. static uint32_t
  439. flash_eerd_read(E1000State *s, int x)
  440. {
  441. unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
  442. if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
  443. return (s->mac_reg[EERD]);
  444. if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
  445. return (E1000_EEPROM_RW_REG_DONE | r);
  446. return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
  447. E1000_EEPROM_RW_REG_DONE | r);
  448. }
  449. static void
  450. putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
  451. {
  452. uint32_t sum;
  453. if (cse && cse < n)
  454. n = cse + 1;
  455. if (sloc < n-1) {
  456. sum = net_checksum_add(n-css, data+css);
  457. stw_be_p(data + sloc, net_checksum_finish_nozero(sum));
  458. }
  459. }
/*
 * Bump the broadcast (BPTC) or multicast (MPTC) TX counter according to
 * the destination MAC at the start of @arr; unicast frames count nowhere.
 */
static inline void
inc_tx_bcast_or_mcast_count(E1000State *s, const unsigned char *arr)
{
    if (!memcmp(arr, bcast, sizeof bcast)) {
        e1000x_inc_reg_if_not_full(s->mac_reg, BPTC);
    } else if (arr[0] & 1) {    /* multicast bit of the destination MAC */
        e1000x_inc_reg_if_not_full(s->mac_reg, MPTC);
    }
}
/*
 * Hand a fully-assembled frame to the network backend (or loop it back to
 * our own receive path when the PHY is in loopback mode) and update the
 * TX statistics counters.
 */
static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
    inc_tx_bcast_or_mcast_count(s, buf);
    e1000x_increase_size_stats(s->mac_reg, PTCregs, size);
}
/*
 * Transmit one segment from the accumulated TX buffer.  For TSO packets the
 * IP total length / payload length, IP identification and TCP sequence
 * number are patched per segment, and PSH/FIN are cleared on all but the
 * last segment.  Requested IP/TCP-UDP checksums are then filled in, an
 * 802.1Q tag is inserted if needed, and TX statistics are updated.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len;
    unsigned int frames = s->tx.tso_frames, css, sofar;
    struct e1000_tx *tp = &s->tx;
    struct e1000x_txd_props *props = tp->cptse ? &tp->tso_props : &tp->props;

    if (tp->cptse) {
        css = props->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (props->ip) {    /* IPv4 */
            /* total length and IP identification (incremented per segment) */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     lduw_be_p(tp->data + css + 4) + frames);
        } else {            /* IPv6: payload length field */
            stw_be_p(tp->data+css+4, tp->size - css);
        }
        css = props->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", props->tcp, css, len);
        if (props->tcp) {
            sofar = frames * props->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (props->paylen - sofar > props->mss) {
                tp->data[css + 13] &= ~9;    /* PSH, FIN */
            } else if (frames) {
                /* last segment of a multi-segment TSO packet */
                e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC);
            }
        } else {    /* UDP: length field */
            stw_be_p(tp->data+css+4, len);
        }
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            void *sp = tp->data + props->tucso;

            phsum = lduw_be_p(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
        putsum(tp->data, tp->size, props->tucso, props->tucss, props->tucse);
    }
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM) {
        putsum(tp->data, tp->size, props->ipcso, props->ipcss, props->ipcse);
    }
    if (tp->vlan_needed) {
        /* Open a 4-byte gap after the MAC addresses and insert the tag;
         * vlan[] directly precedes data[] so the frame stays contiguous. */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else {
        e1000_send_packet(s, tp->data, tp->size);
    }

    e1000x_inc_reg_if_not_full(s->mac_reg, TPT);
    e1000x_grow_8reg_if_not_full(s->mac_reg, TOTL, s->tx.size);
    s->mac_reg[GPTC] = s->mac_reg[TPT];
    s->mac_reg[GOTCL] = s->mac_reg[TOTL];
    s->mac_reg[GOTCH] = s->mac_reg[TOTH];
}
/*
 * Process one TX descriptor.  Context descriptors update the cached TSO or
 * checksum properties and return; data and legacy descriptors have their
 * buffers DMA'd into the accumulation buffer.  TSO packets are segmented
 * on the fly (one xmit_seg() per MSS-sized chunk, re-prepending the saved
 * header); everything else is sent in one piece once EOP is seen.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
        if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) {
            e1000x_read_tx_ctx_descr(xp, &tp->tso_props);
            s->use_tso_for_migration = 1;
            tp->tso_frames = 0;
        } else {
            e1000x_read_tx_ctx_descr(xp, &tp->props);
            s->use_tso_for_migration = 0;
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->cptse) {
        /* msh = one full segment: protocol headers plus one MSS of payload */
        msh = tp->tso_props.hdr_len + tp->tso_props.mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->tso_props.hdr_len
                && tp->size < tp->tso_props.hdr_len) {
                /* header just became complete; save a copy for later segs */
                memmove(tp->header, tp->data, tp->tso_props.hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                /* segment full: transmit it and restart after the header */
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->tso_props.hdr_len);
                tp->size = tp->tso_props.hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* for TSO, a trailing header-only remainder is not sent again */
    if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
/*
 * If the descriptor requested status reporting (RS/RPS), write the DD
 * (descriptor done) status back to guest memory.  Returns the interrupt
 * cause bit to accumulate (TXDW) or 0 when no write-back was requested.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
  639. static uint64_t tx_desc_base(E1000State *s)
  640. {
  641. uint64_t bah = s->mac_reg[TDBAH];
  642. uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
  643. return (bah << 32) + bal;
  644. }
/*
 * TX kick: walk the descriptor ring from TDH to TDT, processing and
 * writing back each descriptor, then raise the accumulated interrupt
 * causes.  A full wrap back to the starting TDH (only possible with
 * bogus guest-programmed TDT/TDLEN) aborts the loop.
 */
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* advance TDH, wrapping at the end of the ring */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start ||
            tdh_start >= s->mac_reg[TDLEN] / sizeof(desc)) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
/*
 * Decide whether an incoming frame should be accepted.
 * Returns non-zero to accept, 0 to drop.  Checks, in order:
 * VLAN filter table (VFTA), promiscuous unicast/multicast bits,
 * broadcast acceptance, and finally the exact/group address filters.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    uint32_t rctl = s->mac_reg[RCTL];
    int isbcast = !memcmp(buf, bcast, sizeof bcast), ismcast = (buf[0] & 1);

    /* If VLAN filtering is on and this is a VLAN frame, the VID must be
     * enabled in the 4096-bit VFTA bitmap (128 x 32-bit words). */
    if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
        e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
        uint16_t vid = lduw_be_p(buf + 14);
        uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) +
                                 ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (!isbcast && !ismcast && (rctl & E1000_RCTL_UPE)) { /* promiscuous ucast */
        return 1;
    }

    if (ismcast && (rctl & E1000_RCTL_MPE)) {          /* promiscuous mcast */
        e1000x_inc_reg_if_not_full(s->mac_reg, MPRC);
        return 1;
    }

    if (isbcast && (rctl & E1000_RCTL_BAM)) {          /* broadcast enabled */
        e1000x_inc_reg_if_not_full(s->mac_reg, BPRC);
        return 1;
    }

    /* Fall back to the RA/MTA exact and group address filters. */
    return e1000x_rx_group_filter(s->mac_reg, buf);
}
/*
 * Backend link-status callback: mirror the host-side link state into
 * the emulated MAC/PHY registers and raise a Link Status Change
 * interrupt if the STATUS register actually changed.
 */
static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(s->mac_reg, s->phy_reg);
    } else {
        /* If autonegotiation is enabled and not yet complete, restart it
         * rather than forcing the link up immediately. */
        if (have_autoneg(s) &&
            !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
            e1000x_restart_autoneg(s->mac_reg, s->phy_reg, s->autoneg_timer);
        } else {
            e1000_link_up(s);
        }
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
/*
 * Check whether enough free receive descriptors are available to hold
 * total_size bytes.  Each descriptor provides s->rxbuf_size bytes.
 */
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }

    /* Count free descriptors between head and tail, accounting for
     * ring wraparound; RDH == RDT means the ring is empty of buffers. */
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
               s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }

    return total_size <= bufs * s->rxbuf_size;
}
  742. static int
  743. e1000_can_receive(NetClientState *nc)
  744. {
  745. E1000State *s = qemu_get_nic_opaque(nc);
  746. return e1000x_rx_ready(&s->parent_obj, s->mac_reg) &&
  747. e1000_has_rxbufs(s, 1) && !timer_pending(s->flush_queue_timer);
  748. }
  749. static uint64_t rx_desc_base(E1000State *s)
  750. {
  751. uint64_t bah = s->mac_reg[RDBAH];
  752. uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
  753. return (bah << 32) + bal;
  754. }
/*
 * Record a receiver overrun: bump the "no buffers" and "missed packets"
 * statistics, then raise the RX Overrun interrupt.
 */
static void
e1000_receiver_overrun(E1000State *s, size_t size)
{
    trace_e1000_receiver_overrun(size, s->mac_reg[RDH], s->mac_reg[RDT]);
    e1000x_inc_reg_if_not_full(s->mac_reg, RNBC);
    e1000x_inc_reg_if_not_full(s->mac_reg, MPC);
    /* Raise the interrupt last, after the stats are updated. */
    set_ics(s, 0, E1000_ICS_RXO);
}
/*
 * Scatter-gather receive path.  Pads runt frames, applies the address
 * filter, optionally strips the VLAN tag, then DMAs the frame into one
 * or more guest receive descriptors and raises the appropriate RX
 * interrupt causes.
 *
 * Returns the consumed size (also when the frame is silently filtered),
 * 0 to have the packet queued and retried later, or -1 on drop/overrun.
 */
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!e1000x_hw_rx_enabled(s->mac_reg)) {
        return -1;
    }

    /* A flush is scheduled; ask the net layer to queue the packet. */
    if (timer_pending(s->flush_queue_timer)) {
        return 0;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(s->mac_reg, size)) {
        return size;
    }

    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    /* VLAN stripping: save the tag into the descriptor's 'special'
     * field and close the 4-byte gap in the frame. */
    if (e1000x_vlan_enabled(s->mac_reg) &&
        e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
        vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    /* total_size includes the (possibly stripped) FCS length. */
    total_size = size + e1000x_fcs_len(s->mac_reg);
    if (!e1000_has_rxbufs(s, total_size)) {
        e1000_receiver_overrun(s, total_size);
        return -1;
    }

    /* Copy the frame into consecutive descriptors, one rxbuf_size
     * chunk per descriptor. */
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                /* Walk the iovec, DMAing into the guest buffer. */
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));

        /* Advance the head, wrapping at the end of the ring. */
        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start ||
            rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            e1000_receiver_overrun(s, total_size);
            return -1;
        }
    } while (desc_offset < total_size);

    e1000x_update_rx_total_stats(s->mac_reg, size, total_size);

    /* RXT0 always fires; add RXDMT0 if free descriptors dropped below
     * the minimum threshold configured via rxbuf_min_shift. */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
  894. static ssize_t
  895. e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
  896. {
  897. const struct iovec iov = {
  898. .iov_base = (uint8_t *)buf,
  899. .iov_len = size
  900. };
  901. return e1000_receive_iov(nc, &iov, 1);
  902. }
  903. static uint32_t
  904. mac_readreg(E1000State *s, int index)
  905. {
  906. return s->mac_reg[index];
  907. }
  908. static uint32_t
  909. mac_low4_read(E1000State *s, int index)
  910. {
  911. return s->mac_reg[index] & 0xf;
  912. }
  913. static uint32_t
  914. mac_low11_read(E1000State *s, int index)
  915. {
  916. return s->mac_reg[index] & 0x7ff;
  917. }
  918. static uint32_t
  919. mac_low13_read(E1000State *s, int index)
  920. {
  921. return s->mac_reg[index] & 0x1fff;
  922. }
  923. static uint32_t
  924. mac_low16_read(E1000State *s, int index)
  925. {
  926. return s->mac_reg[index] & 0xffff;
  927. }
/*
 * ICR read: returns the pending interrupt causes and, as a side effect,
 * clears them (read-to-clear register) and re-evaluates the IRQ line.
 */
static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    /* Clearing the cause also lowers the interrupt line if needed. */
    set_interrupt_cause(s, 0, 0);
    return ret;
}
  936. static uint32_t
  937. mac_read_clr4(E1000State *s, int index)
  938. {
  939. uint32_t ret = s->mac_reg[index];
  940. s->mac_reg[index] = 0;
  941. return ret;
  942. }
  943. static uint32_t
  944. mac_read_clr8(E1000State *s, int index)
  945. {
  946. uint32_t ret = s->mac_reg[index];
  947. s->mac_reg[index] = 0;
  948. s->mac_reg[index-1] = 0;
  949. return ret;
  950. }
  951. static void
  952. mac_writereg(E1000State *s, int index, uint32_t val)
  953. {
  954. uint32_t macaddr[2];
  955. s->mac_reg[index] = val;
  956. if (index == RA + 1) {
  957. macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
  958. macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
  959. qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
  960. }
  961. }
  962. static void
  963. set_rdt(E1000State *s, int index, uint32_t val)
  964. {
  965. s->mac_reg[index] = val & 0xffff;
  966. if (e1000_has_rxbufs(s, 1)) {
  967. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  968. }
  969. }
  970. static void
  971. set_16bit(E1000State *s, int index, uint32_t val)
  972. {
  973. s->mac_reg[index] = val & 0xffff;
  974. }
  975. static void
  976. set_dlen(E1000State *s, int index, uint32_t val)
  977. {
  978. s->mac_reg[index] = val & 0xfff80;
  979. }
/*
 * Shared write handler for TCTL and TDT: store the value, keep TDT
 * within its 16-bit width, then kick the transmit path so any newly
 * posted descriptors are processed immediately.
 */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
/*
 * ICR write: bits written as 1 clear the corresponding pending causes,
 * then the IRQ line is re-evaluated.
 */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
/*
 * IMC write: mask-clear register — written bits are removed from the
 * interrupt mask (IMS), then the IRQ line is re-evaluated.
 */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}
/*
 * IMS write: mask-set register — written bits are added to the
 * interrupt mask, then the IRQ line is re-evaluated (an already-pending
 * cause may now fire).
 */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
#define getreg(x)    [x] = mac_readreg

/*
 * Per-register read dispatch table, indexed by (MMIO offset >> 2).
 * Unlisted entries are NULL and read as "unknown register".
 */
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
    getreg(MANC),     getreg(MDIC),     getreg(SWSM),     getreg(STATUS),
    getreg(TORL),     getreg(TOTL),     getreg(IMS),      getreg(TCTL),
    getreg(RDH),      getreg(RDT),      getreg(VET),      getreg(ICS),
    getreg(TDBAL),    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),
    getreg(TDLEN),    getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
    getreg(TADV),     getreg(ITR),      getreg(FCRUC),    getreg(IPAV),
    getreg(WUC),      getreg(WUS),      getreg(SCC),      getreg(ECOL),
    getreg(MCC),      getreg(LATECOL), getreg(COLC),     getreg(DC),
    getreg(TNCRS),    getreg(SEQEC),    getreg(CEXTERR), getreg(RLEC),
    getreg(XONRXC),   getreg(XONTXC),   getreg(XOFFRXC), getreg(XOFFTXC),
    getreg(RFC),      getreg(RJC),      getreg(RNBC),     getreg(TSCTFC),
    getreg(MGTPRC),   getreg(MGTPDC),   getreg(MGTPTC),   getreg(GORCL),
    getreg(GOTCL),

    /* 64-bit statistics: reading the high half clears both halves. */
    [TOTH] = mac_read_clr8,      [TORH] = mac_read_clr8,
    [GOTCH] = mac_read_clr8,     [GORCH] = mac_read_clr8,
    /* 32-bit read-to-clear statistics counters. */
    [PRC64] = mac_read_clr4,     [PRC127] = mac_read_clr4,
    [PRC255] = mac_read_clr4,    [PRC511] = mac_read_clr4,
    [PRC1023] = mac_read_clr4,   [PRC1522] = mac_read_clr4,
    [PTC64] = mac_read_clr4,     [PTC127] = mac_read_clr4,
    [PTC255] = mac_read_clr4,    [PTC511] = mac_read_clr4,
    [PTC1023] = mac_read_clr4,   [PTC1522] = mac_read_clr4,
    [GPRC] = mac_read_clr4,      [GPTC] = mac_read_clr4,
    [TPT] = mac_read_clr4,       [TPR] = mac_read_clr4,
    [RUC] = mac_read_clr4,       [ROC] = mac_read_clr4,
    [BPRC] = mac_read_clr4,      [MPRC] = mac_read_clr4,
    [TSCTC] = mac_read_clr4,     [BPTC] = mac_read_clr4,
    [MPTC] = mac_read_clr4,
    /* Registers with read side effects or special decode. */
    [ICR] = mac_icr_read,        [EECD] = get_eecd,
    [EERD] = flash_eerd_read,
    /* Partially implemented FIFO registers: masked-width reads. */
    [RDFH] = mac_low13_read,     [RDFT] = mac_low13_read,
    [RDFHS] = mac_low13_read,    [RDFTS] = mac_low13_read,
    [RDFPC] = mac_low13_read,
    [TDFH] = mac_low11_read,     [TDFT] = mac_low11_read,
    [TDFHS] = mac_low13_read,    [TDFTS] = mac_low13_read,
    [TDFPC] = mac_low13_read,
    [AIT] = mac_low16_read,

    /* Register ranges (arrays of registers). */
    [CRCERRS ... MPC] = &mac_readreg,
    [IP6AT ... IP6AT+3] = &mac_readreg,    [IP4AT ... IP4AT+6] = &mac_readreg,
    [FFLT ... FFLT+6] = &mac_low11_read,
    [RA ... RA+31] = &mac_readreg,
    [WUPM ... WUPM+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
    [FFMT ... FFMT+254] = &mac_low4_read,
    [FFVT ... FFVT+254] = &mac_readreg,
    [PBM ... PBM+16383] = &mac_readreg,
};

enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
#define putreg(x)    [x] = mac_writereg

/*
 * Per-register write dispatch table, indexed by (MMIO offset >> 2).
 * Unlisted entries are NULL; writes to them are logged and ignored.
 */
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),      putreg(FCRUC),
    putreg(TDFH),     putreg(TDFT),     putreg(TDFHS),    putreg(TDFTS),
    putreg(TDFPC),    putreg(RDFH),     putreg(RDFT),     putreg(RDFHS),
    putreg(RDFTS),    putreg(RDFPC),    putreg(IPAV),     putreg(WUC),
    putreg(WUS),      putreg(AIT),

    /* Registers whose writes have side effects. */
    [TDLEN] = set_dlen,   [RDLEN] = set_dlen,       [TCTL] = set_tctl,
    [TDT] = set_tctl,     [MDIC] = set_mdic,        [ICS] = set_ics,
    [TDH] = set_16bit,    [RDH] = set_16bit,        [RDT] = set_rdt,
    [IMC] = set_imc,      [IMS] = set_ims,          [ICR] = set_icr,
    [EECD] = set_eecd,    [RCTL] = set_rx_control,  [CTRL] = set_ctrl,
    [RDTR] = set_16bit,   [RADV] = set_16bit,       [TADV] = set_16bit,
    [ITR] = set_16bit,

    /* Register ranges (arrays of registers). */
    [IP6AT ... IP6AT+3] = &mac_writereg, [IP4AT ... IP4AT+6] = &mac_writereg,
    [FFLT ... FFLT+6] = &mac_writereg,
    [RA ... RA+31] = &mac_writereg,
    [WUPM ... WUPM+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
    [FFMT ... FFMT+254] = &mac_writereg, [FFVT ... FFVT+254] = &mac_writereg,
    [PBM ... PBM+16383] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };

#define markflag(x)    ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
 * f - flag bits (up to 6 possible flags)
 * n - flag needed
 * p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
    /* Registers gated on the interrupt-mitigation compat flag. */
    [RDTR]    = markflag(MIT),    [TADV]    = markflag(MIT),
    [RADV]    = markflag(MIT),    [ITR]     = markflag(MIT),

    /* Registers gated on the extra-MAC-registers compat flag. */
    [IPAV]    = markflag(MAC),    [WUC]     = markflag(MAC),
    [IP6AT]   = markflag(MAC),    [IP4AT]   = markflag(MAC),
    [FFVT]    = markflag(MAC),    [WUPM]    = markflag(MAC),
    [ECOL]    = markflag(MAC),    [MCC]     = markflag(MAC),
    [DC]      = markflag(MAC),    [TNCRS]   = markflag(MAC),
    [RLEC]    = markflag(MAC),    [XONRXC]  = markflag(MAC),
    [XOFFTXC] = markflag(MAC),    [RFC]     = markflag(MAC),
    [TSCTFC]  = markflag(MAC),    [MGTPRC]  = markflag(MAC),
    [WUS]     = markflag(MAC),    [AIT]     = markflag(MAC),
    [FFLT]    = markflag(MAC),    [FFMT]    = markflag(MAC),
    [SCC]     = markflag(MAC),    [FCRUC]   = markflag(MAC),
    [LATECOL] = markflag(MAC),    [COLC]    = markflag(MAC),
    [SEQEC]   = markflag(MAC),    [CEXTERR] = markflag(MAC),
    [XONTXC]  = markflag(MAC),    [XOFFRXC] = markflag(MAC),
    [RJC]     = markflag(MAC),    [RNBC]    = markflag(MAC),
    [MGTPDC]  = markflag(MAC),    [MGTPTC]  = markflag(MAC),
    [RUC]     = markflag(MAC),    [ROC]     = markflag(MAC),
    [GORCL]   = markflag(MAC),    [GORCH]   = markflag(MAC),
    [GOTCL]   = markflag(MAC),    [GOTCH]   = markflag(MAC),
    [BPRC]    = markflag(MAC),    [MPRC]    = markflag(MAC),
    [TSCTC]   = markflag(MAC),    [PRC64]   = markflag(MAC),
    [PRC127]  = markflag(MAC),    [PRC255]  = markflag(MAC),
    [PRC511]  = markflag(MAC),    [PRC1023] = markflag(MAC),
    [PRC1522] = markflag(MAC),    [PTC64]   = markflag(MAC),
    [PTC127]  = markflag(MAC),    [PTC255]  = markflag(MAC),
    [PTC511]  = markflag(MAC),    [PTC1023] = markflag(MAC),
    [PTC1522] = markflag(MAC),    [MPTC]    = markflag(MAC),
    [BPTC]    = markflag(MAC),

    /* FIFO registers: flag-gated and only partially implemented. */
    [TDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [TDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFH]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFT]  = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFHS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFTS] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [RDFPC] = markflag(MAC) | MAC_ACCESS_PARTIAL,
    [PBM]   = markflag(MAC) | MAC_ACCESS_PARTIAL,
};
/*
 * MMIO write dispatch: map the address to a register index and invoke
 * its write handler, honoring the compat-flag gating in mac_reg_access.
 * Writes to read-only or unknown registers are logged and dropped.
 */
static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            macreg_writeops[index](s, index, val);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
               index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}
/*
 * MMIO read dispatch: map the address to a register index and invoke
 * its read handler, honoring the compat-flag gating in mac_reg_access.
 * Disabled or unknown registers read as 0.
 */
static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            return macreg_readops[index](s, index);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    }
    return 0;
}
/* MMIO BAR callbacks: the device registers are little-endian and are
 * always accessed internally as aligned 32-bit words. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  1189. static uint64_t e1000_io_read(void *opaque, hwaddr addr,
  1190. unsigned size)
  1191. {
  1192. E1000State *s = opaque;
  1193. (void)s;
  1194. return 0;
  1195. }
  1196. static void e1000_io_write(void *opaque, hwaddr addr,
  1197. uint64_t val, unsigned size)
  1198. {
  1199. E1000State *s = opaque;
  1200. (void)s;
  1201. }
/* I/O-port BAR callbacks (stubbed — see e1000_io_read/e1000_io_write). */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
  1207. static bool is_version_1(void *opaque, int version_id)
  1208. {
  1209. return version_id == 1;
  1210. }
/*
 * Migration pre-save hook: normalize link/autoneg state and choose
 * which set of TX offload properties to place in the main vmstate.
 */
static int e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down && have_autoneg(s)) {
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }

    /* Decide which set of props to migrate in the main structure */
    if (chkflag(TSO) || !s->use_tso_for_migration) {
        /* Either we're migrating with the extra subsection, in which
         * case the mig_props is always 'props' OR
         * we've not got the subsection, but 'props' was the last
         * updated.
         */
        s->mig_props = s->tx.props;
    } else {
        /* We're not using the subsection, and 'tso_props' was
         * the last updated.
         */
        s->mig_props = s->tx.tso_props;
    }
    return 0;
}
/*
 * Migration post-load hook: re-derive state that is not migrated
 * directly — mitigation timers, link-down status, and the active TX
 * offload property sets.
 */
static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* Without the mitigation compat flag, the mitigation registers must
     * read as zero and the mitigation IRQ state is reset. */
    if (!chkflag(MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    /* Arm the mitigation timer immediately so pending causes fire. */
    s->mit_timer_on = true;
    timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (have_autoneg(s) &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    s->tx.props = s->mig_props;
    if (!s->received_tx_tso) {
        /* We received only one set of offload data (tx.props)
         * and haven't got tx.tso_props. The best we can do
         * is dupe the data.
         */
        s->tx.tso_props = s->mig_props;
    }
    return 0;
}
/* Subsection post-load: remember that the TSO subsection arrived, so
 * e1000_post_load doesn't need to duplicate mig_props into tso_props. */
static int e1000_tx_tso_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;

    s->received_tx_tso = true;
    return 0;
}
/* Subsection predicate: migrate mitigation state only when the
 * "mitigation" compat flag is enabled. */
static bool e1000_mit_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MIT);
}
/* Subsection predicate: migrate the full MAC register file only when
 * the "extra_mac_registers" compat flag is enabled. */
static bool e1000_full_mac_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MAC);
}
/* Subsection predicate: migrate the separate TSO property set only when
 * the "migrate_tso_props" compat flag is enabled. */
static bool e1000_tso_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(TSO);
}
/* Optional subsection: interrupt-mitigation registers and IRQ level. */
static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_mit_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
/* Optional subsection: the complete 0x8000-entry MAC register file. */
static const VMStateDescription vmstate_e1000_full_mac_state = {
    .name = "e1000/full_mac_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_full_mac_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
        VMSTATE_END_OF_LIST()
    }
};
/* Optional subsection: the TSO-specific TX offload context, migrated
 * separately from the main props (see e1000_pre_save). */
static const VMStateDescription vmstate_e1000_tx_tso_state = {
    .name = "e1000/tx_tso_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_tso_state_needed,
    .post_load = e1000_tx_tso_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(tx.tso_props.ipcss, E1000State),
        VMSTATE_UINT8(tx.tso_props.ipcso, E1000State),
        VMSTATE_UINT16(tx.tso_props.ipcse, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucss, E1000State),
        VMSTATE_UINT8(tx.tso_props.tucso, E1000State),
        VMSTATE_UINT16(tx.tso_props.tucse, E1000State),
        VMSTATE_UINT32(tx.tso_props.paylen, E1000State),
        VMSTATE_UINT8(tx.tso_props.hdr_len, E1000State),
        VMSTATE_UINT16(tx.tso_props.mss, E1000State),
        VMSTATE_INT8(tx.tso_props.ip, E1000State),
        VMSTATE_INT8(tx.tso_props.tcp, E1000State),
        VMSTATE_END_OF_LIST()
    }
};
/* Main migration description: PCI state, EEPROM/EECD shift-register
 * state, the active TX offload context, and the commonly-used subset
 * of MAC registers.  Flag-dependent extras live in the subsections. */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base.  */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(mig_props.ipcss, E1000State),
        VMSTATE_UINT8(mig_props.ipcso, E1000State),
        VMSTATE_UINT16(mig_props.ipcse, E1000State),
        VMSTATE_UINT8(mig_props.tucss, E1000State),
        VMSTATE_UINT8(mig_props.tucso, E1000State),
        VMSTATE_UINT16(mig_props.tucse, E1000State),
        VMSTATE_UINT32(mig_props.paylen, E1000State),
        VMSTATE_UINT8(mig_props.hdr_len, E1000State),
        VMSTATE_UINT16(mig_props.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(mig_props.ip, E1000State),
        VMSTATE_INT8(mig_props.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_e1000_mit_state,
        &vmstate_e1000_full_mac_state,
        &vmstate_e1000_tx_tso_state,
        NULL
    }
};
/*
 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
 * Note: A valid DevId will be inserted during pci_e1000_realize().
 * Word 0x3f is reserved for the checksum, filled in by
 * e1000x_core_prepare_eeprom() along with the MAC address words.
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};
/* PCI interface */

/*
 * Create the MMIO and I/O-port regions.  MMIO write coalescing is
 * enabled for the whole BAR except the registers in excluded_regs[],
 * whose writes have immediate side effects (interrupts, TX kick) and
 * must not be delayed.  PNPMMIO_SIZE acts as the list terminator.
 */
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    /* Coalesce each gap between consecutive excluded registers. */
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
/*
 * Device teardown: stop and free all timers, then unregister the NIC
 * from the network layer.
 */
static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    timer_del(d->flush_queue_timer);
    timer_free(d->flush_queue_timer);
    qemu_del_nic(d->nic);
}
/* Network backend callbacks for this NIC model. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};
/*
 * PCI config-space write hook: after the default handling, if the
 * guest just enabled bus mastering, retry packets that were queued
 * while DMA was unavailable.
 */
static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
                               uint32_t val, int len)
{
    E1000State *s = E1000(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
/*
 * Realize: set PCI config defaults, register the MMIO/IO BARs,
 * prepare the EEPROM image, create the NIC backend and the timers
 * used by the device model.
 */
static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    uint8_t *pci_conf;
    uint8_t *macaddr;

    pci_dev->config_write = e1000_write_config;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    /* BAR 0: register MMIO window; BAR 1: legacy I/O ports. */
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    /* Pick a default MAC if the user did not supply one. */
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;

    /* Build the EEPROM contents from the template, device id and MAC. */
    e1000x_core_prepare_eeprom(d->eeprom_data,
                               e1000_eeprom_template,
                               sizeof(e1000_eeprom_template),
                               PCI_DEVICE_GET_CLASS(pci_dev)->device_id,
                               macaddr);

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    /* Timers: autonegotiation (ms), interrupt mitigation (ns), RX flush (ms). */
    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
    d->flush_queue_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                        e1000_flush_queue_timer, d);
}
  1511. static void qdev_e1000_reset(DeviceState *dev)
  1512. {
  1513. E1000State *d = E1000(dev);
  1514. e1000_reset(d);
  1515. }
/*
 * User-visible properties.  The _BIT flags gate optional behaviour and
 * default to enabled; presumably machine compat code clears them for
 * migration compatibility with older QEMU versions — confirm against
 * the compat property tables.
 */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_BIT("extra_mac_registers", E1000State,
                    compat_flags, E1000_FLAG_MAC_BIT, true),
    DEFINE_PROP_BIT("migrate_tso_props", E1000State,
                    compat_flags, E1000_FLAG_TSO_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
/* Per-variant parameters used to generate the concrete device types. */
typedef struct E1000Info {
    const char *name;    /* QOM type name */
    uint16_t device_id;  /* PCI device ID */
    uint8_t revision;    /* PCI revision ID */
    uint16_t phy_id2;    /* value reported in PHY ID register 2 */
} E1000Info;
  1534. static void e1000_class_init(ObjectClass *klass, void *data)
  1535. {
  1536. DeviceClass *dc = DEVICE_CLASS(klass);
  1537. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  1538. E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
  1539. const E1000Info *info = data;
  1540. k->realize = pci_e1000_realize;
  1541. k->exit = pci_e1000_uninit;
  1542. k->romfile = "efi-e1000.rom";
  1543. k->vendor_id = PCI_VENDOR_ID_INTEL;
  1544. k->device_id = info->device_id;
  1545. k->revision = info->revision;
  1546. e->phy_id2 = info->phy_id2;
  1547. k->class_id = PCI_CLASS_NETWORK_ETHERNET;
  1548. set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
  1549. dc->desc = "Intel Gigabit Ethernet";
  1550. dc->reset = qdev_e1000_reset;
  1551. dc->vmsd = &vmstate_e1000;
  1552. dc->props = e1000_properties;
  1553. }
/* Instance initializer: expose the standard "bootindex" property. */
static void e1000_instance_init(Object *obj)
{
    E1000State *n = E1000(obj);

    device_add_bootindex_property(obj, &n->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}
/*
 * Abstract base type; the concrete variants in e1000_devices[] are
 * registered as subtypes of this in e1000_register_types().
 */
static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
/* The emulated e1000 variants and their identifying parameters. */
static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};
  1593. static void e1000_register_types(void)
  1594. {
  1595. int i;
  1596. type_register_static(&e1000_base_info);
  1597. for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
  1598. const E1000Info *info = &e1000_devices[i];
  1599. TypeInfo type_info = {};
  1600. type_info.name = info->name;
  1601. type_info.parent = TYPE_E1000_BASE;
  1602. type_info.class_data = (void *)info;
  1603. type_info.class_init = e1000_class_init;
  1604. type_info.instance_init = e1000_instance_init;
  1605. type_register(&type_info);
  1606. }
  1607. }
  1608. type_init(e1000_register_types)