/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
#include "net/checksum.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "qemu/iov.h"

#include "e1000_regs.h"

#define E1000_DEBUG

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL,   DEBUG_IO,     DEBUG_MMIO,   DEBUG_INTERRUPT,
    DEBUG_RX,        DEBUG_TX,     DEBUG_MDIC,   DEBUG_EEPROM,
    DEBUG_UNKNOWN,   DEBUG_TXSUM,  DEBUG_TXERR,  DEBUG_RXERR,
    DEBUG_RXFILTER,  DEBUG_PHY,    DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

#define MAXIMUM_ETHERNET_HDR_LEN (14+4)

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ?         0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */        0xc20
};

typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
    uint32_t compat_flags;
} E1000State;

#define TYPE_E1000 "e1000"

#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000)

#define defreg(x)    x = (E1000_##x>>2)
enum {
    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
    defreg(TORH),    defreg(TORL),    defreg(TOTH),    defreg(TOTL),
    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),      defreg(MTA),     defreg(CRCERRS), defreg(VFTA),
    defreg(VET),     defreg(RDTR),    defreg(RADV),    defreg(TADV),
    defreg(ITR),
};

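/*
 * Note: the enum values above are word indices into mac_reg[].  Every MAC
 * register is 32 bits wide, so defreg() turns a register's byte offset into
 * (offset >> 2); e.g. with E1000_RCTL at byte offset 0x100 (per
 * e1000_regs.h), RCTL == 0x40 and a guest access to MMIO offset 0x100 is
 * served from mac_reg[0x40].  e1000_mmio_read()/e1000_mmio_write() below
 * apply the same (addr >> 2) conversion before indexing the dispatch tables.
 */
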
static void
e1000_link_down(E1000State *s)
{
    s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
}

static void
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
}

static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    /*
     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
     * migrate during auto negotiation, after migration the link will be
     * down.
     */
    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return;
    }
    if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
        e1000_link_down(s);
        s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
        DBGOUT(PHY, "Start link auto negotiation\n");
        timer_mod(s->autoneg_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

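/*
 * Note on the auto-negotiation emulation above: when the guest sets both
 * "auto-negotiation enable" and "restart auto-negotiation" in PHY_CTRL,
 * the model drops the link and clears MII_SR_AUTONEG_COMPLETE; 500 ms
 * later e1000_autoneg_timer() below re-raises the link (unless the peer
 * is down) and sets MII_SR_AUTONEG_COMPLETE.  The 500 ms figure is this
 * model's choice of a plausible negotiation time, not a value mandated by
 * the spec.
 */
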
static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    if (!qemu_get_queue(s->nic)->link_down) {
        e1000_link_up(s);
    }
    s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    DBGOUT(PHY, "Auto negotiation is completed\n");
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with autoneg not yet completed */
    [PHY_ID1] = 0x141,                      [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,              [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,  [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,               [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};

/* Helper function, *curr == 0 means the value is not set */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value && (*curr == 0 || value < *curr)) {
        *curr = value;
    }
}

static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
        /* Only for 8257x */
        val |= E1000_ICR_INT_ASSERTED;
    }
    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential rising edge.  We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR).  RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}

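/*
 * Worked example of the mitigation arithmetic above (register values
 * assumed for illustration): RADV and TADV count in 1024 ns units while
 * ITR counts in 256 ns units, so RADV/TADV are multiplied by 4 to bring
 * everything onto the common 256 ns scale, and the smallest non-zero
 * candidate wins.  With RADV = 8, ITR = 100 and a pending RXT0 interrupt,
 * mit_delay = min(8 * 4, 100) = 32 ticks, so timer_mod() arms the timer
 * 32 * 256 = 8192 ns in the future; until it fires, further rising edges
 * are absorbed without touching the interrupt line.
 */
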
static void
e1000_mit_timer(void *opaque)
{
    E1000State *s = opaque;

    s->mit_timer_on = 0;
    /* Call set_interrupt_cause to update the irq level (if necessary). */
    set_interrupt_cause(s, 0, s->mac_reg[ICR]);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}

static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}

static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))                 // CS inactive; nothing to do
        return;
    if (E1000_EECD_CS & (val ^ oldval)) {       // CS rise edge; reset state
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))      // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {               // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}

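/*
 * Note on the Microwire bit-banging implemented by get_eecd()/set_eecd():
 * the guest shifts 9 bits into val_in via DI; the top three bits are
 * compared against EEPROM_READ_OPCODE_MICROWIRE and the low six bits are
 * the word address.  For a read, bitnum_out is preset to (addr << 4) - 1
 * so that each subsequent falling SK edge steps through the 16 bits of
 * eeprom_data[addr], and get_eecd() returns bit (bitnum_out & 0xf) ^ 0xf,
 * i.e. the word is shifted out on DO MSB-first.
 */
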
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}

static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish(sum));
    }
}

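/*
 * putsum() implements the checksum-offload contract of the context
 * descriptor fields: css is the offset where summing starts, sloc the
 * offset where the 16-bit one's-complement result is stored, and cse the
 * last byte to include (cse == 0 means "through the end of the packet",
 * which is why n is only clamped to cse + 1 when cse is non-zero).  For
 * example, for a plain IPv4 header at offset 14, css = 14, sloc = 24 and
 * cse = 33 would checksum bytes 14..33 and store the result at byte 24.
 */
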
static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

/* FCS aka Ethernet CRC-32.  We don't get it from backends and can't
 * fill it in, just pad descriptor length by 4 bytes unless guest
 * told us to strip it off the packet. */
static inline int
fcs_len(E1000State *s)
{
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {           // IPv4
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else                  // IPv6
            stw_be_p(tp->data+css+4, tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;    // PSH, FIN
        } else                  // UDP
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}

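/*
 * TSO bookkeeping in xmit_seg(), summarized: for every segment carved out
 * of a large send, the IP total-length (or IPv6 payload-length) field is
 * rewritten for the segment's actual size, the IPv4 identification field
 * is advanced by the number of frames already sent, the TCP sequence
 * number is advanced by frames * mss, and PSH/FIN (bits 3 and 0 of the
 * TCP flags byte, hence "&= ~9") are cleared on every segment except the
 * last.  The pseudo-header checksum seeded by the guest at tucso does not
 * yet cover a length, so the per-segment TCP/UDP length is folded in
 * before putsum() computes the final one's-complement sum.
 */
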
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {          // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {   // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

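/*
 * Descriptor-type recap for process_tx_desc(): a context descriptor
 * (DEXT set, DTYP_D clear) only latches offload parameters into s->tx and
 * returns; a data descriptor (DEXT and DTYP_D set) and a legacy descriptor
 * both carry payload.  In the TSO path, msh = hdr_len + mss is the most a
 * single segment may hold; the loop copies at most that much per
 * iteration, stashes the first hdr_len bytes in tp->header when they
 * first arrive, and after each full segment is sent by xmit_seg() replays
 * the saved header so the next segment starts at tp->size == hdr_len.
 */
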
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}

static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}

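/*
 * Transmit-ring walk: the device consumes descriptors from TDH up to (but
 * not including) TDT, each sizeof(struct e1000_tx_desc) == 16 bytes, and
 * wraps TDH to zero once TDH * 16 reaches TDLEN.  A descriptor with RS or
 * RPS set gets its DD status written back, and E1000_ICR_TXDW is
 * accumulated into the interrupt cause that is raised once, via
 * set_ics(), after the whole batch.  The tdh_start check is purely
 * defensive: with sane TDT/TDLEN values TDH can never lap its starting
 * point within one call.
 */
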
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: "
           "%02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);
    return 0;
}

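/*
 * The inexact multicast filter above hashes a destination address to one
 * bit of the 4096-bit Multicast Table Array, following the MTA lookup in
 * the 8254x manual referenced at the top of this file: RCTL.MO selects,
 * via mta_shift[], how far the last two address bytes are shifted before
 * the low 12 bits are kept; the top 7 of those bits pick the MTA word and
 * the low 5 bits pick the bit within it.  E.g. for 01:00:5e:00:00:fb with
 * MO == 0 (shift 4), f = ((0xfb << 8) | 0x00) >> 4 & 0xfff = 0xfb0, so
 * word 0x7d, bit 0x10; a set bit means accept.
 */
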
static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        e1000_link_up(s);
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}

static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
            s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}

static int
e1000_can_receive(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
        (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}

static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                             + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

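/*
 * RX-path summary: frames shorter than MIN_BUF_SIZE (60) are zero-padded;
 * a frame whose 802.1Q tag matches VET is stripped (the 12 leading
 * address bytes are slid forward 4 bytes and the TCI is reported through
 * desc.special together with E1000_RXD_STAT_VP); the payload is then
 * scattered across rxbuf_size-sized descriptor buffers, with EOP set only
 * on the last one.  total_size includes the 4 FCS bytes the backend never
 * delivers (see fcs_len()), so the final descriptor's length covers them
 * even though nothing is written for them.  RXDMT0 is added to the
 * interrupt cause once the free part of the ring shrinks to 1/2, 1/4 or
 * 1/8 of RDLEN, per RCTL.RDMTS as captured in rxbuf_min_shift.
 */
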
static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000_receive_iov(nc, &iov, 1);
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    uint32_t macaddr[2];

    s->mac_reg[index] = val;

    if (index == RA + 1) {
        macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
        macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
        qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
    }
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}

#define getreg(x)    [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),     getreg(RCTL),   getreg(TDH),    getreg(TXDCTL),
    getreg(WUFC),    getreg(TDT),    getreg(CTRL),   getreg(LEDCTL),
    getreg(MANC),    getreg(MDIC),   getreg(SWSM),   getreg(STATUS),
    getreg(TORL),    getreg(TOTL),   getreg(IMS),    getreg(TCTL),
    getreg(RDH),     getreg(RDT),    getreg(VET),    getreg(ICS),
    getreg(TDBAL),   getreg(TDBAH),  getreg(RDBAH),  getreg(RDBAL),
    getreg(TDLEN),   getreg(RDLEN),  getreg(RDTR),   getreg(RADV),
    getreg(TADV),    getreg(ITR),

    [TOTH] = mac_read_clr8,  [TORH] = mac_read_clr8,  [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,  [TPR] = mac_read_clr4,   [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,    [EECD] = get_eecd,       [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)    [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),     putreg(EERD),   putreg(SWSM),   putreg(WUFC),
    putreg(TDBAL),   putreg(TDBAH),  putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL),   putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen,   [RDLEN] = set_dlen,       [TCTL] = set_tctl,
    [TDT] = set_tctl,     [MDIC] = set_mdic,        [ICS] = set_ics,
    [TDH] = set_16bit,    [RDH] = set_16bit,        [RDT] = set_rdt,
    [IMC] = set_imc,      [IMS] = set_ims,          [ICR] = set_icr,
    [EECD] = set_eecd,    [RCTL] = set_rx_control,  [CTRL] = set_ctrl,
    [RDTR] = set_16bit,   [RADV] = set_16bit,       [TADV] = set_16bit,
    [ITR] = set_16bit,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

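/*
 * Register dispatch works by indexing these tables with a register's word
 * offset: GNU designated initializers (including the [A ... B] range
 * extension) leave every unlisted slot NULL, so e1000_mmio_write() and
 * e1000_mmio_read() below treat a NULL entry as "read-only register" or
 * "unknown register" respectively.  The array lengths (NREADOPS and
 * NWRITEOPS) are determined by the highest initialized index, which is
 * why the bounds check precedes the NULL check.
 */
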
static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        macreg_writeops[index](s, index, val);
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}

static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        return macreg_readops[index](s, index);
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}

static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}

static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* If the mitigation timer is active, emulate a timeout now. */
    if (s->mit_timer_on) {
        e1000_mit_timer(s);
    }

    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return;
    }

    /*
     * If link is down and auto-negotiation is ongoing, complete
     * auto-negotiation immediately.  This allows us to look at
     * MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down &&
        s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG) {
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}

static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    if (!(s->compat_flags & E1000_FLAG_MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (!(s->compat_flags & E1000_FLAG_AUTONEG)) {
        return 0;
    }

    if (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN &&
        s->phy_reg[PHY_CTRL] & MII_CR_RESTART_AUTO_NEG &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}

static bool e1000_mit_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return s->compat_flags & E1000_FLAG_MIT;
}

static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_e1000_mit_state,
            .needed = e1000_mit_state_needed,
        }, {
            /* empty */
        }
    }
};

static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};

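/*
 * EEPROM template layout notes: words 0-2 are overwritten with the
 * configured MAC address by pci_e1000_init() below, and word
 * EEPROM_CHECKSUM_REG is recomputed there so that the 16-bit sum of
 * words 0..EEPROM_CHECKSUM_REG equals EEPROM_SUM, the fixed value that
 * Intel drivers verify before trusting the EEPROM contents.
 */
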
/* PCI interface */

static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io",
                          IOPORT_SIZE);
}

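/*
 * The loop above marks the whole MMIO BAR as coalesced I/O except for a
 * 4-byte hole at each register in excluded_regs[].  Coalescing lets the
 * hypervisor batch guest writes and deliver them lazily, which is only
 * safe for registers without immediate side effects; the interrupt
 * registers, MDIC, TCTL and TDT must take effect synchronously (a TDT
 * write, for instance, kicks start_xmit() via set_tctl()), so they are
 * punched out of the coalesced ranges.
 */
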
static void
e1000_cleanup(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    s->nic = NULL;
}

static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_nic(d->nic);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};

static int pci_e1000_init(PCIDevice *pci_dev)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    add_boot_device_path(d->conf.bootindex, dev, "/ethernet-phy@0");

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);

    return 0;
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = E1000(dev);

    e1000_reset(d);
}

static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = pci_e1000_init;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = E1000_DEVID;
    k->revision = 0x03;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}

static const TypeInfo e1000_info = {
    .name          = TYPE_E1000,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_init    = e1000_class_init,
};

static void e1000_register_types(void)
{
    type_register_static(&e1000_info);
}

type_init(e1000_register_types)