imx_fec.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376
  1. /*
  2. * i.MX Fast Ethernet Controller emulation.
  3. *
  4. * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
  5. *
  6. * Based on Coldfire Fast Ethernet Controller emulation.
  7. *
  8. * Copyright (c) 2007 CodeSourcery.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but WITHOUT
  16. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  18. * for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along
  21. * with this program; if not, see <http://www.gnu.org/licenses/>.
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/irq.h"
  25. #include "hw/net/imx_fec.h"
  26. #include "hw/qdev-properties.h"
  27. #include "migration/vmstate.h"
  28. #include "sysemu/dma.h"
  29. #include "qemu/log.h"
  30. #include "qemu/module.h"
  31. #include "net/checksum.h"
  32. #include "net/eth.h"
  33. #include "trace.h"
  34. /* For crc32 */
  35. #include <zlib.h>
  36. #define IMX_MAX_DESC 1024
  37. static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
  38. {
  39. static char tmp[20];
  40. sprintf(tmp, "index %d", index);
  41. return tmp;
  42. }
  43. static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
  44. {
  45. switch (index) {
  46. case ENET_FRBR:
  47. return "FRBR";
  48. case ENET_FRSR:
  49. return "FRSR";
  50. case ENET_MIIGSK_CFGR:
  51. return "MIIGSK_CFGR";
  52. case ENET_MIIGSK_ENR:
  53. return "MIIGSK_ENR";
  54. default:
  55. return imx_default_reg_name(s, index);
  56. }
  57. }
  58. static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
  59. {
  60. switch (index) {
  61. case ENET_RSFL:
  62. return "RSFL";
  63. case ENET_RSEM:
  64. return "RSEM";
  65. case ENET_RAEM:
  66. return "RAEM";
  67. case ENET_RAFL:
  68. return "RAFL";
  69. case ENET_TSEM:
  70. return "TSEM";
  71. case ENET_TAEM:
  72. return "TAEM";
  73. case ENET_TAFL:
  74. return "TAFL";
  75. case ENET_TIPG:
  76. return "TIPG";
  77. case ENET_FTRL:
  78. return "FTRL";
  79. case ENET_TACC:
  80. return "TACC";
  81. case ENET_RACC:
  82. return "RACC";
  83. case ENET_ATCR:
  84. return "ATCR";
  85. case ENET_ATVR:
  86. return "ATVR";
  87. case ENET_ATOFF:
  88. return "ATOFF";
  89. case ENET_ATPER:
  90. return "ATPER";
  91. case ENET_ATCOR:
  92. return "ATCOR";
  93. case ENET_ATINC:
  94. return "ATINC";
  95. case ENET_ATSTMP:
  96. return "ATSTMP";
  97. case ENET_TGSR:
  98. return "TGSR";
  99. case ENET_TCSR0:
  100. return "TCSR0";
  101. case ENET_TCCR0:
  102. return "TCCR0";
  103. case ENET_TCSR1:
  104. return "TCSR1";
  105. case ENET_TCCR1:
  106. return "TCCR1";
  107. case ENET_TCSR2:
  108. return "TCSR2";
  109. case ENET_TCCR2:
  110. return "TCCR2";
  111. case ENET_TCSR3:
  112. return "TCSR3";
  113. case ENET_TCCR3:
  114. return "TCCR3";
  115. default:
  116. return imx_default_reg_name(s, index);
  117. }
  118. }
  119. static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
  120. {
  121. switch (index) {
  122. case ENET_EIR:
  123. return "EIR";
  124. case ENET_EIMR:
  125. return "EIMR";
  126. case ENET_RDAR:
  127. return "RDAR";
  128. case ENET_TDAR:
  129. return "TDAR";
  130. case ENET_ECR:
  131. return "ECR";
  132. case ENET_MMFR:
  133. return "MMFR";
  134. case ENET_MSCR:
  135. return "MSCR";
  136. case ENET_MIBC:
  137. return "MIBC";
  138. case ENET_RCR:
  139. return "RCR";
  140. case ENET_TCR:
  141. return "TCR";
  142. case ENET_PALR:
  143. return "PALR";
  144. case ENET_PAUR:
  145. return "PAUR";
  146. case ENET_OPD:
  147. return "OPD";
  148. case ENET_IAUR:
  149. return "IAUR";
  150. case ENET_IALR:
  151. return "IALR";
  152. case ENET_GAUR:
  153. return "GAUR";
  154. case ENET_GALR:
  155. return "GALR";
  156. case ENET_TFWR:
  157. return "TFWR";
  158. case ENET_RDSR:
  159. return "RDSR";
  160. case ENET_TDSR:
  161. return "TDSR";
  162. case ENET_MRBR:
  163. return "MRBR";
  164. default:
  165. if (s->is_fec) {
  166. return imx_fec_reg_name(s, index);
  167. } else {
  168. return imx_enet_reg_name(s, index);
  169. }
  170. }
  171. }
  172. /*
  173. * Versions of this device with more than one TX descriptor save the
  174. * 2nd and 3rd descriptors in a subsection, to maintain migration
  175. * compatibility with previous versions of the device that only
  176. * supported a single descriptor.
  177. */
  178. static bool imx_eth_is_multi_tx_ring(void *opaque)
  179. {
  180. IMXFECState *s = IMX_FEC(opaque);
  181. return s->tx_ring_num > 1;
  182. }
/* Subsection carrying the 2nd and 3rd TX descriptor pointers. */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    /* Only sent when the device actually has more than one TX ring */
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main migration state: register file, RX descriptor pointer, first TX
 * descriptor pointer and emulated PHY state. Additional TX rings are
 * migrated via the vmstate_imx_eth_txdescs subsection for compatibility
 * with older single-ring versions of the device.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
/* Bits of the emulated PHY interrupt source/mask registers (regs 29/30). */
#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);
/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    /* No dedicated PHY interrupt line: re-evaluate the MAC interrupt state */
    imx_eth_update(s);
}
  232. static void imx_phy_update_link(IMXFECState *s)
  233. {
  234. /* Autonegotiation status mirrors link status. */
  235. if (qemu_get_queue(s->nic)->link_down) {
  236. trace_imx_phy_update_link("down");
  237. s->phy_status &= ~0x0024;
  238. s->phy_int |= PHY_INT_DOWN;
  239. } else {
  240. trace_imx_phy_update_link("up");
  241. s->phy_status |= 0x0024;
  242. s->phy_int |= PHY_INT_ENERGYON;
  243. s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
  244. }
  245. imx_phy_update_irq(s);
  246. }
  247. static void imx_eth_set_link(NetClientState *nc)
  248. {
  249. imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
  250. }
/* Restore the emulated PHY to its power-on register values and re-sync
 * the link state (which may immediately set interrupt source bits). */
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}
/*
 * Read one MII management register of the emulated PHY.
 *
 * @reg encodes both the PHY address (reg / 32) and the register number
 * (reg % 32); accesses to any PHY other than s->phy_num read as 0.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad phy num %u\n",
                      TYPE_IMX_FEC, __func__, phy);
        return 0;
    }

    reg %= 32;

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        /* Reading the interrupt source register clears it. */
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}
  318. static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
  319. {
  320. uint32_t phy = reg / 32;
  321. if (phy != s->phy_num) {
  322. qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad phy num %u\n",
  323. TYPE_IMX_FEC, __func__, phy);
  324. return;
  325. }
  326. reg %= 32;
  327. trace_imx_phy_write(val, phy, reg);
  328. switch (reg) {
  329. case 0: /* Basic Control */
  330. if (val & 0x8000) {
  331. imx_phy_reset(s);
  332. } else {
  333. s->phy_control = val & 0x7980;
  334. /* Complete autonegotiation immediately. */
  335. if (val & 0x1000) {
  336. s->phy_status |= 0x0020;
  337. }
  338. }
  339. break;
  340. case 4: /* Auto-neg advertisement */
  341. s->phy_advertise = (val & 0x2d7f) | 0x80;
  342. break;
  343. case 30: /* Interrupt mask */
  344. s->phy_int_mask = val & 0xff;
  345. imx_phy_update_irq(s);
  346. break;
  347. case 17:
  348. case 18:
  349. case 27:
  350. case 31:
  351. qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
  352. TYPE_IMX_FEC, __func__, reg);
  353. break;
  354. default:
  355. qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
  356. TYPE_IMX_FEC, __func__, reg);
  357. break;
  358. }
  359. }
/* DMA-read one legacy FEC buffer descriptor from guest memory. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
/* DMA-write one legacy FEC buffer descriptor back to guest memory. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}
/* DMA-read one enhanced ENET buffer descriptor from guest memory. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
/* DMA-write one enhanced ENET buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}
  379. static void imx_eth_update(IMXFECState *s)
  380. {
  381. /*
  382. * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
  383. * interrupts swapped. This worked with older versions of Linux (4.14
  384. * and older) since Linux associated both interrupt lines with Ethernet
  385. * MAC interrupts. Specifically,
  386. * - Linux 4.15 and later have separate interrupt handlers for the MAC and
  387. * timer interrupts. Those versions of Linux fail with versions of QEMU
  388. * with swapped interrupt assignments.
  389. * - In linux 4.14, both interrupt lines were registered with the Ethernet
  390. * MAC interrupt handler. As a result, all versions of qemu happen to
  391. * work, though that is accidental.
  392. * - In Linux 4.9 and older, the timer interrupt was registered directly
  393. * with the Ethernet MAC interrupt handler. The MAC interrupt was
  394. * redirected to a GPIO interrupt to work around erratum ERR006687.
  395. * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
  396. * interrupt never fired since IOMUX is currently not supported in qemu.
  397. * Linux instead received MAC interrupts on the timer interrupt.
  398. * As a result, qemu versions with the swapped interrupt assignment work,
  399. * albeit accidentally, but qemu versions with the correct interrupt
  400. * assignment fail.
  401. *
  402. * To ensure that all versions of Linux work, generate ENET_INT_MAC
  403. * interrrupts on both interrupt lines. This should be changed if and when
  404. * qemu supports IOMUX.
  405. */
  406. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
  407. (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
  408. qemu_set_irq(s->irq[1], 1);
  409. } else {
  410. qemu_set_irq(s->irq[1], 0);
  411. }
  412. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
  413. qemu_set_irq(s->irq[0], 1);
  414. } else {
  415. qemu_set_irq(s->irq[0], 0);
  416. }
  417. }
/*
 * Walk the (legacy FEC) TX descriptor ring and transmit every frame the
 * guest has handed over, stopping at the first descriptor without
 * ENET_BD_R or after IMX_MAX_DESC descriptors (loop safety bound).
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp to the frame buffer size and flag babbling transmit. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the ring base. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
/*
 * Walk one of the (enhanced ENET) TX descriptor rings and transmit every
 * frame the guest has handed over.
 *
 * @index selects the ring via the TDAR/TDAR1/TDAR2 register index, which
 * also determines the interrupt bits and TDSR base register to use.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        /* The caller (imx_eth_write) only passes the three TDAR indices. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp to the frame buffer size and flag babbling transmit. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Protocol (TCP/UDP) checksum insertion requested? */
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            /* IP header checksum insertion requested? */
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: restart from the ring base. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
  554. static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
  555. {
  556. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  557. imx_enet_do_tx(s, index);
  558. } else {
  559. imx_fec_do_tx(s);
  560. }
  561. }
  562. static void imx_eth_enable_rx(IMXFECState *s, bool flush)
  563. {
  564. IMXFECBufDesc bd;
  565. imx_fec_read_bd(&bd, s->rx_descriptor);
  566. s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;
  567. if (!s->regs[ENET_RDAR]) {
  568. trace_imx_eth_rx_bd_full();
  569. } else if (flush) {
  570. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  571. }
  572. }
/* Device-level reset: restore MAC registers, ring pointers and the PHY. */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* PALR/PAUR mirror the configured MAC address; PAUR's low half holds
     * the constant 0x8808 (NOTE(review): presumably the pause frame type
     * field — confirm against the i.MX reference manual). */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}
/* Log a guest read of an unknown register; such reads return zero. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
  613. static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
  614. {
  615. switch (index) {
  616. case ENET_FRBR:
  617. case ENET_FRSR:
  618. case ENET_MIIGSK_CFGR:
  619. case ENET_MIIGSK_ENR:
  620. return s->regs[index];
  621. default:
  622. return imx_default_read(s, index);
  623. }
  624. }
/* Read handler for ENET-only registers; unknown indices are logged by
 * imx_default_read() and read as zero. */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
/*
 * MMIO read handler for the whole register window. Registers common to
 * FEC and ENET are read directly; the rest are dispatched to the
 * variant-specific helper.
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;   /* 32-bit registers, word index */

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
  700. static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
  701. {
  702. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
  703. PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
  704. return;
  705. }
/* Write handler for FEC-only registers (invoked from imx_eth_write for
 * indices not handled by the common switch). */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Enable bit set -> register reads back as 0x6, else 0. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/* Write handler for ENET-only registers (invoked from imx_eth_write for
 * indices not handled by the common switch). The masks reflect which
 * bits of each register are writable. */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * MMIO write handler for the whole register window.
 *
 * Registers common to FEC and ENET are handled here; anything else is
 * dispatched to imx_fec_write()/imx_enet_write() depending on the
 * variant. All paths that fall out of the switch end with
 * imx_eth_update() so the interrupt lines track the new EIR/EIMR state.
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt event bits are write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* RX kick: only meaningful while the MAC is enabled. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        /* Extra TX rings only exist on multi-ring variants. */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        /* TX is synchronous here, so the "active" bit clears at once. */
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* Disabling the MAC stops DMA and rewinds all ring pointers. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Keep the NIC's MAC address in sync with the guest's setting. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Low address bits are forced to zero (FEC: 2 bits, ENET: 3). */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
  959. static bool imx_eth_can_receive(NetClientState *nc)
  960. {
  961. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  962. return !!s->regs[ENET_RDAR];
  963. }
  964. static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
  965. size_t len)
  966. {
  967. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  968. IMXFECBufDesc bd;
  969. uint32_t flags = 0;
  970. uint32_t addr;
  971. uint32_t crc;
  972. uint32_t buf_addr;
  973. uint8_t *crc_ptr;
  974. unsigned int buf_len;
  975. size_t size = len;
  976. trace_imx_fec_receive(size);
  977. if (!s->regs[ENET_RDAR]) {
  978. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
  979. TYPE_IMX_FEC, __func__);
  980. return 0;
  981. }
  982. /* 4 bytes for the CRC. */
  983. size += 4;
  984. crc = cpu_to_be32(crc32(~0, buf, size));
  985. crc_ptr = (uint8_t *) &crc;
  986. /* Huge frames are truncated. */
  987. if (size > ENET_MAX_FRAME_SIZE) {
  988. size = ENET_MAX_FRAME_SIZE;
  989. flags |= ENET_BD_TR | ENET_BD_LG;
  990. }
  991. /* Frames larger than the user limit just set error flags. */
  992. if (size > (s->regs[ENET_RCR] >> 16)) {
  993. flags |= ENET_BD_LG;
  994. }
  995. addr = s->rx_descriptor;
  996. while (size > 0) {
  997. imx_fec_read_bd(&bd, addr);
  998. if ((bd.flags & ENET_BD_E) == 0) {
  999. /* No descriptors available. Bail out. */
  1000. /*
  1001. * FIXME: This is wrong. We should probably either
  1002. * save the remainder for when more RX buffers are
  1003. * available, or flag an error.
  1004. */
  1005. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
  1006. TYPE_IMX_FEC, __func__);
  1007. break;
  1008. }
  1009. buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
  1010. bd.length = buf_len;
  1011. size -= buf_len;
  1012. trace_imx_fec_receive_len(addr, bd.length);
  1013. /* The last 4 bytes are the CRC. */
  1014. if (size < 4) {
  1015. buf_len += size - 4;
  1016. }
  1017. buf_addr = bd.data;
  1018. dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
  1019. buf += buf_len;
  1020. if (size < 4) {
  1021. dma_memory_write(&address_space_memory, buf_addr + buf_len,
  1022. crc_ptr, 4 - size);
  1023. crc_ptr += 4 - size;
  1024. }
  1025. bd.flags &= ~ENET_BD_E;
  1026. if (size == 0) {
  1027. /* Last buffer in frame. */
  1028. bd.flags |= flags | ENET_BD_L;
  1029. trace_imx_fec_receive_last(bd.flags);
  1030. s->regs[ENET_EIR] |= ENET_INT_RXF;
  1031. } else {
  1032. s->regs[ENET_EIR] |= ENET_INT_RXB;
  1033. }
  1034. imx_fec_write_bd(&bd, addr);
  1035. /* Advance to the next descriptor. */
  1036. if ((bd.flags & ENET_BD_W) != 0) {
  1037. addr = s->regs[ENET_RDSR];
  1038. } else {
  1039. addr += sizeof(bd);
  1040. }
  1041. }
  1042. s->rx_descriptor = addr;
  1043. imx_eth_enable_rx(s, false);
  1044. imx_eth_update(s);
  1045. return len;
  1046. }
  1047. static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
  1048. size_t len)
  1049. {
  1050. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1051. IMXENETBufDesc bd;
  1052. uint32_t flags = 0;
  1053. uint32_t addr;
  1054. uint32_t crc;
  1055. uint32_t buf_addr;
  1056. uint8_t *crc_ptr;
  1057. unsigned int buf_len;
  1058. size_t size = len;
  1059. bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;
  1060. trace_imx_enet_receive(size);
  1061. if (!s->regs[ENET_RDAR]) {
  1062. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
  1063. TYPE_IMX_FEC, __func__);
  1064. return 0;
  1065. }
  1066. /* 4 bytes for the CRC. */
  1067. size += 4;
  1068. crc = cpu_to_be32(crc32(~0, buf, size));
  1069. crc_ptr = (uint8_t *) &crc;
  1070. if (shift16) {
  1071. size += 2;
  1072. }
  1073. /* Huge frames are truncated. */
  1074. if (size > s->regs[ENET_FTRL]) {
  1075. size = s->regs[ENET_FTRL];
  1076. flags |= ENET_BD_TR | ENET_BD_LG;
  1077. }
  1078. /* Frames larger than the user limit just set error flags. */
  1079. if (size > (s->regs[ENET_RCR] >> 16)) {
  1080. flags |= ENET_BD_LG;
  1081. }
  1082. addr = s->rx_descriptor;
  1083. while (size > 0) {
  1084. imx_enet_read_bd(&bd, addr);
  1085. if ((bd.flags & ENET_BD_E) == 0) {
  1086. /* No descriptors available. Bail out. */
  1087. /*
  1088. * FIXME: This is wrong. We should probably either
  1089. * save the remainder for when more RX buffers are
  1090. * available, or flag an error.
  1091. */
  1092. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
  1093. TYPE_IMX_FEC, __func__);
  1094. break;
  1095. }
  1096. buf_len = MIN(size, s->regs[ENET_MRBR]);
  1097. bd.length = buf_len;
  1098. size -= buf_len;
  1099. trace_imx_enet_receive_len(addr, bd.length);
  1100. /* The last 4 bytes are the CRC. */
  1101. if (size < 4) {
  1102. buf_len += size - 4;
  1103. }
  1104. buf_addr = bd.data;
  1105. if (shift16) {
  1106. /*
  1107. * If SHIFT16 bit of ENETx_RACC register is set we need to
  1108. * align the payload to 4-byte boundary.
  1109. */
  1110. const uint8_t zeros[2] = { 0 };
  1111. dma_memory_write(&address_space_memory, buf_addr,
  1112. zeros, sizeof(zeros));
  1113. buf_addr += sizeof(zeros);
  1114. buf_len -= sizeof(zeros);
  1115. /* We only do this once per Ethernet frame */
  1116. shift16 = false;
  1117. }
  1118. dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
  1119. buf += buf_len;
  1120. if (size < 4) {
  1121. dma_memory_write(&address_space_memory, buf_addr + buf_len,
  1122. crc_ptr, 4 - size);
  1123. crc_ptr += 4 - size;
  1124. }
  1125. bd.flags &= ~ENET_BD_E;
  1126. if (size == 0) {
  1127. /* Last buffer in frame. */
  1128. bd.flags |= flags | ENET_BD_L;
  1129. trace_imx_enet_receive_last(bd.flags);
  1130. /* Indicate that we've updated the last buffer descriptor. */
  1131. bd.last_buffer = ENET_BD_BDU;
  1132. if (bd.option & ENET_BD_RX_INT) {
  1133. s->regs[ENET_EIR] |= ENET_INT_RXF;
  1134. }
  1135. } else {
  1136. if (bd.option & ENET_BD_RX_INT) {
  1137. s->regs[ENET_EIR] |= ENET_INT_RXB;
  1138. }
  1139. }
  1140. imx_enet_write_bd(&bd, addr);
  1141. /* Advance to the next descriptor. */
  1142. if ((bd.flags & ENET_BD_W) != 0) {
  1143. addr = s->regs[ENET_RDSR];
  1144. } else {
  1145. addr += sizeof(bd);
  1146. }
  1147. }
  1148. s->rx_descriptor = addr;
  1149. imx_eth_enable_rx(s, false);
  1150. imx_eth_update(s);
  1151. return len;
  1152. }
  1153. static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
  1154. size_t len)
  1155. {
  1156. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1157. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  1158. return imx_enet_receive(nc, buf, len);
  1159. } else {
  1160. return imx_fec_receive(nc, buf, len);
  1161. }
  1162. }
/* MMIO access handlers for the register window. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    /* Registers are 32 bits wide; only aligned 4-byte accesses are valid. */
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
  1170. static void imx_eth_cleanup(NetClientState *nc)
  1171. {
  1172. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1173. s->nic = NULL;
  1174. }
/* Callbacks glueing this device model to QEMU's network layer. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
/*
 * DeviceClass.realize: set up the MMIO register window, export the two
 * IRQ lines, and create the NIC backend.  Statement order matters: the
 * memory region must exist before sysbus_init_mmio(), and the MAC address
 * must be finalized before qemu_new_nic().
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    /* irq[0]: main MAC interrupt, irq[1]: timer interrupt. */
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    /* Pick a default MAC address if the user did not supply one. */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    /* Number of TX descriptor rings; defaults to a single ring. */
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    /* MDIO address of the attached PHY. */
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_END_OF_LIST(),
};
  1204. static void imx_eth_class_init(ObjectClass *klass, void *data)
  1205. {
  1206. DeviceClass *dc = DEVICE_CLASS(klass);
  1207. dc->vmsd = &vmstate_imx_eth;
  1208. dc->reset = imx_eth_reset;
  1209. device_class_set_props(dc, imx_eth_properties);
  1210. dc->realize = imx_eth_realize;
  1211. dc->desc = "i.MX FEC/ENET Ethernet Controller";
  1212. }
  1213. static void imx_fec_init(Object *obj)
  1214. {
  1215. IMXFECState *s = IMX_FEC(obj);
  1216. s->is_fec = true;
  1217. }
  1218. static void imx_enet_init(Object *obj)
  1219. {
  1220. IMXFECState *s = IMX_FEC(obj);
  1221. s->is_fec = false;
  1222. }
/* Base type: the legacy FEC controller (e.g. i.MX25). */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
/* Derived type: the enhanced ENET controller; inherits class_init and state. */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
  1235. static void imx_eth_register_types(void)
  1236. {
  1237. type_register_static(&imx_fec_info);
  1238. type_register_static(&imx_enet_info);
  1239. }
  1240. type_init(imx_eth_register_types)