imx_fec.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378
  1. /*
  2. * i.MX Fast Ethernet Controller emulation.
  3. *
  4. * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
  5. *
  6. * Based on Coldfire Fast Ethernet Controller emulation.
  7. *
  8. * Copyright (c) 2007 CodeSourcery.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but WITHOUT
  16. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  18. * for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along
  21. * with this program; if not, see <http://www.gnu.org/licenses/>.
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/irq.h"
  25. #include "hw/net/imx_fec.h"
  26. #include "hw/qdev-properties.h"
  27. #include "migration/vmstate.h"
  28. #include "sysemu/dma.h"
  29. #include "qemu/log.h"
  30. #include "qemu/module.h"
  31. #include "net/checksum.h"
  32. #include "net/eth.h"
  33. #include "trace.h"
  34. /* For crc32 */
  35. #include <zlib.h>
  36. #define IMX_MAX_DESC 1024
  37. static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
  38. {
  39. static char tmp[20];
  40. sprintf(tmp, "index %d", index);
  41. return tmp;
  42. }
  43. static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
  44. {
  45. switch (index) {
  46. case ENET_FRBR:
  47. return "FRBR";
  48. case ENET_FRSR:
  49. return "FRSR";
  50. case ENET_MIIGSK_CFGR:
  51. return "MIIGSK_CFGR";
  52. case ENET_MIIGSK_ENR:
  53. return "MIIGSK_ENR";
  54. default:
  55. return imx_default_reg_name(s, index);
  56. }
  57. }
  58. static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
  59. {
  60. switch (index) {
  61. case ENET_RSFL:
  62. return "RSFL";
  63. case ENET_RSEM:
  64. return "RSEM";
  65. case ENET_RAEM:
  66. return "RAEM";
  67. case ENET_RAFL:
  68. return "RAFL";
  69. case ENET_TSEM:
  70. return "TSEM";
  71. case ENET_TAEM:
  72. return "TAEM";
  73. case ENET_TAFL:
  74. return "TAFL";
  75. case ENET_TIPG:
  76. return "TIPG";
  77. case ENET_FTRL:
  78. return "FTRL";
  79. case ENET_TACC:
  80. return "TACC";
  81. case ENET_RACC:
  82. return "RACC";
  83. case ENET_ATCR:
  84. return "ATCR";
  85. case ENET_ATVR:
  86. return "ATVR";
  87. case ENET_ATOFF:
  88. return "ATOFF";
  89. case ENET_ATPER:
  90. return "ATPER";
  91. case ENET_ATCOR:
  92. return "ATCOR";
  93. case ENET_ATINC:
  94. return "ATINC";
  95. case ENET_ATSTMP:
  96. return "ATSTMP";
  97. case ENET_TGSR:
  98. return "TGSR";
  99. case ENET_TCSR0:
  100. return "TCSR0";
  101. case ENET_TCCR0:
  102. return "TCCR0";
  103. case ENET_TCSR1:
  104. return "TCSR1";
  105. case ENET_TCCR1:
  106. return "TCCR1";
  107. case ENET_TCSR2:
  108. return "TCSR2";
  109. case ENET_TCCR2:
  110. return "TCCR2";
  111. case ENET_TCSR3:
  112. return "TCSR3";
  113. case ENET_TCCR3:
  114. return "TCCR3";
  115. default:
  116. return imx_default_reg_name(s, index);
  117. }
  118. }
  119. static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
  120. {
  121. switch (index) {
  122. case ENET_EIR:
  123. return "EIR";
  124. case ENET_EIMR:
  125. return "EIMR";
  126. case ENET_RDAR:
  127. return "RDAR";
  128. case ENET_TDAR:
  129. return "TDAR";
  130. case ENET_ECR:
  131. return "ECR";
  132. case ENET_MMFR:
  133. return "MMFR";
  134. case ENET_MSCR:
  135. return "MSCR";
  136. case ENET_MIBC:
  137. return "MIBC";
  138. case ENET_RCR:
  139. return "RCR";
  140. case ENET_TCR:
  141. return "TCR";
  142. case ENET_PALR:
  143. return "PALR";
  144. case ENET_PAUR:
  145. return "PAUR";
  146. case ENET_OPD:
  147. return "OPD";
  148. case ENET_IAUR:
  149. return "IAUR";
  150. case ENET_IALR:
  151. return "IALR";
  152. case ENET_GAUR:
  153. return "GAUR";
  154. case ENET_GALR:
  155. return "GALR";
  156. case ENET_TFWR:
  157. return "TFWR";
  158. case ENET_RDSR:
  159. return "RDSR";
  160. case ENET_TDSR:
  161. return "TDSR";
  162. case ENET_MRBR:
  163. return "MRBR";
  164. default:
  165. if (s->is_fec) {
  166. return imx_fec_reg_name(s, index);
  167. } else {
  168. return imx_enet_reg_name(s, index);
  169. }
  170. }
  171. }
  172. /*
  173. * Versions of this device with more than one TX descriptor save the
  174. * 2nd and 3rd descriptors in a subsection, to maintain migration
  175. * compatibility with previous versions of the device that only
  176. * supported a single descriptor.
  177. */
  178. static bool imx_eth_is_multi_tx_ring(void *opaque)
  179. {
  180. IMXFECState *s = IMX_FEC(opaque);
  181. return s->tx_ring_num > 1;
  182. }
/*
 * Migration subsection holding TX descriptors 1 and 2.  Only sent when
 * the device has multiple TX rings, preserving stream compatibility
 * with older single-descriptor versions of the device.
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,  /* skipped on single-ring variants */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main migration state: all MMIO registers, the RX descriptor pointer,
 * the first TX descriptor pointer, and the emulated PHY state.  Extra
 * TX descriptors travel in the subsection above.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
/* PHY interrupt source/mask bits (registers 29/30 of the emulated PHY). */
#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);
/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    /* No dedicated PHY interrupt line; just re-evaluate the MAC IRQs. */
    imx_eth_update(s);
}
  232. static void imx_phy_update_link(IMXFECState *s)
  233. {
  234. /* Autonegotiation status mirrors link status. */
  235. if (qemu_get_queue(s->nic)->link_down) {
  236. trace_imx_phy_update_link("down");
  237. s->phy_status &= ~0x0024;
  238. s->phy_int |= PHY_INT_DOWN;
  239. } else {
  240. trace_imx_phy_update_link("up");
  241. s->phy_status |= 0x0024;
  242. s->phy_int |= PHY_INT_ENERGYON;
  243. s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
  244. }
  245. imx_phy_update_irq(s);
  246. }
  247. static void imx_eth_set_link(NetClientState *nc)
  248. {
  249. imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
  250. }
/*
 * Reset the emulated PHY registers to their power-on defaults and
 * re-derive link status.
 */
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    /* NOTE(review): values follow the standard MII register layout
     * (BMSR/BMCR/ANAR) - presumably modelled on an SMSC PHY; confirm
     * against the board's PHY datasheet. */
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}
/*
 * Read one register of the emulated MII PHY.
 *
 * @reg encodes the PHY address in its upper part (reg / 32) and the
 * register number in its lower part (reg % 32).  A read addressed to any
 * PHY other than s->phy_num returns 0xffff (no such device on the bus).
 * Reading register 29 (interrupt source) clears the pending bits.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (phy != s->phy_num) {
        trace_imx_phy_read_num(phy, s->phy_num);
        return 0xffff;
    }

    reg %= 32;

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;             /* read-to-clear */
        imx_phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}
  317. static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
  318. {
  319. uint32_t phy = reg / 32;
  320. if (phy != s->phy_num) {
  321. trace_imx_phy_write_num(phy, s->phy_num);
  322. return;
  323. }
  324. reg %= 32;
  325. trace_imx_phy_write(val, phy, reg);
  326. switch (reg) {
  327. case 0: /* Basic Control */
  328. if (val & 0x8000) {
  329. imx_phy_reset(s);
  330. } else {
  331. s->phy_control = val & 0x7980;
  332. /* Complete autonegotiation immediately. */
  333. if (val & 0x1000) {
  334. s->phy_status |= 0x0020;
  335. }
  336. }
  337. break;
  338. case 4: /* Auto-neg advertisement */
  339. s->phy_advertise = (val & 0x2d7f) | 0x80;
  340. break;
  341. case 30: /* Interrupt mask */
  342. s->phy_int_mask = val & 0xff;
  343. imx_phy_update_irq(s);
  344. break;
  345. case 17:
  346. case 18:
  347. case 27:
  348. case 31:
  349. qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
  350. TYPE_IMX_FEC, __func__, reg);
  351. break;
  352. default:
  353. qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
  354. TYPE_IMX_FEC, __func__, reg);
  355. break;
  356. }
  357. }
/* Fetch a FEC buffer descriptor from guest memory at @addr.
 * NOTE(review): the DMA result code is ignored; a failed read leaves
 * *bd with stale/unspecified contents - confirm this is intentional. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
/* Write a FEC buffer descriptor back to guest memory at @addr
 * (DMA result code ignored, matching the read helper). */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
/* Fetch an (extended) ENET buffer descriptor from guest memory at @addr.
 * NOTE(review): DMA result code ignored, as in imx_fec_read_bd(). */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
/* Write an (extended) ENET buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
  381. static void imx_eth_update(IMXFECState *s)
  382. {
  383. /*
  384. * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
  385. * interrupts swapped. This worked with older versions of Linux (4.14
  386. * and older) since Linux associated both interrupt lines with Ethernet
  387. * MAC interrupts. Specifically,
  388. * - Linux 4.15 and later have separate interrupt handlers for the MAC and
  389. * timer interrupts. Those versions of Linux fail with versions of QEMU
  390. * with swapped interrupt assignments.
  391. * - In linux 4.14, both interrupt lines were registered with the Ethernet
  392. * MAC interrupt handler. As a result, all versions of qemu happen to
  393. * work, though that is accidental.
  394. * - In Linux 4.9 and older, the timer interrupt was registered directly
  395. * with the Ethernet MAC interrupt handler. The MAC interrupt was
  396. * redirected to a GPIO interrupt to work around erratum ERR006687.
  397. * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
  398. * interrupt never fired since IOMUX is currently not supported in qemu.
  399. * Linux instead received MAC interrupts on the timer interrupt.
  400. * As a result, qemu versions with the swapped interrupt assignment work,
  401. * albeit accidentally, but qemu versions with the correct interrupt
  402. * assignment fail.
  403. *
  404. * To ensure that all versions of Linux work, generate ENET_INT_MAC
  405. * interrupts on both interrupt lines. This should be changed if and when
  406. * qemu supports IOMUX.
  407. */
  408. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
  409. (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
  410. qemu_set_irq(s->irq[1], 1);
  411. } else {
  412. qemu_set_irq(s->irq[1], 0);
  413. }
  414. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
  415. qemu_set_irq(s->irq[0], 1);
  416. } else {
  417. qemu_set_irq(s->irq[0], 0);
  418. }
  419. }
/*
 * Transmit pending frames from the (single) FEC TX descriptor ring.
 *
 * Walks the ring starting at s->tx_descriptor[0], gathering buffer
 * contents into s->frame until a descriptor marked ENET_BD_L (last) is
 * seen, then hands the assembled frame to the network layer.  Stops when
 * a descriptor is not ready (ENET_BD_R clear) or after IMX_MAX_DESC
 * descriptors, which guards against a ring with no terminating wrap.
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp to the frame buffer and flag "babbling transmit". */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: continue from the start of the ring. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
/*
 * Transmit pending frames from one of the ENET TX descriptor rings.
 *
 * @index selects the ring via its TDAR register (ENET_TDAR/TDAR1/TDAR2);
 * any other value indicates a programming error in the caller and
 * aborts.  In addition to the basic FEC behaviour this handles the
 * extended-descriptor options: IP/TCP/UDP checksum insertion
 * (ENET_BD_IINS/ENET_BD_PINS), per-descriptor interrupt enables
 * (ENET_BD_TX_INT) and the "descriptor updated" flag (ENET_BD_BDU).
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    /* Resolve ring number, interrupt bits and ring-start register. */
    switch (index) {
    case ENET_TDAR:
        ring    = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr    = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring    = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr    = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring    = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr    = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Clamp to the frame buffer and flag "babbling transmit". */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Insert checksums requested by the descriptor options. */
            int csum = 0;
            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit set: continue from the start of this ring. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
  552. static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
  553. {
  554. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  555. imx_enet_do_tx(s, index);
  556. } else {
  557. imx_fec_do_tx(s);
  558. }
  559. }
  560. static void imx_eth_enable_rx(IMXFECState *s, bool flush)
  561. {
  562. IMXFECBufDesc bd;
  563. imx_fec_read_bd(&bd, s->rx_descriptor);
  564. s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;
  565. if (!s->regs[ENET_RDAR]) {
  566. trace_imx_eth_rx_bd_full();
  567. } else if (flush) {
  568. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  569. }
  570. }
/*
 * Device-level reset: restore MAC registers, descriptor pointers and
 * the attached PHY to their power-on values.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* Seed the MAC address registers from the configured MAC address. */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    /* Variant-specific reset values (FEC vs. extended ENET). */
    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}
/* Fallback read handler: log a guest error and return 0. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
  611. static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
  612. {
  613. switch (index) {
  614. case ENET_FRBR:
  615. case ENET_FRSR:
  616. case ENET_MIIGSK_CFGR:
  617. case ENET_MIIGSK_ENR:
  618. return s->regs[index];
  619. default:
  620. return imx_default_read(s, index);
  621. }
  622. }
  623. static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
  624. {
  625. switch (index) {
  626. case ENET_RSFL:
  627. case ENET_RSEM:
  628. case ENET_RAEM:
  629. case ENET_RAFL:
  630. case ENET_TSEM:
  631. case ENET_TAEM:
  632. case ENET_TAFL:
  633. case ENET_TIPG:
  634. case ENET_FTRL:
  635. case ENET_TACC:
  636. case ENET_RACC:
  637. case ENET_ATCR:
  638. case ENET_ATVR:
  639. case ENET_ATOFF:
  640. case ENET_ATPER:
  641. case ENET_ATCOR:
  642. case ENET_ATINC:
  643. case ENET_ATSTMP:
  644. case ENET_TGSR:
  645. case ENET_TCSR0:
  646. case ENET_TCCR0:
  647. case ENET_TCSR1:
  648. case ENET_TCCR1:
  649. case ENET_TCSR2:
  650. case ENET_TCCR2:
  651. case ENET_TCSR3:
  652. case ENET_TCCR3:
  653. return s->regs[index];
  654. default:
  655. return imx_default_read(s, index);
  656. }
  657. }
  658. static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
  659. {
  660. uint32_t value = 0;
  661. IMXFECState *s = IMX_FEC(opaque);
  662. uint32_t index = offset >> 2;
  663. switch (index) {
  664. case ENET_EIR:
  665. case ENET_EIMR:
  666. case ENET_RDAR:
  667. case ENET_TDAR:
  668. case ENET_ECR:
  669. case ENET_MMFR:
  670. case ENET_MSCR:
  671. case ENET_MIBC:
  672. case ENET_RCR:
  673. case ENET_TCR:
  674. case ENET_PALR:
  675. case ENET_PAUR:
  676. case ENET_OPD:
  677. case ENET_IAUR:
  678. case ENET_IALR:
  679. case ENET_GAUR:
  680. case ENET_GALR:
  681. case ENET_TFWR:
  682. case ENET_RDSR:
  683. case ENET_TDSR:
  684. case ENET_MRBR:
  685. value = s->regs[index];
  686. break;
  687. default:
  688. if (s->is_fec) {
  689. value = imx_fec_read(s, index);
  690. } else {
  691. value = imx_enet_read(s, index);
  692. }
  693. break;
  694. }
  695. trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);
  696. return value;
  697. }
  698. static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
  699. {
  700. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
  701. PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
  702. return;
  703. }
/*
 * Write handler for FEC-only registers; values are masked down to their
 * writable bits, unknown indices are reported as guest errors.
 */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Writable bits plus a fixed always-set bit 10. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Writing the enable bit reads back as enabled + ready. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * Write handler for ENET-only registers (extended i.MX6 variant).
 * Each register is masked down to its writable bits; unknown indices
 * are reported as guest errors.
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* FIFO watermark registers: 9 writable bits. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        /* Timer value/offset/period: fully writable. */
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * MMIO write dispatcher.  Handles registers common to both variants
 * (including the side effects: kicking RX/TX, MDIO transactions via
 * MMFR, soft reset via ECR) and delegates everything else to the
 * variant-specific write handler.  Interrupt lines are re-evaluated on
 * every common-register write.
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt event register: write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* RX kick: only effective while the MAC is enabled. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        /* Extra TX rings only exist on multi-ring variants. */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        /* TX kick: transmit synchronously, then clear the "active" bit. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* MAC disabled: quiesce DMA and rewind ring pointers. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MDIO frame: bits [27:18] select PHY+register, bit 29
         * distinguishes read from write; completes immediately. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB.  */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately.  */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Keep the NIC's MAC address in sync with the registers. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering.  */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Ring bases are aligned: 4-byte (FEC) or 8-byte (ENET). */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
  957. static bool imx_eth_can_receive(NetClientState *nc)
  958. {
  959. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  960. return !!s->regs[ENET_RDAR];
  961. }
  962. static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
  963. size_t len)
  964. {
  965. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  966. IMXFECBufDesc bd;
  967. uint32_t flags = 0;
  968. uint32_t addr;
  969. uint32_t crc;
  970. uint32_t buf_addr;
  971. uint8_t *crc_ptr;
  972. unsigned int buf_len;
  973. size_t size = len;
  974. trace_imx_fec_receive(size);
  975. if (!s->regs[ENET_RDAR]) {
  976. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
  977. TYPE_IMX_FEC, __func__);
  978. return 0;
  979. }
  980. crc = cpu_to_be32(crc32(~0, buf, size));
  981. /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
  982. size += 4;
  983. crc_ptr = (uint8_t *) &crc;
  984. /* Huge frames are truncated. */
  985. if (size > ENET_MAX_FRAME_SIZE) {
  986. size = ENET_MAX_FRAME_SIZE;
  987. flags |= ENET_BD_TR | ENET_BD_LG;
  988. }
  989. /* Frames larger than the user limit just set error flags. */
  990. if (size > (s->regs[ENET_RCR] >> 16)) {
  991. flags |= ENET_BD_LG;
  992. }
  993. addr = s->rx_descriptor;
  994. while (size > 0) {
  995. imx_fec_read_bd(&bd, addr);
  996. if ((bd.flags & ENET_BD_E) == 0) {
  997. /* No descriptors available. Bail out. */
  998. /*
  999. * FIXME: This is wrong. We should probably either
  1000. * save the remainder for when more RX buffers are
  1001. * available, or flag an error.
  1002. */
  1003. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
  1004. TYPE_IMX_FEC, __func__);
  1005. break;
  1006. }
  1007. buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
  1008. bd.length = buf_len;
  1009. size -= buf_len;
  1010. trace_imx_fec_receive_len(addr, bd.length);
  1011. /* The last 4 bytes are the CRC. */
  1012. if (size < 4) {
  1013. buf_len += size - 4;
  1014. }
  1015. buf_addr = bd.data;
  1016. dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
  1017. MEMTXATTRS_UNSPECIFIED);
  1018. buf += buf_len;
  1019. if (size < 4) {
  1020. dma_memory_write(&address_space_memory, buf_addr + buf_len,
  1021. crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
  1022. crc_ptr += 4 - size;
  1023. }
  1024. bd.flags &= ~ENET_BD_E;
  1025. if (size == 0) {
  1026. /* Last buffer in frame. */
  1027. bd.flags |= flags | ENET_BD_L;
  1028. trace_imx_fec_receive_last(bd.flags);
  1029. s->regs[ENET_EIR] |= ENET_INT_RXF;
  1030. } else {
  1031. s->regs[ENET_EIR] |= ENET_INT_RXB;
  1032. }
  1033. imx_fec_write_bd(&bd, addr);
  1034. /* Advance to the next descriptor. */
  1035. if ((bd.flags & ENET_BD_W) != 0) {
  1036. addr = s->regs[ENET_RDSR];
  1037. } else {
  1038. addr += sizeof(bd);
  1039. }
  1040. }
  1041. s->rx_descriptor = addr;
  1042. imx_eth_enable_rx(s, false);
  1043. imx_eth_update(s);
  1044. return len;
  1045. }
/*
 * Receive a frame on the ENET (1588-capable) variant of the controller.
 *
 * DMAs the payload plus a freshly computed FCS into the guest's RX
 * descriptor ring.  If SHIFT16 is set in ENET_RACC, two zero bytes of
 * padding are inserted before the first buffer so the payload is
 * 4-byte aligned.  Truncation limit comes from ENET_FTRL rather than
 * the fixed FEC maximum.
 *
 * Returns the number of bytes consumed (always @len once the ring was
 * active), or 0 if the guest has not activated RX via ENET_RDAR.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    /* Guest requested 2 bytes of pad ahead of the frame data. */
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        /* RX ring not active: the guest is not expecting packets. */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;
    if (shift16) {
        /* Account for the two pad bytes in the total to be written. */
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append the (tail of the) computed FCS after the payload. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            trace_imx_enet_receive_last(bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            /* Unlike the FEC path, interrupts are gated per-descriptor. */
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor; wrap bit returns to ring start. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
  1153. static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
  1154. size_t len)
  1155. {
  1156. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1157. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  1158. return imx_enet_receive(nc, buf, len);
  1159. } else {
  1160. return imx_fec_receive(nc, buf, len);
  1161. }
  1162. }
/* MMIO handlers for the register bank; only 32-bit accesses are valid. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
  1170. static void imx_eth_cleanup(NetClientState *nc)
  1171. {
  1172. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1173. s->nic = NULL;
  1174. }
/* Callbacks wiring this device model into QEMU's network layer. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
/*
 * Realize the device: map the MMIO register window, export the two IRQ
 * lines, and create the backing NIC with a default MAC address if the
 * user did not supply one.  @errp is currently unused (no failure paths).
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    /* IRQ registration order fixes the sysbus IRQ indices 0 and 1. */
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    /* Number of TX descriptor rings in use (the model has up to 3). */
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    /* PHY address this MAC's MDIO transactions are directed at. */
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_END_OF_LIST(),
};
  1204. static void imx_eth_class_init(ObjectClass *klass, void *data)
  1205. {
  1206. DeviceClass *dc = DEVICE_CLASS(klass);
  1207. dc->vmsd = &vmstate_imx_eth;
  1208. dc->reset = imx_eth_reset;
  1209. device_class_set_props(dc, imx_eth_properties);
  1210. dc->realize = imx_eth_realize;
  1211. dc->desc = "i.MX FEC/ENET Ethernet Controller";
  1212. }
  1213. static void imx_fec_init(Object *obj)
  1214. {
  1215. IMXFECState *s = IMX_FEC(obj);
  1216. s->is_fec = true;
  1217. }
  1218. static void imx_enet_init(Object *obj)
  1219. {
  1220. IMXFECState *s = IMX_FEC(obj);
  1221. s->is_fec = false;
  1222. }
/* TYPE_IMX_FEC: the base sysbus device modelling the FEC variant. */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
/*
 * TYPE_IMX_ENET: subtype of TYPE_IMX_FEC that only overrides
 * instance_init (to clear is_fec); everything else is inherited.
 */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
/* Register both device types with QOM; FEC first as it is ENET's parent. */
static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)