  1. /*
  2. * i.MX Fast Ethernet Controller emulation.
  3. *
  4. * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
  5. *
  6. * Based on Coldfire Fast Ethernet Controller emulation.
  7. *
  8. * Copyright (c) 2007 CodeSourcery.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but WITHOUT
  16. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  18. * for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along
  21. * with this program; if not, see <http://www.gnu.org/licenses/>.
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/irq.h"
  25. #include "hw/net/imx_fec.h"
  26. #include "hw/qdev-properties.h"
  27. #include "migration/vmstate.h"
  28. #include "system/dma.h"
  29. #include "qemu/log.h"
  30. #include "qemu/module.h"
  31. #include "net/checksum.h"
  32. #include "net/eth.h"
  33. #include "trace.h"
  34. #include <zlib.h> /* for crc32 */
  35. #define IMX_MAX_DESC 1024
  36. static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
  37. {
  38. static char tmp[20];
  39. snprintf(tmp, sizeof(tmp), "index %d", index);
  40. return tmp;
  41. }
  42. static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
  43. {
  44. switch (index) {
  45. case ENET_FRBR:
  46. return "FRBR";
  47. case ENET_FRSR:
  48. return "FRSR";
  49. case ENET_MIIGSK_CFGR:
  50. return "MIIGSK_CFGR";
  51. case ENET_MIIGSK_ENR:
  52. return "MIIGSK_ENR";
  53. default:
  54. return imx_default_reg_name(s, index);
  55. }
  56. }
/*
 * Symbolic name of an ENET-only (i.MX6-style) register for tracing;
 * unknown indices fall back to imx_default_reg_name().
 */
static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}
/*
 * Symbolic name of a register for tracing.  Registers common to both
 * the FEC and ENET variants are named here; anything else is dispatched
 * to the variant-specific helper based on s->is_fec.
 */
static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}
  171. /*
  172. * Versions of this device with more than one TX descriptor save the
  173. * 2nd and 3rd descriptors in a subsection, to maintain migration
  174. * compatibility with previous versions of the device that only
  175. * supported a single descriptor.
  176. */
  177. static bool imx_eth_is_multi_tx_ring(void *opaque)
  178. {
  179. IMXFECState *s = IMX_FEC(opaque);
  180. return s->tx_ring_num > 1;
  181. }
/*
 * Migration subsection carrying TX descriptors 1 and 2.  Kept separate
 * (gated on imx_eth_is_multi_tx_ring) so single-ring configurations
 * retain the original single-descriptor migration stream format.
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main migration state: the full register file, the RX descriptor
 * pointer and the ring-0 TX descriptor pointer.  Extra TX rings travel
 * in the optional subsection above.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
  208. static void imx_eth_update(IMXFECState *s);
/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
/* qemu_irq handler for the PHY model's interrupt output: we expose no
 * dedicated PHY IRQ line, so just re-evaluate the MAC interrupt state. */
static void imx_phy_update_irq(void *opaque, int n, int level)
{
    imx_eth_update(opaque);
}
  219. static void imx_eth_set_link(NetClientState *nc)
  220. {
  221. lan9118_phy_update_link(&IMX_FEC(qemu_get_nic_opaque(nc))->mii,
  222. nc->link_down);
  223. }
/*
 * MDIO read.  "reg" packs the PHY address (reg / 32) and the register
 * number (reg % 32).  Accesses to our own PHY address go to the local
 * lan9118 PHY model; accesses to phy_consumer's address (if any) are
 * forwarded to that instance.  Any other address returns 0xffff, as if
 * no PHY responded.
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            /* Forward to the device sharing our MDIO bus. */
            s = s->phy_consumer;
        } else {
            trace_imx_phy_read_num(phy, s->phy_num);
            return 0xffff;
        }
    }

    reg %= 32;

    return lan9118_phy_read(&s->mii, reg);
}
/*
 * MDIO write.  Address decoding mirrors imx_phy_read(): the PHY address
 * is reg / 32 and the register number reg % 32.  Writes to unknown PHY
 * addresses are traced and dropped.
 */
static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return;
    }

    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            /* Forward to the device sharing our MDIO bus. */
            s = s->phy_consumer;
        } else {
            trace_imx_phy_write_num(phy, s->phy_num);
            return;
        }
    }

    reg %= 32;

    lan9118_phy_write(&s->mii, reg, val);
}
/* DMA-read a FEC buffer descriptor from guest memory at "addr" into *bd. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
/* DMA-write a (possibly modified) FEC buffer descriptor back to guest
 * memory at "addr". */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
/* DMA-read an (enhanced) ENET buffer descriptor from guest memory at
 * "addr" into *bd. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
/* DMA-write a (possibly modified) ENET buffer descriptor back to guest
 * memory at "addr". */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
  281. static void imx_eth_update(IMXFECState *s)
  282. {
  283. /*
  284. * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
  285. * interrupts swapped. This worked with older versions of Linux (4.14
  286. * and older) since Linux associated both interrupt lines with Ethernet
  287. * MAC interrupts. Specifically,
  288. * - Linux 4.15 and later have separate interrupt handlers for the MAC and
  289. * timer interrupts. Those versions of Linux fail with versions of QEMU
  290. * with swapped interrupt assignments.
  291. * - In linux 4.14, both interrupt lines were registered with the Ethernet
  292. * MAC interrupt handler. As a result, all versions of qemu happen to
  293. * work, though that is accidental.
  294. * - In Linux 4.9 and older, the timer interrupt was registered directly
  295. * with the Ethernet MAC interrupt handler. The MAC interrupt was
  296. * redirected to a GPIO interrupt to work around erratum ERR006687.
  297. * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
  298. * interrupt never fired since IOMUX is currently not supported in qemu.
  299. * Linux instead received MAC interrupts on the timer interrupt.
  300. * As a result, qemu versions with the swapped interrupt assignment work,
  301. * albeit accidentally, but qemu versions with the correct interrupt
  302. * assignment fail.
  303. *
  304. * To ensure that all versions of Linux work, generate ENET_INT_MAC
  305. * interrupts on both interrupt lines. This should be changed if and when
  306. * qemu supports IOMUX.
  307. */
  308. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
  309. (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
  310. qemu_set_irq(s->irq[1], 1);
  311. } else {
  312. qemu_set_irq(s->irq[1], 0);
  313. }
  314. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
  315. qemu_set_irq(s->irq[0], 1);
  316. } else {
  317. qemu_set_irq(s->irq[0], 0);
  318. }
  319. }
/*
 * Transmit from the single FEC TX descriptor ring.
 *
 * Walks the ring starting at tx_descriptor[0], gathering buffer data
 * into s->frame until a descriptor with ENET_BD_L (last-in-frame) is
 * seen, then sends the frame.  Stops when a descriptor is not ready
 * (ENET_BD_R clear) or after IMX_MAX_DESC descriptors, which bounds the
 * walk on a malformed ring.
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Would overflow the frame buffer: truncate and flag
             * "babbling transmit". */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart from the ring base. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
/*
 * Transmit from one of the three ENET TX rings.
 *
 * "index" is the TDAR/TDAR1/TDAR2 register index that triggered the
 * kick; it selects the ring number, the matching TDSR base register and
 * the per-ring TXB/TXF interrupt bits.  Buffers are gathered into
 * s->frame until a descriptor carries ENET_BD_L, at which point
 * checksums are optionally inserted and the frame is sent.  Bounded by
 * IMX_MAX_DESC so a malformed ring cannot spin forever.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        /* Callers only pass one of the three TDAR indices, so this is a
         * QEMU programming error rather than a recoverable guest error. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Would overflow the frame buffer: truncate and flag
             * "babbling transmit". */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            int csum = 0;

            /* Insert protocol and/or IP header checksums if the
             * descriptor options request offload. */
            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart from the ring base. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
  452. static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
  453. {
  454. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  455. imx_enet_do_tx(s, index);
  456. } else {
  457. imx_fec_do_tx(s);
  458. }
  459. }
  460. static void imx_eth_enable_rx(IMXFECState *s, bool flush)
  461. {
  462. IMXFECBufDesc bd;
  463. imx_fec_read_bd(&bd, s->rx_descriptor);
  464. s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;
  465. if (!s->regs[ENET_RDAR]) {
  466. trace_imx_eth_rx_bd_full();
  467. } else if (flush) {
  468. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  469. }
  470. }
/*
 * Device reset: restore the register file to power-on defaults (with a
 * FEC/ENET-specific set of extras) and rewind the RX/TX descriptor
 * pointers.  The station address registers are re-seeded from the QEMU
 * NIC configuration.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* PALR holds MAC bytes 0-3; PAUR holds bytes 4-5 plus constant 0x8808. */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
}
/* Catch-all for reads of registers that do not exist on this variant:
 * log a guest error and return 0. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
  509. static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
  510. {
  511. switch (index) {
  512. case ENET_FRBR:
  513. case ENET_FRSR:
  514. case ENET_MIIGSK_CFGR:
  515. case ENET_MIIGSK_ENR:
  516. return s->regs[index];
  517. default:
  518. return imx_default_read(s, index);
  519. }
  520. }
/* Read an ENET-only register; all valid ones read back their stored
 * value, anything else is reported via imx_default_read(). */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
/*
 * MMIO read handler.  Registers shared by the FEC and ENET variants are
 * read directly from s->regs; variant-specific indices are dispatched
 * to imx_fec_read() / imx_enet_read() based on s->is_fec.
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2; /* registers are 32 bits wide */

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
  596. static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
  597. {
  598. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
  599. PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
  600. return;
  601. }
/*
 * Write a FEC-only register.  The masks below restrict writes to the
 * bits the code treats as writable; everything else is forced to a
 * fixed value.
 */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Writing the enable bit makes the register read back 0x6
         * (its reset value); disabling clears it entirely. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * Write an ENET-only register.  Masks keep reserved bits clear;
 * write-1-to-clear (W1C) semantics are noted inline.
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* Only the low 9 bits are writable. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * MMIO write handler.  Registers shared by the FEC and ENET variants
 * are handled here; anything else goes to imx_fec_write() /
 * imx_enet_write().  Every path that falls out of the switch finishes
 * by re-evaluating the interrupt lines via imx_eth_update().
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2; /* registers are 32 bits wide */

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt event register is write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* RX descriptor active: a write (re)starts RX polling while the
         * MAC is enabled; it is forced to 0 otherwise. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        /* Extra TX rings only exist on multi-ring configurations. */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        /* TX descriptor active: kick transmission on the chosen ring.
         * We transmit synchronously, so TDAR reads back as 0 afterwards. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* MAC disabled: clear the "active" registers and rewind all
             * descriptor pointers to their ring bases. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MII management frame: bits [27:18] carry PHY address and
         * register number, bits [15:0] the data. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Station address bytes 0-3; mirror into the NIC config. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        /* Station address bytes 4-5; low half reads back as 0x8808. */
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Ring base registers: descriptors must be aligned, so the low
         * address bits are forced to zero (8-byte FEC, 8/16-byte ENET). */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
  855. static bool imx_eth_can_receive(NetClientState *nc)
  856. {
  857. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  858. return !!s->regs[ENET_RDAR];
  859. }
/*
 * Receive callback for the FEC variant.
 *
 * Computes the 4-byte FCS and appends it to the frame, then copies the
 * result into guest memory across the RX descriptor ring, at most MRBR
 * bytes per descriptor.  Returns len (the frame is always consumed),
 * or 0 if the guest never enabled reception.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    /* The upper half of RCR holds the guest-configured maximum length. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /* Trim the DMA copy of "buf" so the tail comes from crc_ptr. */
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append (part of) the computed FCS after the payload. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            trace_imx_fec_receive_last(bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart from the ring base. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
/*
 * Receive path for ENET (extended descriptor) mode.  Like imx_fec_receive()
 * but uses IMXENETBufDesc descriptors, honours the FTRL truncation length,
 * the optional SHIFT16 two-byte payload alignment, and the per-descriptor
 * RX interrupt enable bit (ENET_BD_RX_INT).
 *
 * Returns the original frame length (the packet is consumed even on
 * truncation or descriptor exhaustion), or 0 when RX is not enabled.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;     /* error bits ORed into the last descriptor */
    uint32_t addr;          /* guest address of the current descriptor */
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    /* Guest requested two padding bytes before the frame (RACC.SHIFT16). */
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        /* Guest has not made any RX descriptors available. */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* Compute the Ethernet FCS; it is appended after the payload below. */
    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;
    if (shift16) {
        /* Account for the two alignment bytes written before the frame. */
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    /* Walk the ring, filling at most ENET_MRBR bytes per descriptor. */
    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /* Shorten the payload copy so the CRC write below fits. */
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };
            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);
            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);
            /* We only do this once per Ethernet frame */
            shift16 = false;
        }
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append the (remaining bytes of the) computed FCS. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        /* Clear the EMPTY bit: hand the descriptor back to the guest. */
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            trace_imx_enet_receive_last(bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* WRAP bit set: restart from the ring base (RDSR). */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    /*
     * NOTE(review): imx_eth_enable_rx() is defined earlier in the file;
     * presumably it re-evaluates descriptor availability and updates RDAR.
     */
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
  1051. static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
  1052. size_t len)
  1053. {
  1054. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1055. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  1056. return imx_enet_receive(nc, buf, len);
  1057. } else {
  1058. return imx_fec_receive(nc, buf, len);
  1059. }
  1060. }
/*
 * MMIO ops for the register bank.  Registers are 32 bits wide, so only
 * 4-byte accesses are valid; smaller/larger accesses are rejected by the
 * memory core.
 */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
  1068. static void imx_eth_cleanup(NetClientState *nc)
  1069. {
  1070. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1071. s->nic = NULL;
  1072. }
/* Net-core callbacks binding this device model to its network backend. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
/*
 * Realize: map the MMIO register bank, export the two IRQ lines, create
 * and realize the internal LAN9118 PHY child, then create the NIC backend.
 *
 * On PHY realize failure this returns with *errp set and no NIC created.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);
    /* Route the PHY's interrupt output into imx_phy_update_irq(). */
    qemu_init_irq(&s->mii_irq, imx_phy_update_irq, s, 0);
    object_initialize_child(OBJECT(s), "mii", &s->mii, TYPE_LAN9118_PHY);
    if (!sysbus_realize_and_unref(SYS_BUS_DEVICE(&s->mii), errp)) {
        return;
    }
    qdev_connect_gpio_out(DEVICE(&s->mii), 0, &s->mii_irq);
    /* Assign a generated MAC address if the user did not supply one. */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
/* User-configurable device properties. */
static const Property imx_eth_properties[] = {
    /* Standard NIC properties: MAC address and netdev backend. */
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    /* Number of TX descriptor rings; defaults to a single ring. */
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    /* NOTE(review): presumably the PHY address on the MDIO bus — confirm. */
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    /*
     * NOTE(review): presumably false when this controller's PHY is reached
     * through another controller's MDIO bus — confirm with board code.
     */
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    /* Link to another IMX_FEC instance acting as the PHY consumer. */
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
};
  1110. static void imx_eth_class_init(ObjectClass *klass, void *data)
  1111. {
  1112. DeviceClass *dc = DEVICE_CLASS(klass);
  1113. dc->vmsd = &vmstate_imx_eth;
  1114. device_class_set_legacy_reset(dc, imx_eth_reset);
  1115. device_class_set_props(dc, imx_eth_properties);
  1116. dc->realize = imx_eth_realize;
  1117. dc->desc = "i.MX FEC/ENET Ethernet Controller";
  1118. }
  1119. static void imx_fec_init(Object *obj)
  1120. {
  1121. IMXFECState *s = IMX_FEC(obj);
  1122. s->is_fec = true;
  1123. }
  1124. static void imx_enet_init(Object *obj)
  1125. {
  1126. IMXFECState *s = IMX_FEC(obj);
  1127. s->is_fec = false;
  1128. }
/* Base QOM type: legacy FEC variant (is_fec set in imx_fec_init). */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
/*
 * ENET variant: subclasses the FEC type, inheriting its instance size and
 * class init, and only overrides instance_init to clear is_fec.
 */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
  1141. static void imx_eth_register_types(void)
  1142. {
  1143. type_register_static(&imx_fec_info);
  1144. type_register_static(&imx_enet_info);
  1145. }
  1146. type_init(imx_eth_register_types)