esp-pci.c

/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "hw/irq.h"
#include "hw/nvram/eeprom93xx.h"
#include "hw/scsi/esp.h"
#include "migration/vmstate.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qom/object.h"

#define TYPE_AM53C974_DEVICE "am53c974"

typedef struct PCIESPState PCIESPState;
DECLARE_INSTANCE_CHECKER(PCIESPState, PCI_ESP,
                         TYPE_AM53C974_DEVICE)

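/*
 * On-chip DMA engine registers. They are dword-indexed: register n is
 * accessed at offset 0x40 + 4 * n of the I/O BAR (see esp_pci_io_read/write).
 */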
#define DMA_CMD 0x0
#define DMA_STC 0x1
#define DMA_SPA 0x2
#define DMA_WBC 0x3
#define DMA_WAC 0x4
#define DMA_STAT 0x5
#define DMA_SMDLA 0x6
#define DMA_WMAC 0x7

#define DMA_CMD_MASK 0x03
#define DMA_CMD_DIAG 0x04
#define DMA_CMD_MDL 0x10
#define DMA_CMD_INTE_P 0x20
#define DMA_CMD_INTE_D 0x40
#define DMA_CMD_DIR 0x80

#define DMA_STAT_PWDN 0x01
#define DMA_STAT_ERROR 0x02
#define DMA_STAT_ABORT 0x04
#define DMA_STAT_DONE 0x08
#define DMA_STAT_SCSIINT 0x10
#define DMA_STAT_BCMBLT 0x20

#define SBAC_STATUS (1 << 24)

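/*
 * Device state: the generic PCI device, its I/O BAR, the DMA engine
 * registers, the SBAC register and the embedded ESP (53C9x) core.
 */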
struct PCIESPState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    MemoryRegion io;
    uint32_t dma_regs[8];
    uint32_t sbac;
    ESPState esp;
};

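/*
 * Recompute the level of the PCI interrupt line: it is asserted when the ESP
 * core raises a SCSI interrupt, or when a DMA transfer has completed while
 * DMA completion interrupts (DMA_CMD_INTE_D) are enabled.
 */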
static void esp_pci_update_irq(PCIESPState *pci)
{
    int scsi_level = !!(pci->dma_regs[DMA_STAT] & DMA_STAT_SCSIINT);
    int dma_level = (pci->dma_regs[DMA_CMD] & DMA_CMD_INTE_D) ?
                    !!(pci->dma_regs[DMA_STAT] & DMA_STAT_DONE) : 0;
    int level = scsi_level || dma_level;

    pci_set_irq(PCI_DEVICE(pci), level);
}

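/*
 * Handler for the ESP core's interrupt line (allocated in
 * esp_pci_scsi_realize): mirror its level into DMA_STAT_SCSIINT and
 * propagate the result to the PCI interrupt pin.
 */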
static void esp_irq_handler(void *opaque, int irq_num, int level)
{
    PCIESPState *pci = PCI_ESP(opaque);

    if (level) {
        pci->dma_regs[DMA_STAT] |= DMA_STAT_SCSIINT;

        /*
         * If raising the ESP IRQ to indicate the end of a DMA transfer, set
         * DMA_STAT_DONE at the same time. In theory this should be done in
         * esp_pci_dma_memory_rw(); however, there is a delay between setting
         * DMA_STAT_DONE and the ESP IRQ arriving which is visible to the
         * guest and can cause confusion, e.g. in Linux.
         */
        if ((pci->dma_regs[DMA_CMD] & DMA_CMD_MASK) == 0x3 &&
            pci->dma_regs[DMA_WBC] == 0) {
            pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
        }
    } else {
        pci->dma_regs[DMA_STAT] &= ~DMA_STAT_SCSIINT;
    }

    esp_pci_update_irq(pci);
}

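/*
 * Handlers for the command field (DMA_CMD_MASK) of the DMA CMD register:
 * IDLE, BLAST, ABORT and START, dispatched from esp_pci_dma_write().
 */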
static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
    ESPState *s = &pci->esp;

    trace_esp_pci_dma_idle(val);
    esp_dma_enable(s, 0, 0);
}

static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
{
    trace_esp_pci_dma_blast(val);
    qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
    pci->dma_regs[DMA_STAT] |= DMA_STAT_BCMBLT;
}

static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
    ESPState *s = &pci->esp;

    trace_esp_pci_dma_abort(val);
    if (s->current_req) {
        scsi_req_cancel(s->current_req);
    }
}

static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
    ESPState *s = &pci->esp;

    trace_esp_pci_dma_start(val);

    pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
    pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
    pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];

    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR | DMA_STAT_PWDN);

    esp_dma_enable(s, 0, 1);
}

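/*
 * Write to a DMA engine register. Writing DMA_CMD dispatches the command in
 * its low two bits (IDLE/BLAST/ABORT/START); writing DMA_STAT clears the
 * ERROR/ABORT/DONE bits that are set in the written value, but only when
 * SBAC_STATUS selects write-to-clear semantics.
 */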
static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
{
    trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);

    switch (saddr) {
    case DMA_CMD:
        pci->dma_regs[saddr] = val;
        switch (val & DMA_CMD_MASK) {
        case 0x0: /* IDLE */
            esp_pci_handle_idle(pci, val);
            break;
        case 0x1: /* BLAST */
            esp_pci_handle_blast(pci, val);
            break;
        case 0x2: /* ABORT */
            esp_pci_handle_abort(pci, val);
            break;
        case 0x3: /* START */
            esp_pci_handle_start(pci, val);
            break;
        default: /* can't happen */
            abort();
        }
        break;
    case DMA_STC:
    case DMA_SPA:
    case DMA_SMDLA:
        pci->dma_regs[saddr] = val;
        break;
    case DMA_STAT:
        if (pci->sbac & SBAC_STATUS) {
            /* clear some bits on write */
            uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
            pci->dma_regs[DMA_STAT] &= ~(val & mask);
            esp_pci_update_irq(pci);
        }
        break;
    default:
        trace_esp_pci_error_invalid_write_dma(val, saddr);
        return;
    }
}

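/*
 * Read a DMA engine register. Unless SBAC_STATUS selects write-to-clear
 * semantics, reading DMA_STAT clears the ERROR/ABORT/DONE bits as a side
 * effect; the value returned is the one sampled before clearing.
 */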
static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
    uint32_t val;

    val = pci->dma_regs[saddr];
    if (saddr == DMA_STAT) {
        if (!(pci->sbac & SBAC_STATUS)) {
            pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
                                         DMA_STAT_DONE);
            esp_pci_update_irq(pci);
        }
    }

    trace_esp_pci_dma_read(saddr, val);
    return val;
}

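/*
 * MMIO/PIO write into the I/O BAR. Accesses narrower than 32 bits (or not
 * dword aligned) are widened into a read-modify-write of the containing
 * dword, since the registers below are only handled as 32-bit quantities.
 */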
static void esp_pci_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned int size)
{
    PCIESPState *pci = opaque;
    ESPState *s = &pci->esp;

    if (size < 4 || addr & 3) {
        /* need to upgrade request: we only support 4-byte accesses */
        uint32_t current = 0, mask;
        int shift;

        if (addr < 0x40) {
            current = s->wregs[addr >> 2];
        } else if (addr < 0x60) {
            current = pci->dma_regs[(addr - 0x40) >> 2];
        } else if (addr < 0x74) {
            current = pci->sbac;
        }

        shift = (4 - size) * 8;
        mask = (~(uint32_t)0 << shift) >> shift;

        shift = ((4 - (addr & 3)) & 3) * 8;
        val <<= shift;
        val |= current & ~(mask << shift);
        addr &= ~3;
        size = 4;
    }

    g_assert(size >= 4);

    if (addr < 0x40) {
        /* SCSI core reg */
        esp_reg_write(s, addr >> 2, val);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_write(pci->sbac, val);
        pci->sbac = val;
    } else {
        trace_esp_pci_error_invalid_write((int)addr);
    }
}

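/*
 * MMIO/PIO read from the I/O BAR. Layout: 0x00-0x3f ESP (SCSI core)
 * registers, 0x40-0x5f DMA engine registers, 0x70 the SBAC register.
 * The addressed dword is read and then shifted/masked down to the bytes
 * the guest actually requested.
 */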
static uint64_t esp_pci_io_read(void *opaque, hwaddr addr,
                                unsigned int size)
{
    PCIESPState *pci = opaque;
    ESPState *s = &pci->esp;
    uint32_t ret;

    if (addr < 0x40) {
        /* SCSI core reg */
        ret = esp_reg_read(s, addr >> 2);
    } else if (addr < 0x60) {
        /* PCI DMA CCB */
        ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
    } else if (addr == 0x70) {
        /* DMA SCSI Bus and control */
        trace_esp_pci_sbac_read(pci->sbac);
        ret = pci->sbac;
    } else {
        /* Invalid region */
        trace_esp_pci_error_invalid_read((int)addr);
        ret = 0;
    }

    /* give only requested data */
    ret >>= (addr & 3) * 8;
    ret &= ~(~(uint64_t)0 << (8 * size));

    return ret;
}

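/*
 * Perform a DMA transfer on behalf of the ESP core. The direction must match
 * the one programmed in DMA_CMD_DIR; the length is clamped to the remaining
 * byte count (DMA_WBC), and DMA_WAC/DMA_WBC are advanced afterwards.
 */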
static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
                                  DMADirection dir)
{
    dma_addr_t addr;
    DMADirection expected_dir;

    if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
        expected_dir = DMA_DIRECTION_FROM_DEVICE;
    } else {
        expected_dir = DMA_DIRECTION_TO_DEVICE;
    }

    if (dir != expected_dir) {
        trace_esp_pci_error_invalid_dma_direction();
        return;
    }

    if (pci->dma_regs[DMA_STAT] & DMA_CMD_MDL) {
        qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
    }

    addr = pci->dma_regs[DMA_WAC];
    if (pci->dma_regs[DMA_WBC] < len) {
        len = pci->dma_regs[DMA_WBC];
    }

    pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir, MEMTXATTRS_UNSPECIFIED);

    /* update status registers */
    pci->dma_regs[DMA_WBC] -= len;
    pci->dma_regs[DMA_WAC] += len;
}

static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;

    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
{
    PCIESPState *pci = opaque;

    esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
}

static const MemoryRegionOps esp_pci_io_ops = {
    .read = esp_pci_io_read,
    .write = esp_pci_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

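/*
 * Hard reset: reset the embedded ESP core and restore the DMA engine
 * registers to their reset values.
 */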
static void esp_pci_hard_reset(DeviceState *dev)
{
    PCIESPState *pci = PCI_ESP(dev);
    ESPState *s = &pci->esp;

    esp_hard_reset(s);
    pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
                              | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
    pci->dma_regs[DMA_WBC] &= ~0xffff;
    pci->dma_regs[DMA_WAC] = 0xffffffff;
    pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
                               | DMA_STAT_DONE | DMA_STAT_ABORT
                               | DMA_STAT_ERROR);
    pci->dma_regs[DMA_WMAC] = 0xfffffffd;
}

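/*
 * Migration state for the am53c974: the PCI device state, the raw DMA
 * register block and the embedded ESP core state.
 */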
static const VMStateDescription vmstate_esp_pci_scsi = {
    .name = "pciespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIESPState),
        VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
        VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2),
        VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static const struct SCSIBusInfo esp_pci_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled,
};

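/*
 * Realize: realize the embedded ESP core, wire its DMA callbacks and IRQ to
 * this PCI device, register the 0x80-byte I/O BAR and create the SCSI bus.
 */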
static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
{
    PCIESPState *pci = PCI_ESP(dev);
    DeviceState *d = DEVICE(dev);
    ESPState *s = &pci->esp;
    uint8_t *pci_conf;

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    pci_conf = dev->config;

    /* Interrupt pin A */
    pci_conf[PCI_INTERRUPT_PIN] = 0x01;

    s->dma_memory_read = esp_pci_dma_memory_read;
    s->dma_memory_write = esp_pci_dma_memory_write;
    s->dma_opaque = pci;
    s->chip_id = TCHI_AM53C974;

    memory_region_init_io(&pci->io, OBJECT(pci), &esp_pci_io_ops, pci,
                          "esp-io", 0x80);

    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
    s->irq = qemu_allocate_irq(esp_irq_handler, pci, 0);

    scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info);
}

static void esp_pci_scsi_exit(PCIDevice *d)
{
    PCIESPState *pci = PCI_ESP(d);
    ESPState *s = &pci->esp;

    qemu_free_irq(s->irq);
}

static void esp_pci_init(Object *obj)
{
    PCIESPState *pci = PCI_ESP(obj);

    object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}

static void esp_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = esp_pci_scsi_realize;
    k->exit = esp_pci_scsi_exit;
    k->vendor_id = PCI_VENDOR_ID_AMD;
    k->device_id = PCI_DEVICE_ID_AMD_SCSI;
    k->revision = 0x10;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
    device_class_set_legacy_reset(dc, esp_pci_hard_reset);
    dc->vmsd = &vmstate_esp_pci_scsi;
}

static const TypeInfo esp_pci_info = {
    .name = TYPE_AM53C974_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_init = esp_pci_init,
    .instance_size = sizeof(PCIESPState),
    .class_init = esp_pci_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

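/*
 * The Tekram DC-390 is an am53c974-based board that adds a serial EEPROM,
 * accessed through PCI config space, holding the adapter settings.
 */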
struct DC390State {
    PCIESPState pci;
    eeprom_t *eeprom;
};
typedef struct DC390State DC390State;

#define TYPE_DC390_DEVICE "dc390"

DECLARE_INSTANCE_CHECKER(DC390State, DC390,
                         TYPE_DC390_DEVICE)

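/* Byte offsets of the configuration fields within the DC-390 EEPROM contents */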
#define EE_ADAPT_SCSI_ID 64
#define EE_MODE2 65
#define EE_DELAY 66
#define EE_TAG_CMD_NUM 67
#define EE_ADAPT_OPTIONS 68
#define EE_BOOT_SCSI_ID 69
#define EE_BOOT_SCSI_LUN 70
#define EE_CHKSUM1 126
#define EE_CHKSUM2 127

#define EE_ADAPT_OPTION_F6_F8_AT_BOOT 0x01
#define EE_ADAPT_OPTION_BOOT_FROM_CDROM 0x02
#define EE_ADAPT_OPTION_INT13 0x04
#define EE_ADAPT_OPTION_SCAM_SUPPORT 0x08

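/*
 * Config space read: the first configuration byte is combined with the
 * EEPROM's DO line, so it reads as 0 while the EEPROM drives DO low.
 */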
static uint32_t dc390_read_config(PCIDevice *dev, uint32_t addr, int l)
{
    DC390State *pci = DC390(dev);
    uint32_t val;

    val = pci_default_read_config(dev, addr, l);

    if (addr == 0x00 && l == 1) {
        /* First byte of address space is AND-ed with EEPROM DO line */
        if (!eeprom93xx_read(pci->eeprom)) {
            val &= ~0xff;
        }
    }

    return val;
}

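/*
 * Config space write: a write to offset 0x80 bit-bangs the EEPROM (bit 7 is
 * the clock, bit 6 the data-in line, chip select held high); a write to
 * offset 0xc0 deasserts the chip select. Everything else goes through the
 * normal PCI config space handling.
 */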
static void dc390_write_config(PCIDevice *dev,
                               uint32_t addr, uint32_t val, int l)
{
    DC390State *pci = DC390(dev);
    if (addr == 0x80) {
        /* EEPROM write */
        int eesk = val & 0x80 ? 1 : 0;
        int eedi = val & 0x40 ? 1 : 0;
        eeprom93xx_write(pci->eeprom, 1, eesk, eedi);
    } else if (addr == 0xc0) {
        /* EEPROM CS low */
        eeprom93xx_write(pci->eeprom, 0, 0, 0);
    } else {
        pci_default_write_config(dev, addr, val, l);
    }
}

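/*
 * Realize the DC-390: realize the underlying am53c974, then create a 64-word
 * serial EEPROM and fill it with default settings (adapter SCSI ID 7, boot
 * options, etc.), finishing with a checksum word so that all 64 little-endian
 * words sum to 0x1234.
 */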
static void dc390_scsi_realize(PCIDevice *dev, Error **errp)
{
    DC390State *pci = DC390(dev);
    Error *err = NULL;
    uint8_t *contents;
    uint16_t chksum = 0;
    int i;

    /* init base class */
    esp_pci_scsi_realize(dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* EEPROM */
    pci->eeprom = eeprom93xx_new(DEVICE(dev), 64);

    /* set default eeprom values */
    contents = (uint8_t *)eeprom93xx_data(pci->eeprom);

    for (i = 0; i < 16; i++) {
        contents[i * 2] = 0x57;
        contents[i * 2 + 1] = 0x00;
    }
    contents[EE_ADAPT_SCSI_ID] = 7;
    contents[EE_MODE2] = 0x0f;
    contents[EE_TAG_CMD_NUM] = 0x04;
    contents[EE_ADAPT_OPTIONS] = EE_ADAPT_OPTION_F6_F8_AT_BOOT
                                 | EE_ADAPT_OPTION_BOOT_FROM_CDROM
                                 | EE_ADAPT_OPTION_INT13;

    /* update eeprom checksum */
    for (i = 0; i < EE_CHKSUM1; i += 2) {
        chksum += contents[i] + (((uint16_t)contents[i + 1]) << 8);
    }
    chksum = 0x1234 - chksum;
    contents[EE_CHKSUM1] = chksum & 0xff;
    contents[EE_CHKSUM2] = chksum >> 8;
}

static void dc390_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = dc390_scsi_realize;
    k->config_read = dc390_read_config;
    k->config_write = dc390_write_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Tekram DC-390 SCSI adapter";
}

static const TypeInfo dc390_info = {
    .name = TYPE_DC390_DEVICE,
    .parent = TYPE_AM53C974_DEVICE,
    .instance_size = sizeof(DC390State),
    .class_init = dc390_class_init,
};

static void esp_pci_register_types(void)
{
    type_register_static(&esp_pci_info);
    type_register_static(&dc390_info);
}

type_init(esp_pci_register_types)