/*
 * QEMU Parallel PORT emulation
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2007 Marko Kohtala
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "qemu-char.h"
#include "isa.h"
#include "pc.h"
#include "sysemu.h"

//#define DEBUG_PARALLEL

#ifdef DEBUG_PARALLEL
#define pdebug(fmt, ...) printf("pp: " fmt, ## __VA_ARGS__)
#else
#define pdebug(fmt, ...) ((void)0)
#endif
#define PARA_REG_DATA     0
#define PARA_REG_STS      1
#define PARA_REG_CTR      2
#define PARA_REG_EPP_ADDR 3
#define PARA_REG_EPP_DATA 4

/*
 * These are the definitions for the Printer Status Register
 */
#define PARA_STS_BUSY   0x80 /* Busy complement */
#define PARA_STS_ACK    0x40 /* Acknowledge */
#define PARA_STS_PAPER  0x20 /* Out of paper */
#define PARA_STS_ONLINE 0x10 /* Online */
#define PARA_STS_ERROR  0x08 /* Error complement */
#define PARA_STS_TMOUT  0x01 /* EPP timeout */

/*
 * These are the definitions for the Printer Control Register
 */
#define PARA_CTR_DIR    0x20 /* Direction (1=read, 0=write) */
#define PARA_CTR_INTEN  0x10 /* IRQ Enable */
#define PARA_CTR_SELECT 0x08 /* Select In complement */
#define PARA_CTR_INIT   0x04 /* Initialize Printer complement */
#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */
#define PARA_CTR_STROBE 0x01 /* Strobe complement */

#define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE)
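
/*
 * Per-port state.  The same structure backs both operating modes: the
 * "software" mode, where the registers are emulated and printed data is
 * pushed to the character device with qemu_chr_write(), and the
 * "hardware" mode (hw_driver != 0), where register accesses are
 * forwarded to a host parallel port through CHR_IOCTL_PP_* ioctls.
 */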
typedef struct ParallelState {
    uint8_t dataw;
    uint8_t datar;
    uint8_t status;
    uint8_t control;
    qemu_irq irq;
    int irq_pending;
    CharDriverState *chr;
    int hw_driver;
    int epp_timeout;
    uint32_t last_read_offset; /* For debugging */
    /* Memory-mapped interface */
    int it_shift;
} ParallelState;

typedef struct ISAParallelState {
    ISADevice dev;
    uint32_t index;
    uint32_t iobase;
    uint32_t isairq;
    ParallelState state;
} ISAParallelState;

static void parallel_update_irq(ParallelState *s)
{
    if (s->irq_pending)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}
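
/*
 * Software-emulated register writes.  When the port is initialised and
 * selected, asserting STROBE (control bit 0 going 0 -> 1) sends the
 * latched data byte to the backend character device; dropping INIT
 * resets the status lines instead.
 */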
static void
parallel_ioport_write_sw(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;

    pdebug("write addr=0x%02x val=0x%02x\n", addr, val);

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        s->dataw = val;
        parallel_update_irq(s);
        break;
    case PARA_REG_CTR:
        val |= 0xc0;
        if ((val & PARA_CTR_INIT) == 0 ) {
            s->status = PARA_STS_BUSY;
            s->status |= PARA_STS_ACK;
            s->status |= PARA_STS_ONLINE;
            s->status |= PARA_STS_ERROR;
        }
        else if (val & PARA_CTR_SELECT) {
            if (val & PARA_CTR_STROBE) {
                s->status &= ~PARA_STS_BUSY;
                if ((s->control & PARA_CTR_STROBE) == 0)
                    qemu_chr_write(s->chr, &s->dataw, 1);
            } else {
                if (s->control & PARA_CTR_INTEN) {
                    s->irq_pending = 1;
                }
            }
        }
        parallel_update_irq(s);
        s->control = val;
        break;
    }
}
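
/*
 * Hardware pass-through writes: forward register accesses to the host
 * parallel port via the character device's CHR_IOCTL_PP_* ioctls,
 * skipping writes that would not change anything.
 */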
static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint8_t parm = val;
    int dir;

    /* Sometimes programs do several writes for timing purposes on old
       HW. Take care not to waste time on writes that do nothing. */

    s->last_read_offset = ~0U;

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        if (s->dataw == val)
            return;
        pdebug("wd%02x\n", val);
        qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_WRITE_DATA, &parm);
        s->dataw = val;
        break;
    case PARA_REG_STS:
        pdebug("ws%02x\n", val);
        if (val & PARA_STS_TMOUT)
            s->epp_timeout = 0;
        break;
    case PARA_REG_CTR:
        val |= 0xc0;
        if (s->control == val)
            return;
        pdebug("wc%02x\n", val);

        if ((val & PARA_CTR_DIR) != (s->control & PARA_CTR_DIR)) {
            if (val & PARA_CTR_DIR) {
                dir = 1;
            } else {
                dir = 0;
            }
            qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_DATA_DIR, &dir);
            parm &= ~PARA_CTR_DIR;
        }

        qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_WRITE_CONTROL, &parm);
        s->control = val;
        break;
    case PARA_REG_EPP_ADDR:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT)
            /* Controls not correct for EPP address cycle, so do nothing */
            pdebug("wa%02x s\n", val);
        else {
            struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
            if (qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE_ADDR, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("wa%02x t\n", val);
            }
            else
                pdebug("wa%02x\n", val);
        }
        break;
    case PARA_REG_EPP_DATA:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT)
            /* Controls not correct for EPP data cycle, so do nothing */
            pdebug("we%02x s\n", val);
        else {
            struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
            if (qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("we%02x t\n", val);
            }
            else
                pdebug("we%02x\n", val);
        }
        break;
    }
}
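
/*
 * 16- and 32-bit EPP data writes, forwarded to the host port as a single
 * multi-byte transfer.  The value is converted to little-endian wire
 * order first; a failed ioctl latches the EPP timeout flag.
 */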
static void
parallel_ioport_eppdata_write_hw2(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint16_t eppdata = cpu_to_le16(val);
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("we%04x s\n", val);
        return;
    }
    err = qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
    if (err) {
        s->epp_timeout = 1;
        pdebug("we%04x t\n", val);
    }
    else
        pdebug("we%04x\n", val);
}

static void
parallel_ioport_eppdata_write_hw4(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint32_t eppdata = cpu_to_le32(val);
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("we%08x s\n", val);
        return;
    }
    err = qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
    if (err) {
        s->epp_timeout = 1;
        pdebug("we%08x t\n", val);
    }
    else
        pdebug("we%08x\n", val);
}
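
/*
 * Software-emulated register reads.  Reading the status register clears
 * any pending interrupt and toggles ACK/BUSY so that a guest polling the
 * handshake sees each byte being accepted.
 */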
static uint32_t parallel_ioport_read_sw(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret = 0xff;

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        if (s->control & PARA_CTR_DIR)
            ret = s->datar;
        else
            ret = s->dataw;
        break;
    case PARA_REG_STS:
        ret = s->status;
        s->irq_pending = 0;
        if ((s->status & PARA_STS_BUSY) == 0 && (s->control & PARA_CTR_STROBE) == 0) {
            /* XXX Fixme: wait 5 microseconds */
            if (s->status & PARA_STS_ACK)
                s->status &= ~PARA_STS_ACK;
            else {
                /* XXX Fixme: wait 5 microseconds */
                s->status |= PARA_STS_ACK;
                s->status |= PARA_STS_BUSY;
            }
        }
        parallel_update_irq(s);
        break;
    case PARA_REG_CTR:
        ret = s->control;
        break;
    }
    pdebug("read addr=0x%02x val=0x%02x\n", addr, ret);
    return ret;
}
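
/*
 * Hardware pass-through reads.  Data, status and EPP cycles are fetched
 * from the host port; the EPP timeout status bit is synthesised from the
 * result of earlier EPP transfers.  last_read_offset keeps repeated
 * polling of the same register from flooding the debug log.
 */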
static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint8_t ret = 0xff;

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_READ_DATA, &ret);
        if (s->last_read_offset != addr || s->datar != ret)
            pdebug("rd%02x\n", ret);
        s->datar = ret;
        break;
    case PARA_REG_STS:
        qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_READ_STATUS, &ret);
        ret &= ~PARA_STS_TMOUT;
        if (s->epp_timeout)
            ret |= PARA_STS_TMOUT;
        if (s->last_read_offset != addr || s->status != ret)
            pdebug("rs%02x\n", ret);
        s->status = ret;
        break;
    case PARA_REG_CTR:
        /* s->control has some bits fixed to 1. It is zero only when
           it has not yet been written to. */
        if (s->control == 0) {
            qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_READ_CONTROL, &ret);
            if (s->last_read_offset != addr)
                pdebug("rc%02x\n", ret);
            s->control = ret;
        }
        else {
            ret = s->control;
            if (s->last_read_offset != addr)
                pdebug("rc%02x\n", ret);
        }
        break;
    case PARA_REG_EPP_ADDR:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT))
            /* Controls not correct for EPP addr cycle, so do nothing */
            pdebug("ra%02x s\n", ret);
        else {
            struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
            if (qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ_ADDR, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("ra%02x t\n", ret);
            }
            else
                pdebug("ra%02x\n", ret);
        }
        break;
    case PARA_REG_EPP_DATA:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT))
            /* Controls not correct for EPP data cycle, so do nothing */
            pdebug("re%02x s\n", ret);
        else {
            struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
            if (qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("re%02x t\n", ret);
            }
            else
                pdebug("re%02x\n", ret);
        }
        break;
    }
    s->last_read_offset = addr;
    return ret;
}
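
/*
 * 16- and 32-bit EPP data reads, fetched from the host port as a single
 * multi-byte transfer and converted from little-endian wire order.
 */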
static uint32_t
parallel_ioport_eppdata_read_hw2(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret;
    uint16_t eppdata = ~0;
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("re%04x s\n", eppdata);
        return eppdata;
    }
    err = qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
    ret = le16_to_cpu(eppdata);

    if (err) {
        s->epp_timeout = 1;
        pdebug("re%04x t\n", ret);
    }
    else
        pdebug("re%04x\n", ret);
    return ret;
}

static uint32_t
parallel_ioport_eppdata_read_hw4(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret;
    uint32_t eppdata = ~0U;
    int err;
    struct ParallelIOArg ioarg = {
        .buffer = &eppdata, .count = sizeof(eppdata)
    };
    if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) {
        /* Controls not correct for EPP data cycle, so do nothing */
        pdebug("re%08x s\n", eppdata);
        return eppdata;
    }
    err = qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
    ret = le32_to_cpu(eppdata);

    if (err) {
        s->epp_timeout = 1;
        pdebug("re%08x t\n", ret);
    }
    else
        pdebug("re%08x\n", ret);
    return ret;
}
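
/* ECP registers (at base + 0x400) are not implemented; accesses are only
   logged and reads return 0xff. */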
static void parallel_ioport_ecp_write(void *opaque, uint32_t addr, uint32_t val)
{
    pdebug("wecp%d=%02x\n", addr & 7, val);
}

static uint32_t parallel_ioport_ecp_read(void *opaque, uint32_t addr)
{
    uint8_t ret = 0xff;

    pdebug("recp%d:%02x\n", addr & 7, ret);
    return ret;
}
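
/*
 * Reset to power-on defaults: printer selected and ready, EPP timeout
 * bit set, no interrupt pending, and software emulation assumed until a
 * hardware backend is detected at init time.
 */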
static void parallel_reset(void *opaque)
{
    ParallelState *s = opaque;

    s->datar = ~0;
    s->dataw = ~0;
    s->status = PARA_STS_BUSY;
    s->status |= PARA_STS_ACK;
    s->status |= PARA_STS_ONLINE;
    s->status |= PARA_STS_ERROR;
    s->status |= PARA_STS_TMOUT;
    s->control = PARA_CTR_SELECT;
    s->control |= PARA_CTR_INIT;
    s->control |= 0xc0;
    s->irq_pending = 0;
    s->hw_driver = 0;
    s->epp_timeout = 0;
    s->last_read_offset = ~0U;
}

static const int isa_parallel_io[MAX_PARALLEL_PORTS] = { 0x378, 0x278, 0x3bc };
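
/*
 * ISA device init: pick the I/O base and IRQ, then probe the character
 * device with CHR_IOCTL_PP_READ_STATUS.  If that ioctl succeeds, the
 * backend is a real host port and the pass-through handlers (including
 * the EPP and ECP ranges) are registered; otherwise the software
 * emulation handlers are used.
 */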
static int parallel_isa_initfn(ISADevice *dev)
{
    static int index;
    ISAParallelState *isa = DO_UPCAST(ISAParallelState, dev, dev);
    ParallelState *s = &isa->state;
    int base;
    uint8_t dummy;

    if (!s->chr) {
        fprintf(stderr, "Can't create parallel device, empty char device\n");
        exit(1);
    }

    if (isa->index == -1)
        isa->index = index;
    if (isa->index >= MAX_PARALLEL_PORTS)
        return -1;
    if (isa->iobase == -1)
        isa->iobase = isa_parallel_io[isa->index];
    index++;

    base = isa->iobase;
    isa_init_irq(dev, &s->irq, isa->isairq);
    qemu_register_reset(parallel_reset, s);

    if (qemu_chr_ioctl(s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) {
        s->hw_driver = 1;
        s->status = dummy;
    }

    if (s->hw_driver) {
        register_ioport_write(base, 8, 1, parallel_ioport_write_hw, s);
        register_ioport_read(base, 8, 1, parallel_ioport_read_hw, s);
        isa_init_ioport_range(dev, base, 8);

        register_ioport_write(base+4, 1, 2, parallel_ioport_eppdata_write_hw2, s);
        register_ioport_read(base+4, 1, 2, parallel_ioport_eppdata_read_hw2, s);
        register_ioport_write(base+4, 1, 4, parallel_ioport_eppdata_write_hw4, s);
        register_ioport_read(base+4, 1, 4, parallel_ioport_eppdata_read_hw4, s);
        isa_init_ioport(dev, base+4);

        register_ioport_write(base+0x400, 8, 1, parallel_ioport_ecp_write, s);
        register_ioport_read(base+0x400, 8, 1, parallel_ioport_ecp_read, s);
        isa_init_ioport_range(dev, base+0x400, 8);
    }
    else {
        register_ioport_write(base, 8, 1, parallel_ioport_write_sw, s);
        register_ioport_read(base, 8, 1, parallel_ioport_read_sw, s);
        isa_init_ioport_range(dev, base, 8);
    }
    return 0;
}

/* Memory mapped interface */
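/* All access sizes reuse the software-emulation handlers; addresses are
   scaled by it_shift so registers can be spaced 1 << it_shift bytes
   apart in the memory map. */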
static uint32_t parallel_mm_readb (void *opaque, target_phys_addr_t addr)
{
    ParallelState *s = opaque;

    return parallel_ioport_read_sw(s, addr >> s->it_shift) & 0xFF;
}

static void parallel_mm_writeb (void *opaque,
                                target_phys_addr_t addr, uint32_t value)
{
    ParallelState *s = opaque;

    parallel_ioport_write_sw(s, addr >> s->it_shift, value & 0xFF);
}

static uint32_t parallel_mm_readw (void *opaque, target_phys_addr_t addr)
{
    ParallelState *s = opaque;

    return parallel_ioport_read_sw(s, addr >> s->it_shift) & 0xFFFF;
}

static void parallel_mm_writew (void *opaque,
                                target_phys_addr_t addr, uint32_t value)
{
    ParallelState *s = opaque;

    parallel_ioport_write_sw(s, addr >> s->it_shift, value & 0xFFFF);
}

static uint32_t parallel_mm_readl (void *opaque, target_phys_addr_t addr)
{
    ParallelState *s = opaque;

    return parallel_ioport_read_sw(s, addr >> s->it_shift);
}

static void parallel_mm_writel (void *opaque,
                                target_phys_addr_t addr, uint32_t value)
{
    ParallelState *s = opaque;

    parallel_ioport_write_sw(s, addr >> s->it_shift, value);
}

static CPUReadMemoryFunc * const parallel_mm_read_sw[] = {
    &parallel_mm_readb,
    &parallel_mm_readw,
    &parallel_mm_readl,
};

static CPUWriteMemoryFunc * const parallel_mm_write_sw[] = {
    &parallel_mm_writeb,
    &parallel_mm_writew,
    &parallel_mm_writel,
};

/* Map a software-emulated parallel port at the given physical base address,
   with registers spaced 1 << it_shift bytes apart. */
bool parallel_mm_init(target_phys_addr_t base, int it_shift, qemu_irq irq,
                      CharDriverState *chr)
{
    ParallelState *s;
    int io_sw;

    s = qemu_mallocz(sizeof(ParallelState));
    s->irq = irq;
    s->chr = chr;
    s->it_shift = it_shift;
    qemu_register_reset(parallel_reset, s);

    io_sw = cpu_register_io_memory(parallel_mm_read_sw, parallel_mm_write_sw,
                                   s, DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, 8 << it_shift, io_sw);
    return true;
}

static ISADeviceInfo parallel_isa_info = {
    .qdev.name  = "isa-parallel",
    .qdev.size  = sizeof(ISAParallelState),
    .init       = parallel_isa_initfn,
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("index", ISAParallelState, index, -1),
        DEFINE_PROP_HEX32("iobase", ISAParallelState, iobase, -1),
        DEFINE_PROP_UINT32("irq", ISAParallelState, isairq, 7),
        DEFINE_PROP_CHR("chardev", ISAParallelState, state.chr),
        DEFINE_PROP_END_OF_LIST(),
    },
};

static void parallel_register_devices(void)
{
    isa_qdev_register(&parallel_isa_info);
}

device_init(parallel_register_devices)