/* parallel.c */
  1. /*
  2. * QEMU Parallel PORT emulation
  3. *
  4. * Copyright (c) 2003-2005 Fabrice Bellard
  5. * Copyright (c) 2007 Marko Kohtala
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy
  8. * of this software and associated documentation files (the "Software"), to deal
  9. * in the Software without restriction, including without limitation the rights
  10. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11. * copies of the Software, and to permit persons to whom the Software is
  12. * furnished to do so, subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in
  15. * all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  23. * THE SOFTWARE.
  24. */
  25. #include "qemu/osdep.h"
  26. #include "qapi/error.h"
  27. #include "qemu/module.h"
  28. #include "chardev/char-parallel.h"
  29. #include "hw/acpi/acpi_aml_interface.h"
  30. #include "hw/qdev-properties.h"
  31. #include "hw/qdev-properties-system.h"
  32. #include "migration/vmstate.h"
  33. #include "hw/char/parallel-isa.h"
  34. #include "hw/char/parallel.h"
  35. #include "sysemu/reset.h"
  36. #include "sysemu/sysemu.h"
  37. #include "trace.h"
  38. #include "qom/object.h"
//#define DEBUG_PARALLEL

#ifdef DEBUG_PARALLEL
/* Debug logging, compiled out unless DEBUG_PARALLEL is defined above. */
#define pdebug(fmt, ...) printf("pp: " fmt, ## __VA_ARGS__)
#else
#define pdebug(fmt, ...) ((void)0)
#endif

/* Register offsets within the port's 8-byte ISA I/O window */
#define PARA_REG_DATA 0
#define PARA_REG_STS 1
#define PARA_REG_CTR 2
#define PARA_REG_EPP_ADDR 3
#define PARA_REG_EPP_DATA 4

/*
 * These are the definitions for the Printer Status Register
 */
#define PARA_STS_BUSY 0x80 /* Busy complement */
#define PARA_STS_ACK 0x40 /* Acknowledge */
#define PARA_STS_PAPER 0x20 /* Out of paper */
#define PARA_STS_ONLINE 0x10 /* Online */
#define PARA_STS_ERROR 0x08 /* Error complement */
#define PARA_STS_TMOUT 0x01 /* EPP timeout */

/*
 * These are the definitions for the Printer Control Register
 */
#define PARA_CTR_DIR 0x20 /* Direction (1=read, 0=write) */
#define PARA_CTR_INTEN 0x10 /* IRQ Enable */
#define PARA_CTR_SELECT 0x08 /* Select In complement */
#define PARA_CTR_INIT 0x04 /* Initialize Printer complement */
#define PARA_CTR_AUTOLF 0x02 /* Auto linefeed complement */
#define PARA_CTR_STROBE 0x01 /* Strobe complement */

/* All guest-driven handshake lines of the control register */
#define PARA_CTR_SIGNAL (PARA_CTR_SELECT|PARA_CTR_INIT|PARA_CTR_AUTOLF|PARA_CTR_STROBE)
  69. static void parallel_update_irq(ParallelState *s)
  70. {
  71. if (s->irq_pending)
  72. qemu_irq_raise(s->irq);
  73. else
  74. qemu_irq_lower(s->irq);
  75. }
/*
 * Software-emulated ("sw") register write: used when the chardev backend
 * is not a host parallel-port driver.  Printer handshaking is simulated
 * in s->status/s->control and data bytes are forwarded to the chardev.
 */
static void
parallel_ioport_write_sw(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;

    addr &= 7;
    trace_parallel_ioport_write("SW", addr, val);
    switch(addr) {
    case PARA_REG_DATA:
        /* Latch the data register; actual transmit happens on STROBE. */
        s->dataw = val;
        parallel_update_irq(s);
        break;
    case PARA_REG_CTR:
        val |= 0xc0;    /* top two control bits are unused, read back as 1 */
        if ((val & PARA_CTR_INIT) == 0 ) {
            /* INIT asserted (active low): reset to idle/ready status */
            s->status = PARA_STS_BUSY;
            s->status |= PARA_STS_ACK;
            s->status |= PARA_STS_ONLINE;
            s->status |= PARA_STS_ERROR;
        }
        else if (val & PARA_CTR_SELECT) {
            if (val & PARA_CTR_STROBE) {
                /* Rising strobe edge: transmit the latched data byte */
                s->status &= ~PARA_STS_BUSY;
                if ((s->control & PARA_CTR_STROBE) == 0)
                    /* XXX this blocks entire thread. Rewrite to use
                     * qemu_chr_fe_write and background I/O callbacks */
                    qemu_chr_fe_write_all(&s->chr, &s->dataw, 1);
            } else {
                if (s->control & PARA_CTR_INTEN) {
                    /* Strobe released with interrupts enabled: latch IRQ */
                    s->irq_pending = 1;
                }
            }
        }
        parallel_update_irq(s);
        s->control = val;
        break;
    }
}
/*
 * Hardware-backed ("hw") register write: the chardev backend drives a
 * real host parallel port, so register writes are translated into
 * CHR_IOCTL_PP_* requests instead of being emulated locally.
 */
static void parallel_ioport_write_hw(void *opaque, uint32_t addr, uint32_t val)
{
    ParallelState *s = opaque;
    uint8_t parm = val;
    int dir;

    /* Sometimes programs do several writes for timing purposes on old
       HW. Take care not to waste time on writes that do nothing. */

    /* Any write invalidates the cached "last register read" state. */
    s->last_read_offset = ~0U;
    addr &= 7;
    trace_parallel_ioport_write("HW", addr, val);
    switch(addr) {
    case PARA_REG_DATA:
        if (s->dataw == val)
            return;     /* no change: skip the host ioctl */
        pdebug("wd%02x\n", val);
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_DATA, &parm);
        s->dataw = val;
        break;
    case PARA_REG_STS:
        pdebug("ws%02x\n", val);
        /* Writing 1 to the timeout bit clears a pending EPP timeout. */
        if (val & PARA_STS_TMOUT)
            s->epp_timeout = 0;
        break;
    case PARA_REG_CTR:
        val |= 0xc0;    /* top two control bits are unused, read back as 1 */
        if (s->control == val)
            return;     /* no change: skip the host ioctls */
        pdebug("wc%02x\n", val);

        if ((val & PARA_CTR_DIR) != (s->control & PARA_CTR_DIR)) {
            /* Direction bit flipped: program the host port direction
               separately and keep DIR out of the control-byte write. */
            if (val & PARA_CTR_DIR) {
                dir = 1;
            } else {
                dir = 0;
            }
            qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_DATA_DIR, &dir);
            parm &= ~PARA_CTR_DIR;
        }

        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_WRITE_CONTROL, &parm);
        s->control = val;
        break;
    case PARA_REG_EPP_ADDR:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT)
            /* Controls not correct for EPP address cycle, so do nothing */
            pdebug("wa%02x s\n", val);
        else {
            struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
            /* A non-zero ioctl result is reported as an EPP timeout. */
            if (qemu_chr_fe_ioctl(&s->chr,
                                  CHR_IOCTL_PP_EPP_WRITE_ADDR, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("wa%02x t\n", val);
            }
            else
                pdebug("wa%02x\n", val);
        }
        break;
    case PARA_REG_EPP_DATA:
        if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT)
            /* Controls not correct for EPP data cycle, so do nothing */
            pdebug("we%02x s\n", val);
        else {
            struct ParallelIOArg ioarg = { .buffer = &parm, .count = 1 };
            if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("we%02x t\n", val);
            }
            else
                pdebug("we%02x\n", val);
        }
        break;
    }
}
  184. static void
  185. parallel_ioport_eppdata_write_hw2(void *opaque, uint32_t addr, uint32_t val)
  186. {
  187. ParallelState *s = opaque;
  188. uint16_t eppdata = cpu_to_le16(val);
  189. int err;
  190. struct ParallelIOArg ioarg = {
  191. .buffer = &eppdata, .count = sizeof(eppdata)
  192. };
  193. trace_parallel_ioport_write("EPP", addr, val);
  194. if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) {
  195. /* Controls not correct for EPP data cycle, so do nothing */
  196. pdebug("we%04x s\n", val);
  197. return;
  198. }
  199. err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
  200. if (err) {
  201. s->epp_timeout = 1;
  202. pdebug("we%04x t\n", val);
  203. }
  204. else
  205. pdebug("we%04x\n", val);
  206. }
  207. static void
  208. parallel_ioport_eppdata_write_hw4(void *opaque, uint32_t addr, uint32_t val)
  209. {
  210. ParallelState *s = opaque;
  211. uint32_t eppdata = cpu_to_le32(val);
  212. int err;
  213. struct ParallelIOArg ioarg = {
  214. .buffer = &eppdata, .count = sizeof(eppdata)
  215. };
  216. trace_parallel_ioport_write("EPP", addr, val);
  217. if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != PARA_CTR_INIT) {
  218. /* Controls not correct for EPP data cycle, so do nothing */
  219. pdebug("we%08x s\n", val);
  220. return;
  221. }
  222. err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_WRITE, &ioarg);
  223. if (err) {
  224. s->epp_timeout = 1;
  225. pdebug("we%08x t\n", val);
  226. }
  227. else
  228. pdebug("we%08x\n", val);
  229. }
/*
 * Software-emulated ("sw") register read.  Status reads fake the
 * BUSY/ACK handshake sequence a real printer would produce, so polling
 * guests make progress without a host port behind the device.
 */
static uint32_t parallel_ioport_read_sw(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint32_t ret = 0xff;

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        /* Direction bit selects which latched byte is visible. */
        if (s->control & PARA_CTR_DIR)
            ret = s->datar;
        else
            ret = s->dataw;
        break;
    case PARA_REG_STS:
        ret = s->status;
        /* Reading status acknowledges any pending interrupt. */
        s->irq_pending = 0;
        if ((s->status & PARA_STS_BUSY) == 0 && (s->control & PARA_CTR_STROBE) == 0) {
            /* XXX Fixme: wait 5 microseconds */
            if (s->status & PARA_STS_ACK)
                s->status &= ~PARA_STS_ACK;
            else {
                /* XXX Fixme: wait 5 microseconds */
                s->status |= PARA_STS_ACK;
                s->status |= PARA_STS_BUSY;
            }
        }
        parallel_update_irq(s);
        break;
    case PARA_REG_CTR:
        ret = s->control;
        break;
    }
    trace_parallel_ioport_read("SW", addr, ret);
    return ret;
}
/*
 * Hardware-backed ("hw") register read: values come from the host
 * parallel port via CHR_IOCTL_PP_* requests.  s->last_read_offset
 * tracks the last register read so repeated identical reads are not
 * re-logged by pdebug.
 */
static uint32_t parallel_ioport_read_hw(void *opaque, uint32_t addr)
{
    ParallelState *s = opaque;
    uint8_t ret = 0xff;

    addr &= 7;
    switch(addr) {
    case PARA_REG_DATA:
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_DATA, &ret);
        if (s->last_read_offset != addr || s->datar != ret)
            pdebug("rd%02x\n", ret);
        s->datar = ret;
        break;
    case PARA_REG_STS:
        qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &ret);
        /* The timeout bit is emulated locally, not taken from the host. */
        ret &= ~PARA_STS_TMOUT;
        if (s->epp_timeout)
            ret |= PARA_STS_TMOUT;
        if (s->last_read_offset != addr || s->status != ret)
            pdebug("rs%02x\n", ret);
        s->status = ret;
        break;
    case PARA_REG_CTR:
        /* s->control has some bits fixed to 1. It is zero only when
           it has not been yet written to. */
        if (s->control == 0) {
            qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_CONTROL, &ret);
            if (s->last_read_offset != addr)
                pdebug("rc%02x\n", ret);
            s->control = ret;
        }
        else {
            ret = s->control;
            if (s->last_read_offset != addr)
                pdebug("rc%02x\n", ret);
        }
        break;
    case PARA_REG_EPP_ADDR:
        if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) !=
            (PARA_CTR_DIR | PARA_CTR_INIT))
            /* Controls not correct for EPP addr cycle, so do nothing */
            pdebug("ra%02x s\n", ret);
        else {
            struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
            /* A non-zero ioctl result is reported as an EPP timeout. */
            if (qemu_chr_fe_ioctl(&s->chr,
                                  CHR_IOCTL_PP_EPP_READ_ADDR, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("ra%02x t\n", ret);
            }
            else
                pdebug("ra%02x\n", ret);
        }
        break;
    case PARA_REG_EPP_DATA:
        if ((s->control & (PARA_CTR_DIR | PARA_CTR_SIGNAL)) !=
            (PARA_CTR_DIR | PARA_CTR_INIT))
            /* Controls not correct for EPP data cycle, so do nothing */
            pdebug("re%02x s\n", ret);
        else {
            struct ParallelIOArg ioarg = { .buffer = &ret, .count = 1 };
            if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg)) {
                s->epp_timeout = 1;
                pdebug("re%02x t\n", ret);
            }
            else
                pdebug("re%02x\n", ret);
        }
        break;
    }
    trace_parallel_ioport_read("HW", addr, ret);
    s->last_read_offset = addr;
    return ret;
}
  336. static uint32_t
  337. parallel_ioport_eppdata_read_hw2(void *opaque, uint32_t addr)
  338. {
  339. ParallelState *s = opaque;
  340. uint32_t ret;
  341. uint16_t eppdata = ~0;
  342. int err;
  343. struct ParallelIOArg ioarg = {
  344. .buffer = &eppdata, .count = sizeof(eppdata)
  345. };
  346. if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) {
  347. /* Controls not correct for EPP data cycle, so do nothing */
  348. pdebug("re%04x s\n", eppdata);
  349. return eppdata;
  350. }
  351. err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
  352. ret = le16_to_cpu(eppdata);
  353. if (err) {
  354. s->epp_timeout = 1;
  355. pdebug("re%04x t\n", ret);
  356. }
  357. else
  358. pdebug("re%04x\n", ret);
  359. trace_parallel_ioport_read("EPP", addr, ret);
  360. return ret;
  361. }
  362. static uint32_t
  363. parallel_ioport_eppdata_read_hw4(void *opaque, uint32_t addr)
  364. {
  365. ParallelState *s = opaque;
  366. uint32_t ret;
  367. uint32_t eppdata = ~0U;
  368. int err;
  369. struct ParallelIOArg ioarg = {
  370. .buffer = &eppdata, .count = sizeof(eppdata)
  371. };
  372. if ((s->control & (PARA_CTR_DIR|PARA_CTR_SIGNAL)) != (PARA_CTR_DIR|PARA_CTR_INIT)) {
  373. /* Controls not correct for EPP data cycle, so do nothing */
  374. pdebug("re%08x s\n", eppdata);
  375. return eppdata;
  376. }
  377. err = qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_EPP_READ, &ioarg);
  378. ret = le32_to_cpu(eppdata);
  379. if (err) {
  380. s->epp_timeout = 1;
  381. pdebug("re%08x t\n", ret);
  382. }
  383. else
  384. pdebug("re%08x\n", ret);
  385. trace_parallel_ioport_read("EPP", addr, ret);
  386. return ret;
  387. }
  388. static void parallel_ioport_ecp_write(void *opaque, uint32_t addr, uint32_t val)
  389. {
  390. trace_parallel_ioport_write("ECP", addr & 7, val);
  391. pdebug("wecp%d=%02x\n", addr & 7, val);
  392. }
  393. static uint32_t parallel_ioport_ecp_read(void *opaque, uint32_t addr)
  394. {
  395. uint8_t ret = 0xff;
  396. trace_parallel_ioport_read("ECP", addr & 7, ret);
  397. pdebug("recp%d:%02x\n", addr & 7, ret);
  398. return ret;
  399. }
  400. static void parallel_reset(void *opaque)
  401. {
  402. ParallelState *s = opaque;
  403. s->datar = ~0;
  404. s->dataw = ~0;
  405. s->status = PARA_STS_BUSY;
  406. s->status |= PARA_STS_ACK;
  407. s->status |= PARA_STS_ONLINE;
  408. s->status |= PARA_STS_ERROR;
  409. s->status |= PARA_STS_TMOUT;
  410. s->control = PARA_CTR_SELECT;
  411. s->control |= PARA_CTR_INIT;
  412. s->control |= 0xc0;
  413. s->irq_pending = 0;
  414. s->hw_driver = 0;
  415. s->epp_timeout = 0;
  416. s->last_read_offset = ~0U;
  417. }
/* Legacy ISA I/O bases for LPT1..LPT3, indexed by port number. */
static const int isa_parallel_io[MAX_PARALLEL_PORTS] = { 0x378, 0x278, 0x3bc };

/* I/O map when a host parallel-port driver backs the device:
 * byte access to all 8 registers, 16/32-bit access to the EPP data
 * register at offset 4, and a stub ECP bank at base + 0x400. */
static const MemoryRegionPortio isa_parallel_portio_hw_list[] = {
    { 0, 8, 1,
      .read = parallel_ioport_read_hw,
      .write = parallel_ioport_write_hw },
    { 4, 1, 2,
      .read = parallel_ioport_eppdata_read_hw2,
      .write = parallel_ioport_eppdata_write_hw2 },
    { 4, 1, 4,
      .read = parallel_ioport_eppdata_read_hw4,
      .write = parallel_ioport_eppdata_write_hw4 },
    { 0x400, 8, 1,
      .read = parallel_ioport_ecp_read,
      .write = parallel_ioport_ecp_write },
    PORTIO_END_OF_LIST(),
};

/* I/O map for the software-emulated printer: byte access only. */
static const MemoryRegionPortio isa_parallel_portio_sw_list[] = {
    { 0, 8, 1,
      .read = parallel_ioport_read_sw,
      .write = parallel_ioport_write_sw },
    PORTIO_END_OF_LIST(),
};
/* Migration state: the guest-visible register latches plus the
 * pending-IRQ and EPP-timeout flags of the embedded ParallelState. */
static const VMStateDescription vmstate_parallel_isa = {
    .name = "parallel_isa",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(state.dataw, ISAParallelState),
        VMSTATE_UINT8(state.datar, ISAParallelState),
        VMSTATE_UINT8(state.status, ISAParallelState),
        VMSTATE_UINT8(state.control, ISAParallelState),
        VMSTATE_INT32(state.irq_pending, ISAParallelState),
        VMSTATE_INT32(state.epp_timeout, ISAParallelState),
        VMSTATE_END_OF_LIST()
    }
};
  454. static int parallel_can_receive(void *opaque)
  455. {
  456. return 1;
  457. }
/*
 * Realize the ISA parallel device: assign port index and I/O base,
 * wire the ISA IRQ, probe whether the chardev backend is a real host
 * parallel-port driver, and register the matching I/O handler table.
 */
static void parallel_isa_realizefn(DeviceState *dev, Error **errp)
{
    /* Auto-assignment counter shared by all instances; gives each
       device without an explicit "index" property the next free slot. */
    static int index;
    ISADevice *isadev = ISA_DEVICE(dev);
    ISAParallelState *isa = ISA_PARALLEL(dev);
    ParallelState *s = &isa->state;
    int base;
    uint8_t dummy;

    if (!qemu_chr_fe_backend_connected(&s->chr)) {
        error_setg(errp, "Can't create parallel device, empty char device");
        return;
    }

    if (isa->index == -1) {
        isa->index = index;
    }
    if (isa->index >= MAX_PARALLEL_PORTS) {
        error_setg(errp, "Max. supported number of parallel ports is %d.",
                   MAX_PARALLEL_PORTS);
        return;
    }
    if (isa->iobase == -1) {
        /* Pick the legacy I/O base matching the port index. */
        isa->iobase = isa_parallel_io[isa->index];
    }
    index++;

    base = isa->iobase;
    s->irq = isa_get_irq(isadev, isa->isairq);
    qemu_register_reset(parallel_reset, s);

    qemu_chr_fe_set_handlers(&s->chr, parallel_can_receive, NULL,
                             NULL, NULL, s, NULL, true);

    /* If the backend answers the status ioctl it is a host parallel-port
       driver; use the hw-passthrough handlers instead of emulation. */
    if (qemu_chr_fe_ioctl(&s->chr, CHR_IOCTL_PP_READ_STATUS, &dummy) == 0) {
        s->hw_driver = 1;
        s->status = dummy;
    }

    isa_register_portio_list(isadev, &isa->portio_list, base,
                             (s->hw_driver
                              ? &isa_parallel_portio_hw_list[0]
                              : &isa_parallel_portio_sw_list[0]),
                             s, "parallel");
}
/*
 * Describe the device to the guest firmware: emit an ACPI LPTn device
 * node (PNP0400) with the configured I/O range and IRQ.
 */
static void parallel_isa_build_aml(AcpiDevAmlIf *adev, Aml *scope)
{
    ISAParallelState *isa = ISA_PARALLEL(adev);
    Aml *dev;
    Aml *crs;

    crs = aml_resource_template();
    /* 8-byte I/O window at the device's base address */
    aml_append(crs, aml_io(AML_DECODE16, isa->iobase, isa->iobase, 0x08, 0x08));
    aml_append(crs, aml_irq_no_flags(isa->isairq));

    /* ACPI numbering is 1-based: index 0 becomes LPT1. */
    dev = aml_device("LPT%d", isa->index + 1);
    aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0400")));
    aml_append(dev, aml_name_decl("_UID", aml_int(isa->index + 1)));
    aml_append(dev, aml_name_decl("_STA", aml_int(0xf)));
    aml_append(dev, aml_name_decl("_CRS", crs));
    aml_append(scope, dev);
}
  512. /* Memory mapped interface */
  513. static uint64_t parallel_mm_readfn(void *opaque, hwaddr addr, unsigned size)
  514. {
  515. ParallelState *s = opaque;
  516. return parallel_ioport_read_sw(s, addr >> s->it_shift) &
  517. MAKE_64BIT_MASK(0, size * 8);
  518. }
  519. static void parallel_mm_writefn(void *opaque, hwaddr addr,
  520. uint64_t value, unsigned size)
  521. {
  522. ParallelState *s = opaque;
  523. parallel_ioport_write_sw(s, addr >> s->it_shift,
  524. value & MAKE_64BIT_MASK(0, size * 8));
  525. }
/* MMIO access table for the memory-mapped variant (1..4 byte accesses). */
static const MemoryRegionOps parallel_mm_ops = {
    .read = parallel_mm_readfn,
    .write = parallel_mm_writefn,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/*
 * Create a memory-mapped parallel port (software emulation only) at
 * @base in @address_space, with registers spaced 1 << @it_shift bytes
 * apart, signalling @irq and backed by chardev @chr.  Always returns
 * true; chardev init aborts on error (error_abort).
 */
bool parallel_mm_init(MemoryRegion *address_space,
                      hwaddr base, int it_shift, qemu_irq irq,
                      Chardev *chr)
{
    ParallelState *s;

    s = g_new0(ParallelState, 1);
    s->irq = irq;
    qemu_chr_fe_init(&s->chr, chr, &error_abort);
    s->it_shift = it_shift;
    qemu_register_reset(parallel_reset, s);

    memory_region_init_io(&s->iomem, NULL, &parallel_mm_ops, s,
                          "parallel", 8 << it_shift);
    memory_region_add_subregion(address_space, base, &s->iomem);
    return true;
}
/* User-configurable properties; index/iobase default to -1 ("auto",
 * resolved in realize), IRQ defaults to the legacy LPT IRQ 7. */
static const Property parallel_isa_properties[] = {
    DEFINE_PROP_UINT32("index", ISAParallelState, index, -1),
    DEFINE_PROP_UINT32("iobase", ISAParallelState, iobase, -1),
    DEFINE_PROP_UINT32("irq", ISAParallelState, isairq, 7),
    DEFINE_PROP_CHR("chardev", ISAParallelState, state.chr),
};
/* Class init: wire realize, migration state, ACPI AML generation and
 * the user-visible properties for TYPE_ISA_PARALLEL. */
static void parallel_isa_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);

    dc->realize = parallel_isa_realizefn;
    dc->vmsd = &vmstate_parallel_isa;
    adevc->build_dev_aml = parallel_isa_build_aml;
    device_class_set_props(dc, parallel_isa_properties);
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
/* QOM type registration data: an ISA device implementing the ACPI AML
 * interface so firmware tables can describe it. */
static const TypeInfo parallel_isa_info = {
    .name          = TYPE_ISA_PARALLEL,
    .parent        = TYPE_ISA_DEVICE,
    .instance_size = sizeof(ISAParallelState),
    .class_init    = parallel_isa_class_initfn,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_ACPI_DEV_AML_IF },
        { },
    },
};
/* Register the ISA parallel-port type with the QOM type system. */
static void parallel_register_types(void)
{
    type_register_static(&parallel_isa_info);
}

type_init(parallel_register_types)