dma.c

/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif
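
/* Per-channel state.  base[ADDR]/base[COUNT] hold the values programmed by
   the guest; now[ADDR] is the start address latched from base[ADDR] (shifted
   for the 16-bit controller) and now[COUNT] the current transfer position in
   bytes.  page/pageh are the low/high page registers that supply the upper
   bits of the physical address.  dack and eop are only kept for migration;
   this model does not otherwise use them. */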
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};
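
/* Indices into the now[]/base[] pairs above, plus per-controller state.
   dma_controllers[0] is the 8-bit controller (channels 0-3, dshift = 0) and
   dma_controllers[1] the 16-bit controller (channels 4-7, dshift = 1), as on
   a PC/AT. */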
#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
    qemu_irq *cpu_request_exit;
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME = 0x08,
    CMD_CYCLIC_PRIORITY = 0x10,
    CMD_EXTENDED_WRITE = 0x20,
    CMD_LOW_DREQ = 0x40,
    CMD_LOW_DACK = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);
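
/* Page register port offset -> channel.  On a PC the page registers at
   offsets 1, 2, 3 and 7 belong to channels 2, 3, 1 and 0 respectively;
   the remaining offsets are not channel page registers. */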
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}
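
/* Latch the programmed base address (word-shifted on the 16-bit controller)
   and reset the transfer position. */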
static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}
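
/* The 16-bit address and count registers are accessed one byte at a time
   through an internal flip-flop: low byte first, then high byte, toggling
   on every access. */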
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}
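
/* Read back one byte, selected by the flip-flop, of a channel's current
   address or count.  The count readback is the programmed count minus the
   bytes already transferred; the address is the latched start address plus
   (or minus, in decrement mode) the current position. */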
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}
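
/* Program one byte of a channel's base address or count; writing the high
   byte (second access, flip-flop set) also re-latches the channel state. */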
static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}
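
/* Controller register writes.  iport selects: 0x08 command, 0x09 request,
   0x0a single channel mask, 0x0b mode, 0x0c clear flip-flop, 0x0d master
   reset, 0x0e clear all masks, 0x0f write all masks.  Mask and request
   changes may unblock a pending transfer, hence the DMA_run() calls. */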
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
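
/* Controller register reads: 0x08 returns the status register (terminal
   count bits in the low nibble, cleared by the read; request bits in the
   high nibble), 0x0f returns the mask register. */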
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}
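
/* Public interface used by ISA device models (e.g. the floppy controller
   and SB16).  Channels are numbered 0-7; bit 2 selects the controller.
   A device registers a transfer handler, then raises DREQ to start the
   transfers and drops it when finished.  Purely illustrative sketch (the
   handler and buffer names are made up, not part of this file):

       static int my_dma_handler(void *opaque, int nchan, int pos, int size)
       {
           uint8_t buf[64];
           int n = size - pos < 64 ? size - pos : 64;

           DMA_read_memory(nchan, buf, pos, n);   // fetch guest data
           // ... consume buf ...
           return pos + n;                        // new transfer position
       }

       DMA_register_channel(2, my_dma_handler, opaque);
       DMA_hold_DREQ(2);
       // ...
       DMA_release_DREQ(2);
*/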
int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}
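
/* Run one channel: hand the current position and the total transfer size
   ((programmed count + 1), doubled on the 16-bit controller) to the
   registered handler and store the position it returns. */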
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}
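
/* Scan both controllers and run every channel that is unmasked and has a
   pending request.  If any channel ran, or if we were re-entered from a
   handler, reschedule via an idle bottom half so the transfer continues. */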
static QEMUBH *dma_bh;

static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;
    static int running = 0;

    if (running) {
        rearm = 1;
        goto out;
    } else {
        running = 1;
    }

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    running = 0;
out:
    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}
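
/* Copy between the guest memory addressed by a channel and a device buffer.
   The physical address is built from the high page register (bits 24-30),
   the page register (bits 16-23) and the latched start address; pos is the
   offset within the current block.  In address-decrement mode (mode bit 5)
   the block is accessed at descending addresses and the buffer is
   byte-reversed. */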
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_write (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len; i++) {
            uint8_t b = p[len - i - 1];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    struct dma_cont *d = &dma_controllers[nchan > 3];

    qemu_irq_pulse(*d->cpu_request_exit);
}

static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}
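
/* Map one controller's I/O ports: the eight address/count ports at base,
   the controller registers at base + (8 << dshift), the page registers at
   page_base and, when pageh_base is non-negative, the optional high page
   registers.  All channels start out with the phony handler installed. */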
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base,
                      qemu_irq *cpu_request_exit)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    d->cpu_request_exit = cpu_request_exit;

    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }

    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}
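
/* Migration state: the per-channel registers are saved via vmstate_dma_regs,
   embedded in vmstate_dma; dma_post_load() kicks DMA_run() so a transfer
   pending at save time resumes after load. */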
static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

static int dma_post_load(void *opaque, int version_id)
{
    DMA_run();

    return 0;
}

static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = dma_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};
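
/* Wire up the standard PC layout: the 8-bit controller at ports 0x00-0x0f
   with page registers at 0x80, the 16-bit controller at 0xc0-0xdf with page
   registers at 0x88, and (when enabled) high page registers at 0x480/0x488. */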
void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1, cpu_request_exit);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1, cpu_request_exit);
    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}