dma.c

/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif
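
/* Per-channel state: current transfer position (now[]), the programmed
   16-bit base address and count, the mode byte, the page registers and
   the owning device's transfer callback. */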
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};
#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);
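
/* Page-register port offset (nport & 7) -> channel number; -1 marks
   unassigned ports.  E.g. on controller 0: port 0x81 is channel 2's
   page register, 0x82 channel 3's, 0x83 channel 1's and 0x87 channel 0's. */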
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}
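
/* Read and toggle the controller's byte-pointer flip-flop: 0 selects the
   low byte of a 16-bit address/count register, 1 the high byte. */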
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}
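
/* The 16-bit channel registers are accessed one byte at a time through
   an 8-bit port; the flip-flop sequences low byte then high byte. */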
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}
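
/* Controller-wide (as opposed to per-channel) registers: command,
   request, mask, mode, flip-flop clear, master reset and mask write. */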
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09: /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a: /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b: /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c: /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d: /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e: /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f: /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f: /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}
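
/* Run one channel: hand control to the device's transfer handler, which
   moves up to (base[COUNT] + 1) << ncont bytes (the 16-bit controller
   counts in words) and returns the new transfer position. */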
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;

    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}
static QEMUBH *dma_bh;
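
/* Scan both controllers and run every channel that is unmasked and has
   a pending request (DREQ bit set in the status register).  Channels
   that ran are polled again from an idle bottom half. */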
static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}
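
/* Devices attach themselves to a channel at init time and raise DREQ
   when they have data to move.  A minimal sketch (the handler name and
   device pointer are illustrative, not part of this file):

       DMA_register_channel (2, my_transfer_handler, mydev);
       ...
       DMA_hold_DREQ (2);     // kicks DMA_run(), which calls the handler
       DMA_release_DREQ (2);  // when the device is done
*/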
void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}
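
/* The guest physical address is assembled from the high page register
   (bits 24..30), the page register (bits 16..23) and the channel's
   current 16-bit address. */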
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* Address-decrement mode: reverse the bytes so the caller sees
           them in transfer order.  What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}
int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        /* Address-decrement mode: reverse the buffer before writing so
           the bytes land in descending address order.  What about 16bit
           transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    CPUState *env = cpu_single_env;

    if (env)
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
}

static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
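/* Register the controller's I/O ports: channel address/count registers
   at base + (0..7 << dshift), control registers at base + (8..15 << dshift),
   page registers at page_base and optionally high page registers at
   pageh_base.  Unregistered channels get a logging phony handler. */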
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32 (f, d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32 (f, r->now[0]);
        qemu_put_be32 (f, r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}

static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    d->dshift = qemu_get_be32 (f);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        r->now[0] = qemu_get_be32 (f);
        r->now[1] = qemu_get_be32 (f);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }

    DMA_run();

    return 0;
}
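
/* A PC has two cascaded i8237 controllers: an 8-bit one at ports
   0x00-0x0f with page registers at 0x80, and a 16-bit one at 0xc0-0xdf
   with page registers at 0x88.  The optional EISA/PS2 high page
   registers live at 0x480 and 0x488. */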
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}