/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <hw/hw.h>
#include <hw/pc.h>
#include <hw/pci/pci.h>
#include <hw/isa.h>
#include "block.h"
#include "dma.h"

#include <hw/ide/pci.h>

#define BMDMA_PAGE_SIZE 4096
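
/*
 * Latch the active drive, completion callback and initial sector state for
 * a new DMA transfer.  If BM_STATUS_DMAING is already set (the guest has
 * started the bus master engine), kick the transfer immediately; otherwise
 * it starts when bmdma_cmd_writeb() sees BM_CMD_START being set.
 */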
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockDriverCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->unit = s->unit;
    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = ide_get_sector(s);
    bm->nsector = s->nsector;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}
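
/*
 * Each PRD (Physical Region Descriptor) table entry is two little-endian
 * 32-bit words: the physical base address of a memory region, then a byte
 * count in the low 16 bits (0 means 64 KiB), with bit 31 of the second
 * word marking the final entry of the table.
 */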
/* return 0 if buffer completed */
static int bmdma_prepare_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, &bm->pci_dev->dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for(;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->io_buffer_size != 0;
            }
            pci_dma_read(&bm->pci_dev->dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            qemu_sglist_add(&s->sg, bm->cur_prd_addr, l);
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    return 1;
}
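
/*
 * Copy between the device's internal io_buffer and guest memory, one PRD
 * entry at a time.  Used for transfers staged through io_buffer (e.g. the
 * ATAPI path) rather than through the scatter/gather list built above.
 */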
/* return 0 if buffer completed */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for(;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(&bm->pci_dev->dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(&bm->pci_dev->dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(&bm->pci_dev->dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }

    return 1;
}
static int bmdma_set_unit(IDEDMA *dma, int unit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->unit = unit;

    return 0;
}

static int bmdma_add_status(IDEDMA *dma, int status)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    bm->status |= status;

    return 0;
}

static int bmdma_set_inactive(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->status &= ~BM_STATUS_DMAING;
    bm->dma_cb = NULL;
    bm->unit = -1;

    return 0;
}
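
/*
 * Rearm a transfer that was interrupted by an error or by migration:
 * restore the sector position and count that bmdma_start_dma() latched,
 * rewind the PRD table pointer and start the DMA over from the beginning.
 */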
static void bmdma_restart_dma(BMDMAState *bm, enum ide_dma_cmd dma_cmd)
{
    IDEState *s = bmdma_active_if(bm);

    ide_set_sector(s, bm->sector_num);
    s->io_buffer_index = 0;
    s->io_buffer_size = 0;
    s->nsector = bm->nsector;
    s->dma_cmd = dma_cmd;
    bm->cur_addr = bm->addr;
    bm->dma_cb = ide_dma_cb;
    bmdma_start_dma(&bm->dma, s, bm->dma_cb);
}
/* TODO This should be common IDE code */
static void bmdma_restart_bh(void *opaque)
{
    BMDMAState *bm = opaque;
    IDEBus *bus = bm->bus;
    bool is_read;
    int error_status;

    qemu_bh_delete(bm->bh);
    bm->bh = NULL;

    if (bm->unit == (uint8_t) -1) {
        return;
    }

    is_read = (bus->error_status & BM_STATUS_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    error_status = bus->error_status;
    bus->error_status = 0;

    if (error_status & BM_STATUS_DMA_RETRY) {
        if (error_status & BM_STATUS_RETRY_TRIM) {
            bmdma_restart_dma(bm, IDE_DMA_TRIM);
        } else {
            bmdma_restart_dma(bm, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (error_status & BM_STATUS_PIO_RETRY) {
        if (is_read) {
            ide_sector_read(bmdma_active_if(bm));
        } else {
            ide_sector_write(bmdma_active_if(bm));
        }
    } else if (error_status & BM_STATUS_RETRY_FLUSH) {
        ide_flush_cache(bmdma_active_if(bm));
    }
}
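
/*
 * VM state change handler: when the VM resumes execution, retry any request
 * that previously failed.  The retry is deferred to a bottom half so that
 * it runs outside the state change callback itself.
 */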
static void bmdma_restart_cb(void *opaque, int running, RunState state)
{
    IDEDMA *dma = opaque;
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    if (!running) {
        return;
    }

    if (!bm->bh) {
        bm->bh = qemu_bh_new(bmdma_restart_bh, &bm->dma);
        qemu_bh_schedule(bm->bh);
    }
}
static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma);
    }
}

static int bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

#ifdef DEBUG_IDE
    printf("ide: dma_reset\n");
#endif
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
    bm->sector_num = 0;
    bm->nsector = 0;

    return 0;
}

static int bmdma_start_transfer(IDEDMA *dma)
{
    return 0;
}
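
/*
 * IRQ line interposer: interrupts raised by the IDE bus are routed through
 * here so that a rising edge also latches BM_STATUS_INT in the bus master
 * status register before being forwarded to the real IRQ line.
 */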
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}
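
/*
 * Write handler for the bus master command register.  Bit 0 (BM_CMD_START,
 * a.k.a. SSBM) starts and stops the DMA engine, and bit 3 selects the
 * transfer direction; only these two bits are writable, which is why the
 * register is masked with 0x09 below.
 */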
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, val);
#endif

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            /*
             * We can't cancel Scatter Gather DMA in the middle of the
             * operation or a partial (not full) DMA transfer would reach
             * the storage, so we wait for completion instead (we behave
             * as if the DMA was completed by the time the guest tried to
             * cancel it by writing to this register with BM_CMD_START not
             * set).
             *
             * In the future we'll be able to safely cancel the I/O if the
             * whole DMA operation will be submitted to disk with a single
             * aio operation with preadv/pwritev.
             */
            if (bm->bus->dma->aiocb) {
                bdrv_drain_all();
                assert(bm->bus->dma->aiocb == NULL);
                assert((bm->status & BM_STATUS_DMAING) == 0);
            }
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}
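
/*
 * The 32-bit PRD table address register may be accessed with byte, word or
 * dword operations at any byte offset within it; 'addr' is the byte offset
 * into the register, hence the shift and mask arithmetic below.
 */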
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

#ifdef DEBUG_IDE
    printf("%s: 0x%08x\n", __func__, (unsigned)data);
#endif
    /* keep the PRD table pointer dword-aligned: bits 0-1 always read 0 */
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
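
/*
 * Migration: the in-flight PRD walk state and the real status register are
 * sent as optional subsections, so the stream stays compatible with older
 * QEMU versions whenever those fields hold nothing of interest.
 */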
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

static void ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }

    return 0;
}
static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField []) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(sector_num, BMDMAState),
        VMSTATE_UINT32(nsector, BMDMAState),
        VMSTATE_UINT8(unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_bmdma_current,
            .needed = ide_bmdma_current_needed,
        }, {
            .vmsd = &vmstate_bmdma_status,
            .needed = ide_bmdma_status_needed,
        }, {
            /* empty */
        }
    }
};
static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions stored
           bigger values. We only need the last bit */
        d->bmdma[i].unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};
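
/*
 * Attach up to four drives to a PCI IDE controller: hd_table[0..3] map to
 * primary master, primary slave, secondary master and secondary slave, in
 * that order; NULL entries are skipped.
 */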
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}
static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .start_transfer = bmdma_start_transfer,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .set_unit = bmdma_set_unit,
    .add_status = bmdma_add_status,
    .set_inactive = bmdma_set_inactive,
    .restart_cb = bmdma_restart_cb,
    .reset = bmdma_reset,
};
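
/*
 * Wire a BMDMA engine into an IDE bus: install the DMA ops and interpose
 * bmdma_irq() on the bus IRQ line so BM_STATUS_INT gets latched (see above).
 * The early return makes this safe to call more than once for the same
 * bus/engine pair.
 */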
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    qemu_irq *irq;

    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    irq = qemu_allocate_irqs(bmdma_irq, bm, 1);
    bus->irq = *irq;
    bm->pci_dev = d;
}