2
0

ahci.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813
  1. /*
  2. * QEMU AHCI Emulation
  3. *
  4. * Copyright (c) 2010 qiaochong@loongson.cn
  5. * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
  6. * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
  7. * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
  8. *
  9. * This library is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * This library is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  21. *
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/irq.h"
  25. #include "migration/vmstate.h"
  26. #include "qemu/error-report.h"
  27. #include "qemu/log.h"
  28. #include "qemu/main-loop.h"
  29. #include "system/block-backend.h"
  30. #include "system/dma.h"
  31. #include "ahci-internal.h"
  32. #include "ide-internal.h"
  33. #include "trace.h"
/* Forward declarations for helpers defined later in this file. */
static void check_cmd(AHCIState *s, int port);
static void handle_cmd(AHCIState *s, int port, uint8_t slot);
static void ahci_reset_port(AHCIState *s, int port);
static bool ahci_write_fis_d2h(AHCIDevice *ad, bool d2h_fis_i);
static void ahci_clear_cmd_issue(AHCIDevice *ad, uint8_t slot);
static void ahci_init_d2h(AHCIDevice *ad);
static int ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit);
static bool ahci_map_clb_address(AHCIDevice *ad);
static bool ahci_map_fis_address(AHCIDevice *ad);
static void ahci_unmap_clb_address(AHCIDevice *ad);
static void ahci_unmap_fis_address(AHCIDevice *ad);
/*
 * Human-readable names for the generic host control registers
 * (AHCI 1.3 section 3.1), indexed by enum AHCIHostReg.
 * Used only by trace points below.
 */
static const char *AHCIHostReg_lookup[AHCI_HOST_REG__COUNT] = {
    [AHCI_HOST_REG_CAP]        = "CAP",
    [AHCI_HOST_REG_CTL]        = "GHC",
    [AHCI_HOST_REG_IRQ_STAT]   = "IS",
    [AHCI_HOST_REG_PORTS_IMPL] = "PI",
    [AHCI_HOST_REG_VERSION]    = "VS",
    [AHCI_HOST_REG_CCC_CTL]    = "CCC_CTL",
    [AHCI_HOST_REG_CCC_PORTS]  = "CCC_PORTS",
    [AHCI_HOST_REG_EM_LOC]     = "EM_LOC",
    [AHCI_HOST_REG_EM_CTL]     = "EM_CTL",
    [AHCI_HOST_REG_CAP2]       = "CAP2",
    [AHCI_HOST_REG_BOHC]       = "BOHC",
};
  58. static const char *AHCIPortReg_lookup[AHCI_PORT_REG__COUNT] = {
  59. [AHCI_PORT_REG_LST_ADDR] = "PxCLB",
  60. [AHCI_PORT_REG_LST_ADDR_HI] = "PxCLBU",
  61. [AHCI_PORT_REG_FIS_ADDR] = "PxFB",
  62. [AHCI_PORT_REG_FIS_ADDR_HI] = "PxFBU",
  63. [AHCI_PORT_REG_IRQ_STAT] = "PxIS",
  64. [AHCI_PORT_REG_IRQ_MASK] = "PXIE",
  65. [AHCI_PORT_REG_CMD] = "PxCMD",
  66. [7] = "Reserved",
  67. [AHCI_PORT_REG_TFDATA] = "PxTFD",
  68. [AHCI_PORT_REG_SIG] = "PxSIG",
  69. [AHCI_PORT_REG_SCR_STAT] = "PxSSTS",
  70. [AHCI_PORT_REG_SCR_CTL] = "PxSCTL",
  71. [AHCI_PORT_REG_SCR_ERR] = "PxSERR",
  72. [AHCI_PORT_REG_SCR_ACT] = "PxSACT",
  73. [AHCI_PORT_REG_CMD_ISSUE] = "PxCI",
  74. [AHCI_PORT_REG_SCR_NOTIF] = "PxSNTF",
  75. [AHCI_PORT_REG_FIS_CTL] = "PxFBS",
  76. [AHCI_PORT_REG_DEV_SLEEP] = "PxDEVSLP",
  77. [18 ... 27] = "Reserved",
  78. [AHCI_PORT_REG_VENDOR_1 ...
  79. AHCI_PORT_REG_VENDOR_4] = "PxVS",
  80. };
/*
 * Human-readable names for the PxIS/PxIE interrupt bits
 * (AHCI 1.3 section 3.3.5), indexed by enum AHCIPortIRQ.
 * Used only by trace points below.
 */
static const char *AHCIPortIRQ_lookup[AHCI_PORT_IRQ__COUNT] = {
    [AHCI_PORT_IRQ_BIT_DHRS] = "DHRS",
    [AHCI_PORT_IRQ_BIT_PSS]  = "PSS",
    [AHCI_PORT_IRQ_BIT_DSS]  = "DSS",
    [AHCI_PORT_IRQ_BIT_SDBS] = "SDBS",
    [AHCI_PORT_IRQ_BIT_UFS]  = "UFS",
    [AHCI_PORT_IRQ_BIT_DPS]  = "DPS",
    [AHCI_PORT_IRQ_BIT_PCS]  = "PCS",
    [AHCI_PORT_IRQ_BIT_DMPS] = "DMPS",
    [8 ... 21]               = "RESERVED",
    [AHCI_PORT_IRQ_BIT_PRCS] = "PRCS",
    [AHCI_PORT_IRQ_BIT_IPMS] = "IPMS",
    [AHCI_PORT_IRQ_BIT_OFS]  = "OFS",
    [25]                     = "RESERVED",
    [AHCI_PORT_IRQ_BIT_INFS] = "INFS",
    [AHCI_PORT_IRQ_BIT_IFS]  = "IFS",
    [AHCI_PORT_IRQ_BIT_HBDS] = "HBDS",
    [AHCI_PORT_IRQ_BIT_HBFS] = "HBFS",
    [AHCI_PORT_IRQ_BIT_TFES] = "TFES",
    [AHCI_PORT_IRQ_BIT_CPDS] = "CPDS"
};
/*
 * Read one 32-bit per-port register (AHCI 1.3 section 3.3).
 *
 * @s:      HBA state
 * @port:   port index
 * @offset: byte offset of the register within this port's register bank
 *
 * Returns the register value; unimplemented registers read as zero.
 */
static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
{
    uint32_t val;
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    enum AHCIPortReg regnum = offset / sizeof(uint32_t);
    assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
    switch (regnum) {
    case AHCI_PORT_REG_LST_ADDR:
        val = pr->lst_addr;
        break;
    case AHCI_PORT_REG_LST_ADDR_HI:
        val = pr->lst_addr_hi;
        break;
    case AHCI_PORT_REG_FIS_ADDR:
        val = pr->fis_addr;
        break;
    case AHCI_PORT_REG_FIS_ADDR_HI:
        val = pr->fis_addr_hi;
        break;
    case AHCI_PORT_REG_IRQ_STAT:
        val = pr->irq_stat;
        break;
    case AHCI_PORT_REG_IRQ_MASK:
        val = pr->irq_mask;
        break;
    case AHCI_PORT_REG_CMD:
        val = pr->cmd;
        break;
    case AHCI_PORT_REG_TFDATA:
        val = pr->tfdata;
        break;
    case AHCI_PORT_REG_SIG:
        val = pr->sig;
        break;
    case AHCI_PORT_REG_SCR_STAT:
        /* PxSSTS is synthesized on the fly: report a Gen1 device with the
         * PHY up if a block backend is attached, otherwise "no device". */
        if (s->dev[port].port.ifs[0].blk) {
            val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
                  SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
        } else {
            val = SATA_SCR_SSTATUS_DET_NODEV;
        }
        break;
    case AHCI_PORT_REG_SCR_CTL:
        val = pr->scr_ctl;
        break;
    case AHCI_PORT_REG_SCR_ERR:
        val = pr->scr_err;
        break;
    case AHCI_PORT_REG_SCR_ACT:
        val = pr->scr_act;
        break;
    case AHCI_PORT_REG_CMD_ISSUE:
        val = pr->cmd_issue;
        break;
    default:
        /* Reserved / unimplemented registers read as zero */
        trace_ahci_port_read_default(s, port, AHCIPortReg_lookup[regnum],
                                     offset);
        val = 0;
    }
    trace_ahci_port_read(s, port, AHCIPortReg_lookup[regnum], offset, val);
    return val;
}
  164. static void ahci_check_irq(AHCIState *s)
  165. {
  166. int i;
  167. uint32_t old_irq = s->control_regs.irqstatus;
  168. s->control_regs.irqstatus = 0;
  169. for (i = 0; i < s->ports; i++) {
  170. AHCIPortRegs *pr = &s->dev[i].port_regs;
  171. if (pr->irq_stat & pr->irq_mask) {
  172. s->control_regs.irqstatus |= (1 << i);
  173. }
  174. }
  175. trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus);
  176. if (s->control_regs.irqstatus &&
  177. (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
  178. trace_ahci_irq_raise(s);
  179. qemu_irq_raise(s->irq);
  180. } else {
  181. trace_ahci_irq_lower(s);
  182. qemu_irq_lower(s->irq);
  183. }
  184. }
  185. static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
  186. enum AHCIPortIRQ irqbit)
  187. {
  188. g_assert((unsigned)irqbit < 32);
  189. uint32_t irq = 1U << irqbit;
  190. uint32_t irqstat = d->port_regs.irq_stat | irq;
  191. trace_ahci_trigger_irq(s, d->port_no,
  192. AHCIPortIRQ_lookup[irqbit], irq,
  193. d->port_regs.irq_stat, irqstat,
  194. irqstat & d->port_regs.irq_mask);
  195. d->port_regs.irq_stat = irqstat;
  196. ahci_check_irq(s);
  197. }
/*
 * (Re)map a guest-physical page for device access.
 *
 * @as:     address space to map from
 * @ptr:    in/out: host pointer; an existing mapping is unmapped first
 * @addr:   guest physical address to map
 * @wanted: number of bytes required
 *
 * On return *ptr is either a mapping covering all @wanted bytes, or NULL.
 * A partial mapping (len < wanted) is rejected and unmapped immediately.
 */
static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
                     uint32_t wanted)
{
    hwaddr len = wanted;

    if (*ptr) {
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
    }

    *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE,
                          MEMTXATTRS_UNSPECIFIED);
    if (len < wanted && *ptr) {
        /* dma_memory_map may return a shorter mapping than requested;
         * treat that as failure. */
        dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
        *ptr = NULL;
    }
}
  212. /**
  213. * Check the cmd register to see if we should start or stop
  214. * the DMA or FIS RX engines.
  215. *
  216. * @ad: Device to dis/engage.
  217. *
  218. * @return 0 on success, -1 on error.
  219. */
  220. static int ahci_cond_start_engines(AHCIDevice *ad)
  221. {
  222. AHCIPortRegs *pr = &ad->port_regs;
  223. bool cmd_start = pr->cmd & PORT_CMD_START;
  224. bool cmd_on = pr->cmd & PORT_CMD_LIST_ON;
  225. bool fis_start = pr->cmd & PORT_CMD_FIS_RX;
  226. bool fis_on = pr->cmd & PORT_CMD_FIS_ON;
  227. if (cmd_start && !cmd_on) {
  228. if (!ahci_map_clb_address(ad)) {
  229. pr->cmd &= ~PORT_CMD_START;
  230. error_report("AHCI: Failed to start DMA engine: "
  231. "bad command list buffer address");
  232. return -1;
  233. }
  234. } else if (!cmd_start && cmd_on) {
  235. ahci_unmap_clb_address(ad);
  236. }
  237. if (fis_start && !fis_on) {
  238. if (!ahci_map_fis_address(ad)) {
  239. pr->cmd &= ~PORT_CMD_FIS_RX;
  240. error_report("AHCI: Failed to start FIS receive engine: "
  241. "bad FIS receive buffer address");
  242. return -1;
  243. }
  244. } else if (!fis_start && fis_on) {
  245. ahci_unmap_fis_address(ad);
  246. }
  247. return 0;
  248. }
/*
 * Write one 32-bit per-port register (AHCI 1.3 section 3.3).
 *
 * Read-only registers are ignored; write-1-to-clear (PxIS, PxSERR) and
 * write-1-to-set (PxSACT, PxCI) semantics are implemented per register.
 */
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;
    enum AHCIPortReg regnum = offset / sizeof(uint32_t);
    assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
    trace_ahci_port_write(s, port, AHCIPortReg_lookup[regnum], offset, val);
    switch (regnum) {
    case AHCI_PORT_REG_LST_ADDR:
        pr->lst_addr = val;
        break;
    case AHCI_PORT_REG_LST_ADDR_HI:
        pr->lst_addr_hi = val;
        break;
    case AHCI_PORT_REG_FIS_ADDR:
        pr->fis_addr = val;
        break;
    case AHCI_PORT_REG_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        break;
    case AHCI_PORT_REG_IRQ_STAT:
        /* PxIS is write-1-to-clear */
        pr->irq_stat &= ~val;
        ahci_check_irq(s);
        break;
    case AHCI_PORT_REG_IRQ_MASK:
        /* Mask off reserved bits of PxIE */
        pr->irq_mask = val & 0xfdc000ff;
        ahci_check_irq(s);
        break;
    case AHCI_PORT_REG_CMD:
        /* Clearing PxCMD.ST discards outstanding command/NCQ state */
        if ((pr->cmd & PORT_CMD_START) && !(val & PORT_CMD_START)) {
            pr->scr_act = 0;
            pr->cmd_issue = 0;
        }
        /* Block any Read-only fields from being set;
         * including LIST_ON and FIS_ON.
         * The spec requires to set ICC bits to zero after the ICC change
         * is done. We don't support ICC state changes, therefore always
         * force the ICC bits to zero.
         */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
                  (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK));
        /* Check FIS RX and CLB engines */
        ahci_cond_start_engines(&s->dev[port]);
        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
        }
        check_cmd(s, port);
        break;
    case AHCI_PORT_REG_TFDATA:
    case AHCI_PORT_REG_SIG:
    case AHCI_PORT_REG_SCR_STAT:
        /* Read Only */
        break;
    case AHCI_PORT_REG_SCR_CTL:
        /* A DET transition from 1 (perform interface reset) to 0 completes
         * the COMRESET of the port. */
        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }
        pr->scr_ctl = val;
        break;
    case AHCI_PORT_REG_SCR_ERR:
        /* PxSERR is write-1-to-clear */
        pr->scr_err &= ~val;
        break;
    case AHCI_PORT_REG_SCR_ACT:
        /* RW1 */
        pr->scr_act |= val;
        break;
    case AHCI_PORT_REG_CMD_ISSUE:
        /* RW1: each bit set issues the command in that slot */
        pr->cmd_issue |= val;
        check_cmd(s, port);
        break;
    default:
        trace_ahci_port_write_unimpl(s, port, AHCIPortReg_lookup[regnum],
                                     offset, val);
        qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
                      "AHCI port %d register %s, offset 0x%x: 0x%"PRIx32,
                      port, AHCIPortReg_lookup[regnum], offset, val);
        break;
    }
}
/*
 * Read one aligned 32-bit word from the HBA memory space: either a
 * generic host control register, or (for addresses within the per-port
 * window) a register of the selected port. Unknown addresses read as 0.
 */
static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
{
    AHCIState *s = opaque;
    uint32_t val = 0;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        enum AHCIHostReg regnum = addr / 4;
        assert(regnum < AHCI_HOST_REG__COUNT);
        switch (regnum) {
        case AHCI_HOST_REG_CAP:
            val = s->control_regs.cap;
            break;
        case AHCI_HOST_REG_CTL:
            val = s->control_regs.ghc;
            break;
        case AHCI_HOST_REG_IRQ_STAT:
            val = s->control_regs.irqstatus;
            break;
        case AHCI_HOST_REG_PORTS_IMPL:
            val = s->control_regs.impl;
            break;
        case AHCI_HOST_REG_VERSION:
            val = s->control_regs.version;
            break;
        default:
            trace_ahci_mem_read_32_host_default(s, AHCIHostReg_lookup[regnum],
                                                addr);
        }
        trace_ahci_mem_read_32_host(s, AHCIHostReg_lookup[regnum], addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        /* Each port occupies a 0x80-byte window, hence the >> 7 */
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    } else {
        trace_ahci_mem_read_32_default(s, addr, val);
    }

    trace_ahci_mem_read_32(s, addr, val);
    return val;
}
  372. /**
  373. * AHCI 1.3 section 3 ("HBA Memory Registers")
  374. * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
  375. * Caller is responsible for masking unwanted higher order bytes.
  376. */
  377. static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
  378. {
  379. hwaddr aligned = addr & ~0x3;
  380. int ofst = addr - aligned;
  381. uint64_t lo = ahci_mem_read_32(opaque, aligned);
  382. uint64_t hi;
  383. uint64_t val;
  384. /* if < 8 byte read does not cross 4 byte boundary */
  385. if (ofst + size <= 4) {
  386. val = lo >> (ofst * 8);
  387. } else {
  388. g_assert(size > 1);
  389. /* If the 64bit read is unaligned, we will produce undefined
  390. * results. AHCI does not support unaligned 64bit reads. */
  391. hi = ahci_mem_read_32(opaque, aligned + 4);
  392. val = (hi << 32 | lo) >> (ofst * 8);
  393. }
  394. trace_ahci_mem_read(opaque, size, addr, val);
  395. return val;
  396. }
/*
 * Write to the HBA memory space: generic host control registers, then
 * per-port registers; anything else is logged as unimplemented.
 * Misaligned writes are rejected.
 */
static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;
    trace_ahci_mem_write(s, size, addr, val);

    /* Only aligned writes are allowed on AHCI */
    if (addr & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ahci: Mis-aligned write to addr 0x%03" HWADDR_PRIX "\n",
                      addr);
        return;
    }

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        enum AHCIHostReg regnum = addr / 4;
        assert(regnum < AHCI_HOST_REG__COUNT);
        switch (regnum) {
        case AHCI_HOST_REG_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case AHCI_HOST_REG_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                ahci_reset(s);
            } else {
                /* Only IE and HR are writable; AE reads back as set */
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
                ahci_check_irq(s);
            }
            break;
        case AHCI_HOST_REG_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
            ahci_check_irq(s);
            break;
        case AHCI_HOST_REG_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case AHCI_HOST_REG_VERSION: /* RO */
            /* FIXME report write? */
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "Attempted write to unimplemented register: "
                          "AHCI host register %s, "
                          "offset 0x%"PRIx64": 0x%"PRIx64,
                          AHCIHostReg_lookup[regnum], addr, val);
            trace_ahci_mem_write_host_unimpl(s, size,
                                             AHCIHostReg_lookup[regnum], addr);
        }
        trace_ahci_mem_write_host(s, size, AHCIHostReg_lookup[regnum],
                                  addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        /* Each port occupies a 0x80-byte window, hence the >> 7 */
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    } else {
        qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
                      "AHCI global register at offset 0x%"PRIx64": 0x%"PRIx64,
                      addr, val);
        trace_ahci_mem_write_unimpl(s, size, addr, val);
    }
}
/* MMIO callbacks for the AHCI memory-mapped register BAR (ABAR). */
static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
  462. static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
  463. unsigned size)
  464. {
  465. AHCIState *s = opaque;
  466. if (addr == s->idp_offset) {
  467. /* index register */
  468. return s->idp_index;
  469. } else if (addr == s->idp_offset + 4) {
  470. /* data register - do memory read at location selected by index */
  471. return ahci_mem_read(opaque, s->idp_index, size);
  472. } else {
  473. return 0;
  474. }
  475. }
  476. static void ahci_idp_write(void *opaque, hwaddr addr,
  477. uint64_t val, unsigned size)
  478. {
  479. AHCIState *s = opaque;
  480. if (addr == s->idp_offset) {
  481. /* index register - mask off reserved bits */
  482. s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
  483. } else if (addr == s->idp_offset + 4) {
  484. /* data register - do memory write at location selected by index */
  485. ahci_mem_write(opaque, s->idp_index, val, size);
  486. }
  487. }
/* MMIO callbacks for the legacy Index-Data Pair access region. */
static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
  493. static void ahci_reg_init(AHCIState *s)
  494. {
  495. int i;
  496. s->control_regs.cap = (s->ports - 1) |
  497. (AHCI_NUM_COMMAND_SLOTS << 8) |
  498. (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
  499. HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;
  500. s->control_regs.impl = (1 << s->ports) - 1;
  501. s->control_regs.version = AHCI_VERSION_1_0;
  502. for (i = 0; i < s->ports; i++) {
  503. s->dev[i].port_state = STATE_RUN;
  504. }
  505. }
  506. static void check_cmd(AHCIState *s, int port)
  507. {
  508. AHCIPortRegs *pr = &s->dev[port].port_regs;
  509. uint8_t slot;
  510. if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
  511. for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
  512. if (pr->cmd_issue & (1U << slot)) {
  513. handle_cmd(s, port, slot);
  514. }
  515. }
  516. }
  517. }
  518. static void ahci_check_cmd_bh(void *opaque)
  519. {
  520. AHCIDevice *ad = opaque;
  521. qemu_bh_delete(ad->check_bh);
  522. ad->check_bh = NULL;
  523. check_cmd(ad->hba, ad->port_no);
  524. }
/*
 * Send the initial D2H Register FIS that a real device emits after
 * power-up/reset, and latch the device signature into PxSIG.
 * Idempotent: does nothing once the FIS has been sent.
 */
static void ahci_init_d2h(AHCIDevice *ad)
{
    IDEState *ide_state = &ad->port.ifs[0];
    AHCIPortRegs *pr = &ad->port_regs;

    if (ad->init_d2h_sent) {
        return;
    }

    /*
     * For simplicity, do not call ahci_clear_cmd_issue() for this
     * ahci_write_fis_d2h(). (The reset value for PxCI is 0.)
     */
    if (ahci_write_fis_d2h(ad, true)) {
        ad->init_d2h_sent = true;
        /* We're emulating receiving the first Reg D2H FIS from the device;
         * Update the SIG register, but otherwise proceed as normal. */
        pr->sig = ((uint32_t)ide_state->hcyl << 24) |
                  (ide_state->lcyl << 16) |
                  (ide_state->sector << 8) |
                  (ide_state->nsector & 0xFF);
    }
}
  546. static void ahci_set_signature(AHCIDevice *ad, uint32_t sig)
  547. {
  548. IDEState *s = &ad->port.ifs[0];
  549. s->hcyl = sig >> 24 & 0xFF;
  550. s->lcyl = sig >> 16 & 0xFF;
  551. s->sector = sig >> 8 & 0xFF;
  552. s->nsector = sig & 0xFF;
  553. trace_ahci_set_signature(ad->hba, ad->port_no, s->nsector, s->sector,
  554. s->lcyl, s->hcyl, sig);
  555. }
/*
 * Perform a COMRESET-style reset of one port: reset the IDE bus,
 * restore the port registers to their reset values, cancel any NCQ
 * transfers in flight, and (if a device is attached) present the
 * device signature via an initial D2H FIS.
 */
static void ahci_reset_port(AHCIState *s, int port)
{
    AHCIDevice *d = &s->dev[port];
    AHCIPortRegs *pr = &d->port_regs;
    IDEState *ide_state = &d->port.ifs[0];
    int i;

    trace_ahci_reset_port(s, port);

    ide_bus_reset(&d->port);
    ide_state->ncq_queues = AHCI_MAX_CMDS;

    /* Reset values per AHCI 1.3: BSY|DRQ... cleared to 0x7F, no signature */
    pr->scr_stat = 0;
    pr->scr_err = 0;
    pr->scr_act = 0;
    pr->tfdata = 0x7F;
    pr->sig = 0xFFFFFFFF;
    pr->cmd_issue = 0;
    d->busy_slot = -1;
    d->init_d2h_sent = false;

    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        /* Nothing attached to this port; nothing more to do */
        return;
    }

    /* reset ncq queue */
    for (i = 0; i < AHCI_MAX_CMDS; i++) {
        NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
        ncq_tfs->halt = false;
        if (!ncq_tfs->used) {
            continue;
        }

        if (ncq_tfs->aiocb) {
            blk_aio_cancel(ncq_tfs->aiocb);
            ncq_tfs->aiocb = NULL;
        }

        /* Maybe we just finished the request thanks to blk_aio_cancel() */
        if (!ncq_tfs->used) {
            continue;
        }

        qemu_sglist_destroy(&ncq_tfs->sglist);
        ncq_tfs->used = 0;
    }

    s->dev[port].port_state = STATE_RUN;
    if (ide_state->drive_kind == IDE_CD) {
        ahci_set_signature(d, SATA_SIGNATURE_CDROM);
        ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
    } else {
        ahci_set_signature(d, SATA_SIGNATURE_DISK);
        ide_state->status = SEEK_STAT | WRERR_STAT;
    }

    ide_state->error = 1;
    ahci_init_d2h(d);
}
  606. /* Buffer pretty output based on a raw FIS structure. */
  607. static char *ahci_pretty_buffer_fis(const uint8_t *fis, int cmd_len)
  608. {
  609. int i;
  610. GString *s = g_string_new("FIS:");
  611. for (i = 0; i < cmd_len; i++) {
  612. if ((i & 0xf) == 0) {
  613. g_string_append_printf(s, "\n0x%02x: ", i);
  614. }
  615. g_string_append_printf(s, "%02x ", fis[i]);
  616. }
  617. g_string_append_c(s, '\n');
  618. return g_string_free(s, FALSE);
  619. }
  620. static bool ahci_map_fis_address(AHCIDevice *ad)
  621. {
  622. AHCIPortRegs *pr = &ad->port_regs;
  623. map_page(ad->hba->as, &ad->res_fis,
  624. ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
  625. if (ad->res_fis != NULL) {
  626. pr->cmd |= PORT_CMD_FIS_ON;
  627. return true;
  628. }
  629. pr->cmd &= ~PORT_CMD_FIS_ON;
  630. return false;
  631. }
/*
 * Tear down the FIS receive area mapping and clear PxCMD.FR (FIS_ON).
 * Safe to call when no mapping exists (traced, then ignored).
 */
static void ahci_unmap_fis_address(AHCIDevice *ad)
{
    if (ad->res_fis == NULL) {
        trace_ahci_unmap_fis_address_null(ad->hba, ad->port_no);
        return;
    }
    ad->port_regs.cmd &= ~PORT_CMD_FIS_ON;
    dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
                     DMA_DIRECTION_FROM_DEVICE, 256);
    ad->res_fis = NULL;
}
  643. static bool ahci_map_clb_address(AHCIDevice *ad)
  644. {
  645. AHCIPortRegs *pr = &ad->port_regs;
  646. ad->cur_cmd = NULL;
  647. map_page(ad->hba->as, &ad->lst,
  648. ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
  649. if (ad->lst != NULL) {
  650. pr->cmd |= PORT_CMD_LIST_ON;
  651. return true;
  652. }
  653. pr->cmd &= ~PORT_CMD_LIST_ON;
  654. return false;
  655. }
/*
 * Tear down the command list mapping and clear PxCMD.CR (LIST_ON).
 * Safe to call when no mapping exists (traced, then ignored).
 */
static void ahci_unmap_clb_address(AHCIDevice *ad)
{
    if (ad->lst == NULL) {
        trace_ahci_unmap_clb_address_null(ad->hba, ad->port_no);
        return;
    }
    ad->port_regs.cmd &= ~PORT_CMD_LIST_ON;
    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
    ad->lst = NULL;
}
/*
 * Post a Set Device Bits FIS into the FIS receive area to report NCQ
 * command completion(s): updates the shadow task-file (PxTFD), clears
 * the finished commands out of PxSACT, and raises TFES or SDBS.
 * No-op if the FIS receive engine is not running.
 */
static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
{
    AHCIDevice *ad = ncq_tfs->drive;
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *ide_state;
    SDBFIS *sdb_fis;

    if (!ad->res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */
    /* Mask 0x77 drops BSY (0x80) and DRQ (0x08) from the status byte */
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
        (ad->port.ifs[0].status & 0x77) |
        (pr->tfdata & 0x88);
    pr->scr_act &= ~ad->finished;
    ad->finished = 0;

    /*
     * TFES IRQ is always raised if ERR_STAT is set, regardless of I bit.
     * If ERR_STAT is not set, trigger SDBS IRQ if interrupt bit is set
     * (which currently, it always is).
     */
    if (sdb_fis->status & ERR_STAT) {
        ahci_trigger_irq(s, ad, AHCI_PORT_IRQ_BIT_TFES);
    } else if (sdb_fis->flags & 0x40) {
        ahci_trigger_irq(s, ad, AHCI_PORT_IRQ_BIT_SDBS);
    }
}
/*
 * Post a PIO Setup FIS into the FIS receive area.
 *
 * @ad:        port whose device is responding
 * @len:       transfer count field of the FIS, in bytes
 * @pio_fis_i: value of the FIS's interrupt ("I") bit
 *
 * Also refreshes the shadow task-file (PxTFD) and raises TFES on error.
 * No-op if the FIS receive engine is not running.
 */
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len, bool pio_fis_i)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    /* Byte layout per SATA spec: PIO Setup - Device to Host FIS */
    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (pio_fis_i ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[11] = 0;
    pio_fis[12] = s->nsector & 0xFF;
    pio_fis[13] = (s->nsector >> 8) & 0xFF;
    pio_fis[14] = 0;
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;
    pio_fis[18] = 0;
    pio_fis[19] = 0;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
        ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
    }
}
/*
 * Post a Register Device-to-Host FIS into the port's FIS receive area,
 * update the shadow task-file register (PxTFD) and raise the appropriate
 * port interrupt (TFES on error, else DHRS if the 'I' bit is set).
 *
 * Returns true if the FIS was written, false if the receive area is not
 * available (res_fis unset or PxCMD.FRE clear).
 */
static bool ahci_write_fis_d2h(AHCIDevice *ad, bool d2h_fis_i)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *d2h_fis;
    int i;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return false;
    }

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = d2h_fis_i ? (1 << 6) : 0; /* interrupt bit */
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;
    /* LBA / device registers: */
    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[11] = 0;
    d2h_fis[12] = s->nsector & 0xFF;
    d2h_fis[13] = (s->nsector >> 8) & 0xFF;
    /* Bytes 14-19 are reserved; clear them. */
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
        ad->port.ifs[0].status;

    /* TFES IRQ is always raised if ERR_STAT is set, regardless of I bit. */
    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
    } else if (d2h_fis_i) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_DHRS);
    }

    return true;
}
  777. static int prdt_tbl_entry_size(const AHCI_SG *tbl)
  778. {
  779. /* flags_size is zero-based */
  780. return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
  781. }
/**
 * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
 * @ad: The AHCIDevice for whom we are building the SGList.
 * @sglist: The SGList target to add PRD entries to.
 * @cmd: The AHCI Command Header that describes where the PRDT is.
 * @limit: The remaining size of the S/ATA transaction, in bytes.
 * @offset: The number of bytes already transferred, in bytes.
 *
 * The AHCI PRDT can describe up to 256GiB. S/ATA only support transactions of
 * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop
 * building the sglist from the PRDT as soon as we hit @limit bytes,
 * which is <= INT32_MAX/2GiB.
 *
 * Returns 0 on success, -1 on failure (empty PRDT, unmappable or short-mapped
 * PRDT, or @offset past the end of the described region).
 */
static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
                                AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
{
    uint16_t opts = le16_to_cpu(cmd->opts);
    uint16_t prdtl = le16_to_cpu(cmd->prdtl);
    uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
    /* The PRDT starts 0x80 bytes into the command table, after CFIS/ACMD. */
    uint64_t prdt_addr = cfis_addr + 0x80;
    dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
    dma_addr_t real_prdt_len = prdt_len;
    uint8_t *prdt;
    int i;
    int r = 0;
    uint64_t sum = 0;
    int off_idx = -1;       /* index of the PRD entry containing @offset */
    int64_t off_pos = -1;   /* byte position of @offset within that entry */
    IDEBus *bus = &ad->port;
    BusState *qbus = BUS(bus);

    trace_ahci_populate_sglist(ad->hba, ad->port_no);

    if (!prdtl) {
        trace_ahci_populate_sglist_no_prdtl(ad->hba, ad->port_no, opts);
        return -1;
    }

    /* map PRDT */
    if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
                                DMA_DIRECTION_TO_DEVICE,
                                MEMTXATTRS_UNSPECIFIED))){
        trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
        return -1;
    }

    /* dma_memory_map may shrink prdt_len; a partial map is unusable. */
    if (prdt_len < real_prdt_len) {
        trace_ahci_populate_sglist_short_map(ad->hba, ad->port_no);
        r = -1;
        goto out;
    }

    /* Get entries in the PRDT, init a qemu sglist accordingly */
    if (prdtl > 0) {
        AHCI_SG *tbl = (AHCI_SG *)prdt;
        int tbl_entry_size = 0;

        /* Walk the PRDT until we find the entry that contains @offset. */
        sum = 0;
        for (i = 0; i < prdtl; i++) {
            tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
            if (offset < (sum + tbl_entry_size)) {
                off_idx = i;
                off_pos = offset - sum;
                break;
            }
            sum += tbl_entry_size;
        }
        if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
            trace_ahci_populate_sglist_bad_offset(ad->hba, ad->port_no,
                                                  off_idx, off_pos);
            r = -1;
            goto out;
        }

        qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
                         ad->hba->as);
        /* First entry: start mid-entry at off_pos, clamped to @limit. */
        qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
                        MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,
                            limit));

        /* Remaining entries, until the sglist reaches @limit bytes. */
        for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
            qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
                            MIN(prdt_tbl_entry_size(&tbl[i]),
                                limit - sglist->size));
        }
    }

out:
    dma_memory_unmap(ad->hba->as, prdt, prdt_len,
                     DMA_DIRECTION_TO_DEVICE, prdt_len);
    return r;
}
  865. static void ncq_err(NCQTransferState *ncq_tfs)
  866. {
  867. IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
  868. ide_state->error = ABRT_ERR;
  869. ide_state->status = READY_STAT | ERR_STAT;
  870. qemu_sglist_destroy(&ncq_tfs->sglist);
  871. ncq_tfs->used = 0;
  872. }
/*
 * Complete an NCQ transfer: record it in the port's finished mask,
 * send the SDB FIS (which clears PxSACT and may raise an interrupt),
 * finish block accounting and release the slot.
 */
static void ncq_finish(NCQTransferState *ncq_tfs)
{
    /* If we didn't error out, set our finished bit. Errored commands
     * do not get a bit set for the SDB FIS ACT register, nor do they
     * clear the outstanding bit in scr_act (PxSACT). */
    if (ncq_tfs->used) {
        ncq_tfs->drive->finished |= (1 << ncq_tfs->tag);
    }

    /* Must happen after the finished bit is set: the SDB FIS consumes it. */
    ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs);

    trace_ncq_finish(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
                     ncq_tfs->tag);

    block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
                    &ncq_tfs->acct);
    qemu_sglist_destroy(&ncq_tfs->sglist);
    ncq_tfs->used = 0;
}
/*
 * AIO completion callback for NCQ reads/writes.
 *
 * On I/O error, consult the block-layer error policy: STOP halts the
 * transfer for later retry (IDE_RETRY_HBA), REPORT fails the command via
 * ncq_err(). The transfer is only finalized when it was not halted.
 */
static void ncq_cb(void *opaque, int ret)
{
    NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
    IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];

    ncq_tfs->aiocb = NULL;

    if (ret < 0) {
        bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED;
        BlockErrorAction action = blk_get_error_action(ide_state->blk,
                                                       is_read, -ret);
        if (action == BLOCK_ERROR_ACTION_STOP) {
            /* Keep the slot in use; ahci_restart() will re-issue it. */
            ncq_tfs->halt = true;
            ide_state->bus->error_status = IDE_RETRY_HBA;
        } else if (action == BLOCK_ERROR_ACTION_REPORT) {
            ncq_err(ncq_tfs);
        }
        blk_error_action(ide_state->blk, action, is_read, -ret);
    } else {
        ide_state->status = READY_STAT | SEEK_STAT;
    }

    if (!ncq_tfs->halt) {
        ncq_finish(ncq_tfs);
    }
}
  912. static int is_ncq(uint8_t ata_cmd)
  913. {
  914. /* Based on SATA 3.2 section 13.6.3.2 */
  915. switch (ata_cmd) {
  916. case READ_FPDMA_QUEUED:
  917. case WRITE_FPDMA_QUEUED:
  918. case NCQ_NON_DATA:
  919. case RECEIVE_FPDMA_QUEUED:
  920. case SEND_FPDMA_QUEUED:
  921. return 1;
  922. default:
  923. return 0;
  924. }
  925. }
/*
 * Kick off the block-layer I/O for a prepared NCQ transfer. Also used by
 * ahci_restart() to re-issue a transfer halted by an I/O error.
 *
 * Only READ/WRITE FPDMA QUEUED are implemented; other NCQ commands fail
 * with ncq_err().
 */
static void execute_ncq_command(NCQTransferState *ncq_tfs)
{
    AHCIDevice *ad = ncq_tfs->drive;
    IDEState *ide_state = &ad->port.ifs[0];
    int port = ad->port_no;

    g_assert(is_ncq(ncq_tfs->cmd));
    ncq_tfs->halt = false;

    switch (ncq_tfs->cmd) {
    case READ_FPDMA_QUEUED:
        trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag,
                                       ncq_tfs->sector_count, ncq_tfs->lba);
        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_READ);
        ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
                                      ncq_tfs->lba << BDRV_SECTOR_BITS,
                                      BDRV_SECTOR_SIZE,
                                      ncq_cb, ncq_tfs);
        break;
    case WRITE_FPDMA_QUEUED:
        trace_execute_ncq_command_write(ad->hba, port, ncq_tfs->tag,
                                        ncq_tfs->sector_count, ncq_tfs->lba);
        dma_acct_start(ide_state->blk, &ncq_tfs->acct,
                       &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
        ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
                                       ncq_tfs->lba << BDRV_SECTOR_BITS,
                                       BDRV_SECTOR_SIZE,
                                       ncq_cb, ncq_tfs);
        break;
    default:
        trace_execute_ncq_command_unsup(ad->hba, port,
                                        ncq_tfs->tag, ncq_tfs->cmd);
        ncq_err(ncq_tfs);
    }
}
/*
 * Parse an NCQ (FPDMA QUEUED) command FIS, set up the transfer state for
 * its tag, build the scatter/gather list and start the I/O.
 */
static void process_ncq_command(AHCIState *s, int port, const uint8_t *cmd_fis,
                                uint8_t slot)
{
    AHCIDevice *ad = &s->dev[port];
    const NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
    uint8_t tag = ncq_fis->tag >> 3;    /* tag lives in bits 7:3 */
    NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];
    size_t size;

    g_assert(is_ncq(ncq_fis->command));
    if (ncq_tfs->used) {
        /* error - already in use */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: tag %d already used\n",
                      __func__, tag);
        return;
    }

    /*
     * A NCQ command clears the bit in PxCI after the command has been QUEUED
     * successfully (ERROR not set, BUSY and DRQ cleared).
     *
     * For NCQ commands, PxCI will always be cleared here.
     *
     * (Once the NCQ command is COMPLETED, the device will send a SDB FIS with
     * the interrupt bit set, which will clear PxSACT and raise an interrupt.)
     */
    ahci_clear_cmd_issue(ad, slot);

    /*
     * In reality, for NCQ commands, PxCI is cleared after receiving a D2H FIS
     * without the interrupt bit set, but since ahci_write_fis_d2h() can raise
     * an IRQ on error, we need to call them in reverse order.
     */
    ahci_write_fis_d2h(ad, false);

    ncq_tfs->used = 1;
    ncq_tfs->drive = ad;
    ncq_tfs->slot = slot;
    ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot];
    ncq_tfs->cmd = ncq_fis->command;
    /* Assemble the 48-bit LBA from its six byte fields. */
    ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
                   ((uint64_t)ncq_fis->lba4 << 32) |
                   ((uint64_t)ncq_fis->lba3 << 24) |
                   ((uint64_t)ncq_fis->lba2 << 16) |
                   ((uint64_t)ncq_fis->lba1 << 8) |
                   (uint64_t)ncq_fis->lba0;
    ncq_tfs->tag = tag;

    /* Sanity-check the NCQ packet */
    if (tag != slot) {
        trace_process_ncq_command_mismatch(s, port, tag, slot);
    }

    if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
        trace_process_ncq_command_aux(s, port, tag);
    }
    if (ncq_fis->prio || ncq_fis->icc) {
        trace_process_ncq_command_prioicc(s, port, tag);
    }
    if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
        trace_process_ncq_command_fua(s, port, tag);
    }
    if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
        trace_process_ncq_command_rarc(s, port, tag);
    }

    /* A zero sector count means the maximum, 65536 sectors. */
    ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) |
                             ncq_fis->sector_count_low);
    if (!ncq_tfs->sector_count) {
        ncq_tfs->sector_count = 0x10000;
    }
    size = ncq_tfs->sector_count * BDRV_SECTOR_SIZE;

    /* An underflowing PRDT is an overflow condition (OFS) for the port. */
    ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);
    if (ncq_tfs->sglist.size < size) {
        error_report("ahci: PRDT length for NCQ command (0x" DMA_ADDR_FMT ") "
                     "is smaller than the requested size (0x%zx)",
                     ncq_tfs->sglist.size, size);
        ncq_err(ncq_tfs);
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_OFS);
        return;
    } else if (ncq_tfs->sglist.size != size) {
        trace_process_ncq_command_large(s, port, tag,
                                        ncq_tfs->sglist.size, size);
    }

    trace_process_ncq_command(s, port, tag,
                              ncq_fis->command,
                              ncq_tfs->lba,
                              ncq_tfs->lba + ncq_tfs->sector_count - 1);
    execute_ncq_command(ncq_tfs);
}
  1043. static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot)
  1044. {
  1045. if (port >= s->ports || slot >= AHCI_MAX_CMDS) {
  1046. return NULL;
  1047. }
  1048. return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL;
  1049. }
  1050. static void handle_reg_h2d_fis(AHCIState *s, int port,
  1051. uint8_t slot, const uint8_t *cmd_fis)
  1052. {
  1053. IDEState *ide_state = &s->dev[port].port.ifs[0];
  1054. AHCICmdHdr *cmd = get_cmd_header(s, port, slot);
  1055. AHCIDevice *ad = &s->dev[port];
  1056. uint16_t opts = le16_to_cpu(cmd->opts);
  1057. if (cmd_fis[1] & 0x0F) {
  1058. trace_handle_reg_h2d_fis_pmp(s, port, cmd_fis[1],
  1059. cmd_fis[2], cmd_fis[3]);
  1060. return;
  1061. }
  1062. if (cmd_fis[1] & 0x70) {
  1063. trace_handle_reg_h2d_fis_res(s, port, cmd_fis[1],
  1064. cmd_fis[2], cmd_fis[3]);
  1065. return;
  1066. }
  1067. if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
  1068. switch (s->dev[port].port_state) {
  1069. case STATE_RUN:
  1070. if (cmd_fis[15] & ATA_SRST) {
  1071. s->dev[port].port_state = STATE_RESET;
  1072. /*
  1073. * When setting SRST in the first H2D FIS in the reset sequence,
  1074. * the device does not send a D2H FIS. Host software thus has to
  1075. * set the "Clear Busy upon R_OK" bit such that PxCI (and BUSY)
  1076. * gets cleared. See AHCI 1.3.1, section 10.4.1 Software Reset.
  1077. */
  1078. if (opts & AHCI_CMD_CLR_BUSY) {
  1079. ahci_clear_cmd_issue(ad, slot);
  1080. }
  1081. }
  1082. break;
  1083. case STATE_RESET:
  1084. if (!(cmd_fis[15] & ATA_SRST)) {
  1085. /*
  1086. * When clearing SRST in the second H2D FIS in the reset
  1087. * sequence, the device will execute diagnostics. When this is
  1088. * done, the device will send a D2H FIS with the good status.
  1089. * See SATA 3.5a Gold, section 11.4 Software reset protocol.
  1090. *
  1091. * This D2H FIS is the first D2H FIS received from the device,
  1092. * and is received regardless if the reset was performed by a
  1093. * COMRESET or by setting and clearing the SRST bit. Therefore,
  1094. * the logic for this is found in ahci_init_d2h() and not here.
  1095. */
  1096. ahci_reset_port(s, port);
  1097. }
  1098. break;
  1099. }
  1100. return;
  1101. }
  1102. /* Check for NCQ command */
  1103. if (is_ncq(cmd_fis[2])) {
  1104. process_ncq_command(s, port, cmd_fis, slot);
  1105. return;
  1106. }
  1107. /* Decompose the FIS:
  1108. * AHCI does not interpret FIS packets, it only forwards them.
  1109. * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
  1110. * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets.
  1111. *
  1112. * ATA4 describes sector number for LBA28/CHS commands.
  1113. * ATA6 describes sector number for LBA48 commands.
  1114. * ATA8 deprecates CHS fully, describing only LBA28/48.
  1115. *
  1116. * We dutifully convert the FIS into IDE registers, and allow the
  1117. * core layer to interpret them as needed. */
  1118. ide_state->feature = cmd_fis[3];
  1119. ide_state->sector = cmd_fis[4]; /* LBA 7:0 */
  1120. ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */
  1121. ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */
  1122. ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */
  1123. ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */
  1124. ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */
  1125. ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */
  1126. ide_state->hob_feature = cmd_fis[11];
  1127. ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
  1128. /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
  1129. /* 15: Only valid when UPDATE_COMMAND not set. */
  1130. /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
  1131. * table to ide_state->io_buffer */
  1132. if (opts & AHCI_CMD_ATAPI) {
  1133. memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
  1134. if (trace_event_get_state_backends(TRACE_HANDLE_REG_H2D_FIS_DUMP)) {
  1135. char *pretty_fis = ahci_pretty_buffer_fis(ide_state->io_buffer, 0x10);
  1136. trace_handle_reg_h2d_fis_dump(s, port, pretty_fis);
  1137. g_free(pretty_fis);
  1138. }
  1139. }
  1140. ide_state->error = 0;
  1141. s->dev[port].done_first_drq = false;
  1142. /* Reset transferred byte counter */
  1143. cmd->status = 0;
  1144. /*
  1145. * A non-NCQ command clears the bit in PxCI after the command has COMPLETED
  1146. * successfully (ERROR not set, BUSY and DRQ cleared).
  1147. *
  1148. * For non-NCQ commands, PxCI will always be cleared by ahci_cmd_done().
  1149. */
  1150. ad->busy_slot = slot;
  1151. /* We're ready to process the command in FIS byte 2. */
  1152. ide_bus_exec_cmd(&s->dev[port].port, cmd_fis[2]);
  1153. }
/*
 * Execute the command in @slot of @port: map the 0x80-byte command FIS
 * from guest memory and dispatch on the FIS type. Only Register H2D FIS
 * packets are handled; others are traced and ignored.
 */
static void handle_cmd(AHCIState *s, int port, uint8_t slot)
{
    IDEState *ide_state;
    uint64_t tbl_addr;
    AHCICmdHdr *cmd;
    uint8_t *cmd_fis;
    dma_addr_t cmd_len;

    if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
        /* Engine currently busy, try again later */
        trace_handle_cmd_busy(s, port);
        return;
    }

    if (!s->dev[port].lst) {
        trace_handle_cmd_nolist(s, port);
        return;
    }
    cmd = get_cmd_header(s, port, slot);
    /* remember current slot handle for later */
    s->dev[port].cur_cmd = cmd;

    /* The device we are working for */
    ide_state = &s->dev[port].port.ifs[0];
    if (!ide_state->blk) {
        trace_handle_cmd_badport(s, port);
        return;
    }

    tbl_addr = le64_to_cpu(cmd->tbl_addr);
    cmd_len = 0x80;
    cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
    if (!cmd_fis) {
        trace_handle_cmd_badfis(s, port);
        return;
    } else if (cmd_len != 0x80) {
        /* Short map: report a Host Bus Fatal error to the guest. */
        ahci_trigger_irq(s, &s->dev[port], AHCI_PORT_IRQ_BIT_HBFS);
        trace_handle_cmd_badmap(s, port, cmd_len);
        goto out;
    }
    if (trace_event_get_state_backends(TRACE_HANDLE_CMD_FIS_DUMP)) {
        char *pretty_fis = ahci_pretty_buffer_fis(cmd_fis, 0x80);
        trace_handle_cmd_fis_dump(s, port, pretty_fis);
        g_free(pretty_fis);
    }
    switch (cmd_fis[0]) {
        case SATA_FIS_TYPE_REGISTER_H2D:
            handle_reg_h2d_fis(s, port, slot, cmd_fis);
            break;
        default:
            trace_handle_cmd_unhandled_fis(s, port,
                                           cmd_fis[0], cmd_fis[1], cmd_fis[2]);
            break;
    }

out:
    dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE,
                     cmd_len);
}
/* Transfer PIO data between RAM and device */
static void ahci_pio_transfer(const IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
    /* write == ram -> device */
    uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
    int is_write = opts & AHCI_CMD_WRITE;
    int is_atapi = opts & AHCI_CMD_ATAPI;
    int has_sglist = 0;
    bool pio_fis_i;

    /* The PIO Setup FIS is received prior to transfer, but the interrupt
     * is only triggered after data is received.
     *
     * The device only sets the 'I' bit in the PIO Setup FIS for device->host
     * requests (see "DPIOI1" in the SATA spec), or for host->device DRQs after
     * the first (see "DPIOO1"). The latter is consistent with the spec's
     * description of the PACKET protocol, where the command part of ATAPI requests
     * ("DPKT0") has the 'I' bit clear, while the data part of PIO ATAPI requests
     * ("DPKT4a" and "DPKT7") has the 'I' bit set for both directions for all DRQs.
     */
    pio_fis_i = ad->done_first_drq || (!is_atapi && !is_write);
    ahci_write_fis_pio(ad, size, pio_fis_i);

    /* The first ATAPI DRQ carries the command packet, which was already
     * copied to io_buffer by handle_reg_h2d_fis(). */
    if (is_atapi && !ad->done_first_drq) {
        /* already prepopulated iobuffer */
        goto out;
    }

    if (ahci_dma_prepare_buf(dma, size)) {
        has_sglist = 1;
    }

    trace_ahci_pio_transfer(ad->hba, ad->port_no, is_write ? "writ" : "read",
                            size, is_atapi ? "atapi" : "ata",
                            has_sglist ? "" : "o");

    if (has_sglist && size) {
        const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;

        if (is_write) {
            /* ram -> device: read from guest memory into the data buffer */
            dma_buf_write(s->data_ptr, size, NULL, &s->sg, attrs);
        } else {
            dma_buf_read(s->data_ptr, size, NULL, &s->sg, attrs);
        }
    }

    /* Update number of transferred bytes, destroy sglist */
    dma_buf_commit(s, size);

out:
    /* declare that we processed everything */
    s->data_ptr = s->data_end;

    ad->done_first_drq = true;
    if (pio_fis_i) {
        ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_PSS);
    }
}
  1261. static void ahci_start_dma(const IDEDMA *dma, IDEState *s,
  1262. BlockCompletionFunc *dma_cb)
  1263. {
  1264. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1265. trace_ahci_start_dma(ad->hba, ad->port_no);
  1266. s->io_buffer_offset = 0;
  1267. dma_cb(s, 0);
  1268. }
/* Restart hook for the IDE core after a stopped DMA operation. */
static void ahci_restart_dma(const IDEDMA *dma)
{
    /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset.  */
}
  1273. /**
  1274. * IDE/PIO restarts are handled by the core layer, but NCQ commands
  1275. * need an extra kick from the AHCI HBA.
  1276. */
  1277. static void ahci_restart(const IDEDMA *dma)
  1278. {
  1279. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1280. int i;
  1281. for (i = 0; i < AHCI_MAX_CMDS; i++) {
  1282. NCQTransferState *ncq_tfs = &ad->ncq_tfs[i];
  1283. if (ncq_tfs->halt) {
  1284. execute_ncq_command(ncq_tfs);
  1285. }
  1286. }
  1287. }
  1288. /**
  1289. * Called in DMA and PIO R/W chains to read the PRDT.
  1290. * Not shared with NCQ pathways.
  1291. */
  1292. static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit)
  1293. {
  1294. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1295. IDEState *s = &ad->port.ifs[0];
  1296. if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd,
  1297. limit, s->io_buffer_offset) == -1) {
  1298. trace_ahci_dma_prepare_buf_fail(ad->hba, ad->port_no);
  1299. return -1;
  1300. }
  1301. s->io_buffer_size = s->sg.size;
  1302. trace_ahci_dma_prepare_buf(ad->hba, ad->port_no, limit, s->io_buffer_size);
  1303. return s->io_buffer_size;
  1304. }
  1305. /**
  1306. * Updates the command header with a bytes-read value.
  1307. * Called via dma_buf_commit, for both DMA and PIO paths.
  1308. * sglist destruction is handled within dma_buf_commit.
  1309. */
  1310. static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes)
  1311. {
  1312. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1313. tx_bytes += le32_to_cpu(ad->cur_cmd->status);
  1314. ad->cur_cmd->status = cpu_to_le32(tx_bytes);
  1315. }
/*
 * Copy the remainder of io_buffer between guest memory (via the PRDT
 * sglist) and the device buffer.
 *
 * Returns 1 on success, 0 if the PRDT could not be mapped.
 */
static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *s = &ad->port.ifs[0];
    uint8_t *p = s->io_buffer + s->io_buffer_index;
    int l = s->io_buffer_size - s->io_buffer_index;

    if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) {
        return 0;
    }

    if (is_write) {
        /* device write: fill the buffer from guest memory */
        dma_buf_read(p, l, NULL, &s->sg, MEMTXATTRS_UNSPECIFIED);
    } else {
        /* device read: copy the buffer out to guest memory */
        dma_buf_write(p, l, NULL, &s->sg, MEMTXATTRS_UNSPECIFIED);
    }

    /* free sglist, update byte count */
    dma_buf_commit(s, l);
    s->io_buffer_index += l;

    trace_ahci_dma_rw_buf(ad->hba, ad->port_no, l);
    return 1;
}
  1336. static void ahci_clear_cmd_issue(AHCIDevice *ad, uint8_t slot)
  1337. {
  1338. IDEState *ide_state = &ad->port.ifs[0];
  1339. if (!(ide_state->status & ERR_STAT) &&
  1340. !(ide_state->status & (BUSY_STAT | DRQ_STAT))) {
  1341. ad->port_regs.cmd_issue &= ~(1 << slot);
  1342. }
  1343. }
/* Non-NCQ command is done - This function is never called for NCQ commands. */
static void ahci_cmd_done(const IDEDMA *dma)
{
    AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
    IDEState *ide_state = &ad->port.ifs[0];

    trace_ahci_cmd_done(ad->hba, ad->port_no);

    /* no longer busy */
    if (ad->busy_slot != -1) {
        ahci_clear_cmd_issue(ad, ad->busy_slot);
        ad->busy_slot = -1;
    }

    /*
     * In reality, for non-NCQ commands, PxCI is cleared after receiving a D2H
     * FIS with the interrupt bit set, but since ahci_write_fis_d2h() will raise
     * an IRQ, we need to call them in reverse order.
     */
    ahci_write_fis_d2h(ad, true);

    /* If further commands remain pending in PxCI, schedule a bottom half
     * to pick them up once this completion unwinds. */
    if (!(ide_state->status & ERR_STAT) &&
        ad->port_regs.cmd_issue && !ad->check_bh) {
        ad->check_bh = qemu_bh_new_guarded(ahci_check_cmd_bh, ad,
                                           &ad->mem_reentrancy_guard);
        qemu_bh_schedule(ad->check_bh);
    }
}
/* Stub IRQ handler for the per-port IRQs allocated in ahci_realize();
 * real interrupt delivery goes through ahci_trigger_irq(). */
static void ahci_irq_set(void *opaque, int n, int level)
{
    qemu_log_mask(LOG_UNIMP, "ahci: IRQ#%d level:%d\n", n, level);
}
/* IDE core DMA callbacks implemented by the AHCI HBA. */
static const IDEDMAOps ahci_dma_ops = {
    .start_dma = ahci_start_dma,
    .restart = ahci_restart,
    .restart_dma = ahci_restart_dma,
    .pio_transfer = ahci_pio_transfer,
    .prepare_buf = ahci_dma_prepare_buf,
    .commit_buf = ahci_commit_buf,
    .rw_buf = ahci_dma_rw_buf,
    .cmd_done = ahci_cmd_done,
};
/* Set up the HBA's MMIO regions (main register BAR and index/data pair). */
void ahci_init(AHCIState *s, DeviceState *qdev)
{
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);
}
  1390. void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as)
  1391. {
  1392. qemu_irq *irqs;
  1393. int i;
  1394. s->as = as;
  1395. assert(s->ports > 0);
  1396. s->dev = g_new0(AHCIDevice, s->ports);
  1397. ahci_reg_init(s);
  1398. irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
  1399. for (i = 0; i < s->ports; i++) {
  1400. AHCIDevice *ad = &s->dev[i];
  1401. ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1);
  1402. ide_bus_init_output_irq(&ad->port, irqs[i]);
  1403. ad->hba = s;
  1404. ad->port_no = i;
  1405. ad->port.dma = &ad->dma;
  1406. ad->port.dma->ops = &ahci_dma_ops;
  1407. ide_bus_register_restart_cb(&ad->port);
  1408. }
  1409. g_free(irqs);
  1410. }
  1411. void ahci_uninit(AHCIState *s)
  1412. {
  1413. int i, j;
  1414. for (i = 0; i < s->ports; i++) {
  1415. AHCIDevice *ad = &s->dev[i];
  1416. for (j = 0; j < 2; j++) {
  1417. ide_exit(&ad->port.ifs[j]);
  1418. }
  1419. object_unparent(OBJECT(&ad->port));
  1420. }
  1421. g_free(s->dev);
  1422. }
/* Full HBA reset: reinitialize global control registers and every port. */
void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    trace_ahci_reset(s);

    s->control_regs.irqstatus = 0;
    /* AHCI Enable (AE)
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->irq_stat = 0;
        pr->irq_mask = 0;
        pr->scr_ctl = 0;
        /* Device is present and powered at reset. */
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}
/* Migration state for a single NCQ transfer slot. */
static const VMStateDescription vmstate_ncq_tfs = {
    .name = "ncq state",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(sector_count, NCQTransferState),
        VMSTATE_UINT64(lba, NCQTransferState),
        VMSTATE_UINT8(tag, NCQTransferState),
        VMSTATE_UINT8(cmd, NCQTransferState),
        VMSTATE_UINT8(slot, NCQTransferState),
        VMSTATE_BOOL(used, NCQTransferState),
        VMSTATE_BOOL(halt, NCQTransferState),
        VMSTATE_END_OF_LIST()
    },
};
/* Migration state for one AHCI port: its IDE bus/drive, port registers,
 * and all 32 NCQ transfer slots. */
static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_first_drq, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS,
                             1, vmstate_ncq_tfs, NCQTransferState),
        VMSTATE_END_OF_LIST()
    },
};
/*
 * Post-migration fixup and validation.
 *
 * Re-derives all state that is not migrated directly (engine mappings,
 * command-header pointers, NCQ sglists) and sanity-checks the incoming
 * register state. Returns 0 on success, -1 to fail the migration.
 */
static int ahci_state_post_load(void *opaque, int version_id)
{
    int i, j;
    struct AHCIDevice *ad;
    NCQTransferState *ncq_tfs;
    AHCIPortRegs *pr;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];
        pr = &ad->port_regs;

        /* Status bits must be consistent with their enable bits. */
        if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) {
            error_report("AHCI: DMA engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }
        if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) {
            error_report("AHCI: FIS RX engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }

        /* After a migrate, the DMA/FIS engines are "off" and
         * need to be conditionally restarted */
        pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);
        if (ahci_cond_start_engines(ad) != 0) {
            return -1;
        }

        for (j = 0; j < AHCI_MAX_CMDS; j++) {
            ncq_tfs = &ad->ncq_tfs[j];
            ncq_tfs->drive = ad;

            /* Only halted transfers may survive migration in-flight. */
            if (ncq_tfs->used != ncq_tfs->halt) {
                return -1;
            }
            if (!ncq_tfs->halt) {
                continue;
            }
            if (!is_ncq(ncq_tfs->cmd)) {
                return -1;
            }
            if (ncq_tfs->slot != ncq_tfs->tag) {
                return -1;
            }
            /* If ncq_tfs->halt is justly set, the engine should be engaged,
             * and the command list buffer should be mapped. */
            ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot);
            if (!ncq_tfs->cmdh) {
                return -1;
            }
            /* Rebuild the sglist from the (guest-resident) PRDT and check
             * it still covers the whole transfer. */
            ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
                                 ncq_tfs->cmdh,
                                 ncq_tfs->sector_count * BDRV_SECTOR_SIZE,
                                 0);
            if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {
                return -1;
            }
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot);
        }
    }

    return 0;
}
/* Top-level HBA migration state: all ports plus global control registers. */
const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .version_id = 1,
    .post_load = ahci_state_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(dev, AHCIState, ports,
                                             vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        /* Port count must match between source and destination. */
        VMSTATE_UINT32_EQUAL(ports, AHCIState, NULL),
        VMSTATE_END_OF_LIST()
    },
};
  1584. void ahci_ide_create_devs(AHCIState *ahci, DriveInfo **hd)
  1585. {
  1586. int i;
  1587. for (i = 0; i < ahci->ports; i++) {
  1588. if (hd[i] == NULL) {
  1589. continue;
  1590. }
  1591. ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]);
  1592. }
  1593. }