ahci.c

  1. /*
  2. * QEMU AHCI Emulation
  3. *
  4. * Copyright (c) 2010 qiaochong@loongson.cn
  5. * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com>
  6. * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de>
  7. * Copyright (c) 2010 Alexander Graf <agraf@suse.de>
  8. *
  9. * This library is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * This library is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  21. *
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/irq.h"
  25. #include "hw/pci/msi.h"
  26. #include "hw/pci/pci.h"
  27. #include "hw/qdev-properties.h"
  28. #include "migration/vmstate.h"
  29. #include "qemu/error-report.h"
  30. #include "qemu/log.h"
  31. #include "qemu/main-loop.h"
  32. #include "qemu/module.h"
  33. #include "sysemu/block-backend.h"
  34. #include "sysemu/dma.h"
  35. #include "hw/ide/internal.h"
  36. #include "hw/ide/pci.h"
  37. #include "ahci_internal.h"
  38. #include "trace.h"
  39. static void check_cmd(AHCIState *s, int port);
  40. static int handle_cmd(AHCIState *s, int port, uint8_t slot);
  41. static void ahci_reset_port(AHCIState *s, int port);
  42. static bool ahci_write_fis_d2h(AHCIDevice *ad);
  43. static void ahci_init_d2h(AHCIDevice *ad);
  44. static int ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit);
  45. static bool ahci_map_clb_address(AHCIDevice *ad);
  46. static bool ahci_map_fis_address(AHCIDevice *ad);
  47. static void ahci_unmap_clb_address(AHCIDevice *ad);
  48. static void ahci_unmap_fis_address(AHCIDevice *ad);
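/*
 * The lookup tables below translate register indices and interrupt bits into
 * the mnemonics used by the AHCI specification (CAP, GHC, PxCLB, PxIS, ...).
 * They exist purely so the trace_* calls in this file can print readable
 * names; they have no effect on the emulation itself.
 */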
  49. static const char *AHCIHostReg_lookup[AHCI_HOST_REG__COUNT] = {
  50. [AHCI_HOST_REG_CAP] = "CAP",
  51. [AHCI_HOST_REG_CTL] = "GHC",
  52. [AHCI_HOST_REG_IRQ_STAT] = "IS",
  53. [AHCI_HOST_REG_PORTS_IMPL] = "PI",
  54. [AHCI_HOST_REG_VERSION] = "VS",
  55. [AHCI_HOST_REG_CCC_CTL] = "CCC_CTL",
  56. [AHCI_HOST_REG_CCC_PORTS] = "CCC_PORTS",
  57. [AHCI_HOST_REG_EM_LOC] = "EM_LOC",
  58. [AHCI_HOST_REG_EM_CTL] = "EM_CTL",
  59. [AHCI_HOST_REG_CAP2] = "CAP2",
  60. [AHCI_HOST_REG_BOHC] = "BOHC",
  61. };
  62. static const char *AHCIPortReg_lookup[AHCI_PORT_REG__COUNT] = {
  63. [AHCI_PORT_REG_LST_ADDR] = "PxCLB",
  64. [AHCI_PORT_REG_LST_ADDR_HI] = "PxCLBU",
  65. [AHCI_PORT_REG_FIS_ADDR] = "PxFB",
  66. [AHCI_PORT_REG_FIS_ADDR_HI] = "PxFBU",
  67. [AHCI_PORT_REG_IRQ_STAT] = "PxIS",
  68. [AHCI_PORT_REG_IRQ_MASK] = "PxIE",
  69. [AHCI_PORT_REG_CMD] = "PxCMD",
  70. [7] = "Reserved",
  71. [AHCI_PORT_REG_TFDATA] = "PxTFD",
  72. [AHCI_PORT_REG_SIG] = "PxSIG",
  73. [AHCI_PORT_REG_SCR_STAT] = "PxSSTS",
  74. [AHCI_PORT_REG_SCR_CTL] = "PxSCTL",
  75. [AHCI_PORT_REG_SCR_ERR] = "PxSERR",
  76. [AHCI_PORT_REG_SCR_ACT] = "PxSACT",
  77. [AHCI_PORT_REG_CMD_ISSUE] = "PxCI",
  78. [AHCI_PORT_REG_SCR_NOTIF] = "PxSNTF",
  79. [AHCI_PORT_REG_FIS_CTL] = "PxFBS",
  80. [AHCI_PORT_REG_DEV_SLEEP] = "PxDEVSLP",
  81. [18 ... 27] = "Reserved",
  82. [AHCI_PORT_REG_VENDOR_1 ...
  83. AHCI_PORT_REG_VENDOR_4] = "PxVS",
  84. };
  85. static const char *AHCIPortIRQ_lookup[AHCI_PORT_IRQ__COUNT] = {
  86. [AHCI_PORT_IRQ_BIT_DHRS] = "DHRS",
  87. [AHCI_PORT_IRQ_BIT_PSS] = "PSS",
  88. [AHCI_PORT_IRQ_BIT_DSS] = "DSS",
  89. [AHCI_PORT_IRQ_BIT_SDBS] = "SDBS",
  90. [AHCI_PORT_IRQ_BIT_UFS] = "UFS",
  91. [AHCI_PORT_IRQ_BIT_DPS] = "DPS",
  92. [AHCI_PORT_IRQ_BIT_PCS] = "PCS",
  93. [AHCI_PORT_IRQ_BIT_DMPS] = "DMPS",
  94. [8 ... 21] = "RESERVED",
  95. [AHCI_PORT_IRQ_BIT_PRCS] = "PRCS",
  96. [AHCI_PORT_IRQ_BIT_IPMS] = "IPMS",
  97. [AHCI_PORT_IRQ_BIT_OFS] = "OFS",
  98. [25] = "RESERVED",
  99. [AHCI_PORT_IRQ_BIT_INFS] = "INFS",
  100. [AHCI_PORT_IRQ_BIT_IFS] = "IFS",
  101. [AHCI_PORT_IRQ_BIT_HBDS] = "HBDS",
  102. [AHCI_PORT_IRQ_BIT_HBFS] = "HBFS",
  103. [AHCI_PORT_IRQ_BIT_TFES] = "TFES",
  104. [AHCI_PORT_IRQ_BIT_CPDS] = "CPDS"
  105. };
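/*
 * Per-port MMIO reads. Each port owns a 0x80-byte register block and the
 * register index is simply offset / 4. PxSSTS is synthesized rather than
 * stored: if a block backend is attached we report a Gen 1 link with the
 * phy up, otherwise "no device detected".
 */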
  106. static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
  107. {
  108. uint32_t val;
  109. AHCIPortRegs *pr = &s->dev[port].port_regs;
  110. enum AHCIPortReg regnum = offset / sizeof(uint32_t);
  111. assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
  112. switch (regnum) {
  113. case AHCI_PORT_REG_LST_ADDR:
  114. val = pr->lst_addr;
  115. break;
  116. case AHCI_PORT_REG_LST_ADDR_HI:
  117. val = pr->lst_addr_hi;
  118. break;
  119. case AHCI_PORT_REG_FIS_ADDR:
  120. val = pr->fis_addr;
  121. break;
  122. case AHCI_PORT_REG_FIS_ADDR_HI:
  123. val = pr->fis_addr_hi;
  124. break;
  125. case AHCI_PORT_REG_IRQ_STAT:
  126. val = pr->irq_stat;
  127. break;
  128. case AHCI_PORT_REG_IRQ_MASK:
  129. val = pr->irq_mask;
  130. break;
  131. case AHCI_PORT_REG_CMD:
  132. val = pr->cmd;
  133. break;
  134. case AHCI_PORT_REG_TFDATA:
  135. val = pr->tfdata;
  136. break;
  137. case AHCI_PORT_REG_SIG:
  138. val = pr->sig;
  139. break;
  140. case AHCI_PORT_REG_SCR_STAT:
  141. if (s->dev[port].port.ifs[0].blk) {
  142. val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP |
  143. SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE;
  144. } else {
  145. val = SATA_SCR_SSTATUS_DET_NODEV;
  146. }
  147. break;
  148. case AHCI_PORT_REG_SCR_CTL:
  149. val = pr->scr_ctl;
  150. break;
  151. case AHCI_PORT_REG_SCR_ERR:
  152. val = pr->scr_err;
  153. break;
  154. case AHCI_PORT_REG_SCR_ACT:
  155. val = pr->scr_act;
  156. break;
  157. case AHCI_PORT_REG_CMD_ISSUE:
  158. val = pr->cmd_issue;
  159. break;
  160. default:
  161. trace_ahci_port_read_default(s, port, AHCIPortReg_lookup[regnum],
  162. offset);
  163. val = 0;
  164. }
  165. trace_ahci_port_read(s, port, AHCIPortReg_lookup[regnum], offset, val);
  166. return val;
  167. }
  168. static void ahci_irq_raise(AHCIState *s)
  169. {
  170. DeviceState *dev_state = s->container;
  171. PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
  172. TYPE_PCI_DEVICE);
  173. trace_ahci_irq_raise(s);
  174. if (pci_dev && msi_enabled(pci_dev)) {
  175. msi_notify(pci_dev, 0);
  176. } else {
  177. qemu_irq_raise(s->irq);
  178. }
  179. }
  180. static void ahci_irq_lower(AHCIState *s)
  181. {
  182. DeviceState *dev_state = s->container;
  183. PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state),
  184. TYPE_PCI_DEVICE);
  185. trace_ahci_irq_lower(s);
  186. if (!pci_dev || !msi_enabled(pci_dev)) {
  187. qemu_irq_lower(s->irq);
  188. }
  189. }
  190. static void ahci_check_irq(AHCIState *s)
  191. {
  192. int i;
  193. uint32_t old_irq = s->control_regs.irqstatus;
  194. s->control_regs.irqstatus = 0;
  195. for (i = 0; i < s->ports; i++) {
  196. AHCIPortRegs *pr = &s->dev[i].port_regs;
  197. if (pr->irq_stat & pr->irq_mask) {
  198. s->control_regs.irqstatus |= (1 << i);
  199. }
  200. }
  201. trace_ahci_check_irq(s, old_irq, s->control_regs.irqstatus);
  202. if (s->control_regs.irqstatus &&
  203. (s->control_regs.ghc & HOST_CTL_IRQ_EN)) {
  204. ahci_irq_raise(s);
  205. } else {
  206. ahci_irq_lower(s);
  207. }
  208. }
  209. static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d,
  210. enum AHCIPortIRQ irqbit)
  211. {
  212. g_assert((unsigned)irqbit < 32);
  213. uint32_t irq = 1U << irqbit;
  214. uint32_t irqstat = d->port_regs.irq_stat | irq;
  215. trace_ahci_trigger_irq(s, d->port_no,
  216. AHCIPortIRQ_lookup[irqbit], irq,
  217. d->port_regs.irq_stat, irqstat,
  218. irqstat & d->port_regs.irq_mask);
  219. d->port_regs.irq_stat = irqstat;
  220. ahci_check_irq(s);
  221. }
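/*
 * (Re)map a guest region for device writes. Any previous mapping is dropped
 * first; if the full 'wanted' length cannot be mapped in one piece, the
 * partial mapping is released and *ptr is left NULL so callers (the CLB/FIS
 * mapping helpers below) can detect the failure.
 */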
  222. static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr,
  223. uint32_t wanted)
  224. {
  225. hwaddr len = wanted;
  226. if (*ptr) {
  227. dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
  228. }
  229. *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE,
  230. MEMTXATTRS_UNSPECIFIED);
  231. if (len < wanted && *ptr) {
  232. dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len);
  233. *ptr = NULL;
  234. }
  235. }
  236. /**
  237. * Check the cmd register to see if we should start or stop
  238. * the DMA or FIS RX engines.
  239. *
  240. * @ad: Device to dis/engage.
  241. *
  242. * @return 0 on success, -1 on error.
  243. */
  244. static int ahci_cond_start_engines(AHCIDevice *ad)
  245. {
  246. AHCIPortRegs *pr = &ad->port_regs;
  247. bool cmd_start = pr->cmd & PORT_CMD_START;
  248. bool cmd_on = pr->cmd & PORT_CMD_LIST_ON;
  249. bool fis_start = pr->cmd & PORT_CMD_FIS_RX;
  250. bool fis_on = pr->cmd & PORT_CMD_FIS_ON;
  251. if (cmd_start && !cmd_on) {
  252. if (!ahci_map_clb_address(ad)) {
  253. pr->cmd &= ~PORT_CMD_START;
  254. error_report("AHCI: Failed to start DMA engine: "
  255. "bad command list buffer address");
  256. return -1;
  257. }
  258. } else if (!cmd_start && cmd_on) {
  259. ahci_unmap_clb_address(ad);
  260. }
  261. if (fis_start && !fis_on) {
  262. if (!ahci_map_fis_address(ad)) {
  263. pr->cmd &= ~PORT_CMD_FIS_RX;
  264. error_report("AHCI: Failed to start FIS receive engine: "
  265. "bad FIS receive buffer address");
  266. return -1;
  267. }
  268. } else if (!fis_start && fis_on) {
  269. ahci_unmap_fis_address(ad);
  270. }
  271. return 0;
  272. }
  273. static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
  274. {
  275. AHCIPortRegs *pr = &s->dev[port].port_regs;
  276. enum AHCIPortReg regnum = offset / sizeof(uint32_t);
  277. assert(regnum < (AHCI_PORT_ADDR_OFFSET_LEN / sizeof(uint32_t)));
  278. trace_ahci_port_write(s, port, AHCIPortReg_lookup[regnum], offset, val);
  279. switch (regnum) {
  280. case AHCI_PORT_REG_LST_ADDR:
  281. pr->lst_addr = val;
  282. break;
  283. case AHCI_PORT_REG_LST_ADDR_HI:
  284. pr->lst_addr_hi = val;
  285. break;
  286. case AHCI_PORT_REG_FIS_ADDR:
  287. pr->fis_addr = val;
  288. break;
  289. case AHCI_PORT_REG_FIS_ADDR_HI:
  290. pr->fis_addr_hi = val;
  291. break;
  292. case AHCI_PORT_REG_IRQ_STAT:
  293. pr->irq_stat &= ~val;
  294. ahci_check_irq(s);
  295. break;
  296. case AHCI_PORT_REG_IRQ_MASK:
  297. pr->irq_mask = val & 0xfdc000ff;
  298. ahci_check_irq(s);
  299. break;
  300. case AHCI_PORT_REG_CMD:
  301. /* Prevent read-only fields from being written,
  302. * including LIST_ON and FIS_ON.
  303. * The spec requires the ICC bits to be cleared to zero once the ICC
  304. * change is done. We don't support ICC state changes, therefore always
  305. * force the ICC bits to zero.
  306. */
  307. pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) |
  308. (val & ~(PORT_CMD_RO_MASK | PORT_CMD_ICC_MASK));
  309. /* Check FIS RX and CLB engines */
  310. ahci_cond_start_engines(&s->dev[port]);
  311. /* XXX usually the FIS would be pending on the bus here and its
  312. issue deferred until the OS enables FIS reception.
  313. Instead, we only submit it once - which works in most
  314. cases, but is a hack. */
  315. if ((pr->cmd & PORT_CMD_FIS_ON) &&
  316. !s->dev[port].init_d2h_sent) {
  317. ahci_init_d2h(&s->dev[port]);
  318. }
  319. check_cmd(s, port);
  320. break;
  321. case AHCI_PORT_REG_TFDATA:
  322. case AHCI_PORT_REG_SIG:
  323. case AHCI_PORT_REG_SCR_STAT:
  324. /* Read Only */
  325. break;
  326. case AHCI_PORT_REG_SCR_CTL:
  327. if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
  328. ((val & AHCI_SCR_SCTL_DET) == 0)) {
  329. ahci_reset_port(s, port);
  330. }
  331. pr->scr_ctl = val;
  332. break;
  333. case AHCI_PORT_REG_SCR_ERR:
  334. pr->scr_err &= ~val;
  335. break;
  336. case AHCI_PORT_REG_SCR_ACT:
  337. /* RW1 */
  338. pr->scr_act |= val;
  339. break;
  340. case AHCI_PORT_REG_CMD_ISSUE:
  341. pr->cmd_issue |= val;
  342. check_cmd(s, port);
  343. break;
  344. default:
  345. trace_ahci_port_write_unimpl(s, port, AHCIPortReg_lookup[regnum],
  346. offset, val);
  347. qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
  348. "AHCI port %d register %s, offset 0x%x: 0x%"PRIx32,
  349. port, AHCIPortReg_lookup[regnum], offset, val);
  350. break;
  351. }
  352. }
  353. static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr)
  354. {
  355. AHCIState *s = opaque;
  356. uint32_t val = 0;
  357. if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
  358. enum AHCIHostReg regnum = addr / 4;
  359. assert(regnum < AHCI_HOST_REG__COUNT);
  360. switch (regnum) {
  361. case AHCI_HOST_REG_CAP:
  362. val = s->control_regs.cap;
  363. break;
  364. case AHCI_HOST_REG_CTL:
  365. val = s->control_regs.ghc;
  366. break;
  367. case AHCI_HOST_REG_IRQ_STAT:
  368. val = s->control_regs.irqstatus;
  369. break;
  370. case AHCI_HOST_REG_PORTS_IMPL:
  371. val = s->control_regs.impl;
  372. break;
  373. case AHCI_HOST_REG_VERSION:
  374. val = s->control_regs.version;
  375. break;
  376. default:
  377. trace_ahci_mem_read_32_host_default(s, AHCIHostReg_lookup[regnum],
  378. addr);
  379. }
  380. trace_ahci_mem_read_32_host(s, AHCIHostReg_lookup[regnum], addr, val);
  381. } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
  382. (addr < (AHCI_PORT_REGS_START_ADDR +
  383. (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
  384. val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
  385. addr & AHCI_PORT_ADDR_OFFSET_MASK);
  386. } else {
  387. trace_ahci_mem_read_32_default(s, addr, val);
  388. }
  389. trace_ahci_mem_read_32(s, addr, val);
  390. return val;
  391. }
  392. /**
  393. * AHCI 1.3 section 3 ("HBA Memory Registers")
  394. * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads.
  395. * Caller is responsible for masking unwanted higher order bytes.
  396. */
  397. static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size)
  398. {
  399. hwaddr aligned = addr & ~0x3;
  400. int ofst = addr - aligned;
  401. uint64_t lo = ahci_mem_read_32(opaque, aligned);
  402. uint64_t hi;
  403. uint64_t val;
  404. /* a < 8 byte read that does not cross a 4 byte boundary */
  405. if (ofst + size <= 4) {
  406. val = lo >> (ofst * 8);
  407. } else {
  408. g_assert(size > 1);
  409. /* If the 64bit read is unaligned, we will produce undefined
  410. * results. AHCI does not support unaligned 64bit reads. */
  411. hi = ahci_mem_read_32(opaque, aligned + 4);
  412. val = (hi << 32 | lo) >> (ofst * 8);
  413. }
  414. trace_ahci_mem_read(opaque, size, addr, val);
  415. return val;
  416. }
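/*
 * Worked example for ahci_mem_read() above: a 2-byte read at offset 0x2 has
 * aligned == 0x0 and ofst == 2, so val = lo >> 16 and the caller keeps the
 * low 16 bits; an aligned 8-byte read takes the else branch and merges two
 * consecutive 32-bit register reads.
 */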
  417. static void ahci_mem_write(void *opaque, hwaddr addr,
  418. uint64_t val, unsigned size)
  419. {
  420. AHCIState *s = opaque;
  421. trace_ahci_mem_write(s, size, addr, val);
  422. /* Only aligned writes are allowed on AHCI */
  423. if (addr & 3) {
  424. qemu_log_mask(LOG_GUEST_ERROR,
  425. "ahci: Mis-aligned write to addr 0x%03" HWADDR_PRIX "\n",
  426. addr);
  427. return;
  428. }
  429. if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
  430. enum AHCIHostReg regnum = addr / 4;
  431. assert(regnum < AHCI_HOST_REG__COUNT);
  432. switch (regnum) {
  433. case AHCI_HOST_REG_CAP: /* R/WO, RO */
  434. /* FIXME handle R/WO */
  435. break;
  436. case AHCI_HOST_REG_CTL: /* R/W */
  437. if (val & HOST_CTL_RESET) {
  438. ahci_reset(s);
  439. } else {
  440. s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
  441. ahci_check_irq(s);
  442. }
  443. break;
  444. case AHCI_HOST_REG_IRQ_STAT: /* R/WC, RO */
  445. s->control_regs.irqstatus &= ~val;
  446. ahci_check_irq(s);
  447. break;
  448. case AHCI_HOST_REG_PORTS_IMPL: /* R/WO, RO */
  449. /* FIXME handle R/WO */
  450. break;
  451. case AHCI_HOST_REG_VERSION: /* RO */
  452. /* FIXME report write? */
  453. break;
  454. default:
  455. qemu_log_mask(LOG_UNIMP,
  456. "Attempted write to unimplemented register: "
  457. "AHCI host register %s, "
  458. "offset 0x%"PRIx64": 0x%"PRIx64,
  459. AHCIHostReg_lookup[regnum], addr, val);
  460. trace_ahci_mem_write_host_unimpl(s, size,
  461. AHCIHostReg_lookup[regnum], addr);
  462. }
  463. trace_ahci_mem_write_host(s, size, AHCIHostReg_lookup[regnum],
  464. addr, val);
  465. } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
  466. (addr < (AHCI_PORT_REGS_START_ADDR +
  467. (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
  468. ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
  469. addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
  470. } else {
  471. qemu_log_mask(LOG_UNIMP, "Attempted write to unimplemented register: "
  472. "AHCI global register at offset 0x%"PRIx64": 0x%"PRIx64,
  473. addr, val);
  474. trace_ahci_mem_write_unimpl(s, size, addr, val);
  475. }
  476. }
  477. static const MemoryRegionOps ahci_mem_ops = {
  478. .read = ahci_mem_read,
  479. .write = ahci_mem_write,
  480. .endianness = DEVICE_LITTLE_ENDIAN,
  481. };
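/*
 * Index-Data Pair access. AHCI optionally exposes the HBA registers through
 * a legacy index/data register pair: the guest stores a register offset in
 * the index register and then accesses the data register, which the handlers
 * below forward to ahci_mem_read()/ahci_mem_write() at the selected offset.
 */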
  482. static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
  483. unsigned size)
  484. {
  485. AHCIState *s = opaque;
  486. if (addr == s->idp_offset) {
  487. /* index register */
  488. return s->idp_index;
  489. } else if (addr == s->idp_offset + 4) {
  490. /* data register - do memory read at location selected by index */
  491. return ahci_mem_read(opaque, s->idp_index, size);
  492. } else {
  493. return 0;
  494. }
  495. }
  496. static void ahci_idp_write(void *opaque, hwaddr addr,
  497. uint64_t val, unsigned size)
  498. {
  499. AHCIState *s = opaque;
  500. if (addr == s->idp_offset) {
  501. /* index register - mask off reserved bits */
  502. s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
  503. } else if (addr == s->idp_offset + 4) {
  504. /* data register - do memory write at location selected by index */
  505. ahci_mem_write(opaque, s->idp_index, val, size);
  506. }
  507. }
  508. static const MemoryRegionOps ahci_idp_ops = {
  509. .read = ahci_idp_read,
  510. .write = ahci_idp_write,
  511. .endianness = DEVICE_LITTLE_ENDIAN,
  512. };
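/*
 * Host register defaults. CAP packs the zero-based port count into bits 4:0
 * and the command-slot count into CAP.NCS (bits 12:8, likewise zero-based in
 * the spec), advertises Gen 1 link speed, and sets the NCQ, AHCI-only
 * (CAP.SAM) and 64-bit addressing capability flags; PI marks every emulated
 * port as implemented.
 */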
  513. static void ahci_reg_init(AHCIState *s)
  514. {
  515. int i;
  516. s->control_regs.cap = (s->ports - 1) |
  517. (AHCI_NUM_COMMAND_SLOTS << 8) |
  518. (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) |
  519. HOST_CAP_NCQ | HOST_CAP_AHCI | HOST_CAP_64;
  520. s->control_regs.impl = (1 << s->ports) - 1;
  521. s->control_regs.version = AHCI_VERSION_1_0;
  522. for (i = 0; i < s->ports; i++) {
  523. s->dev[i].port_state = STATE_RUN;
  524. }
  525. }
  526. static void check_cmd(AHCIState *s, int port)
  527. {
  528. AHCIPortRegs *pr = &s->dev[port].port_regs;
  529. uint8_t slot;
  530. if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
  531. for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
  532. if ((pr->cmd_issue & (1U << slot)) &&
  533. !handle_cmd(s, port, slot)) {
  534. pr->cmd_issue &= ~(1U << slot);
  535. }
  536. }
  537. }
  538. }
  539. static void ahci_check_cmd_bh(void *opaque)
  540. {
  541. AHCIDevice *ad = opaque;
  542. qemu_bh_delete(ad->check_bh);
  543. ad->check_bh = NULL;
  544. check_cmd(ad->hba, ad->port_no);
  545. }
  546. static void ahci_init_d2h(AHCIDevice *ad)
  547. {
  548. IDEState *ide_state = &ad->port.ifs[0];
  549. AHCIPortRegs *pr = &ad->port_regs;
  550. if (ad->init_d2h_sent) {
  551. return;
  552. }
  553. if (ahci_write_fis_d2h(ad)) {
  554. ad->init_d2h_sent = true;
  555. /* We're emulating receiving the first Reg D2H FIS from the device;
  556. * Update the SIG register, but otherwise proceed as normal. */
  557. pr->sig = ((uint32_t)ide_state->hcyl << 24) |
  558. (ide_state->lcyl << 16) |
  559. (ide_state->sector << 8) |
  560. (ide_state->nsector & 0xFF);
  561. }
  562. }
  563. static void ahci_set_signature(AHCIDevice *ad, uint32_t sig)
  564. {
  565. IDEState *s = &ad->port.ifs[0];
  566. s->hcyl = sig >> 24 & 0xFF;
  567. s->lcyl = sig >> 16 & 0xFF;
  568. s->sector = sig >> 8 & 0xFF;
  569. s->nsector = sig & 0xFF;
  570. trace_ahci_set_signature(ad->hba, ad->port_no, s->nsector, s->sector,
  571. s->lcyl, s->hcyl, sig);
  572. }
  573. static void ahci_reset_port(AHCIState *s, int port)
  574. {
  575. AHCIDevice *d = &s->dev[port];
  576. AHCIPortRegs *pr = &d->port_regs;
  577. IDEState *ide_state = &d->port.ifs[0];
  578. int i;
  579. trace_ahci_reset_port(s, port);
  580. ide_bus_reset(&d->port);
  581. ide_state->ncq_queues = AHCI_MAX_CMDS;
  582. pr->scr_stat = 0;
  583. pr->scr_err = 0;
  584. pr->scr_act = 0;
  585. pr->tfdata = 0x7F;
  586. pr->sig = 0xFFFFFFFF;
  587. d->busy_slot = -1;
  588. d->init_d2h_sent = false;
  589. ide_state = &s->dev[port].port.ifs[0];
  590. if (!ide_state->blk) {
  591. return;
  592. }
  593. /* reset ncq queue */
  594. for (i = 0; i < AHCI_MAX_CMDS; i++) {
  595. NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i];
  596. ncq_tfs->halt = false;
  597. if (!ncq_tfs->used) {
  598. continue;
  599. }
  600. if (ncq_tfs->aiocb) {
  601. blk_aio_cancel(ncq_tfs->aiocb);
  602. ncq_tfs->aiocb = NULL;
  603. }
  604. /* Maybe we just finished the request thanks to blk_aio_cancel() */
  605. if (!ncq_tfs->used) {
  606. continue;
  607. }
  608. qemu_sglist_destroy(&ncq_tfs->sglist);
  609. ncq_tfs->used = 0;
  610. }
  611. s->dev[port].port_state = STATE_RUN;
  612. if (ide_state->drive_kind == IDE_CD) {
  613. ahci_set_signature(d, SATA_SIGNATURE_CDROM);
  614. ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT;
  615. } else {
  616. ahci_set_signature(d, SATA_SIGNATURE_DISK);
  617. ide_state->status = SEEK_STAT | WRERR_STAT;
  618. }
  619. ide_state->error = 1;
  620. ahci_init_d2h(d);
  621. }
  622. /* Buffer pretty output based on a raw FIS structure. */
  623. static char *ahci_pretty_buffer_fis(const uint8_t *fis, int cmd_len)
  624. {
  625. int i;
  626. GString *s = g_string_new("FIS:");
  627. for (i = 0; i < cmd_len; i++) {
  628. if ((i & 0xf) == 0) {
  629. g_string_append_printf(s, "\n0x%02x: ", i);
  630. }
  631. g_string_append_printf(s, "%02x ", fis[i]);
  632. }
  633. g_string_append_c(s, '\n');
  634. return g_string_free(s, FALSE);
  635. }
  636. static bool ahci_map_fis_address(AHCIDevice *ad)
  637. {
  638. AHCIPortRegs *pr = &ad->port_regs;
  639. map_page(ad->hba->as, &ad->res_fis,
  640. ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256);
  641. if (ad->res_fis != NULL) {
  642. pr->cmd |= PORT_CMD_FIS_ON;
  643. return true;
  644. }
  645. pr->cmd &= ~PORT_CMD_FIS_ON;
  646. return false;
  647. }
  648. static void ahci_unmap_fis_address(AHCIDevice *ad)
  649. {
  650. if (ad->res_fis == NULL) {
  651. trace_ahci_unmap_fis_address_null(ad->hba, ad->port_no);
  652. return;
  653. }
  654. ad->port_regs.cmd &= ~PORT_CMD_FIS_ON;
  655. dma_memory_unmap(ad->hba->as, ad->res_fis, 256,
  656. DMA_DIRECTION_FROM_DEVICE, 256);
  657. ad->res_fis = NULL;
  658. }
  659. static bool ahci_map_clb_address(AHCIDevice *ad)
  660. {
  661. AHCIPortRegs *pr = &ad->port_regs;
  662. ad->cur_cmd = NULL;
  663. map_page(ad->hba->as, &ad->lst,
  664. ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
  665. if (ad->lst != NULL) {
  666. pr->cmd |= PORT_CMD_LIST_ON;
  667. return true;
  668. }
  669. pr->cmd &= ~PORT_CMD_LIST_ON;
  670. return false;
  671. }
  672. static void ahci_unmap_clb_address(AHCIDevice *ad)
  673. {
  674. if (ad->lst == NULL) {
  675. trace_ahci_unmap_clb_address_null(ad->hba, ad->port_no);
  676. return;
  677. }
  678. ad->port_regs.cmd &= ~PORT_CMD_LIST_ON;
  679. dma_memory_unmap(ad->hba->as, ad->lst, 1024,
  680. DMA_DIRECTION_FROM_DEVICE, 1024);
  681. ad->lst = NULL;
  682. }
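/*
 * The helpers below fill in the 256-byte received-FIS area mapped above.
 * The RES_FIS_* offsets follow the AHCI received-FIS layout, which places
 * the DMA Setup, PIO Setup, D2H Register and Set Device Bits FISes at fixed
 * positions within that buffer; each writer also mirrors the resulting
 * status/error into PxTFD before the relevant port interrupt is raised.
 */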
  683. static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs)
  684. {
  685. AHCIDevice *ad = ncq_tfs->drive;
  686. AHCIPortRegs *pr = &ad->port_regs;
  687. IDEState *ide_state;
  688. SDBFIS *sdb_fis;
  689. if (!ad->res_fis ||
  690. !(pr->cmd & PORT_CMD_FIS_RX)) {
  691. return;
  692. }
  693. sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
  694. ide_state = &ad->port.ifs[0];
  695. sdb_fis->type = SATA_FIS_TYPE_SDB;
  696. /* Interrupt pending & Notification bit */
  697. sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */
  698. sdb_fis->status = ide_state->status & 0x77;
  699. sdb_fis->error = ide_state->error;
  700. /* update SAct field in SDB_FIS */
  701. sdb_fis->payload = cpu_to_le32(ad->finished);
  702. /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
  703. pr->tfdata = (ad->port.ifs[0].error << 8) |
  704. (ad->port.ifs[0].status & 0x77) |
  705. (pr->tfdata & 0x88);
  706. pr->scr_act &= ~ad->finished;
  707. ad->finished = 0;
  708. /* Trigger IRQ if interrupt bit is set (which currently, it always is) */
  709. if (sdb_fis->flags & 0x40) {
  710. ahci_trigger_irq(s, ad, AHCI_PORT_IRQ_BIT_SDBS);
  711. }
  712. }
  713. static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len, bool pio_fis_i)
  714. {
  715. AHCIPortRegs *pr = &ad->port_regs;
  716. uint8_t *pio_fis;
  717. IDEState *s = &ad->port.ifs[0];
  718. if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
  719. return;
  720. }
  721. pio_fis = &ad->res_fis[RES_FIS_PSFIS];
  722. pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
  723. pio_fis[1] = (pio_fis_i ? (1 << 6) : 0);
  724. pio_fis[2] = s->status;
  725. pio_fis[3] = s->error;
  726. pio_fis[4] = s->sector;
  727. pio_fis[5] = s->lcyl;
  728. pio_fis[6] = s->hcyl;
  729. pio_fis[7] = s->select;
  730. pio_fis[8] = s->hob_sector;
  731. pio_fis[9] = s->hob_lcyl;
  732. pio_fis[10] = s->hob_hcyl;
  733. pio_fis[11] = 0;
  734. pio_fis[12] = s->nsector & 0xFF;
  735. pio_fis[13] = (s->nsector >> 8) & 0xFF;
  736. pio_fis[14] = 0;
  737. pio_fis[15] = s->status;
  738. pio_fis[16] = len & 255;
  739. pio_fis[17] = len >> 8;
  740. pio_fis[18] = 0;
  741. pio_fis[19] = 0;
  742. /* Update shadow registers: */
  743. pr->tfdata = (ad->port.ifs[0].error << 8) |
  744. ad->port.ifs[0].status;
  745. if (pio_fis[2] & ERR_STAT) {
  746. ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
  747. }
  748. }
  749. static bool ahci_write_fis_d2h(AHCIDevice *ad)
  750. {
  751. AHCIPortRegs *pr = &ad->port_regs;
  752. uint8_t *d2h_fis;
  753. int i;
  754. IDEState *s = &ad->port.ifs[0];
  755. if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
  756. return false;
  757. }
  758. d2h_fis = &ad->res_fis[RES_FIS_RFIS];
  759. d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
  760. d2h_fis[1] = (1 << 6); /* interrupt bit */
  761. d2h_fis[2] = s->status;
  762. d2h_fis[3] = s->error;
  763. d2h_fis[4] = s->sector;
  764. d2h_fis[5] = s->lcyl;
  765. d2h_fis[6] = s->hcyl;
  766. d2h_fis[7] = s->select;
  767. d2h_fis[8] = s->hob_sector;
  768. d2h_fis[9] = s->hob_lcyl;
  769. d2h_fis[10] = s->hob_hcyl;
  770. d2h_fis[11] = 0;
  771. d2h_fis[12] = s->nsector & 0xFF;
  772. d2h_fis[13] = (s->nsector >> 8) & 0xFF;
  773. for (i = 14; i < 20; i++) {
  774. d2h_fis[i] = 0;
  775. }
  776. /* Update shadow registers: */
  777. pr->tfdata = (ad->port.ifs[0].error << 8) |
  778. ad->port.ifs[0].status;
  779. if (d2h_fis[2] & ERR_STAT) {
  780. ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_TFES);
  781. }
  782. ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_DHRS);
  783. return true;
  784. }
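/*
 * A PRD entry carries a 64-bit data base address plus a byte count in the
 * low 22 bits of flags_size. The count is stored zero-based (0 means one
 * byte), hence the "+ 1" below.
 */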
  785. static int prdt_tbl_entry_size(const AHCI_SG *tbl)
  786. {
  787. /* flags_size is zero-based */
  788. return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1;
  789. }
  790. /**
  791. * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist.
  792. * @ad: The AHCIDevice for whom we are building the SGList.
  793. * @sglist: The SGList target to add PRD entries to.
  794. * @cmd: The AHCI Command Header that describes where the PRDT is.
  795. * @limit: The remaining size of the S/ATA transaction, in bytes.
  796. * @offset: The number of bytes already transferred, in bytes.
  797. *
  798. * The AHCI PRDT can describe up to 256GiB. S/ATA only supports transactions of
  799. * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop
  800. * building the sglist from the PRDT as soon as we hit @limit bytes,
  801. * which is <= INT32_MAX (2 GiB).
  802. */
  803. static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
  804. AHCICmdHdr *cmd, int64_t limit, uint64_t offset)
  805. {
  806. uint16_t opts = le16_to_cpu(cmd->opts);
  807. uint16_t prdtl = le16_to_cpu(cmd->prdtl);
  808. uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr);
  809. uint64_t prdt_addr = cfis_addr + 0x80;
  810. dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG));
  811. dma_addr_t real_prdt_len = prdt_len;
  812. uint8_t *prdt;
  813. int i;
  814. int r = 0;
  815. uint64_t sum = 0;
  816. int off_idx = -1;
  817. int64_t off_pos = -1;
  818. int tbl_entry_size;
  819. IDEBus *bus = &ad->port;
  820. BusState *qbus = BUS(bus);
  821. trace_ahci_populate_sglist(ad->hba, ad->port_no);
  822. if (!prdtl) {
  823. trace_ahci_populate_sglist_no_prdtl(ad->hba, ad->port_no, opts);
  824. return -1;
  825. }
  826. /* map PRDT */
  827. if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len,
  828. DMA_DIRECTION_TO_DEVICE,
  829. MEMTXATTRS_UNSPECIFIED))){
  830. trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no);
  831. return -1;
  832. }
  833. if (prdt_len < real_prdt_len) {
  834. trace_ahci_populate_sglist_short_map(ad->hba, ad->port_no);
  835. r = -1;
  836. goto out;
  837. }
  838. /* Get entries in the PRDT, init a qemu sglist accordingly */
  839. if (prdtl > 0) {
  840. AHCI_SG *tbl = (AHCI_SG *)prdt;
  841. sum = 0;
  842. for (i = 0; i < prdtl; i++) {
  843. tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
  844. if (offset < (sum + tbl_entry_size)) {
  845. off_idx = i;
  846. off_pos = offset - sum;
  847. break;
  848. }
  849. sum += tbl_entry_size;
  850. }
  851. if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) {
  852. trace_ahci_populate_sglist_bad_offset(ad->hba, ad->port_no,
  853. off_idx, off_pos);
  854. r = -1;
  855. goto out;
  856. }
  857. qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx),
  858. ad->hba->as);
  859. qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos,
  860. MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos,
  861. limit));
  862. for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) {
  863. qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
  864. MIN(prdt_tbl_entry_size(&tbl[i]),
  865. limit - sglist->size));
  866. }
  867. }
  868. out:
  869. dma_memory_unmap(ad->hba->as, prdt, prdt_len,
  870. DMA_DIRECTION_TO_DEVICE, prdt_len);
  871. return r;
  872. }
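/*
 * Resume example for ahci_populate_sglist() above: with 8 KiB PRD entries
 * and offset == 12 KiB, the walk skips the first entry, starts 4 KiB into
 * the second (off_idx == 1, off_pos == 4 KiB), and keeps appending entries
 * until the sglist covers @limit bytes.
 */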
  873. static void ncq_err(NCQTransferState *ncq_tfs)
  874. {
  875. IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
  876. ide_state->error = ABRT_ERR;
  877. ide_state->status = READY_STAT | ERR_STAT;
  878. ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag);
  879. qemu_sglist_destroy(&ncq_tfs->sglist);
  880. ncq_tfs->used = 0;
  881. }
  882. static void ncq_finish(NCQTransferState *ncq_tfs)
  883. {
  884. /* If we didn't error out, set our finished bit. Errored commands
  885. * do not get a bit set for the SDB FIS ACT register, nor do they
  886. * clear the outstanding bit in scr_act (PxSACT). */
  887. if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) {
  888. ncq_tfs->drive->finished |= (1 << ncq_tfs->tag);
  889. }
  890. ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs);
  891. trace_ncq_finish(ncq_tfs->drive->hba, ncq_tfs->drive->port_no,
  892. ncq_tfs->tag);
  893. block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk),
  894. &ncq_tfs->acct);
  895. qemu_sglist_destroy(&ncq_tfs->sglist);
  896. ncq_tfs->used = 0;
  897. }
  898. static void ncq_cb(void *opaque, int ret)
  899. {
  900. NCQTransferState *ncq_tfs = (NCQTransferState *)opaque;
  901. IDEState *ide_state = &ncq_tfs->drive->port.ifs[0];
  902. ncq_tfs->aiocb = NULL;
  903. if (ret < 0) {
  904. bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED;
  905. BlockErrorAction action = blk_get_error_action(ide_state->blk,
  906. is_read, -ret);
  907. if (action == BLOCK_ERROR_ACTION_STOP) {
  908. ncq_tfs->halt = true;
  909. ide_state->bus->error_status = IDE_RETRY_HBA;
  910. } else if (action == BLOCK_ERROR_ACTION_REPORT) {
  911. ncq_err(ncq_tfs);
  912. }
  913. blk_error_action(ide_state->blk, action, is_read, -ret);
  914. } else {
  915. ide_state->status = READY_STAT | SEEK_STAT;
  916. }
  917. if (!ncq_tfs->halt) {
  918. ncq_finish(ncq_tfs);
  919. }
  920. }
  921. static int is_ncq(uint8_t ata_cmd)
  922. {
  923. /* Based on SATA 3.2 section 13.6.3.2 */
  924. switch (ata_cmd) {
  925. case READ_FPDMA_QUEUED:
  926. case WRITE_FPDMA_QUEUED:
  927. case NCQ_NON_DATA:
  928. case RECEIVE_FPDMA_QUEUED:
  929. case SEND_FPDMA_QUEUED:
  930. return 1;
  931. default:
  932. return 0;
  933. }
  934. }
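/*
 * NCQ command lifecycle: process_ncq_command() parses the NCQ FIS and builds
 * the sglist, execute_ncq_command() starts the asynchronous block I/O, and
 * ncq_cb() completes it - finishing via ncq_finish() (SDB FIS plus SDBS
 * interrupt), recording an error via ncq_err(), or halting the transfer for
 * a later rerun when the block layer asks us to stop.
 */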
  935. static void execute_ncq_command(NCQTransferState *ncq_tfs)
  936. {
  937. AHCIDevice *ad = ncq_tfs->drive;
  938. IDEState *ide_state = &ad->port.ifs[0];
  939. int port = ad->port_no;
  940. g_assert(is_ncq(ncq_tfs->cmd));
  941. ncq_tfs->halt = false;
  942. switch (ncq_tfs->cmd) {
  943. case READ_FPDMA_QUEUED:
  944. trace_execute_ncq_command_read(ad->hba, port, ncq_tfs->tag,
  945. ncq_tfs->sector_count, ncq_tfs->lba);
  946. dma_acct_start(ide_state->blk, &ncq_tfs->acct,
  947. &ncq_tfs->sglist, BLOCK_ACCT_READ);
  948. ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist,
  949. ncq_tfs->lba << BDRV_SECTOR_BITS,
  950. BDRV_SECTOR_SIZE,
  951. ncq_cb, ncq_tfs);
  952. break;
  953. case WRITE_FPDMA_QUEUED:
  954. trace_execute_ncq_command_write(ad->hba, port, ncq_tfs->tag,
  955. ncq_tfs->sector_count, ncq_tfs->lba);
  956. dma_acct_start(ide_state->blk, &ncq_tfs->acct,
  957. &ncq_tfs->sglist, BLOCK_ACCT_WRITE);
  958. ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist,
  959. ncq_tfs->lba << BDRV_SECTOR_BITS,
  960. BDRV_SECTOR_SIZE,
  961. ncq_cb, ncq_tfs);
  962. break;
  963. default:
  964. trace_execute_ncq_command_unsup(ad->hba, port,
  965. ncq_tfs->tag, ncq_tfs->cmd);
  966. ncq_err(ncq_tfs);
  967. }
  968. }
  969. static void process_ncq_command(AHCIState *s, int port, const uint8_t *cmd_fis,
  970. uint8_t slot)
  971. {
  972. AHCIDevice *ad = &s->dev[port];
  973. const NCQFrame *ncq_fis = (NCQFrame *)cmd_fis;
  974. uint8_t tag = ncq_fis->tag >> 3;
  975. NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag];
  976. size_t size;
  977. g_assert(is_ncq(ncq_fis->command));
  978. if (ncq_tfs->used) {
  979. /* error - already in use */
  980. qemu_log_mask(LOG_GUEST_ERROR, "%s: tag %d already used\n",
  981. __func__, tag);
  982. return;
  983. }
  984. ncq_tfs->used = 1;
  985. ncq_tfs->drive = ad;
  986. ncq_tfs->slot = slot;
  987. ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot];
  988. ncq_tfs->cmd = ncq_fis->command;
  989. ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) |
  990. ((uint64_t)ncq_fis->lba4 << 32) |
  991. ((uint64_t)ncq_fis->lba3 << 24) |
  992. ((uint64_t)ncq_fis->lba2 << 16) |
  993. ((uint64_t)ncq_fis->lba1 << 8) |
  994. (uint64_t)ncq_fis->lba0;
  995. ncq_tfs->tag = tag;
  996. /* Sanity-check the NCQ packet */
  997. if (tag != slot) {
  998. trace_process_ncq_command_mismatch(s, port, tag, slot);
  999. }
  1000. if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) {
  1001. trace_process_ncq_command_aux(s, port, tag);
  1002. }
  1003. if (ncq_fis->prio || ncq_fis->icc) {
  1004. trace_process_ncq_command_prioicc(s, port, tag);
  1005. }
  1006. if (ncq_fis->fua & NCQ_FIS_FUA_MASK) {
  1007. trace_process_ncq_command_fua(s, port, tag);
  1008. }
  1009. if (ncq_fis->tag & NCQ_FIS_RARC_MASK) {
  1010. trace_process_ncq_command_rarc(s, port, tag);
  1011. }
  1012. ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) |
  1013. ncq_fis->sector_count_low);
  1014. if (!ncq_tfs->sector_count) {
  1015. ncq_tfs->sector_count = 0x10000;
  1016. }
  1017. size = ncq_tfs->sector_count * BDRV_SECTOR_SIZE;
  1018. ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0);
  1019. if (ncq_tfs->sglist.size < size) {
  1020. error_report("ahci: PRDT length for NCQ command (0x" DMA_ADDR_FMT ") "
  1021. "is smaller than the requested size (0x%zx)",
  1022. ncq_tfs->sglist.size, size);
  1023. ncq_err(ncq_tfs);
  1024. ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_OFS);
  1025. return;
  1026. } else if (ncq_tfs->sglist.size != size) {
  1027. trace_process_ncq_command_large(s, port, tag,
  1028. ncq_tfs->sglist.size, size);
  1029. }
  1030. trace_process_ncq_command(s, port, tag,
  1031. ncq_fis->command,
  1032. ncq_tfs->lba,
  1033. ncq_tfs->lba + ncq_tfs->sector_count - 1);
  1034. execute_ncq_command(ncq_tfs);
  1035. }
  1036. static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot)
  1037. {
  1038. if (port >= s->ports || slot >= AHCI_MAX_CMDS) {
  1039. return NULL;
  1040. }
  1041. return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL;
  1042. }
  1043. static void handle_reg_h2d_fis(AHCIState *s, int port,
  1044. uint8_t slot, const uint8_t *cmd_fis)
  1045. {
  1046. IDEState *ide_state = &s->dev[port].port.ifs[0];
  1047. AHCICmdHdr *cmd = get_cmd_header(s, port, slot);
  1048. uint16_t opts = le16_to_cpu(cmd->opts);
  1049. if (cmd_fis[1] & 0x0F) {
  1050. trace_handle_reg_h2d_fis_pmp(s, port, cmd_fis[1],
  1051. cmd_fis[2], cmd_fis[3]);
  1052. return;
  1053. }
  1054. if (cmd_fis[1] & 0x70) {
  1055. trace_handle_reg_h2d_fis_res(s, port, cmd_fis[1],
  1056. cmd_fis[2], cmd_fis[3]);
  1057. return;
  1058. }
  1059. if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) {
  1060. switch (s->dev[port].port_state) {
  1061. case STATE_RUN:
  1062. if (cmd_fis[15] & ATA_SRST) {
  1063. s->dev[port].port_state = STATE_RESET;
  1064. }
  1065. break;
  1066. case STATE_RESET:
  1067. if (!(cmd_fis[15] & ATA_SRST)) {
  1068. ahci_reset_port(s, port);
  1069. }
  1070. break;
  1071. }
  1072. return;
  1073. }
  1074. /* Check for NCQ command */
  1075. if (is_ncq(cmd_fis[2])) {
  1076. process_ncq_command(s, port, cmd_fis, slot);
  1077. return;
  1078. }
  1079. /* Decompose the FIS:
  1080. * AHCI does not interpret FIS packets, it only forwards them.
  1081. * SATA 1.0 describes how to decode LBA28 and CHS FIS packets.
  1082. * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets.
  1083. *
  1084. * ATA4 describes sector number for LBA28/CHS commands.
  1085. * ATA6 describes sector number for LBA48 commands.
  1086. * ATA8 deprecates CHS fully, describing only LBA28/48.
  1087. *
  1088. * We dutifully convert the FIS into IDE registers, and allow the
  1089. * core layer to interpret them as needed. */
  1090. ide_state->feature = cmd_fis[3];
  1091. ide_state->sector = cmd_fis[4]; /* LBA 7:0 */
  1092. ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */
  1093. ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */
  1094. ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */
  1095. ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */
  1096. ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */
  1097. ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */
  1098. ide_state->hob_feature = cmd_fis[11];
  1099. ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]);
  1100. /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */
  1101. /* 15: Only valid when UPDATE_COMMAND not set. */
  1102. /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command
  1103. * table to ide_state->io_buffer */
  1104. if (opts & AHCI_CMD_ATAPI) {
  1105. memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10);
  1106. if (trace_event_get_state_backends(TRACE_HANDLE_REG_H2D_FIS_DUMP)) {
  1107. char *pretty_fis = ahci_pretty_buffer_fis(ide_state->io_buffer, 0x10);
  1108. trace_handle_reg_h2d_fis_dump(s, port, pretty_fis);
  1109. g_free(pretty_fis);
  1110. }
  1111. }
  1112. ide_state->error = 0;
  1113. s->dev[port].done_first_drq = false;
  1114. /* Reset transferred byte counter */
  1115. cmd->status = 0;
  1116. /* We're ready to process the command in FIS byte 2. */
  1117. ide_bus_exec_cmd(&s->dev[port].port, cmd_fis[2]);
  1118. }
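/*
 * handle_cmd() maps the 0x80-byte command table for the issued slot, dumps
 * and dispatches the embedded command FIS, and returns 0 if the command
 * finished synchronously; a return of -1 leaves the PxCI bit set, either
 * because the command could not be handled yet or because it will complete
 * asynchronously via ahci_cmd_done().
 */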
  1119. static int handle_cmd(AHCIState *s, int port, uint8_t slot)
  1120. {
  1121. IDEState *ide_state;
  1122. uint64_t tbl_addr;
  1123. AHCICmdHdr *cmd;
  1124. uint8_t *cmd_fis;
  1125. dma_addr_t cmd_len;
  1126. if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
  1127. /* Engine currently busy, try again later */
  1128. trace_handle_cmd_busy(s, port);
  1129. return -1;
  1130. }
  1131. if (!s->dev[port].lst) {
  1132. trace_handle_cmd_nolist(s, port);
  1133. return -1;
  1134. }
  1135. cmd = get_cmd_header(s, port, slot);
  1136. /* remember current slot handle for later */
  1137. s->dev[port].cur_cmd = cmd;
  1138. /* The device we are working for */
  1139. ide_state = &s->dev[port].port.ifs[0];
  1140. if (!ide_state->blk) {
  1141. trace_handle_cmd_badport(s, port);
  1142. return -1;
  1143. }
  1144. tbl_addr = le64_to_cpu(cmd->tbl_addr);
  1145. cmd_len = 0x80;
  1146. cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len,
  1147. DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
  1148. if (!cmd_fis) {
  1149. trace_handle_cmd_badfis(s, port);
  1150. return -1;
  1151. } else if (cmd_len != 0x80) {
  1152. ahci_trigger_irq(s, &s->dev[port], AHCI_PORT_IRQ_BIT_HBFS);
  1153. trace_handle_cmd_badmap(s, port, cmd_len);
  1154. goto out;
  1155. }
  1156. if (trace_event_get_state_backends(TRACE_HANDLE_CMD_FIS_DUMP)) {
  1157. char *pretty_fis = ahci_pretty_buffer_fis(cmd_fis, 0x80);
  1158. trace_handle_cmd_fis_dump(s, port, pretty_fis);
  1159. g_free(pretty_fis);
  1160. }
  1161. switch (cmd_fis[0]) {
  1162. case SATA_FIS_TYPE_REGISTER_H2D:
  1163. handle_reg_h2d_fis(s, port, slot, cmd_fis);
  1164. break;
  1165. default:
  1166. trace_handle_cmd_unhandled_fis(s, port,
  1167. cmd_fis[0], cmd_fis[1], cmd_fis[2]);
  1168. break;
  1169. }
  1170. out:
  1171. dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_TO_DEVICE,
  1172. cmd_len);
  1173. if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
  1174. /* async command, complete later */
  1175. s->dev[port].busy_slot = slot;
  1176. return -1;
  1177. }
  1178. /* done handling the command */
  1179. return 0;
  1180. }
  1181. /* Transfer PIO data between RAM and device */
  1182. static void ahci_pio_transfer(const IDEDMA *dma)
  1183. {
  1184. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1185. IDEState *s = &ad->port.ifs[0];
  1186. uint32_t size = (uint32_t)(s->data_end - s->data_ptr);
  1187. /* write == ram -> device */
  1188. uint16_t opts = le16_to_cpu(ad->cur_cmd->opts);
  1189. int is_write = opts & AHCI_CMD_WRITE;
  1190. int is_atapi = opts & AHCI_CMD_ATAPI;
  1191. int has_sglist = 0;
  1192. bool pio_fis_i;
  1193. /* The PIO Setup FIS is received prior to transfer, but the interrupt
  1194. * is only triggered after data is received.
  1195. *
  1196. * The device only sets the 'I' bit in the PIO Setup FIS for device->host
  1197. * requests (see "DPIOI1" in the SATA spec), or for host->device DRQs after
  1198. * the first (see "DPIOO1"). The latter is consistent with the spec's
  1199. * description of the PACKET protocol, where the command part of ATAPI requests
  1200. * ("DPKT0") has the 'I' bit clear, while the data part of PIO ATAPI requests
  1201. * ("DPKT4a" and "DPKT7") has the 'I' bit set for both directions for all DRQs.
  1202. */
  1203. pio_fis_i = ad->done_first_drq || (!is_atapi && !is_write);
  1204. ahci_write_fis_pio(ad, size, pio_fis_i);
  1205. if (is_atapi && !ad->done_first_drq) {
  1206. /* already prepopulated iobuffer */
  1207. goto out;
  1208. }
  1209. if (ahci_dma_prepare_buf(dma, size)) {
  1210. has_sglist = 1;
  1211. }
  1212. trace_ahci_pio_transfer(ad->hba, ad->port_no, is_write ? "writ" : "read",
  1213. size, is_atapi ? "atapi" : "ata",
  1214. has_sglist ? "" : "o");
  1215. if (has_sglist && size) {
  1216. const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
  1217. if (is_write) {
  1218. dma_buf_write(s->data_ptr, size, NULL, &s->sg, attrs);
  1219. } else {
  1220. dma_buf_read(s->data_ptr, size, NULL, &s->sg, attrs);
  1221. }
  1222. }
  1223. /* Update number of transferred bytes, destroy sglist */
  1224. dma_buf_commit(s, size);
  1225. out:
  1226. /* declare that we processed everything */
  1227. s->data_ptr = s->data_end;
  1228. ad->done_first_drq = true;
  1229. if (pio_fis_i) {
  1230. ahci_trigger_irq(ad->hba, ad, AHCI_PORT_IRQ_BIT_PSS);
  1231. }
  1232. }
  1233. static void ahci_start_dma(const IDEDMA *dma, IDEState *s,
  1234. BlockCompletionFunc *dma_cb)
  1235. {
  1236. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1237. trace_ahci_start_dma(ad->hba, ad->port_no);
  1238. s->io_buffer_offset = 0;
  1239. dma_cb(s, 0);
  1240. }
  1241. static void ahci_restart_dma(const IDEDMA *dma)
  1242. {
  1243. /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */
  1244. }
  1245. /**
  1246. * IDE/PIO restarts are handled by the core layer, but NCQ commands
  1247. * need an extra kick from the AHCI HBA.
  1248. */
  1249. static void ahci_restart(const IDEDMA *dma)
  1250. {
  1251. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1252. int i;
  1253. for (i = 0; i < AHCI_MAX_CMDS; i++) {
  1254. NCQTransferState *ncq_tfs = &ad->ncq_tfs[i];
  1255. if (ncq_tfs->halt) {
  1256. execute_ncq_command(ncq_tfs);
  1257. }
  1258. }
  1259. }
  1260. /**
  1261. * Called in DMA and PIO R/W chains to read the PRDT.
  1262. * Not shared with NCQ pathways.
  1263. */
  1264. static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit)
  1265. {
  1266. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1267. IDEState *s = &ad->port.ifs[0];
  1268. if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd,
  1269. limit, s->io_buffer_offset) == -1) {
  1270. trace_ahci_dma_prepare_buf_fail(ad->hba, ad->port_no);
  1271. return -1;
  1272. }
  1273. s->io_buffer_size = s->sg.size;
  1274. trace_ahci_dma_prepare_buf(ad->hba, ad->port_no, limit, s->io_buffer_size);
  1275. return s->io_buffer_size;
  1276. }
  1277. /**
  1278. * Updates the command header with a bytes-read value.
  1279. * Called via dma_buf_commit, for both DMA and PIO paths.
  1280. * sglist destruction is handled within dma_buf_commit.
  1281. */
  1282. static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes)
  1283. {
  1284. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1285. tx_bytes += le32_to_cpu(ad->cur_cmd->status);
  1286. ad->cur_cmd->status = cpu_to_le32(tx_bytes);
  1287. }
  1288. static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write)
  1289. {
  1290. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1291. IDEState *s = &ad->port.ifs[0];
  1292. uint8_t *p = s->io_buffer + s->io_buffer_index;
  1293. int l = s->io_buffer_size - s->io_buffer_index;
  1294. if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) {
  1295. return 0;
  1296. }
  1297. if (is_write) {
  1298. dma_buf_read(p, l, NULL, &s->sg, MEMTXATTRS_UNSPECIFIED);
  1299. } else {
  1300. dma_buf_write(p, l, NULL, &s->sg, MEMTXATTRS_UNSPECIFIED);
  1301. }
  1302. /* free sglist, update byte count */
  1303. dma_buf_commit(s, l);
  1304. s->io_buffer_index += l;
  1305. trace_ahci_dma_rw_buf(ad->hba, ad->port_no, l);
  1306. return 1;
  1307. }
  1308. static void ahci_cmd_done(const IDEDMA *dma)
  1309. {
  1310. AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
  1311. trace_ahci_cmd_done(ad->hba, ad->port_no);
  1312. /* no longer busy */
  1313. if (ad->busy_slot != -1) {
  1314. ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot);
  1315. ad->busy_slot = -1;
  1316. }
  1317. /* update d2h status */
  1318. ahci_write_fis_d2h(ad);
  1319. if (ad->port_regs.cmd_issue && !ad->check_bh) {
  1320. ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad);
  1321. qemu_bh_schedule(ad->check_bh);
  1322. }
  1323. }
  1324. static void ahci_irq_set(void *opaque, int n, int level)
  1325. {
  1326. qemu_log_mask(LOG_UNIMP, "ahci: IRQ#%d level:%d\n", n, level);
  1327. }
  1328. static const IDEDMAOps ahci_dma_ops = {
  1329. .start_dma = ahci_start_dma,
  1330. .restart = ahci_restart,
  1331. .restart_dma = ahci_restart_dma,
  1332. .pio_transfer = ahci_pio_transfer,
  1333. .prepare_buf = ahci_dma_prepare_buf,
  1334. .commit_buf = ahci_commit_buf,
  1335. .rw_buf = ahci_dma_rw_buf,
  1336. .cmd_done = ahci_cmd_done,
  1337. };
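/*
 * ahci_dma_ops above is the glue the generic IDE core calls back into:
 * prepare_buf builds an sglist from the current command's PRDT, rw_buf and
 * pio_transfer move the data, commit_buf accumulates the transferred byte
 * count into the command header, and cmd_done writes the D2H FIS and
 * re-kicks any commands still pending in PxCI.
 */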
  1338. void ahci_init(AHCIState *s, DeviceState *qdev)
  1339. {
  1340. s->container = qdev;
  1341. /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
  1342. memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
  1343. "ahci", AHCI_MEM_BAR_SIZE);
  1344. memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
  1345. "ahci-idp", 32);
  1346. }
  1347. void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports)
  1348. {
  1349. qemu_irq *irqs;
  1350. int i;
  1351. s->as = as;
  1352. s->ports = ports;
  1353. s->dev = g_new0(AHCIDevice, ports);
  1354. ahci_reg_init(s);
  1355. irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);
  1356. for (i = 0; i < s->ports; i++) {
  1357. AHCIDevice *ad = &s->dev[i];
  1358. ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1);
  1359. ide_bus_init_output_irq(&ad->port, irqs[i]);
  1360. ad->hba = s;
  1361. ad->port_no = i;
  1362. ad->port.dma = &ad->dma;
  1363. ad->port.dma->ops = &ahci_dma_ops;
  1364. ide_bus_register_restart_cb(&ad->port);
  1365. }
  1366. g_free(irqs);
  1367. }
  1368. void ahci_uninit(AHCIState *s)
  1369. {
  1370. int i, j;
  1371. for (i = 0; i < s->ports; i++) {
  1372. AHCIDevice *ad = &s->dev[i];
  1373. for (j = 0; j < 2; j++) {
  1374. IDEState *s = &ad->port.ifs[j];
  1375. ide_exit(s);
  1376. }
  1377. object_unparent(OBJECT(&ad->port));
  1378. }
  1379. g_free(s->dev);
  1380. }
void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    trace_ahci_reset(s);

    s->control_regs.irqstatus = 0;
    /* AHCI Enable (AE)
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->irq_stat = 0;
        pr->irq_mask = 0;
        pr->scr_ctl = 0;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}

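/* Migration stream layout for a single NCQ transfer slot. */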
static const VMStateDescription vmstate_ncq_tfs = {
    .name = "ncq state",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(sector_count, NCQTransferState),
        VMSTATE_UINT64(lba, NCQTransferState),
        VMSTATE_UINT8(tag, NCQTransferState),
        VMSTATE_UINT8(cmd, NCQTransferState),
        VMSTATE_UINT8(slot, NCQTransferState),
        VMSTATE_BOOL(used, NCQTransferState),
        VMSTATE_BOOL(halt, NCQTransferState),
        VMSTATE_END_OF_LIST()
    },
};

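/* Migration stream layout for one AHCI port: its IDE bus/drive plus the guest-visible port registers and NCQ slots. */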
static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_first_drq, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS,
                             1, vmstate_ncq_tfs, NCQTransferState),
        VMSTATE_END_OF_LIST()
    },
};

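/*
 * Validate the incoming migration state and rebuild what is not sent over
 * the wire: restart the command-list/FIS-receive engines, re-map the
 * command headers and scatter/gather lists of any halted NCQ transfers,
 * then re-fetch the command header for an interrupted slot or scan PxCI
 * for pending commands.
 */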
static int ahci_state_post_load(void *opaque, int version_id)
{
    int i, j;
    struct AHCIDevice *ad;
    NCQTransferState *ncq_tfs;
    AHCIPortRegs *pr;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];
        pr = &ad->port_regs;

        if (!(pr->cmd & PORT_CMD_START) && (pr->cmd & PORT_CMD_LIST_ON)) {
            error_report("AHCI: DMA engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }
        if (!(pr->cmd & PORT_CMD_FIS_RX) && (pr->cmd & PORT_CMD_FIS_ON)) {
            error_report("AHCI: FIS RX engine should be off, but status bit "
                         "indicates it is still running.");
            return -1;
        }

        /* After a migrate, the DMA/FIS engines are "off" and
         * need to be conditionally restarted */
        pr->cmd &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON);
        if (ahci_cond_start_engines(ad) != 0) {
            return -1;
        }

        for (j = 0; j < AHCI_MAX_CMDS; j++) {
            ncq_tfs = &ad->ncq_tfs[j];
            ncq_tfs->drive = ad;

            if (ncq_tfs->used != ncq_tfs->halt) {
                return -1;
            }
            if (!ncq_tfs->halt) {
                continue;
            }
            if (!is_ncq(ncq_tfs->cmd)) {
                return -1;
            }
            if (ncq_tfs->slot != ncq_tfs->tag) {
                return -1;
            }
            /* If ncq_tfs->halt is justly set, the engine should be engaged,
             * and the command list buffer should be mapped. */
            ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot);
            if (!ncq_tfs->cmdh) {
                return -1;
            }
            ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist,
                                 ncq_tfs->cmdh,
                                 ncq_tfs->sector_count * BDRV_SECTOR_SIZE,
                                 0);
            if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) {
                return -1;
            }
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                return -1;
            }
            ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot);
        }
    }

    return 0;
}

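/* Top-level AHCI migration state: the global HBA registers plus one vmstate_ahci_device entry per port. */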
const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .version_id = 1,
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState, NULL),
        VMSTATE_END_OF_LIST()
    },
};

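/*
 * QOM boilerplate for TYPE_SYSBUS_AHCI, which exposes the HBA as a
 * memory-mapped system-bus device with a single IRQ: migration state,
 * reset, instance init/realize, properties and type registration.
 */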
static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_ahci_reset(DeviceState *dev)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_reset(&s->ahci);
}

static void sysbus_ahci_init(Object *obj)
{
    SysbusAHCIState *s = SYSBUS_AHCI(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    ahci_init(&s->ahci, DEVICE(obj));

    sysbus_init_mmio(sbd, &s->ahci.mem);
    sysbus_init_irq(sbd, &s->ahci.irq);
}

static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
{
    SysbusAHCIState *s = SYSBUS_AHCI(dev);

    ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports);
}

static Property sysbus_ahci_properties[] = {
    DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void sysbus_ahci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_ahci_realize;
    dc->vmsd = &vmstate_sysbus_ahci;
    device_class_set_props(dc, sysbus_ahci_properties);
    dc->reset = sysbus_ahci_reset;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_ahci_info = {
    .name = TYPE_SYSBUS_AHCI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysbusAHCIState),
    .instance_init = sysbus_ahci_init,
    .class_init = sysbus_ahci_class_init,
};

static void sysbus_ahci_register_types(void)
{
    type_register_static(&sysbus_ahci_info);
}

type_init(sysbus_ahci_register_types)

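/*
 * Helpers for board/machine code that instantiates the ICH9 AHCI PCI
 * device: query how many ports the HBA exposes and attach one DriveInfo
 * per port (empty slots in hd[] are skipped).
 */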
int32_t ahci_get_num_ports(PCIDevice *dev)
{
    AHCIPCIState *d = ICH9_AHCI(dev);
    AHCIState *ahci = &d->ahci;

    return ahci->ports;
}

void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd)
{
    AHCIPCIState *d = ICH9_AHCI(dev);
    AHCIState *ahci = &d->ahci;
    int i;

    for (i = 0; i < ahci->ports; i++) {
        if (hd[i] == NULL) {
            continue;
        }
        ide_bus_create_drive(&ahci->dev[i].port, 0, hd[i]);
    }
}