omap_dma.c
  1. /*
  2. * TI OMAP DMA gigacell.
  3. *
  4. * Copyright (C) 2006-2008 Andrzej Zaborowski <balrog@zabor.org>
  5. * Copyright (C) 2007-2008 Lauro Ramos Venancio <lauro.venancio@indt.org.br>
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License as
  9. * published by the Free Software Foundation; either version 2 of
  10. * the License, or (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along
  18. * with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #include "qemu/osdep.h"
  21. #include "qemu/log.h"
  22. #include "qemu/timer.h"
  23. #include "hw/arm/omap.h"
  24. #include "hw/irq.h"
  25. #include "hw/arm/soc_dma.h"
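/* Per-channel state for one logical DMA channel: the programmed transfer
 * parameters, the synchronization and interrupt configuration, and an
 * "active set" working copy that is (re)loaded from these fields when the
 * channel starts (see omap_dma_channel_load() below). */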
  26. struct omap_dma_channel_s {
  27. /* transfer data */
  28. int burst[2];
  29. int pack[2];
  30. int endian[2];
  31. int endian_lock[2];
  32. int translate[2];
  33. enum omap_dma_port port[2];
  34. hwaddr addr[2];
  35. omap_dma_addressing_t mode[2];
  36. uint32_t elements;
  37. uint16_t frames;
  38. int32_t frame_index[2];
  39. int16_t element_index[2];
  40. int data_type;
  41. /* transfer type */
  42. int transparent_copy;
  43. int constant_fill;
  44. uint32_t color;
  45. int prefetch;
  46. /* auto init and linked channel data */
  47. int end_prog;
  48. int repeat;
  49. int auto_init;
  50. int link_enabled;
  51. int link_next_ch;
  52. /* interruption data */
  53. int interrupts;
  54. int status;
  55. int cstatus;
  56. /* state data */
  57. int active;
  58. int enable;
  59. int sync;
  60. int src_sync;
  61. int pending_request;
  62. int waiting_end_prog;
  63. uint16_t cpc;
  64. int set_update;
  65. /* sync type */
  66. int fs;
  67. int bs;
  68. /* compatibility */
  69. int omap_3_1_compatible_disable;
  70. qemu_irq irq;
  71. struct omap_dma_channel_s *sibling;
  72. struct omap_dma_reg_set_s {
  73. hwaddr src, dest;
  74. int frame;
  75. int element;
  76. int pck_element;
  77. int frame_delta[2];
  78. int elem_delta[2];
  79. int frames;
  80. int elements;
  81. int pck_elements;
  82. } active_set;
  83. struct soc_dma_ch_s *dma;
  84. /* unused parameters */
  85. int write_mode;
  86. int priority;
  87. int interleave_disabled;
  88. int type;
  89. int suspend;
  90. int buf_disable;
  91. };
  92. struct omap_dma_s {
  93. struct soc_dma_s *dma;
  94. MemoryRegion iomem;
  95. struct omap_mpu_state_s *mpu;
  96. omap_clk clk;
  97. qemu_irq irq[4];
  98. void (*intr_update)(struct omap_dma_s *s);
  99. enum omap_dma_model model;
  100. int omap_3_1_mapping_disabled;
  101. uint32_t gcr;
  102. uint32_t ocp;
  103. uint32_t caps[5];
  104. uint32_t irqen[4];
  105. uint32_t irqstat[4];
  106. int chans;
  107. struct omap_dma_channel_s ch[32];
  108. struct omap_dma_lcd_channel_s lcd_ch;
  109. };
  110. /* Interrupts */
  111. #define TIMEOUT_INTR (1 << 0)
  112. #define EVENT_DROP_INTR (1 << 1)
  113. #define HALF_FRAME_INTR (1 << 2)
  114. #define END_FRAME_INTR (1 << 3)
  115. #define LAST_FRAME_INTR (1 << 4)
  116. #define END_BLOCK_INTR (1 << 5)
  117. #define SYNC (1 << 6)
  118. #define END_PKT_INTR (1 << 7)
  119. #define TRANS_ERR_INTR (1 << 8)
  120. #define MISALIGN_INTR (1 << 11)
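/* These flags accumulate in ch->status (and ch->cstatus on DMA 4) and match
 * the bit positions reported back through the channel status registers
 * (SYS_DMA_CSR_CHx / DMA4_CSR) in the read handlers below. */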
  121. static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
  122. {
  123. s->intr_update(s);
  124. }
  125. static void omap_dma_channel_load(struct omap_dma_channel_s *ch)
  126. {
  127. struct omap_dma_reg_set_s *a = &ch->active_set;
  128. int i, normal;
  129. int omap_3_1 = !ch->omap_3_1_compatible_disable;
  130. /*
  131. * TODO: verify address ranges and alignment
  132. * TODO: port endianness
  133. */
  134. a->src = ch->addr[0];
  135. a->dest = ch->addr[1];
  136. a->frames = ch->frames;
  137. a->elements = ch->elements;
  138. a->pck_elements = ch->frame_index[!ch->src_sync];
  139. a->frame = 0;
  140. a->element = 0;
  141. a->pck_element = 0;
  142. if (unlikely(!ch->elements || !ch->frames)) {
  143. printf("%s: bad DMA request\n", __func__);
  144. return;
  145. }
  146. for (i = 0; i < 2; i ++)
  147. switch (ch->mode[i]) {
  148. case constant:
  149. a->elem_delta[i] = 0;
  150. a->frame_delta[i] = 0;
  151. break;
  152. case post_incremented:
  153. a->elem_delta[i] = ch->data_type;
  154. a->frame_delta[i] = 0;
  155. break;
  156. case single_index:
  157. a->elem_delta[i] = ch->data_type +
  158. ch->element_index[omap_3_1 ? 0 : i] - 1;
  159. a->frame_delta[i] = 0;
  160. break;
  161. case double_index:
  162. a->elem_delta[i] = ch->data_type +
  163. ch->element_index[omap_3_1 ? 0 : i] - 1;
  164. a->frame_delta[i] = ch->frame_index[omap_3_1 ? 0 : i] -
  165. ch->element_index[omap_3_1 ? 0 : i];
  166. break;
  167. default:
  168. break;
  169. }
  170. normal = !ch->transparent_copy && !ch->constant_fill &&
  171. /* FIFO is big-endian so either (ch->endian[n] == 1) OR
  172. * (ch->endian_lock[n] == 1) means no endianness conversion. */
  173. (ch->endian[0] | ch->endian_lock[0]) ==
  174. (ch->endian[1] | ch->endian_lock[1]);
  175. for (i = 0; i < 2; i ++) {
  176. /* TODO: for a->frame_delta[i] > 0 still use the fast path, just
  177. * limit min_elems in omap_dma_transfer_setup to the nearest frame
  178. * end. */
  179. if (!a->elem_delta[i] && normal &&
  180. (a->frames == 1 || !a->frame_delta[i]))
  181. ch->dma->type[i] = soc_dma_access_const;
  182. else if (a->elem_delta[i] == ch->data_type && normal &&
  183. (a->frames == 1 || !a->frame_delta[i]))
  184. ch->dma->type[i] = soc_dma_access_linear;
  185. else
  186. ch->dma->type[i] = soc_dma_access_other;
  187. ch->dma->vaddr[i] = ch->addr[i];
  188. }
  189. soc_dma_ch_update(ch->dma);
  190. }
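/* A short worked example of the deltas computed above: with post_incremented
 * addressing and data_type == 2 (16-bit elements), elem_delta is 2 and
 * frame_delta is 0, so the address simply advances two bytes per element;
 * with double_index addressing, elem_delta is data_type + CEI - 1 and
 * frame_delta is CFI - CEI, so each frame boundary additionally skips by the
 * programmed frame index. */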
  191. static void omap_dma_activate_channel(struct omap_dma_s *s,
  192. struct omap_dma_channel_s *ch)
  193. {
  194. if (!ch->active) {
  195. if (ch->set_update) {
  196. /* It's not clear when the active set is supposed to be
  197. * loaded from registers. We're already loading it when the
  198. * channel is enabled, and for some guests this is not enough
  200. * but that may also be because of a race condition (no
  200. * delays in qemu) in the guest code, which we're just
  201. * working around here. */
  202. omap_dma_channel_load(ch);
  203. ch->set_update = 0;
  204. }
  205. ch->active = 1;
  206. soc_dma_set_request(ch->dma, 1);
  207. if (ch->sync)
  208. ch->status |= SYNC;
  209. }
  210. }
  211. static void omap_dma_deactivate_channel(struct omap_dma_s *s,
  212. struct omap_dma_channel_s *ch)
  213. {
  214. /* Update cpc */
  215. ch->cpc = ch->active_set.dest & 0xffff;
  216. if (ch->pending_request && !ch->waiting_end_prog && ch->enable) {
  217. /* Don't deactivate the channel */
  218. ch->pending_request = 0;
  219. return;
  220. }
  221. /* Don't deactivate the channel if it is synchronized and the DMA request is
  222. active */
  223. if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync)))
  224. return;
  225. if (ch->active) {
  226. ch->active = 0;
  227. ch->status &= ~SYNC;
  228. soc_dma_set_request(ch->dma, 0);
  229. }
  230. }
  231. static void omap_dma_enable_channel(struct omap_dma_s *s,
  232. struct omap_dma_channel_s *ch)
  233. {
  234. if (!ch->enable) {
  235. ch->enable = 1;
  236. ch->waiting_end_prog = 0;
  237. omap_dma_channel_load(ch);
  238. /* TODO: theoretically if ch->sync && ch->prefetch &&
  239. * !s->dma->drqbmp[ch->sync], we should also activate and fetch
  240. * from source and then stall until signalled. */
  241. if ((!ch->sync) || (s->dma->drqbmp & (1ULL << ch->sync))) {
  242. omap_dma_activate_channel(s, ch);
  243. }
  244. }
  245. }
  246. static void omap_dma_disable_channel(struct omap_dma_s *s,
  247. struct omap_dma_channel_s *ch)
  248. {
  249. if (ch->enable) {
  250. ch->enable = 0;
  251. /* Discard any pending request */
  252. ch->pending_request = 0;
  253. omap_dma_deactivate_channel(s, ch);
  254. }
  255. }
  256. static void omap_dma_channel_end_prog(struct omap_dma_s *s,
  257. struct omap_dma_channel_s *ch)
  258. {
  259. if (ch->waiting_end_prog) {
  260. ch->waiting_end_prog = 0;
  261. if (!ch->sync || ch->pending_request) {
  262. ch->pending_request = 0;
  263. omap_dma_activate_channel(s, ch);
  264. }
  265. }
  266. }
  267. static void omap_dma_interrupts_3_1_update(struct omap_dma_s *s)
  268. {
  269. struct omap_dma_channel_s *ch = s->ch;
  270. /* First three interrupts are shared between two channels each. */
  271. if (ch[0].status | ch[6].status)
  272. qemu_irq_raise(ch[0].irq);
  273. if (ch[1].status | ch[7].status)
  274. qemu_irq_raise(ch[1].irq);
  275. if (ch[2].status | ch[8].status)
  276. qemu_irq_raise(ch[2].irq);
  277. if (ch[3].status)
  278. qemu_irq_raise(ch[3].irq);
  279. if (ch[4].status)
  280. qemu_irq_raise(ch[4].irq);
  281. if (ch[5].status)
  282. qemu_irq_raise(ch[5].irq);
  283. }
  284. static void omap_dma_interrupts_3_2_update(struct omap_dma_s *s)
  285. {
  286. struct omap_dma_channel_s *ch = s->ch;
  287. int i;
  288. for (i = s->chans; i; ch ++, i --)
  289. if (ch->status)
  290. qemu_irq_raise(ch->irq);
  291. }
  292. static void omap_dma_enable_3_1_mapping(struct omap_dma_s *s)
  293. {
  294. s->omap_3_1_mapping_disabled = 0;
  295. s->chans = 9;
  296. s->intr_update = omap_dma_interrupts_3_1_update;
  297. }
  298. static void omap_dma_disable_3_1_mapping(struct omap_dma_s *s)
  299. {
  300. s->omap_3_1_mapping_disabled = 1;
  301. s->chans = 16;
  302. s->intr_update = omap_dma_interrupts_3_2_update;
  303. }
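/* On DMA 3.2, writes to DMA_GSCR (see omap_dma_sys_write() below) switch
 * between the 9-channel OMAP 3.1 compatibility layout and the native
 * 16-channel layout; the two helpers above only adjust the channel count and
 * the interrupt fan-out routine accordingly. */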
  304. static void omap_dma_process_request(struct omap_dma_s *s, int request)
  305. {
  306. int channel;
  307. int drop_event = 0;
  308. struct omap_dma_channel_s *ch = s->ch;
  309. for (channel = 0; channel < s->chans; channel ++, ch ++) {
  310. if (ch->enable && ch->sync == request) {
  311. if (!ch->active)
  312. omap_dma_activate_channel(s, ch);
  313. else if (!ch->pending_request)
  314. ch->pending_request = 1;
  315. else {
  316. /* Request collision */
  317. /* Second request received while processing other request */
  318. ch->status |= EVENT_DROP_INTR;
  319. drop_event = 1;
  320. }
  321. }
  322. }
  323. if (drop_event)
  324. omap_dma_interrupts_update(s);
  325. }
  326. static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)
  327. {
  328. uint8_t value[4];
  329. struct omap_dma_channel_s *ch = dma->opaque;
  330. struct omap_dma_reg_set_s *a = &ch->active_set;
  331. int bytes = dma->bytes;
  332. #ifdef MULTI_REQ
  333. uint16_t status = ch->status;
  334. #endif
  335. do {
  336. /* Transfer a single element */
  337. /* FIXME: check the endianness */
  338. if (!ch->constant_fill)
  339. cpu_physical_memory_read(a->src, value, ch->data_type);
  340. else
  341. *(uint32_t *) value = ch->color;
  342. if (!ch->transparent_copy || *(uint32_t *) value != ch->color)
  343. cpu_physical_memory_write(a->dest, value, ch->data_type);
  344. a->src += a->elem_delta[0];
  345. a->dest += a->elem_delta[1];
  346. a->element ++;
  347. #ifndef MULTI_REQ
  348. if (a->element == a->elements) {
  349. /* End of Frame */
  350. a->element = 0;
  351. a->src += a->frame_delta[0];
  352. a->dest += a->frame_delta[1];
  353. a->frame ++;
  354. /* If the channel is async, update cpc */
  355. if (!ch->sync)
  356. ch->cpc = a->dest & 0xffff;
  357. }
  358. } while ((bytes -= ch->data_type));
  359. #else
  360. /* If the channel is element synchronized, deactivate it */
  361. if (ch->sync && !ch->fs && !ch->bs)
  362. omap_dma_deactivate_channel(s, ch);
  363. /* If it is the last frame, set the LAST_FRAME interrupt */
  364. if (a->element == 1 && a->frame == a->frames - 1)
  365. if (ch->interrupts & LAST_FRAME_INTR)
  366. ch->status |= LAST_FRAME_INTR;
  367. /* If half of the frame was reached, set the HALF_FRAME
  368. interrupt */
  369. if (a->element == (a->elements >> 1))
  370. if (ch->interrupts & HALF_FRAME_INTR)
  371. ch->status |= HALF_FRAME_INTR;
  372. if (ch->fs && ch->bs) {
  373. a->pck_element ++;
  374. /* Check if a full packet has been transferred. */
  375. if (a->pck_element == a->pck_elements) {
  376. a->pck_element = 0;
  377. /* Set the END_PKT interrupt */
  378. if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
  379. ch->status |= END_PKT_INTR;
  380. /* If the channel is packet-synchronized, deactivate it */
  381. if (ch->sync)
  382. omap_dma_deactivate_channel(s, ch);
  383. }
  384. }
  385. if (a->element == a->elements) {
  386. /* End of Frame */
  387. a->element = 0;
  388. a->src += a->frame_delta[0];
  389. a->dest += a->frame_delta[1];
  390. a->frame ++;
  391. /* If the channel is frame synchronized, deactivate it */
  392. if (ch->sync && ch->fs && !ch->bs)
  393. omap_dma_deactivate_channel(s, ch);
  394. /* If the channel is async, update cpc */
  395. if (!ch->sync)
  396. ch->cpc = a->dest & 0xffff;
  397. /* Set the END_FRAME interrupt */
  398. if (ch->interrupts & END_FRAME_INTR)
  399. ch->status |= END_FRAME_INTR;
  400. if (a->frame == a->frames) {
  401. /* End of Block */
  402. /* Disable the channel */
  403. if (ch->omap_3_1_compatible_disable) {
  404. omap_dma_disable_channel(s, ch);
  405. if (ch->link_enabled)
  406. omap_dma_enable_channel(s,
  407. &s->ch[ch->link_next_ch]);
  408. } else {
  409. if (!ch->auto_init)
  410. omap_dma_disable_channel(s, ch);
  411. else if (ch->repeat || ch->end_prog)
  412. omap_dma_channel_load(ch);
  413. else {
  414. ch->waiting_end_prog = 1;
  415. omap_dma_deactivate_channel(s, ch);
  416. }
  417. }
  418. if (ch->interrupts & END_BLOCK_INTR)
  419. ch->status |= END_BLOCK_INTR;
  420. }
  421. }
  422. } while (status == ch->status && ch->active);
  423. omap_dma_interrupts_update(s);
  424. #endif
  425. }
  426. enum {
  427. omap_dma_intr_element_sync,
  428. omap_dma_intr_last_frame,
  429. omap_dma_intr_half_frame,
  430. omap_dma_intr_frame,
  431. omap_dma_intr_frame_sync,
  432. omap_dma_intr_packet,
  433. omap_dma_intr_packet_sync,
  434. omap_dma_intr_block,
  435. __omap_dma_intr_last,
  436. };
  437. static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma)
  438. {
  439. struct omap_dma_port_if_s *src_p, *dest_p;
  440. struct omap_dma_reg_set_s *a;
  441. struct omap_dma_channel_s *ch = dma->opaque;
  442. struct omap_dma_s *s = dma->dma->opaque;
  443. int frames, min_elems, elements[__omap_dma_intr_last];
  444. a = &ch->active_set;
  445. src_p = &s->mpu->port[ch->port[0]];
  446. dest_p = &s->mpu->port[ch->port[1]];
  447. if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) ||
  448. (!dest_p->addr_valid(s->mpu, a->dest))) {
  449. #if 0
  450. /* Bus time-out */
  451. if (ch->interrupts & TIMEOUT_INTR)
  452. ch->status |= TIMEOUT_INTR;
  453. omap_dma_deactivate_channel(s, ch);
  454. continue;
  455. #endif
  456. printf("%s: Bus time-out in DMA%i operation\n",
  457. __func__, dma->num);
  458. }
  459. min_elems = INT_MAX;
  460. /* Check all the conditions that terminate the transfer starting
  461. * with those that can occur the soonest. */
  462. #define INTR_CHECK(cond, id, nelements) \
  463. if (cond) { \
  464. elements[id] = nelements; \
  465. if (elements[id] < min_elems) \
  466. min_elems = elements[id]; \
  467. } else \
  468. elements[id] = INT_MAX;
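/* INTR_CHECK records, for every termination condition that applies, how many
 * more elements can be transferred before that condition triggers, and keeps
 * the smallest such count in min_elems.  The chunk handed to soc_dma is then
 * min_elems * data_type bytes, so the first condition to fire lands exactly
 * on the chunk boundary. */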
  469. /* Elements */
  470. INTR_CHECK(
  471. ch->sync && !ch->fs && !ch->bs,
  472. omap_dma_intr_element_sync,
  473. 1)
  474. /* Frames */
  475. /* TODO: for transfers where entire frames can be read and written
  476. * using memcpy() but a->frame_delta is non-zero, try to still do
  477. * transfers using soc_dma but limit min_elems to a->elements - ...
  478. * See also the TODO in omap_dma_channel_load. */
  479. INTR_CHECK(
  480. (ch->interrupts & LAST_FRAME_INTR) &&
  481. ((a->frame < a->frames - 1) || !a->element),
  482. omap_dma_intr_last_frame,
  483. (a->frames - a->frame - 2) * a->elements +
  484. (a->elements - a->element + 1))
  485. INTR_CHECK(
  486. ch->interrupts & HALF_FRAME_INTR,
  487. omap_dma_intr_half_frame,
  488. (a->elements >> 1) +
  489. (a->element >= (a->elements >> 1) ? a->elements : 0) -
  490. a->element)
  491. INTR_CHECK(
  492. ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR),
  493. omap_dma_intr_frame,
  494. a->elements - a->element)
  495. INTR_CHECK(
  496. ch->sync && ch->fs && !ch->bs,
  497. omap_dma_intr_frame_sync,
  498. a->elements - a->element)
  499. /* Packets */
  500. INTR_CHECK(
  501. ch->fs && ch->bs &&
  502. (ch->interrupts & END_PKT_INTR) && !ch->src_sync,
  503. omap_dma_intr_packet,
  504. a->pck_elements - a->pck_element)
  505. INTR_CHECK(
  506. ch->fs && ch->bs && ch->sync,
  507. omap_dma_intr_packet_sync,
  508. a->pck_elements - a->pck_element)
  509. /* Blocks */
  510. INTR_CHECK(
  511. 1,
  512. omap_dma_intr_block,
  513. (a->frames - a->frame - 1) * a->elements +
  514. (a->elements - a->element))
  515. dma->bytes = min_elems * ch->data_type;
  516. /* Set appropriate interrupts and/or deactivate channels */
  517. #ifdef MULTI_REQ
  518. /* TODO: should all of this only be done if dma->update, and otherwise
  519. * inside omap_dma_transfer_generic below - check what's faster. */
  520. if (dma->update) {
  521. #endif
  522. /* If the channel is element synchronized, deactivate it */
  523. if (min_elems == elements[omap_dma_intr_element_sync])
  524. omap_dma_deactivate_channel(s, ch);
  525. /* If it is the last frame, set the LAST_FRAME interrupt */
  526. if (min_elems == elements[omap_dma_intr_last_frame])
  527. ch->status |= LAST_FRAME_INTR;
  528. /* If exactly half of the frame was reached, set the HALF_FRAME
  529. interrupt */
  530. if (min_elems == elements[omap_dma_intr_half_frame])
  531. ch->status |= HALF_FRAME_INTR;
  532. /* If a full packet has been transferred, set the END_PKT interrupt */
  533. if (min_elems == elements[omap_dma_intr_packet])
  534. ch->status |= END_PKT_INTR;
  535. /* If the channel is packet-synchronized, deactivate it */
  536. if (min_elems == elements[omap_dma_intr_packet_sync])
  537. omap_dma_deactivate_channel(s, ch);
  538. /* If the channel is frame synchronized, deactivate it */
  539. if (min_elems == elements[omap_dma_intr_frame_sync])
  540. omap_dma_deactivate_channel(s, ch);
  541. /* Set the END_FRAME interrupt */
  542. if (min_elems == elements[omap_dma_intr_frame])
  543. ch->status |= END_FRAME_INTR;
  544. if (min_elems == elements[omap_dma_intr_block]) {
  545. /* End of Block */
  546. /* Disable the channel */
  547. if (ch->omap_3_1_compatible_disable) {
  548. omap_dma_disable_channel(s, ch);
  549. if (ch->link_enabled)
  550. omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]);
  551. } else {
  552. if (!ch->auto_init)
  553. omap_dma_disable_channel(s, ch);
  554. else if (ch->repeat || ch->end_prog)
  555. omap_dma_channel_load(ch);
  556. else {
  557. ch->waiting_end_prog = 1;
  558. omap_dma_deactivate_channel(s, ch);
  559. }
  560. }
  561. if (ch->interrupts & END_BLOCK_INTR)
  562. ch->status |= END_BLOCK_INTR;
  563. }
  564. /* Update packet number */
  565. if (ch->fs && ch->bs) {
  566. a->pck_element += min_elems;
  567. a->pck_element %= a->pck_elements;
  568. }
  569. /* TODO: check if we really need to update anything here or perhaps we
  570. * can skip part of this. */
  571. #ifndef MULTI_REQ
  572. if (dma->update) {
  573. #endif
  574. a->element += min_elems;
  575. frames = a->element / a->elements;
  576. a->element = a->element % a->elements;
  577. a->frame += frames;
  578. a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0];
  579. a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1];
  580. /* If the channel is async, update cpc */
  581. if (!ch->sync && frames)
  582. ch->cpc = a->dest & 0xffff;
  583. /* TODO: if the destination port is IMIF or EMIFF, set the dirty
  584. * bits on it. */
  585. #ifndef MULTI_REQ
  586. }
  587. #else
  588. }
  589. #endif
  590. omap_dma_interrupts_update(s);
  591. }
  592. void omap_dma_reset(struct soc_dma_s *dma)
  593. {
  594. int i;
  595. struct omap_dma_s *s = dma->opaque;
  596. soc_dma_reset(s->dma);
  597. if (s->model < omap_dma_4)
  598. s->gcr = 0x0004;
  599. else
  600. s->gcr = 0x00010010;
  601. s->ocp = 0x00000000;
  602. memset(&s->irqstat, 0, sizeof(s->irqstat));
  603. memset(&s->irqen, 0, sizeof(s->irqen));
  604. s->lcd_ch.src = emiff;
  605. s->lcd_ch.condition = 0;
  606. s->lcd_ch.interrupts = 0;
  607. s->lcd_ch.dual = 0;
  608. if (s->model < omap_dma_4)
  609. omap_dma_enable_3_1_mapping(s);
  610. for (i = 0; i < s->chans; i ++) {
  611. s->ch[i].suspend = 0;
  612. s->ch[i].prefetch = 0;
  613. s->ch[i].buf_disable = 0;
  614. s->ch[i].src_sync = 0;
  615. memset(&s->ch[i].burst, 0, sizeof(s->ch[i].burst));
  616. memset(&s->ch[i].port, 0, sizeof(s->ch[i].port));
  617. memset(&s->ch[i].mode, 0, sizeof(s->ch[i].mode));
  618. memset(&s->ch[i].frame_index, 0, sizeof(s->ch[i].frame_index));
  619. memset(&s->ch[i].element_index, 0, sizeof(s->ch[i].element_index));
  620. memset(&s->ch[i].endian, 0, sizeof(s->ch[i].endian));
  621. memset(&s->ch[i].endian_lock, 0, sizeof(s->ch[i].endian_lock));
  622. memset(&s->ch[i].translate, 0, sizeof(s->ch[i].translate));
  623. s->ch[i].write_mode = 0;
  624. s->ch[i].data_type = 0;
  625. s->ch[i].transparent_copy = 0;
  626. s->ch[i].constant_fill = 0;
  627. s->ch[i].color = 0x00000000;
  628. s->ch[i].end_prog = 0;
  629. s->ch[i].repeat = 0;
  630. s->ch[i].auto_init = 0;
  631. s->ch[i].link_enabled = 0;
  632. if (s->model < omap_dma_4)
  633. s->ch[i].interrupts = 0x0003;
  634. else
  635. s->ch[i].interrupts = 0x0000;
  636. s->ch[i].status = 0;
  637. s->ch[i].cstatus = 0;
  638. s->ch[i].active = 0;
  639. s->ch[i].enable = 0;
  640. s->ch[i].sync = 0;
  641. s->ch[i].pending_request = 0;
  642. s->ch[i].waiting_end_prog = 0;
  643. s->ch[i].cpc = 0x0000;
  644. s->ch[i].fs = 0;
  645. s->ch[i].bs = 0;
  646. s->ch[i].omap_3_1_compatible_disable = 0;
  647. memset(&s->ch[i].active_set, 0, sizeof(s->ch[i].active_set));
  648. s->ch[i].priority = 0;
  649. s->ch[i].interleave_disabled = 0;
  650. s->ch[i].type = 0;
  651. }
  652. }
  653. static int omap_dma_ch_reg_read(struct omap_dma_s *s,
  654. struct omap_dma_channel_s *ch, int reg, uint16_t *value)
  655. {
  656. switch (reg) {
  657. case 0x00: /* SYS_DMA_CSDP_CH0 */
  658. *value = (ch->burst[1] << 14) |
  659. (ch->pack[1] << 13) |
  660. (ch->port[1] << 9) |
  661. (ch->burst[0] << 7) |
  662. (ch->pack[0] << 6) |
  663. (ch->port[0] << 2) |
  664. (ch->data_type >> 1);
  665. break;
  666. case 0x02: /* SYS_DMA_CCR_CH0 */
  667. if (s->model <= omap_dma_3_1)
  668. *value = 0 << 10; /* FIFO_FLUSH reads as 0 */
  669. else
  670. *value = ch->omap_3_1_compatible_disable << 10;
  671. *value |= (ch->mode[1] << 14) |
  672. (ch->mode[0] << 12) |
  673. (ch->end_prog << 11) |
  674. (ch->repeat << 9) |
  675. (ch->auto_init << 8) |
  676. (ch->enable << 7) |
  677. (ch->priority << 6) |
  678. (ch->fs << 5) | ch->sync;
  679. break;
  680. case 0x04: /* SYS_DMA_CICR_CH0 */
  681. *value = ch->interrupts;
  682. break;
  683. case 0x06: /* SYS_DMA_CSR_CH0 */
  684. *value = ch->status;
  685. ch->status &= SYNC;
  686. if (!ch->omap_3_1_compatible_disable && ch->sibling) {
  687. *value |= (ch->sibling->status & 0x3f) << 6;
  688. ch->sibling->status &= SYNC;
  689. }
  690. qemu_irq_lower(ch->irq);
  691. break;
  692. case 0x08: /* SYS_DMA_CSSA_L_CH0 */
  693. *value = ch->addr[0] & 0x0000ffff;
  694. break;
  695. case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
  696. *value = ch->addr[0] >> 16;
  697. break;
  698. case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
  699. *value = ch->addr[1] & 0x0000ffff;
  700. break;
  701. case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
  702. *value = ch->addr[1] >> 16;
  703. break;
  704. case 0x10: /* SYS_DMA_CEN_CH0 */
  705. *value = ch->elements;
  706. break;
  707. case 0x12: /* SYS_DMA_CFN_CH0 */
  708. *value = ch->frames;
  709. break;
  710. case 0x14: /* SYS_DMA_CFI_CH0 */
  711. *value = ch->frame_index[0];
  712. break;
  713. case 0x16: /* SYS_DMA_CEI_CH0 */
  714. *value = ch->element_index[0];
  715. break;
  716. case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
  717. if (ch->omap_3_1_compatible_disable)
  718. *value = ch->active_set.src & 0xffff; /* CSAC */
  719. else
  720. *value = ch->cpc;
  721. break;
  722. case 0x1a: /* DMA_CDAC */
  723. *value = ch->active_set.dest & 0xffff; /* CDAC */
  724. break;
  725. case 0x1c: /* DMA_CDEI */
  726. *value = ch->element_index[1];
  727. break;
  728. case 0x1e: /* DMA_CDFI */
  729. *value = ch->frame_index[1];
  730. break;
  731. case 0x20: /* DMA_COLOR_L */
  732. *value = ch->color & 0xffff;
  733. break;
  734. case 0x22: /* DMA_COLOR_U */
  735. *value = ch->color >> 16;
  736. break;
  737. case 0x24: /* DMA_CCR2 */
  738. *value = (ch->bs << 2) |
  739. (ch->transparent_copy << 1) |
  740. ch->constant_fill;
  741. break;
  742. case 0x28: /* DMA_CLNK_CTRL */
  743. *value = (ch->link_enabled << 15) |
  744. (ch->link_next_ch & 0xf);
  745. break;
  746. case 0x2a: /* DMA_LCH_CTRL */
  747. *value = (ch->interleave_disabled << 15) |
  748. ch->type;
  749. break;
  750. default:
  751. return 1;
  752. }
  753. return 0;
  754. }
  755. static int omap_dma_ch_reg_write(struct omap_dma_s *s,
  756. struct omap_dma_channel_s *ch, int reg, uint16_t value)
  757. {
  758. switch (reg) {
  759. case 0x00: /* SYS_DMA_CSDP_CH0 */
  760. ch->burst[1] = (value & 0xc000) >> 14;
  761. ch->pack[1] = (value & 0x2000) >> 13;
  762. ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9);
  763. ch->burst[0] = (value & 0x0180) >> 7;
  764. ch->pack[0] = (value & 0x0040) >> 6;
  765. ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2);
  766. if (ch->port[0] >= __omap_dma_port_last) {
  767. qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
  768. __func__, ch->port[0]);
  769. }
  770. if (ch->port[1] >= __omap_dma_port_last) {
  771. qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
  772. __func__, ch->port[1]);
  773. }
  774. ch->data_type = 1 << (value & 3);
  775. if ((value & 3) == 3) {
  776. qemu_log_mask(LOG_GUEST_ERROR,
  777. "%s: bad data_type for DMA channel\n", __func__);
  778. ch->data_type >>= 1;
  779. }
  780. break;
  781. case 0x02: /* SYS_DMA_CCR_CH0 */
  782. ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
  783. ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
  784. ch->end_prog = (value & 0x0800) >> 11;
  785. if (s->model >= omap_dma_3_2)
  786. ch->omap_3_1_compatible_disable = (value >> 10) & 0x1;
  787. ch->repeat = (value & 0x0200) >> 9;
  788. ch->auto_init = (value & 0x0100) >> 8;
  789. ch->priority = (value & 0x0040) >> 6;
  790. ch->fs = (value & 0x0020) >> 5;
  791. ch->sync = value & 0x001f;
  792. if (value & 0x0080)
  793. omap_dma_enable_channel(s, ch);
  794. else
  795. omap_dma_disable_channel(s, ch);
  796. if (ch->end_prog)
  797. omap_dma_channel_end_prog(s, ch);
  798. break;
  799. case 0x04: /* SYS_DMA_CICR_CH0 */
  800. ch->interrupts = value & 0x3f;
  801. break;
  802. case 0x06: /* SYS_DMA_CSR_CH0 */
  803. OMAP_RO_REG((hwaddr) reg);
  804. break;
  805. case 0x08: /* SYS_DMA_CSSA_L_CH0 */
  806. ch->addr[0] &= 0xffff0000;
  807. ch->addr[0] |= value;
  808. break;
  809. case 0x0a: /* SYS_DMA_CSSA_U_CH0 */
  810. ch->addr[0] &= 0x0000ffff;
  811. ch->addr[0] |= (uint32_t) value << 16;
  812. break;
  813. case 0x0c: /* SYS_DMA_CDSA_L_CH0 */
  814. ch->addr[1] &= 0xffff0000;
  815. ch->addr[1] |= value;
  816. break;
  817. case 0x0e: /* SYS_DMA_CDSA_U_CH0 */
  818. ch->addr[1] &= 0x0000ffff;
  819. ch->addr[1] |= (uint32_t) value << 16;
  820. break;
  821. case 0x10: /* SYS_DMA_CEN_CH0 */
  822. ch->elements = value;
  823. break;
  824. case 0x12: /* SYS_DMA_CFN_CH0 */
  825. ch->frames = value;
  826. break;
  827. case 0x14: /* SYS_DMA_CFI_CH0 */
  828. ch->frame_index[0] = (int16_t) value;
  829. break;
  830. case 0x16: /* SYS_DMA_CEI_CH0 */
  831. ch->element_index[0] = (int16_t) value;
  832. break;
  833. case 0x18: /* SYS_DMA_CPC_CH0 or DMA_CSAC */
  834. OMAP_RO_REG((hwaddr) reg);
  835. break;
  836. case 0x1c: /* DMA_CDEI */
  837. ch->element_index[1] = (int16_t) value;
  838. break;
  839. case 0x1e: /* DMA_CDFI */
  840. ch->frame_index[1] = (int16_t) value;
  841. break;
  842. case 0x20: /* DMA_COLOR_L */
  843. ch->color &= 0xffff0000;
  844. ch->color |= value;
  845. break;
  846. case 0x22: /* DMA_COLOR_U */
  847. ch->color &= 0xffff;
  848. ch->color |= (uint32_t)value << 16;
  849. break;
  850. case 0x24: /* DMA_CCR2 */
  851. ch->bs = (value >> 2) & 0x1;
  852. ch->transparent_copy = (value >> 1) & 0x1;
  853. ch->constant_fill = value & 0x1;
  854. break;
  855. case 0x28: /* DMA_CLNK_CTRL */
  856. ch->link_enabled = (value >> 15) & 0x1;
  857. if (value & (1 << 14)) { /* Stop_Lnk */
  858. ch->link_enabled = 0;
  859. omap_dma_disable_channel(s, ch);
  860. }
  861. ch->link_next_ch = value & 0x1f;
  862. break;
  863. case 0x2a: /* DMA_LCH_CTRL */
  864. ch->interleave_disabled = (value >> 15) & 0x1;
  865. ch->type = value & 0xf;
  866. break;
  867. default:
  868. return 1;
  869. }
  870. return 0;
  871. }
  872. static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
  873. uint16_t value)
  874. {
  875. switch (offset) {
  876. case 0xbc0: /* DMA_LCD_CSDP */
  877. s->brust_f2 = (value >> 14) & 0x3;
  878. s->pack_f2 = (value >> 13) & 0x1;
  879. s->data_type_f2 = (1 << ((value >> 11) & 0x3));
  880. s->brust_f1 = (value >> 7) & 0x3;
  881. s->pack_f1 = (value >> 6) & 0x1;
  882. s->data_type_f1 = (1 << ((value >> 0) & 0x3));
  883. break;
  884. case 0xbc2: /* DMA_LCD_CCR */
  885. s->mode_f2 = (value >> 14) & 0x3;
  886. s->mode_f1 = (value >> 12) & 0x3;
  887. s->end_prog = (value >> 11) & 0x1;
  888. s->omap_3_1_compatible_disable = (value >> 10) & 0x1;
  889. s->repeat = (value >> 9) & 0x1;
  890. s->auto_init = (value >> 8) & 0x1;
  891. s->running = (value >> 7) & 0x1;
  892. s->priority = (value >> 6) & 0x1;
  893. s->bs = (value >> 4) & 0x1;
  894. break;
  895. case 0xbc4: /* DMA_LCD_CTRL */
  896. s->dst = (value >> 8) & 0x1;
  897. s->src = ((value >> 6) & 0x3) << 1;
  898. s->condition = 0;
  899. /* Assume no bus errors and thus no BUS_ERROR irq bits. */
  900. s->interrupts = (value >> 1) & 1;
  901. s->dual = value & 1;
  902. break;
  903. case 0xbc8: /* TOP_B1_L */
  904. s->src_f1_top &= 0xffff0000;
  905. s->src_f1_top |= 0x0000ffff & value;
  906. break;
  907. case 0xbca: /* TOP_B1_U */
  908. s->src_f1_top &= 0x0000ffff;
  909. s->src_f1_top |= (uint32_t)value << 16;
  910. break;
  911. case 0xbcc: /* BOT_B1_L */
  912. s->src_f1_bottom &= 0xffff0000;
  913. s->src_f1_bottom |= 0x0000ffff & value;
  914. break;
  915. case 0xbce: /* BOT_B1_U */
  916. s->src_f1_bottom &= 0x0000ffff;
  917. s->src_f1_bottom |= (uint32_t) value << 16;
  918. break;
  919. case 0xbd0: /* TOP_B2_L */
  920. s->src_f2_top &= 0xffff0000;
  921. s->src_f2_top |= 0x0000ffff & value;
  922. break;
  923. case 0xbd2: /* TOP_B2_U */
  924. s->src_f2_top &= 0x0000ffff;
  925. s->src_f2_top |= (uint32_t) value << 16;
  926. break;
  927. case 0xbd4: /* BOT_B2_L */
  928. s->src_f2_bottom &= 0xffff0000;
  929. s->src_f2_bottom |= 0x0000ffff & value;
  930. break;
  931. case 0xbd6: /* BOT_B2_U */
  932. s->src_f2_bottom &= 0x0000ffff;
  933. s->src_f2_bottom |= (uint32_t) value << 16;
  934. break;
  935. case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
  936. s->element_index_f1 = value;
  937. break;
  938. case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
  939. s->frame_index_f1 &= 0xffff0000;
  940. s->frame_index_f1 |= 0x0000ffff & value;
  941. break;
  942. case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
  943. s->frame_index_f1 &= 0x0000ffff;
  944. s->frame_index_f1 |= (uint32_t) value << 16;
  945. break;
  946. case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
  947. s->element_index_f2 = value;
  948. break;
  949. case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
  950. s->frame_index_f2 &= 0xffff0000;
  951. s->frame_index_f2 |= 0x0000ffff & value;
  952. break;
  953. case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
  954. s->frame_index_f2 &= 0x0000ffff;
  955. s->frame_index_f2 |= (uint32_t) value << 16;
  956. break;
  957. case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
  958. s->elements_f1 = value;
  959. break;
  960. case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
  961. s->frames_f1 = value;
  962. break;
  963. case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
  964. s->elements_f2 = value;
  965. break;
  966. case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
  967. s->frames_f2 = value;
  968. break;
  969. case 0xbea: /* DMA_LCD_LCH_CTRL */
  970. s->lch_type = value & 0xf;
  971. break;
  972. default:
  973. return 1;
  974. }
  975. return 0;
  976. }
  977. static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
  978. uint16_t *ret)
  979. {
  980. switch (offset) {
  981. case 0xbc0: /* DMA_LCD_CSDP */
  982. *ret = (s->brust_f2 << 14) |
  983. (s->pack_f2 << 13) |
  984. ((s->data_type_f2 >> 1) << 11) |
  985. (s->brust_f1 << 7) |
  986. (s->pack_f1 << 6) |
  987. ((s->data_type_f1 >> 1) << 0);
  988. break;
  989. case 0xbc2: /* DMA_LCD_CCR */
  990. *ret = (s->mode_f2 << 14) |
  991. (s->mode_f1 << 12) |
  992. (s->end_prog << 11) |
  993. (s->omap_3_1_compatible_disable << 10) |
  994. (s->repeat << 9) |
  995. (s->auto_init << 8) |
  996. (s->running << 7) |
  997. (s->priority << 6) |
  998. (s->bs << 4);
  999. break;
  1000. case 0xbc4: /* DMA_LCD_CTRL */
  1001. qemu_irq_lower(s->irq);
  1002. *ret = (s->dst << 8) |
  1003. ((s->src & 0x6) << 5) |
  1004. (s->condition << 3) |
  1005. (s->interrupts << 1) |
  1006. s->dual;
  1007. break;
  1008. case 0xbc8: /* TOP_B1_L */
  1009. *ret = s->src_f1_top & 0xffff;
  1010. break;
  1011. case 0xbca: /* TOP_B1_U */
  1012. *ret = s->src_f1_top >> 16;
  1013. break;
  1014. case 0xbcc: /* BOT_B1_L */
  1015. *ret = s->src_f1_bottom & 0xffff;
  1016. break;
  1017. case 0xbce: /* BOT_B1_U */
  1018. *ret = s->src_f1_bottom >> 16;
  1019. break;
  1020. case 0xbd0: /* TOP_B2_L */
  1021. *ret = s->src_f2_top & 0xffff;
  1022. break;
  1023. case 0xbd2: /* TOP_B2_U */
  1024. *ret = s->src_f2_top >> 16;
  1025. break;
  1026. case 0xbd4: /* BOT_B2_L */
  1027. *ret = s->src_f2_bottom & 0xffff;
  1028. break;
  1029. case 0xbd6: /* BOT_B2_U */
  1030. *ret = s->src_f2_bottom >> 16;
  1031. break;
  1032. case 0xbd8: /* DMA_LCD_SRC_EI_B1 */
  1033. *ret = s->element_index_f1;
  1034. break;
  1035. case 0xbda: /* DMA_LCD_SRC_FI_B1_L */
  1036. *ret = s->frame_index_f1 & 0xffff;
  1037. break;
  1038. case 0xbf4: /* DMA_LCD_SRC_FI_B1_U */
  1039. *ret = s->frame_index_f1 >> 16;
  1040. break;
  1041. case 0xbdc: /* DMA_LCD_SRC_EI_B2 */
  1042. *ret = s->element_index_f2;
  1043. break;
  1044. case 0xbde: /* DMA_LCD_SRC_FI_B2_L */
  1045. *ret = s->frame_index_f2 & 0xffff;
  1046. break;
  1047. case 0xbf6: /* DMA_LCD_SRC_FI_B2_U */
  1048. *ret = s->frame_index_f2 >> 16;
  1049. break;
  1050. case 0xbe0: /* DMA_LCD_SRC_EN_B1 */
  1051. *ret = s->elements_f1;
  1052. break;
  1053. case 0xbe4: /* DMA_LCD_SRC_FN_B1 */
  1054. *ret = s->frames_f1;
  1055. break;
  1056. case 0xbe2: /* DMA_LCD_SRC_EN_B2 */
  1057. *ret = s->elements_f2;
  1058. break;
  1059. case 0xbe6: /* DMA_LCD_SRC_FN_B2 */
  1060. *ret = s->frames_f2;
  1061. break;
  1062. case 0xbea: /* DMA_LCD_LCH_CTRL */
  1063. *ret = s->lch_type;
  1064. break;
  1065. default:
  1066. return 1;
  1067. }
  1068. return 0;
  1069. }
  1070. static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
  1071. uint16_t value)
  1072. {
  1073. switch (offset) {
  1074. case 0x300: /* SYS_DMA_LCD_CTRL */
  1075. s->src = (value & 0x40) ? imif : emiff;
  1076. s->condition = 0;
  1077. /* Assume no bus errors and thus no BUS_ERROR irq bits. */
  1078. s->interrupts = (value >> 1) & 1;
  1079. s->dual = value & 1;
  1080. break;
  1081. case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
  1082. s->src_f1_top &= 0xffff0000;
  1083. s->src_f1_top |= 0x0000ffff & value;
  1084. break;
  1085. case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
  1086. s->src_f1_top &= 0x0000ffff;
  1087. s->src_f1_top |= (uint32_t)value << 16;
  1088. break;
  1089. case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
  1090. s->src_f1_bottom &= 0xffff0000;
  1091. s->src_f1_bottom |= 0x0000ffff & value;
  1092. break;
  1093. case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
  1094. s->src_f1_bottom &= 0x0000ffff;
  1095. s->src_f1_bottom |= (uint32_t)value << 16;
  1096. break;
  1097. case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
  1098. s->src_f2_top &= 0xffff0000;
  1099. s->src_f2_top |= 0x0000ffff & value;
  1100. break;
  1101. case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
  1102. s->src_f2_top &= 0x0000ffff;
  1103. s->src_f2_top |= (uint32_t)value << 16;
  1104. break;
  1105. case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
  1106. s->src_f2_bottom &= 0xffff0000;
  1107. s->src_f2_bottom |= 0x0000ffff & value;
  1108. break;
  1109. case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
  1110. s->src_f2_bottom &= 0x0000ffff;
  1111. s->src_f2_bottom |= (uint32_t)value << 16;
  1112. break;
  1113. default:
  1114. return 1;
  1115. }
  1116. return 0;
  1117. }
  1118. static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
  1119. uint16_t *ret)
  1120. {
  1121. int i;
  1122. switch (offset) {
  1123. case 0x300: /* SYS_DMA_LCD_CTRL */
  1124. i = s->condition;
  1125. s->condition = 0;
  1126. qemu_irq_lower(s->irq);
  1127. *ret = ((s->src == imif) << 6) | (i << 3) |
  1128. (s->interrupts << 1) | s->dual;
  1129. break;
  1130. case 0x302: /* SYS_DMA_LCD_TOP_F1_L */
  1131. *ret = s->src_f1_top & 0xffff;
  1132. break;
  1133. case 0x304: /* SYS_DMA_LCD_TOP_F1_U */
  1134. *ret = s->src_f1_top >> 16;
  1135. break;
  1136. case 0x306: /* SYS_DMA_LCD_BOT_F1_L */
  1137. *ret = s->src_f1_bottom & 0xffff;
  1138. break;
  1139. case 0x308: /* SYS_DMA_LCD_BOT_F1_U */
  1140. *ret = s->src_f1_bottom >> 16;
  1141. break;
  1142. case 0x30a: /* SYS_DMA_LCD_TOP_F2_L */
  1143. *ret = s->src_f2_top & 0xffff;
  1144. break;
  1145. case 0x30c: /* SYS_DMA_LCD_TOP_F2_U */
  1146. *ret = s->src_f2_top >> 16;
  1147. break;
  1148. case 0x30e: /* SYS_DMA_LCD_BOT_F2_L */
  1149. *ret = s->src_f2_bottom & 0xffff;
  1150. break;
  1151. case 0x310: /* SYS_DMA_LCD_BOT_F2_U */
  1152. *ret = s->src_f2_bottom >> 16;
  1153. break;
  1154. default:
  1155. return 1;
  1156. }
  1157. return 0;
  1158. }
  1159. static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
  1160. {
  1161. switch (offset) {
  1162. case 0x400: /* SYS_DMA_GCR */
  1163. s->gcr = value;
  1164. break;
  1165. case 0x404: /* DMA_GSCR */
  1166. if (value & 0x8)
  1167. omap_dma_disable_3_1_mapping(s);
  1168. else
  1169. omap_dma_enable_3_1_mapping(s);
  1170. break;
  1171. case 0x408: /* DMA_GRST */
  1172. if (value & 0x1)
  1173. omap_dma_reset(s->dma);
  1174. break;
  1175. default:
  1176. return 1;
  1177. }
  1178. return 0;
  1179. }
  1180. static int omap_dma_sys_read(struct omap_dma_s *s, int offset,
  1181. uint16_t *ret)
  1182. {
  1183. switch (offset) {
  1184. case 0x400: /* SYS_DMA_GCR */
  1185. *ret = s->gcr;
  1186. break;
  1187. case 0x404: /* DMA_GSCR */
  1188. *ret = s->omap_3_1_mapping_disabled << 3;
  1189. break;
  1190. case 0x408: /* DMA_GRST */
  1191. *ret = 0;
  1192. break;
  1193. case 0x442: /* DMA_HW_ID */
  1194. case 0x444: /* DMA_PCh2_ID */
  1195. case 0x446: /* DMA_PCh0_ID */
  1196. case 0x448: /* DMA_PCh1_ID */
  1197. case 0x44a: /* DMA_PChG_ID */
  1198. case 0x44c: /* DMA_PChD_ID */
  1199. *ret = 1;
  1200. break;
  1201. case 0x44e: /* DMA_CAPS_0_U */
  1202. *ret = (s->caps[0] >> 16) & 0xffff;
  1203. break;
  1204. case 0x450: /* DMA_CAPS_0_L */
  1205. *ret = (s->caps[0] >> 0) & 0xffff;
  1206. break;
  1207. case 0x452: /* DMA_CAPS_1_U */
  1208. *ret = (s->caps[1] >> 16) & 0xffff;
  1209. break;
  1210. case 0x454: /* DMA_CAPS_1_L */
  1211. *ret = (s->caps[1] >> 0) & 0xffff;
  1212. break;
  1213. case 0x456: /* DMA_CAPS_2 */
  1214. *ret = s->caps[2];
  1215. break;
  1216. case 0x458: /* DMA_CAPS_3 */
  1217. *ret = s->caps[3];
  1218. break;
  1219. case 0x45a: /* DMA_CAPS_4 */
  1220. *ret = s->caps[4];
  1221. break;
  1222. case 0x460: /* DMA_PCh2_SR */
  1223. case 0x480: /* DMA_PCh0_SR */
  1224. case 0x482: /* DMA_PCh1_SR */
  1225. case 0x4c0: /* DMA_PChD_SR_0 */
  1226. qemu_log_mask(LOG_UNIMP,
  1227. "%s: Physical Channel Status Registers not implemented\n",
  1228. __func__);
  1229. *ret = 0xff;
  1230. break;
  1231. default:
  1232. return 1;
  1233. }
  1234. return 0;
  1235. }
  1236. static uint64_t omap_dma_read(void *opaque, hwaddr addr,
  1237. unsigned size)
  1238. {
  1239. struct omap_dma_s *s = (struct omap_dma_s *) opaque;
  1240. int reg, ch;
  1241. uint16_t ret;
  1242. if (size != 2) {
  1243. return omap_badwidth_read16(opaque, addr);
  1244. }
  1245. switch (addr) {
  1246. case 0x300 ... 0x3fe:
  1247. if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
  1248. if (omap_dma_3_1_lcd_read(&s->lcd_ch, addr, &ret))
  1249. break;
  1250. return ret;
  1251. }
  1252. /* Fall through. */
  1253. case 0x000 ... 0x2fe:
  1254. reg = addr & 0x3f;
  1255. ch = (addr >> 6) & 0x0f;
  1256. if (omap_dma_ch_reg_read(s, &s->ch[ch], reg, &ret))
  1257. break;
  1258. return ret;
  1259. case 0x404 ... 0x4fe:
  1260. if (s->model <= omap_dma_3_1)
  1261. break;
  1262. /* Fall through. */
  1263. case 0x400:
  1264. if (omap_dma_sys_read(s, addr, &ret))
  1265. break;
  1266. return ret;
  1267. case 0xb00 ... 0xbfe:
  1268. if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
  1269. if (omap_dma_3_2_lcd_read(&s->lcd_ch, addr, &ret))
  1270. break;
  1271. return ret;
  1272. }
  1273. break;
  1274. }
  1275. OMAP_BAD_REG(addr);
  1276. return 0;
  1277. }
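/* In the 0x000..0x2fe window above (and the matching one in omap_dma_write()
 * below) each channel occupies a 0x40-byte stride: for example, offset 0x46
 * decodes to ch = (0x46 >> 6) & 0xf = 1 and reg = 0x46 & 0x3f = 0x06, i.e.
 * SYS_DMA_CSR_CH1. */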
  1278. static void omap_dma_write(void *opaque, hwaddr addr,
  1279. uint64_t value, unsigned size)
  1280. {
  1281. struct omap_dma_s *s = (struct omap_dma_s *) opaque;
  1282. int reg, ch;
  1283. if (size != 2) {
  1284. omap_badwidth_write16(opaque, addr, value);
  1285. return;
  1286. }
  1287. switch (addr) {
  1288. case 0x300 ... 0x3fe:
  1289. if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
  1290. if (omap_dma_3_1_lcd_write(&s->lcd_ch, addr, value))
  1291. break;
  1292. return;
  1293. }
  1294. /* Fall through. */
  1295. case 0x000 ... 0x2fe:
  1296. reg = addr & 0x3f;
  1297. ch = (addr >> 6) & 0x0f;
  1298. if (omap_dma_ch_reg_write(s, &s->ch[ch], reg, value))
  1299. break;
  1300. return;
  1301. case 0x404 ... 0x4fe:
  1302. if (s->model <= omap_dma_3_1)
  1303. break;
  1304. /* fall through */
  1305. case 0x400:
  1306. if (omap_dma_sys_write(s, addr, value))
  1307. break;
  1308. return;
  1309. case 0xb00 ... 0xbfe:
  1310. if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
  1311. if (omap_dma_3_2_lcd_write(&s->lcd_ch, addr, value))
  1312. break;
  1313. return;
  1314. }
  1315. break;
  1316. }
  1317. OMAP_BAD_REG(addr);
  1318. }
  1319. static const MemoryRegionOps omap_dma_ops = {
  1320. .read = omap_dma_read,
  1321. .write = omap_dma_write,
  1322. .endianness = DEVICE_NATIVE_ENDIAN,
  1323. };
  1324. static void omap_dma_request(void *opaque, int drq, int req)
  1325. {
  1326. struct omap_dma_s *s = (struct omap_dma_s *) opaque;
  1327. /* The request pins are level triggered in QEMU. */
  1328. if (req) {
  1329. if (~s->dma->drqbmp & (1ULL << drq)) {
  1330. s->dma->drqbmp |= 1ULL << drq;
  1331. omap_dma_process_request(s, drq);
  1332. }
  1333. } else
  1334. s->dma->drqbmp &= ~(1ULL << drq);
  1335. }
  1336. /* XXX: this won't be needed once soc_dma knows about clocks. */
  1337. static void omap_dma_clk_update(void *opaque, int line, int on)
  1338. {
  1339. struct omap_dma_s *s = (struct omap_dma_s *) opaque;
  1340. int i;
  1341. s->dma->freq = omap_clk_getrate(s->clk);
  1342. for (i = 0; i < s->chans; i ++)
  1343. if (s->ch[i].active)
  1344. soc_dma_set_request(s->ch[i].dma, on);
  1345. }
  1346. static void omap_dma_setcaps(struct omap_dma_s *s)
  1347. {
  1348. switch (s->model) {
  1349. default:
  1350. case omap_dma_3_1:
  1351. break;
  1352. case omap_dma_3_2:
  1353. case omap_dma_4:
  1354. /* XXX Only available for sDMA */
  1355. s->caps[0] =
  1356. (1 << 19) | /* Constant Fill Capability */
  1357. (1 << 18); /* Transparent BLT Capability */
  1358. s->caps[1] =
  1359. (1 << 1); /* 1-bit palettized capability (DMA 3.2 only) */
  1360. s->caps[2] =
  1361. (1 << 8) | /* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
  1362. (1 << 7) | /* DST_DOUBLE_INDEX_ADRS_CPBLTY */
  1363. (1 << 6) | /* DST_SINGLE_INDEX_ADRS_CPBLTY */
  1364. (1 << 5) | /* DST_POST_INCRMNT_ADRS_CPBLTY */
  1365. (1 << 4) | /* DST_CONST_ADRS_CPBLTY */
  1366. (1 << 3) | /* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
  1367. (1 << 2) | /* SRC_SINGLE_INDEX_ADRS_CPBLTY */
  1368. (1 << 1) | /* SRC_POST_INCRMNT_ADRS_CPBLTY */
  1369. (1 << 0); /* SRC_CONST_ADRS_CPBLTY */
  1370. s->caps[3] =
  1371. (1 << 6) | /* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
  1372. (1 << 7) | /* PKT_SYNCHR_CPBLTY (DMA 4 only) */
  1373. (1 << 5) | /* CHANNEL_CHAINING_CPBLTY */
  1374. (1 << 4) | /* LCh_INTERLEAVE_CPBLTY */
  1375. (1 << 3) | /* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
  1376. (1 << 2) | /* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
  1377. (1 << 1) | /* FRAME_SYNCHR_CPBLTY */
  1378. (1 << 0); /* ELMNT_SYNCHR_CPBLTY */
  1379. s->caps[4] =
  1380. (1 << 7) | /* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
  1381. (1 << 6) | /* SYNC_STATUS_CPBLTY */
  1382. (1 << 5) | /* BLOCK_INTERRUPT_CPBLTY */
  1383. (1 << 4) | /* LAST_FRAME_INTERRUPT_CPBLTY */
  1384. (1 << 3) | /* FRAME_INTERRUPT_CPBLTY */
  1385. (1 << 2) | /* HALF_FRAME_INTERRUPT_CPBLTY */
  1386. (1 << 1) | /* EVENT_DROP_INTERRUPT_CPBLTY */
  1387. (1 << 0); /* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
  1388. break;
  1389. }
  1390. }
  1391. struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
  1392. MemoryRegion *sysmem,
  1393. qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
  1394. enum omap_dma_model model)
  1395. {
  1396. int num_irqs, memsize, i;
  1397. struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);
  1398. if (model <= omap_dma_3_1) {
  1399. num_irqs = 6;
  1400. memsize = 0x800;
  1401. } else {
  1402. num_irqs = 16;
  1403. memsize = 0xc00;
  1404. }
  1405. s->model = model;
  1406. s->mpu = mpu;
  1407. s->clk = clk;
  1408. s->lcd_ch.irq = lcd_irq;
  1409. s->lcd_ch.mpu = mpu;
  1410. s->dma = soc_dma_init((model <= omap_dma_3_1) ? 9 : 16);
  1411. s->dma->freq = omap_clk_getrate(clk);
  1412. s->dma->transfer_fn = omap_dma_transfer_generic;
  1413. s->dma->setup_fn = omap_dma_transfer_setup;
  1414. s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
  1415. s->dma->opaque = s;
  1416. while (num_irqs --)
  1417. s->ch[num_irqs].irq = irqs[num_irqs];
  1418. for (i = 0; i < 3; i ++) {
  1419. s->ch[i].sibling = &s->ch[i + 6];
  1420. s->ch[i + 6].sibling = &s->ch[i];
  1421. }
  1422. for (i = (model <= omap_dma_3_1) ? 8 : 15; i >= 0; i --) {
  1423. s->ch[i].dma = &s->dma->ch[i];
  1424. s->dma->ch[i].opaque = &s->ch[i];
  1425. }
  1426. omap_dma_setcaps(s);
  1427. omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
  1428. omap_dma_reset(s->dma);
  1429. omap_dma_clk_update(s, 0, 1);
  1430. memory_region_init_io(&s->iomem, NULL, &omap_dma_ops, s, "omap.dma", memsize);
  1431. memory_region_add_subregion(sysmem, base, &s->iomem);
  1432. mpu->drq = s->dma->drq;
  1433. return s->dma;
  1434. }
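/* A minimal usage sketch (not part of this file): a SoC model would create
 * the controller roughly as below.  The base address, IRQ wiring and clock
 * name are placeholders and depend on the instantiating SoC model.
 *
 *     struct omap_mpu_state_s *mpu = ...;     // owning SoC state
 *     qemu_irq *dma_irqs = ...;               // 6 (OMAP 3.1) or 16 level IRQs
 *     struct soc_dma_s *dma =
 *         omap_dma_init(0xfffed800,           // assumed OMAP1 base address
 *                       dma_irqs, get_system_memory(), lcd_dma_irq, mpu,
 *                       omap_findclk(mpu, "dma_ck"),   // assumed clock name
 *                       omap_dma_3_1);
 *
 * The drq lines allocated above are exported through mpu->drq; peripheral
 * models pulse them to post DMA requests. */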
  1435. static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
  1436. {
  1437. struct omap_dma_channel_s *ch = s->ch;
  1438. uint32_t bmp, bit;
  1439. for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1)
  1440. if (ch->status) {
  1441. bmp |= bit;
  1442. ch->cstatus |= ch->status;
  1443. ch->status = 0;
  1444. }
  1445. if ((s->irqstat[0] |= s->irqen[0] & bmp))
  1446. qemu_irq_raise(s->irq[0]);
  1447. if ((s->irqstat[1] |= s->irqen[1] & bmp))
  1448. qemu_irq_raise(s->irq[1]);
  1449. if ((s->irqstat[2] |= s->irqen[2] & bmp))
  1450. qemu_irq_raise(s->irq[2]);
  1451. if ((s->irqstat[3] |= s->irqen[3] & bmp))
  1452. qemu_irq_raise(s->irq[3]);
  1453. }
static uint64_t omap_dma4_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int irqn = 0, chnum;
    struct omap_dma_channel_s *ch;

    if (size == 1) {
        return omap_badwidth_read16(opaque, addr);
    }

    switch (addr) {
    case 0x00:  /* DMA4_REVISION */
        return 0x40;

    case 0x14:  /* DMA4_IRQSTATUS_L3 */
        irqn ++;
        /* fall through */
    case 0x10:  /* DMA4_IRQSTATUS_L2 */
        irqn ++;
        /* fall through */
    case 0x0c:  /* DMA4_IRQSTATUS_L1 */
        irqn ++;
        /* fall through */
    case 0x08:  /* DMA4_IRQSTATUS_L0 */
        return s->irqstat[irqn];

    case 0x24:  /* DMA4_IRQENABLE_L3 */
        irqn ++;
        /* fall through */
    case 0x20:  /* DMA4_IRQENABLE_L2 */
        irqn ++;
        /* fall through */
    case 0x1c:  /* DMA4_IRQENABLE_L1 */
        irqn ++;
        /* fall through */
    case 0x18:  /* DMA4_IRQENABLE_L0 */
        return s->irqen[irqn];

    case 0x28:  /* DMA4_SYSSTATUS */
        return 1;  /* RESETDONE */

    case 0x2c:  /* DMA4_OCP_SYSCONFIG */
        return s->ocp;

    case 0x64:  /* DMA4_CAPS_0 */
        return s->caps[0];
    case 0x6c:  /* DMA4_CAPS_2 */
        return s->caps[2];
    case 0x70:  /* DMA4_CAPS_3 */
        return s->caps[3];
    case 0x74:  /* DMA4_CAPS_4 */
        return s->caps[4];

    case 0x78:  /* DMA4_GCR */
        return s->gcr;

    case 0x80 ... 0xfff:
        addr -= 0x80;
        chnum = addr / 0x60;
        ch = s->ch + chnum;
        addr -= chnum * 0x60;
        break;

    default:
        OMAP_BAD_REG(addr);
        return 0;
    }

    /* Per-channel registers */
    switch (addr) {
    case 0x00:  /* DMA4_CCR */
        return (ch->buf_disable << 25) |
                (ch->src_sync << 24) |
                (ch->prefetch << 23) |
                ((ch->sync & 0x60) << 14) |
                (ch->bs << 18) |
                (ch->transparent_copy << 17) |
                (ch->constant_fill << 16) |
                (ch->mode[1] << 14) |
                (ch->mode[0] << 12) |
                (0 << 10) | (0 << 9) |
                (ch->suspend << 8) |
                (ch->enable << 7) |
                (ch->priority << 6) |
                (ch->fs << 5) | (ch->sync & 0x1f);

    case 0x04:  /* DMA4_CLNK_CTRL */
        return (ch->link_enabled << 15) | ch->link_next_ch;

    case 0x08:  /* DMA4_CICR */
        return ch->interrupts;

    case 0x0c:  /* DMA4_CSR */
        return ch->cstatus;

    case 0x10:  /* DMA4_CSDP */
        return (ch->endian[0] << 21) |
                (ch->endian_lock[0] << 20) |
                (ch->endian[1] << 19) |
                (ch->endian_lock[1] << 18) |
                (ch->write_mode << 16) |
                (ch->burst[1] << 14) |
                (ch->pack[1] << 13) |
                (ch->translate[1] << 9) |
                (ch->burst[0] << 7) |
                (ch->pack[0] << 6) |
                (ch->translate[0] << 2) |
                (ch->data_type >> 1);

    case 0x14:  /* DMA4_CEN */
        return ch->elements;

    case 0x18:  /* DMA4_CFN */
        return ch->frames;

    case 0x1c:  /* DMA4_CSSA */
        return ch->addr[0];

    case 0x20:  /* DMA4_CDSA */
        return ch->addr[1];

    case 0x24:  /* DMA4_CSEI */
        return ch->element_index[0];

    case 0x28:  /* DMA4_CSFI */
        return ch->frame_index[0];

    case 0x2c:  /* DMA4_CDEI */
        return ch->element_index[1];

    case 0x30:  /* DMA4_CDFI */
        return ch->frame_index[1];

    case 0x34:  /* DMA4_CSAC */
        return ch->active_set.src & 0xffff;

    case 0x38:  /* DMA4_CDAC */
        return ch->active_set.dest & 0xffff;

    case 0x3c:  /* DMA4_CCEN */
        return ch->active_set.element;

    case 0x40:  /* DMA4_CCFN */
        return ch->active_set.frame;

    case 0x44:  /* DMA4_COLOR */
        /* XXX only in sDMA */
        return ch->color;

    default:
        OMAP_BAD_REG(addr);
        return 0;
    }
}
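
/*
 * Write side of the DMA4 register file.  The DMA4_IRQSTATUS_Lx registers
 * are write-1-to-clear, a SOFTRESET via DMA4_OCP_SYSCONFIG resets the
 * whole controller, and the REVISION/SYSSTATUS/CAPS registers are
 * read-only.
 */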
static void omap_dma4_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int chnum, irqn = 0;
    struct omap_dma_channel_s *ch;

    if (size == 1) {
        omap_badwidth_write16(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x14:  /* DMA4_IRQSTATUS_L3 */
        irqn ++;
        /* fall through */
    case 0x10:  /* DMA4_IRQSTATUS_L2 */
        irqn ++;
        /* fall through */
    case 0x0c:  /* DMA4_IRQSTATUS_L1 */
        irqn ++;
        /* fall through */
    case 0x08:  /* DMA4_IRQSTATUS_L0 */
        s->irqstat[irqn] &= ~value;
        if (!s->irqstat[irqn])
            qemu_irq_lower(s->irq[irqn]);
        return;

    case 0x24:  /* DMA4_IRQENABLE_L3 */
        irqn ++;
        /* fall through */
    case 0x20:  /* DMA4_IRQENABLE_L2 */
        irqn ++;
        /* fall through */
    case 0x1c:  /* DMA4_IRQENABLE_L1 */
        irqn ++;
        /* fall through */
    case 0x18:  /* DMA4_IRQENABLE_L0 */
        s->irqen[irqn] = value;
        return;

    case 0x2c:  /* DMA4_OCP_SYSCONFIG */
        if (value & 2)  /* SOFTRESET */
            omap_dma_reset(s->dma);
        s->ocp = value & 0x3321;
        if (((s->ocp >> 12) & 3) == 3) {  /* MIDLEMODE */
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n",
                          __func__);
        }
        return;

    case 0x78:  /* DMA4_GCR */
        s->gcr = value & 0x00ff00ff;
        if ((value & 0xff) == 0x00) {  /* MAX_CHANNEL_FIFO_DEPTH */
            qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n",
                          __func__);
        }
        return;

    case 0x80 ... 0xfff:
        addr -= 0x80;
        chnum = addr / 0x60;
        ch = s->ch + chnum;
        addr -= chnum * 0x60;
        break;

    case 0x00:  /* DMA4_REVISION */
    case 0x28:  /* DMA4_SYSSTATUS */
    case 0x64:  /* DMA4_CAPS_0 */
    case 0x6c:  /* DMA4_CAPS_2 */
    case 0x70:  /* DMA4_CAPS_3 */
    case 0x74:  /* DMA4_CAPS_4 */
        OMAP_RO_REG(addr);
        return;

    default:
        OMAP_BAD_REG(addr);
        return;
    }

    /* Per-channel registers */
    switch (addr) {
    case 0x00:  /* DMA4_CCR */
        ch->buf_disable = (value >> 25) & 1;
        ch->src_sync = (value >> 24) & 1;  /* XXX For CamDMA must be 1 */
        if (ch->buf_disable && !ch->src_sync) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Buffering disable is not allowed in "
                          "destination synchronised mode\n", __func__);
        }
        ch->prefetch = (value >> 23) & 1;
        ch->bs = (value >> 18) & 1;
        ch->transparent_copy = (value >> 17) & 1;
        ch->constant_fill = (value >> 16) & 1;
        ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
        ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
        ch->suspend = (value & 0x0100) >> 8;
        ch->priority = (value & 0x0040) >> 6;
        ch->fs = (value & 0x0020) >> 5;
        if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: For a packet transfer at least one port "
                          "must be constant-addressed\n", __func__);
        }
        ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060);
        /* XXX must be 0x01 for CamDMA */

        if (value & 0x0080)
            omap_dma_enable_channel(s, ch);
        else
            omap_dma_disable_channel(s, ch);
        break;

    case 0x04:  /* DMA4_CLNK_CTRL */
        ch->link_enabled = (value >> 15) & 0x1;
        ch->link_next_ch = value & 0x1f;
        break;

    case 0x08:  /* DMA4_CICR */
        ch->interrupts = value & 0x09be;
        break;

    case 0x0c:  /* DMA4_CSR */
        ch->cstatus &= ~value;
        break;

    case 0x10:  /* DMA4_CSDP */
        ch->endian[0] = (value >> 21) & 1;
        ch->endian_lock[0] = (value >> 20) & 1;
        ch->endian[1] = (value >> 19) & 1;
        ch->endian_lock[1] = (value >> 18) & 1;
        if (ch->endian[0] != ch->endian[1]) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: DMA endianness conversion enable attempt\n",
                          __func__);
        }
        ch->write_mode = (value >> 16) & 3;
        ch->burst[1] = (value & 0xc000) >> 14;
        ch->pack[1] = (value & 0x2000) >> 13;
        ch->translate[1] = (value & 0x1e00) >> 9;
        ch->burst[0] = (value & 0x0180) >> 7;
        ch->pack[0] = (value & 0x0040) >> 6;
        ch->translate[0] = (value & 0x003c) >> 2;
        if (ch->translate[0] | ch->translate[1]) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad MReqAddressTranslate sideband signal\n",
                          __func__);
        }
        ch->data_type = 1 << (value & 3);
        if ((value & 3) == 3) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad data_type for DMA channel\n", __func__);
            ch->data_type >>= 1;
        }
        break;

    case 0x14:  /* DMA4_CEN */
        ch->set_update = 1;
        ch->elements = value & 0xffffff;
        break;

    case 0x18:  /* DMA4_CFN */
        ch->frames = value & 0xffff;
        ch->set_update = 1;
        break;

    case 0x1c:  /* DMA4_CSSA */
        ch->addr[0] = (hwaddr) (uint32_t) value;
        ch->set_update = 1;
        break;

    case 0x20:  /* DMA4_CDSA */
        ch->addr[1] = (hwaddr) (uint32_t) value;
        ch->set_update = 1;
        break;

    case 0x24:  /* DMA4_CSEI */
        ch->element_index[0] = (int16_t) value;
        ch->set_update = 1;
        break;

    case 0x28:  /* DMA4_CSFI */
        ch->frame_index[0] = (int32_t) value;
        ch->set_update = 1;
        break;

    case 0x2c:  /* DMA4_CDEI */
        ch->element_index[1] = (int16_t) value;
        ch->set_update = 1;
        break;

    case 0x30:  /* DMA4_CDFI */
        ch->frame_index[1] = (int32_t) value;
        ch->set_update = 1;
        break;

    case 0x44:  /* DMA4_COLOR */
        /* XXX only in sDMA */
        ch->color = value;
        break;

    case 0x34:  /* DMA4_CSAC */
    case 0x38:  /* DMA4_CDAC */
    case 0x3c:  /* DMA4_CCEN */
    case 0x40:  /* DMA4_CCFN */
        OMAP_RO_REG(addr);
        break;

    default:
        OMAP_BAD_REG(addr);
    }
}

static const MemoryRegionOps omap_dma4_ops = {
    .read = omap_dma4_read,
    .write = omap_dma4_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
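
/*
 * Instantiate the OMAP2/3 sDMA (DMA4) controller: `chans` logical
 * channels, 64 DMA request lines, four host interrupt outputs and a
 * 4 KiB MMIO window clocked from the functional clock `fclk`.  The
 * `fifo` depth and interface clock `iclk` arguments are accepted but
 * unused by this model.
 */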
struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
                MemoryRegion *sysmem,
                struct omap_mpu_state_s *mpu, int fifo,
                int chans, omap_clk iclk, omap_clk fclk)
{
    int i;
    struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);

    s->model = omap_dma_4;
    s->chans = chans;
    s->mpu = mpu;
    s->clk = fclk;

    s->dma = soc_dma_init(s->chans);
    s->dma->freq = omap_clk_getrate(fclk);
    s->dma->transfer_fn = omap_dma_transfer_generic;
    s->dma->setup_fn = omap_dma_transfer_setup;
    s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
    s->dma->opaque = s;
    for (i = 0; i < s->chans; i ++) {
        s->ch[i].dma = &s->dma->ch[i];
        s->dma->ch[i].opaque = &s->ch[i];
    }

    memcpy(&s->irq, irqs, sizeof(s->irq));
    s->intr_update = omap_dma_interrupts_4_update;

    omap_dma_setcaps(s);
    omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
    omap_dma_reset(s->dma);
    omap_dma_clk_update(s, 0, !!s->dma->freq);

    memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    mpu->drq = s->dma->drq;

    return s->dma;
}
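
/*
 * Illustrative call only (base address, channel count and clock names
 * are placeholders, not taken from this file): an OMAP2-style machine
 * model might instantiate the controller as
 *
 *     s->dma = omap_dma4_init(0x48056000, dma_irqs, get_system_memory(),
 *                             s, 256, 32,
 *                             omap_findclk(s, "sdma_iclk"),
 *                             omap_findclk(s, "sdma_fclk"));
 *
 * passing the four line-sorted host interrupts in dma_irqs[].
 */
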
struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
{
    struct omap_dma_s *s = dma->opaque;

    return &s->lcd_ch;
}