/*
 * STM32L4X5 RCC (Reset and clock control)
 *
 * Copyright (c) 2023 Arnaud Minier <arnaud.minier@telecom-paris.fr>
 * Copyright (c) 2023 Inès Varhol <ines.varhol@telecom-paris.fr>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * The reference used is the STMicroElectronics RM0351 Reference manual
 * for STM32L4x5 and STM32L4x6 advanced Arm®-based 32-bit MCUs.
 *
 * Inspired by the BCM2835 CPRMAN clock manager implementation by Luc Michel.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "hw/misc/stm32l4x5_rcc.h"
#include "hw/misc/stm32l4x5_rcc_internals.h"
#include "hw/clock.h"
#include "hw/irq.h"
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/registerfields.h"
#include "trace.h"

#define HSE_DEFAULT_FRQ 48000000ULL
#define HSI_FRQ 16000000ULL
#define MSI_DEFAULT_FRQ 4000000ULL
#define LSE_FRQ 32768ULL
#define LSI_FRQ 32000ULL

/*
 * Acknowledge and propagate changes in a clock mux frequency.
 * `bypass_source` allows bypassing the period of the current source and
 * simply considering it equal to 0. This is useful during the hold phase
 * of reset.
 */
static void clock_mux_update(RccClockMuxState *mux, bool bypass_source)
{
    uint64_t src_freq;
    Clock *current_source = mux->srcs[mux->src];
    uint32_t freq_multiplier = 0;
    bool clk_changed = false;

    /*
     * To avoid rounding errors, we use the clock period instead of the
     * frequency.
     * This means that the multiplier of the mux becomes the divider of
     * the clock and the divider of the mux becomes the multiplier of the
     * clock.
     */
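    /*
     * Worked example (illustrative values): with an 8 MHz source, a mux
     * multiplier of 1 and a divider of 2, the output period is doubled,
     * i.e. the output clock runs at 4 MHz.
     */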
    if (!bypass_source && mux->enabled && mux->divider) {
        freq_multiplier = mux->divider;
    }

    clk_changed |= clock_set_mul_div(mux->out, freq_multiplier, mux->multiplier);
    clk_changed |= clock_set(mux->out, clock_get(current_source));
    if (clk_changed) {
        clock_propagate(mux->out);
    }

    src_freq = clock_get_hz(current_source);
    /* TODO: can we simply detect if the config changed so that we reduce log spam? */
    trace_stm32l4x5_rcc_mux_update(mux->id, mux->src, src_freq,
                                   mux->multiplier, mux->divider);
}

static void clock_mux_src_update(void *opaque, ClockEvent event)
{
    RccClockMuxState **backref = opaque;
    RccClockMuxState *s = *backref;
    /*
     * The backref value is equal to:
     * s->backref + (sizeof(RccClockMuxState *) * update_src).
     * By subtracting we can get back the index of the updated clock.
     */
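    /*
     * E.g. if srcs[2] fired the event, `opaque` points at &s->backref[2],
     * so `backref - s->backref` evaluates to 2.
     */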
    const uint32_t update_src = backref - s->backref;
    /* Only update if the clock that was updated is the current source */
    if (update_src == s->src) {
        clock_mux_update(s, false);
    }
}

static void clock_mux_init(Object *obj)
{
    RccClockMuxState *s = RCC_CLOCK_MUX(obj);
    size_t i;

    for (i = 0; i < RCC_NUM_CLOCK_MUX_SRC; i++) {
        char *name = g_strdup_printf("srcs[%zu]", i);
        s->backref[i] = s;
        s->srcs[i] = qdev_init_clock_in(DEVICE(s), name,
                                        clock_mux_src_update,
                                        &s->backref[i],
                                        ClockUpdate);
        g_free(name);
    }

    s->out = qdev_init_clock_out(DEVICE(s), "out");
}

static void clock_mux_reset_enter(Object *obj, ResetType type)
{
    RccClockMuxState *s = RCC_CLOCK_MUX(obj);

    set_clock_mux_init_info(s, s->id);
}

static void clock_mux_reset_hold(Object *obj, ResetType type)
{
    RccClockMuxState *s = RCC_CLOCK_MUX(obj);

    clock_mux_update(s, true);
}

static void clock_mux_reset_exit(Object *obj, ResetType type)
{
    RccClockMuxState *s = RCC_CLOCK_MUX(obj);

    clock_mux_update(s, false);
}

static const VMStateDescription clock_mux_vmstate = {
    .name = TYPE_RCC_CLOCK_MUX,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, RccClockMuxState),
        VMSTATE_ARRAY_CLOCK(srcs, RccClockMuxState,
                            RCC_NUM_CLOCK_MUX_SRC),
        VMSTATE_BOOL(enabled, RccClockMuxState),
        VMSTATE_UINT32(src, RccClockMuxState),
        VMSTATE_UINT32(multiplier, RccClockMuxState),
        VMSTATE_UINT32(divider, RccClockMuxState),
        VMSTATE_END_OF_LIST()
    }
};

static void clock_mux_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    rc->phases.enter = clock_mux_reset_enter;
    rc->phases.hold = clock_mux_reset_hold;
    rc->phases.exit = clock_mux_reset_exit;
    dc->vmsd = &clock_mux_vmstate;
}

static void clock_mux_set_enable(RccClockMuxState *mux, bool enabled)
{
    if (mux->enabled == enabled) {
        return;
    }

    if (enabled) {
        trace_stm32l4x5_rcc_mux_enable(mux->id);
    } else {
        trace_stm32l4x5_rcc_mux_disable(mux->id);
    }

    mux->enabled = enabled;
    clock_mux_update(mux, false);
}

static void clock_mux_set_factor(RccClockMuxState *mux,
                                 uint32_t multiplier, uint32_t divider)
{
    if (mux->multiplier == multiplier && mux->divider == divider) {
        return;
    }
    trace_stm32l4x5_rcc_mux_set_factor(mux->id,
        mux->multiplier, multiplier, mux->divider, divider);

    mux->multiplier = multiplier;
    mux->divider = divider;
    clock_mux_update(mux, false);
}

static void clock_mux_set_source(RccClockMuxState *mux, RccClockMuxSource src)
{
    if (mux->src == src) {
        return;
    }

    trace_stm32l4x5_rcc_mux_set_src(mux->id, mux->src, src);
    mux->src = src;
    clock_mux_update(mux, false);
}

/*
 * Acknowledge and propagate changes in a PLL frequency.
 * `bypass_source` allows bypassing the period of the current source and
 * simply considering it equal to 0. This is useful during the hold phase
 * of reset.
 */
static void pll_update(RccPllState *pll, bool bypass_source)
{
    uint64_t vco_freq, old_channel_freq, channel_freq;
    int i;

    /* The common PLLM factor is handled by the PLL mux */
    vco_freq = muldiv64(clock_get_hz(pll->in), pll->vco_multiplier, 1);
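    /*
     * E.g. (illustrative values): a 4 MHz input with a vco_multiplier (PLLN)
     * of 40 gives a 160 MHz VCO; a channel divider of 4 then yields 40 MHz
     * on that output channel.
     */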

    for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) {
        if (!pll->channel_exists[i]) {
            continue;
        }

        old_channel_freq = clock_get_hz(pll->channels[i]);
        if (bypass_source ||
            !pll->enabled ||
            !pll->channel_enabled[i] ||
            !pll->channel_divider[i]) {
            channel_freq = 0;
        } else {
            channel_freq = muldiv64(vco_freq,
                                    1,
                                    pll->channel_divider[i]);
        }

        /* No change, early continue to avoid log spam and useless propagation */
        if (old_channel_freq == channel_freq) {
            continue;
        }

        clock_update_hz(pll->channels[i], channel_freq);
        trace_stm32l4x5_rcc_pll_update(pll->id, i, vco_freq,
                                       old_channel_freq, channel_freq);
    }
}

static void pll_src_update(void *opaque, ClockEvent event)
{
    RccPllState *s = opaque;

    pll_update(s, false);
}

static void pll_init(Object *obj)
{
    RccPllState *s = RCC_PLL(obj);
    size_t i;

    s->in = qdev_init_clock_in(DEVICE(s), "in",
                               pll_src_update, s, ClockUpdate);

    const char *names[] = {
        "out-p", "out-q", "out-r",
    };

    for (i = 0; i < RCC_NUM_CHANNEL_PLL_OUT; i++) {
        s->channels[i] = qdev_init_clock_out(DEVICE(s), names[i]);
    }
}

static void pll_reset_enter(Object *obj, ResetType type)
{
    RccPllState *s = RCC_PLL(obj);

    set_pll_init_info(s, s->id);
}

static void pll_reset_hold(Object *obj, ResetType type)
{
    RccPllState *s = RCC_PLL(obj);

    pll_update(s, true);
}

static void pll_reset_exit(Object *obj, ResetType type)
{
    RccPllState *s = RCC_PLL(obj);

    pll_update(s, false);
}

static const VMStateDescription pll_vmstate = {
    .name = TYPE_RCC_PLL,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, RccPllState),
        VMSTATE_CLOCK(in, RccPllState),
        VMSTATE_ARRAY_CLOCK(channels, RccPllState,
                            RCC_NUM_CHANNEL_PLL_OUT),
        VMSTATE_BOOL(enabled, RccPllState),
        VMSTATE_UINT32(vco_multiplier, RccPllState),
        VMSTATE_BOOL_ARRAY(channel_enabled, RccPllState,
                           RCC_NUM_CHANNEL_PLL_OUT),
        VMSTATE_BOOL_ARRAY(channel_exists, RccPllState,
                           RCC_NUM_CHANNEL_PLL_OUT),
        VMSTATE_UINT32_ARRAY(channel_divider, RccPllState,
                             RCC_NUM_CHANNEL_PLL_OUT),
        VMSTATE_END_OF_LIST()
    }
};

static void pll_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    rc->phases.enter = pll_reset_enter;
    rc->phases.hold = pll_reset_hold;
    rc->phases.exit = pll_reset_exit;
    dc->vmsd = &pll_vmstate;
}

static void pll_set_vco_multiplier(RccPllState *pll, uint32_t vco_multiplier)
{
    if (pll->vco_multiplier == vco_multiplier) {
        return;
    }

    if (vco_multiplier < 8 || vco_multiplier > 86) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: VCO multiplier is out of bounds (%u) for PLL %u\n",
            __func__, vco_multiplier, pll->id);
        return;
    }

    trace_stm32l4x5_rcc_pll_set_vco_multiplier(pll->id,
        pll->vco_multiplier, vco_multiplier);

    pll->vco_multiplier = vco_multiplier;
    pll_update(pll, false);
}

static void pll_set_enable(RccPllState *pll, bool enabled)
{
    if (pll->enabled == enabled) {
        return;
    }

    pll->enabled = enabled;
    pll_update(pll, false);
}

static void pll_set_channel_enable(RccPllState *pll,
                                   PllCommonChannels channel,
                                   bool enabled)
{
    if (pll->channel_enabled[channel] == enabled) {
        return;
    }

    if (enabled) {
        trace_stm32l4x5_rcc_pll_channel_enable(pll->id, channel);
    } else {
        trace_stm32l4x5_rcc_pll_channel_disable(pll->id, channel);
    }

    pll->channel_enabled[channel] = enabled;
    pll_update(pll, false);
}

static void pll_set_channel_divider(RccPllState *pll,
                                    PllCommonChannels channel,
                                    uint32_t divider)
{
    if (pll->channel_divider[channel] == divider) {
        return;
    }

    trace_stm32l4x5_rcc_pll_set_channel_divider(pll->id,
        channel, pll->channel_divider[channel], divider);

    pll->channel_divider[channel] = divider;
    pll_update(pll, false);
}

static void rcc_update_irq(Stm32l4x5RccState *s)
{
    /*
     * TODO: Handle LSECSSF and CSSF flags when the CSS is implemented.
     */
    if (s->cifr & CIFR_IRQ_MASK) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}

static void rcc_update_msi(Stm32l4x5RccState *s, uint32_t previous_value)
{
    uint32_t val;

    static const uint32_t msirange[] = {
        100000, 200000, 400000, 800000, 1000000, 2000000,
        4000000, 8000000, 16000000, 24000000, 32000000, 48000000
    };
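    /* E.g. index 6 (MSIRANGE = 0b0110, the reset default) selects 4 MHz. */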
    /* MSIRANGE and MSIRGSEL */
    val = extract32(s->cr, R_CR_MSIRGSEL_SHIFT, R_CR_MSIRGSEL_LENGTH);
    if (val) {
        /* MSIRGSEL is set, use the MSIRANGE field */
        val = extract32(s->cr, R_CR_MSIRANGE_SHIFT, R_CR_MSIRANGE_LENGTH);
    } else {
        /* MSIRGSEL is not set, use the MSISRANGE field */
        val = extract32(s->csr, R_CSR_MSISRANGE_SHIFT, R_CSR_MSISRANGE_LENGTH);
    }

    if (val < ARRAY_SIZE(msirange)) {
        clock_update_hz(s->msi_rc, msirange[val]);
    } else {
        /*
         * There is a hardware write protection if the value is out of bounds.
         * Restore the previous value.
         */
        s->cr = (s->cr & ~R_CR_MSIRANGE_MASK) |
                (previous_value & R_CR_MSIRANGE_MASK);
    }
}

/*
 * TODO: Add write-protection for all registers:
 * DONE: CR
 */
static void rcc_update_cr_register(Stm32l4x5RccState *s, uint32_t previous_value)
{
    int val;
    const RccClockMuxSource current_pll_src =
        CLOCK_MUX_INIT_INFO[RCC_CLOCK_MUX_PLL_INPUT].src_mapping[
            s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT].src];

    /* PLLSAI2ON and update PLLSAI2RDY */
    val = FIELD_EX32(s->cr, CR, PLLSAI2ON);
    pll_set_enable(&s->plls[RCC_PLL_PLLSAI2], val);
    s->cr = (s->cr & ~R_CR_PLLSAI2RDY_MASK) |
            (val << R_CR_PLLSAI2RDY_SHIFT);
    if (s->cier & R_CIER_PLLSAI2RDYIE_MASK) {
        s->cifr |= R_CIFR_PLLSAI2RDYF_MASK;
    }

    /* PLLSAI1ON and update PLLSAI1RDY */
    val = FIELD_EX32(s->cr, CR, PLLSAI1ON);
    pll_set_enable(&s->plls[RCC_PLL_PLLSAI1], val);
    s->cr = (s->cr & ~R_CR_PLLSAI1RDY_MASK) |
            (val << R_CR_PLLSAI1RDY_SHIFT);
    if (s->cier & R_CIER_PLLSAI1RDYIE_MASK) {
        s->cifr |= R_CIFR_PLLSAI1RDYF_MASK;
    }

    /*
     * PLLON and update PLLRDY
     * PLLON cannot be reset if the PLL clock is used as the system clock.
     */
    val = FIELD_EX32(s->cr, CR, PLLON);
    if (FIELD_EX32(s->cfgr, CFGR, SWS) != 0b11) {
        pll_set_enable(&s->plls[RCC_PLL_PLL], val);
        s->cr = (s->cr & ~R_CR_PLLRDY_MASK) |
                (val << R_CR_PLLRDY_SHIFT);
        if (s->cier & R_CIER_PLLRDYIE_MASK) {
            s->cifr |= R_CIFR_PLLRDYF_MASK;
        }
    } else {
        s->cr |= R_CR_PLLON_MASK;
    }

    /* CSSON: TODO */
    /* HSEBYP: TODO */

    /*
     * HSEON and update HSERDY.
     * HSEON cannot be reset if the HSE oscillator is used directly or
     * indirectly as the system clock.
     */
    val = FIELD_EX32(s->cr, CR, HSEON);
    if (FIELD_EX32(s->cfgr, CFGR, SWS) != 0b10 &&
        current_pll_src != RCC_CLOCK_MUX_SRC_HSE) {
        s->cr = (s->cr & ~R_CR_HSERDY_MASK) |
                (val << R_CR_HSERDY_SHIFT);
        if (val) {
            clock_update_hz(s->hse, s->hse_frequency);
            if (s->cier & R_CIER_HSERDYIE_MASK) {
                s->cifr |= R_CIFR_HSERDYF_MASK;
            }
        } else {
            clock_update(s->hse, 0);
        }
    } else {
        s->cr |= R_CR_HSEON_MASK;
    }

    /* HSIASFS: TODO */
    /* HSIKERON: TODO */

    /*
     * HSION and update HSIRDY
     * HSION is set by hardware if the HSI16 is used directly
     * or indirectly as system clock.
     */
    if (FIELD_EX32(s->cfgr, CFGR, SWS) == 0b01 ||
        current_pll_src == RCC_CLOCK_MUX_SRC_HSI) {
        s->cr |= (R_CR_HSION_MASK | R_CR_HSIRDY_MASK);
        clock_update_hz(s->hsi16_rc, HSI_FRQ);
        if (s->cier & R_CIER_HSIRDYIE_MASK) {
            s->cifr |= R_CIFR_HSIRDYF_MASK;
        }
    } else {
        val = FIELD_EX32(s->cr, CR, HSION);
        if (val) {
            clock_update_hz(s->hsi16_rc, HSI_FRQ);
            s->cr |= R_CR_HSIRDY_MASK;
            if (s->cier & R_CIER_HSIRDYIE_MASK) {
                s->cifr |= R_CIFR_HSIRDYF_MASK;
            }
        } else {
            clock_update(s->hsi16_rc, 0);
            s->cr &= ~R_CR_HSIRDY_MASK;
        }
    }

    /* MSIPLLEN: TODO */

    /*
     * MSION and update MSIRDY
     * Set by hardware when used directly or indirectly as system clock.
     */
    if (FIELD_EX32(s->cfgr, CFGR, SWS) == 0b00 ||
        current_pll_src == RCC_CLOCK_MUX_SRC_MSI) {
        s->cr |= (R_CR_MSION_MASK | R_CR_MSIRDY_MASK);
        if (!(previous_value & R_CR_MSION_MASK) &&
            (s->cier & R_CIER_MSIRDYIE_MASK)) {
            s->cifr |= R_CIFR_MSIRDYF_MASK;
        }
        rcc_update_msi(s, previous_value);
    } else {
        val = FIELD_EX32(s->cr, CR, MSION);
        if (val) {
            s->cr |= R_CR_MSIRDY_MASK;
            rcc_update_msi(s, previous_value);
            if (s->cier & R_CIER_MSIRDYIE_MASK) {
                s->cifr |= R_CIFR_MSIRDYF_MASK;
            }
        } else {
            s->cr &= ~R_CR_MSIRDY_MASK;
            clock_update(s->msi_rc, 0);
        }
    }
    rcc_update_irq(s);
}

static void rcc_update_cfgr_register(Stm32l4x5RccState *s)
{
    uint32_t val;

    /* MCOPRE */
    val = FIELD_EX32(s->cfgr, CFGR, MCOPRE);
    assert(val <= 0b100);
    clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
                         1, 1 << val);
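    /* E.g. MCOPRE = 0b010 divides the MCO output by 4 (1 << 0b010). */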

    /* MCOSEL */
    val = FIELD_EX32(s->cfgr, CFGR, MCOSEL);
    assert(val <= 0b111);
    if (val == 0) {
        clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], false);
    } else {
        clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_MCO], true);
        clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_MCO],
                             val - 1);
    }

    /* STOPWUCK */
    /* TODO */

    /* PPRE2 */
    val = FIELD_EX32(s->cfgr, CFGR, PPRE2);
    if (val < 0b100) {
        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK2],
                             1, 1);
    } else {
        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK2],
                             1, 1 << (val - 0b11));
    }

    /* PPRE1 */
    val = FIELD_EX32(s->cfgr, CFGR, PPRE1);
    if (val < 0b100) {
        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK1],
                             1, 1);
    } else {
        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PCLK1],
                             1, 1 << (val - 0b11));
    }

    /* HPRE */
    val = FIELD_EX32(s->cfgr, CFGR, HPRE);
    if (val < 0b1000) {
        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_HCLK],
                             1, 1);
    } else {
        clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_HCLK],
                             1, 1 << (val - 0b111));
    }
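    /* E.g. HPRE = 0b1000 selects an AHB prescaler of 2 (1 << (0b1000 - 0b111)). */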

    /* Update SWS */
    val = FIELD_EX32(s->cfgr, CFGR, SW);
    clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_SYSCLK],
                         val);
    s->cfgr &= ~R_CFGR_SWS_MASK;
    s->cfgr |= val << R_CFGR_SWS_SHIFT;
}

static void rcc_update_ahb1enr(Stm32l4x5RccState *s)
{
#define AHB1ENR_SET_ENABLE(_peripheral_name) \
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->ahb1enr, AHB1ENR, _peripheral_name##EN))
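    /*
     * E.g. AHB1ENR_SET_ENABLE(DMA1) expands to
     * clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_DMA1],
     *                      FIELD_EX32(s->ahb1enr, AHB1ENR, DMA1EN));
     */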

    /* DMA2DEN: reserved for STM32L475xx */
    AHB1ENR_SET_ENABLE(TSC);
    AHB1ENR_SET_ENABLE(CRC);
    AHB1ENR_SET_ENABLE(FLASH);
    AHB1ENR_SET_ENABLE(DMA2);
    AHB1ENR_SET_ENABLE(DMA1);
#undef AHB1ENR_SET_ENABLE
}

static void rcc_update_ahb2enr(Stm32l4x5RccState *s)
{
#define AHB2ENR_SET_ENABLE(_peripheral_name) \
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->ahb2enr, AHB2ENR, _peripheral_name##EN))

    AHB2ENR_SET_ENABLE(RNG);
    /* HASHEN: reserved for STM32L475xx */
    AHB2ENR_SET_ENABLE(AES);
    /* DCMIEN: reserved for STM32L475xx */
    AHB2ENR_SET_ENABLE(ADC);
    AHB2ENR_SET_ENABLE(OTGFS);
    /* GPIOIEN: reserved for STM32L475xx */
    AHB2ENR_SET_ENABLE(GPIOA);
    AHB2ENR_SET_ENABLE(GPIOB);
    AHB2ENR_SET_ENABLE(GPIOC);
    AHB2ENR_SET_ENABLE(GPIOD);
    AHB2ENR_SET_ENABLE(GPIOE);
    AHB2ENR_SET_ENABLE(GPIOF);
    AHB2ENR_SET_ENABLE(GPIOG);
    AHB2ENR_SET_ENABLE(GPIOH);
#undef AHB2ENR_SET_ENABLE
}

static void rcc_update_ahb3enr(Stm32l4x5RccState *s)
{
#define AHB3ENR_SET_ENABLE(_peripheral_name) \
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->ahb3enr, AHB3ENR, _peripheral_name##EN))

    AHB3ENR_SET_ENABLE(QSPI);
    AHB3ENR_SET_ENABLE(FMC);
#undef AHB3ENR_SET_ENABLE
}

static void rcc_update_apb1enr(Stm32l4x5RccState *s)
{
#define APB1ENR1_SET_ENABLE(_peripheral_name) \
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->apb1enr1, APB1ENR1, _peripheral_name##EN))
#define APB1ENR2_SET_ENABLE(_peripheral_name) \
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->apb1enr2, APB1ENR2, _peripheral_name##EN))

    /* APB1ENR1 */
    APB1ENR1_SET_ENABLE(LPTIM1);
    APB1ENR1_SET_ENABLE(OPAMP);
    APB1ENR1_SET_ENABLE(DAC1);
    APB1ENR1_SET_ENABLE(PWR);
    /* CAN2: reserved for STM32L4x5 */
    APB1ENR1_SET_ENABLE(CAN1);
    /* CRSEN: reserved for STM32L4x5 */
    APB1ENR1_SET_ENABLE(I2C3);
    APB1ENR1_SET_ENABLE(I2C2);
    APB1ENR1_SET_ENABLE(I2C1);
    APB1ENR1_SET_ENABLE(UART5);
    APB1ENR1_SET_ENABLE(UART4);
    APB1ENR1_SET_ENABLE(USART3);
    APB1ENR1_SET_ENABLE(USART2);
    APB1ENR1_SET_ENABLE(SPI3);
    APB1ENR1_SET_ENABLE(SPI2);
    APB1ENR1_SET_ENABLE(WWDG);
    /* RTCAPB: reserved for STM32L4x5 */
    APB1ENR1_SET_ENABLE(LCD);
    APB1ENR1_SET_ENABLE(TIM7);
    APB1ENR1_SET_ENABLE(TIM6);
    APB1ENR1_SET_ENABLE(TIM5);
    APB1ENR1_SET_ENABLE(TIM4);
    APB1ENR1_SET_ENABLE(TIM3);
    APB1ENR1_SET_ENABLE(TIM2);

    /* APB1ENR2 */
    APB1ENR2_SET_ENABLE(LPTIM2);
    APB1ENR2_SET_ENABLE(SWPMI1);
    /* I2C4EN: reserved for STM32L4x5 */
    APB1ENR2_SET_ENABLE(LPUART1);
#undef APB1ENR1_SET_ENABLE
#undef APB1ENR2_SET_ENABLE
}

static void rcc_update_apb2enr(Stm32l4x5RccState *s)
{
#define APB2ENR_SET_ENABLE(_peripheral_name) \
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->apb2enr, APB2ENR, _peripheral_name##EN))

    APB2ENR_SET_ENABLE(DFSDM1);
    APB2ENR_SET_ENABLE(SAI2);
    APB2ENR_SET_ENABLE(SAI1);
    APB2ENR_SET_ENABLE(TIM17);
    APB2ENR_SET_ENABLE(TIM16);
    APB2ENR_SET_ENABLE(TIM15);
    APB2ENR_SET_ENABLE(USART1);
    APB2ENR_SET_ENABLE(TIM8);
    APB2ENR_SET_ENABLE(SPI1);
    APB2ENR_SET_ENABLE(TIM1);
    APB2ENR_SET_ENABLE(SDMMC1);
    APB2ENR_SET_ENABLE(FW);
    APB2ENR_SET_ENABLE(SYSCFG);
#undef APB2ENR_SET_ENABLE
}

/*
 * The three PLLs share the same register layout, so we can use the same
 * function for all of them.
 * Note: no frequency bounds checking is done here.
 */
static void rcc_update_pllsaixcfgr(Stm32l4x5RccState *s, RccPll pll_id)
{
    uint32_t reg, val;

    switch (pll_id) {
    case RCC_PLL_PLL:
        reg = s->pllcfgr;
        break;
    case RCC_PLL_PLLSAI1:
        reg = s->pllsai1cfgr;
        break;
    case RCC_PLL_PLLSAI2:
        reg = s->pllsai2cfgr;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: Invalid PLL ID: %u\n", __func__, pll_id);
        return;
    }

    /* PLLPDIV */
    val = FIELD_EX32(reg, PLLCFGR, PLLPDIV);
    /* 1 is a reserved value */
    if (val == 0) {
        /* Get PLLP value */
        val = FIELD_EX32(reg, PLLCFGR, PLLP);
        pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_P,
                                (val ? 17 : 7));
    } else if (val > 1) {
        pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_P,
                                val);
    }
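    /*
     * I.e. when PLLPDIV is 0, the single PLLP bit selects a fixed /7 or /17
     * divider; a PLLPDIV value greater than 1 is used as the divider directly.
     */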

    /* PLLR */
    val = FIELD_EX32(reg, PLLCFGR, PLLR);
    pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_R,
                            2 * (val + 1));
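    /* E.g. PLLR = 0b00 gives a /2 divider and PLLR = 0b11 gives /8. */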

    /* PLLREN */
    val = FIELD_EX32(reg, PLLCFGR, PLLREN);
    pll_set_channel_enable(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_R, val);

    /* PLLQ */
    val = FIELD_EX32(reg, PLLCFGR, PLLQ);
    pll_set_channel_divider(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_Q,
                            2 * (val + 1));

    /* PLLQEN */
    val = FIELD_EX32(reg, PLLCFGR, PLLQEN);
    pll_set_channel_enable(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_Q, val);

    /* PLLPEN */
    val = FIELD_EX32(reg, PLLCFGR, PLLPEN);
    pll_set_channel_enable(&s->plls[pll_id], RCC_PLL_COMMON_CHANNEL_P, val);

    /* PLLN */
    val = FIELD_EX32(reg, PLLCFGR, PLLN);
    pll_set_vco_multiplier(&s->plls[pll_id], val);
}

static void rcc_update_pllcfgr(Stm32l4x5RccState *s)
{
    int val;

    /* Use common layout */
    rcc_update_pllsaixcfgr(s, RCC_PLL_PLL);

    /* Fetch specific fields for pllcfgr */

    /* PLLM */
    val = FIELD_EX32(s->pllcfgr, PLLCFGR, PLLM);
    clock_mux_set_factor(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], 1, (val + 1));

    /* PLLSRC */
    val = FIELD_EX32(s->pllcfgr, PLLCFGR, PLLSRC);
    if (val == 0) {
        clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], false);
    } else {
        clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], val - 1);
        clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT], true);
    }
}

static void rcc_update_ccipr(Stm32l4x5RccState *s)
{
#define CCIPR_SET_SOURCE(_peripheral_name) \
    clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_##_peripheral_name], \
        FIELD_EX32(s->ccipr, CCIPR, _peripheral_name##SEL))

    CCIPR_SET_SOURCE(DFSDM1);
    CCIPR_SET_SOURCE(SWPMI1);
    CCIPR_SET_SOURCE(ADC);
    CCIPR_SET_SOURCE(CLK48);
    CCIPR_SET_SOURCE(SAI2);
    CCIPR_SET_SOURCE(SAI1);
    CCIPR_SET_SOURCE(LPTIM2);
    CCIPR_SET_SOURCE(LPTIM1);
    CCIPR_SET_SOURCE(I2C3);
    CCIPR_SET_SOURCE(I2C2);
    CCIPR_SET_SOURCE(I2C1);
    CCIPR_SET_SOURCE(LPUART1);
    CCIPR_SET_SOURCE(UART5);
    CCIPR_SET_SOURCE(UART4);
    CCIPR_SET_SOURCE(USART3);
    CCIPR_SET_SOURCE(USART2);
    CCIPR_SET_SOURCE(USART1);
#undef CCIPR_SET_SOURCE
}

static void rcc_update_bdcr(Stm32l4x5RccState *s)
{
    int val;

    /* LSCOSEL */
    val = FIELD_EX32(s->bdcr, BDCR, LSCOSEL);
    clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_LSCO], val);

    val = FIELD_EX32(s->bdcr, BDCR, LSCOEN);
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_LSCO], val);

    /* BDRST */

    /*
     * The documentation is not clear whether the RTCEN flag disables the RTC
     * and the LCD common mux or whether it only affects the RTC.
     * As the LCDEN flag exists, we assume here that it only affects the RTC.
     */
    val = FIELD_EX32(s->bdcr, BDCR, RTCEN);
    clock_mux_set_enable(&s->clock_muxes[RCC_CLOCK_MUX_RTC], val);

    /* LCD and RTC share the same clock */
    val = FIELD_EX32(s->bdcr, BDCR, RTCSEL);
    clock_mux_set_source(&s->clock_muxes[RCC_CLOCK_MUX_LCD_AND_RTC_COMMON], val);

    /* LSECSSON */
    /* LSEDRV[1:0] */
    /* LSEBYP */

    /* LSEON: Update LSERDY at the same time */
    val = FIELD_EX32(s->bdcr, BDCR, LSEON);
    if (val) {
        clock_update_hz(s->lse_crystal, LSE_FRQ);
        s->bdcr |= R_BDCR_LSERDY_MASK;
        if (s->cier & R_CIER_LSERDYIE_MASK) {
            s->cifr |= R_CIFR_LSERDYF_MASK;
        }
    } else {
        clock_update(s->lse_crystal, 0);
        s->bdcr &= ~R_BDCR_LSERDY_MASK;
    }
    rcc_update_irq(s);
}

static void rcc_update_csr(Stm32l4x5RccState *s)
{
    int val;

    /* Reset flags: Not implemented */
    /* MSISRANGE: Not implemented after reset */

    /* LSION: Update LSIRDY at the same time */
    val = FIELD_EX32(s->csr, CSR, LSION);
    if (val) {
        clock_update_hz(s->lsi_rc, LSI_FRQ);
        s->csr |= R_CSR_LSIRDY_MASK;
        if (s->cier & R_CIER_LSIRDYIE_MASK) {
            s->cifr |= R_CIFR_LSIRDYF_MASK;
        }
    } else {
        /*
         * TODO: Handle when the LSI is set independently of LSION.
         * E.g. when the LSI is set by the RTC.
         * See the reference manual for more details.
         */
        clock_update(s->lsi_rc, 0);
        s->csr &= ~R_CSR_LSIRDY_MASK;
    }
    rcc_update_irq(s);
}

static void stm32l4x5_rcc_reset_hold(Object *obj, ResetType type)
{
    Stm32l4x5RccState *s = STM32L4X5_RCC(obj);

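    /* 0x63 = MSION | MSIRDY | MSIRANGE = 0b0110, i.e. the MSI on at 4 MHz */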
    s->cr = 0x00000063;
    /*
     * Factory-programmed calibration data
     * From the reference manual: 0x10XX 00XX
     * Value taken from a real card.
     */
    s->icscr = 0x106E0082;
    s->cfgr = 0x0;
    s->pllcfgr = 0x00001000;
    s->pllsai1cfgr = 0x00001000;
    s->pllsai2cfgr = 0x00001000;
    s->cier = 0x0;
    s->cifr = 0x0;
    s->ahb1rstr = 0x0;
    s->ahb2rstr = 0x0;
    s->ahb3rstr = 0x0;
    s->apb1rstr1 = 0x0;
    s->apb1rstr2 = 0x0;
    s->apb2rstr = 0x0;
    s->ahb1enr = 0x00000100;
    s->ahb2enr = 0x0;
    s->ahb3enr = 0x0;
    s->apb1enr1 = 0x0;
    s->apb1enr2 = 0x0;
    s->apb2enr = 0x0;
    s->ahb1smenr = 0x00011303;
    s->ahb2smenr = 0x000532FF;
    s->ahb3smenr = 0x00000101;
    s->apb1smenr1 = 0xF2FECA3F;
    s->apb1smenr2 = 0x00000025;
    s->apb2smenr = 0x01677C01;
    s->ccipr = 0x0;
    s->bdcr = 0x0;
    s->csr = 0x0C000600;
}

static uint64_t stm32l4x5_rcc_read(void *opaque, hwaddr addr,
                                   unsigned int size)
{
    Stm32l4x5RccState *s = opaque;
    uint64_t retvalue = 0;

    switch (addr) {
    case A_CR:
        retvalue = s->cr;
        break;
    case A_ICSCR:
        retvalue = s->icscr;
        break;
    case A_CFGR:
        retvalue = s->cfgr;
        break;
    case A_PLLCFGR:
        retvalue = s->pllcfgr;
        break;
    case A_PLLSAI1CFGR:
        retvalue = s->pllsai1cfgr;
        break;
    case A_PLLSAI2CFGR:
        retvalue = s->pllsai2cfgr;
        break;
    case A_CIER:
        retvalue = s->cier;
        break;
    case A_CIFR:
        retvalue = s->cifr;
        break;
    case A_CICR:
        /* CICR is write only, return the reset value = 0 */
        break;
    case A_AHB1RSTR:
        retvalue = s->ahb1rstr;
        break;
    case A_AHB2RSTR:
        retvalue = s->ahb2rstr;
        break;
    case A_AHB3RSTR:
        retvalue = s->ahb3rstr;
        break;
    case A_APB1RSTR1:
        retvalue = s->apb1rstr1;
        break;
    case A_APB1RSTR2:
        retvalue = s->apb1rstr2;
        break;
    case A_APB2RSTR:
        retvalue = s->apb2rstr;
        break;
    case A_AHB1ENR:
        retvalue = s->ahb1enr;
        break;
    case A_AHB2ENR:
        retvalue = s->ahb2enr;
        break;
    case A_AHB3ENR:
        retvalue = s->ahb3enr;
        break;
    case A_APB1ENR1:
        retvalue = s->apb1enr1;
        break;
    case A_APB1ENR2:
        retvalue = s->apb1enr2;
        break;
    case A_APB2ENR:
        retvalue = s->apb2enr;
        break;
    case A_AHB1SMENR:
        retvalue = s->ahb1smenr;
        break;
    case A_AHB2SMENR:
        retvalue = s->ahb2smenr;
        break;
    case A_AHB3SMENR:
        retvalue = s->ahb3smenr;
        break;
    case A_APB1SMENR1:
        retvalue = s->apb1smenr1;
        break;
    case A_APB1SMENR2:
        retvalue = s->apb1smenr2;
        break;
    case A_APB2SMENR:
        retvalue = s->apb2smenr;
        break;
    case A_CCIPR:
        retvalue = s->ccipr;
        break;
    case A_BDCR:
        retvalue = s->bdcr;
        break;
    case A_CSR:
        retvalue = s->csr;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr);
        break;
    }

    trace_stm32l4x5_rcc_read(addr, retvalue);
    return retvalue;
}

static void stm32l4x5_rcc_write(void *opaque, hwaddr addr,
                                uint64_t val64, unsigned int size)
{
    Stm32l4x5RccState *s = opaque;
    uint32_t previous_value = 0;
    const uint32_t value = val64;

    trace_stm32l4x5_rcc_write(addr, value);
    switch (addr) {
    case A_CR:
        previous_value = s->cr;
        s->cr = (s->cr & CR_READ_SET_MASK) |
                (value & (CR_READ_SET_MASK | ~CR_READ_ONLY_MASK));
        rcc_update_cr_register(s, previous_value);
        break;
    case A_ICSCR:
        s->icscr = value & ~ICSCR_READ_ONLY_MASK;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for ICSCR\n", __func__);
        break;
    case A_CFGR:
        s->cfgr = value & ~CFGR_READ_ONLY_MASK;
        rcc_update_cfgr_register(s);
        break;
    case A_PLLCFGR:
        s->pllcfgr = value;
        rcc_update_pllcfgr(s);
        break;
    case A_PLLSAI1CFGR:
        s->pllsai1cfgr = value;
        rcc_update_pllsaixcfgr(s, RCC_PLL_PLLSAI1);
        break;
    case A_PLLSAI2CFGR:
        s->pllsai2cfgr = value;
        rcc_update_pllsaixcfgr(s, RCC_PLL_PLLSAI2);
        break;
    case A_CIER:
        s->cier = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for CIER\n", __func__);
        break;
    case A_CIFR:
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: Write attempt into read-only register (CIFR) 0x%"PRIx32"\n",
            __func__, value);
        break;
    case A_CICR:
        /* Clear interrupt flags by writing a 1 to the CICR register */
        s->cifr &= ~value;
        rcc_update_irq(s);
        break;
    /* Reset behaviors are not implemented */
    case A_AHB1RSTR:
        s->ahb1rstr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for AHB1RSTR\n", __func__);
        break;
    case A_AHB2RSTR:
        s->ahb2rstr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for AHB2RSTR\n", __func__);
        break;
    case A_AHB3RSTR:
        s->ahb3rstr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for AHB3RSTR\n", __func__);
        break;
    case A_APB1RSTR1:
        s->apb1rstr1 = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for APB1RSTR1\n", __func__);
        break;
    case A_APB1RSTR2:
        s->apb1rstr2 = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for APB1RSTR2\n", __func__);
        break;
    case A_APB2RSTR:
        s->apb2rstr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for APB2RSTR\n", __func__);
        break;
    case A_AHB1ENR:
        s->ahb1enr = value;
        rcc_update_ahb1enr(s);
        break;
    case A_AHB2ENR:
        s->ahb2enr = value;
        rcc_update_ahb2enr(s);
        break;
    case A_AHB3ENR:
        s->ahb3enr = value;
        rcc_update_ahb3enr(s);
        break;
    case A_APB1ENR1:
        s->apb1enr1 = value;
        rcc_update_apb1enr(s);
        break;
    case A_APB1ENR2:
        s->apb1enr2 = value;
        rcc_update_apb1enr(s);
        break;
    case A_APB2ENR:
        s->apb2enr = (s->apb2enr & APB2ENR_READ_SET_MASK) | value;
        rcc_update_apb2enr(s);
        break;
    /* Behaviors for Sleep and Stop modes are not implemented */
    case A_AHB1SMENR:
        s->ahb1smenr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for AHB1SMENR\n", __func__);
        break;
    case A_AHB2SMENR:
        s->ahb2smenr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for AHB2SMENR\n", __func__);
        break;
    case A_AHB3SMENR:
        s->ahb3smenr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for AHB3SMENR\n", __func__);
        break;
    case A_APB1SMENR1:
        s->apb1smenr1 = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for APB1SMENR1\n", __func__);
        break;
    case A_APB1SMENR2:
        s->apb1smenr2 = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for APB1SMENR2\n", __func__);
        break;
    case A_APB2SMENR:
        s->apb2smenr = value;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Side-effects not implemented for APB2SMENR\n", __func__);
        break;
    case A_CCIPR:
        s->ccipr = value;
        rcc_update_ccipr(s);
        break;
    case A_BDCR:
        s->bdcr = value & ~BDCR_READ_ONLY_MASK;
        rcc_update_bdcr(s);
        break;
    case A_CSR:
        s->csr = value & ~CSR_READ_ONLY_MASK;
        rcc_update_csr(s);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Bad offset 0x%"HWADDR_PRIx"\n", __func__, addr);
    }
}

static const MemoryRegionOps stm32l4x5_rcc_ops = {
    .read = stm32l4x5_rcc_read,
    .write = stm32l4x5_rcc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .max_access_size = 4,
        .min_access_size = 4,
        .unaligned = false
    },
    .impl = {
        .max_access_size = 4,
        .min_access_size = 4,
        .unaligned = false
    },
};

static const ClockPortInitArray stm32l4x5_rcc_clocks = {
    QDEV_CLOCK_IN(Stm32l4x5RccState, hsi16_rc, NULL, 0),
    QDEV_CLOCK_IN(Stm32l4x5RccState, msi_rc, NULL, 0),
    QDEV_CLOCK_IN(Stm32l4x5RccState, hse, NULL, 0),
    QDEV_CLOCK_IN(Stm32l4x5RccState, lsi_rc, NULL, 0),
    QDEV_CLOCK_IN(Stm32l4x5RccState, lse_crystal, NULL, 0),
    QDEV_CLOCK_IN(Stm32l4x5RccState, sai1_extclk, NULL, 0),
    QDEV_CLOCK_IN(Stm32l4x5RccState, sai2_extclk, NULL, 0),
    QDEV_CLOCK_END
};

static void stm32l4x5_rcc_init(Object *obj)
{
    Stm32l4x5RccState *s = STM32L4X5_RCC(obj);
    size_t i;

    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq);

    memory_region_init_io(&s->mmio, obj, &stm32l4x5_rcc_ops, s,
                          TYPE_STM32L4X5_RCC, 0x400);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);

    qdev_init_clocks(DEVICE(s), stm32l4x5_rcc_clocks);

    for (i = 0; i < RCC_NUM_PLL; i++) {
        object_initialize_child(obj, PLL_INIT_INFO[i].name,
                                &s->plls[i], TYPE_RCC_PLL);
        set_pll_init_info(&s->plls[i], i);
    }

    for (i = 0; i < RCC_NUM_CLOCK_MUX; i++) {
        char *alias;

        object_initialize_child(obj, CLOCK_MUX_INIT_INFO[i].name,
                                &s->clock_muxes[i],
                                TYPE_RCC_CLOCK_MUX);
        set_clock_mux_init_info(&s->clock_muxes[i], i);

        if (!CLOCK_MUX_INIT_INFO[i].hidden) {
            /* Expose muxes output as RCC outputs */
            alias = g_strdup_printf("%s-out", CLOCK_MUX_INIT_INFO[i].name);
            qdev_alias_clock(DEVICE(&s->clock_muxes[i]), "out", DEVICE(obj), alias);
            g_free(alias);
        }
    }
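    /*
     * 0 Hz clock backing the RCC_CLOCK_MUX_SRC_GND input, i.e. the source
     * used by mux slots that have no real clock behind them.
     */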
    s->gnd = clock_new(obj, "gnd");
}

static void connect_mux_sources(Stm32l4x5RccState *s,
                                RccClockMuxState *mux,
                                const RccClockMuxSource *clk_mapping)
{
    size_t i;

    Clock * const CLK_SRC_MAPPING[] = {
        [RCC_CLOCK_MUX_SRC_GND] = s->gnd,
        [RCC_CLOCK_MUX_SRC_HSI] = s->hsi16_rc,
        [RCC_CLOCK_MUX_SRC_HSE] = s->hse,
        [RCC_CLOCK_MUX_SRC_MSI] = s->msi_rc,
        [RCC_CLOCK_MUX_SRC_LSI] = s->lsi_rc,
        [RCC_CLOCK_MUX_SRC_LSE] = s->lse_crystal,
        [RCC_CLOCK_MUX_SRC_SAI1_EXTCLK] = s->sai1_extclk,
        [RCC_CLOCK_MUX_SRC_SAI2_EXTCLK] = s->sai2_extclk,
        [RCC_CLOCK_MUX_SRC_PLL] =
            s->plls[RCC_PLL_PLL].channels[RCC_PLL_CHANNEL_PLLCLK],
        [RCC_CLOCK_MUX_SRC_PLLSAI1] =
            s->plls[RCC_PLL_PLLSAI1].channels[RCC_PLLSAI1_CHANNEL_PLLSAI1CLK],
        [RCC_CLOCK_MUX_SRC_PLLSAI2] =
            s->plls[RCC_PLL_PLLSAI2].channels[RCC_PLLSAI2_CHANNEL_PLLSAI2CLK],
        [RCC_CLOCK_MUX_SRC_PLLSAI3] =
            s->plls[RCC_PLL_PLL].channels[RCC_PLL_CHANNEL_PLLSAI3CLK],
        [RCC_CLOCK_MUX_SRC_PLL48M1] =
            s->plls[RCC_PLL_PLL].channels[RCC_PLL_CHANNEL_PLL48M1CLK],
        [RCC_CLOCK_MUX_SRC_PLL48M2] =
            s->plls[RCC_PLL_PLLSAI1].channels[RCC_PLLSAI1_CHANNEL_PLL48M2CLK],
        [RCC_CLOCK_MUX_SRC_PLLADC1] =
            s->plls[RCC_PLL_PLLSAI1].channels[RCC_PLLSAI1_CHANNEL_PLLADC1CLK],
        [RCC_CLOCK_MUX_SRC_PLLADC2] =
            s->plls[RCC_PLL_PLLSAI2].channels[RCC_PLLSAI2_CHANNEL_PLLADC2CLK],
        [RCC_CLOCK_MUX_SRC_SYSCLK] = s->clock_muxes[RCC_CLOCK_MUX_SYSCLK].out,
        [RCC_CLOCK_MUX_SRC_HCLK] = s->clock_muxes[RCC_CLOCK_MUX_HCLK].out,
        [RCC_CLOCK_MUX_SRC_PCLK1] = s->clock_muxes[RCC_CLOCK_MUX_PCLK1].out,
        [RCC_CLOCK_MUX_SRC_PCLK2] = s->clock_muxes[RCC_CLOCK_MUX_PCLK2].out,
        [RCC_CLOCK_MUX_SRC_HSE_OVER_32] =
            s->clock_muxes[RCC_CLOCK_MUX_HSE_OVER_32].out,
        [RCC_CLOCK_MUX_SRC_LCD_AND_RTC_COMMON] =
            s->clock_muxes[RCC_CLOCK_MUX_LCD_AND_RTC_COMMON].out,
    };

    assert(ARRAY_SIZE(CLK_SRC_MAPPING) == RCC_CLOCK_MUX_SRC_NUMBER);

    for (i = 0; i < RCC_NUM_CLOCK_MUX_SRC; i++) {
        RccClockMuxSource mapping = clk_mapping[i];
        clock_set_source(mux->srcs[i], CLK_SRC_MAPPING[mapping]);
    }
}

static const VMStateDescription vmstate_stm32l4x5_rcc = {
    .name = TYPE_STM32L4X5_RCC,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cr, Stm32l4x5RccState),
        VMSTATE_UINT32(icscr, Stm32l4x5RccState),
        VMSTATE_UINT32(cfgr, Stm32l4x5RccState),
        VMSTATE_UINT32(pllcfgr, Stm32l4x5RccState),
        VMSTATE_UINT32(pllsai1cfgr, Stm32l4x5RccState),
        VMSTATE_UINT32(pllsai2cfgr, Stm32l4x5RccState),
        VMSTATE_UINT32(cier, Stm32l4x5RccState),
        VMSTATE_UINT32(cifr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb1rstr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb2rstr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb3rstr, Stm32l4x5RccState),
        VMSTATE_UINT32(apb1rstr1, Stm32l4x5RccState),
        VMSTATE_UINT32(apb1rstr2, Stm32l4x5RccState),
        VMSTATE_UINT32(apb2rstr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb1enr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb2enr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb3enr, Stm32l4x5RccState),
        VMSTATE_UINT32(apb1enr1, Stm32l4x5RccState),
        VMSTATE_UINT32(apb1enr2, Stm32l4x5RccState),
        VMSTATE_UINT32(apb2enr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb1smenr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb2smenr, Stm32l4x5RccState),
        VMSTATE_UINT32(ahb3smenr, Stm32l4x5RccState),
        VMSTATE_UINT32(apb1smenr1, Stm32l4x5RccState),
        VMSTATE_UINT32(apb1smenr2, Stm32l4x5RccState),
        VMSTATE_UINT32(apb2smenr, Stm32l4x5RccState),
        VMSTATE_UINT32(ccipr, Stm32l4x5RccState),
        VMSTATE_UINT32(bdcr, Stm32l4x5RccState),
        VMSTATE_UINT32(csr, Stm32l4x5RccState),
        VMSTATE_CLOCK(hsi16_rc, Stm32l4x5RccState),
        VMSTATE_CLOCK(msi_rc, Stm32l4x5RccState),
        VMSTATE_CLOCK(hse, Stm32l4x5RccState),
        VMSTATE_CLOCK(lsi_rc, Stm32l4x5RccState),
        VMSTATE_CLOCK(lse_crystal, Stm32l4x5RccState),
        VMSTATE_CLOCK(sai1_extclk, Stm32l4x5RccState),
        VMSTATE_CLOCK(sai2_extclk, Stm32l4x5RccState),
        VMSTATE_END_OF_LIST()
    }
};

static void stm32l4x5_rcc_realize(DeviceState *dev, Error **errp)
{
    Stm32l4x5RccState *s = STM32L4X5_RCC(dev);
    size_t i;

    if (s->hse_frequency < 4000000ULL ||
        s->hse_frequency > 48000000ULL) {
        error_setg(errp,
                   "HSE frequency is outside of the allowed [4-48]MHz range: %" PRIu64 "",
                   s->hse_frequency);
        return;
    }

    for (i = 0; i < RCC_NUM_PLL; i++) {
        RccPllState *pll = &s->plls[i];

        clock_set_source(pll->in, s->clock_muxes[RCC_CLOCK_MUX_PLL_INPUT].out);

        if (!qdev_realize(DEVICE(pll), NULL, errp)) {
            return;
        }
    }

    for (i = 0; i < RCC_NUM_CLOCK_MUX; i++) {
        RccClockMuxState *clock_mux = &s->clock_muxes[i];

        connect_mux_sources(s, clock_mux, CLOCK_MUX_INIT_INFO[i].src_mapping);

        if (!qdev_realize(DEVICE(clock_mux), NULL, errp)) {
            return;
        }
    }

    /*
     * Start clocks after everything is connected
     * to propagate the frequencies along the tree.
     */
    clock_update_hz(s->msi_rc, MSI_DEFAULT_FRQ);
    clock_update_hz(s->sai1_extclk, s->sai1_extclk_frequency);
    clock_update_hz(s->sai2_extclk, s->sai2_extclk_frequency);
    clock_update(s->gnd, 0);
}

static Property stm32l4x5_rcc_properties[] = {
    DEFINE_PROP_UINT64("hse_frequency", Stm32l4x5RccState,
                       hse_frequency, HSE_DEFAULT_FRQ),
    DEFINE_PROP_UINT64("sai1_extclk_frequency", Stm32l4x5RccState,
                       sai1_extclk_frequency, 0),
    DEFINE_PROP_UINT64("sai2_extclk_frequency", Stm32l4x5RccState,
                       sai2_extclk_frequency, 0),
    DEFINE_PROP_END_OF_LIST(),
};
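
/*
 * Illustrative wiring from an SoC/board model (a rough sketch, not taken
 * from this file; the MMIO base address is an assumption):
 *
 *   DeviceState *rcc = qdev_new(TYPE_STM32L4X5_RCC);
 *   qdev_prop_set_uint64(rcc, "hse_frequency", 8000000);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(rcc), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(rcc), 0, 0x40021000);
 */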

static void stm32l4x5_rcc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    assert(ARRAY_SIZE(CLOCK_MUX_INIT_INFO) == RCC_NUM_CLOCK_MUX);

    rc->phases.hold = stm32l4x5_rcc_reset_hold;
    device_class_set_props(dc, stm32l4x5_rcc_properties);
    dc->realize = stm32l4x5_rcc_realize;
    dc->vmsd = &vmstate_stm32l4x5_rcc;
}

static const TypeInfo stm32l4x5_rcc_types[] = {
    {
        .name          = TYPE_STM32L4X5_RCC,
        .parent        = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(Stm32l4x5RccState),
        .instance_init = stm32l4x5_rcc_init,
        .class_init    = stm32l4x5_rcc_class_init,
    }, {
        .name          = TYPE_RCC_CLOCK_MUX,
        .parent        = TYPE_DEVICE,
        .instance_size = sizeof(RccClockMuxState),
        .instance_init = clock_mux_init,
        .class_init    = clock_mux_class_init,
    }, {
        .name          = TYPE_RCC_PLL,
        .parent        = TYPE_DEVICE,
        .instance_size = sizeof(RccPllState),
        .instance_init = pll_init,
        .class_init    = pll_class_init,
    }
};

DEFINE_TYPES(stm32l4x5_rcc_types)