/*
 * QEMU OpenRISC timer support
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Zhizhou Zhang <etouzh@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
  20. #include "qemu/osdep.h"
  21. #include "cpu.h"
  22. #include "migration/vmstate.h"
  23. #include "qemu/timer.h"
  24. #include "system/reset.h"
#define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */

/* Tick Timer global state to allow all cores to be in sync */
typedef struct OR1KTimerState {
    /* Current (last computed) value of the Tick Timer Count Register. */
    uint32_t ttcr;
    /* TTCR value captured when clk_offset was taken; elapsed ticks since
     * then are added to this to produce the live counter value. */
    uint32_t ttcr_offset;
    /* QEMU_CLOCK_VIRTUAL timestamp (ns) matching ttcr_offset. */
    uint64_t clk_offset;
} OR1KTimerState;

/* Singleton shared by all CPUs; allocated on first cpu_openrisc_clock_init. */
static OR1KTimerState *or1k_timer;
  33. void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val)
  34. {
  35. or1k_timer->ttcr = val;
  36. or1k_timer->ttcr_offset = val;
  37. or1k_timer->clk_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
  38. }
  39. uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
  40. {
  41. return or1k_timer->ttcr;
  42. }
  43. /* Add elapsed ticks to ttcr */
  44. void cpu_openrisc_count_update(OpenRISCCPU *cpu)
  45. {
  46. uint64_t now;
  47. if (!cpu->env.is_counting) {
  48. return;
  49. }
  50. now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
  51. or1k_timer->ttcr = or1k_timer->ttcr_offset +
  52. DIV_ROUND_UP(now - or1k_timer->clk_offset, TIMER_PERIOD);
  53. }
  54. /* Update the next timeout time as difference between ttmr and ttcr */
  55. void cpu_openrisc_timer_update(OpenRISCCPU *cpu)
  56. {
  57. uint32_t wait;
  58. uint64_t now, next;
  59. if (!cpu->env.is_counting) {
  60. return;
  61. }
  62. cpu_openrisc_count_update(cpu);
  63. now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
  64. if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) {
  65. wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1;
  66. wait += cpu->env.ttmr & TTMR_TP;
  67. } else {
  68. wait = (cpu->env.ttmr & TTMR_TP) - (or1k_timer->ttcr & TTMR_TP);
  69. }
  70. next = now + (uint64_t)wait * TIMER_PERIOD;
  71. timer_mod(cpu->env.timer, next);
  72. }
  73. void cpu_openrisc_count_start(OpenRISCCPU *cpu)
  74. {
  75. cpu->env.is_counting = 1;
  76. cpu_openrisc_count_update(cpu);
  77. }
  78. void cpu_openrisc_count_stop(OpenRISCCPU *cpu)
  79. {
  80. timer_del(cpu->env.timer);
  81. cpu_openrisc_count_update(cpu);
  82. cpu->env.is_counting = 0;
  83. }
  84. static void openrisc_timer_cb(void *opaque)
  85. {
  86. OpenRISCCPU *cpu = opaque;
  87. if ((cpu->env.ttmr & TTMR_IE) &&
  88. timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
  89. CPUState *cs = CPU(cpu);
  90. cpu->env.ttmr |= TTMR_IP;
  91. cs->interrupt_request |= CPU_INTERRUPT_TIMER;
  92. }
  93. switch (cpu->env.ttmr & TTMR_M) {
  94. case TIMER_NONE:
  95. break;
  96. case TIMER_INTR:
  97. /* Zero the count by applying a negative offset to the counter */
  98. or1k_timer->ttcr_offset -= (cpu->env.ttmr & TTMR_TP);
  99. break;
  100. case TIMER_SHOT:
  101. cpu_openrisc_count_stop(cpu);
  102. break;
  103. case TIMER_CONT:
  104. break;
  105. }
  106. cpu_openrisc_timer_update(cpu);
  107. qemu_cpu_kick(CPU(cpu));
  108. }
  109. /* Reset the per CPU counter state. */
  110. static void openrisc_count_reset(void *opaque)
  111. {
  112. OpenRISCCPU *cpu = opaque;
  113. if (cpu->env.is_counting) {
  114. cpu_openrisc_count_stop(cpu);
  115. }
  116. cpu->env.ttmr = 0x00000000;
  117. }
  118. /* Reset the global timer state. */
  119. static void openrisc_timer_reset(void *opaque)
  120. {
  121. OpenRISCCPU *cpu = opaque;
  122. cpu_openrisc_count_set(cpu, 0);
  123. }
/*
 * Migration state for the shared tick-timer.  Version 2 added
 * ttcr_offset/clk_offset; field order and IDs are wire format —
 * do not reorder.
 */
static const VMStateDescription vmstate_or1k_timer = {
    .name = "or1k_timer",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(ttcr, OR1KTimerState),
        VMSTATE_UINT32(ttcr_offset, OR1KTimerState),
        VMSTATE_UINT64(clk_offset, OR1KTimerState),
        VMSTATE_END_OF_LIST()
    }
};
  135. void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
  136. {
  137. cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);
  138. qemu_register_reset(openrisc_count_reset, cpu);
  139. if (or1k_timer == NULL) {
  140. or1k_timer = g_new0(OR1KTimerState, 1);
  141. qemu_register_reset(openrisc_timer_reset, cpu);
  142. vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer);
  143. }
  144. }