cputimer.c

/*
 * QEMU OpenRISC timer support
 *
 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
 *                         Zhizhou Zhang <etouzh@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "migration/vmstate.h"
#include "qemu/timer.h"
#include "sysemu/reset.h"

#define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */

/* Tick Timer global state to allow all cores to be in sync */
typedef struct OR1KTimerState {
    uint32_t ttcr;
    uint64_t last_clk;
} OR1KTimerState;

static OR1KTimerState *or1k_timer;
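
/* Set the shared Tick Timer Count Register (TTCR). */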
void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val)
{
    or1k_timer->ttcr = val;
}
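
/* Return the current value of the shared TTCR. */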
uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
{
    return or1k_timer->ttcr;
}

/* Add elapsed ticks to ttcr */
void cpu_openrisc_count_update(OpenRISCCPU *cpu)
{
    uint64_t now;

    if (!cpu->env.is_counting) {
        return;
    }
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    /*
     * Convert elapsed nanoseconds to timer ticks; any remainder shorter
     * than TIMER_PERIOD is dropped by the integer division.
     */
    or1k_timer->ttcr += (uint32_t)((now - or1k_timer->last_clk)
                                   / TIMER_PERIOD);
    or1k_timer->last_clk = now;
}

/* Update the next timeout time as difference between ttmr and ttcr */
void cpu_openrisc_timer_update(OpenRISCCPU *cpu)
{
    uint32_t wait;
    uint64_t now, next;

    if (!cpu->env.is_counting) {
        return;
    }

    cpu_openrisc_count_update(cpu);
    now = or1k_timer->last_clk;

    /*
     * The match value and the counter are compared under the TTMR_TP
     * mask, so if the match point has already been passed we wait for
     * the counter to wrap around before it matches again.
     */
    if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) {
        wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1;
        wait += cpu->env.ttmr & TTMR_TP;
    } else {
        wait = (cpu->env.ttmr & TTMR_TP) - (or1k_timer->ttcr & TTMR_TP);
    }
    next = now + (uint64_t)wait * TIMER_PERIOD;
    timer_mod(cpu->env.timer, next);
}
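
/* Start the Tick Timer: enable counting and fold in elapsed time. */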
void cpu_openrisc_count_start(OpenRISCCPU *cpu)
{
    cpu->env.is_counting = 1;
    cpu_openrisc_count_update(cpu);
}
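
/*
 * Stop the Tick Timer: cancel the pending timeout, fold in the ticks
 * elapsed so far, then disable counting.
 */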
void cpu_openrisc_count_stop(OpenRISCCPU *cpu)
{
    timer_del(cpu->env.timer);
    cpu_openrisc_count_update(cpu);
    cpu->env.is_counting = 0;
}
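
/*
 * Expiry callback for the per-CPU timer: raise the Tick Timer interrupt
 * if enabled, then rearm or stop the timer according to the TTMR mode.
 */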
static void openrisc_timer_cb(void *opaque)
{
    OpenRISCCPU *cpu = opaque;

    if ((cpu->env.ttmr & TTMR_IE) &&
        timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
        CPUState *cs = CPU(cpu);

        cpu->env.ttmr |= TTMR_IP;
        cs->interrupt_request |= CPU_INTERRUPT_TIMER;
    }

    switch (cpu->env.ttmr & TTMR_M) {
    case TIMER_NONE:
        break;
    case TIMER_INTR:
        /* Auto-restart mode: the counter is reset and keeps running. */
        or1k_timer->ttcr = 0;
        break;
    case TIMER_SHOT:
        /* Single-run mode: counting stops once the match is reached. */
        cpu_openrisc_count_stop(cpu);
        break;
    case TIMER_CONT:
        /* Continuous mode: the counter keeps running past the match. */
        break;
    }

    cpu_openrisc_timer_update(cpu);
    qemu_cpu_kick(CPU(cpu));
}

/* Reset the per CPU counter state. */
static void openrisc_count_reset(void *opaque)
{
    OpenRISCCPU *cpu = opaque;

    if (cpu->env.is_counting) {
        cpu_openrisc_count_stop(cpu);
    }
    cpu->env.ttmr = 0x00000000;
}

/* Reset the global timer state. */
static void openrisc_timer_reset(void *opaque)
{
    or1k_timer->ttcr = 0x00000000;
    or1k_timer->last_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
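
/* Migration state for the shared Tick Timer. */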
static const VMStateDescription vmstate_or1k_timer = {
    .name = "or1k_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ttcr, OR1KTimerState),
        VMSTATE_UINT64(last_clk, OR1KTimerState),
        VMSTATE_END_OF_LIST()
    }
};
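
/*
 * Per-CPU timer setup. The shared timer state is allocated, and its
 * reset handler and migration state registered, only once, when the
 * first CPU is initialised.
 */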
void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
{
    cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);
    qemu_register_reset(openrisc_count_reset, cpu);
    if (or1k_timer == NULL) {
        or1k_timer = g_new0(OR1KTimerState, 1);
        qemu_register_reset(openrisc_timer_reset, cpu);
        vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer);
    }
}