cpus-common.c

/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

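/* Return the next index above every cpu_index currently on the CPU list,
 * and record that indexes are now auto-assigned.  Called with
 * qemu_cpu_list_lock held.
 */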
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

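/* Append @cpu to the global CPU list, auto-assigning a cpu_index if the
 * caller has not provided one.  Mixing auto-assigned and caller-provided
 * indexes is not allowed.
 */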
void cpu_list_add(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
}

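/* Remove @cpu from the global CPU list and release its index.  A no-op
 * if the CPU was never added to the list.
 */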
void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
}

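/* Return the CPUState with the given cpu_index, or NULL if there is none. */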
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

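/* Append @wi to @cpu's work list under work_mutex, then kick the vCPU so
 * that it leaves the execution loop and processes the item.
 */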
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

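/* Run @func on @cpu and wait for it to finish.  If called from @cpu's own
 * thread, the function runs immediately; otherwise the work item is queued
 * and this thread sleeps on qemu_work_cond, releasing @mutex while it
 * waits.
 */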
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}

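/* Schedule @func to run asynchronously on @cpu; the heap-allocated work
 * item is freed once the function has run.
 */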
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    atomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (atomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    atomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->in_exclusive_context = true;
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    current_cpu->in_exclusive_context = false;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    atomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    atomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            atomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            atomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    atomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(atomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            atomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}

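/* Schedule @func to run asynchronously on @cpu inside an exclusive
 * section, i.e. with every other vCPU stopped; the work item is freed
 * once the function has run.
 */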
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}

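/* A minimal usage sketch, assuming a hypothetical flush_one_cpu() callback
 * and flush_all_cpus() helper (neither is part of this file): because the
 * items are queued with async_safe_run_on_cpu, each callback runs between
 * start_exclusive and end_exclusive, with every other vCPU stopped.
 *
 *     static void flush_one_cpu(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         // safe to touch cross-CPU state here
 *     }
 *
 *     void flush_all_cpus(void)
 *     {
 *         CPUState *cpu;
 *         CPU_FOREACH(cpu) {
 *             async_safe_run_on_cpu(cpu, flush_one_cpu, RUN_ON_CPU_NULL);
 *         }
 *     }
 */

/* Drain @cpu's work list.  Called from the vCPU thread; exclusive items
 * run outside the BQL, between start_exclusive and end_exclusive.
 */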
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}