qemu-thread-win32.c (7.6 KB)
/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
  13. #include "qemu-common.h"
  14. #include "qemu-thread.h"
  15. #include <process.h>
  16. #include <assert.h>
  17. #include <limits.h>
  18. static void error_exit(int err, const char *msg)
  19. {
  20. char *pstr;
  21. FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
  22. NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
  23. fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
  24. LocalFree(pstr);
  25. exit(1);
  26. }
  27. void qemu_mutex_init(QemuMutex *mutex)
  28. {
  29. mutex->owner = 0;
  30. InitializeCriticalSection(&mutex->lock);
  31. }
  32. void qemu_mutex_destroy(QemuMutex *mutex)
  33. {
  34. assert(mutex->owner == 0);
  35. DeleteCriticalSection(&mutex->lock);
  36. }
  37. void qemu_mutex_lock(QemuMutex *mutex)
  38. {
  39. EnterCriticalSection(&mutex->lock);
  40. /* Win32 CRITICAL_SECTIONs are recursive. Assert that we're not
  41. * using them as such.
  42. */
  43. assert(mutex->owner == 0);
  44. mutex->owner = GetCurrentThreadId();
  45. }
  46. int qemu_mutex_trylock(QemuMutex *mutex)
  47. {
  48. int owned;
  49. owned = TryEnterCriticalSection(&mutex->lock);
  50. if (owned) {
  51. assert(mutex->owner == 0);
  52. mutex->owner = GetCurrentThreadId();
  53. }
  54. return !owned;
  55. }
  56. void qemu_mutex_unlock(QemuMutex *mutex)
  57. {
  58. assert(mutex->owner == GetCurrentThreadId());
  59. mutex->owner = 0;
  60. LeaveCriticalSection(&mutex->lock);
  61. }
  62. void qemu_cond_init(QemuCond *cond)
  63. {
  64. memset(cond, 0, sizeof(*cond));
  65. cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
  66. if (!cond->sema) {
  67. error_exit(GetLastError(), __func__);
  68. }
  69. cond->continue_event = CreateEvent(NULL, /* security */
  70. FALSE, /* auto-reset */
  71. FALSE, /* not signaled */
  72. NULL); /* name */
  73. if (!cond->continue_event) {
  74. error_exit(GetLastError(), __func__);
  75. }
  76. }
  77. void qemu_cond_destroy(QemuCond *cond)
  78. {
  79. BOOL result;
  80. result = CloseHandle(cond->continue_event);
  81. if (!result) {
  82. error_exit(GetLastError(), __func__);
  83. }
  84. cond->continue_event = 0;
  85. result = CloseHandle(cond->sema);
  86. if (!result) {
  87. error_exit(GetLastError(), __func__);
  88. }
  89. cond->sema = 0;
  90. }
/* Wake exactly one waiter of COND.  Must be called with the associated
 * external mutex held (the comments below rely on that). */
void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by pthread_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing pthread_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
    /* target = waiters - 1: the single woken waiter decrements the count
     * to this value and then signals continue_event (see qemu_cond_wait). */
    cond->target = cond->waiters - 1;
    /* Atomically release one semaphore slice and block until the woken
     * waiter reaches the rendez-vous point. */
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}
/* Wake all waiters of COND.  Must be called with the associated
 * external mutex held. */
void qemu_cond_broadcast(QemuCond *cond)
{
    BOOLEAN result;
    /*
     * As in pthread_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

    /* target == 0: the waiter that decrements cond->waiters to zero is
     * the last one out and signals continue_event (see qemu_cond_wait). */
    cond->target = 0;
    /* Release one semaphore slice per waiter. */
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue.  Each one takes its
     * slice of the semaphore.  Now it's our turn to wait: Since
     * the external mutex is held, no thread can leave cond_wait,
     * yet.  For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice.  OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}
/* Atomically release MUTEX and wait on COND; re-acquire MUTEX before
 * returning.  MUTEX must be held by the calling thread on entry. */
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
/* Start-up parameters passed from qemu_thread_create to
 * win32_start_routine.  Heap-allocated by the creator and freed by the
 * new thread once it has copied the contents. */
struct QemuThreadData {
    QemuThread *thread;             /* caller's QemuThread to fill in */
    void *(*start_routine)(void *); /* thread entry point */
    void *arg;                      /* argument for start_routine */
};
  177. static int qemu_thread_tls_index = TLS_OUT_OF_INDEXES;
/* Trampoline executed as the new thread's entry point: records the
 * QemuThread in TLS, publishes a handle to self, then runs the user's
 * start routine. */
static unsigned __stdcall win32_start_routine(void *arg)
{
    /* Copy the start-up data to the stack and free the heap block
     * allocated by qemu_thread_create. */
    struct QemuThreadData data = *(struct QemuThreadData *) arg;
    QemuThread *thread = data.thread;
    free(arg);
    TlsSetValue(qemu_thread_tls_index, thread);

    /*
     * Use DuplicateHandle instead of assigning thread->thread in the
     * creating thread to avoid races.  It's simpler this way than with
     * synchronization.
     */
    DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                    GetCurrentProcess(), &thread->thread,
                    0, FALSE, DUPLICATE_SAME_ACCESS);

    /* qemu_thread_exit stores the result and calls ExitThread; it does
     * not return. */
    qemu_thread_exit(data.start_routine(data.arg));
    abort();
}
  195. void qemu_thread_exit(void *arg)
  196. {
  197. QemuThread *thread = TlsGetValue(qemu_thread_tls_index);
  198. thread->ret = arg;
  199. CloseHandle(thread->thread);
  200. thread->thread = NULL;
  201. ExitThread(0);
  202. }
/* Lazily allocate the TLS slot used to find the current QemuThread.
 * NOTE(review): the check-then-TlsAlloc sequence is not safe against two
 * threads racing through it simultaneously the first time — presumably
 * all first calls happen on the main thread before others are created;
 * confirm against the callers. */
static inline void qemu_thread_init(void)
{
    if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
        qemu_thread_tls_index = TlsAlloc();
        if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
            error_exit(ERROR_NO_SYSTEM_RESOURCES, __func__);
        }
    }
}
  212. void qemu_thread_create(QemuThread *thread,
  213. void *(*start_routine)(void *),
  214. void *arg)
  215. {
  216. HANDLE hThread;
  217. struct QemuThreadData *data;
  218. qemu_thread_init();
  219. data = qemu_malloc(sizeof *data);
  220. data->thread = thread;
  221. data->start_routine = start_routine;
  222. data->arg = arg;
  223. hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
  224. data, 0, NULL);
  225. if (!hThread) {
  226. error_exit(GetLastError(), __func__);
  227. }
  228. CloseHandle(hThread);
  229. }
/* Bind *THREAD to the calling thread if it is not yet associated with
 * one.  Threads created via qemu_thread_create already have
 * thread->thread set by win32_start_routine, so this only takes effect
 * for the process's main thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    if (!thread->thread) {
        /* In the main thread of the process.  Initialize the QemuThread
           pointer in TLS, and use the dummy GetCurrentThread handle as
           the identifier for qemu_thread_is_self. */
        qemu_thread_init();
        TlsSetValue(qemu_thread_tls_index, thread);
        /* GetCurrentThread returns a pseudo-handle, not a real one;
         * it is used here only as a comparable token. */
        thread->thread = GetCurrentThread();
    }
}
/* Return non-zero if the calling thread is *THREAD.
 * NOTE(review): this compares raw HANDLE values.  It works for handles
 * filled in by win32_start_routine (each thread duplicates exactly one
 * handle for itself) and for the main thread's GetCurrentThread
 * pseudo-handle, but would not hold for handles obtained any other
 * way — confirm all QemuThreads come from those two paths. */
int qemu_thread_is_self(QemuThread *thread)
{
    QemuThread *this_thread = TlsGetValue(qemu_thread_tls_index);
    return this_thread->thread == thread->thread;
}