qemu-thread-win32.c 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336
  1. /*
  2. * Win32 implementation for mutex/cond/thread functions
  3. *
  4. * Copyright Red Hat, Inc. 2010
  5. *
  6. * Author:
  7. * Paolo Bonzini <pbonzini@redhat.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10. * See the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu-common.h"
  14. #include "qemu-thread.h"
  15. #include <process.h>
  16. #include <assert.h>
  17. #include <limits.h>
  18. static void error_exit(int err, const char *msg)
  19. {
  20. char *pstr;
  21. FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
  22. NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
  23. fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
  24. LocalFree(pstr);
  25. abort();
  26. }
  27. void qemu_mutex_init(QemuMutex *mutex)
  28. {
  29. mutex->owner = 0;
  30. InitializeCriticalSection(&mutex->lock);
  31. }
/* Release the OS resources behind @mutex.  It must not be held. */
void qemu_mutex_destroy(QemuMutex *mutex)
{
    /* Destroying a locked mutex is a caller bug. */
    assert(mutex->owner == 0);
    DeleteCriticalSection(&mutex->lock);
}
/* Acquire @mutex, blocking until it becomes available. */
void qemu_mutex_lock(QemuMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);

    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
     * using them as such: if this thread already held the mutex,
     * owner would be our own TID here, not 0.
     */
    assert(mutex->owner == 0);
    mutex->owner = GetCurrentThreadId();
}
  46. int qemu_mutex_trylock(QemuMutex *mutex)
  47. {
  48. int owned;
  49. owned = TryEnterCriticalSection(&mutex->lock);
  50. if (owned) {
  51. assert(mutex->owner == 0);
  52. mutex->owner = GetCurrentThreadId();
  53. }
  54. return !owned;
  55. }
/* Drop @mutex.  The calling thread must be the current owner. */
void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->owner == GetCurrentThreadId());
    /* Clear owner before leaving the critical section: once it is
     * released, another thread may immediately acquire the lock and
     * store its own TID. */
    mutex->owner = 0;
    LeaveCriticalSection(&mutex->lock);
}
  62. void qemu_cond_init(QemuCond *cond)
  63. {
  64. memset(cond, 0, sizeof(*cond));
  65. cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
  66. if (!cond->sema) {
  67. error_exit(GetLastError(), __func__);
  68. }
  69. cond->continue_event = CreateEvent(NULL, /* security */
  70. FALSE, /* auto-reset */
  71. FALSE, /* not signaled */
  72. NULL); /* name */
  73. if (!cond->continue_event) {
  74. error_exit(GetLastError(), __func__);
  75. }
  76. }
  77. void qemu_cond_destroy(QemuCond *cond)
  78. {
  79. BOOL result;
  80. result = CloseHandle(cond->continue_event);
  81. if (!result) {
  82. error_exit(GetLastError(), __func__);
  83. }
  84. cond->continue_event = 0;
  85. result = CloseHandle(cond->sema);
  86. if (!result) {
  87. error_exit(GetLastError(), __func__);
  88. }
  89. cond->sema = 0;
  90. }
/*
 * Wake at most one thread waiting on @cond.  The caller must hold the
 * mutex it passes to qemu_cond_wait(), as with POSIX condvars.
 */
void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by qemu_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing qemu_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
    cond->target = cond->waiters - 1;
    /* Release one semaphore slice and atomically start waiting on the
     * rendezvous event; the waiter that brings the count down to
     * ->target sets the event (see qemu_cond_wait). */
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}
  115. void qemu_cond_broadcast(QemuCond *cond)
  116. {
  117. BOOLEAN result;
  118. /*
  119. * As in pthread_cond_signal, access to cond->waiters and
  120. * cond->target is locked via the external mutex.
  121. */
  122. if (cond->waiters == 0) {
  123. return;
  124. }
  125. cond->target = 0;
  126. result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
  127. if (!result) {
  128. error_exit(GetLastError(), __func__);
  129. }
  130. /*
  131. * At this point all waiters continue. Each one takes its
  132. * slice of the semaphore. Now it's our turn to wait: Since
  133. * the external mutex is held, no thread can leave cond_wait,
  134. * yet. For this reason, we can be sure that no thread gets
  135. * a chance to eat *more* than one slice. OTOH, it means
  136. * that the last waiter must send us a wake-up.
  137. */
  138. WaitForSingleObject(cond->continue_event, INFINITE);
  139. }
/*
 * Release @mutex, wait until @cond is signaled or broadcast, then
 * re-acquire @mutex before returning.  @mutex must be held on entry.
 */
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers thundering herd.  So goes life.
     *
     * Decrease waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        /* We are the waiter the signaler is waiting for: release it. */
        SetEvent(cond->continue_event);
    }
    qemu_mutex_lock(mutex);
}
/* Per-thread bookkeeping shared between the creator and the new thread. */
struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);  /* user entry point */
    void *arg;                       /* argument for start_routine */
    short mode;                      /* QEMU_THREAD_DETACHED or joinable */

    /* Only used for joinable threads. */
    bool exited;                     /* set by qemu_thread_exit() */
    void *ret;                       /* value handed to qemu_thread_exit() */
    CRITICAL_SECTION cs;             /* orders 'exited' against get_handle() */
};
  182. static int qemu_thread_tls_index = TLS_OUT_OF_INDEXES;
/*
 * Thread entry trampoline handed to _beginthreadex.  Copies what it
 * needs out of the QemuThreadData, frees the data immediately for
 * detached threads (nobody will ever join them), records the data —
 * or NULL — in TLS for qemu_thread_exit(), then runs the user routine.
 */
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    if (data->mode == QEMU_THREAD_DETACHED) {
        /* Freed here, so grab start_routine/arg first (done above). */
        g_free(data);
        data = NULL;
    }
    TlsSetValue(qemu_thread_tls_index, data);
    qemu_thread_exit(start_routine(thread_arg));
    abort();  /* not reached: qemu_thread_exit() ends the thread */
}
/*
 * Terminate the calling thread.  For joinable threads, @arg is stored
 * so qemu_thread_join() can return it; detached threads have a NULL
 * TLS value (see win32_start_routine) and @arg is discarded.
 */
void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = TlsGetValue(qemu_thread_tls_index);

    if (data) {
        assert(data->mode != QEMU_THREAD_DETACHED);
        data->ret = arg;
        /* The critical section orders this store of 'exited' against
         * qemu_thread_get_handle(), which reads it under the same lock. */
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    }
    _endthreadex(0);
}
/*
 * Wait for @thread to finish and return the value it passed to
 * qemu_thread_exit().  Returns NULL for detached threads (their
 * QemuThread carries no data pointer).  Frees the thread's data.
 */
void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        /* Detached thread: nothing to join. */
        return NULL;
    }
    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    /* If handle was NULL the thread has already set 'exited' (and
     * therefore 'ret') under data->cs, so reading ret here is safe. */
    ret = data->ret;
    assert(data->mode != QEMU_THREAD_DETACHED);
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}
/*
 * Lazily allocate the TLS slot used to locate the current thread's
 * QemuThreadData.  NOTE(review): the check-then-allocate sequence is
 * not thread-safe; presumably the first call happens before any
 * secondary threads exist — confirm against callers.
 */
static inline void qemu_thread_init(void)
{
    if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
        qemu_thread_tls_index = TlsAlloc();
        if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
            error_exit(ERROR_NO_SYSTEM_RESOURCES, __func__);
        }
    }
}
/*
 * Start a new thread running @start_routine(@arg).  @mode selects
 * QEMU_THREAD_DETACHED or joinable behavior; only joinable threads
 * keep their QemuThreadData (and its critical section) alive for a
 * later qemu_thread_join().
 */
void qemu_thread_create(QemuThread *thread,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    qemu_thread_init();
    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    /* The returned handle is deliberately closed: threads are later
     * re-identified by TID (see qemu_thread_get_handle), so copies of
     * the QemuThread never hold a handle that could leak. */
    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        /* NOTE(review): _beginthreadex reports failure via errno;
         * GetLastError() may not correspond — confirm. */
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = (mode == QEMU_THREAD_DETACHED) ? NULL : data;
}
  267. void qemu_thread_get_self(QemuThread *thread)
  268. {
  269. qemu_thread_init();
  270. thread->data = TlsGetValue(qemu_thread_tls_index);
  271. thread->tid = GetCurrentThreadId();
  272. }
/*
 * Return a fresh SYNCHRONIZE-capable handle for @thread, or NULL if
 * the thread has already exited (or is detached).  The caller owns
 * the handle and must CloseHandle() it.
 */
HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (!data) {
        /* Detached threads carry no data pointer. */
        return NULL;
    }

    assert(data->mode != QEMU_THREAD_DETACHED);
    /* Take data->cs so that 'exited' cannot flip between the check and
     * OpenThread; once the thread has exited its TID may be reused, so
     * opening by TID would be racy without this. */
    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
                            thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}
  292. int qemu_thread_is_self(QemuThread *thread)
  293. {
  294. return GetCurrentThreadId() == thread->tid;
  295. }