/* simple.c */
/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#ifndef _WIN32
#include <pthread.h>
#endif
#include "qemu/timer.h"
#include "trace/control.h"
#include "trace/simple.h"

/** Trace file header event ID, picked to avoid conflict with real event IDs */
#define HEADER_EVENT_ID (~(uint64_t)0)

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 4

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static CompatGMutex trace_lock;
static CompatGCond trace_available_cond;
static CompatGCond trace_empty_cond;

static bool trace_available;
static bool trace_writeout_enabled;

enum {
    TRACE_BUF_LEN = 4096 * 64,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

uint8_t trace_buf[TRACE_BUF_LEN];
static volatile gint trace_idx;
static unsigned int writeout_idx;
static volatile gint dropped_events;
static uint32_t trace_pid;
static FILE *trace_fp;
static char *trace_file_name;

#define TRACE_RECORD_TYPE_MAPPING 0
#define TRACE_RECORD_TYPE_EVENT   1

/* Trace buffer entry */
typedef struct {
    uint64_t event;      /* event ID value */
    uint64_t timestamp_ns;
    uint32_t length;     /* in bytes */
    uint32_t pid;
    uint64_t arguments[];
} TraceRecord;

typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic;    /* HEADER_MAGIC */
    uint64_t header_version;  /* HEADER_VERSION */
} TraceLogHeader;

static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
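
/**
 * Zero a range of the trace buffer
 *
 * @idx Trace buffer index of the first byte to clear
 * @len Number of bytes to clear, wrapping around the buffer if needed
 */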
static void clear_buffer_range(unsigned int idx, size_t len)
{
    uint32_t num = 0;
    while (num < len) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = 0;
        num++;
    }
}

/**
 * Read a trace record from the trace buffer
 *
 * @idx Trace buffer index
 * @record Trace record to fill
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if it's a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;

    /* clear the trace buffer range for consumed record otherwise any byte
     * with its MSB set may be considered as a valid event id when the writer
     * thread crosses this range of buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}

/**
 * Kick writeout thread
 *
 * @wait Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(&trace_available_cond);

    if (wait) {
        g_cond_wait(&trace_empty_cond, &trace_lock);
    }

    g_mutex_unlock(&trace_lock);
}
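
/**
 * Block the writeout thread until records are available
 *
 * Also signals trace_empty_cond on each iteration so that callers of
 * flush_trace_file(true) waiting for an empty buffer are woken up.
 */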
static void wait_for_trace_records_available(void)
{
    g_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(&trace_empty_cond);
        g_cond_wait(&trace_available_cond, &trace_lock);
    }
    trace_available = false;
    g_mutex_unlock(&trace_lock);
}
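
/**
 * Thread function that writes trace records to the trace file
 *
 * Emits a special DROPPED_EVENT_ID record whenever events were dropped,
 * then drains all valid records from the trace buffer.
 */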
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    int dropped_count;
    size_t unused __attribute__ ((unused));
    uint64_t type = TRACE_RECORD_TYPE_EVENT;

    for (;;) {
        wait_for_trace_records_available();

        if (g_atomic_int_get(&dropped_events)) {
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t);
            dropped.rec.pid = trace_pid;
            do {
                dropped_count = g_atomic_int_get(&dropped_events);
            } while (!g_atomic_int_compare_and_exchange(&dropped_events,
                                                        dropped_count, 0));
            dropped.rec.arguments[0] = dropped_count;
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* don't use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}
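
/**
 * Append a 64-bit value to a trace record's argument data
 */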
void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}
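
/**
 * Append a length-prefixed string to a trace record's argument data
 */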
void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    /* Write string length first */
    rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
    /* Write actual string now */
    rec->rec_off = write_to_buffer(rec->rec_off, (void*)s, slen);
}
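
/**
 * Reserve space in the trace buffer and write the record header
 *
 * @rec Record state used by trace_record_write_*() and trace_record_finish()
 * @event Trace event ID
 * @datasize Size of the argument data in bytes
 *
 * Returns 0 on success, -ENOSPC if the trace buffer is full and the event
 * was dropped.
 */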
int trace_record_start(TraceBufferRecord *rec, uint32_t event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t event_u64 = event;
    uint64_t timestamp_ns = get_clock();

    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace Buffer Full, Event dropped ! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));
    rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}
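
/**
 * Copy bytes out of the circular trace buffer
 *
 * @idx Trace buffer index to start reading from, wrapping around
 * @dataptr Destination buffer
 * @size Number of bytes to copy
 */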
static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        data_ptr[x++] = trace_buf[idx++];
    }
}
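
/**
 * Copy bytes into the circular trace buffer
 *
 * @idx Trace buffer index to start writing at, wrapping around
 * @dataptr Source buffer
 * @size Number of bytes to copy
 *
 * Returns the buffer index just past the written data.
 */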
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = data_ptr[x++];
    }
    return idx; /* most callers want to know where to write next */
}
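
/**
 * Mark a trace record as complete and valid
 *
 * Kicks the writeout thread if the amount of unwritten data exceeds
 * TRACE_BUF_FLUSH_THRESHOLD.
 */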
void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    if (((unsigned int)g_atomic_int_get(&trace_idx) - writeout_idx)
        > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
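
/**
 * Write the event ID to event name mapping records to the trace file
 *
 * Returns 0 on success, -1 on write failure.
 */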
static int st_write_event_mapping(void)
{
    uint64_t type = TRACE_RECORD_TYPE_MAPPING;
    TraceEventIter iter;
    TraceEvent *ev;

    trace_event_iter_init(&iter, NULL);
    while ((ev = trace_event_iter_next(&iter)) != NULL) {
        uint64_t id = trace_event_get_id(ev);
        const char *name = trace_event_get_name(ev);
        uint32_t len = strlen(name);
        if (fwrite(&type, sizeof(type), 1, trace_fp) != 1 ||
            fwrite(&id, sizeof(id), 1, trace_fp) != 1 ||
            fwrite(&len, sizeof(len), 1, trace_fp) != 1 ||
            fwrite(name, len, 1, trace_fp) != 1) {
            return -1;
        }
    }

    return 0;
}
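
/**
 * Enable or disable trace file output
 *
 * Enabling opens the trace file and writes the log header and the event ID
 * mapping before resuming writeout; disabling halts writeout and closes the
 * file.
 */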
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1 ||
            st_write_event_mapping() < 0) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}

/**
 * Set the name of a trace file
 *
 * @file The trace file name or NULL for the default name-<pid> set at
 *       config time
 */
void st_set_trace_file(const char *file)
{
    st_set_trace_file_enabled(false);

    g_free(trace_file_name);

    if (!file) {
        /* Type cast needed for Windows where getpid() returns an int. */
        trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, (pid_t)getpid());
    } else {
        trace_file_name = g_strdup_printf("%s", file);
    }

    st_set_trace_file_enabled(true);
}

void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif
    thread = g_thread_new("trace-thread", fn, NULL);
#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}
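
/**
 * Initialize the simple trace backend
 *
 * Spawns the writeout thread and registers an atexit() handler to flush
 * the trace buffer on exit. Returns false if the thread cannot be created.
 */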
bool st_init(void)
{
    GThread *thread;

    trace_pid = getpid();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    atexit(st_flush_trace_buffer);
    return true;
}