@@ -84,15 +84,19 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
                                           enum qemu_plugin_cb_flags flags,
                                           void *udata)
 {
-    plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
-                                  cb, flags, udata);
+    if (!tb->mem_only) {
+        plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
+                                      cb, flags, udata);
+    }
 }
 
 void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
                                               enum qemu_plugin_op op,
                                               void *ptr, uint64_t imm)
 {
-    plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE], 0, op, ptr, imm);
+    if (!tb->mem_only) {
+        plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE], 0, op, ptr, imm);
+    }
 }
 
 void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
@@ -100,20 +104,27 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
                                             enum qemu_plugin_cb_flags flags,
                                             void *udata)
 {
-    plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR],
-                                  cb, flags, udata);
+    if (!insn->mem_only) {
+        plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR],
+                                      cb, flags, udata);
+    }
 }
 
 void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
                                                 enum qemu_plugin_op op,
                                                 void *ptr, uint64_t imm)
 {
-    plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
-                              0, op, ptr, imm);
+    if (!insn->mem_only) {
+        plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
+                                  0, op, ptr, imm);
+    }
 }
 
 
-
+/*
+ * We always plant memory instrumentation because it doesn't finalise until
+ * after the operation has completed.
+ */
 void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                       qemu_plugin_vcpu_mem_cb_t cb,
                                       enum qemu_plugin_cb_flags flags,
@@ -121,7 +132,7 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                       void *udata)
 {
     plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR],
-                                    cb, flags, rw, udata);
+                                cb, flags, rw, udata);
 }
 
 void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
@@ -130,7 +141,7 @@ void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
                                           uint64_t imm)
 {
     plugin_register_inline_op(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE],
-                                  rw, op, ptr, imm);
+                              rw, op, ptr, imm);
 }
 
 void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
@@ -181,10 +192,13 @@ uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb)
 struct qemu_plugin_insn *
 qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
 {
+    struct qemu_plugin_insn *insn;
     if (unlikely(idx >= tb->n)) {
         return NULL;
     }
-    return g_ptr_array_index(tb->insns, idx);
+    insn = g_ptr_array_index(tb->insns, idx);
+    insn->mem_only = tb->mem_only;
+    return insn;
 }
 
 /*