i#6495 syscall templs: More invariant adaptation
Adapts more invariant checks to the kernel system call trace
templates collected on QEMU.

Skips tracking the retaddr_stack_ for calls and returns inside the
system call trace portion, since the stack is used only to verify the
function return address markers, which are user-space only.
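
A self-contained restatement of that guard (the function and parameter
names here are illustrative, not the checker's API; the actual logic
lives in parallel_shard_memref in the diff below):

    #include <cstdint>
    #include <stack>

    // Mirrors the new check: the return-address stack is maintained only for
    // user-space instructions, because it is consulted solely to verify the
    // user-space function return address markers.
    void track_return_address(std::stack<uint64_t> &retaddr_stack,
                              bool in_kernel_trace_section, bool is_call,
                              bool is_return, uint64_t instr_addr,
                              uint64_t instr_size)
    {
        if (in_kernel_trace_section)
            return; // Skip syscall-trace and context-switch sections.
        if (is_call)
            retaddr_stack.push(instr_addr + instr_size);
        else if (is_return && !retaddr_stack.empty())
            retaddr_stack.pop();
    }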

Allows additional reads following xsaveopt, which apparently also reads
the header of the xsave area.
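
Restated as a standalone predicate (names here are illustrative; the
checker inlines this condition and additionally requires a pending
expected read record, as shown in the diff below):

    // True when a read beyond the decoder's estimate should be tolerated while
    // inside a syscall trace template: iret, xrstor, and now xsaveopt all
    // perform data-dependent reads (xsaveopt reads the xsave header).
    bool allows_extra_syscall_template_reads(bool in_syscall_trace, bool is_iret,
                                             bool is_xrstor, bool is_xsaveopt)
    {
        return in_syscall_trace && (is_iret || is_xrstor || is_xsaveopt);
    }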

Issue: #6495
abhinav92003 committed Apr 10, 2024
1 parent 7d1a9ef commit 34ffa88
Showing 2 changed files with 61 additions and 35 deletions.
14 changes: 14 additions & 0 deletions clients/drcachesim/tests/invariant_checker_test.cpp
@@ -3279,13 +3279,20 @@ check_kernel_syscall_trace(void)
instr_t *xsaves = INSTR_CREATE_xsaves64(
GLOBAL_DCONTEXT,
opnd_create_base_disp(DR_REG_XCX, DR_REG_NULL, 0, 0, OPSZ_xsave));
instr_t *xsaveopt = INSTR_CREATE_xsaveopt64(
GLOBAL_DCONTEXT,
opnd_create_base_disp(DR_REG_XCX, DR_REG_NULL, 0, 0, OPSZ_xsave));

# else
instr_t *xrstors = INSTR_CREATE_xrstors32(
GLOBAL_DCONTEXT,
opnd_create_base_disp(DR_REG_XCX, DR_REG_NULL, 0, 0, OPSZ_xsave));
instr_t *xsaves = INSTR_CREATE_xsaves32(
GLOBAL_DCONTEXT,
opnd_create_base_disp(DR_REG_XCX, DR_REG_NULL, 0, 0, OPSZ_xsave));
instr_t *xsaveopt = INSTR_CREATE_xsaveopt32(
GLOBAL_DCONTEXT,
opnd_create_base_disp(DR_REG_XCX, DR_REG_NULL, 0, 0, OPSZ_xsave));
# endif
instr_t *hlt = INSTR_CREATE_hlt(GLOBAL_DCONTEXT);
instr_t *nop3 = XINST_CREATE_nop(GLOBAL_DCONTEXT);
@@ -3306,6 +3313,7 @@ check_kernel_syscall_trace(void)
instrlist_append(ilist2, nop2);
instrlist_append(ilist2, xrstors);
instrlist_append(ilist2, xsaves);
instrlist_append(ilist2, xsaveopt);
instrlist_append(ilist2, hlt);
instrlist_append(ilist2, nop3);
instrlist_append(ilist2, prefetch);
@@ -3349,6 +3357,12 @@ check_kernel_syscall_trace(void)
{ gen_data(TID_A, false, 42, 8), nullptr },
{ gen_data(TID_A, false, 42, 8), nullptr },
{ gen_data(TID_A, false, 42, 8), nullptr },
{ gen_instr(TID_A), xsaveopt },
// Multiple writes and a read. Acceptable because of the prior xsaveopt.
{ gen_data(TID_A, false, 42, 8), nullptr },
{ gen_data(TID_A, false, 42, 8), nullptr },
{ gen_data(TID_A, true, 42, 8), nullptr },
{ gen_data(TID_A, false, 42, 8), nullptr },
{ gen_instr(TID_A), hlt },
// Missing nop3. Acceptable because of the prior hlt.
{ gen_instr(TID_A), prefetch },
82 changes: 47 additions & 35 deletions clients/drcachesim/tools/invariant_checker.cpp
@@ -777,8 +777,8 @@ invariant_checker_t::parallel_shard_memref(void *shard_data, const memref_t &mem
// variable is not cached as it can dynamically vary based on data values.
cur_instr_info.memref = memref;
if (knob_verbose_ >= 3) {
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid << ":: "
<< " @" << (void *)memref.instr.addr
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid
<< ":: " << " @" << (void *)memref.instr.addr
<< ((memref.instr.type == TRACE_TYPE_INSTR_NO_FETCH)
? " non-fetched"
: "")
@@ -790,13 +790,19 @@ invariant_checker_t::parallel_shard_memref(void *shard_data, const memref_t &mem
if (shard->instrs_until_interrupt_ > 0)
--shard->instrs_until_interrupt_;
#endif
if (memref.instr.type == TRACE_TYPE_INSTR_DIRECT_CALL ||
memref.instr.type == TRACE_TYPE_INSTR_INDIRECT_CALL) {
shard->retaddr_stack_.push(memref.instr.addr + memref.instr.size);
}
if (memref.instr.type == TRACE_TYPE_INSTR_RETURN) {
if (!shard->retaddr_stack_.empty()) {
shard->retaddr_stack_.pop();

// retaddr_stack_ is used for verifying invariants related to the function
// return markers; these are only user-space.
if (!(shard->between_kernel_context_switch_markers_ ||
shard->between_kernel_syscall_trace_markers_)) {
if (memref.instr.type == TRACE_TYPE_INSTR_DIRECT_CALL ||
memref.instr.type == TRACE_TYPE_INSTR_INDIRECT_CALL) {
shard->retaddr_stack_.push(memref.instr.addr + memref.instr.size);
}
if (memref.instr.type == TRACE_TYPE_INSTR_RETURN) {
if (!shard->retaddr_stack_.empty()) {
shard->retaddr_stack_.pop();
}
}
}
// Invariant: offline traces guarantee that a branch target must immediately
@@ -884,8 +890,8 @@ invariant_checker_t::parallel_shard_memref(void *shard_data, const memref_t &mem
// Clear window transitions on instrs.
shard->window_transition_ = false;
} else if (knob_verbose_ >= 3) {
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid << ":: "
<< " type " << memref.instr.type << "\n";
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid
<< ":: " << " type " << memref.instr.type << "\n";
}

if (memref.marker.type == TRACE_TYPE_MARKER &&
@@ -913,8 +919,8 @@ invariant_checker_t::parallel_shard_memref(void *shard_data, const memref_t &mem
// Reset this since we just saw a timestamp marker.
shard->instr_count_since_last_timestamp_ = 0;
if (knob_verbose_ >= 3) {
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid << ":: "
<< " timestamp " << memref.marker.marker_value << "\n";
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid
<< ":: " << " timestamp " << memref.marker.marker_value << "\n";
}
}
if (memref.marker.type == TRACE_TYPE_MARKER &&
@@ -935,9 +941,10 @@ invariant_checker_t::parallel_shard_memref(void *shard_data, const memref_t &mem
(memref.marker.marker_type == TRACE_MARKER_TYPE_KERNEL_EVENT ||
memref.marker.marker_type == TRACE_MARKER_TYPE_KERNEL_XFER)) {
if (knob_verbose_ >= 3) {
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid << ":: "
<< "marker type " << memref.marker.marker_type << " value 0x"
<< std::hex << memref.marker.marker_value << std::dec << "\n";
std::cerr << "::" << memref.data.pid << ":" << memref.data.tid
<< ":: " << "marker type " << memref.marker.marker_type
<< " value 0x" << std::hex << memref.marker.marker_value << std::dec
<< "\n";
}
// Zero is pushed as a sentinel. This push matches the return used by post
// signal handler to run the restorer code. It is assumed that all signal
@@ -1093,25 +1100,30 @@ invariant_checker_t::parallel_shard_memref(void *shard_data, const memref_t &mem
shard,
shard->expected_read_records_ >
0 IF_X86(
// iret pops bunch of data from the stack at interrupt
// returns. System call trace templates collected on
// QEMU would show all reads.
// TODO i#6742: iret has different behavior in user vs
// protected mode. This difference in iret behavior can
// be detected in the decoder and perhaps be accounted
// for here in a way better than relying on
// between_kernel_syscall_trace_markers_.
|| (shard->between_kernel_syscall_trace_markers_ &&
shard->prev_instr_.decoding.opcode == OP_iret)
// Xrstor does multiple reads. Read count cannot be
// determined using the decoder. System call trace
// templates collected on QEMU would show all reads.
// XXX: Perhaps we can query the expected size using
// cpuid and add it as a marker in the system call
// trace template, and then adjust it according to the
// cpuid value for the trace being injected into.
|| (shard->between_kernel_syscall_trace_markers_ &&
shard->prev_instr_.decoding.is_xrstor)),
||
(shard->between_kernel_syscall_trace_markers_ &&
(
// iret pops bunch of data from the stack at interrupt
// returns. System call trace templates collected on
// QEMU would show all reads.
// TODO i#6742: iret has different behavior in user vs
// protected mode. This difference in iret behavior can
// be detected in the decoder and perhaps be accounted
// for here in a way better than relying on
// between_kernel_syscall_trace_markers_.
shard->prev_instr_.decoding.opcode == OP_iret
// Xrstor does multiple reads. Read count cannot be
// determined using the decoder. System call trace
// templates collected on QEMU would show all reads.
// XXX: Perhaps we can query the expected size using
// cpuid and add it as a marker in the system call
// trace template, and then adjust it according to the
// cpuid value for the trace being injected into.
|| shard->prev_instr_.decoding.is_xrstor
// xsaveopt also reads some fields from the xsave
// header (https://www.felixcloutier.com/x86/xsaveopt).
|| shard->prev_instr_.decoding.opcode == OP_xsaveopt64 ||
shard->prev_instr_.decoding.opcode == OP_xsaveopt32))),
"Too many read records");
if (shard->expected_read_records_ > 0) {
shard->expected_read_records_--;