#endif
}
+// This is expected to be inlined by the compiler so each use of execute_insn
+// includes a duplicated body of the function to get separate fetch.func
+// function calls.
static reg_t execute_insn(processor_t* p, reg_t pc, insn_fetch_t fetch)
{
commit_log_stash_privilege(p->get_state());
void processor_t::step(size_t n)
{
if (state.dcsr.cause == DCSR_CAUSE_NONE) {
- // TODO: get_interrupt() isn't super fast. Does that matter?
- if (sim->debug_module.get_interrupt(id)) {
+ if (halt_request) {
enter_debug_mode(DCSR_CAUSE_DEBUGINT);
- } else if (state.dcsr.halt) {
+ } // NOTE: the halt bit in DCSR is deprecated.
+ else if (state.dcsr.halt) {
enter_debug_mode(DCSR_CAUSE_HALT);
}
- } else {
- // In Debug Mode, just do 11 steps at a time. Otherwise we're going to be
- // spinning the rest of the time anyway.
- n = std::min(n, (size_t) 11);
}
while (n > 0) {
if (unlikely(invalid_pc(pc))) { \
switch (pc) { \
case PC_SERIALIZE_BEFORE: state.serialized = true; break; \
- case PC_SERIALIZE_AFTER: instret++; break; \
+ case PC_SERIALIZE_AFTER: n = ++instret; break; \
default: abort(); \
} \
pc = state.pc; \
try
{
- take_interrupt();
+ take_pending_interrupt();
if (unlikely(slow_path()))
{
// enter_debug_mode changed state.pc, so we can't just continue.
break;
}
+
+ if (unlikely(state.pc >= DEBUG_START &&
+ state.pc < DEBUG_END)) {
+ // We're waiting for the debugger to tell us something.
+ return;
+ }
+
+
+
}
}
else while (instret < n)
{
+ // This code uses a modified Duff's Device to improve the performance
+ // of executing instructions. While typical Duff's Devices are used
+ // for software pipelining, the switch statement below primarily
+ // benefits from separate call points for the fetch.func function call
+ // found in each execute_insn. This function call is an indirect jump
+ // that depends on the current instruction. By having an indirect jump
+ // dedicated for each icache entry, you improve the performance of the
+ // host's next address predictor. Each case in the switch statement
+ // allows for the program flow to continue to the next case if it
+ // corresponds to the next instruction in the program and instret is
+ // still less than n.
+ //
+ // According to Andrew Waterman's recollection, this optimization
+ // resulted in approximately a 2x performance increase.
+ //
+ // If there is support for compressed instructions, the mmu and the
+ // switch statement get more complicated. Each branch target is stored
+ // in the index corresponding to mmu->icache_index(), but consecutive
+ // non-branching instructions are stored in consecutive indices even if
+ // mmu->icache_index() specifies a different index (which is the case
+ // for 32-bit instructions in the presence of compressed instructions).
+
+ // This figures out where to jump to in the switch statement
size_t idx = _mmu->icache_index(pc);
+
+ // This gets the cached decoded instruction from the MMU. If the MMU
+ // does not have the current pc cached, it will refill the MMU and
+ // return the correct entry. ic_entry->data.func is the C++ function
+ // corresponding to the instruction.
auto ic_entry = _mmu->access_icache(pc);
+ // This macro is included in "icache.h" included within the switch
+ // statement below. The indirect jump corresponding to the instruction
+ // is located within the execute_insn() function call.
#define ICACHE_ACCESS(i) { \
insn_fetch_t fetch = ic_entry->data; \
ic_entry++; \
state.pc = pc; \
}
+ // This switch statement implements the modified Duff's device as
+ // explained above.
switch (idx) {
+ // "icache.h" is generated by the gen_icache script
#include "icache.h"
}
{
take_trap(t, pc);
n = instret;
+
+ if (unlikely(state.single_step == state.STEP_STEPPED)) {
+ state.single_step = state.STEP_NONE;
+ enter_debug_mode(DCSR_CAUSE_STEP);
+ }
}
catch (trigger_matched_t& t)
{