X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=riscv%2Fexecute.cc;h=c5cafc2ee5ead5df9b35f2156540f288167a3ec5;hb=4299874ad4b07ef457776513a64e5b2397a6a75e;hp=878893c80d18b840ee985e4c792004bd7304eb21;hpb=46a67860915391458d7cc8cb93248059df20b8f2;p=riscv-isa-sim.git

diff --git a/riscv/execute.cc b/riscv/execute.cc
index 878893c..c5cafc2 100644
--- a/riscv/execute.cc
+++ b/riscv/execute.cc
@@ -114,6 +114,7 @@ void processor_t::step(size_t n)
        default: abort(); \
      } \
      pc = state.pc; \
+     check_pc_alignment(pc); \
      break; \
    } else { \
      state.pc = pc; \
@@ -171,13 +172,6 @@ void processor_t::step(size_t n)
         //
         // According to Andrew Waterman's recollection, this optimization
         // resulted in approximately a 2x performance increase.
-        //
-        // If there is support for compressed instructions, the mmu and the
-        // switch statement get more complicated. Each branch target is stored
-        // in the index corresponding to mmu->icache_index(), but consecutive
-        // non-branching instructions are stored in consecutive indices even if
-        // mmu->icache_index() specifies a different index (which is the case
-        // for 32-bit instructions in the presence of compressed instructions).
 
         // This figures out where to jump to in the switch statement
         size_t idx = _mmu->icache_index(pc);
@@ -193,10 +187,10 @@ void processor_t::step(size_t n)
         // is located within the execute_insn() function call.
         #define ICACHE_ACCESS(i) { \
           insn_fetch_t fetch = ic_entry->data; \
-          ic_entry++; \
           pc = execute_insn(this, pc, fetch); \
+          ic_entry = ic_entry->next; \
           if (i == mmu_t::ICACHE_ENTRIES-1) break; \
-          if (unlikely(ic_entry->tag != pc)) goto miss; \
+          if (unlikely(ic_entry->tag != pc)) break; \
           if (unlikely(instret+1 == n)) break; \
           instret++; \
           state.pc = pc; \
@@ -210,13 +204,6 @@ void processor_t::step(size_t n)
       }
 
       advance_pc();
-      continue;
-
-miss:
-      advance_pc();
-      // refill I$ if it looks like there wasn't a taken branch
-      if (pc > (ic_entry-1)->tag && pc <= (ic_entry-1)->tag + MAX_INSN_LENGTH)
-        _mmu->refill_icache(pc, ic_entry);
     }
   }
   catch(trap_t& t)
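
For context on the hot-loop change above: the instruction-cache entries now chain through a next pointer, so the unrolled ICACHE_ACCESS body executes the cached instruction, follows the chain, and on a tag mismatch simply breaks back out to the slow path (the old code jumped to the now-removed miss: label, which refilled the icache). Below is a minimal, self-contained sketch of that walk; the struct and the simplified execute_insn are hypothetical stand-ins, not the simulator's real definitions (the real insn_fetch_t and icache entry live in mmu.h, and execute_insn takes the processor_t). The check_pc_alignment(pc) hook added to advance_pc() is a separate change and is not modeled here.

    // Sketch only: hypothetical stand-in types, not the simulator's real ones.
    #include <cstdint>
    #include <cstdio>

    struct icache_entry_t {
      uint64_t tag;             // pc this entry was filled for
      int data;                 // stand-in for the cached insn_fetch_t
      icache_entry_t* next;     // chained entry; replaces the old ic_entry++
    };

    // Toy execute_insn: "runs" the cached instruction and returns the next pc.
    static uint64_t execute_insn(uint64_t pc, int insn) {
      std::printf("insn %d at pc 0x%llx\n", insn, (unsigned long long)pc);
      return pc + 4;            // sequential fall-through only, for the sketch
    }

    int main() {
      // A tiny chain of three entries covering pc 0x0, 0x4, 0x8.
      icache_entry_t e2{0x8, 3, nullptr};
      icache_entry_t e1{0x4, 2, &e2};
      icache_entry_t e0{0x0, 1, &e1};

      uint64_t pc = 0;
      icache_entry_t* ic_entry = &e0;

      // Shape of the rewritten ICACHE_ACCESS body: execute the cached
      // instruction, follow the chain, and on a tag mismatch break back
      // to the slow path (the old code jumped to a miss: refill label).
      while (true) {
        pc = execute_insn(pc, ic_entry->data);
        ic_entry = ic_entry->next;                    // was: ic_entry++
        if (ic_entry == nullptr || ic_entry->tag != pc)
          break;                                      // was: goto miss
      }
      return 0;
    }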