diff options
author | Chih-Min Chao <chihmin.chao@sifive.com> | 2020-07-29 20:37:24 -0700 |
---|---|---|
committer | Chih-Min Chao <chihmin.chao@sifive.com> | 2020-07-29 21:38:43 -0700 |
commit | a602aa595c3f2a99c77aff2fb4b8b15adf15cacc (patch) | |
tree | ed437bb22c374f3f9c3ca46fd56d1c5620265420 /riscv | |
parent | cdda51cb0af8f846ab10bb8d9c1af3aab6a21b46 (diff) | |
download | riscv-isa-sim-a602aa595c3f2a99c77aff2fb4b8b15adf15cacc.zip riscv-isa-sim-a602aa595c3f2a99c77aff2fb4b8b15adf15cacc.tar.gz riscv-isa-sim-a602aa595c3f2a99c77aff2fb4b8b15adf15cacc.tar.bz2 |
rvv: remove veew/vemul state
They aren't architectural state, only transient per-instruction values, so they don't belong in the vector unit's saved state.
Signed-off-by: Chih-Min Chao <chihmin.chao@sifive.com>
Diffstat (limited to 'riscv')
-rw-r--r-- | riscv/decode.h | 52 | ||||
-rw-r--r-- | riscv/processor.cc | 2 | ||||
-rw-r--r-- | riscv/processor.h | 3 |
3 files changed, 25 insertions, 32 deletions
diff --git a/riscv/decode.h b/riscv/decode.h index bf7757a..e07020e 100644 --- a/riscv/decode.h +++ b/riscv/decode.h @@ -484,13 +484,12 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_CHECK_ST_INDEX(elt_width) \ require_vector; \ - P.VU.veew = elt_width; \ - P.VU.vemul = ((float)P.VU.veew / P.VU.vsew * P.VU.vflmul); \ - require(P.VU.vemul >= 0.125 && P.VU.vemul <= 8); \ - reg_t emul = P.VU.vemul < 1 ? 1 : P.VU.vemul; \ + float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + reg_t emul = vemul < 1 ? 1 : vemul; \ reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ require_align(insn.rd(), P.VU.vflmul); \ - require_align(insn.rs2(), P.VU.vemul); \ + require_align(insn.rs2(), vemul); \ require((nf * flmul) <= (NVPR / 4) && \ (insn.rd() + nf * flmul) <= NVPR); \ if (nf > 1) \ @@ -498,18 +497,18 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define VI_CHECK_LD_INDEX(elt_width) \ VI_CHECK_ST_INDEX(elt_width); \ - if (P.VU.veew > P.VU.vsew) { \ + if (elt_width > P.VU.vsew) { \ if (insn.rd() != insn.rs2()) \ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ - } else if (P.VU.veew < P.VU.vsew) { \ - if (P.VU.vemul < 1) {\ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else if (elt_width < P.VU.vsew) { \ + if (vemul < 1) {\ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ } else {\ - require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ } \ } \ if (insn.v_nf() > 0) {\ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ require_noover(vd, nf, insn.rs2(), 1); \ } \ require_vm; \ @@ -536,11 +535,11 @@ static inline bool is_aligned(const unsigned val, const unsigned pos) #define 
VI_CHECK_STORE(elt_width) \ require_vector; \ - P.VU.veew = sizeof(elt_width##_t) * 8; \ - P.VU.vemul = ((float)P.VU.veew / P.VU.vsew * P.VU.vflmul); \ - reg_t emul = P.VU.vemul < 1 ? 1 : P.VU.vemul; \ - require(P.VU.vemul >= 0.125 && P.VU.vemul <= 8); \ - require_align(insn.rd(), P.VU.vemul); \ + reg_t veew = sizeof(elt_width##_t) * 8; \ + float vemul = ((float)veew / P.VU.vsew * P.VU.vflmul); \ + reg_t emul = vemul < 1 ? 1 : vemul; \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rd(), vemul); \ require((nf * emul) <= (NVPR / 4) && \ (insn.rd() + nf * emul) <= NVPR); \ if (nf > 1) \ @@ -1822,20 +1821,19 @@ for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl != 0; ++i) { \ require_align(insn.rd(), P.VU.vflmul); \ require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \ require_align(insn.rd(), P.VU.vflmul); \ - P.VU.veew = idx_type; \ - P.VU.vemul = ((float)P.VU.veew / P.VU.vsew * P.VU.vflmul); \ - require(P.VU.vemul >= 0.125 && P.VU.vemul <= 8); \ - require_align(insn.rs2(), P.VU.vemul); \ + float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rs2(), vemul); \ if (insn.v_wd()) {\ require_vm; \ - if (P.VU.veew > P.VU.vsew) { \ + if (idx_type > P.VU.vsew) { \ if (insn.rd() != insn.rs2()) \ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ - } else if (P.VU.veew < P.VU.vsew) { \ - if (P.VU.vemul < 1) {\ - require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else if (idx_type < P.VU.vsew) { \ + if (vemul < 1) {\ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ } else {\ - require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vemul); \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ } \ } \ } \ diff --git a/riscv/processor.cc b/riscv/processor.cc index de90add..d59b96b 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -398,8 +398,6 @@ reg_t 
processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newT new_vlmul = int8_t(BITS(newType, 2, 0) << 5) >> 5; vflmul = new_vlmul >= 0 ? 1 << new_vlmul : 1.0 / (1 << -new_vlmul); vlmax = (VLEN/vsew) * vflmul; - vemul = vflmul; - veew = vsew; vta = BITS(newType, 6, 6); vma = BITS(newType, 7, 7); vediv = 1 << BITS(newType, 9, 8); diff --git a/riscv/processor.h b/riscv/processor.h index a494b4d..3d540dd 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -476,10 +476,7 @@ public: reg_t vstart, vxrm, vxsat, vl, vtype, vlenb; reg_t vma, vta; reg_t vediv, vsew; - reg_t veew; - float vemul; float vflmul; - reg_t vmel; reg_t ELEN, VLEN; bool vill; |