author     Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>   2019-01-02 09:14:23 +0000
committer  David Gibson <david@gibson.dropbear.id.au>         2019-01-09 09:28:14 +1100
commit     2dea57db6051afe3162192fe62c4694e17ec871f (patch)
tree       0ebb84758999d562a4e45dd5aa0eac7238461082
parent     ef96e3ae9698d6726a8113f448c82985a9f31ff5 (diff)
target/ppc: replace AVR* macros with Vsr* macros
Now that the VMX and VSR register sets have been combined, the same macros can
be used to access both AVR and VSR field members.
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
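For reference, the Vsr* accessors that replace AVRB()/AVRW() below are host-endianness-aware element selectors on the combined 128-bit register union, so that index 0 always names the architecturally most-significant element. A sketch of their definitions for context (per target/ppc/internal.h around the time of this commit; not part of this patch, and the VsrH/VsrD variants are shown only for completeness):

    /* Element accessors for the combined vector register union.
     * On a little-endian host the index is mirrored, so VsrB(0) and
     * VsrW(0) always select the most-significant byte/word -- exactly
     * the behaviour of the AVRB()/AVRW() macros removed below. */
    #if defined(HOST_WORDS_BIGENDIAN)
    #define VsrB(i) u8[i]
    #define VsrH(i) u16[i]
    #define VsrW(i) u32[i]
    #define VsrD(i) u64[i]
    #else
    #define VsrB(i) u8[15 - (i)]
    #define VsrH(i) u16[7 - (i)]
    #define VsrW(i) u32[3 - (i)]
    #define VsrD(i) u64[1 - (i)]
    #endif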
-rw-r--r--  target/ppc/int_helper.c | 30 +++++++++++++-----------------
1 file changed, 13 insertions(+), 17 deletions(-)
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 9d715be..598731d 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -391,13 +391,9 @@ target_ulong helper_602_mfrom(target_ulong arg)
 #if defined(HOST_WORDS_BIGENDIAN)
 #define HI_IDX 0
 #define LO_IDX 1
-#define AVRB(i) u8[i]
-#define AVRW(i) u32[i]
 #else
 #define HI_IDX 1
 #define LO_IDX 0
-#define AVRB(i) u8[15-(i)]
-#define AVRW(i) u32[3-(i)]
 #endif
 
 #if defined(HOST_WORDS_BIGENDIAN)
@@ -3277,11 +3273,11 @@ void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     int i;
 
     VECTOR_FOR_INORDER_I(i, u32) {
-        result.AVRW(i) = b->AVRW(i) ^
-            (AES_Te0[a->AVRB(AES_shifts[4*i + 0])] ^
-             AES_Te1[a->AVRB(AES_shifts[4*i + 1])] ^
-             AES_Te2[a->AVRB(AES_shifts[4*i + 2])] ^
-             AES_Te3[a->AVRB(AES_shifts[4*i + 3])]);
+        result.VsrW(i) = b->VsrW(i) ^
+            (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^
+             AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^
+             AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^
+             AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]);
     }
     *r = result;
 }
@@ -3292,7 +3288,7 @@ void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     int i;
 
     VECTOR_FOR_INORDER_I(i, u8) {
-        result.AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
+        result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]);
     }
     *r = result;
 }
@@ -3305,15 +3301,15 @@ void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     ppc_avr_t tmp;
 
     VECTOR_FOR_INORDER_I(i, u8) {
-        tmp.AVRB(i) = b->AVRB(i) ^ AES_isbox[a->AVRB(AES_ishifts[i])];
+        tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])];
     }
 
     VECTOR_FOR_INORDER_I(i, u32) {
-        r->AVRW(i) =
-            AES_imc[tmp.AVRB(4*i + 0)][0] ^
-            AES_imc[tmp.AVRB(4*i + 1)][1] ^
-            AES_imc[tmp.AVRB(4*i + 2)][2] ^
-            AES_imc[tmp.AVRB(4*i + 3)][3];
+        r->VsrW(i) =
+            AES_imc[tmp.VsrB(4 * i + 0)][0] ^
+            AES_imc[tmp.VsrB(4 * i + 1)][1] ^
+            AES_imc[tmp.VsrB(4 * i + 2)][2] ^
+            AES_imc[tmp.VsrB(4 * i + 3)][3];
     }
 }
 
@@ -3323,7 +3319,7 @@ void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
     int i;
 
     VECTOR_FOR_INORDER_I(i, u8) {
-        result.AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
+        result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]);
     }
     *r = result;
 }