1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
|
// vsha2ch.vv vd, vs2, vs1
//
// Vector SHA-2 "two rounds of compression, high part" instruction
// (Zvknha/Zvknhb). Each element group holds 4 words of SHA-2 state:
//   vd  provides {c, d, g, h}, vs2 provides {a, b, e, f},
//   vs1 provides the message-schedule-plus-round-constant words (K+W).
// The "high" variant consumes the two HIGH words of vs1 (kw2, kw3);
// the low words (kw0, kw1) are ignored here — the companion vsha2cl
// instruction presumably consumes those instead (same structure, low half).
// Two compression rounds are applied, then {a, b, e, f} is written back
// to vd. This fragment is spliced into the instruction-execution function
// by the simulator build; P.VU is the vector-unit state.
#include "zvknh_ext_macros.h"
// Ensures VSEW is 32 or 64, and vd doesn't overlap with either vs1 or vs2.
require_vsha2_common_constraints;
switch (P.VU.vsew) {
  case e32: {
    // SEW=32: SHA-256 flavor. Element groups are 4 x 32-bit words.
    require_vsha2_vsew32_constraints;
    VI_ZVK_VD_VS1_VS2_EGU32x4_NOVM_LOOP(
      {},
      {
        // {c, d, g, h} <- vd
        EXTRACT_EGU32x4_WORDS_BE(vd, c, d, g, h);
        // {a, b, e, f} <- vs2
        EXTRACT_EGU32x4_WORDS_BE(vs2, a, b, e, f);
        // {kw3, kw2, kw1, kw0} <- vs1. "kw" stands for K+W.
        // Only the high words (kw2, kw3) are used by the "ch" variant;
        // kw1/kw0 are extracted solely to satisfy the macro's arity.
        EXTRACT_EGU32x4_WORDS_BE(vs1, kw3, kw2,
                                 UNUSED _unused_kw1, UNUSED _unused_kw0);
        // Two rounds of SHA-256 compression; each call updates the
        // eight state variables in place. Order matters: kw2 first.
        ZVK_SHA256_COMPRESS(a, b, c, d, e, f, g, h, kw2);
        ZVK_SHA256_COMPRESS(a, b, c, d, e, f, g, h, kw3);
        // Update the destination register, vd <- {a, b, e, f}.
        SET_EGU32x4_BE(vd, a, b, e, f);
      }
    );
    break;
  }
  case e64: {
    // SEW=64: SHA-512 flavor (Zvknhb only). Element groups are
    // 4 x 64-bit words; otherwise identical in structure to the
    // SEW=32 case above.
    require_vsha2_vsew64_constraints;
    VI_ZVK_VD_VS1_VS2_EGU64x4_NOVM_LOOP(
      {},
      {
        // {c, d, g, h} <- vd
        EXTRACT_EGU64x4_WORDS_BE(vd, c, d, g, h);
        // {a, b, e, f} <- vs2
        EXTRACT_EGU64x4_WORDS_BE(vs2, a, b, e, f);
        // {kw3, kw2, kw1, kw0} <- vs1. "kw" stands for K+W.
        // As in the e32 case, only the high words (kw2, kw3) are used.
        EXTRACT_EGU64x4_WORDS_BE(vs1, kw3, kw2,
                                 UNUSED _unused_kw1, UNUSED _unused_kw0);
        // Two rounds of SHA-512 compression, kw2 then kw3.
        ZVK_SHA512_COMPRESS(a, b, c, d, e, f, g, h, kw2);
        ZVK_SHA512_COMPRESS(a, b, c, d, e, f, g, h, kw3);
        // Update the destination register, vd <- {a, b, e, f}.
        SET_EGU64x4_BE(vd, a, b, e, f);
      }
    );
    break;
  }
  // 'require_vsha2_common_constraints' ensures that
  // VSEW is either 32 or 64, so this branch is unreachable;
  // require(false) traps defensively if it is ever hit.
  default:
    require(false);
}
|