author     Andy Polyakov <appro@openssl.org>    2015-12-13 21:40:20 +0100
committer  Andy Polyakov <appro@openssl.org>    2016-02-10 10:31:14 +0100
commit     a98c648e40ea5158c8ba29b5a70ccc239d426a20
tree       b08065ccfa3e15a1b9be1b77118c60182fe32313 /crypto/chacha
parent     d40cf9bc9c0913310ef2232b13d8e15e73e9d2b4
x86[_64] assembly pack: add ChaCha20 and Poly1305 modules.
Reviewed-by: Rich Salz <rsalz@openssl.org>
Diffstat (limited to 'crypto/chacha')
-rw-r--r--  crypto/chacha/Makefile.in           |    5
-rwxr-xr-x  crypto/chacha/asm/chacha-x86.pl     | 1128
-rwxr-xr-x  crypto/chacha/asm/chacha-x86_64.pl  | 2234
3 files changed, 3367 insertions, 0 deletions
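For orientation only (this note and the snippet are not part of the commit): both perlasm modules added below generate code for the standard ChaCha20 quarter-round, i.e. add/xor/rotate steps with rotation amounts 16, 12, 8 and 7. A minimal plain-Perl sketch of that operation, with the helper names rotl32 and quarterround chosen here purely for illustration, looks like this:

#!/usr/bin/env perl
# Illustrative only -- not part of the patch. This is the scalar
# quarter-round that the QUARTERROUND/ROUND generators below turn into
# x86/x86_64 instructions (32-bit add, xor, rotate by 16/12/8/7).
use strict; use warnings;

sub rotl32 {                       # rotate a 32-bit value left by $n bits
    my ($v, $n) = @_;
    return (($v << $n) | ($v >> (32 - $n))) & 0xffffffff;
}

sub quarterround {
    my ($a, $b, $c, $d) = @_;
    $a = ($a + $b) & 0xffffffff;  $d = rotl32($d ^ $a, 16);
    $c = ($c + $d) & 0xffffffff;  $b = rotl32($b ^ $c, 12);
    $a = ($a + $b) & 0xffffffff;  $d = rotl32($d ^ $a, 8);
    $c = ($c + $d) & 0xffffffff;  $b = rotl32($b ^ $c, 7);
    return ($a, $b, $c, $d);
}

# One double round applies quarterround to the columns (0,4,8,12)..(3,7,11,15)
# and then to the diagonals (0,5,10,15)..(3,4,9,14); ChaCha20 runs 10 such
# double rounds, which is the "mov $b,10" / "mov \$10,%ebp" loop counter below.

The QUARTERROUND and ROUND generators in the patch emit exactly these operations, but scheduled and interleaved across several state words (and, in the 4x SIMD paths, across four 64-byte blocks at once) to hide instruction latency.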
diff --git a/crypto/chacha/Makefile.in b/crypto/chacha/Makefile.in index 8987a85..6fb63c1 100644 --- a/crypto/chacha/Makefile.in +++ b/crypto/chacha/Makefile.in @@ -36,6 +36,11 @@ lib: $(LIBOBJ) $(RANLIB) $(LIB) || echo Never mind. @touch lib +chacha-x86.s: asm/chacha-x86.pl + $(PERL) asm/chacha-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@ +chacha-x86_64.s: asm/chacha-x86_64.pl + $(PERL) asm/chacha-x86_64.pl $(PERLASM_SCHEME) > $@ + chacha-%.S: asm/chacha-%.pl; $(PERL) $< $(PERLASM_SCHEME) $@ files: diff --git a/crypto/chacha/asm/chacha-x86.pl b/crypto/chacha/asm/chacha-x86.pl new file mode 100755 index 0000000..5d097ad --- /dev/null +++ b/crypto/chacha/asm/chacha-x86.pl @@ -0,0 +1,1128 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# ==================================================================== +# +# January 2015 +# +# ChaCha20 for x86. +# +# Performance in cycles per byte out of large buffer. +# +# 1xIALU/gcc 4xSSSE3 +# Pentium 17.5/+80% +# PIII 14.2/+60% +# P4 18.6/+84% +# Core2 9.56/+89% 4.83 +# Westmere 9.50/+45% 3.35 +# Sandy Bridge 10.5/+47% 3.20 +# Haswell 8.15/+50% 2.83 +# Silvermont 17.4/+36% 8.35 +# Sledgehammer 10.2/+54% +# Bulldozer 13.4/+50% 4.38(*) +# +# (*) Bulldozer actually executes 4xXOP code path that delivers 3.55; + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],"chacha-x86.pl",$ARGV[$#ARGV] eq "386"); + +$xmm=$ymm=0; +for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); } + +$ymm=1 if ($xmm && + `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/ && + $1>=2.19); # first version supporting AVX + +$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ && + $1>=2.03); # first version supporting AVX + +$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" && + `ml 2>&1` =~ /Version ([0-9]+)\./ && + $1>=10); # first version supporting AVX + +$ymm=1 if ($xmm && !$ymm && + `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/ && + $2>=3.0); # first version supporting AVX + +$a="eax"; +($b,$b_)=("ebx","ebp"); +($c,$c_)=("ecx","esi"); +($d,$d_)=("edx","edi"); + +sub QUARTERROUND { +my ($ai,$bi,$ci,$di,$i)=@_; +my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next +my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous + + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + + if ($i==0) { + my $j=4; + ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp)); + } elsif ($i==3) { + my $j=0; + ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn)); + } elsif ($i==4) { + my $j=4; + ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp)); + } elsif ($i==7) { + my $j=0; + ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn)); + } + + #&add ($a,$b); # see elsewhere + &xor ($d,$a); + &mov (&DWP(4*$cp,"esp"),$c_) if ($ai>0 && $ai<3); + &rol ($d,16); + &mov (&DWP(4*$bp,"esp"),$b_) if ($i!=0); + &add ($c,$d); + &mov ($c_,&DWP(4*$cn,"esp")) if ($ai>0 && $ai<3); + &xor ($b,$c); + &mov ($d_,&DWP(4*$dn,"esp")) if 
($di!=$dn); + &rol ($b,12); + &mov ($b_,&DWP(4*$bn,"esp")) if ($i<7); + &mov ($b_,&DWP(128,"esp")) if ($i==7); # loop counter + &add ($a,$b); + &xor ($d,$a); + &mov (&DWP(4*$ai,"esp"),$a); + &rol ($d,8); + &mov ($a,&DWP(4*$an,"esp")); + &add ($c,$d); + &mov (&DWP(4*$di,"esp"),$d) if ($di!=$dn); + &mov ($d_,$d) if ($di==$dn); + &xor ($b,$c); + &add ($a,$b_) if ($i<7); # elsewhere + &rol ($b,7); + + ($b,$b_)=($b_,$b); + ($c,$c_)=($c_,$c); + ($d,$d_)=($d_,$d); +} + +&static_label("ssse3_shortcut"); +&static_label("xop_shortcut"); +&static_label("ssse3_data"); +&static_label("pic_point"); + +&function_begin("ChaCha20_ctr32"); +if ($xmm) { + &call (&label("pic_point")); +&set_label("pic_point"); + &blindpop("eax"); + &picmeup("ebp","OPENSSL_ia32cap_P","eax",&label("pic_point")); + &test (&DWP(0,"ebp"),1<<24); # test FXSR bit + &jz (&label("x86")); + &test (&DWP(4,"ebp"),1<<9); # test SSSE3 bit + &jz (&label("x86")); + &jmp (&label("ssse3_shortcut")); +&set_label("x86"); +} + &mov ("esi",&wparam(3)); # key + &mov ("edi",&wparam(4)); # counter and nonce + + &stack_push(33); + + &mov ("eax",&DWP(4*0,"esi")); # copy key + &mov ("ebx",&DWP(4*1,"esi")); + &mov ("ecx",&DWP(4*2,"esi")); + &mov ("edx",&DWP(4*3,"esi")); + &mov (&DWP(64+4*4,"esp"),"eax"); + &mov (&DWP(64+4*5,"esp"),"ebx"); + &mov (&DWP(64+4*6,"esp"),"ecx"); + &mov (&DWP(64+4*7,"esp"),"edx"); + &mov ("eax",&DWP(4*4,"esi")); + &mov ("ebx",&DWP(4*5,"esi")); + &mov ("ecx",&DWP(4*6,"esi")); + &mov ("edx",&DWP(4*7,"esi")); + &mov (&DWP(64+4*8,"esp"),"eax"); + &mov (&DWP(64+4*9,"esp"),"ebx"); + &mov (&DWP(64+4*10,"esp"),"ecx"); + &mov (&DWP(64+4*11,"esp"),"edx"); + &mov ("eax",&DWP(4*0,"edi")); # copy counter and nonce + &mov ("ebx",&DWP(4*1,"edi")); + &mov ("ecx",&DWP(4*2,"edi")); + &mov ("edx",&DWP(4*3,"edi")); + &sub ("eax",1); + &mov (&DWP(64+4*12,"esp"),"eax"); + &mov (&DWP(64+4*13,"esp"),"ebx"); + &mov (&DWP(64+4*14,"esp"),"ecx"); + &mov (&DWP(64+4*15,"esp"),"edx"); + &jmp (&label("entry")); + +&set_label("outer_loop",16); + &mov (&wparam(1),$b); # save input + &mov (&wparam(0),$a); # save output + &mov (&wparam(2),$c); # save len +&set_label("entry"); + &mov ($a,0x61707865); + &mov (&DWP(4*1,"esp"),0x3320646e); + &mov (&DWP(4*2,"esp"),0x79622d32); + &mov (&DWP(4*3,"esp"),0x6b206574); + + &mov ($b, &DWP(64+4*5,"esp")); # copy key material + &mov ($b_,&DWP(64+4*6,"esp")); + &mov ($c, &DWP(64+4*10,"esp")); + &mov ($c_,&DWP(64+4*11,"esp")); + &mov ($d, &DWP(64+4*13,"esp")); + &mov ($d_,&DWP(64+4*14,"esp")); + &mov (&DWP(4*5,"esp"),$b); + &mov (&DWP(4*6,"esp"),$b_); + &mov (&DWP(4*10,"esp"),$c); + &mov (&DWP(4*11,"esp"),$c_); + &mov (&DWP(4*13,"esp"),$d); + &mov (&DWP(4*14,"esp"),$d_); + + &mov ($b, &DWP(64+4*7,"esp")); + &mov ($d_,&DWP(64+4*15,"esp")); + &mov ($d, &DWP(64+4*12,"esp")); + &mov ($b_,&DWP(64+4*4,"esp")); + &mov ($c, &DWP(64+4*8,"esp")); + &mov ($c_,&DWP(64+4*9,"esp")); + &add ($d,1); # counter value + &mov (&DWP(4*7,"esp"),$b); + &mov (&DWP(4*15,"esp"),$d_); + &mov (&DWP(64+4*12,"esp"),$d); # save counter value + + &mov ($b,10); # loop counter + &jmp (&label("loop")); + +&set_label("loop",16); + &add ($a,$b_); # elsewhere + &mov (&DWP(128,"esp"),$b); # save loop counter + &mov ($b,$b_); + &QUARTERROUND(0, 4, 8, 12, 0); + &QUARTERROUND(1, 5, 9, 13, 1); + &QUARTERROUND(2, 6,10, 14, 2); + &QUARTERROUND(3, 7,11, 15, 3); + &QUARTERROUND(0, 5,10, 15, 4); + &QUARTERROUND(1, 6,11, 12, 5); + &QUARTERROUND(2, 7, 8, 13, 6); + &QUARTERROUND(3, 4, 9, 14, 7); + &dec ($b); + &jnz (&label("loop")); + + &mov ($b,&wparam(3)); # load len + + &add 
($a,0x61707865); # accumulate key material + &add ($b_,&DWP(64+4*4,"esp")); + &add ($c, &DWP(64+4*8,"esp")); + &add ($c_,&DWP(64+4*9,"esp")); + + &cmp ($b,64); + &jb (&label("tail")); + + &mov ($b,&wparam(1)); # load input pointer + &add ($d, &DWP(64+4*12,"esp")); + &add ($d_,&DWP(64+4*14,"esp")); + + &xor ($a, &DWP(4*0,$b)); # xor with input + &xor ($b_,&DWP(4*4,$b)); + &mov (&DWP(4*0,"esp"),$a); + &mov ($a,&wparam(0)); # load output pointer + &xor ($c, &DWP(4*8,$b)); + &xor ($c_,&DWP(4*9,$b)); + &xor ($d, &DWP(4*12,$b)); + &xor ($d_,&DWP(4*14,$b)); + &mov (&DWP(4*4,$a),$b_); # write output + &mov (&DWP(4*8,$a),$c); + &mov (&DWP(4*9,$a),$c_); + &mov (&DWP(4*12,$a),$d); + &mov (&DWP(4*14,$a),$d_); + + &mov ($b_,&DWP(4*1,"esp")); + &mov ($c, &DWP(4*2,"esp")); + &mov ($c_,&DWP(4*3,"esp")); + &mov ($d, &DWP(4*5,"esp")); + &mov ($d_,&DWP(4*6,"esp")); + &add ($b_,0x3320646e); # accumulate key material + &add ($c, 0x79622d32); + &add ($c_,0x6b206574); + &add ($d, &DWP(64+4*5,"esp")); + &add ($d_,&DWP(64+4*6,"esp")); + &xor ($b_,&DWP(4*1,$b)); + &xor ($c, &DWP(4*2,$b)); + &xor ($c_,&DWP(4*3,$b)); + &xor ($d, &DWP(4*5,$b)); + &xor ($d_,&DWP(4*6,$b)); + &mov (&DWP(4*1,$a),$b_); + &mov (&DWP(4*2,$a),$c); + &mov (&DWP(4*3,$a),$c_); + &mov (&DWP(4*5,$a),$d); + &mov (&DWP(4*6,$a),$d_); + + &mov ($b_,&DWP(4*7,"esp")); + &mov ($c, &DWP(4*10,"esp")); + &mov ($c_,&DWP(4*11,"esp")); + &mov ($d, &DWP(4*13,"esp")); + &mov ($d_,&DWP(4*15,"esp")); + &add ($b_,&DWP(64+4*7,"esp")); + &add ($c, &DWP(64+4*10,"esp")); + &add ($c_,&DWP(64+4*11,"esp")); + &add ($d, &DWP(64+4*13,"esp")); + &add ($d_,&DWP(64+4*15,"esp")); + &xor ($b_,&DWP(4*7,$b)); + &xor ($c, &DWP(4*10,$b)); + &xor ($c_,&DWP(4*11,$b)); + &xor ($d, &DWP(4*13,$b)); + &xor ($d_,&DWP(4*15,$b)); + &lea ($b,&DWP(4*16,$b)); + &mov (&DWP(4*7,$a),$b_); + &mov ($b_,&DWP(4*0,"esp")); + &mov (&DWP(4*10,$a),$c); + &mov ($c,&wparam(2)); # len + &mov (&DWP(4*11,$a),$c_); + &mov (&DWP(4*13,$a),$d); + &mov (&DWP(4*15,$a),$d_); + &mov (&DWP(4*0,$a),$b_); + &lea ($a,&DWP(4*16,$a)); + &sub ($c,64); + &jnz (&label("outer_loop")); + + &jmp (&label("done")); + +&set_label("tail"); + &add ($d, &DWP(64+4*12,"esp")); + &add ($d_,&DWP(64+4*14,"esp")); + &mov (&DWP(4*0,"esp"),$a); + &mov (&DWP(4*4,"esp"),$b_); + &mov (&DWP(4*8,"esp"),$c); + &mov (&DWP(4*9,"esp"),$c_); + &mov (&DWP(4*12,"esp"),$d); + &mov (&DWP(4*14,"esp"),$d_); + + &mov ($b_,&DWP(4*1,"esp")); + &mov ($c, &DWP(4*2,"esp")); + &mov ($c_,&DWP(4*3,"esp")); + &mov ($d, &DWP(4*5,"esp")); + &mov ($d_,&DWP(4*6,"esp")); + &add ($b_,0x3320646e); # accumulate key material + &add ($c, 0x79622d32); + &add ($c_,0x6b206574); + &add ($d, &DWP(64+4*5,"esp")); + &add ($d_,&DWP(64+4*6,"esp")); + &mov (&DWP(4*1,"esp"),$b_); + &mov (&DWP(4*2,"esp"),$c); + &mov (&DWP(4*3,"esp"),$c_); + &mov (&DWP(4*5,"esp"),$d); + &mov (&DWP(4*6,"esp"),$d_); + + &mov ($b_,&DWP(4*7,"esp")); + &mov ($c, &DWP(4*10,"esp")); + &mov ($c_,&DWP(4*11,"esp")); + &mov ($d, &DWP(4*13,"esp")); + &mov ($d_,&DWP(4*15,"esp")); + &add ($b_,&DWP(64+4*7,"esp")); + &add ($c, &DWP(64+4*10,"esp")); + &add ($c_,&DWP(64+4*11,"esp")); + &add ($d, &DWP(64+4*13,"esp")); + &add ($d_,&DWP(64+4*15,"esp")); + &mov (&DWP(4*7,"esp"),$b_); + &mov ($b_,&wparam(1)); # load input + &mov (&DWP(4*10,"esp"),$c); + &mov ($c,&wparam(0)); # load output + &mov (&DWP(4*11,"esp"),$c_); + &xor ($c_,$c_); + &mov (&DWP(4*13,"esp"),$d); + &mov (&DWP(4*15,"esp"),$d_); + + &xor ("eax","eax"); + &xor ("edx","edx"); +&set_label("tail_loop"); + &movb ("al",&DWP(0,$c_,$b_)); + &movb 
("dl",&DWP(0,"esp",$c_)); + &lea ($c_,&DWP(1,$c_)); + &xor ("al","dl"); + &mov (&DWP(-1,$c,$c_),"al"); + &dec ($b); + &jnz (&label("tail_loop")); + +&set_label("done"); + &stack_pop(33); +&function_end("ChaCha20_ctr32"); + +if ($xmm) { +my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7)); +my ($out,$inp,$len)=("edi","esi","ecx"); + +sub QUARTERROUND_SSSE3 { +my ($ai,$bi,$ci,$di,$i)=@_; +my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next +my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous + + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + + if ($i==0) { + my $j=4; + ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp)); + } elsif ($i==3) { + my $j=0; + ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn)); + } elsif ($i==4) { + my $j=4; + ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp)); + } elsif ($i==7) { + my $j=0; + ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn)); + } + + #&paddd ($xa,$xb); # see elsewhere + #&pxor ($xd,$xa); # see elsewhere + &movdqa(&QWP(16*$cp-128,"ebx"),$xc_) if ($ai>0 && $ai<3); + &pshufb ($xd,&QWP(0,"eax")); # rot16 + &movdqa(&QWP(16*$bp-128,"ebx"),$xb_) if ($i!=0); + &paddd ($xc,$xd); + &movdqa($xc_,&QWP(16*$cn-128,"ebx")) if ($ai>0 && $ai<3); + &pxor ($xb,$xc); + &movdqa($xb_,&QWP(16*$bn-128,"ebx")) if ($i<7); + &movdqa ($xa_,$xb); # borrow as temporary + &pslld ($xb,12); + &psrld ($xa_,20); + &por ($xb,$xa_); + &movdqa($xa_,&QWP(16*$an-128,"ebx")); + &paddd ($xa,$xb); + &movdqa($xd_,&QWP(16*$dn-128,"ebx")) if ($di!=$dn); + &pxor ($xd,$xa); + &movdqa (&QWP(16*$ai-128,"ebx"),$xa); + &pshufb ($xd,&QWP(16,"eax")); # rot8 + &paddd ($xc,$xd); + &movdqa (&QWP(16*$di-128,"ebx"),$xd) if ($di!=$dn); + &movdqa ($xd_,$xd) if ($di==$dn); + &pxor ($xb,$xc); + &paddd ($xa_,$xb_) if ($i<7); # elsewhere + &movdqa ($xa,$xb); # borrow as temporary + &pslld ($xb,7); + &psrld ($xa,25); + &pxor ($xd_,$xa_) if ($i<7); # elsewhere + &por ($xb,$xa); + + ($xa,$xa_)=($xa_,$xa); + ($xb,$xb_)=($xb_,$xb); + ($xc,$xc_)=($xc_,$xc); + ($xd,$xd_)=($xd_,$xd); +} + +&function_begin("ChaCha20_ssse3"); +&set_label("ssse3_shortcut"); + &test (&DWP(4,"ebp"),1<<11); # test XOP bit + &jnz (&label("xop_shortcut")); + + &mov ($out,&wparam(0)); + &mov ($inp,&wparam(1)); + &mov ($len,&wparam(2)); + &mov ("edx",&wparam(3)); # key + &mov ("ebx",&wparam(4)); # counter and nonce + + &mov ("ebp","esp"); + &stack_push (131); + &and ("esp",-64); + &mov (&DWP(512,"esp"),"ebp"); + + &lea ("eax",&DWP(&label("ssse3_data")."-". 
+ &label("pic_point"),"eax")); + &movdqu ("xmm3",&QWP(0,"ebx")); # counter and nonce + + &cmp ($len,64*4); + &jb (&label("1x")); + + &mov (&DWP(512+4,"esp"),"edx"); # offload pointers + &mov (&DWP(512+8,"esp"),"ebx"); + &sub ($len,64*4); # bias len + &lea ("ebp",&DWP(256+128,"esp")); # size optimization + + &movdqu ("xmm7",&DWP(0,"edx")); # key + &pshufd ("xmm0","xmm3",0x00); + &pshufd ("xmm1","xmm3",0x55); + &pshufd ("xmm2","xmm3",0xaa); + &pshufd ("xmm3","xmm3",0xff); + &paddd ("xmm0",&QWP(16*3,"eax")); # fix counters + &pshufd ("xmm4","xmm7",0x00); + &pshufd ("xmm5","xmm7",0x55); + &psubd ("xmm0",&QWP(16*4,"eax")); + &pshufd ("xmm6","xmm7",0xaa); + &pshufd ("xmm7","xmm7",0xff); + &movdqa (&QWP(16*12-128,"ebp"),"xmm0"); + &movdqa (&QWP(16*13-128,"ebp"),"xmm1"); + &movdqa (&QWP(16*14-128,"ebp"),"xmm2"); + &movdqa (&QWP(16*15-128,"ebp"),"xmm3"); + &movdqu ("xmm3",&DWP(16,"edx")); # key + &movdqa (&QWP(16*4-128,"ebp"),"xmm4"); + &movdqa (&QWP(16*5-128,"ebp"),"xmm5"); + &movdqa (&QWP(16*6-128,"ebp"),"xmm6"); + &movdqa (&QWP(16*7-128,"ebp"),"xmm7"); + &movdqa ("xmm7",&DWP(16*2,"eax")); # sigma + &lea ("ebx",&DWP(128,"esp")); # size optimization + + &pshufd ("xmm0","xmm3",0x00); + &pshufd ("xmm1","xmm3",0x55); + &pshufd ("xmm2","xmm3",0xaa); + &pshufd ("xmm3","xmm3",0xff); + &pshufd ("xmm4","xmm7",0x00); + &pshufd ("xmm5","xmm7",0x55); + &pshufd ("xmm6","xmm7",0xaa); + &pshufd ("xmm7","xmm7",0xff); + &movdqa (&QWP(16*8-128,"ebp"),"xmm0"); + &movdqa (&QWP(16*9-128,"ebp"),"xmm1"); + &movdqa (&QWP(16*10-128,"ebp"),"xmm2"); + &movdqa (&QWP(16*11-128,"ebp"),"xmm3"); + &movdqa (&QWP(16*0-128,"ebp"),"xmm4"); + &movdqa (&QWP(16*1-128,"ebp"),"xmm5"); + &movdqa (&QWP(16*2-128,"ebp"),"xmm6"); + &movdqa (&QWP(16*3-128,"ebp"),"xmm7"); + + &lea ($inp,&DWP(128,$inp)); # size optimization + &lea ($out,&DWP(128,$out)); # size optimization + &jmp (&label("outer_loop")); + +&set_label("outer_loop",16); + #&movdqa ("xmm0",&QWP(16*0-128,"ebp")); # copy key material + &movdqa ("xmm1",&QWP(16*1-128,"ebp")); + &movdqa ("xmm2",&QWP(16*2-128,"ebp")); + &movdqa ("xmm3",&QWP(16*3-128,"ebp")); + #&movdqa ("xmm4",&QWP(16*4-128,"ebp")); + &movdqa ("xmm5",&QWP(16*5-128,"ebp")); + &movdqa ("xmm6",&QWP(16*6-128,"ebp")); + &movdqa ("xmm7",&QWP(16*7-128,"ebp")); + #&movdqa (&QWP(16*0-128,"ebx"),"xmm0"); + &movdqa (&QWP(16*1-128,"ebx"),"xmm1"); + &movdqa (&QWP(16*2-128,"ebx"),"xmm2"); + &movdqa (&QWP(16*3-128,"ebx"),"xmm3"); + #&movdqa (&QWP(16*4-128,"ebx"),"xmm4"); + &movdqa (&QWP(16*5-128,"ebx"),"xmm5"); + &movdqa (&QWP(16*6-128,"ebx"),"xmm6"); + &movdqa (&QWP(16*7-128,"ebx"),"xmm7"); + #&movdqa ("xmm0",&QWP(16*8-128,"ebp")); + #&movdqa ("xmm1",&QWP(16*9-128,"ebp")); + &movdqa ("xmm2",&QWP(16*10-128,"ebp")); + &movdqa ("xmm3",&QWP(16*11-128,"ebp")); + &movdqa ("xmm4",&QWP(16*12-128,"ebp")); + &movdqa ("xmm5",&QWP(16*13-128,"ebp")); + &movdqa ("xmm6",&QWP(16*14-128,"ebp")); + &movdqa ("xmm7",&QWP(16*15-128,"ebp")); + &paddd ("xmm4",&QWP(16*4,"eax")); # counter value + #&movdqa (&QWP(16*8-128,"ebx"),"xmm0"); + #&movdqa (&QWP(16*9-128,"ebx"),"xmm1"); + &movdqa (&QWP(16*10-128,"ebx"),"xmm2"); + &movdqa (&QWP(16*11-128,"ebx"),"xmm3"); + &movdqa (&QWP(16*12-128,"ebx"),"xmm4"); + &movdqa (&QWP(16*13-128,"ebx"),"xmm5"); + &movdqa (&QWP(16*14-128,"ebx"),"xmm6"); + &movdqa (&QWP(16*15-128,"ebx"),"xmm7"); + &movdqa (&QWP(16*12-128,"ebp"),"xmm4"); # save counter value + + &movdqa ($xa, &QWP(16*0-128,"ebp")); + &movdqa ($xd, "xmm4"); + &movdqa ($xb_,&QWP(16*4-128,"ebp")); + &movdqa ($xc, &QWP(16*8-128,"ebp")); + &movdqa 
($xc_,&QWP(16*9-128,"ebp")); + + &mov ("edx",10); # loop counter + &nop (); + +&set_label("loop",16); + &paddd ($xa,$xb_); # elsewhere + &movdqa ($xb,$xb_); + &pxor ($xd,$xa); # elsewhere + &QUARTERROUND_SSSE3(0, 4, 8, 12, 0); + &QUARTERROUND_SSSE3(1, 5, 9, 13, 1); + &QUARTERROUND_SSSE3(2, 6,10, 14, 2); + &QUARTERROUND_SSSE3(3, 7,11, 15, 3); + &QUARTERROUND_SSSE3(0, 5,10, 15, 4); + &QUARTERROUND_SSSE3(1, 6,11, 12, 5); + &QUARTERROUND_SSSE3(2, 7, 8, 13, 6); + &QUARTERROUND_SSSE3(3, 4, 9, 14, 7); + &dec ("edx"); + &jnz (&label("loop")); + + &movdqa (&QWP(16*4-128,"ebx"),$xb_); + &movdqa (&QWP(16*8-128,"ebx"),$xc); + &movdqa (&QWP(16*9-128,"ebx"),$xc_); + &movdqa (&QWP(16*12-128,"ebx"),$xd); + &movdqa (&QWP(16*14-128,"ebx"),$xd_); + + my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7)); + + #&movdqa ($xa0,&QWP(16*0-128,"ebx")); # it's there + &movdqa ($xa1,&QWP(16*1-128,"ebx")); + &movdqa ($xa2,&QWP(16*2-128,"ebx")); + &movdqa ($xa3,&QWP(16*3-128,"ebx")); + + for($i=0;$i<256;$i+=64) { + &paddd ($xa0,&QWP($i+16*0-128,"ebp")); # accumulate key material + &paddd ($xa1,&QWP($i+16*1-128,"ebp")); + &paddd ($xa2,&QWP($i+16*2-128,"ebp")); + &paddd ($xa3,&QWP($i+16*3-128,"ebp")); + + &movdqa ($xt2,$xa0); # "de-interlace" data + &punpckldq ($xa0,$xa1); + &movdqa ($xt3,$xa2); + &punpckldq ($xa2,$xa3); + &punpckhdq ($xt2,$xa1); + &punpckhdq ($xt3,$xa3); + &movdqa ($xa1,$xa0); + &punpcklqdq ($xa0,$xa2); # "a0" + &movdqa ($xa3,$xt2); + &punpcklqdq ($xt2,$xt3); # "a2" + &punpckhqdq ($xa1,$xa2); # "a1" + &punpckhqdq ($xa3,$xt3); # "a3" + + #($xa2,$xt2)=($xt2,$xa2); + + &movdqu ($xt0,&QWP(64*0-128,$inp)); # load input + &movdqu ($xt1,&QWP(64*1-128,$inp)); + &movdqu ($xa2,&QWP(64*2-128,$inp)); + &movdqu ($xt3,&QWP(64*3-128,$inp)); + &lea ($inp,&QWP($i<192?16:(64*4-16*3),$inp)); + &pxor ($xt0,$xa0); + &movdqa ($xa0,&QWP($i+16*4-128,"ebx")) if ($i<192); + &pxor ($xt1,$xa1); + &movdqa ($xa1,&QWP($i+16*5-128,"ebx")) if ($i<192); + &pxor ($xt2,$xa2); + &movdqa ($xa2,&QWP($i+16*6-128,"ebx")) if ($i<192); + &pxor ($xt3,$xa3); + &movdqa ($xa3,&QWP($i+16*7-128,"ebx")) if ($i<192); + &movdqu (&QWP(64*0-128,$out),$xt0); # store output + &movdqu (&QWP(64*1-128,$out),$xt1); + &movdqu (&QWP(64*2-128,$out),$xt2); + &movdqu (&QWP(64*3-128,$out),$xt3); + &lea ($out,&QWP($i<192?16:(64*4-16*3),$out)); + } + &sub ($len,64*4); + &jnc (&label("outer_loop")); + + &add ($len,64*4); + &jz (&label("done")); + + &mov ("ebx",&DWP(512+8,"esp")); # restore pointers + &lea ($inp,&DWP(-128,$inp)); + &mov ("edx",&DWP(512+4,"esp")); + &lea ($out,&DWP(-128,$out)); + + &movd ("xmm2",&DWP(16*12-128,"ebp")); # counter value + &movdqu ("xmm3",&QWP(0,"ebx")); + &paddd ("xmm2",&QWP(16*6,"eax")); # +four + &pand ("xmm3",&QWP(16*7,"eax")); + &por ("xmm3","xmm2"); # counter value +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7)); + +sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot16); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,20); + &pslld ($t,12); + &por ($b,$t); + + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot24); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,25); + &pslld ($t,7); + &por ($b,$t); +} + +&set_label("1x"); + &movdqa ($a,&QWP(16*2,"eax")); # sigma + &movdqu ($b,&QWP(0,"edx")); + &movdqu ($c,&QWP(16,"edx")); + #&movdqu ($d,&QWP(0,"ebx")); # already loaded + &movdqa ($rot16,&QWP(0,"eax")); + &movdqa ($rot24,&QWP(16,"eax")); + &mov (&DWP(16*3,"esp"),"ebp"); + + &movdqa (&QWP(16*0,"esp"),$a); + 
&movdqa (&QWP(16*1,"esp"),$b); + &movdqa (&QWP(16*2,"esp"),$c); + &movdqa (&QWP(16*3,"esp"),$d); + &mov ("edx",10); + &jmp (&label("loop1x")); + +&set_label("outer1x",16); + &movdqa ($d,&QWP(16*5,"eax")); # one + &movdqa ($a,&QWP(16*0,"esp")); + &movdqa ($b,&QWP(16*1,"esp")); + &movdqa ($c,&QWP(16*2,"esp")); + &paddd ($d,&QWP(16*3,"esp")); + &mov ("edx",10); + &movdqa (&QWP(16*3,"esp"),$d); + &jmp (&label("loop1x")); + +&set_label("loop1x",16); + &SSSE3ROUND(); + &pshufd ($c,$c,0b01001110); + &pshufd ($b,$b,0b00111001); + &pshufd ($d,$d,0b10010011); + &nop (); + + &SSSE3ROUND(); + &pshufd ($c,$c,0b01001110); + &pshufd ($b,$b,0b10010011); + &pshufd ($d,$d,0b00111001); + + &dec ("edx"); + &jnz (&label("loop1x")); + + &paddd ($a,&QWP(16*0,"esp")); + &paddd ($b,&QWP(16*1,"esp")); + &paddd ($c,&QWP(16*2,"esp")); + &paddd ($d,&QWP(16*3,"esp")); + + &cmp ($len,64); + &jb (&label("tail")); + + &movdqu ($t,&QWP(16*0,$inp)); + &movdqu ($t1,&QWP(16*1,$inp)); + &pxor ($a,$t); # xor with input + &movdqu ($t,&QWP(16*2,$inp)); + &pxor ($b,$t1); + &movdqu ($t1,&QWP(16*3,$inp)); + &pxor ($c,$t); + &pxor ($d,$t1); + &lea ($inp,&DWP(16*4,$inp)); # inp+=64 + + &movdqu (&QWP(16*0,$out),$a); # write output + &movdqu (&QWP(16*1,$out),$b); + &movdqu (&QWP(16*2,$out),$c); + &movdqu (&QWP(16*3,$out),$d); + &lea ($out,&DWP(16*4,$out)); # inp+=64 + + &sub ($len,64); + &jnz (&label("outer1x")); + + &jmp (&label("done")); + +&set_label("tail"); + &movdqa (&QWP(16*0,"esp"),$a); + &movdqa (&QWP(16*1,"esp"),$b); + &movdqa (&QWP(16*2,"esp"),$c); + &movdqa (&QWP(16*3,"esp"),$d); + + &xor ("eax","eax"); + &xor ("edx","edx"); + &xor ("ebp","ebp"); + +&set_label("tail_loop"); + &movb ("al",&BP(0,"esp","ebp")); + &movb ("dl",&BP(0,$inp,"ebp")); + &lea ("ebp",&DWP(1,"ebp")); + &xor ("al","dl"); + &movb (&BP(-1,$out,"ebp"),"al"); + &dec ($len); + &jnz (&label("tail_loop")); +} +&set_label("done"); + &mov ("esp",&DWP(512,"esp")); +&function_end("ChaCha20_ssse3"); + +&align (64); +&set_label("ssse3_data"); +&data_byte(0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd); +&data_byte(0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe); +&data_word(0x61707865,0x3320646e,0x79622d32,0x6b206574); +&data_word(0,1,2,3); +&data_word(4,4,4,4); +&data_word(1,0,0,0); +&data_word(4,0,0,0); +&data_word(0,-1,-1,-1); +&align (64); +} +&asciz ("ChaCha20 for x86, CRYPTOGAMS by <appro\@openssl.org>"); + +if ($xmm) { +my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7)); +my ($out,$inp,$len)=("edi","esi","ecx"); + +sub QUARTERROUND_XOP { +my ($ai,$bi,$ci,$di,$i)=@_; +my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next +my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous + + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + + if ($i==0) { + my $j=4; + ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp)); + } elsif ($i==3) { + my $j=0; + ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn)); + } elsif ($i==4) { + my $j=4; + ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp)); + } elsif ($i==7) { + my $j=0; + ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn)); + } + + #&vpaddd ($xa,$xa,$xb); # see elsewhere + #&vpxor ($xd,$xd,$xa); # see elsewhere + &vmovdqa (&QWP(16*$cp-128,"ebx"),$xc_) if ($ai>0 && $ai<3); + &vprotd ($xd,$xd,16); + &vmovdqa (&QWP(16*$bp-128,"ebx"),$xb_) if ($i!=0); + &vpaddd ($xc,$xc,$xd); + &vmovdqa 
($xc_,&QWP(16*$cn-128,"ebx")) if ($ai>0 && $ai<3); + &vpxor ($xb,$i!=0?$xb:$xb_,$xc); + &vmovdqa ($xa_,&QWP(16*$an-128,"ebx")); + &vprotd ($xb,$xb,12); + &vmovdqa ($xb_,&QWP(16*$bn-128,"ebx")) if ($i<7); + &vpaddd ($xa,$xa,$xb); + &vmovdqa ($xd_,&QWP(16*$dn-128,"ebx")) if ($di!=$dn); + &vpxor ($xd,$xd,$xa); + &vpaddd ($xa_,$xa_,$xb_) if ($i<7); # elsewhere + &vprotd ($xd,$xd,8); + &vmovdqa (&QWP(16*$ai-128,"ebx"),$xa); + &vpaddd ($xc,$xc,$xd); + &vmovdqa (&QWP(16*$di-128,"ebx"),$xd) if ($di!=$dn); + &vpxor ($xb,$xb,$xc); + &vpxor ($xd_,$di==$dn?$xd:$xd_,$xa_) if ($i<7); # elsewhere + &vprotd ($xb,$xb,7); + + ($xa,$xa_)=($xa_,$xa); + ($xb,$xb_)=($xb_,$xb); + ($xc,$xc_)=($xc_,$xc); + ($xd,$xd_)=($xd_,$xd); +} + +&function_begin("ChaCha20_xop"); +&set_label("xop_shortcut"); + &mov ($out,&wparam(0)); + &mov ($inp,&wparam(1)); + &mov ($len,&wparam(2)); + &mov ("edx",&wparam(3)); # key + &mov ("ebx",&wparam(4)); # counter and nonce + &vzeroupper (); + + &mov ("ebp","esp"); + &stack_push (131); + &and ("esp",-64); + &mov (&DWP(512,"esp"),"ebp"); + + &lea ("eax",&DWP(&label("ssse3_data")."-". + &label("pic_point"),"eax")); + &vmovdqu ("xmm3",&QWP(0,"ebx")); # counter and nonce + + &cmp ($len,64*4); + &jb (&label("1x")); + + &mov (&DWP(512+4,"esp"),"edx"); # offload pointers + &mov (&DWP(512+8,"esp"),"ebx"); + &sub ($len,64*4); # bias len + &lea ("ebp",&DWP(256+128,"esp")); # size optimization + + &vmovdqu ("xmm7",&DWP(0,"edx")); # key + &vpshufd ("xmm0","xmm3",0x00); + &vpshufd ("xmm1","xmm3",0x55); + &vpshufd ("xmm2","xmm3",0xaa); + &vpshufd ("xmm3","xmm3",0xff); + &vpaddd ("xmm0","xmm0",&QWP(16*3,"eax")); # fix counters + &vpshufd ("xmm4","xmm7",0x00); + &vpshufd ("xmm5","xmm7",0x55); + &vpsubd ("xmm0","xmm0",&QWP(16*4,"eax")); + &vpshufd ("xmm6","xmm7",0xaa); + &vpshufd ("xmm7","xmm7",0xff); + &vmovdqa (&QWP(16*12-128,"ebp"),"xmm0"); + &vmovdqa (&QWP(16*13-128,"ebp"),"xmm1"); + &vmovdqa (&QWP(16*14-128,"ebp"),"xmm2"); + &vmovdqa (&QWP(16*15-128,"ebp"),"xmm3"); + &vmovdqu ("xmm3",&DWP(16,"edx")); # key + &vmovdqa (&QWP(16*4-128,"ebp"),"xmm4"); + &vmovdqa (&QWP(16*5-128,"ebp"),"xmm5"); + &vmovdqa (&QWP(16*6-128,"ebp"),"xmm6"); + &vmovdqa (&QWP(16*7-128,"ebp"),"xmm7"); + &vmovdqa ("xmm7",&DWP(16*2,"eax")); # sigma + &lea ("ebx",&DWP(128,"esp")); # size optimization + + &vpshufd ("xmm0","xmm3",0x00); + &vpshufd ("xmm1","xmm3",0x55); + &vpshufd ("xmm2","xmm3",0xaa); + &vpshufd ("xmm3","xmm3",0xff); + &vpshufd ("xmm4","xmm7",0x00); + &vpshufd ("xmm5","xmm7",0x55); + &vpshufd ("xmm6","xmm7",0xaa); + &vpshufd ("xmm7","xmm7",0xff); + &vmovdqa (&QWP(16*8-128,"ebp"),"xmm0"); + &vmovdqa (&QWP(16*9-128,"ebp"),"xmm1"); + &vmovdqa (&QWP(16*10-128,"ebp"),"xmm2"); + &vmovdqa (&QWP(16*11-128,"ebp"),"xmm3"); + &vmovdqa (&QWP(16*0-128,"ebp"),"xmm4"); + &vmovdqa (&QWP(16*1-128,"ebp"),"xmm5"); + &vmovdqa (&QWP(16*2-128,"ebp"),"xmm6"); + &vmovdqa (&QWP(16*3-128,"ebp"),"xmm7"); + + &lea ($inp,&DWP(128,$inp)); # size optimization + &lea ($out,&DWP(128,$out)); # size optimization + &jmp (&label("outer_loop")); + +&set_label("outer_loop",32); + #&vmovdqa ("xmm0",&QWP(16*0-128,"ebp")); # copy key material + &vmovdqa ("xmm1",&QWP(16*1-128,"ebp")); + &vmovdqa ("xmm2",&QWP(16*2-128,"ebp")); + &vmovdqa ("xmm3",&QWP(16*3-128,"ebp")); + #&vmovdqa ("xmm4",&QWP(16*4-128,"ebp")); + &vmovdqa ("xmm5",&QWP(16*5-128,"ebp")); + &vmovdqa ("xmm6",&QWP(16*6-128,"ebp")); + &vmovdqa ("xmm7",&QWP(16*7-128,"ebp")); + #&vmovdqa (&QWP(16*0-128,"ebx"),"xmm0"); + &vmovdqa (&QWP(16*1-128,"ebx"),"xmm1"); + &vmovdqa (&QWP(16*2-128,"ebx"),"xmm2"); + 
&vmovdqa (&QWP(16*3-128,"ebx"),"xmm3"); + #&vmovdqa (&QWP(16*4-128,"ebx"),"xmm4"); + &vmovdqa (&QWP(16*5-128,"ebx"),"xmm5"); + &vmovdqa (&QWP(16*6-128,"ebx"),"xmm6"); + &vmovdqa (&QWP(16*7-128,"ebx"),"xmm7"); + #&vmovdqa ("xmm0",&QWP(16*8-128,"ebp")); + #&vmovdqa ("xmm1",&QWP(16*9-128,"ebp")); + &vmovdqa ("xmm2",&QWP(16*10-128,"ebp")); + &vmovdqa ("xmm3",&QWP(16*11-128,"ebp")); + &vmovdqa ("xmm4",&QWP(16*12-128,"ebp")); + &vmovdqa ("xmm5",&QWP(16*13-128,"ebp")); + &vmovdqa ("xmm6",&QWP(16*14-128,"ebp")); + &vmovdqa ("xmm7",&QWP(16*15-128,"ebp")); + &vpaddd ("xmm4","xmm4",&QWP(16*4,"eax")); # counter value + #&vmovdqa (&QWP(16*8-128,"ebx"),"xmm0"); + #&vmovdqa (&QWP(16*9-128,"ebx"),"xmm1"); + &vmovdqa (&QWP(16*10-128,"ebx"),"xmm2"); + &vmovdqa (&QWP(16*11-128,"ebx"),"xmm3"); + &vmovdqa (&QWP(16*12-128,"ebx"),"xmm4"); + &vmovdqa (&QWP(16*13-128,"ebx"),"xmm5"); + &vmovdqa (&QWP(16*14-128,"ebx"),"xmm6"); + &vmovdqa (&QWP(16*15-128,"ebx"),"xmm7"); + &vmovdqa (&QWP(16*12-128,"ebp"),"xmm4"); # save counter value + + &vmovdqa ($xa, &QWP(16*0-128,"ebp")); + &vmovdqa ($xd, "xmm4"); + &vmovdqa ($xb_,&QWP(16*4-128,"ebp")); + &vmovdqa ($xc, &QWP(16*8-128,"ebp")); + &vmovdqa ($xc_,&QWP(16*9-128,"ebp")); + + &mov ("edx",10); # loop counter + &nop (); + +&set_label("loop",32); + &vpaddd ($xa,$xa,$xb_); # elsewhere + &vpxor ($xd,$xd,$xa); # elsewhere + &QUARTERROUND_XOP(0, 4, 8, 12, 0); + &QUARTERROUND_XOP(1, 5, 9, 13, 1); + &QUARTERROUND_XOP(2, 6,10, 14, 2); + &QUARTERROUND_XOP(3, 7,11, 15, 3); + &QUARTERROUND_XOP(0, 5,10, 15, 4); + &QUARTERROUND_XOP(1, 6,11, 12, 5); + &QUARTERROUND_XOP(2, 7, 8, 13, 6); + &QUARTERROUND_XOP(3, 4, 9, 14, 7); + &dec ("edx"); + &jnz (&label("loop")); + + &vmovdqa (&QWP(16*4-128,"ebx"),$xb_); + &vmovdqa (&QWP(16*8-128,"ebx"),$xc); + &vmovdqa (&QWP(16*9-128,"ebx"),$xc_); + &vmovdqa (&QWP(16*12-128,"ebx"),$xd); + &vmovdqa (&QWP(16*14-128,"ebx"),$xd_); + + my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7)); + + #&vmovdqa ($xa0,&QWP(16*0-128,"ebx")); # it's there + &vmovdqa ($xa1,&QWP(16*1-128,"ebx")); + &vmovdqa ($xa2,&QWP(16*2-128,"ebx")); + &vmovdqa ($xa3,&QWP(16*3-128,"ebx")); + + for($i=0;$i<256;$i+=64) { + &vpaddd ($xa0,$xa0,&QWP($i+16*0-128,"ebp")); # accumulate key material + &vpaddd ($xa1,$xa1,&QWP($i+16*1-128,"ebp")); + &vpaddd ($xa2,$xa2,&QWP($i+16*2-128,"ebp")); + &vpaddd ($xa3,$xa3,&QWP($i+16*3-128,"ebp")); + + &vpunpckldq ($xt2,$xa0,$xa1); # "de-interlace" data + &vpunpckldq ($xt3,$xa2,$xa3); + &vpunpckhdq ($xa0,$xa0,$xa1); + &vpunpckhdq ($xa2,$xa2,$xa3); + &vpunpcklqdq ($xa1,$xt2,$xt3); # "a0" + &vpunpckhqdq ($xt2,$xt2,$xt3); # "a1" + &vpunpcklqdq ($xt3,$xa0,$xa2); # "a2" + &vpunpckhqdq ($xa3,$xa0,$xa2); # "a3" + + &vpxor ($xt0,$xa1,&QWP(64*0-128,$inp)); + &vpxor ($xt1,$xt2,&QWP(64*1-128,$inp)); + &vpxor ($xt2,$xt3,&QWP(64*2-128,$inp)); + &vpxor ($xt3,$xa3,&QWP(64*3-128,$inp)); + &lea ($inp,&QWP($i<192?16:(64*4-16*3),$inp)); + &vmovdqa ($xa0,&QWP($i+16*4-128,"ebx")) if ($i<192); + &vmovdqa ($xa1,&QWP($i+16*5-128,"ebx")) if ($i<192); + &vmovdqa ($xa2,&QWP($i+16*6-128,"ebx")) if ($i<192); + &vmovdqa ($xa3,&QWP($i+16*7-128,"ebx")) if ($i<192); + &vmovdqu (&QWP(64*0-128,$out),$xt0); # store output + &vmovdqu (&QWP(64*1-128,$out),$xt1); + &vmovdqu (&QWP(64*2-128,$out),$xt2); + &vmovdqu (&QWP(64*3-128,$out),$xt3); + &lea ($out,&QWP($i<192?16:(64*4-16*3),$out)); + } + &sub ($len,64*4); + &jnc (&label("outer_loop")); + + &add ($len,64*4); + &jz (&label("done")); + + &mov ("ebx",&DWP(512+8,"esp")); # restore pointers + &lea ($inp,&DWP(-128,$inp)); + &mov 
("edx",&DWP(512+4,"esp")); + &lea ($out,&DWP(-128,$out)); + + &vmovd ("xmm2",&DWP(16*12-128,"ebp")); # counter value + &vmovdqu ("xmm3",&QWP(0,"ebx")); + &vpaddd ("xmm2","xmm2",&QWP(16*6,"eax"));# +four + &vpand ("xmm3","xmm3",&QWP(16*7,"eax")); + &vpor ("xmm3","xmm3","xmm2"); # counter value +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7)); + +sub XOPROUND { + &vpaddd ($a,$a,$b); + &vpxor ($d,$d,$a); + &vprotd ($d,$d,16); + + &vpaddd ($c,$c,$d); + &vpxor ($b,$b,$c); + &vprotd ($b,$b,12); + + &vpaddd ($a,$a,$b); + &vpxor ($d,$d,$a); + &vprotd ($d,$d,8); + + &vpaddd ($c,$c,$d); + &vpxor ($b,$b,$c); + &vprotd ($b,$b,7); +} + +&set_label("1x"); + &vmovdqa ($a,&QWP(16*2,"eax")); # sigma + &vmovdqu ($b,&QWP(0,"edx")); + &vmovdqu ($c,&QWP(16,"edx")); + #&vmovdqu ($d,&QWP(0,"ebx")); # already loaded + &vmovdqa ($rot16,&QWP(0,"eax")); + &vmovdqa ($rot24,&QWP(16,"eax")); + &mov (&DWP(16*3,"esp"),"ebp"); + + &vmovdqa (&QWP(16*0,"esp"),$a); + &vmovdqa (&QWP(16*1,"esp"),$b); + &vmovdqa (&QWP(16*2,"esp"),$c); + &vmovdqa (&QWP(16*3,"esp"),$d); + &mov ("edx",10); + &jmp (&label("loop1x")); + +&set_label("outer1x",16); + &vmovdqa ($d,&QWP(16*5,"eax")); # one + &vmovdqa ($a,&QWP(16*0,"esp")); + &vmovdqa ($b,&QWP(16*1,"esp")); + &vmovdqa ($c,&QWP(16*2,"esp")); + &vpaddd ($d,$d,&QWP(16*3,"esp")); + &mov ("edx",10); + &vmovdqa (&QWP(16*3,"esp"),$d); + &jmp (&label("loop1x")); + +&set_label("loop1x",16); + &XOPROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b00111001); + &vpshufd ($d,$d,0b10010011); + + &XOPROUND(); + &vpshufd ($c,$c,0b01001110); + &vpshufd ($b,$b,0b10010011); + &vpshufd ($d,$d,0b00111001); + + &dec ("edx"); + &jnz (&label("loop1x")); + + &vpaddd ($a,$a,&QWP(16*0,"esp")); + &vpaddd ($b,$b,&QWP(16*1,"esp")); + &vpaddd ($c,$c,&QWP(16*2,"esp")); + &vpaddd ($d,$d,&QWP(16*3,"esp")); + + &cmp ($len,64); + &jb (&label("tail")); + + &vpxor ($a,$a,&QWP(16*0,$inp)); # xor with input + &vpxor ($b,$b,&QWP(16*1,$inp)); + &vpxor ($c,$c,&QWP(16*2,$inp)); + &vpxor ($d,$d,&QWP(16*3,$inp)); + &lea ($inp,&DWP(16*4,$inp)); # inp+=64 + + &vmovdqu (&QWP(16*0,$out),$a); # write output + &vmovdqu (&QWP(16*1,$out),$b); + &vmovdqu (&QWP(16*2,$out),$c); + &vmovdqu (&QWP(16*3,$out),$d); + &lea ($out,&DWP(16*4,$out)); # inp+=64 + + &sub ($len,64); + &jnz (&label("outer1x")); + + &jmp (&label("done")); + +&set_label("tail"); + &vmovdqa (&QWP(16*0,"esp"),$a); + &vmovdqa (&QWP(16*1,"esp"),$b); + &vmovdqa (&QWP(16*2,"esp"),$c); + &vmovdqa (&QWP(16*3,"esp"),$d); + + &xor ("eax","eax"); + &xor ("edx","edx"); + &xor ("ebp","ebp"); + +&set_label("tail_loop"); + &movb ("al",&BP(0,"esp","ebp")); + &movb ("dl",&BP(0,$inp,"ebp")); + &lea ("ebp",&DWP(1,"ebp")); + &xor ("al","dl"); + &movb (&BP(-1,$out,"ebp"),"al"); + &dec ($len); + &jnz (&label("tail_loop")); +} +&set_label("done"); + &vzeroupper (); + &mov ("esp",&DWP(512,"esp")); +&function_end("ChaCha20_xop"); +} + +&asm_finish(); diff --git a/crypto/chacha/asm/chacha-x86_64.pl b/crypto/chacha/asm/chacha-x86_64.pl new file mode 100755 index 0000000..41dbef5 --- /dev/null +++ b/crypto/chacha/asm/chacha-x86_64.pl @@ -0,0 +1,2234 @@ +#!/usr/bin/env perl +# +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. 
+# ==================================================================== +# +# November 2014 +# +# ChaCha20 for x86_64. +# +# Performance in cycles per byte out of large buffer. +# +# IALU/gcc 4.8(i) 1xSSSE3/SSE2 4xSSSE3 8xAVX2 +# +# P4 9.48/+99% -/22.7(ii) - +# Core2 7.83/+55% 7.90/8.08 4.35 +# Westmere 7.19/+50% 5.60/6.70 3.00 +# Sandy Bridge 8.31/+42% 5.45/6.76 2.72 +# Ivy Bridge 6.71/+46% 5.40/6.49 2.41 +# Haswell 5.92/+43% 5.20/6.45 2.42 1.23 +# Silvermont 12.0/+33% 7.75/7.40 7.03(iii) +# Sledgehammer 7.28/+52% -/14.2(ii) - +# Bulldozer 9.66/+28% 9.85/11.1 3.06(iv) +# VIA Nano 10.5/+46% 6.72/8.60 6.05 +# +# (i) compared to older gcc 3.x one can observe >2x improvement on +# most platforms; +# (ii) as it can be seen, SSE2 performance is too low on legacy +# processors; NxSSE2 results are naturally better, but not +# impressively better than IALU ones, which is why you won't +# find SSE2 code below; +# (iii) this is not optimal result for Atom because of MSROM +# limitations, SSE2 can do better, but gain is considered too +# low to justify the [maintenance] effort; +# (iv) Bulldozer actually executes 4xXOP code path that delivers 2.20; + +$flavour = shift; +$output = shift; +if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } + +$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or +die "can't locate x86_64-xlate.pl"; + +if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` + =~ /GNU assembler version ([2-9]\.[0-9]+)/) { + $avx = ($1>=2.19) + ($1>=2.22); +} + +if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && + `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) { + $avx = ($1>=2.09) + ($1>=2.10); +} + +if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && + `ml64 2>&1` =~ /Version ([0-9]+)\./) { + $avx = ($1>=10) + ($1>=11); +} + +if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { + $avx = ($2>=3.0) + ($2>3.0); +} + +open OUT,"| \"$^X\" $xlate $flavour $output"; +*STDOUT=*OUT; + +# input parameter block +($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8"); + +$code.=<<___; +.text + +.extern OPENSSL_ia32cap_P + +.align 64 +.Lzero: +.long 0,0,0,0 +.Lone: +.long 1,0,0,0 +.Linc: +.long 0,1,2,3 +.Lfour: +.long 4,4,4,4 +.Lincy: +.long 0,2,4,6,1,3,5,7 +.Leight: +.long 8,8,8,8,8,8,8,8 +.Lrot16: +.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd +.Lrot24: +.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe +.Lsigma: +.asciz "expand 32-byte k" +.asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>" +___ + +sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; + my $arg = pop; + $arg = "\$$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; +} + +@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)), + "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15))); +@t=("%esi","%edi"); + +sub ROUND { # critical path is 24 cycles per round +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_)=map("\"$_\"",@t); +my @x=map("\"$_\"",@x); + + # Consider order in which variables are addressed by their 
+ # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. + + # Normally instructions would be interleaved to favour in-order + # execution. Generally out-of-order cores manage it gracefully, + # but not this time for some reason. As in-order execution + # cores are dying breed, old Atom is the only one around, + # instructions are left uninterleaved. Besides, Atom is better + # off executing 1xSSSE3 code anyway... + + ( + "&add (@x[$a0],@x[$b0])", # Q1 + "&xor (@x[$d0],@x[$a0])", + "&rol (@x[$d0],16)", + "&add (@x[$a1],@x[$b1])", # Q2 + "&xor (@x[$d1],@x[$a1])", + "&rol (@x[$d1],16)", + + "&add ($xc,@x[$d0])", + "&xor (@x[$b0],$xc)", + "&rol (@x[$b0],12)", + "&add ($xc_,@x[$d1])", + "&xor (@x[$b1],$xc_)", + "&rol (@x[$b1],12)", + + "&add (@x[$a0],@x[$b0])", + "&xor (@x[$d0],@x[$a0])", + "&rol (@x[$d0],8)", + "&add (@x[$a1],@x[$b1])", + "&xor (@x[$d1],@x[$a1])", + "&rol (@x[$d1],8)", + + "&add ($xc,@x[$d0])", + "&xor (@x[$b0],$xc)", + "&rol (@x[$b0],7)", + "&add ($xc_,@x[$d1])", + "&xor (@x[$b1],$xc_)", + "&rol (@x[$b1],7)", + + "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's + "&mov (\"4*$c1(%rsp)\",$xc_)", + "&mov ($xc,\"4*$c2(%rsp)\")", + "&mov ($xc_,\"4*$c3(%rsp)\")", + + "&add (@x[$a2],@x[$b2])", # Q3 + "&xor (@x[$d2],@x[$a2])", + "&rol (@x[$d2],16)", + "&add (@x[$a3],@x[$b3])", # Q4 + "&xor (@x[$d3],@x[$a3])", + "&rol (@x[$d3],16)", + + "&add ($xc,@x[$d2])", + "&xor (@x[$b2],$xc)", + "&rol (@x[$b2],12)", + "&add ($xc_,@x[$d3])", + "&xor (@x[$b3],$xc_)", + "&rol (@x[$b3],12)", + + "&add (@x[$a2],@x[$b2])", + "&xor (@x[$d2],@x[$a2])", + "&rol (@x[$d2],8)", + "&add (@x[$a3],@x[$b3])", + "&xor (@x[$d3],@x[$a3])", + "&rol (@x[$d3],8)", + + "&add ($xc,@x[$d2])", + "&xor (@x[$b2],$xc)", + "&rol (@x[$b2],7)", + "&add ($xc_,@x[$d3])", + "&xor (@x[$b3],$xc_)", + "&rol (@x[$b3],7)" + ); +} + +######################################################################## +# Generic code path that handles all lengths on pre-SSSE3 processors. 
+$code.=<<___; +.globl ChaCha20_ctr32 +.type ChaCha20_ctr32,\@function,5 +.align 64 +ChaCha20_ctr32: + mov OPENSSL_ia32cap_P+4(%rip),%r10 + test \$`1<<(41-32)`,%r10d + jnz .LChaCha20_ssse3 + + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + sub \$64+24,%rsp + + #movdqa .Lsigma(%rip),%xmm0 + movdqu ($key),%xmm1 + movdqu 16($key),%xmm2 + movdqu ($counter),%xmm3 + movdqa .Lone(%rip),%xmm4 + + #movdqa %xmm0,4*0(%rsp) # key[0] + movdqa %xmm1,4*4(%rsp) # key[1] + movdqa %xmm2,4*8(%rsp) # key[2] + movdqa %xmm3,4*12(%rsp) # key[3] + mov $len,%rbp # reassign $len + jmp .Loop_outer + +.align 32 +.Loop_outer: + mov \$0x61707865,@x[0] # 'expa' + mov \$0x3320646e,@x[1] # 'nd 3' + mov \$0x79622d32,@x[2] # '2-by' + mov \$0x6b206574,@x[3] # 'te k' + mov 4*4(%rsp),@x[4] + mov 4*5(%rsp),@x[5] + mov 4*6(%rsp),@x[6] + mov 4*7(%rsp),@x[7] + movd %xmm3,@x[12] + mov 4*13(%rsp),@x[13] + mov 4*14(%rsp),@x[14] + mov 4*15(%rsp),@x[15] + + mov %rbp,64+0(%rsp) # save len + mov \$10,%ebp + mov $inp,64+8(%rsp) # save inp + movq %xmm2,%rsi # "@x[8]" + mov $out,64+16(%rsp) # save out + mov %rsi,%rdi + shr \$32,%rdi # "@x[9]" + jmp .Loop + +.align 32 +.Loop: +___ + foreach (&ROUND (0, 4, 8,12)) { eval; } + foreach (&ROUND (0, 5,10,15)) { eval; } + &dec ("%ebp"); + &jnz (".Loop"); + +$code.=<<___; + mov @t[1],4*9(%rsp) # modulo-scheduled + mov @t[0],4*8(%rsp) + mov 64(%rsp),%rbp # load len + movdqa %xmm2,%xmm1 + mov 64+8(%rsp),$inp # load inp + paddd %xmm4,%xmm3 # increment counter + mov 64+16(%rsp),$out # load out + + add \$0x61707865,@x[0] # 'expa' + add \$0x3320646e,@x[1] # 'nd 3' + add \$0x79622d32,@x[2] # '2-by' + add \$0x6b206574,@x[3] # 'te k' + add 4*4(%rsp),@x[4] + add 4*5(%rsp),@x[5] + add 4*6(%rsp),@x[6] + add 4*7(%rsp),@x[7] + add 4*12(%rsp),@x[12] + add 4*13(%rsp),@x[13] + add 4*14(%rsp),@x[14] + add 4*15(%rsp),@x[15] + paddd 4*8(%rsp),%xmm1 + + cmp \$64,%rbp + jb .Ltail + + xor 4*0($inp),@x[0] # xor with input + xor 4*1($inp),@x[1] + xor 4*2($inp),@x[2] + xor 4*3($inp),@x[3] + xor 4*4($inp),@x[4] + xor 4*5($inp),@x[5] + xor 4*6($inp),@x[6] + xor 4*7($inp),@x[7] + movdqu 4*8($inp),%xmm0 + xor 4*12($inp),@x[12] + xor 4*13($inp),@x[13] + xor 4*14($inp),@x[14] + xor 4*15($inp),@x[15] + lea 4*16($inp),$inp # inp+=64 + pxor %xmm1,%xmm0 + + movdqa %xmm2,4*8(%rsp) + movd %xmm3,4*12(%rsp) + + mov @x[0],4*0($out) # write output + mov @x[1],4*1($out) + mov @x[2],4*2($out) + mov @x[3],4*3($out) + mov @x[4],4*4($out) + mov @x[5],4*5($out) + mov @x[6],4*6($out) + mov @x[7],4*7($out) + movdqu %xmm0,4*8($out) + mov @x[12],4*12($out) + mov @x[13],4*13($out) + mov @x[14],4*14($out) + mov @x[15],4*15($out) + lea 4*16($out),$out # out+=64 + + sub \$64,%rbp + jnz .Loop_outer + + jmp .Ldone + +.align 16 +.Ltail: + mov @x[0],4*0(%rsp) + xor %rbx,%rbx + mov @x[1],4*1(%rsp) + mov @x[2],4*2(%rsp) + mov @x[3],4*3(%rsp) + mov @x[4],4*4(%rsp) + mov @x[5],4*5(%rsp) + mov @x[6],4*6(%rsp) + mov @x[7],4*7(%rsp) + movdqa %xmm1,4*8(%rsp) + mov @x[12],4*12(%rsp) + mov @x[13],4*13(%rsp) + mov @x[14],4*14(%rsp) + mov @x[15],4*15(%rsp) + +.Loop_tail: + movzb ($inp,%rbx),%eax + movzb (%rsp,%rbx),%edx + lea 1(%rbx),%rbx + xor %edx,%eax + mov %al,-1($out,%rbx) + dec %rbp + jnz .Loop_tail + +.Ldone: + add \$64+24,%rsp + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + ret +.size ChaCha20_ctr32,.-ChaCha20_ctr32 +___ + +######################################################################## +# SSSE3 code path that handles shorter lengths +{ +my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7)); + +sub 
SSSE3ROUND { # critical path is 20 "SIMD ticks" per round + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot16); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,20); + &pslld ($t,12); + &por ($b,$t); + + &paddd ($a,$b); + &pxor ($d,$a); + &pshufb ($d,$rot24); + + &paddd ($c,$d); + &pxor ($b,$c); + &movdqa ($t,$b); + &psrld ($b,25); + &pslld ($t,7); + &por ($b,$t); +} + +my $xframe = $win64 ? 32+32+8 : 24; + +$code.=<<___; +.type ChaCha20_ssse3,\@function,5 +.align 32 +ChaCha20_ssse3: +.LChaCha20_ssse3: +___ +$code.=<<___ if ($avx); + test \$`1<<(43-32)`,%r10d + jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4 +___ +$code.=<<___; + cmp \$128,$len # we might throw away some data, + ja .LChaCha20_4x # but overall it won't be slower + +.Ldo_sse3_after_all: + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + + sub \$64+$xframe,%rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,64+32(%rsp) + movaps %xmm7,64+48(%rsp) +___ +$code.=<<___; + movdqa .Lsigma(%rip),$a + movdqu ($key),$b + movdqu 16($key),$c + movdqu ($counter),$d + movdqa .Lrot16(%rip),$rot16 + movdqa .Lrot24(%rip),$rot24 + + movdqa $a,0x00(%rsp) + movdqa $b,0x10(%rsp) + movdqa $c,0x20(%rsp) + movdqa $d,0x30(%rsp) + mov \$10,%ebp + jmp .Loop_ssse3 + +.align 32 +.Loop_outer_ssse3: + movdqa .Lone(%rip),$d + movdqa 0x00(%rsp),$a + movdqa 0x10(%rsp),$b + movdqa 0x20(%rsp),$c + paddd 0x30(%rsp),$d + mov \$10,%ebp + movdqa $d,0x30(%rsp) + jmp .Loop_ssse3 + +.align 32 +.Loop_ssse3: +___ + &SSSE3ROUND(); + &pshufd ($c,$c,0b01001110); + &pshufd ($b,$b,0b00111001); + &pshufd ($d,$d,0b10010011); + &nop (); + + &SSSE3ROUND(); + &pshufd ($c,$c,0b01001110); + &pshufd ($b,$b,0b10010011); + &pshufd ($d,$d,0b00111001); + + &dec ("%ebp"); + &jnz (".Loop_ssse3"); + +$code.=<<___; + paddd 0x00(%rsp),$a + paddd 0x10(%rsp),$b + paddd 0x20(%rsp),$c + paddd 0x30(%rsp),$d + + cmp \$64,$len + jb .Ltail_ssse3 + + movdqu 0x00($inp),$t + movdqu 0x10($inp),$t1 + pxor $t,$a # xor with input + movdqu 0x20($inp),$t + pxor $t1,$b + movdqu 0x30($inp),$t1 + lea 0x40($inp),$inp # inp+=64 + pxor $t,$c + pxor $t1,$d + + movdqu $a,0x00($out) # write output + movdqu $b,0x10($out) + movdqu $c,0x20($out) + movdqu $d,0x30($out) + lea 0x40($out),$out # out+=64 + + sub \$64,$len + jnz .Loop_outer_ssse3 + + jmp .Ldone_ssse3 + +.align 16 +.Ltail_ssse3: + movdqa $a,0x00(%rsp) + movdqa $b,0x10(%rsp) + movdqa $c,0x20(%rsp) + movdqa $d,0x30(%rsp) + xor %rbx,%rbx + +.Loop_tail_ssse3: + movzb ($inp,%rbx),%eax + movzb (%rsp,%rbx),%edx + lea 1(%rbx),%rbx + xor %edx,%eax + mov %al,-1($out,%rbx) + inc %rbp + jnz .Loop_tail_ssse3 + +.Ldone_ssse3: +___ +$code.=<<___ if ($win64); + movaps 64+32(%rsp),%xmm6 + movaps 64+48(%rsp),%xmm7 +___ +$code.=<<___; + add \$64+$xframe,%rsp + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + ret +.size ChaCha20_ssse3,.-ChaCha20_ssse3 +___ +} + +######################################################################## +# SSSE3 code path that handles longer messages. 
+{ +# assign variables to favor Atom front-end +my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3, + $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3); + +sub SSSE3_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3); +my @x=map("\"$_\"",@xx); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. + + ( + "&paddd (@x[$a0],@x[$b0])", # Q1 + "&paddd (@x[$a1],@x[$b1])", # Q2 + "&pxor (@x[$d0],@x[$a0])", + "&pxor (@x[$d1],@x[$a1])", + "&pshufb (@x[$d0],$t1)", + "&pshufb (@x[$d1],$t1)", + + "&paddd ($xc,@x[$d0])", + "&paddd ($xc_,@x[$d1])", + "&pxor (@x[$b0],$xc)", + "&pxor (@x[$b1],$xc_)", + "&movdqa ($t0,@x[$b0])", + "&pslld (@x[$b0],12)", + "&psrld ($t0,20)", + "&movdqa ($t1,@x[$b1])", + "&pslld (@x[$b1],12)", + "&por (@x[$b0],$t0)", + "&psrld ($t1,20)", + "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip) + "&por (@x[$b1],$t1)", + + "&paddd (@x[$a0],@x[$b0])", + "&paddd (@x[$a1],@x[$b1])", + "&pxor (@x[$d0],@x[$a0])", + "&pxor (@x[$d1],@x[$a1])", + "&pshufb (@x[$d0],$t0)", + "&pshufb (@x[$d1],$t0)", + + "&paddd ($xc,@x[$d0])", + "&paddd ($xc_,@x[$d1])", + "&pxor (@x[$b0],$xc)", + "&pxor (@x[$b1],$xc_)", + "&movdqa ($t1,@x[$b0])", + "&pslld (@x[$b0],7)", + "&psrld ($t1,25)", + "&movdqa ($t0,@x[$b1])", + "&pslld (@x[$b1],7)", + "&por (@x[$b0],$t1)", + "&psrld ($t0,25)", + "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip) + "&por (@x[$b1],$t0)", + + "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's + "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)", + "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")", + "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")", + + "&paddd (@x[$a2],@x[$b2])", # Q3 + "&paddd (@x[$a3],@x[$b3])", # Q4 + "&pxor (@x[$d2],@x[$a2])", + "&pxor (@x[$d3],@x[$a3])", + "&pshufb (@x[$d2],$t1)", + "&pshufb (@x[$d3],$t1)", + + "&paddd ($xc,@x[$d2])", + "&paddd ($xc_,@x[$d3])", + "&pxor (@x[$b2],$xc)", + "&pxor (@x[$b3],$xc_)", + "&movdqa ($t0,@x[$b2])", + "&pslld (@x[$b2],12)", + "&psrld ($t0,20)", + "&movdqa ($t1,@x[$b3])", + "&pslld (@x[$b3],12)", + "&por (@x[$b2],$t0)", + "&psrld ($t1,20)", + "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip) + "&por (@x[$b3],$t1)", + + "&paddd (@x[$a2],@x[$b2])", + "&paddd (@x[$a3],@x[$b3])", + "&pxor (@x[$d2],@x[$a2])", + "&pxor (@x[$d3],@x[$a3])", + "&pshufb (@x[$d2],$t0)", + "&pshufb (@x[$d3],$t0)", + + "&paddd ($xc,@x[$d2])", + "&paddd ($xc_,@x[$d3])", + "&pxor (@x[$b2],$xc)", + "&pxor (@x[$b3],$xc_)", + "&movdqa ($t1,@x[$b2])", + "&pslld (@x[$b2],7)", + "&psrld ($t1,25)", + "&movdqa ($t0,@x[$b3])", + "&pslld (@x[$b3],7)", + "&por (@x[$b2],$t1)", + "&psrld ($t0,25)", + "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip) + "&por (@x[$b3],$t0)" + ); +} + +my $xframe = $win64 ? 
0xa0 : 0; + +$code.=<<___; +.type ChaCha20_4x,\@function,5 +.align 32 +ChaCha20_4x: +.LChaCha20_4x: + mov %r10,%r11 +___ +$code.=<<___ if ($avx>1); + shr \$32,%r10 # OPENSSL_ia32cap_P+8 + test \$`1<<5`,%r10 # test AVX2 + jnz .LChaCha20_8x +___ +$code.=<<___; + cmp \$192,$len + ja .Lproceed4x + + and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE + cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE + je .Ldo_sse3_after_all # to detect Atom + +.Lproceed4x: + lea -0x78(%rsp),%r11 + sub \$0x148+$xframe,%rsp +___ + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x40 constant copy of key[0-2] smashed by lanes + # ... + # +0x100 SIMD counters (with nonce smashed by lanes) + # ... + # +0x140 +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r11) + movaps %xmm7,-0x20(%r11) + movaps %xmm8,-0x10(%r11) + movaps %xmm9,0x00(%r11) + movaps %xmm10,0x10(%r11) + movaps %xmm11,0x20(%r11) + movaps %xmm12,0x30(%r11) + movaps %xmm13,0x40(%r11) + movaps %xmm14,0x50(%r11) + movaps %xmm15,0x60(%r11) +___ +$code.=<<___; + movdqa .Lsigma(%rip),$xa3 # key[0] + movdqu ($key),$xb3 # key[1] + movdqu 16($key),$xt3 # key[2] + movdqu ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + lea .Lrot16(%rip),%r10 + lea .Lrot24(%rip),%r11 + + pshufd \$0x00,$xa3,$xa0 # smash key by lanes... + pshufd \$0x55,$xa3,$xa1 + movdqa $xa0,0x40(%rsp) # ... and offload + pshufd \$0xaa,$xa3,$xa2 + movdqa $xa1,0x50(%rsp) + pshufd \$0xff,$xa3,$xa3 + movdqa $xa2,0x60(%rsp) + movdqa $xa3,0x70(%rsp) + + pshufd \$0x00,$xb3,$xb0 + pshufd \$0x55,$xb3,$xb1 + movdqa $xb0,0x80-0x100(%rcx) + pshufd \$0xaa,$xb3,$xb2 + movdqa $xb1,0x90-0x100(%rcx) + pshufd \$0xff,$xb3,$xb3 + movdqa $xb2,0xa0-0x100(%rcx) + movdqa $xb3,0xb0-0x100(%rcx) + + pshufd \$0x00,$xt3,$xt0 # "$xc0" + pshufd \$0x55,$xt3,$xt1 # "$xc1" + movdqa $xt0,0xc0-0x100(%rcx) + pshufd \$0xaa,$xt3,$xt2 # "$xc2" + movdqa $xt1,0xd0-0x100(%rcx) + pshufd \$0xff,$xt3,$xt3 # "$xc3" + movdqa $xt2,0xe0-0x100(%rcx) + movdqa $xt3,0xf0-0x100(%rcx) + + pshufd \$0x00,$xd3,$xd0 + pshufd \$0x55,$xd3,$xd1 + paddd .Linc(%rip),$xd0 # don't save counters yet + pshufd \$0xaa,$xd3,$xd2 + movdqa $xd1,0x110-0x100(%rcx) + pshufd \$0xff,$xd3,$xd3 + movdqa $xd2,0x120-0x100(%rcx) + movdqa $xd3,0x130-0x100(%rcx) + + jmp .Loop_enter4x + +.align 32 +.Loop_outer4x: + movdqa 0x40(%rsp),$xa0 # re-load smashed key + movdqa 0x50(%rsp),$xa1 + movdqa 0x60(%rsp),$xa2 + movdqa 0x70(%rsp),$xa3 + movdqa 0x80-0x100(%rcx),$xb0 + movdqa 0x90-0x100(%rcx),$xb1 + movdqa 0xa0-0x100(%rcx),$xb2 + movdqa 0xb0-0x100(%rcx),$xb3 + movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0" + movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1" + movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2" + movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3" + movdqa 0x100-0x100(%rcx),$xd0 + movdqa 0x110-0x100(%rcx),$xd1 + movdqa 0x120-0x100(%rcx),$xd2 + movdqa 0x130-0x100(%rcx),$xd3 + paddd .Lfour(%rip),$xd0 # next SIMD counters + +.Loop_enter4x: + movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]" + movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]" + movdqa (%r10),$xt3 # .Lrot16(%rip) + mov \$10,%eax + movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters + jmp .Loop4x + +.align 32 +.Loop4x: +___ + foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop4x + + paddd 0x40(%rsp),$xa0 # accumulate key material + paddd 0x50(%rsp),$xa1 + paddd 0x60(%rsp),$xa2 + paddd 0x70(%rsp),$xa3 + + movdqa $xa0,$xt2 # "de-interlace" data + punpckldq $xa1,$xa0 + movdqa $xa2,$xt3 + punpckldq $xa3,$xa2 + 
punpckhdq $xa1,$xt2 + punpckhdq $xa3,$xt3 + movdqa $xa0,$xa1 + punpcklqdq $xa2,$xa0 # "a0" + movdqa $xt2,$xa3 + punpcklqdq $xt3,$xt2 # "a2" + punpckhqdq $xa2,$xa1 # "a1" + punpckhqdq $xt3,$xa3 # "a3" +___ + ($xa2,$xt2)=($xt2,$xa2); +$code.=<<___; + paddd 0x80-0x100(%rcx),$xb0 + paddd 0x90-0x100(%rcx),$xb1 + paddd 0xa0-0x100(%rcx),$xb2 + paddd 0xb0-0x100(%rcx),$xb3 + + movdqa $xa0,0x00(%rsp) # offload $xaN + movdqa $xa1,0x10(%rsp) + movdqa 0x20(%rsp),$xa0 # "xc2" + movdqa 0x30(%rsp),$xa1 # "xc3" + + movdqa $xb0,$xt2 + punpckldq $xb1,$xb0 + movdqa $xb2,$xt3 + punpckldq $xb3,$xb2 + punpckhdq $xb1,$xt2 + punpckhdq $xb3,$xt3 + movdqa $xb0,$xb1 + punpcklqdq $xb2,$xb0 # "b0" + movdqa $xt2,$xb3 + punpcklqdq $xt3,$xt2 # "b2" + punpckhqdq $xb2,$xb1 # "b1" + punpckhqdq $xt3,$xb3 # "b3" +___ + ($xb2,$xt2)=($xt2,$xb2); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + paddd 0xc0-0x100(%rcx),$xc0 + paddd 0xd0-0x100(%rcx),$xc1 + paddd 0xe0-0x100(%rcx),$xc2 + paddd 0xf0-0x100(%rcx),$xc3 + + movdqa $xa2,0x20(%rsp) # keep offloading $xaN + movdqa $xa3,0x30(%rsp) + + movdqa $xc0,$xt2 + punpckldq $xc1,$xc0 + movdqa $xc2,$xt3 + punpckldq $xc3,$xc2 + punpckhdq $xc1,$xt2 + punpckhdq $xc3,$xt3 + movdqa $xc0,$xc1 + punpcklqdq $xc2,$xc0 # "c0" + movdqa $xt2,$xc3 + punpcklqdq $xt3,$xt2 # "c2" + punpckhqdq $xc2,$xc1 # "c1" + punpckhqdq $xt3,$xc3 # "c3" +___ + ($xc2,$xt2)=($xt2,$xc2); + ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary +$code.=<<___; + paddd 0x100-0x100(%rcx),$xd0 + paddd 0x110-0x100(%rcx),$xd1 + paddd 0x120-0x100(%rcx),$xd2 + paddd 0x130-0x100(%rcx),$xd3 + + movdqa $xd0,$xt2 + punpckldq $xd1,$xd0 + movdqa $xd2,$xt3 + punpckldq $xd3,$xd2 + punpckhdq $xd1,$xt2 + punpckhdq $xd3,$xt3 + movdqa $xd0,$xd1 + punpcklqdq $xd2,$xd0 # "d0" + movdqa $xt2,$xd3 + punpcklqdq $xt3,$xt2 # "d2" + punpckhqdq $xd2,$xd1 # "d1" + punpckhqdq $xt3,$xd3 # "d3" +___ + ($xd2,$xt2)=($xt2,$xd2); +$code.=<<___; + cmp \$64*4,$len + jb .Ltail4x + + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # size optimization + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + + movdqu $xt0,0x40($out) + movdqu 0x00($inp),$xt0 + movdqu $xt1,0x50($out) + movdqu 0x10($inp),$xt1 + movdqu $xt2,0x60($out) + movdqu 0x20($inp),$xt2 + movdqu $xt3,0x70($out) + lea 0x80($out),$out # size optimization + movdqu 0x30($inp),$xt3 + pxor 0x20(%rsp),$xt0 + pxor $xb2,$xt1 + pxor $xc2,$xt2 + pxor $xd2,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # inp+=64*4 + pxor 0x30(%rsp),$xt0 + pxor $xb3,$xt1 + pxor $xc3,$xt2 + pxor $xd3,$xt3 + movdqu $xt0,0x40($out) + movdqu $xt1,0x50($out) + movdqu $xt2,0x60($out) + movdqu $xt3,0x70($out) + lea 0x80($out),$out # out+=64*4 + + sub \$64*4,$len + jnz .Loop_outer4x + + jmp .Ldone4x + +.Ltail4x: + cmp \$192,$len + jae .L192_or_more4x + cmp \$128,$len + jae .L128_or_more4x + cmp \$64,$len + jae .L64_or_more4x + + #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember? 
+ xor %r10,%r10 + #movdqa $xt0,0x00(%rsp) + movdqa $xb0,0x10(%rsp) + movdqa $xc0,0x20(%rsp) + movdqa $xd0,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L64_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaxN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + movdqu $xt0,0x00($out) + movdqu $xt1,0x10($out) + movdqu $xt2,0x20($out) + movdqu $xt3,0x30($out) + je .Ldone4x + + movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x40($inp),$inp # inp+=64*1 + xor %r10,%r10 + movdqa $xt0,0x00(%rsp) + movdqa $xb1,0x10(%rsp) + lea 0x40($out),$out # out+=64*1 + movdqa $xc1,0x20(%rsp) + sub \$64,$len # len-=64*1 + movdqa $xd1,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L128_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + movdqu $xt0,0x40($out) + movdqu $xt1,0x50($out) + movdqu $xt2,0x60($out) + movdqu $xt3,0x70($out) + je .Ldone4x + + movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember? + lea 0x80($inp),$inp # inp+=64*2 + xor %r10,%r10 + movdqa $xt0,0x00(%rsp) + movdqa $xb2,0x10(%rsp) + lea 0x80($out),$out # out+=64*2 + movdqa $xc2,0x20(%rsp) + sub \$128,$len # len-=64*2 + movdqa $xd2,0x30(%rsp) + jmp .Loop_tail4x + +.align 32 +.L192_or_more4x: + movdqu 0x00($inp),$xt0 # xor with input + movdqu 0x10($inp),$xt1 + movdqu 0x20($inp),$xt2 + movdqu 0x30($inp),$xt3 + pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember? + pxor $xb0,$xt1 + pxor $xc0,$xt2 + pxor $xd0,$xt3 + + movdqu $xt0,0x00($out) + movdqu 0x40($inp),$xt0 + movdqu $xt1,0x10($out) + movdqu 0x50($inp),$xt1 + movdqu $xt2,0x20($out) + movdqu 0x60($inp),$xt2 + movdqu $xt3,0x30($out) + movdqu 0x70($inp),$xt3 + lea 0x80($inp),$inp # size optimization + pxor 0x10(%rsp),$xt0 + pxor $xb1,$xt1 + pxor $xc1,$xt2 + pxor $xd1,$xt3 + + movdqu $xt0,0x40($out) + movdqu 0x00($inp),$xt0 + movdqu $xt1,0x50($out) + movdqu 0x10($inp),$xt1 + movdqu $xt2,0x60($out) + movdqu 0x20($inp),$xt2 + movdqu $xt3,0x70($out) + lea 0x80($out),$out # size optimization + movdqu 0x30($inp),$xt3 + pxor 0x20(%rsp),$xt0 + pxor $xb2,$xt1 + pxor $xc2,$xt2 + pxor $xd2,$xt3 + movdqu $xt0,0x00($out) + movdqu $xt1,0x10($out) + movdqu $xt2,0x20($out) + movdqu $xt3,0x30($out) + je .Ldone4x + + movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember? 
+ lea 0x40($inp),$inp # inp+=64*3 + xor %r10,%r10 + movdqa $xt0,0x00(%rsp) + movdqa $xb3,0x10(%rsp) + lea 0x40($out),$out # out+=64*3 + movdqa $xc3,0x20(%rsp) + sub \$192,$len # len-=64*3 + movdqa $xd3,0x30(%rsp) + +.Loop_tail4x: + movzb ($inp,%r10),%eax + movzb (%rsp,%r10),%ecx + lea 1(%r10),%r10 + xor %ecx,%eax + mov %al,-1($out,%r10) + dec $len + jnz .Loop_tail4x + +.Ldone4x: +___ +$code.=<<___ if ($win64); + lea 0x140+0x30(%rsp),%r11 + movaps -0x30(%r11),%xmm6 + movaps -0x20(%r11),%xmm7 + movaps -0x10(%r11),%xmm8 + movaps 0x00(%r11),%xmm9 + movaps 0x10(%r11),%xmm10 + movaps 0x20(%r11),%xmm11 + movaps 0x30(%r11),%xmm12 + movaps 0x40(%r11),%xmm13 + movaps 0x50(%r11),%xmm14 + movaps 0x60(%r11),%xmm15 +___ +$code.=<<___; + add \$0x148+$xframe,%rsp + ret +.size ChaCha20_4x,.-ChaCha20_4x +___ +} + +######################################################################## +# XOP code path that handles all lengths. +if ($avx) { +# There is some "anomaly" observed depending on instructions' size or +# alignment. If you look closely at below code you'll notice that +# sometimes argument order varies. The order affects instruction +# encoding by making it larger, and such fiddling gives 5% performance +# improvement. This is on FX-4100... + +my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3, + $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3); + +sub XOP_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my @x=map("\"$_\"",@xx); + + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vprotd (@x[$d0],@x[$d0],16)", + "&vprotd (@x[$d1],@x[$d1],16)", + "&vprotd (@x[$d2],@x[$d2],16)", + "&vprotd (@x[$d3],@x[$d3],16)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxor (@x[$b0],@x[$c0],@x[$b0])", + "&vpxor (@x[$b1],@x[$c1],@x[$b1])", + "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip + "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip + "&vprotd (@x[$b0],@x[$b0],12)", + "&vprotd (@x[$b1],@x[$b1],12)", + "&vprotd (@x[$b2],@x[$b2],12)", + "&vprotd (@x[$b3],@x[$b3],12)", + + "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip + "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vprotd (@x[$d0],@x[$d0],8)", + "&vprotd (@x[$d1],@x[$d1],8)", + "&vprotd (@x[$d2],@x[$d2],8)", + "&vprotd (@x[$d3],@x[$d3],8)", + + "&vpaddd (@x[$c0],@x[$c0],@x[$d0])", + "&vpaddd (@x[$c1],@x[$c1],@x[$d1])", + "&vpaddd (@x[$c2],@x[$c2],@x[$d2])", + "&vpaddd (@x[$c3],@x[$c3],@x[$d3])", + "&vpxor (@x[$b0],@x[$c0],@x[$b0])", + "&vpxor (@x[$b1],@x[$c1],@x[$b1])", + "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip + "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip + "&vprotd (@x[$b0],@x[$b0],7)", + "&vprotd (@x[$b1],@x[$b1],7)", + "&vprotd (@x[$b2],@x[$b2],7)", + "&vprotd 
(@x[$b3],@x[$b3],7)" + ); +} + +my $xframe = $win64 ? 0xa0 : 0; + +$code.=<<___; +.type ChaCha20_4xop,\@function,5 +.align 32 +ChaCha20_4xop: +.LChaCha20_4xop: + lea -0x78(%rsp),%r11 + sub \$0x148+$xframe,%rsp +___ + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x40 constant copy of key[0-2] smashed by lanes + # ... + # +0x100 SIMD counters (with nonce smashed by lanes) + # ... + # +0x140 +$code.=<<___ if ($win64); + movaps %xmm6,-0x30(%r11) + movaps %xmm7,-0x20(%r11) + movaps %xmm8,-0x10(%r11) + movaps %xmm9,0x00(%r11) + movaps %xmm10,0x10(%r11) + movaps %xmm11,0x20(%r11) + movaps %xmm12,0x30(%r11) + movaps %xmm13,0x40(%r11) + movaps %xmm14,0x50(%r11) + movaps %xmm15,0x60(%r11) +___ +$code.=<<___; + vzeroupper + + vmovdqa .Lsigma(%rip),$xa3 # key[0] + vmovdqu ($key),$xb3 # key[1] + vmovdqu 16($key),$xt3 # key[2] + vmovdqu ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vmovdqa $xa0,0x40(%rsp) # ... and offload + vpshufd \$0xaa,$xa3,$xa2 + vmovdqa $xa1,0x50(%rsp) + vpshufd \$0xff,$xa3,$xa3 + vmovdqa $xa2,0x60(%rsp) + vmovdqa $xa3,0x70(%rsp) + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vmovdqa $xb0,0x80-0x100(%rcx) + vpshufd \$0xaa,$xb3,$xb2 + vmovdqa $xb1,0x90-0x100(%rcx) + vpshufd \$0xff,$xb3,$xb3 + vmovdqa $xb2,0xa0-0x100(%rcx) + vmovdqa $xb3,0xb0-0x100(%rcx) + + vpshufd \$0x00,$xt3,$xt0 # "$xc0" + vpshufd \$0x55,$xt3,$xt1 # "$xc1" + vmovdqa $xt0,0xc0-0x100(%rcx) + vpshufd \$0xaa,$xt3,$xt2 # "$xc2" + vmovdqa $xt1,0xd0-0x100(%rcx) + vpshufd \$0xff,$xt3,$xt3 # "$xc3" + vmovdqa $xt2,0xe0-0x100(%rcx) + vmovdqa $xt3,0xf0-0x100(%rcx) + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet + vpshufd \$0xaa,$xd3,$xd2 + vmovdqa $xd1,0x110-0x100(%rcx) + vpshufd \$0xff,$xd3,$xd3 + vmovdqa $xd2,0x120-0x100(%rcx) + vmovdqa $xd3,0x130-0x100(%rcx) + + jmp .Loop_enter4xop + +.align 32 +.Loop_outer4xop: + vmovdqa 0x40(%rsp),$xa0 # re-load smashed key + vmovdqa 0x50(%rsp),$xa1 + vmovdqa 0x60(%rsp),$xa2 + vmovdqa 0x70(%rsp),$xa3 + vmovdqa 0x80-0x100(%rcx),$xb0 + vmovdqa 0x90-0x100(%rcx),$xb1 + vmovdqa 0xa0-0x100(%rcx),$xb2 + vmovdqa 0xb0-0x100(%rcx),$xb3 + vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0" + vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1" + vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2" + vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3" + vmovdqa 0x100-0x100(%rcx),$xd0 + vmovdqa 0x110-0x100(%rcx),$xd1 + vmovdqa 0x120-0x100(%rcx),$xd2 + vmovdqa 0x130-0x100(%rcx),$xd3 + vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters + +.Loop_enter4xop: + mov \$10,%eax + vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters + jmp .Loop4xop + +.align 32 +.Loop4xop: +___ + foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop4xop + + vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material + vpaddd 0x50(%rsp),$xa1,$xa1 + vpaddd 0x60(%rsp),$xa2,$xa2 + vpaddd 0x70(%rsp),$xa3,$xa3 + + vmovdqa $xt2,0x20(%rsp) # offload $xc2,3 + vmovdqa $xt3,0x30(%rsp) + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd 0x80-0x100(%rcx),$xb0,$xb0 + vpaddd 
0x90-0x100(%rcx),$xb1,$xb1 + vpaddd 0xa0-0x100(%rcx),$xb2,$xb2 + vpaddd 0xb0-0x100(%rcx),$xb3,$xb3 + + vmovdqa $xa0,0x00(%rsp) # offload $xa0,1 + vmovdqa $xa1,0x10(%rsp) + vmovdqa 0x20(%rsp),$xa0 # "xc2" + vmovdqa 0x30(%rsp),$xa1 # "xc3" + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + vpaddd 0xc0-0x100(%rcx),$xc0,$xc0 + vpaddd 0xd0-0x100(%rcx),$xc1,$xc1 + vpaddd 0xe0-0x100(%rcx),$xc2,$xc2 + vpaddd 0xf0-0x100(%rcx),$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd 0x100-0x100(%rcx),$xd0,$xd0 + vpaddd 0x110-0x100(%rcx),$xd1,$xd1 + vpaddd 0x120-0x100(%rcx),$xd2,$xd2 + vpaddd 0x130-0x100(%rcx),$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); + ($xa0,$xa1)=($xt2,$xt3); +$code.=<<___; + vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1 + vmovdqa 0x10(%rsp),$xa1 + + cmp \$64*4,$len + jb .Ltail4xop + + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x10($inp),$xb2,$xb2 + vpxor 0x20($inp),$xc2,$xc2 + vpxor 0x30($inp),$xd2,$xd2 + vpxor 0x40($inp),$xa3,$xa3 + vpxor 0x50($inp),$xb3,$xb3 + vpxor 0x60($inp),$xc3,$xc3 + vpxor 0x70($inp),$xd3,$xd3 + lea 0x80($inp),$inp # inp+=64*4 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + lea 0x80($out),$out # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x10($out) + vmovdqu $xc2,0x20($out) + vmovdqu $xd2,0x30($out) + vmovdqu $xa3,0x40($out) + vmovdqu $xb3,0x50($out) + vmovdqu $xc3,0x60($out) + vmovdqu $xd3,0x70($out) + lea 0x80($out),$out # out+=64*4 + + sub \$64*4,$len + jnz .Loop_outer4xop + + jmp .Ldone4xop + +.align 32 +.Ltail4xop: + cmp \$192,$len + jae .L192_or_more4xop + cmp \$128,$len + jae .L128_or_more4xop + cmp \$64,$len + jae .L64_or_more4xop + + xor %r10,%r10 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x10(%rsp) + vmovdqa $xc0,0x20(%rsp) + vmovdqa $xd0,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L64_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + je .Ldone4xop + + lea 0x40($inp),$inp # inp+=64*1 + vmovdqa $xa1,0x00(%rsp) + xor %r10,%r10 + vmovdqa $xb1,0x10(%rsp) + lea 0x40($out),$out # out+=64*1 + vmovdqa 
$xc1,0x20(%rsp) + sub \$64,$len # len-=64*1 + vmovdqa $xd1,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L128_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + je .Ldone4xop + + lea 0x80($inp),$inp # inp+=64*2 + vmovdqa $xa2,0x00(%rsp) + xor %r10,%r10 + vmovdqa $xb2,0x10(%rsp) + lea 0x80($out),$out # out+=64*2 + vmovdqa $xc2,0x20(%rsp) + sub \$128,$len # len-=64*2 + vmovdqa $xd2,0x30(%rsp) + jmp .Loop_tail4xop + +.align 32 +.L192_or_more4xop: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x10($inp),$xb0,$xb0 + vpxor 0x20($inp),$xc0,$xc0 + vpxor 0x30($inp),$xd0,$xd0 + vpxor 0x40($inp),$xa1,$xa1 + vpxor 0x50($inp),$xb1,$xb1 + vpxor 0x60($inp),$xc1,$xc1 + vpxor 0x70($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x10($inp),$xb2,$xb2 + vpxor 0x20($inp),$xc2,$xc2 + vpxor 0x30($inp),$xd2,$xd2 + + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x10($out) + vmovdqu $xc0,0x20($out) + vmovdqu $xd0,0x30($out) + vmovdqu $xa1,0x40($out) + vmovdqu $xb1,0x50($out) + vmovdqu $xc1,0x60($out) + vmovdqu $xd1,0x70($out) + lea 0x80($out),$out # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x10($out) + vmovdqu $xc2,0x20($out) + vmovdqu $xd2,0x30($out) + je .Ldone4xop + + lea 0x40($inp),$inp # inp+=64*3 + vmovdqa $xa2,0x00(%rsp) + xor %r10,%r10 + vmovdqa $xb2,0x10(%rsp) + lea 0x40($out),$out # out+=64*3 + vmovdqa $xc2,0x20(%rsp) + sub \$192,$len # len-=64*3 + vmovdqa $xd2,0x30(%rsp) + +.Loop_tail4xop: + movzb ($inp,%r10),%eax + movzb (%rsp,%r10),%ecx + lea 1(%r10),%r10 + xor %ecx,%eax + mov %al,-1($out,%r10) + dec $len + jnz .Loop_tail4xop + +.Ldone4xop: + vzeroupper +___ +$code.=<<___ if ($win64); + lea 0x140+0x30(%rsp),%r11 + movaps -0x30(%r11),%xmm6 + movaps -0x20(%r11),%xmm7 + movaps -0x10(%r11),%xmm8 + movaps 0x00(%r11),%xmm9 + movaps 0x10(%r11),%xmm10 + movaps 0x20(%r11),%xmm11 + movaps 0x30(%r11),%xmm12 + movaps 0x40(%r11),%xmm13 + movaps 0x50(%r11),%xmm14 + movaps 0x60(%r11),%xmm15 +___ +$code.=<<___; + add \$0x148+$xframe,%rsp + ret +.size ChaCha20_4xop,.-ChaCha20_4xop +___ +} + +######################################################################## +# AVX2 code path +if ($avx>1) { +my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3, + $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15)); +my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3, + "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3); + +sub AVX2_lane_ROUND { +my ($a0,$b0,$c0,$d0)=@_; +my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0)); +my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1)); +my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2)); +my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3); +my @x=map("\"$_\"",@xx); + + # Consider order in which variables are addressed by their + # index: + # + # a b c d + # + # 0 4 8 12 < even round + # 1 5 9 13 + # 2 6 10 14 + # 3 7 11 15 + # 0 5 10 15 < odd round + # 1 6 11 12 + # 2 7 8 13 + # 3 4 9 14 + # + # 'a', 'b' and 'd's are permanently allocated in registers, + # @x[0..7,12..15], while 'c's are maintained in memory. 
If + # you observe 'c' column, you'll notice that pair of 'c's is + # invariant between rounds. This means that we have to reload + # them once per round, in the middle. This is why you'll see + # bunch of 'c' stores and loads in the middle, but none in + # the beginning or end. + + ( + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1 + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpshufb (@x[$d0],@x[$d0],$t1)", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2 + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpshufb (@x[$d1],@x[$d1],$t1)", + + "&vpaddd ($xc,$xc,@x[$d0])", + "&vpxor (@x[$b0],$xc,@x[$b0])", + "&vpslld ($t0,@x[$b0],12)", + "&vpsrld (@x[$b0],@x[$b0],20)", + "&vpor (@x[$b0],$t0,@x[$b0])", + "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip) + "&vpaddd ($xc_,$xc_,@x[$d1])", + "&vpxor (@x[$b1],$xc_,@x[$b1])", + "&vpslld ($t1,@x[$b1],12)", + "&vpsrld (@x[$b1],@x[$b1],20)", + "&vpor (@x[$b1],$t1,@x[$b1])", + + "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", + "&vpxor (@x[$d0],@x[$a0],@x[$d0])", + "&vpshufb (@x[$d0],@x[$d0],$t0)", + "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", + "&vpxor (@x[$d1],@x[$a1],@x[$d1])", + "&vpshufb (@x[$d1],@x[$d1],$t0)", + + "&vpaddd ($xc,$xc,@x[$d0])", + "&vpxor (@x[$b0],$xc,@x[$b0])", + "&vpslld ($t1,@x[$b0],7)", + "&vpsrld (@x[$b0],@x[$b0],25)", + "&vpor (@x[$b0],$t1,@x[$b0])", + "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip) + "&vpaddd ($xc_,$xc_,@x[$d1])", + "&vpxor (@x[$b1],$xc_,@x[$b1])", + "&vpslld ($t0,@x[$b1],7)", + "&vpsrld (@x[$b1],@x[$b1],25)", + "&vpor (@x[$b1],$t0,@x[$b1])", + + "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's + "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)", + "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")", + "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")", + + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3 + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpshufb (@x[$d2],@x[$d2],$t1)", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4 + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vpshufb (@x[$d3],@x[$d3],$t1)", + + "&vpaddd ($xc,$xc,@x[$d2])", + "&vpxor (@x[$b2],$xc,@x[$b2])", + "&vpslld ($t0,@x[$b2],12)", + "&vpsrld (@x[$b2],@x[$b2],20)", + "&vpor (@x[$b2],$t0,@x[$b2])", + "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip) + "&vpaddd ($xc_,$xc_,@x[$d3])", + "&vpxor (@x[$b3],$xc_,@x[$b3])", + "&vpslld ($t1,@x[$b3],12)", + "&vpsrld (@x[$b3],@x[$b3],20)", + "&vpor (@x[$b3],$t1,@x[$b3])", + + "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", + "&vpxor (@x[$d2],@x[$a2],@x[$d2])", + "&vpshufb (@x[$d2],@x[$d2],$t0)", + "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", + "&vpxor (@x[$d3],@x[$a3],@x[$d3])", + "&vpshufb (@x[$d3],@x[$d3],$t0)", + + "&vpaddd ($xc,$xc,@x[$d2])", + "&vpxor (@x[$b2],$xc,@x[$b2])", + "&vpslld ($t1,@x[$b2],7)", + "&vpsrld (@x[$b2],@x[$b2],25)", + "&vpor (@x[$b2],$t1,@x[$b2])", + "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip) + "&vpaddd ($xc_,$xc_,@x[$d3])", + "&vpxor (@x[$b3],$xc_,@x[$b3])", + "&vpslld ($t0,@x[$b3],7)", + "&vpsrld (@x[$b3],@x[$b3],25)", + "&vpor (@x[$b3],$t0,@x[$b3])" + ); +} + +my $xframe = $win64 ? 
0xb0 : 8; + +$code.=<<___; +.type ChaCha20_8x,\@function,5 +.align 32 +ChaCha20_8x: +.LChaCha20_8x: + mov %rsp,%r10 + sub \$0x280+$xframe,%rsp + and \$-32,%rsp +___ +$code.=<<___ if ($win64); + lea 0x290+0x30(%rsp),%r11 + movaps %xmm6,-0x30(%r11) + movaps %xmm7,-0x20(%r11) + movaps %xmm8,-0x10(%r11) + movaps %xmm9,0x00(%r11) + movaps %xmm10,0x10(%r11) + movaps %xmm11,0x20(%r11) + movaps %xmm12,0x30(%r11) + movaps %xmm13,0x40(%r11) + movaps %xmm14,0x50(%r11) + movaps %xmm15,0x60(%r11) +___ +$code.=<<___; + vzeroupper + mov %r10,0x280(%rsp) + + ################ stack layout + # +0x00 SIMD equivalent of @x[8-12] + # ... + # +0x80 constant copy of key[0-2] smashed by lanes + # ... + # +0x200 SIMD counters (with nonce smashed by lanes) + # ... + # +0x280 saved %rsp + + vbroadcasti128 .Lsigma(%rip),$xa3 # key[0] + vbroadcasti128 ($key),$xb3 # key[1] + vbroadcasti128 16($key),$xt3 # key[2] + vbroadcasti128 ($counter),$xd3 # key[3] + lea 0x100(%rsp),%rcx # size optimization + lea 0x200(%rsp),%rax # size optimization + lea .Lrot16(%rip),%r10 + lea .Lrot24(%rip),%r11 + + vpshufd \$0x00,$xa3,$xa0 # smash key by lanes... + vpshufd \$0x55,$xa3,$xa1 + vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload + vpshufd \$0xaa,$xa3,$xa2 + vmovdqa $xa1,0xa0-0x100(%rcx) + vpshufd \$0xff,$xa3,$xa3 + vmovdqa $xa2,0xc0-0x100(%rcx) + vmovdqa $xa3,0xe0-0x100(%rcx) + + vpshufd \$0x00,$xb3,$xb0 + vpshufd \$0x55,$xb3,$xb1 + vmovdqa $xb0,0x100-0x100(%rcx) + vpshufd \$0xaa,$xb3,$xb2 + vmovdqa $xb1,0x120-0x100(%rcx) + vpshufd \$0xff,$xb3,$xb3 + vmovdqa $xb2,0x140-0x100(%rcx) + vmovdqa $xb3,0x160-0x100(%rcx) + + vpshufd \$0x00,$xt3,$xt0 # "xc0" + vpshufd \$0x55,$xt3,$xt1 # "xc1" + vmovdqa $xt0,0x180-0x200(%rax) + vpshufd \$0xaa,$xt3,$xt2 # "xc2" + vmovdqa $xt1,0x1a0-0x200(%rax) + vpshufd \$0xff,$xt3,$xt3 # "xc3" + vmovdqa $xt2,0x1c0-0x200(%rax) + vmovdqa $xt3,0x1e0-0x200(%rax) + + vpshufd \$0x00,$xd3,$xd0 + vpshufd \$0x55,$xd3,$xd1 + vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet + vpshufd \$0xaa,$xd3,$xd2 + vmovdqa $xd1,0x220-0x200(%rax) + vpshufd \$0xff,$xd3,$xd3 + vmovdqa $xd2,0x240-0x200(%rax) + vmovdqa $xd3,0x260-0x200(%rax) + + jmp .Loop_enter8x + +.align 32 +.Loop_outer8x: + vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key + vmovdqa 0xa0-0x100(%rcx),$xa1 + vmovdqa 0xc0-0x100(%rcx),$xa2 + vmovdqa 0xe0-0x100(%rcx),$xa3 + vmovdqa 0x100-0x100(%rcx),$xb0 + vmovdqa 0x120-0x100(%rcx),$xb1 + vmovdqa 0x140-0x100(%rcx),$xb2 + vmovdqa 0x160-0x100(%rcx),$xb3 + vmovdqa 0x180-0x200(%rax),$xt0 # "xc0" + vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1" + vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2" + vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3" + vmovdqa 0x200-0x200(%rax),$xd0 + vmovdqa 0x220-0x200(%rax),$xd1 + vmovdqa 0x240-0x200(%rax),$xd2 + vmovdqa 0x260-0x200(%rax),$xd3 + vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters + +.Loop_enter8x: + vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]" + vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]" + vbroadcasti128 (%r10),$xt3 + vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters + mov \$10,%eax + jmp .Loop8x + +.align 32 +.Loop8x: +___ + foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; } + foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; } +$code.=<<___; + dec %eax + jnz .Loop8x + + lea 0x200(%rsp),%rax # size optimization + vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key + vpaddd 0xa0-0x100(%rcx),$xa1,$xa1 + vpaddd 0xc0-0x100(%rcx),$xa2,$xa2 + vpaddd 0xe0-0x100(%rcx),$xa3,$xa3 + + vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data + vpunpckldq $xa3,$xa2,$xt3 + vpunpckhdq $xa1,$xa0,$xa0 + 
vpunpckhdq $xa3,$xa2,$xa2 + vpunpcklqdq $xt3,$xt2,$xa1 # "a0" + vpunpckhqdq $xt3,$xt2,$xt2 # "a1" + vpunpcklqdq $xa2,$xa0,$xa3 # "a2" + vpunpckhqdq $xa2,$xa0,$xa0 # "a3" +___ + ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2); +$code.=<<___; + vpaddd 0x100-0x100(%rcx),$xb0,$xb0 + vpaddd 0x120-0x100(%rcx),$xb1,$xb1 + vpaddd 0x140-0x100(%rcx),$xb2,$xb2 + vpaddd 0x160-0x100(%rcx),$xb3,$xb3 + + vpunpckldq $xb1,$xb0,$xt2 + vpunpckldq $xb3,$xb2,$xt3 + vpunpckhdq $xb1,$xb0,$xb0 + vpunpckhdq $xb3,$xb2,$xb2 + vpunpcklqdq $xt3,$xt2,$xb1 # "b0" + vpunpckhqdq $xt3,$xt2,$xt2 # "b1" + vpunpcklqdq $xb2,$xb0,$xb3 # "b2" + vpunpckhqdq $xb2,$xb0,$xb0 # "b3" +___ + ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2); +$code.=<<___; + vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xb0,$xa0,$xb0 + vperm2i128 \$0x20,$xb1,$xa1,$xa0 + vperm2i128 \$0x31,$xb1,$xa1,$xb1 + vperm2i128 \$0x20,$xb2,$xa2,$xa1 + vperm2i128 \$0x31,$xb2,$xa2,$xb2 + vperm2i128 \$0x20,$xb3,$xa3,$xa2 + vperm2i128 \$0x31,$xb3,$xa3,$xb3 +___ + ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3); + my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1); +$code.=<<___; + vmovdqa $xa0,0x00(%rsp) # offload $xaN + vmovdqa $xa1,0x20(%rsp) + vmovdqa 0x40(%rsp),$xc2 # $xa0 + vmovdqa 0x60(%rsp),$xc3 # $xa1 + + vpaddd 0x180-0x200(%rax),$xc0,$xc0 + vpaddd 0x1a0-0x200(%rax),$xc1,$xc1 + vpaddd 0x1c0-0x200(%rax),$xc2,$xc2 + vpaddd 0x1e0-0x200(%rax),$xc3,$xc3 + + vpunpckldq $xc1,$xc0,$xt2 + vpunpckldq $xc3,$xc2,$xt3 + vpunpckhdq $xc1,$xc0,$xc0 + vpunpckhdq $xc3,$xc2,$xc2 + vpunpcklqdq $xt3,$xt2,$xc1 # "c0" + vpunpckhqdq $xt3,$xt2,$xt2 # "c1" + vpunpcklqdq $xc2,$xc0,$xc3 # "c2" + vpunpckhqdq $xc2,$xc0,$xc0 # "c3" +___ + ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2); +$code.=<<___; + vpaddd 0x200-0x200(%rax),$xd0,$xd0 + vpaddd 0x220-0x200(%rax),$xd1,$xd1 + vpaddd 0x240-0x200(%rax),$xd2,$xd2 + vpaddd 0x260-0x200(%rax),$xd3,$xd3 + + vpunpckldq $xd1,$xd0,$xt2 + vpunpckldq $xd3,$xd2,$xt3 + vpunpckhdq $xd1,$xd0,$xd0 + vpunpckhdq $xd3,$xd2,$xd2 + vpunpcklqdq $xt3,$xt2,$xd1 # "d0" + vpunpckhqdq $xt3,$xt2,$xt2 # "d1" + vpunpcklqdq $xd2,$xd0,$xd3 # "d2" + vpunpckhqdq $xd2,$xd0,$xd0 # "d3" +___ + ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2); +$code.=<<___; + vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further + vperm2i128 \$0x31,$xd0,$xc0,$xd0 + vperm2i128 \$0x20,$xd1,$xc1,$xc0 + vperm2i128 \$0x31,$xd1,$xc1,$xd1 + vperm2i128 \$0x20,$xd2,$xc2,$xc1 + vperm2i128 \$0x31,$xd2,$xc2,$xd2 + vperm2i128 \$0x20,$xd3,$xc3,$xc2 + vperm2i128 \$0x31,$xd3,$xc3,$xd3 +___ + ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3); + ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)= + ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3); + ($xa0,$xa1)=($xt2,$xt3); +$code.=<<___; + vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember? 
+ vmovdqa 0x20(%rsp),$xa1 + + cmp \$64*8,$len + jb .Ltail8x + + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa1,$xa1 + vpxor 0x20($inp),$xb1,$xb1 + vpxor 0x40($inp),$xc1,$xc1 + vpxor 0x60($inp),$xd1,$xd1 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa1,0x00($out) + vmovdqu $xb1,0x20($out) + vmovdqu $xc1,0x40($out) + vmovdqu $xd1,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa2,$xa2 + vpxor 0x20($inp),$xb2,$xb2 + vpxor 0x40($inp),$xc2,$xc2 + vpxor 0x60($inp),$xd2,$xd2 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa2,0x00($out) + vmovdqu $xb2,0x20($out) + vmovdqu $xc2,0x40($out) + vmovdqu $xd2,0x60($out) + lea 0x80($out),$out # size optimization + + vpxor 0x00($inp),$xa3,$xa3 + vpxor 0x20($inp),$xb3,$xb3 + vpxor 0x40($inp),$xc3,$xc3 + vpxor 0x60($inp),$xd3,$xd3 + lea 0x80($inp),$inp # size optimization + vmovdqu $xa3,0x00($out) + vmovdqu $xb3,0x20($out) + vmovdqu $xc3,0x40($out) + vmovdqu $xd3,0x60($out) + lea 0x80($out),$out # size optimization + + sub \$64*8,$len + jnz .Loop_outer8x + + jmp .Ldone8x + +.Ltail8x: + cmp \$448,$len + jae .L448_or_more8x + cmp \$384,$len + jae .L384_or_more8x + cmp \$320,$len + jae .L320_or_more8x + cmp \$256,$len + jae .L256_or_more8x + cmp \$192,$len + jae .L192_or_more8x + cmp \$128,$len + jae .L128_or_more8x + cmp \$64,$len + jae .L64_or_more8x + + xor %r10,%r10 + vmovdqa $xa0,0x00(%rsp) + vmovdqa $xb0,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L64_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + je .Ldone8x + + lea 0x40($inp),$inp # inp+=64*1 + xor %r10,%r10 + vmovdqa $xc0,0x00(%rsp) + lea 0x40($out),$out # out+=64*1 + sub \$64,$len # len-=64*1 + vmovdqa $xd0,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L128_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + je .Ldone8x + + lea 0x80($inp),$inp # inp+=64*2 + xor %r10,%r10 + vmovdqa $xa1,0x00(%rsp) + lea 0x80($out),$out # out+=64*2 + sub \$128,$len # len-=64*2 + vmovdqa $xb1,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L192_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + je .Ldone8x + + lea 0xc0($inp),$inp # inp+=64*3 + xor %r10,%r10 + vmovdqa $xc1,0x00(%rsp) + lea 0xc0($out),$out # out+=64*3 + sub \$192,$len # len-=64*3 + vmovdqa $xd1,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L256_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu 
$xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + je .Ldone8x + + lea 0x100($inp),$inp # inp+=64*4 + xor %r10,%r10 + vmovdqa $xa2,0x00(%rsp) + lea 0x100($out),$out # out+=64*4 + sub \$256,$len # len-=64*4 + vmovdqa $xb2,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L320_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + je .Ldone8x + + lea 0x140($inp),$inp # inp+=64*5 + xor %r10,%r10 + vmovdqa $xc2,0x00(%rsp) + lea 0x140($out),$out # out+=64*5 + sub \$320,$len # len-=64*5 + vmovdqa $xd2,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L384_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vpxor 0x140($inp),$xc2,$xc2 + vpxor 0x160($inp),$xd2,$xd2 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + vmovdqu $xc2,0x140($out) + vmovdqu $xd2,0x160($out) + je .Ldone8x + + lea 0x180($inp),$inp # inp+=64*6 + xor %r10,%r10 + vmovdqa $xa3,0x00(%rsp) + lea 0x180($out),$out # out+=64*6 + sub \$384,$len # len-=64*6 + vmovdqa $xb3,0x20(%rsp) + jmp .Loop_tail8x + +.align 32 +.L448_or_more8x: + vpxor 0x00($inp),$xa0,$xa0 # xor with input + vpxor 0x20($inp),$xb0,$xb0 + vpxor 0x40($inp),$xc0,$xc0 + vpxor 0x60($inp),$xd0,$xd0 + vpxor 0x80($inp),$xa1,$xa1 + vpxor 0xa0($inp),$xb1,$xb1 + vpxor 0xc0($inp),$xc1,$xc1 + vpxor 0xe0($inp),$xd1,$xd1 + vpxor 0x100($inp),$xa2,$xa2 + vpxor 0x120($inp),$xb2,$xb2 + vpxor 0x140($inp),$xc2,$xc2 + vpxor 0x160($inp),$xd2,$xd2 + vpxor 0x180($inp),$xa3,$xa3 + vpxor 0x1a0($inp),$xb3,$xb3 + vmovdqu $xa0,0x00($out) + vmovdqu $xb0,0x20($out) + vmovdqu $xc0,0x40($out) + vmovdqu $xd0,0x60($out) + vmovdqu $xa1,0x80($out) + vmovdqu $xb1,0xa0($out) + vmovdqu $xc1,0xc0($out) + vmovdqu $xd1,0xe0($out) + vmovdqu $xa2,0x100($out) + vmovdqu $xb2,0x120($out) + vmovdqu $xc2,0x140($out) + vmovdqu $xd2,0x160($out) + vmovdqu $xa3,0x180($out) + vmovdqu $xb3,0x1a0($out) + je .Ldone8x + + lea 0x1c0($inp),$inp # inp+=64*7 + xor %r10,%r10 + vmovdqa $xc3,0x00(%rsp) + lea 0x1c0($out),$out # out+=64*7 + sub \$448,$len # len-=64*7 + vmovdqa $xd3,0x20(%rsp) + +.Loop_tail8x: + movzb ($inp,%r10),%eax + movzb (%rsp,%r10),%ecx + lea 1(%r10),%r10 + xor %ecx,%eax + mov %al,-1($out,%r10) + dec $len + jnz .Loop_tail8x + +.Ldone8x: + vzeroall +___ +$code.=<<___ if ($win64); + lea 0x290+0x30(%rsp),%r11 + movaps -0x30(%r11),%xmm6 + movaps -0x20(%r11),%xmm7 + movaps -0x10(%r11),%xmm8 + movaps 0x00(%r11),%xmm9 + movaps 0x10(%r11),%xmm10 + movaps 0x20(%r11),%xmm11 + movaps 0x30(%r11),%xmm12 + movaps 0x40(%r11),%xmm13 + movaps 0x50(%r11),%xmm14 + movaps 0x60(%r11),%xmm15 +___ +$code.=<<___; + mov 
0x280(%rsp),%rsp + ret +.size ChaCha20_8x,.-ChaCha20_8x +___ +} + +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + s/%x#%y/%x/go; + + print $_,"\n"; +} + +close STDOUT;
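
Note: the SSSE3_lane_ROUND/XOP_lane_ROUND/AVX2_lane_ROUND generators above derive the odd-round (diagonal) word indices from the even-round (column) ones with the expression ($_&~3)+(($_+1)&3), i.e. they rotate within each 4-word row of the index table quoted in the comments. A minimal stand-alone Perl snippet (not part of the module) that reproduces that table:

#!/usr/bin/env perl
# Reproduces the a/b/c/d index table from the *_lane_ROUND comments by
# applying the same "rotate within a 4-word row" map expression.
use strict; use warnings;

for my $start ([0,4,8,12], [0,5,10,15]) {             # even-round Q1, odd-round Q1
    my @q = @$start;
    print "Q1: @q\n";
    for my $n (2..4) {
        @q = map { ($_ & ~3) + (($_ + 1) & 3) } @q;   # same map as in the script
        print "Q$n: @q\n";
    }
    print "\n";
}

Starting from (0,4,8,12) this prints the even-round quartets 1 5 9 13, 2 6 10 14, 3 7 11 15, and starting from (0,5,10,15) the odd-round quartets 1 6 11 12, 2 7 8 13, 3 4 9 14, matching the comment table.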
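For reference, here is a scalar sketch (plain Perl, not generated code and not part of this commit) of the data layout the ChaCha20_4x/ChaCha20_4xop/ChaCha20_8x paths implement: the 16-word input state is replicated ("smashed") into one copy per lane, lane j's block counter is bumped by j (the .Linc/.Lincy constants), all lanes run the 20 rounds in lock-step, the original per-lane input is added back, and only then is the lane-major data de-interlaced into consecutive 64-byte blocks. Function and variable names below are illustrative only; it assumes a Perl build with 64-bit integers.

#!/usr/bin/env perl
# Scalar model of the "smash key by lanes" keystream generation used by the
# 4x/8x code paths above.  Illustrative only; assumes 64-bit integer Perl.
use strict; use warnings;

sub rotl32 { my ($v,$n) = @_; ((($v << $n) | ($v >> (32 - $n))) & 0xffffffff) }

sub quarterround {                       # one lane's quarter round
    my ($x,$a,$b,$c,$d) = @_;
    $x->[$a] = ($x->[$a] + $x->[$b]) & 0xffffffff; $x->[$d] = rotl32($x->[$d] ^ $x->[$a], 16);
    $x->[$c] = ($x->[$c] + $x->[$d]) & 0xffffffff; $x->[$b] = rotl32($x->[$b] ^ $x->[$c], 12);
    $x->[$a] = ($x->[$a] + $x->[$b]) & 0xffffffff; $x->[$d] = rotl32($x->[$d] ^ $x->[$a],  8);
    $x->[$c] = ($x->[$c] + $x->[$d]) & 0xffffffff; $x->[$b] = rotl32($x->[$b] ^ $x->[$c],  7);
}

# $key: 8 32-bit words, $ctr: 4 32-bit words (block counter first, as in the
# ($counter) load above).  Returns $lanes consecutive 64-byte keystream blocks.
sub chacha20_lanes {
    my ($key,$ctr,$lanes) = @_;
    my @sigma = (0x61707865,0x3320646e,0x79622d32,0x6b206574);   # .Lsigma
    my (@init,@lane);
    for my $j (0..$lanes-1) {            # smash the state by lanes...
        my @x = (@sigma,@$key,@$ctr);
        $x[12] = ($x[12] + $j) & 0xffffffff;   # ...with per-lane counters (.Linc)
        $init[$j] = [@x]; $lane[$j] = [@x];
    }
    for (1..10) {                        # ten double rounds, lanes in lock-step
        for my $x (@lane) {
            quarterround($x,0,4, 8,12); quarterround($x,1,5, 9,13);   # column
            quarterround($x,2,6,10,14); quarterround($x,3,7,11,15);
            quarterround($x,0,5,10,15); quarterround($x,1,6,11,12);   # diagonal
            quarterround($x,2,7, 8,13); quarterround($x,3,4, 9,14);
        }
    }
    my $out = '';
    for my $j (0..$lanes-1) {            # accumulate key material, de-interlace
        my ($x,$i) = ($lane[$j],$init[$j]);
        $x->[$_] = ($x->[$_] + $i->[$_]) & 0xffffffff for 0..15;
        $out .= pack("V16", @$x);        # little-endian words, one block per lane
    }
    return $out;                         # caller XORs this with the input
}

# Quick check against the well-known all-zero test vector: the first keystream
# bytes should come out as 76 b8 e0 ad a0 f1 3d 90 ...
print unpack("H16", chacha20_lanes([ (0) x 8 ], [ 0,0,0,0 ], 4)), "\n";

The assembly keeps exactly this lane-major layout in registers and on the stack, performs the final de-interleave with the punpck*/vperm2i128 sequences before XOR-ing with the input, and lets the byte-wise .Loop_tail* loops handle whatever is left after the last full block.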