Initial import.

Initial fork from f2d678e6e89b6508147086610e985d4e8416e867 (1.0.2 beta).

(This change contains substantial changes from the original and
effectively starts a new history.)
diff --git a/crypto/sha/CMakeLists.txt b/crypto/sha/CMakeLists.txt
new file mode 100644
index 0000000..848c048
--- /dev/null
+++ b/crypto/sha/CMakeLists.txt
@@ -0,0 +1,61 @@
+include_directories(. .. ../../include)
+
+if (${ARCH} STREQUAL "x86_64")
+	set(
+		SHA_ARCH_SOURCES
+
+		sha1-x86_64.${ASM_EXT}
+		sha256-x86_64.${ASM_EXT}
+		sha512-x86_64.${ASM_EXT}
+	)
+endif()
+
+if (${ARCH} STREQUAL "x86")
+	set(
+		SHA_ARCH_SOURCES
+
+		sha1-586.${ASM_EXT}
+		sha256-586.${ASM_EXT}
+		sha512-586.${ASM_EXT}
+	)
+endif()
+
+if (${ARCH} STREQUAL "arm")
+	set(
+		SHA_ARCH_SOURCES
+
+		sha1-armv4-large.${ASM_EXT}
+		sha256-armv4.${ASM_EXT}
+		sha512-armv4.${ASM_EXT}
+	)
+endif()
+
+add_library(
+	sha
+
+	OBJECT
+
+	sha1.c
+	sha256.c
+	sha512.c
+
+	${SHA_ARCH_SOURCES}
+)
+
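+# perlasm() is assumed to be a helper provided by the enclosing build
+# that runs the named perlasm script to produce the assembly source.
+# Note that the x86_64 SHA-256 and SHA-512 outputs are both generated
+# from the single sha512-x86_64.pl script, selected by the trailing
+# argument.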
+perlasm(sha1-x86_64.${ASM_EXT} asm/sha1-x86_64.pl)
+perlasm(sha256-x86_64.${ASM_EXT} asm/sha512-x86_64.pl sha256)
+perlasm(sha512-x86_64.${ASM_EXT} asm/sha512-x86_64.pl sha512)
+perlasm(sha1-586.${ASM_EXT} asm/sha1-586.pl)
+perlasm(sha256-586.${ASM_EXT} asm/sha256-586.pl)
+perlasm(sha512-586.${ASM_EXT} asm/sha512-586.pl)
+perlasm(sha1-armv4-large.${ASM_EXT} asm/sha1-armv4-large.pl)
+perlasm(sha256-armv4.${ASM_EXT} asm/sha256-armv4.pl)
+perlasm(sha512-armv4.${ASM_EXT} asm/sha512-armv4.pl)
+
+add_executable(
+	sha1_test
+
+	sha1_test.c
+)
+
+target_link_libraries(sha1_test crypto)
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
new file mode 100644
index 0000000..632dbbe
--- /dev/null
+++ b/crypto/sha/asm/sha1-586.pl
@@ -0,0 +1,1266 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
+# functions were re-implemented to address P4 performance issue [see
+# commentary below], and in 2006 the rest was rewritten in order to
+# gain freedom to liberate licensing terms.
+
+# January, September 2004.
+#
+# It was noted that the Intel IA-32 C compiler generates code which
+# performs ~30% *faster* on a P4 CPU than the original *hand-coded*
+# SHA1 assembler implementation. To address this problem (and
+# prove that humans are still better than machines:-), the
+# original code was overhauled, which resulted in the following
+# performance changes:
+#
+#		compared with original	compared with Intel cc
+#		assembler impl.		generated code
+# Pentium	-16%			+48%
+# PIII/AMD	+8%			+16%
+# P4		+85%(!)			+45%
+#
+# As you can see, Pentium came out as the loser:-( Yet I reckoned that
+# the improvement on P4 outweighs the loss and incorporated this
+# re-tuned code into 0.9.7 and later.
+# ----------------------------------------------------------------
+#					<appro@fy.chalmers.se>
+
+# August 2009.
+#
+# George Spelvin pointed out that F_40_59(b,c,d) can be rewritten as
+# '(c&d) + (b&(c^d))', which allows accumulating partial results
+# and lightens "pressure" on scratch registers. This resulted in
+# >12% performance improvement on contemporary AMD cores (with no
+# degradation on other CPUs:-). Also, the code was revised to maximize
+# "distance" between instructions producing input to 'lea' instruction
+# and the 'lea' instruction itself, which is essential for Intel Atom
+# core and resulted in ~15% improvement.
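+#
+# (For reference: F_40_59 is the majority function MAJ(b,c,d) =
+# (b&c)|(b&d)|(c&d). In any given bit position c&d and b&(c^d) cannot
+# both be set, so their sum equals their OR, which is what makes the
+# two terms safe to accumulate separately.)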
+
+# October 2010.
+#
+# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
+# is to offload the message schedule, denoted by Wt in the NIST
+# specification or Xupdate in the OpenSSL source, to the SIMD unit.
+# The idea is not novel,
+# and in SSE2 context was first explored by Dean Gaudet in 2004, see
+# http://arctic.org/~dean/crypto/sha1.html. Since then several things
+# have changed that made it interesting again:
+#
+# a) XMM units became faster and wider;
+# b) instruction set became more versatile;
+# c) an important observation was made by Max Locktyukhin, which made
+#    it possible to reduce the number of instructions required to perform
+#    the operation in question, for further details see
+#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
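+#
+# (The schedule in question is the standard SHA-1 recurrence
+# W[t] = ROL(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1), which the SIMD
+# code below computes four elements at a time.)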
+
+# April 2011.
+#
+# Add AVX code path, probably the most controversial one... The thing
+# is that the switch to AVX alone improves performance by as little as
+# 4% in comparison to the SSSE3 code path. But the result below doesn't
+# look like a 4% improvement... Trouble is that Sandy Bridge decodes
+# 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops, two per
+# round, that make it run slower than Core2 and Westmere. But 'sh[rl]d'
+# is decoded as a single µ-op by Sandy Bridge, and it's replacing
+# 'ro[rl]' with the
+# equivalent 'sh[rl]d' that is responsible for the impressive 5.1
+# cycles per processed byte. But 'sh[rl]d' has not traditionally been
+# fast, nor does it appear to be fast on the upcoming Bulldozer
+# [according to its optimization manual]. This is why the AVX code path
+# is guarded by *both* AVX and a synthetic bit denoting Intel CPUs.
+# One can argue that it's unfair to AMD, but without 'sh[rl]d' it
+# makes no sense to keep the AVX code path. If somebody feels that
+# strongly, it's probably more appropriate to discuss the possibility
+# of using the XOP vector rotate on AMD...
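+#
+# (The substitution works because SHLD/SHRD with the same register as
+# both source and destination is equivalent to ROL/ROR; that is exactly
+# how $_rol/$_ror are redefined in the AVX path below.)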
+
+######################################################################
+# Current performance is summarized in the following table. Numbers are
+# CPU clock cycles spent to process single byte (less is better).
+#
+#		x86		SSSE3		AVX
+# Pentium	15.7		-
+# PIII		11.5		-
+# P4		10.6		-
+# AMD K8	7.1		-
+# Core2		7.3		6.0/+22%	-
+# Atom		12.5		9.3(*)/+35%	-
+# Westmere	7.3		5.5/+33%	-
+# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
+# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
+# Bulldozer	11.6		6.0/+92%
+# VIA Nano	10.6		7.4/+43%
+#
+# (*)	The loop is 1056 instructions long and the expected result is
+#	~8.25. It remains a mystery [to me] why ILP is limited to 1.7.
+#
+# (**)	As per above comment, the result is for AVX *plus* sh[rl]d.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
+
+$xmm=$ymm=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+$ymm=1 if ($xmm &&
+		`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+			=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
+		$1>=2.19);	# first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" && 
+		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
+		$1>=2.03);	# first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
+		`ml 2>&1` =~ /Version ([0-9]+)\./ &&
+		$1>=10);	# first version supporting AVX
+
+&external_label("OPENSSL_ia32cap_P") if ($xmm);
+
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$E="edi";
+$T="esi";
+$tmp1="ebp";
+
+@V=($A,$B,$C,$D,$E,$T);
+
+$alt=0;	# 1 denotes alternative IALU implementation, which performs
+	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
+	# Sandy Bridge...
+
+sub BODY_00_15
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+
+	&comment("00_15 $n");
+
+	&mov($f,$c);			# f to hold F_00_19(b,c,d)
+	 if ($n==0)  { &mov($tmp1,$a); }
+	 else        { &mov($a,$tmp1); }
+	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
+	 &xor($f,$d);
+	&add($tmp1,$e);			# tmp1+=e;
+	 &mov($e,&swtmp($n%16));	# e becomes volatile and is loaded
+	 				# with xi, also note that e becomes
+					# f in next round...
+	&and($f,$b);
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &xor($f,$d);			# f holds F_00_19(b,c,d)
+	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi
+
+	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
+		      &add($f,$tmp1); }	# f+=tmp1
+	else        { &add($tmp1,$f); }	# f becomes a in next round
+	&mov($tmp1,$a)			if ($alt && $n==15);
+	}
+
+sub BODY_16_19
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+
+	&comment("16_19 $n");
+
+if ($alt) {
+	&xor($c,$d);
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
+	 &xor($f,&swtmp(($n+8)%16));
+	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &add($e,$tmp1);		# e+=F_00_19(b,c,d)
+	&xor($c,$d);			# restore $c
+	 &mov($tmp1,$a);		# b in next round
+	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&rotl($a,5);			# ROTATE(a,5)
+	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$a);			# f+=ROTATE(a,5)
+} else {
+	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$d);
+	 &xor($f,&swtmp(($n+8)%16));
+	&and($tmp1,$b);
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &xor($tmp1,$d);		# tmp1=F_00_19(b,c,d)
+	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
+	 &mov($tmp1,$a);
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&rotl($tmp1,5);			# ROTATE(a,5)
+	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$tmp1);		# f+=ROTATE(a,5)
+}
+	}
+
+sub BODY_20_39
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;
+
+	&comment("20_39 $n");
+
+if ($alt) {
+	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+8)%16));
+	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &mov($tmp1,$a);		# b in next round
+	&rotr($b,7);			# b=ROTATE(b,30)
+	 &mov(&swtmp($n%16),$f)		if($n<77);# xi=f
+	&rotl($a,5);			# ROTATE(a,5)
+	 &xor($b,$c)			if($n==39);# warm up for BODY_40_59
+	&and($tmp1,$b)			if($n==39);
+	 &lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
+	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
+	 &add($f,$a);			# f+=ROTATE(a,5)
+	&rotr($a,5)			if ($n==79);
+} else {
+	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$c);
+	 &xor($f,&swtmp(($n+8)%16));
+	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &add($e,$tmp1);		# e+=F_20_39(b,c,d)
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &mov($tmp1,$a);
+	&rotl($tmp1,5);			# ROTATE(a,5)
+	 &mov(&swtmp($n%16),$f) if($n<77);# xi=f
+	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
+	 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
+	&add($f,$tmp1);			# f+=ROTATE(a,5)
+}
+	}
+
+sub BODY_40_59
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+
+	&comment("40_59 $n");
+
+if ($alt) {
+	&add($e,$tmp1);			# e+=b&(c^d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&mov($tmp1,$d);
+	 &xor($f,&swtmp(($n+8)%16));
+	&xor($c,$d);			# restore $c
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &and($tmp1,$c);
+	&rotr($b,7);			# b=ROTATE(b,30)
+	 &add($e,$tmp1);		# e+=c&d
+	&mov($tmp1,$a);			# b in next round
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&rotl($a,5);			# ROTATE(a,5)
+	 &xor($b,$c)			if ($n<59);
+	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
+	 &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$a);			# f+=ROTATE(a,5)
+} else {
+	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$d);
+	 &xor($f,&swtmp(($n+8)%16));
+	&and($tmp1,$b);
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &add($tmp1,$e);		# b&(c^d)+=e
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &mov($e,$a);			# e becomes volatile
+	&rotl($e,5);			# ROTATE(a,5)
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
+	 &mov($tmp1,$c);
+	&add($f,$e);			# f+=ROTATE(a,5)
+	 &and($tmp1,$d);
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$tmp1);		# f+=c&d
+}
+	}
+
+&function_begin("sha1_block_data_order");
+if ($xmm) {
+  &static_label("ssse3_shortcut");
+  &static_label("avx_shortcut")		if ($ymm);
+  &static_label("K_XX_XX");
+
+	&call	(&label("pic_point"));	# make it PIC!
+  &set_label("pic_point");
+	&blindpop($tmp1);
+	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+
+	&mov	($A,&DWP(0,$T));
+	&mov	($D,&DWP(4,$T));
+	&test	($D,1<<9);		# check SSSE3 bit
+	&jz	(&label("x86"));
+	&test	($A,1<<24);		# check FXSR bit
+	&jz	(&label("x86"));
+	if ($ymm) {
+		&and	($D,1<<28);		# mask AVX bit
+		&and	($A,1<<30);		# mask "Intel CPU" bit
+		&or	($A,$D);
+		&cmp	($A,1<<28|1<<30);
+		&je	(&label("avx_shortcut"));
+	}
+	&jmp	(&label("ssse3_shortcut"));
+  &set_label("x86",16);
+}
+	&mov($tmp1,&wparam(0));	# SHA_CTX *c
+	&mov($T,&wparam(1));	# const void *input
+	&mov($A,&wparam(2));	# size_t num
+	&stack_push(16+3);	# allocate X[16]
+	&shl($A,6);
+	&add($A,$T);
+	&mov(&wparam(2),$A);	# pointer beyond the end of input
+	&mov($E,&DWP(16,$tmp1));# pre-load E
+	&jmp(&label("loop"));
+
+&set_label("loop",16);
+
+	# copy input chunk to X, but reversing byte order!
+	for ($i=0; $i<16; $i+=4)
+		{
+		&mov($A,&DWP(4*($i+0),$T));
+		&mov($B,&DWP(4*($i+1),$T));
+		&mov($C,&DWP(4*($i+2),$T));
+		&mov($D,&DWP(4*($i+3),$T));
+		&bswap($A);
+		&bswap($B);
+		&bswap($C);
+		&bswap($D);
+		&mov(&swtmp($i+0),$A);
+		&mov(&swtmp($i+1),$B);
+		&mov(&swtmp($i+2),$C);
+		&mov(&swtmp($i+3),$D);
+		}
+	&mov(&wparam(1),$T);	# redundant in 1st spin
+
+	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
+	&mov($B,&DWP(4,$tmp1));
+	&mov($C,&DWP(8,$tmp1));
+	&mov($D,&DWP(12,$tmp1));
+	# E is pre-loaded
+
+	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+
+	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check
+
+	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
+	&mov($D,&wparam(1));	# D is last "T" and is discarded
+
+	&add($E,&DWP(0,$tmp1));	# E is last "A"...
+	&add($T,&DWP(4,$tmp1));
+	&add($A,&DWP(8,$tmp1));
+	&add($B,&DWP(12,$tmp1));
+	&add($C,&DWP(16,$tmp1));
+
+	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
+	 &add($D,64);		# advance input pointer
+	&mov(&DWP(4,$tmp1),$T);
+	 &cmp($D,&wparam(2));	# have we reached the end yet?
+	&mov(&DWP(8,$tmp1),$A);
+	 &mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
+	&mov(&DWP(12,$tmp1),$B);
+	 &mov($T,$D);		# input pointer
+	&mov(&DWP(16,$tmp1),$C);
+	&jb(&label("loop"));
+
+	&stack_pop(16+3);
+&function_end("sha1_block_data_order");
+
+if ($xmm) {
+######################################################################
+# The SSSE3 implementation.
+#
+# %xmm[0-7] are used as ring @X[] buffer containing quadruples of last
+# 32 elements of the message schedule or Xupdate outputs. First 4
+# quadruples are simply byte-swapped input, next 4 are calculated
+# according to method originally suggested by Dean Gaudet (modulo
+# being implemented in SSSE3). Once 8 quadruples or 32 elements are
+# collected, it switches to routine proposed by Max Locktyukhin.
+#
+# Calculations inevitably require temporary registers, and there are
+# no %xmm registers left to spare. For this reason part of the ring
+# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
+# buffer on the stack. Keep in mind that X[2] is an alias of X[-6],
+# X[3] of X[-5], and X[4] of X[-4]...
+#
+# Another notable optimization is aggressive stack frame compression
+# aiming to minimize the number of 9-byte instructions...
+#
+# Yet another notable optimization is "jumping" $B variable. It means
+# that there is no register permanently allocated for $B value. This
+# allowed to eliminate one instruction from body_20_39...
+#
+my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
+my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
+my @V=($A,$B,$C,$D,$E);
+my $j=0;			# hash round
+my $rx=0;
+my @T=($T,$tmp1);
+my $inp;
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+&function_begin("_sha1_block_data_order_ssse3");
+	&call	(&label("pic_point"));	# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tmp1);
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("ssse3_shortcut");
+
+	&movdqa	(@X[3],&QWP(0,$tmp1));		# K_00_19
+	&movdqa	(@X[4],&QWP(16,$tmp1));		# K_20_39
+	&movdqa	(@X[5],&QWP(32,$tmp1));		# K_40_59
+	&movdqa	(@X[6],&QWP(48,$tmp1));		# K_60_79
+	&movdqa	(@X[2],&QWP(64,$tmp1));		# pbswap mask
+
+	&mov	($E,&wparam(0));		# load argument block
+	&mov	($inp=@T[1],&wparam(1));
+	&mov	($D,&wparam(2));
+	&mov	(@T[0],"esp");
+
+	# stack frame layout
+	#
+	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
+	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
+	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
+	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
+	#
+	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
+	#	X[4]	X[5]	X[6]	X[7]
+	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
+	#
+	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
+	#	K_40_59	K_40_59	K_40_59	K_40_59
+	#	K_60_79	K_60_79	K_60_79	K_60_79
+	#	K_00_19	K_00_19	K_00_19	K_00_19
+	#	pbswap mask
+	#
+	# +192	ctx				# argument block
+	# +196	inp
+	# +200	end
+	# +204	esp
+	&sub	("esp",208);
+	&and	("esp",-64);
+
+	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
+	&movdqa	(&QWP(112+16,"esp"),@X[5]);
+	&movdqa	(&QWP(112+32,"esp"),@X[6]);
+	&shl	($D,6);				# len*64
+	&movdqa	(&QWP(112+48,"esp"),@X[3]);
+	&add	($D,$inp);			# end of input
+	&movdqa	(&QWP(112+64,"esp"),@X[2]);
+	&add	($inp,64);
+	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&mov	(&DWP(192+8,"esp"),$D);
+	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp
+
+	&mov	($A,&DWP(0,$E));		# load context
+	&mov	($B,&DWP(4,$E));
+	&mov	($C,&DWP(8,$E));
+	&mov	($D,&DWP(12,$E));
+	&mov	($E,&DWP(16,$E));
+	&mov	(@T[0],$B);			# magic seed
+
+	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
+	&movdqu	(@X[-3&7],&QWP(-48,$inp));
+	&movdqu	(@X[-2&7],&QWP(-32,$inp));
+	&movdqu	(@X[-1&7],&QWP(-16,$inp));
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&pshufb	(@X[-3&7],@X[2]);
+	&pshufb	(@X[-2&7],@X[2]);
+	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+	&pshufb	(@X[-1&7],@X[2]);
+	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
+	&paddd	(@X[-3&7],@X[3]);
+	&paddd	(@X[-2&7],@X[3]);
+	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
+	&psubd	(@X[-4&7],@X[3]);		# restore X[]
+	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
+	&psubd	(@X[-3&7],@X[3]);
+	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
+	&mov	(@T[1],$C);
+	&psubd	(@X[-2&7],@X[3]);
+	&xor	(@T[1],$D);
+	&movdqa	(@X[0],@X[-3&7]);
+	&and	(@T[0],@T[1]);
+	&jmp	(&label("loop"));
+
+######################################################################
+# The SSE instruction sequence is first broken into groups of
+# independent instructions, independent with respect to their inputs
+# and the shifter (not all architectures have more than one). Then
+# IALU instructions are "knitted in" between the SSE groups. Distance
+# is maintained for an SSE latency of 2, in the hope that it suits the
+# upcoming AMD Bulldozer [which allegedly also implements SSSE3]...
+#
+# Temporary registers usage. X[2] is volatile at the entry and at the
+# end is restored from backtrace ring buffer. X[3] is expected to
+# contain the current K_XX_XX constant and is used to calculate X[-1]+K
+# from the previous round; it becomes volatile the moment the value is
+# saved to stack for transfer to IALU. X[4] becomes volatile whenever
+# X[-4] is accumulated and offloaded to backtrace ring buffer, at the
+# end it is loaded with next K_XX_XX [which becomes X[3] in next
+# round]...
+#
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&palignr(@X[0],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	&movdqa	(@X[2],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &paddd	(@X[3],@X[-1&7]);
+	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&movdqa	(@X[4],@X[0]);
+	&movdqa	(@X[2],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
+	&paddd	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@X[2],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(@X[3],@X[4]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@X[4],30);
+	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslld	(@X[3],2);
+	&pxor	(@X[0],@X[4]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
+	  &movdqa	(@X[1],@X[-2&7])	if ($Xi<7);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&movdqa	(@X[2],@X[-1&7])	if ($Xi==8);
+	 eval(shift(@insns));		# body_20_39
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	&palignr(@X[2],@X[-2&7],8);	# compose "X[-6]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 if ($Xi%5) {
+	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
+	 } else {			# ... or load next one
+	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
+	 }
+	  &paddd	(@X[3],@X[-1&7]);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&movdqa	(@X[2],@X[0]);
+	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&pslld	(@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	&psrld	(@X[2],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	  &movdqa	(@X[3],@X[0])	if ($Xi<19);
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_ssse3_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &paddd	(@X[3],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&mov	($inp=@T[1],&DWP(192+4,"esp"));
+	&cmp	($inp,&DWP(192+8,"esp"));
+	&je	(&label("done"));
+
+	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
+	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
+	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
+	&movdqu	(@X[-3&7],&QWP(16,$inp));
+	&movdqu	(@X[-2&7],&QWP(32,$inp));
+	&movdqu	(@X[-1&7],&QWP(48,$inp));
+	&add	($inp,64);
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+
+  $Xi=0;
+}
+
+sub Xloop_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pshufb	(@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddd	(@X[($Xi-4)&7],@X[3]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psubd	(@X[($Xi-4)&7],@X[3]);
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+sub body_00_19 () {	# ((c^d)&b)^d
+	# on start @T[0]=(c^d)&b
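+	# (((c^d)&b)^d is the classic Ch(b,c,d)=(b&c)|(~b&d) rewritten so
+	# that b selects between c and d using a single temporary: for b=1
+	# it yields c, for b=0 it yields d.)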
+	return &body_20_39()	if ($rx==19);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&$_ror	($b,$j?7:2);',	# $b>>>2
+	'&xor	(@T[0],$d);',
+	'&mov	(@T[1],$a);',	# $b in next round
+
+	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
+	'&xor	($b,$c);',	# $c^$d for next round
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&and	(@T[1],$b);',	# ($b&($c^$d)) for next round
+
+	'&xor	($b,$c);',	# restore $b
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub body_20_39 () {	# b^d^c
+	# on entry @T[0]=b^d
+	return &body_40_59()	if ($rx==39);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
+	'&xor	(@T[0],$d)	if($j==19);'.
+	'&xor	(@T[0],$c)	if($j> 19);',	# ($b^$d^$c)
+	'&mov	(@T[1],$a);',	# $b in next round
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	(@T[1],$c)	if ($j< 79);',	# $b^$d for next round
+
+	'&$_ror	($b,7);',	# $b>>>2
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub body_40_59 () {	# ((b^c)&(c^d))^c
+	# on entry @T[0]=(b^c), (c^=d)
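+	# (((b^c)&(c^d))^c is yet another form of MAJ(b,c,d); keeping b^c
+	# and c^d live is what lets adjacent rounds share partial results.)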
+	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
+	'&and	(@T[0],$c)	if ($j>=40);',	# (b^c)&(c^d)
+	'&xor	($c,$d)		if ($j>=40);',	# restore $c
+
+	'&$_ror	($b,7);',	# $b>>>2
+	'&mov	(@T[1],$a);',	# $b for next round
+	'&xor	(@T[0],$c);',
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	(@T[1],$c)	if ($j==59);'.
+	'&xor	(@T[1],$b)	if ($j< 59);',	# b^c for next round
+
+	'&xor	($b,$c)		if ($j< 59);',	# c^d for next round
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+&set_label("loop",16);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	(&DWP(8,@T[1]),$C);
+	&mov	($B,$C);
+	&mov	(&DWP(12,@T[1]),$D);
+	&xor	($B,$D);
+	&mov	(&DWP(16,@T[1]),$E);
+	&and	($B,@T[0]);
+	&movdqa	(@X[0],@X[-3&7]);
+	&xchg	($B,@T[0]);
+
+	&jmp	(&label("loop"));
+
+&set_label("done",16);		$j=$saved_j; @V=@saved_V;
+
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	(&DWP(8,@T[1]),$C);
+	&mov	(&DWP(12,@T[1]),$D);
+	&mov	(&DWP(16,@T[1]),$E);
+
+&function_end("_sha1_block_data_order_ssse3");
+
+$rx=0;	# reset
+
+if ($ymm) {
+my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
+my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
+my @V=($A,$B,$C,$D,$E);
+my $j=0;			# hash round
+my @T=($T,$tmp1);
+my $inp;
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
+
+&function_begin("_sha1_block_data_order_avx");
+	&call	(&label("pic_point"));	# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tmp1);
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("avx_shortcut");
+	&vzeroall();
+
+	&vmovdqa(@X[3],&QWP(0,$tmp1));		# K_00_19
+	&vmovdqa(@X[4],&QWP(16,$tmp1));		# K_20_39
+	&vmovdqa(@X[5],&QWP(32,$tmp1));		# K_40_59
+	&vmovdqa(@X[6],&QWP(48,$tmp1));		# K_60_79
+	&vmovdqa(@X[2],&QWP(64,$tmp1));		# pbswap mask
+
+	&mov	($E,&wparam(0));		# load argument block
+	&mov	($inp=@T[1],&wparam(1));
+	&mov	($D,&wparam(2));
+	&mov	(@T[0],"esp");
+
+	# stack frame layout
+	#
+	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
+	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
+	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
+	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
+	#
+	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
+	#	X[4]	X[5]	X[6]	X[7]
+	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
+	#
+	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
+	#	K_40_59	K_40_59	K_40_59	K_40_59
+	#	K_60_79	K_60_79	K_60_79	K_60_79
+	#	K_00_19	K_00_19	K_00_19	K_00_19
+	#	pbswap mask
+	#
+	# +192	ctx				# argument block
+	# +196	inp
+	# +200	end
+	# +204	esp
+	&sub	("esp",208);
+	&and	("esp",-64);
+
+	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
+	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
+	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
+	&shl	($D,6);				# len*64
+	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
+	&add	($D,$inp);			# end of input
+	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
+	&add	($inp,64);
+	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&mov	(&DWP(192+8,"esp"),$D);
+	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp
+
+	&mov	($A,&DWP(0,$E));		# load context
+	&mov	($B,&DWP(4,$E));
+	&mov	($C,&DWP(8,$E));
+	&mov	($D,&DWP(12,$E));
+	&mov	($E,&DWP(16,$E));
+	&mov	(@T[0],$B);			# magic seed
+
+	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
+	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
+	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
+	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
+	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
+	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
+	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
+	&vpaddd	(@X[1],@X[-3&7],@X[3]);
+	&vpaddd	(@X[2],@X[-2&7],@X[3]);
+	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
+	&mov	(@T[1],$C);
+	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
+	&xor	(@T[1],$D);
+	&vmovdqa(&QWP(0+32,"esp"),@X[2]);
+	&and	(@T[0],@T[1]);
+	&jmp	(&label("loop"));
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
+	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[2],@X[2],@X[-2&7]);		# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@X[2],@X[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@X[3],@X[4],30);
+	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@X[4],@X[4],2);
+	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[3]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 if ($Xi%5) {
+	  &vmovdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
+	 } else {			# ... or load next one
+	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
+	 }
+	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpsrld	(@X[2],@X[0],30);
+	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&mov	($inp=@T[1],&DWP(192+4,"esp"));
+	&cmp	($inp,&DWP(192+8,"esp"));
+	&je	(&label("done"));
+
+	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
+	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
+	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
+	&vmovdqu(@X[-3&7],&QWP(16,$inp));
+	&vmovdqu(@X[-2&7],&QWP(32,$inp));
+	&vmovdqu(@X[-1&7],&QWP(48,$inp));
+	&add	($inp,64);
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);		# byte swap
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+
+  $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpshufb	(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqa	(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+&set_label("loop",16);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	($B,$C);
+	&mov	(&DWP(8,@T[1]),$C);
+	&xor	($B,$D);
+	&mov	(&DWP(12,@T[1]),$D);
+	&and	($B,@T[0]);
+	&mov	(&DWP(16,@T[1]),$E);
+	&xchg	($B,@T[0]);
+
+	&jmp	(&label("loop"));
+
+&set_label("done",16);		$j=$saved_j; @V=@saved_V;
+
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+
+	&vzeroall();
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	(&DWP(8,@T[1]),$C);
+	&mov	(&DWP(12,@T[1]),$D);
+	&mov	(&DWP(16,@T[1]),$E);
+&function_end("_sha1_block_data_order_avx");
+}
+&set_label("K_XX_XX",64);
+&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
+&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
+&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
+&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
+&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
+}
+&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/crypto/sha/asm/sha1-armv4-large.pl b/crypto/sha/asm/sha1-armv4-large.pl
new file mode 100644
index 0000000..6643731
--- /dev/null
+++ b/crypto/sha/asm/sha1-armv4-large.pl
@@ -0,0 +1,251 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# sha1_block procedure for ARMv4.
+#
+# January 2007.
+
+# Size/performance trade-off
+# ====================================================================
+# impl		size in bytes	comp cycles[*]	measured performance
+# ====================================================================
+# thumb		304		3212		4420
+# armv4-small	392/+29%	1958/+64%	2250/+96%
+# armv4-compact	740/+89%	1552/+26%	1840/+22%
+# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
+# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
+# ====================================================================
+# thumb		= same as 'small' but in Thumb instructions[**] and
+#		  with recurring code in two private functions;
+# small		= detached Xload/update, loops are folded;
+# compact	= detached Xload/update, 5x unroll;
+# large		= interleaved Xload/update, 5x unroll;
+# full unroll	= interleaved Xload/update, full unroll, estimated[!];
+#
+# [*]	Manually counted instructions in "grand" loop body. Measured
+#	performance is affected by prologue and epilogue overhead,
+#	i-cache availability, branch penalties, etc.
+# [**]	While each Thumb instruction is half the size, they are not as
+#	diverse as ARM ones: e.g., there are only two arithmetic
+#	instructions with 3 arguments, no [fixed] rotate, and addressing
+#	modes are limited. As a result it takes more instructions to do
+#	the same job in Thumb, so the code is never half the size and is
+#	always slower.
+# [***]	which is also ~35% better than compiler-generated code. Dual-
+#	issue Cortex A8 core was measured to process input block in
+#	~990 cycles.
+
+# August 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 13% improvement on
+# Cortex A8 core and in absolute terms ~870 cycles per input block
+# [or 13.6 cycles per byte].
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 10%
+# improvement on Cortex A8 core and 12.2 cycles per byte.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$ctx="r0";
+$inp="r1";
+$len="r2";
+$a="r3";
+$b="r4";
+$c="r5";
+$d="r6";
+$e="r7";
+$K="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+$Xi="r14";
+@V=($a,$b,$c,$d,$e);
+
+sub Xupdate {
+my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
+$code.=<<___;
+	ldr	$t0,[$Xi,#15*4]
+	ldr	$t1,[$Xi,#13*4]
+	ldr	$t2,[$Xi,#7*4]
+	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
+	ldr	$t3,[$Xi,#2*4]
+	eor	$t0,$t0,$t1
+	eor	$t2,$t2,$t3			@ 1 cycle stall
+	eor	$t1,$c,$d			@ F_xx_xx
+	mov	$t0,$t0,ror#31
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+	eor	$t0,$t0,$t2,ror#31
+	str	$t0,[$Xi,#-4]!
+	$opt1					@ F_xx_xx
+	$opt2					@ F_xx_xx
+	add	$e,$e,$t0			@ E+=X[i]
+___
+}
+
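+# Note that the round code keeps the working variables in a rotated
+# representation: the ROTATE(B,30) mandated by the specification is
+# never performed as a separate step but is folded into the barrel
+# shifter of later instructions (the recurring "ror#2" operands),
+# which costs nothing on ARM.
+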
+sub BODY_00_15 {
+my ($a,$b,$c,$d,$e)=@_;
+$code.=<<___;
+#if __ARM_ARCH__<7
+	ldrb	$t1,[$inp,#2]
+	ldrb	$t0,[$inp,#3]
+	ldrb	$t2,[$inp,#1]
+	add	$e,$K,$e,ror#2			@ E+=K_00_19
+	ldrb	$t3,[$inp],#4
+	orr	$t0,$t0,$t1,lsl#8
+	eor	$t1,$c,$d			@ F_xx_xx
+	orr	$t0,$t0,$t2,lsl#16
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+	orr	$t0,$t0,$t3,lsl#24
+#else
+	ldr	$t0,[$inp],#4			@ handles unaligned
+	add	$e,$K,$e,ror#2			@ E+=K_00_19
+	eor	$t1,$c,$d			@ F_xx_xx
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+#ifdef __ARMEL__
+	rev	$t0,$t0				@ byte swap
+#endif
+#endif
+	and	$t1,$b,$t1,ror#2
+	add	$e,$e,$t0			@ E+=X[i]
+	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
+	str	$t0,[$Xi,#-4]!
+	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_16_19 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
+$code.=<<___;
+	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
+	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_20_39 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
+$code.=<<___;
+	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
+___
+}
+
+sub BODY_40_59 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
+$code.=<<___;
+	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
+	add	$e,$e,$t2,ror#2
+___
+}
+
+$code=<<___;
+#if defined(__arm__)
+#include "arm_arch.h"
+
+.text
+
+.global	sha1_block_data_order
+.type	sha1_block_data_order,%function
+
+.align	2
+sha1_block_data_order:
+	stmdb	sp!,{r4-r12,lr}
+	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
+	ldmia	$ctx,{$a,$b,$c,$d,$e}
+.Lloop:
+	ldr	$K,.LK_00_19
+	mov	$Xi,sp
+	sub	sp,sp,#15*4
+	mov	$c,$c,ror#30
+	mov	$d,$d,ror#30
+	mov	$e,$e,ror#30		@ [6]
+.L_00_15:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_00_15(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp
+	bne	.L_00_15		@ [((11+4)*5+2)*3]
+	sub	sp,sp,#25*4
+___
+	&BODY_00_15(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+$code.=<<___;
+
+	ldr	$K,.LK_20_39		@ [+15+16*4]
+	cmn	sp,#0			@ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_20_39(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp			@ preserve carry
+	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
+	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
+
+	ldr	$K,.LK_40_59
+	sub	sp,sp,#20*4		@ [+2]
+.L_40_59:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_40_59(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp
+	bne	.L_40_59		@ [+((12+5)*5+2)*4]
+
+	ldr	$K,.LK_60_79
+	sub	sp,sp,#20*4
+	cmp	sp,#0			@ set carry to denote 60_79
+	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
+.L_done:
+	add	sp,sp,#80*4		@ "deallocate" stack frame
+	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
+	add	$a,$K,$a
+	add	$b,$t0,$b
+	add	$c,$t1,$c,ror#2
+	add	$d,$t2,$d,ror#2
+	add	$e,$t3,$e,ror#2
+	stmia	$ctx,{$a,$b,$c,$d,$e}
+	teq	$inp,$len
+	bne	.Lloop			@ [+18], total 1307
+
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia	sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.align	2
+.LK_00_19:	.word	0x5a827999
+.LK_20_39:	.word	0x6ed9eba1
+.LK_40_59:	.word	0x8f1bbcdc
+.LK_60_79:	.word	0xca62c1d6
+.size	sha1_block_data_order,.-sha1_block_data_order
+.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+
+#endif
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
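+# (0xe12fff1e is the machine encoding of 'bx lr'; emitting the raw
+# word lets pre-ARMv4T assemblers accept the file, while the run-time
+# 'tst lr,#1' test above keeps plain ARMv4 cores on the 'moveq pc,lr'
+# path.)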
+print $code;
+close STDOUT; # enforce flush
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
new file mode 100644
index 0000000..194b5ff
--- /dev/null
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -0,0 +1,1837 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# sha1_block procedure for x86_64.
+#
+# It was brought to my attention that on EM64T, compiler-generated
+# code was far behind the 32-bit assembler implementation. This is
+# unlike Opteron, where compiler-generated code was only 15% behind
+# the 32-bit assembler, which originally made it hard to motivate the
+# effort. There was a suggestion to mechanically translate the 32-bit
+# code, but I
+# dismissed it, reasoning that x86_64 offers enough register bank
+# capacity to fully utilize SHA-1 parallelism. Therefore this fresh
+# implementation:-) However! While 64-bit code does perform better
+# on Opteron, I failed to beat 32-bit assembler on EM64T core. Well,
+# x86_64 does offer a larger *addressable* bank, but the out-of-order
+# core reaches for even more registers through dynamic aliasing, and
+# the EM64T core must have managed to run-time optimize even 32-bit
+# code just as well as the 64-bit one. The performance improvement is
+# summarized in the following table:
+#
+#		gcc 3.4		32-bit asm	cycles/byte
+# Opteron	+45%		+20%		6.8
+# Xeon P4	+65%		+0%		9.9
+# Core2		+60%		+10%		7.0
+
+# August 2009.
+#
+# The code was revised to minimize code size and to maximize
+# "distance" between instructions producing input to 'lea'
+# instruction and the 'lea' instruction itself, which is essential
+# for Intel Atom core.
+
+# October 2010.
+#
+# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
+# is to offload the message schedule, denoted by Wt in the NIST
+# specification or Xupdate in the OpenSSL source, to the SIMD unit.
+# See the sha1-586.pl module
+# for background and implementation details. The only difference from
+# 32-bit code is that 64-bit code doesn't have to spill @X[] elements
+# to free temporary registers.
+
+# April 2011.
+#
+# Add AVX code path. See sha1-586.pl for further information.
+
+# May 2013.
+#
+# Add AVX2+BMI code path. Initial attempt (utilizing BMI instructions
+# and loading pair of consecutive blocks to 256-bit %ymm registers)
+# did not provide an impressive performance improvement until a
+# crucial hint regarding the number of Xupdate iterations to
+# pre-compute in advance was provided by Ilya Albrekht of Intel Corp.
+
+######################################################################
+# Current performance is summarized in the following table. Numbers are
+# CPU clock cycles spent to process single byte (less is better).
+#
+#		x86_64		SSSE3		AVX[2]
+# P4		9.8		-
+# Opteron	6.65		-
+# Core2		6.70		6.05/+11%	-
+# Westmere	7.08		5.44/+30%	-
+# Sandy Bridge	7.93		6.16/+28%	4.99/+59%
+# Ivy Bridge	6.30		4.63/+36%	4.60/+37%
+# Haswell	5.98		4.12/+45%	3.57/+67%
+# Bulldozer	10.9		5.95/+82%
+# VIA Nano	10.2		7.46/+37%
+# Atom		11.0		9.61/+14%
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$ctx="%rdi";	# 1st arg
+$inp="%rsi";	# 2nd arg
+$num="%rdx";	# 3rd arg
+
+# reassign arguments in order to produce more compact code
+$ctx="%r8";
+$inp="%r9";
+$num="%r10";
+
+$t0="%eax";
+$t1="%ebx";
+$t2="%ecx";
+@xi=("%edx","%ebp");
+$A="%esi";
+$B="%edi";
+$C="%r11d";
+$D="%r12d";
+$E="%r13d";
+
+@V=($A,$B,$C,$D,$E);
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+$code.=<<___ if ($i==0);
+	mov	`4*$i`($inp),$xi[0]
+	bswap	$xi[0]
+	mov	$xi[0],`4*$i`(%rsp)
+___
+$code.=<<___ if ($i<15);
+	mov	$c,$t0
+	mov	`4*$j`($inp),$xi[1]
+	mov	$a,$t2
+	xor	$d,$t0
+	bswap	$xi[1]
+	rol	\$5,$t2
+	lea	0x5a827999($xi[0],$e),$e
+	and	$b,$t0
+	mov	$xi[1],`4*$j`(%rsp)
+	add	$t2,$e
+	xor	$d,$t0
+	rol	\$30,$b
+	add	$t0,$e
+___
+$code.=<<___ if ($i>=15);
+	mov	`4*($j%16)`(%rsp),$xi[1]
+	mov	$c,$t0
+	mov	$a,$t2
+	xor	`4*(($j+2)%16)`(%rsp),$xi[1]
+	xor	$d,$t0
+	rol	\$5,$t2
+	xor	`4*(($j+8)%16)`(%rsp),$xi[1]
+	and	$b,$t0
+	lea	0x5a827999($xi[0],$e),$e
+	xor	`4*(($j+13)%16)`(%rsp),$xi[1]
+	xor	$d,$t0
+	rol	\$1,$xi[1]
+	add	$t2,$e
+	rol	\$30,$b
+	mov	$xi[1],`4*($j%16)`(%rsp)
+	add	$t0,$e
+___
+unshift(@xi,pop(@xi));
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+my $K=($i<40)?0x6ed9eba1:0xca62c1d6;
+$code.=<<___ if ($i<79);
+	mov	`4*($j%16)`(%rsp),$xi[1]
+	mov	$c,$t0
+	mov	$a,$t2
+	xor	`4*(($j+2)%16)`(%rsp),$xi[1]
+	xor	$b,$t0
+	rol	\$5,$t2
+	lea	$K($xi[0],$e),$e
+	xor	`4*(($j+8)%16)`(%rsp),$xi[1]
+	xor	$d,$t0
+	add	$t2,$e
+	xor	`4*(($j+13)%16)`(%rsp),$xi[1]
+	rol	\$30,$b
+	add	$t0,$e
+	rol	\$1,$xi[1]
+___
+$code.=<<___ if ($i<76);
+	mov	$xi[1],`4*($j%16)`(%rsp)
+___
+$code.=<<___ if ($i==79);
+	mov	$c,$t0
+	mov	$a,$t2
+	xor	$b,$t0
+	lea	$K($xi[0],$e),$e
+	rol	\$5,$t2
+	xor	$d,$t0
+	add	$t2,$e
+	rol	\$30,$b
+	add	$t0,$e
+___
+unshift(@xi,pop(@xi));
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+$code.=<<___;
+	mov	`4*($j%16)`(%rsp),$xi[1]
+	mov	$c,$t0
+	mov	$c,$t1
+	xor	`4*(($j+2)%16)`(%rsp),$xi[1]
+	and	$d,$t0
+	mov	$a,$t2
+	xor	`4*(($j+8)%16)`(%rsp),$xi[1]
+	xor	$d,$t1
+	lea	0x8f1bbcdc($xi[0],$e),$e
+	rol	\$5,$t2
+	xor	`4*(($j+13)%16)`(%rsp),$xi[1]
+	add	$t0,$e
+	and	$b,$t1
+	rol	\$1,$xi[1]
+	add	$t1,$e
+	rol	\$30,$b
+	mov	$xi[1],`4*($j%16)`(%rsp)
+	add	$t2,$e
+___
+unshift(@xi,pop(@xi));
+}
+
+$code.=<<___;
+.text
+.extern	OPENSSL_ia32cap_P
+
+.globl	sha1_block_data_order
+.type	sha1_block_data_order,\@function,3
+.align	16
+sha1_block_data_order:
+	mov	OPENSSL_ia32cap_P+0(%rip),%r9d
+	mov	OPENSSL_ia32cap_P+4(%rip),%r8d
+	mov	OPENSSL_ia32cap_P+8(%rip),%r10d
+	test	\$`1<<9`,%r8d		# check SSSE3 bit
+	jz	.Lialu
+___
+$code.=<<___ if ($avx>1);
+	and	\$`1<<3|1<<5|1<<8`,%r10d	# check AVX2+BMI1+BMI2
+	cmp	\$`1<<3|1<<5|1<<8`,%r10d
+	je	_avx2_shortcut
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<28`,%r8d		# mask AVX bit
+	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
+	or	%r9d,%r8d
+	cmp	\$`1<<28|1<<30`,%r8d
+	je	_avx_shortcut
+___
+$code.=<<___;
+	jmp	_ssse3_shortcut
+
+.align	16
+.Lialu:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	mov	%rsp,%r11
+	mov	%rdi,$ctx	# reassigned argument
+	sub	\$`8+16*4`,%rsp
+	mov	%rsi,$inp	# reassigned argument
+	and	\$-64,%rsp
+	mov	%rdx,$num	# reassigned argument
+	mov	%r11,`16*4`(%rsp)
+.Lprologue:
+
+	mov	0($ctx),$A
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	16($ctx),$E
+	jmp	.Lloop
+
+.align	16
+.Lloop:
+___
+for($i=0;$i<20;$i++)	{ &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	add	0($ctx),$A
+	add	4($ctx),$B
+	add	8($ctx),$C
+	add	12($ctx),$D
+	add	16($ctx),$E
+	mov	$A,0($ctx)
+	mov	$B,4($ctx)
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+
+	sub	\$1,$num
+	lea	`16*4`($inp),$inp
+	jnz	.Lloop
+
+	mov	`16*4`(%rsp),%rsi
+	mov	(%rsi),%r13
+	mov	8(%rsi),%r12
+	mov	16(%rsi),%rbp
+	mov	24(%rsi),%rbx
+	lea	32(%rsi),%rsp
+.Lepilogue:
+	ret
+.size	sha1_block_data_order,.-sha1_block_data_order
+___
+{{{
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my $Kx="%xmm11";
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
+my @T=("%esi","%edi");
+my $j=0;
+my $rx=0;
+my $K_XX_XX="%r11";
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+{ my $sn;
+sub align32() {
+  ++$sn;
+$code.=<<___;
+	jmp	.Lalign32_$sn	# see "Decoded ICache" in manual
+.align	32
+.Lalign32_$sn:
+___
+}
+}
+
+$code.=<<___;
+.type	sha1_block_data_order_ssse3,\@function,3
+.align	16
+sha1_block_data_order_ssse3:
+_ssse3_shortcut:
+	mov	%rsp,%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13		# redundant, done to share Win64 SE handler
+	push	%r14
+	lea	`-64-($win64?6*16:0)`(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-40-6*16(%rax)
+	movaps	%xmm7,-40-5*16(%rax)
+	movaps	%xmm8,-40-4*16(%rax)
+	movaps	%xmm9,-40-3*16(%rax)
+	movaps	%xmm10,-40-2*16(%rax)
+	movaps	%xmm11,-40-1*16(%rax)
+.Lprologue_ssse3:
+___
+$code.=<<___;
+	mov	%rax,%r14	# original %rsp
+	and	\$-64,%rsp
+	mov	%rdi,$ctx	# reassigned argument
+	mov	%rsi,$inp	# reassigned argument
+	mov	%rdx,$num	# reassigned argument
+
+	shl	\$6,$num
+	add	$inp,$num
+	lea	K_XX_XX+64(%rip),$K_XX_XX
+
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	movdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	movdqa	-64($K_XX_XX),@Tx[1]	# K_00_19
+	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	movdqu	16($inp),@X[-3&7]
+	movdqu	32($inp),@X[-2&7]
+	movdqu	48($inp),@X[-1&7]
+	pshufb	@X[2],@X[-4&7]		# byte swap
+	add	\$64,$inp
+	pshufb	@X[2],@X[-3&7]
+	pshufb	@X[2],@X[-2&7]
+	pshufb	@X[2],@X[-1&7]
+	paddd	@Tx[1],@X[-4&7]		# add K_00_19
+	paddd	@Tx[1],@X[-3&7]
+	paddd	@Tx[1],@X[-2&7]
+	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
+	psubd	@Tx[1],@X[-4&7]		# restore X[]
+	movdqa	@X[-3&7],16(%rsp)
+	psubd	@Tx[1],@X[-3&7]
+	movdqa	@X[-2&7],32(%rsp)
+	psubd	@Tx[1],@X[-2&7]
+	jmp	.Loop_ssse3
+___
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
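+
+# A usage sketch of the AUTOLOAD thunk above (comment only): the
+# destination comes first on the Perl side but last in the emitted
+# AT&T assembly, and bare numeric arguments gain a "$" prefix, e.g.
+#
+#	&add	("%eax","%ebx");	# emits	add	%ebx,%eax
+#	&shr	("%eax",3);		# emits	shr	$3,%eax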
+
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&movdqa	(@X[0],@X[-3&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(@Tx[0],@X[-1&7]);
+	&palignr(@X[0],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psrldq	(@Tx[0],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&movdqa	(@Tx[2],@X[0]);
+	&movdqa	(@Tx[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslldq	(@Tx[2],12);		# "X[0]"<<96, extract one dword
+	&paddd	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(@Tx[1],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[2],30);
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslld	(@Tx[1],2);
+	&pxor	(@X[0],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(@Tx[2],eval(2*16*(($Xi)/5)-64)."($K_XX_XX)");	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
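+
+# What the interleaved sequence above computes, four lanes at a time:
+# the schedule X[i] = (X[i-3]^X[i-8]^X[i-14]^X[i-16])<<<1 has a 3-back
+# dependency, so lane 3's X[i-3] does not exist yet.  That term is
+# simply left out (psrldq keeps only 3 valid dwords), all four lanes
+# are rotated by 1, and lane 3 is then patched with rol(T,2), T being
+# the pre-rotate lane-0 value: the missing word was rol(T,1), and the
+# vector-wide rotate added one more bit.  The pslldq/psrld/pslld/pxor
+# tail is exactly that patch.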
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&movdqa	(@Tx[0],@X[-1&7])	if ($Xi==8);
+	 eval(shift(@insns));		# body_20_39
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	&palignr(@Tx[0],@X[-2&7],8);	# compose "X[-6]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
+	if ($Xi%5) {
+	  &movdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+	} else {			# ... or load next one
+	  &movdqa	(@Tx[2],eval(2*16*($Xi/5)-64)."($K_XX_XX)");
+	}
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&movdqa	(@Tx[0],@X[0]);
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&pslld	(@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	&psrld	(@Tx[0],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &movdqa	(@Tx[1],@X[0])	if ($Xi<19);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_ssse3_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$num);
+	&je	(".Ldone_ssse3");
+
+	unshift(@Tx,pop(@Tx));
+
+	&movdqa	(@X[2],"64($K_XX_XX)");		# pbswap mask
+	&movdqa	(@Tx[1],"-64($K_XX_XX)");	# K_00_19
+	&movdqu	(@X[-4&7],"0($inp)");		# load input
+	&movdqu	(@X[-3&7],"16($inp)");
+	&movdqu	(@X[-2&7],"32($inp)");
+	&movdqu	(@X[-1&7],"48($inp)");
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pshufb	(@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddd	(@X[($Xi-4)&7],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psubd	(@X[($Xi-4)&7],@Tx[1]);
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+sub body_00_19 () {	# ((c^d)&b)^d
+	# on start @T[0]=(c^d)&b
+	return &body_20_39() if ($rx==19); $rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&$_ror	($b,$j?7:2)',	# $b>>>2
+	'&xor	(@T[0],$d)',
+	'&mov	(@T[1],$a)',	# $b for next round
+
+	'&add	($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
+	'&xor	($b,$c)',	# $c^$d for next round
+
+	'&$_rol	($a,5)',
+	'&add	($e,@T[0])',
+	'&and	(@T[1],$b)',	# ($b&($c^$d)) for next round
+
+	'&xor	($b,$c)',	# restore $b
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
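+
+# The round relies on F_00_19(b,c,d) = (b&c)|(~b&d) = ((c^d)&b)^d,
+# which trades the NOT for a reusable c^d.  An exhaustive bit-level
+# check, as a stand-alone sketch (comment only):
+#
+#	for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
+#	    die if (((($b&$c)|(~$b&$d))&1) != ((($c^$d)&$b)^$d));
+#	}}}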
+
+sub body_20_39 () {	# b^d^c
+	# on entry @T[0]=b^d
+	return &body_40_59() if ($rx==39); $rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
+	'&xor	(@T[0],$d)	if($j==19);'.
+	'&xor	(@T[0],$c)	if($j> 19)',	# ($b^$d^$c)
+	'&mov	(@T[1],$a)',	# $b for next round
+
+	'&$_rol	($a,5)',
+	'&add	($e,@T[0])',
+	'&xor	(@T[1],$c)	if ($j< 79)',	# $b^$d for next round
+
+	'&$_ror	($b,7)',	# $b>>>2
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub body_40_59 () {	# ((b^c)&(c^d))^c
+	# on entry @T[0]=(b^c), (c^=d)
+	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
+	'&and	(@T[0],$c)	if ($j>=40)',	# (b^c)&(c^d)
+	'&xor	($c,$d)		if ($j>=40)',	# restore $c
+
+	'&$_ror	($b,7)',	# $b>>>2
+	'&mov	(@T[1],$a)',	# $b for next round
+	'&xor	(@T[0],$c)',
+
+	'&$_rol	($a,5)',
+	'&add	($e,@T[0])',
+	'&xor	(@T[1],$c)	if ($j==59);'.
+	'&xor	(@T[1],$b)	if ($j< 59)',	# b^c for next round
+
+	'&xor	($b,$c)		if ($j< 59)',	# c^d for next round
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
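+
+# Likewise F_40_59(b,c,d) = Maj(b,c,d) = (b&c)|(b&d)|(c&d)
+# = ((b^c)&(c^d))^c, which is what lets @T[0] carry b^c between
+# rounds.  Stand-alone check (comment only):
+#
+#	for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
+#	    die if (((($b^$c)&($c^$d))^$c) != (($b&$c)|($b&$d)|($c&$d)));
+#	}}}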
+$code.=<<___;
+.align	16
+.Loop_ssse3:
+___
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_ssse3
+
+.align	16
+.Ldone_ssse3:
+___
+				$j=$saved_j; @V=@saved_V;
+
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-40-6*16(%r14),%xmm6
+	movaps	-40-5*16(%r14),%xmm7
+	movaps	-40-4*16(%r14),%xmm8
+	movaps	-40-3*16(%r14),%xmm9
+	movaps	-40-2*16(%r14),%xmm10
+	movaps	-40-1*16(%r14),%xmm11
+___
+$code.=<<___;
+	lea	(%r14),%rsi
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_ssse3:
+	ret
+.size	sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3
+___
+
+if ($avx) {
+$Xi=4;				# reset variables
+@X=map("%xmm$_",(4..7,0..3));
+@Tx=map("%xmm$_",(8..10));
+$j=0;
+$rx=0;
+
+my $done_avx_label=".Ldone_avx";
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
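+
+# shld/shrd with identical source and destination registers act as
+# plain rotates, so the two closures above let the existing &$_rol and
+# &$_ror call sites emit the double-shift forms on the AVX path
+# without any change at the call sites.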
+
+$code.=<<___;
+.type	sha1_block_data_order_avx,\@function,3
+.align	16
+sha1_block_data_order_avx:
+_avx_shortcut:
+	mov	%rsp,%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13		# redundant, done to share Win64 SE handler
+	push	%r14
+	lea	`-64-($win64?6*16:0)`(%rsp),%rsp
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	vmovaps	%xmm6,-40-6*16(%rax)
+	vmovaps	%xmm7,-40-5*16(%rax)
+	vmovaps	%xmm8,-40-4*16(%rax)
+	vmovaps	%xmm9,-40-3*16(%rax)
+	vmovaps	%xmm10,-40-2*16(%rax)
+	vmovaps	%xmm11,-40-1*16(%rax)
+.Lprologue_avx:
+___
+$code.=<<___;
+	mov	%rax,%r14	# original %rsp
+	and	\$-64,%rsp
+	mov	%rdi,$ctx	# reassigned argument
+	mov	%rsi,$inp	# reassigned argument
+	mov	%rdx,$num	# reassigned argument
+
+	shl	\$6,$num
+	add	$inp,$num
+	lea	K_XX_XX+64(%rip),$K_XX_XX
+
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	vmovdqa	-64($K_XX_XX),$Kx	# K_00_19
+	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	vmovdqu	16($inp),@X[-3&7]
+	vmovdqu	32($inp),@X[-2&7]
+	vmovdqu	48($inp),@X[-1&7]
+	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
+	add	\$64,$inp
+	vpshufb	@X[2],@X[-3&7],@X[-3&7]
+	vpshufb	@X[2],@X[-2&7],@X[-2&7]
+	vpshufb	@X[2],@X[-1&7],@X[-1&7]
+	vpaddd	$Kx,@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	$Kx,@X[-3&7],@X[1]
+	vpaddd	$Kx,@X[-2&7],@X[2]
+	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vmovdqa	@X[1],16(%rsp)
+	vmovdqa	@X[2],32(%rsp)
+	jmp	.Loop_avx
+___
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	  &vmovdqa	($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)")	if ($Xi%5==0);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpsrld	(@Tx[0],@X[0],30);
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$num);
+	&je	($done_avx_label);
+
+	&vmovdqa(@X[2],"64($K_XX_XX)");		# pbswap mask
+	&vmovdqa($Kx,"-64($K_XX_XX)");		# K_00_19
+	&vmovdqu(@X[-4&7],"0($inp)");		# load input
+	&vmovdqu(@X[-3&7],"16($inp)");
+	&vmovdqu(@X[-2&7],"32($inp)");
+	&vmovdqu(@X[-1&7],"48($inp)");
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+$code.=<<___;
+.align	16
+.Loop_avx:
+___
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_avx
+
+.align	16
+$done_avx_label:
+___
+				$j=$saved_j; @V=@saved_V;
+
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+
+$code.=<<___;
+	vzeroupper
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-40-6*16(%r14),%xmm6
+	movaps	-40-5*16(%r14),%xmm7
+	movaps	-40-4*16(%r14),%xmm8
+	movaps	-40-3*16(%r14),%xmm9
+	movaps	-40-2*16(%r14),%xmm10
+	movaps	-40-1*16(%r14),%xmm11
+___
+$code.=<<___;
+	lea	(%r14),%rsi
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_avx:
+	ret
+.size	sha1_block_data_order_avx,.-sha1_block_data_order_avx
+___
+
+if ($avx>1) {
+use integer;
+$Xi=4;					# reset variables
+@X=map("%ymm$_",(4..7,0..3));
+@Tx=map("%ymm$_",(8..10));
+$Kx="%ymm11";
+$j=0;
+
+my @ROTX=("%eax","%ebp","%ebx","%ecx","%edx","%esi");
+my ($a5,$t0)=("%r12d","%edi");
+
+my ($A,$F,$B,$C,$D,$E)=@ROTX;
+my $rx=0;
+my $frame="%r13";
+
+$code.=<<___;
+.type	sha1_block_data_order_avx2,\@function,3
+.align	16
+sha1_block_data_order_avx2:
+_avx2_shortcut:
+	mov	%rsp,%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	lea	-6*16(%rsp),%rsp
+	vmovaps	%xmm6,-40-6*16(%rax)
+	vmovaps	%xmm7,-40-5*16(%rax)
+	vmovaps	%xmm8,-40-4*16(%rax)
+	vmovaps	%xmm9,-40-3*16(%rax)
+	vmovaps	%xmm10,-40-2*16(%rax)
+	vmovaps	%xmm11,-40-1*16(%rax)
+.Lprologue_avx2:
+___
+$code.=<<___;
+	mov	%rax,%r14		# original %rsp
+	mov	%rdi,$ctx		# reassigned argument
+	mov	%rsi,$inp		# reassigned argument
+	mov	%rdx,$num		# reassigned argument
+
+	lea	-640(%rsp),%rsp
+	shl	\$6,$num
+	 lea	64($inp),$frame
+	and	\$-128,%rsp
+	add	$inp,$num
+	lea	K_XX_XX+64(%rip),$K_XX_XX
+
+	mov	0($ctx),$A		# load context
+	 cmp	$num,$frame
+	 cmovae	$inp,$frame		# next or same block
+	mov	4($ctx),$F
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	16($ctx),$E
+	vmovdqu	64($K_XX_XX),@X[2]	# pbswap mask
+
+	vmovdqu		($inp),%xmm0
+	vmovdqu		16($inp),%xmm1
+	vmovdqu		32($inp),%xmm2
+	vmovdqu		48($inp),%xmm3
+	lea		64($inp),$inp
+	vinserti128	\$1,($frame),@X[-4&7],@X[-4&7]
+	vinserti128	\$1,16($frame),@X[-3&7],@X[-3&7]
+	vpshufb		@X[2],@X[-4&7],@X[-4&7]
+	vinserti128	\$1,32($frame),@X[-2&7],@X[-2&7]
+	vpshufb		@X[2],@X[-3&7],@X[-3&7]
+	vinserti128	\$1,48($frame),@X[-1&7],@X[-1&7]
+	vpshufb		@X[2],@X[-2&7],@X[-2&7]
+	vmovdqu		-64($K_XX_XX),$Kx	# K_00_19
+	vpshufb		@X[2],@X[-1&7],@X[-1&7]
+
+	vpaddd	$Kx,@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	$Kx,@X[-3&7],@X[1]
+	vmovdqu	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vpaddd	$Kx,@X[-2&7],@X[2]
+	vmovdqu	@X[1],32(%rsp)
+	vpaddd	$Kx,@X[-1&7],@X[3]
+	vmovdqu	@X[2],64(%rsp)
+	vmovdqu	@X[3],96(%rsp)
+___
+for (;$Xi<8;$Xi++) {	# Xupdate_avx2_16_31
+    use integer;
+
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	&vpsrld	(@Tx[0],@X[0],31);
+	&vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	&vpaddd	(@Tx[1],@X[0],$Kx);
+	&vmovdqu("32*$Xi(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+
+	push(@X,shift(@X));	# "rotate" X[]
+}
+$code.=<<___;
+	lea	128(%rsp),$frame
+	jmp	.Loop_avx2
+.align	32
+.Loop_avx2:
+	rorx	\$2,$F,$B
+	andn	$D,$F,$t0
+	and	$C,$F
+	xor	$t0,$F
+___
+sub bodyx_00_19 () {	# 8 instructions, 3 cycles critical path
+	# at start $f=(b&c)^(~b&d), $b>>>=2
+	return &bodyx_20_39() if ($rx==19); $rx++;
+	(
+	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+	'&add	($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
+	 '&lea	($frame,"256($frame)")	if ($j%32==31);',
+	'&andn	($t0,$a,$c)',			# ~b&d for next round
+
+	'&add	($e,$f)',			# e+=(b&c)^(~b&d)
+	'&rorx	($a5,$a,27)',			# a<<<5
+	'&rorx	($f,$a,2)',			# b>>>2 for next round
+	'&and	($a,$b)',			# b&c for next round
+
+	'&add	($e,$a5)',			# e+=a<<<5
+	'&xor	($a,$t0);'.			# f=(b&c)^(~b&d) for next round
+
+	'unshift(@ROTX,pop(@ROTX)); $j++;'
+	)
+}
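+
+# In scalar terms, one bodyx_00_19 round is (a sketch; K is already
+# folded into the stack slot):
+#
+#	e += X[i]+K;  e += (b&c)^(~b&d);  e += a<<<5;
+#
+# while andn and rorx precompute ~x&y and the rotates for the next
+# round without touching the flags, which is what keeps the critical
+# path at the 3 cycles noted above.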
+
+sub bodyx_20_39 () {	# 7 instructions, 2 cycles critical path
+	# on entry $f=b^c^d, $b>>>=2
+	return &bodyx_40_59() if ($rx==39); $rx++;
+	(
+	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+	'&add	($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
+	 '&lea	($frame,"256($frame)")	if ($j%32==31);',
+
+	'&lea	($e,"($e,$f)")',		# e+=b^c^d
+	'&rorx	($a5,$a,27)',			# a<<<5
+	'&rorx	($f,$a,2)	if ($j<79)',	# b>>>2 in next round
+	'&xor	($a,$b)		if ($j<79)',	# b^c for next round
+
+	'&add	($e,$a5)',			# e+=a<<<5
+	'&xor	($a,$c)		if ($j<79);'.	# f=b^c^d for next round
+
+	'unshift(@ROTX,pop(@ROTX)); $j++;'
+	)
+}
+
+sub bodyx_40_59 () {	# 10 instructions, 3 cycles critical path
+	# on entry $f=((b^c)&(c^d)), $b>>>=2
+	$rx++;
+	(
+	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+	'&add	($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
+	 '&lea	($frame,"256($frame)")	if ($j%32==31);',
+	'&xor	($f,$c)		if ($j>39)',	# (b^c)&(c^d)^c
+	'&mov	($t0,$b)	if ($j<59)',	# count on zero latency
+	'&xor	($t0,$c)	if ($j<59)',	# c^d for next round
+
+	'&lea	($e,"($e,$f)")',		# e+=(b^c)&(c^d)^c
+	'&rorx	($a5,$a,27)',			# a<<<5
+	'&rorx	($f,$a,2)',			# b>>>2 in next round
+	'&xor	($a,$b)',			# b^c for next round
+
+	'&add	($e,$a5)',			# e+=a<<<5
+	'&and	($a,$t0)	if ($j< 59);'.	# f=(b^c)&(c^d) for next round
+	'&xor	($a,$c)		if ($j==59);'.	# f=b^c^d for next round
+
+	'unshift(@ROTX,pop(@ROTX)); $j++;'
+	)
+}
+
+sub Xupdate_avx2_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 35 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],31);
+	&vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpaddd	(@Tx[1],@X[0],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqu(eval(32*($Xi))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+	$Xi++;
+	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx2_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 35 to 50 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	&vmovdqu($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)")	if ($Xi%5==0);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],30);
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	#&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpaddd	(@Tx[1],@X[0],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vmovdqu("32*$Xi(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+	$Xi++;
+	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xloop_avx2()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 foreach (@insns) { eval; }
+}
+
+	&align32();
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+
+	&align32();
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+
+	&Xloop_avx2(\&bodyx_20_39);
+	&Xloop_avx2(\&bodyx_20_39);
+	&Xloop_avx2(\&bodyx_20_39);
+	&Xloop_avx2(\&bodyx_20_39);
+
+$code.=<<___;
+	lea	128($inp),$frame
+	lea	128($inp),%rdi			# borrow $t0
+	cmp	$num,$frame
+	cmovae	$inp,$frame			# next or previous block
+
+	# output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
+	add	0($ctx),@ROTX[0]		# update context
+	add	4($ctx),@ROTX[1]
+	add	8($ctx),@ROTX[3]
+	mov	@ROTX[0],0($ctx)
+	add	12($ctx),@ROTX[4]
+	mov	@ROTX[1],4($ctx)
+	 mov	@ROTX[0],$A			# A=d
+	add	16($ctx),@ROTX[5]
+	 mov	@ROTX[3],$a5
+	mov	@ROTX[3],8($ctx)
+	 mov	@ROTX[4],$D			# D=b
+	 #xchg	@ROTX[5],$F			# F=c, C=f
+	mov	@ROTX[4],12($ctx)
+	 mov	@ROTX[1],$F			# F=e
+	mov	@ROTX[5],16($ctx)
+	#mov	$F,16($ctx)
+	 mov	@ROTX[5],$E			# E=c
+	 mov	$a5,$C				# C=f
+	 #xchg	$F,$E				# E=c, F=e
+
+	cmp	$num,$inp
+	je	.Ldone_avx2
+___
+
+$Xi=4;				# reset variables
+@X=map("%ymm$_",(4..7,0..3));
+
+$code.=<<___;
+	vmovdqu	64($K_XX_XX),@X[2]		# pbswap mask
+	cmp	$num,%rdi			# borrowed $t0
+	ja	.Last_avx2
+
+	vmovdqu		-64(%rdi),%xmm0		# low part of @X[-4&7]
+	vmovdqu		-48(%rdi),%xmm1
+	vmovdqu		-32(%rdi),%xmm2
+	vmovdqu		-16(%rdi),%xmm3
+	vinserti128	\$1,0($frame),@X[-4&7],@X[-4&7]
+	vinserti128	\$1,16($frame),@X[-3&7],@X[-3&7]
+	vinserti128	\$1,32($frame),@X[-2&7],@X[-2&7]
+	vinserti128	\$1,48($frame),@X[-1&7],@X[-1&7]
+	jmp	.Last_avx2
+
+.align	32
+.Last_avx2:
+	lea	128+16(%rsp),$frame
+	rorx	\$2,$F,$B
+	andn	$D,$F,$t0
+	and	$C,$F
+	xor	$t0,$F
+	sub	\$-128,$inp
+___
+	$rx=$j=0;	@ROTX=($A,$F,$B,$C,$D,$E);
+
+	&Xloop_avx2	(\&bodyx_00_19);
+	&Xloop_avx2	(\&bodyx_00_19);
+	&Xloop_avx2	(\&bodyx_00_19);
+	&Xloop_avx2	(\&bodyx_00_19);
+
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vmovdqu	($Kx,"-64($K_XX_XX)");		# K_00_19
+	  &vpshufb	(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vpshufb	(@X[-3&7],@X[-3&7],@X[2]);
+	  &vpaddd	(@Tx[0],@X[-4&7],$Kx);		# add K_00_19
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vmovdqu	("0(%rsp)",@Tx[0]);
+	  &vpshufb	(@X[-2&7],@X[-2&7],@X[2]);
+	  &vpaddd	(@Tx[1],@X[-3&7],$Kx);
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vmovdqu	("32(%rsp)",@Tx[1]);
+	  &vpshufb	(@X[-1&7],@X[-1&7],@X[2]);
+	  &vpaddd	(@X[2],@X[-2&7],$Kx);
+
+	&Xloop_avx2	(\&bodyx_40_59);
+	&align32	();
+	  &vmovdqu	("64(%rsp)",@X[2]);
+	  &vpaddd	(@X[3],@X[-1&7],$Kx);
+	&Xloop_avx2	(\&bodyx_40_59);
+	  &vmovdqu	("96(%rsp)",@X[3]);
+	&Xloop_avx2	(\&bodyx_40_59);
+	&Xupdate_avx2_16_31(\&bodyx_40_59);
+
+	&Xupdate_avx2_16_31(\&bodyx_20_39);
+	&Xupdate_avx2_16_31(\&bodyx_20_39);
+	&Xupdate_avx2_16_31(\&bodyx_20_39);
+	&Xloop_avx2	(\&bodyx_20_39);
+
+$code.=<<___;
+	lea	128(%rsp),$frame
+
+	# output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
+	add	0($ctx),@ROTX[0]		# update context
+	add	4($ctx),@ROTX[1]
+	add	8($ctx),@ROTX[3]
+	mov	@ROTX[0],0($ctx)
+	add	12($ctx),@ROTX[4]
+	mov	@ROTX[1],4($ctx)
+	 mov	@ROTX[0],$A			# A=d
+	add	16($ctx),@ROTX[5]
+	 mov	@ROTX[3],$a5
+	mov	@ROTX[3],8($ctx)
+	 mov	@ROTX[4],$D			# D=b
+	 #xchg	@ROTX[5],$F			# F=c, C=f
+	mov	@ROTX[4],12($ctx)
+	 mov	@ROTX[1],$F			# F=e
+	mov	@ROTX[5],16($ctx)
+	#mov	$F,16($ctx)
+	 mov	@ROTX[5],$E			# E=c
+	 mov	$a5,$C				# C=f
+	 #xchg	$F,$E				# E=c, F=e
+
+	cmp	$num,$inp
+	jbe	.Loop_avx2
+
+.Ldone_avx2:
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	-40-6*16(%r14),%xmm6
+	movaps	-40-5*16(%r14),%xmm7
+	movaps	-40-4*16(%r14),%xmm8
+	movaps	-40-3*16(%r14),%xmm9
+	movaps	-40-2*16(%r14),%xmm10
+	movaps	-40-1*16(%r14),%xmm11
+___
+$code.=<<___;
+	lea	(%r14),%rsi
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_avx2:
+	ret
+.size	sha1_block_data_order_avx2,.-sha1_block_data_order_avx2
+___
+}
+}
+$code.=<<___;
+.align	64
+K_XX_XX:
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+___
+}}}
+$code.=<<___;
+.asciz	"SHA1 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lprologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lcommon_seh_tail
+
+	mov	`16*4`(%rax),%rax	# pull saved stack pointer
+	lea	32(%rax),%rax
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+
+	jmp	.Lcommon_seh_tail
+.size	se_handler,.-se_handler
+
+.type	ssse3_handler,\@abi-omnipotent
+.align	16
+ssse3_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	mov	232($context),%rax	# pull context->R14
+
+	lea	-40-6*16(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$12,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	ssse3_handler,.-ssse3_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_sha1_block_data_order
+	.rva	.LSEH_end_sha1_block_data_order
+	.rva	.LSEH_info_sha1_block_data_order
+	.rva	.LSEH_begin_sha1_block_data_order_ssse3
+	.rva	.LSEH_end_sha1_block_data_order_ssse3
+	.rva	.LSEH_info_sha1_block_data_order_ssse3
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_sha1_block_data_order_avx
+	.rva	.LSEH_end_sha1_block_data_order_avx
+	.rva	.LSEH_info_sha1_block_data_order_avx
+___
+$code.=<<___ if ($avx>1);
+	.rva	.LSEH_begin_sha1_block_data_order_avx2
+	.rva	.LSEH_end_sha1_block_data_order_avx2
+	.rva	.LSEH_info_sha1_block_data_order_avx2
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_sha1_block_data_order:
+	.byte	9,0,0,0
+	.rva	se_handler
+.LSEH_info_sha1_block_data_order_ssse3:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_sha1_block_data_order_avx:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_sha1_block_data_order_avx2:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
+___
+}
+
+####################################################################
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
new file mode 100644
index 0000000..bd48b63
--- /dev/null
+++ b/crypto/sha/asm/sha256-586.pl
@@ -0,0 +1,1125 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA256 block transform for x86. September 2007.
+#
+# Performance improvement over compiler generated code varies from
+# 10% to 40% [see below]. Not very impressive on some µ-archs, but
+# it's 5 times smaller and optimizes the amount of writes.
+#
+# May 2012.
+#
+# Optimization including two of Pavel Semjanov's ideas, alternative
+# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
+# ~7% on Pentium, ~40% on Atom. As fully unrolled loop body is almost
+# 15x larger, 8KB vs. 560B, it's fired only for longer inputs. But not
+# on P4, where it kills performance, nor Sandy Bridge, where folded
+# loop is approximately as fast...
+#
+# June 2012.
+#
+# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
+# May version, >60% over original. Add AVX+shrd code path, >25%
+# improvement on Sandy Bridge over May version, 60% over original.
+#
+# May 2013.
+#
+# Replace AMD XOP code path with SSSE3 to cover more processors.
+# (Biggest improvement coefficient is on upcoming Atom Silvermont,
+# not shown.) Add AVX+BMI code path.
+#
+# Performance in clock cycles per processed byte (less is better):
+#
+#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)	
+# Pentium	46	57	40/38		-	-
+# PIII		36	33	27/24		-	-
+# P4		41	38	28		-	17.3
+# AMD K8	27	25	19/15.5		-	14.9
+# Core2		26	23	18/15.6		14.3	13.8
+# Westmere	27	-	19/15.7		13.4	12.3
+# Sandy Bridge	25	-	15.9		12.4	11.6
+# Ivy Bridge	24	-	15.0		11.4	10.3
+# Haswell	22	-	13.9		9.46	7.80
+# Bulldozer	36	-	27/22		17.0	13.6
+# VIA Nano	36	-	25/22		16.8	16.5
+# Atom		50	-	30/25		21.9	18.9
+#
+# (*)	numbers after slash are for unrolled loop, where applicable;
+# (**)	x86_64 assembly performance is presented for reference
+#	purposes, results are best-available;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");
+
+$xmm=$avx=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+if ($xmm &&	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+			=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
+		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.03) + ($1>=2.10);
+}
+
+if ($xmm && !$avx && $ARGV[0] eq "win32" &&
+		`ml 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
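+
+# $avx ends up 0, 1 or 2: each comparison that holds unlocks one more
+# capability level, gated on the oldest assembler known to encode the
+# required instructions (binutils 2.19/2.22, nasm 2.03/2.10, MASM
+# versions 10/11 respectively).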
+
+$unroll_after = 64*4;	# If pre-evicted from the L1P cache, the first
+			# spin of the fully unrolled loop was measured
+			# to run about 3-4x slower. If the slowdown
+			# coefficient is N and the unrolled loop is m
+			# times faster, you break even at (N-1)/(m-1)
+			# blocks. That then has to be scaled by the
+			# probability of the code being evicted, i.e.
+			# code size/cache size = 1/4. Typical m is 1.15...
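+			# As an illustrative instance of that estimate
+			# (assuming N=3.5, the midpoint of the measured
+			# 3-4x, and m=1.15): (3.5-1)/(1.15-1) ~= 17
+			# blocks, scaled by the 1/4 eviction probability
+			# gives ~4 blocks, i.e. 64*4 input bytes -- the
+			# threshold chosen above.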
+
+$A="eax";
+$E="edx";
+$T="ebx";
+$Aoff=&DWP(4,"esp");
+$Boff=&DWP(8,"esp");
+$Coff=&DWP(12,"esp");
+$Doff=&DWP(16,"esp");
+$Eoff=&DWP(20,"esp");
+$Foff=&DWP(24,"esp");
+$Goff=&DWP(28,"esp");
+$Hoff=&DWP(32,"esp");
+$Xoff=&DWP(36,"esp");
+$K256="ebp";
+
+sub BODY_16_63() {
+	&mov	($T,"ecx");			# "ecx" is preloaded
+	 &mov	("esi",&DWP(4*(9+15+16-14),"esp"));
+	&ror	("ecx",18-7);
+	 &mov	("edi","esi");
+	&ror	("esi",19-17);
+	 &xor	("ecx",$T);
+	 &shr	($T,3);
+	&ror	("ecx",7);
+	 &xor	("esi","edi");
+	 &xor	($T,"ecx");			# T = sigma0(X[-15])
+	&ror	("esi",17);
+	 &add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
+	&shr	("edi",10);
+	 &add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
+	#&xor	("edi","esi")			# sigma1(X[-2])
+	# &add	($T,"edi");			# T += sigma1(X[-2])
+	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
+
+	&BODY_00_15(1);
+}
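+
+# The staggered rotate amounts in BODY_16_63 exploit the fact that
+# rotation distributes over XOR: ror(ror(x,18-7)^x,7) equals
+# (x>>>18)^(x>>>7), so sigma0(x) = (x>>>7)^(x>>>18)^(x>>3) costs two
+# rotates instead of three; sigma1(x) = (x>>>17)^(x>>>19)^(x>>10) is
+# built the same way from ror 19-17 followed by ror 17.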
+sub BODY_00_15() {
+    my $in_16_63=shift;
+
+	&mov	("ecx",$E);
+	 &xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
+	 &mov	("esi",$Foff);
+	&ror	("ecx",25-11);
+	 &add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
+	 &mov	("edi",$Goff);
+	&xor	("ecx",$E);
+	 &xor	("esi","edi");
+	 &mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
+	 &mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
+	&ror	("ecx",11-6);
+	 &and	("esi",$E);
+	 &mov	($Eoff,$E);		# modulo-scheduled
+	&xor	($E,"ecx");
+	 &add	($T,$Hoff);		# T += h
+	 &xor	("esi","edi");		# Ch(e,f,g)
+	&ror	($E,6);			# Sigma1(e)
+	 &mov	("ecx",$A);
+	 &add	($T,"esi");		# T += Ch(e,f,g)
+
+	&ror	("ecx",22-13);
+	 &add	($T,$E);		# T += Sigma1(e)
+	 &mov	("edi",$Boff);
+	&xor	("ecx",$A);
+	 &mov	($Aoff,$A);		# modulo-scheduled
+	 &lea	("esp",&DWP(-4,"esp"));
+	&ror	("ecx",13-2);
+	 &mov	("esi",&DWP(0,$K256));
+	&xor	("ecx",$A);
+	 &mov	($E,$Eoff);		# e in next iteration, d in this one
+	 &xor	($A,"edi");		# a ^= b
+	&ror	("ecx",2);		# Sigma0(a)
+
+	 &add	($T,"esi");		# T+= K[i]
+	 &mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
+	&add	($E,$T);		# d += T
+	 &and	($A,&DWP(4,"esp"));	# a &= (b^c)
+	&add	($T,"ecx");		# T += Sigma0(a)
+	 &xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
+	 &mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
+	&add	($K256,4);
+	 &add	($A,$T);		# h += T
+}
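+
+# The "magic" stack slot makes Maj cost one AND and one XOR per round:
+# each round stashes its a^b, which is exactly (b^c) of the next
+# round, and uses Maj(a,b,c) = ((a^b)&(b^c))^b (the Ch(a^b,c,b) of the
+# in-line comments).  Worked cases: b=0 gives a&c, b=1 gives
+# ~(~a&~c) = a|c -- the majority either way.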
+
+&external_label("OPENSSL_ia32cap_P")		if (!$i386);
+
+&function_begin("sha256_block_data_order");
+	&mov	("esi",wparam(0));	# ctx
+	&mov	("edi",wparam(1));	# inp
+	&mov	("eax",wparam(2));	# num
+	&mov	("ebx","esp");		# saved sp
+
+	&call	(&label("pic_point"));	# make it PIC!
+&set_label("pic_point");
+	&blindpop($K256);
+	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));
+
+	&sub	("esp",16);
+	&and	("esp",-64);
+
+	&shl	("eax",6);
+	&add	("eax","edi");
+	&mov	(&DWP(0,"esp"),"esi");	# ctx
+	&mov	(&DWP(4,"esp"),"edi");	# inp
+	&mov	(&DWP(8,"esp"),"eax");	# inp+num*128
+	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
+						if (!$i386) {
+	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
+	&mov	("ecx",&DWP(0,"edx"));
+	&mov	("ebx",&DWP(4,"edx"));
+	&test	("ecx",1<<20);		# check for P4
+	&jnz	(&label("loop"));
+	&and	("ecx",1<<30);		# mask "Intel CPU" bit
+	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
+	&or	("ecx","ebx");
+	&and	("ecx",1<<28|1<<30);
+	&cmp	("ecx",1<<28|1<<30);
+					if ($xmm) {
+	&je	(&label("AVX"))		if ($avx);
+	&test	("ebx",1<<9);		# check for SSSE3
+	&jnz	(&label("SSSE3"));
+					} else {
+	&je	(&label("loop_shrd"));
+					}
+						if ($unroll_after) {
+	&sub	("eax","edi");
+	&cmp	("eax",$unroll_after);
+	&jae	(&label("unrolled"));
+						} }
+	&jmp	(&label("loop"));
+
+sub COMPACT_LOOP() {
+my $suffix=shift;
+
+&set_label("loop$suffix",$suffix?32:16);
+    # copy input block to stack reversing byte and dword order
+    for($i=0;$i<4;$i++) {
+	&mov	("eax",&DWP($i*16+0,"edi"));
+	&mov	("ebx",&DWP($i*16+4,"edi"));
+	&mov	("ecx",&DWP($i*16+8,"edi"));
+	&bswap	("eax");
+	&mov	("edx",&DWP($i*16+12,"edi"));
+	&bswap	("ebx");
+	&push	("eax");
+	&bswap	("ecx");
+	&push	("ebx");
+	&bswap	("edx");
+	&push	("ecx");
+	&push	("edx");
+    }
+	&add	("edi",64);
+	&lea	("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
+	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");
+
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($A,&DWP(0,"esi"));
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	# &mov	($Aoff,$A);
+	&mov	($Boff,"ebx");
+	&xor	("ebx","ecx");
+	&mov	($Coff,"ecx");
+	&mov	($Doff,"edi");
+	&mov	(&DWP(0,"esp"),"ebx");	# magic
+	&mov	($E,&DWP(16,"esi"));	
+	&mov	("ebx",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("edi",&DWP(28,"esi"));
+	# &mov	($Eoff,$E);
+	&mov	($Foff,"ebx");
+	&mov	($Goff,"ecx");
+	&mov	($Hoff,"edi");
+
+&set_label("00_15$suffix",16);
+
+	&BODY_00_15();
+
+	&cmp	("esi",0xc19bf174);
+	&jne	(&label("00_15$suffix"));
+
+	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
+	&jmp	(&label("16_63$suffix"));
+
+&set_label("16_63$suffix",16);
+
+	&BODY_16_63();
+
+	&cmp	("esi",0xc67178f2);
+	&jne	(&label("16_63$suffix"));
+
+	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
+	# &mov	($A,$Aoff);
+	&mov	("ebx",$Boff);
+	# &mov	("edi",$Coff);
+	&mov	("ecx",$Doff);
+	&add	($A,&DWP(0,"esi"));
+	&add	("ebx",&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$A);
+	&mov	(&DWP(4,"esi"),"ebx");
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	# &mov	($E,$Eoff);
+	&mov	("eax",$Foff);
+	&mov	("ebx",$Goff);
+	&mov	("ecx",$Hoff);
+	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
+	&add	($E,&DWP(16,"esi"));
+	&add	("eax",&DWP(20,"esi"));
+	&add	("ebx",&DWP(24,"esi"));
+	&add	("ecx",&DWP(28,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"eax");
+	&mov	(&DWP(24,"esi"),"ebx");
+	&mov	(&DWP(28,"esi"),"ecx");
+
+	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
+	&sub	($K256,4*64);			# rewind K
+
+	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
+	&jb	(&label("loop$suffix"));
+}
+	&COMPACT_LOOP();
+	&mov	("esp",&DWP(12,"esp"));		# restore sp
+&function_end_A();
+						if (!$i386 && !$xmm) {
+	# ~20% improvement on Sandy Bridge
+	local *ror = sub { &shrd(@_[0],@_) };
+	&COMPACT_LOOP("_shrd");
+	&mov	("esp",&DWP(12,"esp"));		# restore sp
+&function_end_A();
+						}
+
+&set_label("K256",64);	# Yes! I keep it in the code segment!
+@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
+	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
+	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
+	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
+	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
+	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
+	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
+	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
+	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
+	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
+	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
+	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
+	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
+	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
+	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
+	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
+&data_word(@K256);
+&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
+&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
+sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
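+
+# &off() maps the symbolic names $a..$h (the constants 0..7 above) to
+# stack slots that "rotate" as $i advances, e.g. &off($a) is 0(%esp)
+# at $i==0 but 28(%esp) at $i==1: the stored working variables stay
+# put while the labels walk around them.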
+
+if (!$i386 && $unroll_after) {
+my @AH=($A,$K256);
+
+&set_label("unrolled",16);
+	&lea	("esp",&DWP(-96,"esp"));
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("ebx",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");		# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"ebx");
+	&mov	($E,&DWP(16,"esi"));	
+	&mov	("ebx",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"ebx");
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&jmp	(&label("grand_loop"));
+
+&set_label("grand_loop",16);
+    # copy input block to stack reversing byte order
+    for($i=0;$i<5;$i++) {
+	&mov	("ebx",&DWP(12*$i+0,"edi"));
+	&mov	("ecx",&DWP(12*$i+4,"edi"));
+	&bswap	("ebx");
+	&mov	("esi",&DWP(12*$i+8,"edi"));
+	&bswap	("ecx");
+	&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
+	&bswap	("esi");
+	&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
+	&mov	(&DWP(32+12*$i+8,"esp"),"esi");
+    }
+	&mov	("ebx",&DWP($i*12,"edi"));
+	&add	("edi",64);
+	&bswap	("ebx");
+	&mov	(&DWP(96+4,"esp"),"edi");
+	&mov	(&DWP(32+12*$i,"esp"),"ebx");
+
+    my ($t1,$t2) = ("ecx","esi");
+
+    for ($i=0;$i<64;$i++) {
+
+      if ($i>=16) {
+	&mov	($T,$t1);			# $t1 is preloaded
+	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
+	&ror	($t1,18-7);
+	 &mov	("edi",$t2);
+	&ror	($t2,19-17);
+	 &xor	($t1,$T);
+	 &shr	($T,3);
+	&ror	($t1,7);
+	 &xor	($t2,"edi");
+	 &xor	($T,$t1);			# T = sigma0(X[-15])
+	&ror	($t2,17);
+	 &add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
+	&shr	("edi",10);
+	 &add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
+	#&xor	("edi",$t2)			# sigma1(X[-2])
+	# &add	($T,"edi");			# T += sigma1(X[-2])
+	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
+      }
+	&mov	($t1,$E);
+	 &xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
+	 &mov	($t2,&off($f));
+	&ror	($E,25-11);
+	 &add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
+	 &mov	("edi",&off($g));
+	&xor	($E,$t1);
+	 &mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
+	 &mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
+	 &xor	($t2,"edi");
+	&ror	($E,11-6);
+	 &and	($t2,$t1);
+	 &mov	(&off($e),$t1);		# save $E, modulo-scheduled
+	&xor	($E,$t1);
+	 &add	($T,&off($h));		# T += h
+	 &xor	("edi",$t2);		# Ch(e,f,g)
+	&ror	($E,6);			# Sigma1(e)
+	 &mov	($t1,$AH[0]);
+	 &add	($T,"edi");		# T += Ch(e,f,g)
+
+	&ror	($t1,22-13);
+	 &mov	($t2,$AH[0]);
+	 &mov	("edi",&off($b));
+	&xor	($t1,$AH[0]);
+	 &mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
+	 &xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
+	&ror	($t1,13-2);
+	 &and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
+	 &lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
+	&xor	($t1,$t2);
+	 &xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
+	 &mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
+	&ror	($t1,2);		# Sigma0(a)
+
+	 &add	($AH[1],$E);		# h += T
+	 &add	($E,&off($d));		# d += T
+	&add	($AH[1],$t1);		# h += Sigma0(a)
+	 &mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);
+
+	@AH = reverse(@AH);		# rotate(a,h)
+	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
+    }
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");		# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ebx",&DWP(24,"esp"));
+	&mov	("ecx",&DWP(28,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ebx",&DWP(24,"esi"));
+	&add	("ecx",&DWP(28,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	&mov	(&DWP(24,"esi"),"ebx");
+	&mov	(&DWP(28,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	 &mov	(&DWP(24,"esp"),"ebx");
+	 &mov	(&DWP(28,"esp"),"ecx");
+
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_loop"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+&function_end_A();
+}
+						if (!$i386 && $xmm) {{{
+my @X = map("xmm$_",(0..3));
+my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
+my @AH = ($A,$T);
+
+&set_label("SSSE3",32);
+	&lea	("esp",&DWP(-96,"esp"));
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");			# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"edi");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("edi",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&movdqa	($t3,&QWP(256,$K256));
+	&jmp	(&label("grand_ssse3"));
+
+&set_label("grand_ssse3",16);
+	# load input, reverse byte order, add K256[0..15], save to stack
+	&movdqu	(@X[0],&QWP(0,"edi"));
+	&movdqu	(@X[1],&QWP(16,"edi"));
+	&movdqu	(@X[2],&QWP(32,"edi"));
+	&movdqu	(@X[3],&QWP(48,"edi"));
+	&add	("edi",64);
+	&pshufb	(@X[0],$t3);
+	&mov	(&DWP(96+4,"esp"),"edi");
+	&pshufb	(@X[1],$t3);
+	&movdqa	($t0,&QWP(0,$K256));
+	&pshufb	(@X[2],$t3);
+	&movdqa	($t1,&QWP(16,$K256));
+	&paddd	($t0,@X[0]);
+	&pshufb	(@X[3],$t3);
+	&movdqa	($t2,&QWP(32,$K256));
+	&paddd	($t1,@X[1]);
+	&movdqa	($t3,&QWP(48,$K256));
+	&movdqa	(&QWP(32+0,"esp"),$t0);
+	&paddd	($t2,@X[2]);
+	&movdqa	(&QWP(32+16,"esp"),$t1);
+	&paddd	($t3,@X[3]);
+	&movdqa	(&QWP(32+32,"esp"),$t2);
+	&movdqa	(&QWP(32+48,"esp"),$t3);
+	&jmp	(&label("ssse3_00_47"));
+
+&set_label("ssse3_00_47",16);
+	&add		($K256,64);
+
+sub SSSE3_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
+
+	  eval(shift(@insns));
+	&movdqa		($t0,@X[1]);
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	&movdqa		($t3,@X[3]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&palignr	($t0,@X[0],4);		# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &palignr	($t3,@X[2],4);		# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t1,$t0);
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	&movdqa		($t2,$t0);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&psrld		($t0,3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&psrld		($t2,7);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pslld		($t1,32-18);
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&psrld		($t2,18-7);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pslld		($t1,18-7);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t1);		# sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrld		($t3,10);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrlq		($t2,17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrlq		($t2,19-17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,$t3,0b10000000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &psrldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));			# @
+	 &psrld		($t3,10);
+	  eval(shift(@insns));
+	 &psrlq		($t2,17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrlq		($t2,19-17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,$t3,0b00001000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&movdqa		($t2,&QWP(16*$j,$K256));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pslldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		($t2,@X[0]);
+	  eval(shift(@insns));			# @
+
+	foreach (@insns) { eval; }		# remaining instructions
+
+	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
+}
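+
+# A note on the scheduling above: @insns queues ~120 strings of scalar
+# round code (four expansions of &$body), and each vector statement is
+# followed by eval(shift(@insns)), so integer and XMM instructions come
+# out interleaved. A stand-alone Perl illustration of the same pattern
+# (hypothetical names; not part of the code generation):
+#
+#	my @insns = map { my $i = $_; sub { print "round $i\n" } } 0..3;
+#	for my $simd (qw(palignr psrld pxor)) {
+#		print "$simd\n";		# one vector instruction...
+#		(shift @insns)->() if @insns;	# ...one queued scalar round
+#	}
+#	$_->() for @insns;			# remaining instructions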
+
+sub body_00_15 () {
+	(
+	'&mov	("ecx",$E);',
+	'&ror	($E,25-11);',
+	 '&mov	("esi",&off($f));',
+	'&xor	($E,"ecx");',
+	 '&mov	("edi",&off($g));',
+	 '&xor	("esi","edi");',
+	'&ror	($E,11-6);',
+	 '&and	("esi","ecx");',
+	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
+	'&xor	($E,"ecx");',
+	 '&xor	("edi","esi");',	# Ch(e,f,g)
+	'&ror	($E,6);',		# T = Sigma1(e)
+	 '&mov	("ecx",$AH[0]);',
+	 '&add	($E,"edi");',		# T += Ch(e,f,g)
+	 '&mov	("edi",&off($b));',
+	'&mov	("esi",$AH[0]);',
+
+	'&ror	("ecx",22-13);',
+	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
+	'&xor	("ecx",$AH[0]);',
+	 '&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
+	 '&add	($E,&off($h));',	# T += h
+	'&ror	("ecx",13-2);',
+	 '&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
+	'&xor	("ecx","esi");',
+	 '&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
+	 '&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
+	'&ror	("ecx",2);',		# Sigma0(a)
+
+	 '&add	($AH[1],$E);',		# h += T
+	 '&add	($E,&off($d));',	# d += T
+	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)
+
+	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
+	);
+}
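+
+# About the "magic" xor at block setup: $AH[1] enters every round
+# already holding b^c, so Maj(a,b,c) is computed as Ch(a^b,c,b) =
+# ((b^c)&(a^b))^b with one AND and two XORs instead of the textbook
+# (a&b)^(a&c)^(b&c). A quick stand-alone Perl check of the identity
+# (illustrative only):
+#
+#	for my $a (0,1) { for my $b (0,1) { for my $c (0,1) {
+#		my $maj = ($a&$b)^($a&$c)^($b&$c);
+#		die "Maj identity broken"
+#		    if (((($b^$c)&($a^$b))^$b) != $maj);
+#	}}}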
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&SSSE3_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmp	(&DWP(16*$j,$K256),0x00010203);
+	&jne	(&label("ssse3_00_47"));
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");			# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ecx",&DWP(24,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ecx",&DWP(24,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(28,"esp"));
+	&mov	(&DWP(24,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	&add	("edi",&DWP(28,"esi"));
+	 &mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esi"),"edi");
+	 &mov	(&DWP(28,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+
+	&movdqa	($t3,&QWP(64,$K256));
+	&sub	($K256,3*64);			# rewind K
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_ssse3"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+&function_end_A();
+						if ($avx) {
+&set_label("AVX",32);
+						if ($avx>1) {
+	&mov	("edx",&DWP(8,"edx"));
+	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
+	&cmp	("edx",1<<8|1<<3);
+	&je	(&label("AVX_BMI"));
+						}
+	&lea	("esp",&DWP(-96,"esp"));
+	&vzeroall	();
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");			# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"edi");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("edi",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&vmovdqa	($t3,&QWP(256,$K256));
+	&jmp	(&label("grand_avx"));
+
+&set_label("grand_avx",32);
+	# load input, reverse byte order, add K256[0..15], save to stack
+	&vmovdqu	(@X[0],&QWP(0,"edi"));
+	&vmovdqu	(@X[1],&QWP(16,"edi"));
+	&vmovdqu	(@X[2],&QWP(32,"edi"));
+	&vmovdqu	(@X[3],&QWP(48,"edi"));
+	&add		("edi",64);
+	&vpshufb	(@X[0],@X[0],$t3);
+	&mov		(&DWP(96+4,"esp"),"edi");
+	&vpshufb	(@X[1],@X[1],$t3);
+	&vpshufb	(@X[2],@X[2],$t3);
+	&vpaddd		($t0,@X[0],&QWP(0,$K256));
+	&vpshufb	(@X[3],@X[3],$t3);
+	&vpaddd		($t1,@X[1],&QWP(16,$K256));
+	&vpaddd		($t2,@X[2],&QWP(32,$K256));
+	&vpaddd		($t3,@X[3],&QWP(48,$K256));
+	&vmovdqa	(&QWP(32+0,"esp"),$t0);
+	&vmovdqa	(&QWP(32+16,"esp"),$t1);
+	&vmovdqa	(&QWP(32+32,"esp"),$t2);
+	&vmovdqa	(&QWP(32+48,"esp"),$t3);
+	&jmp		(&label("avx_00_47"));
+
+&set_label("avx_00_47",16);
+	&add		($K256,64);
+
+sub Xupdate_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
+	 '&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
+	'&vpsrld	($t2,$t0,7);',
+	 '&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
+	'&vpsrld	($t3,$t0,3);',
+	'&vpslld	($t1,$t0,14);',
+	'&vpxor		($t0,$t3,$t2);',
+	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&vpsrld	($t2,$t2,18-7);',
+	'&vpxor		($t0,$t0,$t1);',
+	'&vpslld	($t1,$t1,25-14);',
+	'&vpxor		($t0,$t0,$t2);',
+	 '&vpsrld	($t2,$t3,10);',
+	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
+	 '&vpsrlq	($t1,$t3,17);',
+	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
+	 '&vpxor	($t2,$t2,$t1);',
+	 '&vpsrlq	($t3,$t3,19);',
+	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
+	 '&vpshufd	($t3,$t2,0b10000100);',
+	'&vpsrldq	($t3,$t3,8);',
+	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
+	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&vpsrld	($t2,$t3,10);',
+	 '&vpsrlq	($t1,$t3,17);',
+	 '&vpxor	($t2,$t2,$t1);',
+	 '&vpsrlq	($t3,$t3,19);',
+	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
+	 '&vpshufd	($t3,$t2,0b11101000);',
+	'&vpslldq	($t3,$t3,8);',
+	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
+	);
+}
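+
+# Xupdate_AVX produces four new message-schedule words per call, i.e.
+# the SHA-256 recurrence W[i] = sigma1(W[i-2]) + W[i-7] +
+# sigma0(W[i-15]) + W[i-16], vectorized. A scalar Perl reference for
+# the two sigmas (a stand-alone sketch assuming 32-bit inputs; it is
+# not used by the generator):
+#
+#	sub ror32  { my ($x,$n) = @_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff }
+#	sub sigma0 { my $x = shift; ror32($x,7)  ^ ror32($x,18) ^ ($x>>3)  }
+#	sub sigma1 { my $x = shift; ror32($x,17) ^ ror32($x,19) ^ ($x>>10) }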
+
+local *ror = sub { &shrd($_[0],@_) };
+sub AVX_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
+my $insn;
+
+	foreach (Xupdate_AVX()) {		# 31 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval($insn = shift(@insns));
+	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
+	}
+	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
+	foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmp	(&DWP(16*$j,$K256),0x00010203);
+	&jne	(&label("avx_00_47"));
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");			# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ecx",&DWP(24,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ecx",&DWP(24,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(28,"esp"));
+	&mov	(&DWP(24,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	&add	("edi",&DWP(28,"esi"));
+	 &mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esi"),"edi");
+	 &mov	(&DWP(28,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+
+	&vmovdqa	($t3,&QWP(64,$K256));
+	&sub	($K256,3*64);			# rewind K
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_avx"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+	&vzeroall	();
+&function_end_A();
+						if ($avx>1) {
+sub bodyx_00_15 () {			# +10%
+	(
+	'&rorx	("ecx",$E,6)',
+	'&rorx	("esi",$E,11)',
+	 '&mov	(&off($e),$E)',		# save $E, modulo-scheduled
+	'&rorx	("edi",$E,25)',
+	'&xor	("ecx","esi")',
+	 '&andn	("esi",$E,&off($g))',
+	'&xor	("ecx","edi")',		# Sigma1(e)
+	 '&and	($E,&off($f))',
+	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
+	 '&or	($E,"esi")',		# T = Ch(e,f,g)
+
+	'&rorx	("edi",$AH[0],2)',
+	'&rorx	("esi",$AH[0],13)',
+	 '&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
+	'&rorx	("ecx",$AH[0],22)',
+	'&xor	("esi","edi")',
+	 '&mov	("edi",&off($b))',
+	'&xor	("ecx","esi")',		# Sigma0(a)
+
+	 '&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
+	 '&add	($E,&off($h))',		# T += h
+	 '&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
+	 '&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
+	 '&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)
+
+	 '&add	("ecx",$E)',		# h += T
+	 '&add	($E,&off($d))',		# d += T
+	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)
+
+	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
+	);
+}
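+
+# bodyx_00_15 computes Ch(e,f,g) with BMI1's andn: "esi" receives ~e&g,
+# $E is masked down to e&f, and because the two terms can never have
+# overlapping bits the final OR equals the select (e&f)^(~e&g). A
+# stand-alone Perl check of the single-bit cases (illustrative only):
+#
+#	for my $e (0,1) { for my $f (0,1) { for my $g (0,1) {
+#		my $ch = ($e&$f) ^ (($e^1)&$g);
+#		die "Ch identity broken"
+#		    if ((($e&$f) | (($e^1)&$g)) != $ch);
+#	}}}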
+
+&set_label("AVX_BMI",32);
+	&lea	("esp",&DWP(-96,"esp"));
+	&vzeroall	();
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");			# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"edi");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("edi",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&vmovdqa	($t3,&QWP(256,$K256));
+	&jmp	(&label("grand_avx_bmi"));
+
+&set_label("grand_avx_bmi",32);
+	# load input, reverse byte order, add K256[0..15], save to stack
+	&vmovdqu	(@X[0],&QWP(0,"edi"));
+	&vmovdqu	(@X[1],&QWP(16,"edi"));
+	&vmovdqu	(@X[2],&QWP(32,"edi"));
+	&vmovdqu	(@X[3],&QWP(48,"edi"));
+	&add		("edi",64);
+	&vpshufb	(@X[0],@X[0],$t3);
+	&mov		(&DWP(96+4,"esp"),"edi");
+	&vpshufb	(@X[1],@X[1],$t3);
+	&vpshufb	(@X[2],@X[2],$t3);
+	&vpaddd		($t0,@X[0],&QWP(0,$K256));
+	&vpshufb	(@X[3],@X[3],$t3);
+	&vpaddd		($t1,@X[1],&QWP(16,$K256));
+	&vpaddd		($t2,@X[2],&QWP(32,$K256));
+	&vpaddd		($t3,@X[3],&QWP(48,$K256));
+	&vmovdqa	(&QWP(32+0,"esp"),$t0);
+	&vmovdqa	(&QWP(32+16,"esp"),$t1);
+	&vmovdqa	(&QWP(32+32,"esp"),$t2);
+	&vmovdqa	(&QWP(32+48,"esp"),$t3);
+	&jmp		(&label("avx_bmi_00_47"));
+
+&set_label("avx_bmi_00_47",16);
+	&add		($K256,64);
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmp	(&DWP(16*$j,$K256),0x00010203);
+	&jne	(&label("avx_bmi_00_47"));
+
+    for ($i=0; $i<16; ) {
+	foreach(bodyx_00_15()) { eval; }
+    }
+
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");			# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ecx",&DWP(24,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ecx",&DWP(24,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(28,"esp"));
+	&mov	(&DWP(24,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	&add	("edi",&DWP(28,"esi"));
+	 &mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esi"),"edi");
+	 &mov	(&DWP(28,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+
+	&vmovdqa	($t3,&QWP(64,$K256));
+	&sub	($K256,3*64);			# rewind K
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_avx_bmi"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+	&vzeroall	();
+&function_end_A();
+						}
+						}
+						}}}
+&function_end_B("sha256_block_data_order");
+
+&asm_finish();
diff --git a/crypto/sha/asm/sha256-armv4.pl b/crypto/sha/asm/sha256-armv4.pl
new file mode 100644
index 0000000..1b2a098
--- /dev/null
+++ b/crypto/sha/asm/sha256-armv4.pl
@@ -0,0 +1,540 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# SHA256 block procedure for ARMv4. May 2007.
+
+# Performance is ~2x better than gcc 3.4 generated code and in
+# "absolute" terms is ~2250 cycles per 64-byte block or ~35 cycles
+# per byte [on single-issue Xscale PXA250 core].
+
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+# September 2013.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process one
+# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+# code (meaning that the latter performs sub-optimally; nothing was
+# done about it).
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output" or die "can't open $output: $!";
+
+$ctx="r0";	$t0="r0";
+$inp="r1";	$t4="r1";
+$len="r2";	$t1="r2";
+$T1="r3";	$t3="r3";
+$A="r4";
+$B="r5";
+$C="r6";
+$D="r7";
+$E="r8";
+$F="r9";
+$G="r10";
+$H="r11";
+@V=($A,$B,$C,$D,$E,$F,$G,$H);
+$t2="r12";
+$Ktbl="r14";
+
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
+
+sub BODY_00_15 {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___ if ($i<16);
+#if __ARM_ARCH__>=7
+	@ ldr	$t1,[$inp],#4			@ $i
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+	rev	$t1,$t1
+#else
+	@ ldrb	$t1,[$inp,#3]			@ $i
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	ldrb	$t2,[$inp,#2]
+	ldrb	$t0,[$inp,#1]
+	orr	$t1,$t1,$t2,lsl#8
+	ldrb	$t2,[$inp],#4
+	orr	$t1,$t1,$t0,lsl#16
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+	orr	$t1,$t1,$t2,lsl#24
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+#endif
+___
+$code.=<<___;
+	ldr	$t2,[$Ktbl],#4			@ *K256++
+	add	$h,$h,$t1			@ h+=X[i]
+	str	$t1,[sp,#`$i%16`*4]
+	eor	$t1,$f,$g
+	add	$h,$h,$t0,ror#$Sigma1[0]	@ h+=Sigma1(e)
+	and	$t1,$t1,$e
+	add	$h,$h,$t2			@ h+=K256[i]
+	eor	$t1,$t1,$g			@ Ch(e,f,g)
+	eor	$t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
+	add	$h,$h,$t1			@ h+=Ch(e,f,g)
+#if $i==31
+	and	$t2,$t2,#0xff
+	cmp	$t2,#0xf2			@ done?
+#endif
+#if $i<15
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4			@ prefetch
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+#else
+	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
+#endif
+	eor	$t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]`	@ Sigma0(a)
+	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
+	add	$d,$d,$h			@ d+=h
+	eor	$t3,$t3,$b			@ Maj(a,b,c)
+	add	$h,$h,$t0,ror#$Sigma0[0]	@ h+=Sigma0(a)
+	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
+___
+	($t2,$t3)=($t3,$t2);
+}
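+
+# The Sigma computations above exploit the fact that rotation
+# distributes over XOR: Sigma1(e) = ROTR(e,6)^ROTR(e,11)^ROTR(e,25) is
+# formed as ROTR(e ^ ROTR(e,5) ^ ROTR(e,19), 6), and the outer rotation
+# rides for free in the shifter operand of "add ...,ror#6". Stand-alone
+# Perl cross-check (illustrative only):
+#
+#	sub ror32 { my ($x,$n) = @_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff }
+#	my $e = 0xdeadbeef;
+#	die "Sigma1 factoring broken"
+#	    if ((ror32($e,6)^ror32($e,11)^ror32($e,25))
+#		!= ror32($e ^ ror32($e,5) ^ ror32($e,19), 6));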
+
+sub BODY_16_XX {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
+	@ ldr	$t4,[sp,#`($i+14)%16`*4]
+	mov	$t0,$t1,ror#$sigma0[0]
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	mov	$t2,$t4,ror#$sigma1[0]
+	eor	$t0,$t0,$t1,ror#$sigma0[1]
+	eor	$t2,$t2,$t4,ror#$sigma1[1]
+	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
+	ldr	$t1,[sp,#`($i+0)%16`*4]
+	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
+	ldr	$t4,[sp,#`($i+9)%16`*4]
+
+	add	$t2,$t2,$t0
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`	@ from BODY_00_15
+	add	$t1,$t1,$t2
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+	add	$t1,$t1,$t4			@ X[i]
+___
+	&BODY_00_15(@_);
+}
+
+$code=<<___;
+#if defined(__arm__)
+#include "arm_arch.h"
+
+.text
+.code	32
+
+.type	K256,%object
+.align	5
+K256:
+.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size	K256,.-K256
+.word	0				@ terminator
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-sha256_block_data_order
+.align	5
+
+.global	sha256_block_data_order
+.type	sha256_block_data_order,%function
+sha256_block_data_order:
+	sub	r3,pc,#8		@ sha256_block_data_order
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+#if __ARM_ARCH__>=7
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+	tst	r12,#1
+	bne	.LNEON
+#endif
+	stmdb	sp!,{$ctx,$inp,$len,r4-r11,lr}
+	ldmia	$ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
+	sub	$Ktbl,r3,#256+32	@ K256
+	sub	sp,sp,#16*4		@ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t3,$B,$C		@ magic
+	eor	$t2,$t2,$t2
+___
+for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=".Lrounds_16_xx:\n";
+for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	ldreq	$t3,[sp,#16*4]		@ pull ctx
+	bne	.Lrounds_16_xx
+
+	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
+	ldr	$t0,[$t3,#0]
+	ldr	$t1,[$t3,#4]
+	ldr	$t2,[$t3,#8]
+	add	$A,$A,$t0
+	ldr	$t0,[$t3,#12]
+	add	$B,$B,$t1
+	ldr	$t1,[$t3,#16]
+	add	$C,$C,$t2
+	ldr	$t2,[$t3,#20]
+	add	$D,$D,$t0
+	ldr	$t0,[$t3,#24]
+	add	$E,$E,$t1
+	ldr	$t1,[$t3,#28]
+	add	$F,$F,$t2
+	ldr	$inp,[sp,#17*4]		@ pull inp
+	ldr	$t2,[sp,#18*4]		@ pull inp+len
+	add	$G,$G,$t0
+	add	$H,$H,$t1
+	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
+	cmp	$inp,$t2
+	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
+	bne	.Loop
+
+	add	sp,sp,#`16+3`*4	@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r11,pc}
+#else
+	ldmia	sp!,{r4-r11,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+___
+######################################################################
+# NEON stuff
+#
+{{{
+my @X=map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
+my $Xfer=$t4;
+my $j=0;
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+  my $arg = pop;
+    $arg = "#$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
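+
+# So a call such as &vshr_u32($T2,$T0,7) has no explicit definition: it
+# lands in AUTOLOAD above, which derives the mnemonic from the sub name
+# ("vshr_u32" -> "vshr.u32"), prefixes a numeric final argument with
+# "#", and appends "\tvshr.u32\tq10,q8,#7\n" to $code (given $T2="q10"
+# and $T0="q8").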
+
+sub Xupdate()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	&vext_8		($T0,@X[0],@X[1],4);	# X[1..4]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vext_8		($T1,@X[2],@X[3],4);	# X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T2,$T0,$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T1,$T0,$sigma0[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	($T2,$T0,32-$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T3,$T0,$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		($T1,$T1,$T2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	($T3,$T0,32-$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dhi(@X[3]),$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		($T1,$T1,$T3);		# sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T5,&Dhi(@X[3]),$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dhi(@X[3]),$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);		# sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dlo(@X[0]),$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T5,&Dlo(@X[0]),$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dlo(@X[0]),$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$T0}","[$Ktbl,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);		# sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	($T0,$T0,@X[0]);
+	 while($#insns>=2) { eval(shift(@insns)); }
+	&vst1_32	("{$T0}","[$Xfer,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$T0}","[$Ktbl,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vrev32_8	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	($T0,$T0,@X[0]);
+	 foreach (@insns) { eval; }	# remaining instructions
+	&vst1_32	("{$T0}","[$Xfer,:128]!");
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
+	'&eor	($t1,$f,$g)',
+	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
+	'&and	($t1,$t1,$e)',
+	'&eor	($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
+	'&eor	($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+	'&eor	($t1,$t1,$g)',			# Ch(e,f,g)
+	'&add	($h,$h,$t2,"ror#$Sigma1[0]")',	# h+=Sigma1(e)
+	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
+	'&eor	($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
+	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
+	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
+	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
+	'&ldr	($t1,"[sp,#64]")			if ($j==31)',
+	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
+	'&add	($d,$d,$h)',			# d+=h
+	'&add	($h,$h,$t0,"ror#$Sigma0[0]");'.	# h+=Sigma0(a)
+	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
+	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+	)
+}
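+
+# Note the modulo-scheduling visible as "h+=Maj(a,b,c) from the past":
+# the Maj result of round i is carried in the rotating $t2/$t3 pair and
+# is only folded in at the start of round i+1 (where last round's h is
+# this round's a), which shortens the critical path of each round.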
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.fpu	neon
+.align	4
+.LNEON:
+	stmdb	sp!,{r4-r12,lr}
+
+	mov	$t2,sp
+	sub	sp,sp,#16*4+16		@ alloca
+	sub	$Ktbl,r3,#256+32	@ K256
+	bic	sp,sp,#15		@ align for 128-bit stores
+
+	vld1.8		{@X[0]},[$inp]!
+	vld1.8		{@X[1]},[$inp]!
+	vld1.8		{@X[2]},[$inp]!
+	vld1.8		{@X[3]},[$inp]!
+	vld1.32		{$T0},[$Ktbl,:128]!
+	vld1.32		{$T1},[$Ktbl,:128]!
+	vld1.32		{$T2},[$Ktbl,:128]!
+	vld1.32		{$T3},[$Ktbl,:128]!
+	vrev32.8	@X[0],@X[0]		@ yes, even on
+	str		$ctx,[sp,#64]
+	vrev32.8	@X[1],@X[1]		@ big-endian
+	str		$inp,[sp,#68]
+	mov		$Xfer,sp
+	vrev32.8	@X[2],@X[2]
+	str		$len,[sp,#72]
+	vrev32.8	@X[3],@X[3]
+	str		$t2,[sp,#76]		@ save original sp
+	vadd.i32	$T0,$T0,@X[0]
+	vadd.i32	$T1,$T1,@X[1]
+	vst1.32		{$T0},[$Xfer,:128]!
+	vadd.i32	$T2,$T2,@X[2]
+	vst1.32		{$T1},[$Xfer,:128]!
+	vadd.i32	$T3,$T3,@X[3]
+	vst1.32		{$T2},[$Xfer,:128]!
+	vst1.32		{$T3},[$Xfer,:128]!
+
+	ldmia		$ctx,{$A-$H}
+	sub		$Xfer,$Xfer,#64
+	ldr		$t1,[sp,#0]
+	eor		$t2,$t2,$t2
+	eor		$t3,$B,$C
+	b		.L_00_48
+
+.align	4
+.L_00_48:
+___
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+$code.=<<___;
+	teq	$t1,#0				@ check for K256 terminator
+	ldr	$t1,[sp,#0]
+	sub	$Xfer,$Xfer,#64
+	bne	.L_00_48
+
+	ldr		$inp,[sp,#68]
+	ldr		$t0,[sp,#72]
+	sub		$Ktbl,$Ktbl,#256	@ rewind $Ktbl
+	teq		$inp,$t0
+	subeq		$inp,$inp,#64		@ avoid SEGV
+	vld1.8		{@X[0]},[$inp]!		@ load next input block
+	vld1.8		{@X[1]},[$inp]!
+	vld1.8		{@X[2]},[$inp]!
+	vld1.8		{@X[3]},[$inp]!
+	strne		$inp,[sp,#68]
+	mov		$Xfer,sp
+___
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+$code.=<<___;
+	ldr	$t0,[$t1,#0]
+	add	$A,$A,$t2			@ h+=Maj(a,b,c) from the past
+	ldr	$t2,[$t1,#4]
+	ldr	$t3,[$t1,#8]
+	ldr	$t4,[$t1,#12]
+	add	$A,$A,$t0			@ accumulate
+	ldr	$t0,[$t1,#16]
+	add	$B,$B,$t2
+	ldr	$t2,[$t1,#20]
+	add	$C,$C,$t3
+	ldr	$t3,[$t1,#24]
+	add	$D,$D,$t4
+	ldr	$t4,[$t1,#28]
+	add	$E,$E,$t0
+	str	$A,[$t1],#4
+	add	$F,$F,$t2
+	str	$B,[$t1],#4
+	add	$G,$G,$t3
+	str	$C,[$t1],#4
+	add	$H,$H,$t4
+	str	$D,[$t1],#4
+	stmia	$t1,{$E-$H}
+
+	movne	$Xfer,sp
+	ldrne	$t1,[sp,#0]
+	eorne	$t2,$t2,$t2
+	ldreq	sp,[sp,#76]			@ restore original sp
+	eorne	$t3,$B,$C
+	bne	.L_00_48
+
+	ldmia	sp!,{r4-r12,pc}
+#endif
+___
+}}}
+$code.=<<___;
+.size   sha256_block_data_order,.-sha256_block_data_order
+.asciz  "SHA256 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+.comm   OPENSSL_armcap_P,4,4
+
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl
new file mode 100644
index 0000000..9fc7929
--- /dev/null
+++ b/crypto/sha/asm/sha512-586.pl
@@ -0,0 +1,910 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA512 block transform for x86. September 2007.
+#
+# May 2013.
+#
+# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
+#
+# Performance in clock cycles per processed byte (less is better):
+#
+#		gcc	icc	x86 asm	SIMD(*)	x86_64(**)
+# Pentium	100	97	61	-	-
+# PIII		75	77	56	-	-
+# P4		116	95	82	34.6	30.8
+# AMD K8	54	55	36	20.7	9.57
+# Core2		66	57	40	15.9	9.97
+# Westmere	70	-	38	12.2	9.58
+# Sandy Bridge	58	-	35	11.9	11.2
+# Ivy Bridge	50	-	33	11.5	8.17
+# Haswell	46	-	29	11.3	7.66
+# Bulldozer	121	-	50	14.0	13.5
+# VIA Nano	91	-	52	33	14.7
+# Atom		126	-	68	48(***)	14.7
+#
+# (*)	whichever best applies.
+# (**)	x86_64 assembler performance is presented for reference
+#	purposes; the results are for integer-only code.
+# (***)	paddq is incredibly slow on Atom.
+#
+# The IALU code-path is optimized for older Pentiums. On a vanilla
+# Pentium the performance improvement over compiler generated code
+# reaches ~60%, while on PIII it is ~35%. On newer µ-archs the
+# improvement varies from 15% to 50%, but it matters less as they are
+# expected to execute the SSE2 code-path, which is commonly ~2-3x
+# faster [than compiler generated code]. The SSE2 code-path is as fast
+# as the original sha512-sse2.pl, even though it does not use 128-bit
+# operations. The latter means that an SSE2-aware kernel is no longer
+# required to execute the code. Another difference is that the new
+# code reduces the number of writes, but at the cost of a data cache
+# "footprint" that is larger by 1/2KB.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386");
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+$Tlo=&DWP(0,"esp");	$Thi=&DWP(4,"esp");
+$Alo=&DWP(8,"esp");	$Ahi=&DWP(8+4,"esp");
+$Blo=&DWP(16,"esp");	$Bhi=&DWP(16+4,"esp");
+$Clo=&DWP(24,"esp");	$Chi=&DWP(24+4,"esp");
+$Dlo=&DWP(32,"esp");	$Dhi=&DWP(32+4,"esp");
+$Elo=&DWP(40,"esp");	$Ehi=&DWP(40+4,"esp");
+$Flo=&DWP(48,"esp");	$Fhi=&DWP(48+4,"esp");
+$Glo=&DWP(56,"esp");	$Ghi=&DWP(56+4,"esp");
+$Hlo=&DWP(64,"esp");	$Hhi=&DWP(64+4,"esp");
+$K512="ebp";
+
+$Asse2=&QWP(0,"esp");
+$Bsse2=&QWP(8,"esp");
+$Csse2=&QWP(16,"esp");
+$Dsse2=&QWP(24,"esp");
+$Esse2=&QWP(32,"esp");
+$Fsse2=&QWP(40,"esp");
+$Gsse2=&QWP(48,"esp");
+$Hsse2=&QWP(56,"esp");
+
+$A="mm0";	# B-D and
+$E="mm4";	# F-H are commonly loaded to respectively mm1-mm3 and
+		# mm5-mm7, but it's done on on-demand basis...
+$BxC="mm2";	# ... except for B^C
+
+sub BODY_00_15_sse2 {
+    my $phase=shift;
+
+	#&movq	("mm5",$Fsse2);			# load f
+	#&movq	("mm6",$Gsse2);			# load g
+
+	&movq	("mm1",$E);			# %mm1 is sliding right
+	 &pxor	("mm5","mm6");			# f^=g
+	&psrlq	("mm1",14);
+	 &movq	($Esse2,$E);			# modulo-scheduled save e
+	 &pand	("mm5",$E);			# f&=e
+	&psllq	($E,23);			# $E is sliding left
+	 &movq	($A,"mm3")			if ($phase<2);
+	 &movq	(&QWP(8*9,"esp"),"mm7")		# save X[i]
+	&movq	("mm3","mm1");			# %mm3 is T1
+	 &psrlq	("mm1",4);
+	 &pxor	("mm5","mm6");			# Ch(e,f,g)
+	&pxor	("mm3",$E);
+	 &psllq	($E,23);
+	&pxor	("mm3","mm1");
+	 &movq	($Asse2,$A);			# modulo-scheduled save a
+	 &paddq	("mm7","mm5");			# X[i]+=Ch(e,f,g)
+	&pxor	("mm3",$E);
+	 &psrlq	("mm1",23);
+	 &paddq	("mm7",$Hsse2);			# X[i]+=h
+	&pxor	("mm3","mm1");
+	 &psllq	($E,4);
+	 &paddq	("mm7",QWP(0,$K512));		# X[i]+=K512[i]
+	&pxor	("mm3",$E);			# T1=Sigma1_512(e)
+
+	 &movq	($E,$Dsse2);			# e = load d, e in next round
+	&paddq	("mm3","mm7");			# T1+=X[i]
+	 &movq	("mm5",$A);			# %mm5 is sliding right
+	 &psrlq	("mm5",28);
+	&paddq	($E,"mm3");			# d += T1
+	 &movq	("mm6",$A);			# %mm6 is sliding left
+	 &movq	("mm7","mm5");
+	 &psllq	("mm6",25);
+	&movq	("mm1",$Bsse2);			# load b
+	 &psrlq	("mm5",6);
+	 &pxor	("mm7","mm6");
+	&sub	("esp",8);
+	 &psllq	("mm6",5);
+	 &pxor	("mm7","mm5");
+	&pxor	($A,"mm1");			# a^b, b^c in next round
+	 &psrlq	("mm5",5);
+	 &pxor	("mm7","mm6");
+	&pand	($BxC,$A);			# (b^c)&(a^b)
+	 &psllq	("mm6",6);
+	 &pxor	("mm7","mm5");
+	&pxor	($BxC,"mm1");			# [h=]Maj(a,b,c)
+	 &pxor	("mm6","mm7");			# Sigma0_512(a)
+	 &movq	("mm7",&QWP(8*(9+16-1),"esp"))	if ($phase!=0);	# pre-fetch
+	 &movq	("mm5",$Fsse2)			if ($phase==0);	# load f
+
+    if ($phase>1) {
+	&paddq	($BxC,"mm6");			# h+=Sigma0(a)
+	 &add	($K512,8);
+	#&paddq	($BxC,"mm3");			# h+=T1
+
+	($A,$BxC) = ($BxC,$A);			# rotate registers
+    } else {
+	&paddq	("mm3",$BxC);			# T1+=Maj(a,b,c)
+	 &movq	($BxC,$A);
+	 &add	($K512,8);
+	&paddq	("mm3","mm6");			# T1+=Sigma0(a)
+	 &movq	("mm6",$Gsse2)			if ($phase==0);	# load g
+	#&movq	($A,"mm3");			# h=T1
+    }
+}
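+
+# MMX has no 64-bit rotate, so T1 = Sigma1_512(e) = ROTR(e,14) ^
+# ROTR(e,18) ^ ROTR(e,41) is assembled from shifted pieces: "mm1"
+# slides right (14, then 18, then 41) while $E slides left (23, then
+# 46, then 50), and XOR-ing all six pieces pairs each ROTR(e,n) as
+# (e>>n)^(e<<(64-n)). Stand-alone Perl cross-check (assumes a 64-bit
+# perl; illustrative only):
+#
+#	sub ror64 { my ($x,$n) = @_;
+#	    (($x>>$n)|($x<<(64-$n))) & 0xffffffffffffffff }
+#	my $e = 0x0123456789abcdef;
+#	my $pieces = ($e>>14)^($e<<50) ^ ($e>>18)^($e<<46)
+#		   ^ ($e>>41)^($e<<23);
+#	die "Sigma1_512 decomposition broken"
+#	    if (($pieces & 0xffffffffffffffff)
+#		!= (ror64($e,14)^ror64($e,18)^ror64($e,41)));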
+
+sub BODY_00_15_x86 {
+	#define Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+	#	LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+	#	HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+	&mov	("ecx",$Elo);
+	&mov	("edx",$Ehi);
+	&mov	("esi","ecx");
+
+	&shr	("ecx",9);	# lo>>9
+	&mov	("edi","edx");
+	&shr	("edx",9);	# hi>>9
+	&mov	("ebx","ecx");
+	&shl	("esi",14);	# lo<<14
+	&mov	("eax","edx");
+	&shl	("edi",14);	# hi<<14
+	&xor	("ebx","esi");
+
+	&shr	("ecx",14-9);	# lo>>14
+	&xor	("eax","edi");
+	&shr	("edx",14-9);	# hi>>14
+	&xor	("eax","ecx");
+	&shl	("esi",18-14);	# lo<<18
+	&xor	("ebx","edx");
+	&shl	("edi",18-14);	# hi<<18
+	&xor	("ebx","esi");
+
+	&shr	("ecx",18-14);	# lo>>18
+	&xor	("eax","edi");
+	&shr	("edx",18-14);	# hi>>18
+	&xor	("eax","ecx");
+	&shl	("esi",23-18);	# lo<<23
+	&xor	("ebx","edx");
+	&shl	("edi",23-18);	# hi<<23
+	&xor	("eax","esi");
+	&xor	("ebx","edi");			# T1 = Sigma1(e)
+
+	&mov	("ecx",$Flo);
+	&mov	("edx",$Fhi);
+	&mov	("esi",$Glo);
+	&mov	("edi",$Ghi);
+	 &add	("eax",$Hlo);
+	 &adc	("ebx",$Hhi);			# T1 += h
+	&xor	("ecx","esi");
+	&xor	("edx","edi");
+	&and	("ecx",$Elo);
+	&and	("edx",$Ehi);
+	 &add	("eax",&DWP(8*(9+15)+0,"esp"));
+	 &adc	("ebx",&DWP(8*(9+15)+4,"esp"));	# T1 += X[0]
+	&xor	("ecx","esi");
+	&xor	("edx","edi");			# Ch(e,f,g) = (f^g)&e)^g
+
+	&mov	("esi",&DWP(0,$K512));
+	&mov	("edi",&DWP(4,$K512));		# K[i]
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# T1 += Ch(e,f,g)
+	&mov	("ecx",$Dlo);
+	&mov	("edx",$Dhi);
+	&add	("eax","esi");
+	&adc	("ebx","edi");			# T1 += K[i]
+	&mov	($Tlo,"eax");
+	&mov	($Thi,"ebx");			# put T1 away
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# d += T1
+
+	#define Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+	#	LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+	#	HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+	&mov	("ecx",$Alo);
+	&mov	("edx",$Ahi);
+	&mov	($Dlo,"eax");
+	&mov	($Dhi,"ebx");
+	&mov	("esi","ecx");
+
+	&shr	("ecx",2);	# lo>>2
+	&mov	("edi","edx");
+	&shr	("edx",2);	# hi>>2
+	&mov	("ebx","ecx");
+	&shl	("esi",4);	# lo<<4
+	&mov	("eax","edx");
+	&shl	("edi",4);	# hi<<4
+	&xor	("ebx","esi");
+
+	&shr	("ecx",7-2);	# lo>>7
+	&xor	("eax","edi");
+	&shr	("edx",7-2);	# hi>>7
+	&xor	("ebx","ecx");
+	&shl	("esi",25-4);	# lo<<25
+	&xor	("eax","edx");
+	&shl	("edi",25-4);	# hi<<25
+	&xor	("eax","esi");
+
+	&shr	("ecx",28-7);	# lo>>28
+	&xor	("ebx","edi");
+	&shr	("edx",28-7);	# hi>>28
+	&xor	("eax","ecx");
+	&shl	("esi",30-25);	# lo<<30
+	&xor	("ebx","edx");
+	&shl	("edi",30-25);	# hi<<30
+	&xor	("eax","esi");
+	&xor	("ebx","edi");			# Sigma0(a)
+
+	&mov	("ecx",$Alo);
+	&mov	("edx",$Ahi);
+	&mov	("esi",$Blo);
+	&mov	("edi",$Bhi);
+	&add	("eax",$Tlo);
+	&adc	("ebx",$Thi);			# T1 = Sigma0(a)+T1
+	&or	("ecx","esi");
+	&or	("edx","edi");
+	&and	("ecx",$Clo);
+	&and	("edx",$Chi);
+	&and	("esi",$Alo);
+	&and	("edi",$Ahi);
+	&or	("ecx","esi");
+	&or	("edx","edi");			# Maj(a,b,c) = ((a|b)&c)|(a&b)
+
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# T1 += Maj(a,b,c)
+	&mov	($Tlo,"eax");
+	&mov	($Thi,"ebx");
+
+	&mov	(&LB("edx"),&BP(0,$K512));	# pre-fetch LSB of *K
+	&sub	("esp",8);
+	&lea	($K512,&DWP(8,$K512));		# K++
+}
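+
+# BODY_00_15_x86 is the pure-IALU path: every 64-bit rotation is built
+# from paired 32-bit shifts of the lo/hi halves, exactly as the LO/HI
+# comment lines above spell out. A stand-alone Perl sketch of the
+# recipe for 0 < n < 32 (illustrative only):
+#
+#	sub rotr64_halves {		# returns (hi,lo) of x rotated by $n
+#		my ($hi,$lo,$n) = @_;
+#		my $m = 0xffffffff;
+#		((($hi>>$n)|($lo<<(32-$n))) & $m,
+#		 (($lo>>$n)|($hi<<(32-$n))) & $m);
+#	}
+#	# For n > 32 the halves swap roles and n-32 is used, which is
+#	# why e.g. the 41-bit rotation appears above as hi>>9 ^ lo<<23.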
+
+
+&function_begin("sha512_block_data_order");
+	&mov	("esi",wparam(0));	# ctx
+	&mov	("edi",wparam(1));	# inp
+	&mov	("eax",wparam(2));	# num
+	&mov	("ebx","esp");		# saved sp
+
+	&call	(&label("pic_point"));	# make it PIC!
+&set_label("pic_point");
+	&blindpop($K512);
+	&lea	($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
+
+	&sub	("esp",16);
+	&and	("esp",-64);
+
+	&shl	("eax",7);
+	&add	("eax","edi");
+	&mov	(&DWP(0,"esp"),"esi");	# ctx
+	&mov	(&DWP(4,"esp"),"edi");	# inp
+	&mov	(&DWP(8,"esp"),"eax");	# inp+num*128
+	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
+
+if ($sse2) {
+	&picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
+	&mov	("ecx",&DWP(0,"edx"));
+	&test	("ecx",1<<26);
+	&jz	(&label("loop_x86"));
+
+	&mov	("edx",&DWP(4,"edx"));
+
+	# load ctx->h[0-7]
+	&movq	($A,&QWP(0,"esi"));
+	 &and	("ecx",1<<24);		# XMM registers availability
+	&movq	("mm1",&QWP(8,"esi"));
+	 &and	("edx",1<<9);		# SSSE3 bit
+	&movq	($BxC,&QWP(16,"esi"));
+	 &or	("ecx","edx");
+	&movq	("mm3",&QWP(24,"esi"));
+	&movq	($E,&QWP(32,"esi"));
+	&movq	("mm5",&QWP(40,"esi"));
+	&movq	("mm6",&QWP(48,"esi"));
+	&movq	("mm7",&QWP(56,"esi"));
+	&cmp	("ecx",1<<24|1<<9);
+	&je	(&label("SSSE3"));
+	&sub	("esp",8*10);
+	&jmp	(&label("loop_sse2"));
+
+&set_label("loop_sse2",16);
+	#&movq	($Asse2,$A);
+	&movq	($Bsse2,"mm1");
+	&movq	($Csse2,$BxC);
+	&movq	($Dsse2,"mm3");
+	#&movq	($Esse2,$E);
+	&movq	($Fsse2,"mm5");
+	&movq	($Gsse2,"mm6");
+	&pxor	($BxC,"mm1");			# magic
+	&movq	($Hsse2,"mm7");
+	&movq	("mm3",$A);			# magic
+
+	&mov	("eax",&DWP(0,"edi"));
+	&mov	("ebx",&DWP(4,"edi"));
+	&add	("edi",8);
+	&mov	("edx",15);			# counter
+	&bswap	("eax");
+	&bswap	("ebx");
+	&jmp	(&label("00_14_sse2"));
+
+&set_label("00_14_sse2",16);
+	&movd	("mm1","eax");
+	&mov	("eax",&DWP(0,"edi"));
+	&movd	("mm7","ebx");
+	&mov	("ebx",&DWP(4,"edi"));
+	&add	("edi",8);
+	&bswap	("eax");
+	&bswap	("ebx");
+	&punpckldq("mm7","mm1");
+
+	&BODY_00_15_sse2();
+
+	&dec	("edx");
+	&jnz	(&label("00_14_sse2"));
+
+	&movd	("mm1","eax");
+	&movd	("mm7","ebx");
+	&punpckldq("mm7","mm1");
+
+	&BODY_00_15_sse2(1);
+
+	&pxor	($A,$A);			# A is in %mm3
+	&mov	("edx",32);			# counter
+	&jmp	(&label("16_79_sse2"));
+
+&set_label("16_79_sse2",16);
+    for ($j=0;$j<2;$j++) {			# 2x unroll
+	#&movq	("mm7",&QWP(8*(9+16-1),"esp"));	# prefetched in BODY_00_15 
+	&movq	("mm5",&QWP(8*(9+16-14),"esp"));
+	&movq	("mm1","mm7");
+	&psrlq	("mm7",1);
+	 &movq	("mm6","mm5");
+	 &psrlq	("mm5",6);
+	&psllq	("mm1",56);
+	 &paddq	($A,"mm3");			# from BODY_00_15
+	 &movq	("mm3","mm7");
+	&psrlq	("mm7",7-1);
+	 &pxor	("mm3","mm1");
+	 &psllq	("mm1",63-56);
+	&pxor	("mm3","mm7");
+	 &psrlq	("mm7",8-7);
+	&pxor	("mm3","mm1");
+	 &movq	("mm1","mm5");
+	 &psrlq	("mm5",19-6);
+	&pxor	("mm7","mm3");			# sigma0
+
+	 &psllq	("mm6",3);
+	 &pxor	("mm1","mm5");
+	&paddq	("mm7",&QWP(8*(9+16),"esp"));
+	 &pxor	("mm1","mm6");
+	 &psrlq	("mm5",61-19);
+	&paddq	("mm7",&QWP(8*(9+16-9),"esp"));
+	 &pxor	("mm1","mm5");
+	 &psllq	("mm6",45-3);
+	&movq	("mm5",$Fsse2);			# load f
+	 &pxor	("mm1","mm6");			# sigma1
+	&movq	("mm6",$Gsse2);			# load g
+
+	&paddq	("mm7","mm1");			# X[i]
+	#&movq	(&QWP(8*9,"esp"),"mm7");	# moved to BODY_00_15
+
+	&BODY_00_15_sse2(2);
+    }
+	&dec	("edx");
+	&jnz	(&label("16_79_sse2"));
+
+	#&movq	($A,$Asse2);
+	&paddq	($A,"mm3");			# from BODY_00_15
+	&movq	("mm1",$Bsse2);
+	#&movq	($BxC,$Csse2);
+	&movq	("mm3",$Dsse2);
+	#&movq	($E,$Esse2);
+	&movq	("mm5",$Fsse2);
+	&movq	("mm6",$Gsse2);
+	&movq	("mm7",$Hsse2);
+
+	&pxor	($BxC,"mm1");			# de-magic
+	&paddq	($A,&QWP(0,"esi"));
+	&paddq	("mm1",&QWP(8,"esi"));
+	&paddq	($BxC,&QWP(16,"esi"));
+	&paddq	("mm3",&QWP(24,"esi"));
+	&paddq	($E,&QWP(32,"esi"));
+	&paddq	("mm5",&QWP(40,"esi"));
+	&paddq	("mm6",&QWP(48,"esi"));
+	&paddq	("mm7",&QWP(56,"esi"));
+
+	&mov	("eax",8*80);
+	&movq	(&QWP(0,"esi"),$A);
+	&movq	(&QWP(8,"esi"),"mm1");
+	&movq	(&QWP(16,"esi"),$BxC);
+	&movq	(&QWP(24,"esi"),"mm3");
+	&movq	(&QWP(32,"esi"),$E);
+	&movq	(&QWP(40,"esi"),"mm5");
+	&movq	(&QWP(48,"esi"),"mm6");
+	&movq	(&QWP(56,"esi"),"mm7");
+
+	&lea	("esp",&DWP(0,"esp","eax"));	# destroy frame
+	&sub	($K512,"eax");			# rewind K
+
+	&cmp	("edi",&DWP(8*10+8,"esp"));	# are we done yet?
+	&jb	(&label("loop_sse2"));
+
+	&mov	("esp",&DWP(8*10+12,"esp"));	# restore sp
+	&emms	();
+&function_end_A();
+
+&set_label("SSSE3",32);
+{ my ($cnt,$frame)=("ecx","edx");
+  my @X=map("xmm$_",(0..7));
+  my $j;
+  my $i=0;
+
+	&lea	($frame,&DWP(-64,"esp"));
+	&sub	("esp",256);
+
+	# fixed stack frame layout
+	#
+	# +0	A B C D E F G H		# backing store
+	# +64	X[0]+K[i] .. X[15]+K[i]	# XMM->MM xfer area
+	# +192				# XMM off-load ring buffer
+	# +256				# saved parameters
+
+	&movdqa		(@X[1],&QWP(80*8,$K512));		# byte swap mask
+	&movdqu		(@X[0],&QWP(0,"edi"));
+	&pshufb		(@X[0],@X[1]);
+    for ($j=0;$j<8;$j++) {
+	&movdqa		(&QWP(16*(($j-1)%4),$frame),@X[3])	if ($j>4); # off-load
+	&movdqa		(@X[3],&QWP(16*($j%8),$K512));
+	&movdqa		(@X[2],@X[1])				if ($j<7); # perpetuate byte swap mask
+	&movdqu		(@X[1],&QWP(16*($j+1),"edi"))		if ($j<7); # next input
+	&movdqa		(@X[1],&QWP(16*(($j+1)%4),$frame))	if ($j==7);# restore @X[0]
+	&paddq		(@X[3],@X[0]);
+	&pshufb		(@X[1],@X[2])				if ($j<7);
+	&movdqa		(&QWP(16*($j%8)-128,$frame),@X[3]);	# xfer X[i]+K[i]
+
+	push(@X,shift(@X));					# rotate(@X)
+    }
+	#&jmp		(&label("loop_ssse3"));
+	&nop		();
+
+&set_label("loop_ssse3",32);
+	&movdqa		(@X[2],&QWP(16*(($j+1)%4),$frame));	# pre-restore @X[1]
+	&movdqa		(&QWP(16*(($j-1)%4),$frame),@X[3]);	# off-load @X[3]
+	&lea		($K512,&DWP(16*8,$K512));
+
+	#&movq	($Asse2,$A);			# off-load A-H
+	&movq	($Bsse2,"mm1");
+	 &mov	("ebx","edi");
+	&movq	($Csse2,$BxC);
+	 &lea	("edi",&DWP(128,"edi"));	# advance input
+	&movq	($Dsse2,"mm3");
+	 &cmp	("edi","eax");
+	#&movq	($Esse2,$E);
+	&movq	($Fsse2,"mm5");
+	 &cmovb	("ebx","edi");
+	&movq	($Gsse2,"mm6");
+	 &mov	("ecx",4);			# loop counter
+	&pxor	($BxC,"mm1");			# magic
+	&movq	($Hsse2,"mm7");
+	&pxor	("mm3","mm3");			# magic
+
+	&jmp		(&label("00_47_ssse3"));
+
+sub BODY_00_15_ssse3 {		# "phase-less" copy of BODY_00_15_sse2
+	(
+	'&movq	("mm1",$E)',				# %mm1 is sliding right
+	'&movq	("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
+	 '&pxor	("mm5","mm6")',				# f^=g
+	'&psrlq	("mm1",14)',
+	 '&movq	(&QWP(8*($i+4)%64,"esp"),$E)',		# modulo-scheduled save e
+	 '&pand	("mm5",$E)',				# f&=e
+	'&psllq	($E,23)',				# $E is sliding left
+	'&paddq	($A,"mm3")',				# [h+=Maj(a,b,c)]
+	'&movq	("mm3","mm1")',				# %mm3 is T1
+	 '&psrlq("mm1",4)',
+	 '&pxor	("mm5","mm6")',				# Ch(e,f,g)
+	'&pxor	("mm3",$E)',
+	 '&psllq($E,23)',
+	'&pxor	("mm3","mm1")',
+	 '&movq	(&QWP(8*$i%64,"esp"),$A)',		# modulo-scheduled save a
+	 '&paddq("mm7","mm5")',				# X[i]+=Ch(e,f,g)
+	'&pxor	("mm3",$E)',
+	 '&psrlq("mm1",23)',
+	 '&paddq("mm7",&QWP(8*($i+7)%64,"esp"))',	# X[i]+=h
+	'&pxor	("mm3","mm1")',
+	 '&psllq($E,4)',
+	'&pxor	("mm3",$E)',				# T1=Sigma1_512(e)
+
+	 '&movq	($E,&QWP(8*($i+3)%64,"esp"))',		# e = load d, e in next round
+	'&paddq	("mm3","mm7")',				# T1+=X[i]
+	 '&movq	("mm5",$A)',				# %mm5 is sliding right
+	 '&psrlq("mm5",28)',
+	'&paddq	($E,"mm3")',				# d += T1
+	 '&movq	("mm6",$A)',				# %mm6 is sliding left
+	 '&movq	("mm7","mm5")',
+	 '&psllq("mm6",25)',
+	'&movq	("mm1",&QWP(8*($i+1)%64,"esp"))',	# load b
+	 '&psrlq("mm5",6)',
+	 '&pxor	("mm7","mm6")',
+	 '&psllq("mm6",5)',
+	 '&pxor	("mm7","mm5")',
+	'&pxor	($A,"mm1")',				# a^b, b^c in next round
+	 '&psrlq("mm5",5)',
+	 '&pxor	("mm7","mm6")',
+	'&pand	($BxC,$A)',				# (b^c)&(a^b)
+	 '&psllq("mm6",6)',
+	 '&pxor	("mm7","mm5")',
+	'&pxor	($BxC,"mm1")',				# [h=]Maj(a,b,c)
+	 '&pxor	("mm6","mm7")',				# Sigma0_512(a)
+	 '&movq	("mm5",&QWP(8*($i+5-1)%64,"esp"))',	# pre-load f
+	'&paddq	($BxC,"mm6")',				# h+=Sigma0(a)
+	 '&movq	("mm6",&QWP(8*($i+6-1)%64,"esp"))',	# pre-load g
+
+	'($A,$BxC) = ($BxC,$A); $i--;'
+	);
+}
+
+&set_label("00_47_ssse3",32);
+
+    for(;$j<16;$j++) {
+	my ($t0,$t2,$t1)=@X[2..4];
+	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+	&movdqa		($t2,@X[5]);
+	&movdqa		(@X[1],$t0);			# restore @X[1]
+	&palignr	($t0,@X[0],8);			# X[1..2]
+	&movdqa		(&QWP(16*($j%4),$frame),@X[4]);	# off-load @X[4]
+	 &palignr	($t2,@X[4],8);			# X[9..10]
+
+	&movdqa		($t1,$t0);
+	&psrlq		($t0,7);
+	 &paddq		(@X[0],$t2);			# X[0..1] += X[9..10]
+	&movdqa		($t2,$t1);
+	&psrlq		($t1,1);
+	&psllq		($t2,64-8);
+	&pxor		($t0,$t1);
+	&psrlq		($t1,8-1);
+	&pxor		($t0,$t2);
+	&psllq		($t2,8-1);
+	&pxor		($t0,$t1);
+	 &movdqa	($t1,@X[7]);
+	&pxor		($t0,$t2);			# sigma0(X[1..2])
+	 &movdqa	($t2,@X[7]);
+	 &psrlq		($t1,6);
+	&paddq		(@X[0],$t0);			# X[0..1] += sigma0(X[1..2])
+
+	&movdqa		($t0,@X[7]);
+	&psrlq		($t2,19);
+	&psllq		($t0,64-61);
+	&pxor		($t1,$t2);
+	&psrlq		($t2,61-19);
+	&pxor		($t1,$t0);
+	&psllq		($t0,61-19);
+	&pxor		($t1,$t2);
+	&movdqa		($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1]
+	&pxor		($t1,$t0);			# sigma0(X[1..2])
+	&movdqa		($t0,&QWP(16*($j%8),$K512));
+	 eval(shift(@insns));
+	&paddq		(@X[0],$t1);			# X[0..1] += sigma0(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddq		($t0,@X[0]);
+	 foreach(@insns) { eval; }
+	&movdqa		(&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i]
+
+	push(@X,shift(@X));				# rotate(@X)
+    }
+	&lea		($K512,&DWP(16*8,$K512));
+	&dec		("ecx");
+	&jnz		(&label("00_47_ssse3"));
+
+	&movdqa		(@X[1],&QWP(0,$K512));		# byte swap mask
+	&lea		($K512,&DWP(-80*8,$K512));	# rewind
+	&movdqu		(@X[0],&QWP(0,"ebx"));
+	&pshufb		(@X[0],@X[1]);
+
+    for ($j=0;$j<8;$j++) {	# load next or same block
+	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+	&movdqa		(&QWP(16*(($j-1)%4),$frame),@X[3])	if ($j>4); # off-load
+	&movdqa		(@X[3],&QWP(16*($j%8),$K512));
+	&movdqa		(@X[2],@X[1])				if ($j<7); # perpetuate byte swap mask
+	&movdqu		(@X[1],&QWP(16*($j+1),"ebx"))		if ($j<7); # next input
+	&movdqa		(@X[1],&QWP(16*(($j+1)%4),$frame))	if ($j==7);# restore @X[0]
+	&paddq		(@X[3],@X[0]);
+	&pshufb		(@X[1],@X[2])				if ($j<7);
+	 foreach(@insns) { eval; }
+	&movdqa		(&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i]
+
+	push(@X,shift(@X));				# rotate(@X)
+    }
+
+	#&movq	($A,$Asse2);			# load A-H
+	&movq	("mm1",$Bsse2);
+	&paddq	($A,"mm3");			# from BODY_00_15
+	#&movq	($BxC,$Csse2);
+	&movq	("mm3",$Dsse2);
+	#&movq	($E,$Esse2);
+	#&movq	("mm5",$Fsse2);
+	#&movq	("mm6",$Gsse2);
+	&movq	("mm7",$Hsse2);
+
+	&pxor	($BxC,"mm1");			# de-magic
+	&paddq	($A,&QWP(0,"esi"));
+	&paddq	("mm1",&QWP(8,"esi"));
+	&paddq	($BxC,&QWP(16,"esi"));
+	&paddq	("mm3",&QWP(24,"esi"));
+	&paddq	($E,&QWP(32,"esi"));
+	&paddq	("mm5",&QWP(40,"esi"));
+	&paddq	("mm6",&QWP(48,"esi"));
+	&paddq	("mm7",&QWP(56,"esi"));
+
+	&movq	(&QWP(0,"esi"),$A);
+	&movq	(&QWP(8,"esi"),"mm1");
+	&movq	(&QWP(16,"esi"),$BxC);
+	&movq	(&QWP(24,"esi"),"mm3");
+	&movq	(&QWP(32,"esi"),$E);
+	&movq	(&QWP(40,"esi"),"mm5");
+	&movq	(&QWP(48,"esi"),"mm6");
+	&movq	(&QWP(56,"esi"),"mm7");
+
+    	&cmp	("edi","eax")			# are we done yet?
+	&jb	(&label("loop_ssse3"));
+
+	&mov	("esp",&DWP(64+12,$frame));	# restore sp
+	&emms	();
+}
+&function_end_A();
+}
+&set_label("loop_x86",16);
+    # copy input block to stack reversing byte and qword order
+    for ($i=0;$i<8;$i++) {
+	&mov	("eax",&DWP($i*16+0,"edi"));
+	&mov	("ebx",&DWP($i*16+4,"edi"));
+	&mov	("ecx",&DWP($i*16+8,"edi"));
+	&mov	("edx",&DWP($i*16+12,"edi"));
+	&bswap	("eax");
+	&bswap	("ebx");
+	&bswap	("ecx");
+	&bswap	("edx");
+	&push	("eax");
+	&push	("ebx");
+	&push	("ecx");
+	&push	("edx");
+    }
+	&add	("edi",128);
+	&sub	("esp",9*8);		# place for T,A,B,C,D,E,F,G,H
+	&mov	(&DWP(8*(9+16)+4,"esp"),"edi");
+
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&lea	("edi",&DWP(8,"esp"));
+	&mov	("ecx",16);
+	&data_word(0xA5F3F689);		# rep movsd
+
+&set_label("00_15_x86",16);
+	&BODY_00_15_x86();
+
+	&cmp	(&LB("edx"),0x94);
+	&jne	(&label("00_15_x86"));
+
+&set_label("16_79_x86",16);
+	#define sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
+	#	LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+	#	HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
+	&mov	("ecx",&DWP(8*(9+15+16-1)+0,"esp"));
+	&mov	("edx",&DWP(8*(9+15+16-1)+4,"esp"));
+	&mov	("esi","ecx");
+
+	&shr	("ecx",1);	# lo>>1
+	&mov	("edi","edx");
+	&shr	("edx",1);	# hi>>1
+	&mov	("eax","ecx");
+	&shl	("esi",24);	# lo<<24
+	&mov	("ebx","edx");
+	&shl	("edi",24);	# hi<<24
+	&xor	("ebx","esi");
+
+	&shr	("ecx",7-1);	# lo>>7
+	&xor	("eax","edi");
+	&shr	("edx",7-1);	# hi>>7
+	&xor	("eax","ecx");
+	&shl	("esi",31-24);	# lo<<31
+	&xor	("ebx","edx");
+	&shl	("edi",25-24);	# hi<<25
+	&xor	("ebx","esi");
+
+	&shr	("ecx",8-7);	# lo>>8
+	&xor	("eax","edi");
+	&shr	("edx",8-7);	# hi>>8
+	&xor	("eax","ecx");
+	&shl	("edi",31-25);	# hi<<31
+	&xor	("ebx","edx");
+	&xor	("eax","edi");			# T1 = sigma0(X[-15])
+
+	&mov	(&DWP(0,"esp"),"eax");
+	&mov	(&DWP(4,"esp"),"ebx");		# put T1 away
+
+	#define sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+	#	LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+	#	HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+	&mov	("ecx",&DWP(8*(9+15+16-14)+0,"esp"));
+	&mov	("edx",&DWP(8*(9+15+16-14)+4,"esp"));
+	&mov	("esi","ecx");
+
+	&shr	("ecx",6);	# lo>>6
+	&mov	("edi","edx");
+	&shr	("edx",6);	# hi>>6
+	&mov	("eax","ecx");
+	&shl	("esi",3);	# lo<<3
+	&mov	("ebx","edx");
+	&shl	("edi",3);	# hi<<3
+	&xor	("eax","esi");
+
+	&shr	("ecx",19-6);	# lo>>19
+	&xor	("ebx","edi");
+	&shr	("edx",19-6);	# hi>>19
+	&xor	("eax","ecx");
+	&shl	("esi",13-3);	# lo<<13
+	&xor	("ebx","edx");
+	&shl	("edi",13-3);	# hi<<13
+	&xor	("ebx","esi");
+
+	&shr	("ecx",29-19);	# lo>>29
+	&xor	("eax","edi");
+	&shr	("edx",29-19);	# hi>>29
+	&xor	("ebx","ecx");
+	&shl	("edi",26-13);	# hi<<26
+	&xor	("eax","edx");
+	&xor	("eax","edi");			# sigma1(X[-2])
+
+	&mov	("ecx",&DWP(8*(9+15+16)+0,"esp"));
+	&mov	("edx",&DWP(8*(9+15+16)+4,"esp"));
+	&add	("eax",&DWP(0,"esp"));
+	&adc	("ebx",&DWP(4,"esp"));		# T1 = sigma1(X[-2])+T1
+	&mov	("esi",&DWP(8*(9+15+16-9)+0,"esp"));
+	&mov	("edi",&DWP(8*(9+15+16-9)+4,"esp"));
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# T1 += X[-16]
+	&add	("eax","esi");
+	&adc	("ebx","edi");			# T1 += X[-7]
+	&mov	(&DWP(8*(9+15)+0,"esp"),"eax");
+	&mov	(&DWP(8*(9+15)+4,"esp"),"ebx");	# save X[0]
+
+	&BODY_00_15_x86();
+
+	&cmp	(&LB("edx"),0x17);
+	&jne	(&label("16_79_x86"));
+
+	&mov	("esi",&DWP(8*(9+16+80)+0,"esp"));# ctx
+	&mov	("edi",&DWP(8*(9+16+80)+4,"esp"));# inp
+    for($i=0;$i<4;$i++) {
+	&mov	("eax",&DWP($i*16+0,"esi"));
+	&mov	("ebx",&DWP($i*16+4,"esi"));
+	&mov	("ecx",&DWP($i*16+8,"esi"));
+	&mov	("edx",&DWP($i*16+12,"esi"));
+	&add	("eax",&DWP(8+($i*16)+0,"esp"));
+	&adc	("ebx",&DWP(8+($i*16)+4,"esp"));
+	&mov	(&DWP($i*16+0,"esi"),"eax");
+	&mov	(&DWP($i*16+4,"esi"),"ebx");
+	&add	("ecx",&DWP(8+($i*16)+8,"esp"));
+	&adc	("edx",&DWP(8+($i*16)+12,"esp"));
+	&mov	(&DWP($i*16+8,"esi"),"ecx");
+	&mov	(&DWP($i*16+12,"esi"),"edx");
+    }
+	&add	("esp",8*(9+16+80));		# destroy frame
+	&sub	($K512,8*80);			# rewind K
+
+	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
+	&jb	(&label("loop_x86"));
+
+	&mov	("esp",&DWP(12,"esp"));		# restore sp
+&function_end_A();
+
+&set_label("K512",64);	# Yes! I keep it in the code segment!
+	&data_word(0xd728ae22,0x428a2f98);	# u64
+	&data_word(0x23ef65cd,0x71374491);	# u64
+	&data_word(0xec4d3b2f,0xb5c0fbcf);	# u64
+	&data_word(0x8189dbbc,0xe9b5dba5);	# u64
+	&data_word(0xf348b538,0x3956c25b);	# u64
+	&data_word(0xb605d019,0x59f111f1);	# u64
+	&data_word(0xaf194f9b,0x923f82a4);	# u64
+	&data_word(0xda6d8118,0xab1c5ed5);	# u64
+	&data_word(0xa3030242,0xd807aa98);	# u64
+	&data_word(0x45706fbe,0x12835b01);	# u64
+	&data_word(0x4ee4b28c,0x243185be);	# u64
+	&data_word(0xd5ffb4e2,0x550c7dc3);	# u64
+	&data_word(0xf27b896f,0x72be5d74);	# u64
+	&data_word(0x3b1696b1,0x80deb1fe);	# u64
+	&data_word(0x25c71235,0x9bdc06a7);	# u64
+	&data_word(0xcf692694,0xc19bf174);	# u64
+	&data_word(0x9ef14ad2,0xe49b69c1);	# u64
+	&data_word(0x384f25e3,0xefbe4786);	# u64
+	&data_word(0x8b8cd5b5,0x0fc19dc6);	# u64
+	&data_word(0x77ac9c65,0x240ca1cc);	# u64
+	&data_word(0x592b0275,0x2de92c6f);	# u64
+	&data_word(0x6ea6e483,0x4a7484aa);	# u64
+	&data_word(0xbd41fbd4,0x5cb0a9dc);	# u64
+	&data_word(0x831153b5,0x76f988da);	# u64
+	&data_word(0xee66dfab,0x983e5152);	# u64
+	&data_word(0x2db43210,0xa831c66d);	# u64
+	&data_word(0x98fb213f,0xb00327c8);	# u64
+	&data_word(0xbeef0ee4,0xbf597fc7);	# u64
+	&data_word(0x3da88fc2,0xc6e00bf3);	# u64
+	&data_word(0x930aa725,0xd5a79147);	# u64
+	&data_word(0xe003826f,0x06ca6351);	# u64
+	&data_word(0x0a0e6e70,0x14292967);	# u64
+	&data_word(0x46d22ffc,0x27b70a85);	# u64
+	&data_word(0x5c26c926,0x2e1b2138);	# u64
+	&data_word(0x5ac42aed,0x4d2c6dfc);	# u64
+	&data_word(0x9d95b3df,0x53380d13);	# u64
+	&data_word(0x8baf63de,0x650a7354);	# u64
+	&data_word(0x3c77b2a8,0x766a0abb);	# u64
+	&data_word(0x47edaee6,0x81c2c92e);	# u64
+	&data_word(0x1482353b,0x92722c85);	# u64
+	&data_word(0x4cf10364,0xa2bfe8a1);	# u64
+	&data_word(0xbc423001,0xa81a664b);	# u64
+	&data_word(0xd0f89791,0xc24b8b70);	# u64
+	&data_word(0x0654be30,0xc76c51a3);	# u64
+	&data_word(0xd6ef5218,0xd192e819);	# u64
+	&data_word(0x5565a910,0xd6990624);	# u64
+	&data_word(0x5771202a,0xf40e3585);	# u64
+	&data_word(0x32bbd1b8,0x106aa070);	# u64
+	&data_word(0xb8d2d0c8,0x19a4c116);	# u64
+	&data_word(0x5141ab53,0x1e376c08);	# u64
+	&data_word(0xdf8eeb99,0x2748774c);	# u64
+	&data_word(0xe19b48a8,0x34b0bcb5);	# u64
+	&data_word(0xc5c95a63,0x391c0cb3);	# u64
+	&data_word(0xe3418acb,0x4ed8aa4a);	# u64
+	&data_word(0x7763e373,0x5b9cca4f);	# u64
+	&data_word(0xd6b2b8a3,0x682e6ff3);	# u64
+	&data_word(0x5defb2fc,0x748f82ee);	# u64
+	&data_word(0x43172f60,0x78a5636f);	# u64
+	&data_word(0xa1f0ab72,0x84c87814);	# u64
+	&data_word(0x1a6439ec,0x8cc70208);	# u64
+	&data_word(0x23631e28,0x90befffa);	# u64
+	&data_word(0xde82bde9,0xa4506ceb);	# u64
+	&data_word(0xb2c67915,0xbef9a3f7);	# u64
+	&data_word(0xe372532b,0xc67178f2);	# u64
+	&data_word(0xea26619c,0xca273ece);	# u64
+	&data_word(0x21c0c207,0xd186b8c7);	# u64
+	&data_word(0xcde0eb1e,0xeada7dd6);	# u64
+	&data_word(0xee6ed178,0xf57d4f7f);	# u64
+	&data_word(0x72176fba,0x06f067aa);	# u64
+	&data_word(0xa2c898a6,0x0a637dc5);	# u64
+	&data_word(0xbef90dae,0x113f9804);	# u64
+	&data_word(0x131c471b,0x1b710b35);	# u64
+	&data_word(0x23047d84,0x28db77f5);	# u64
+	&data_word(0x40c72493,0x32caab7b);	# u64
+	&data_word(0x15c9bebc,0x3c9ebe0a);	# u64
+	&data_word(0x9c100d4c,0x431d67c4);	# u64
+	&data_word(0xcb3e42b6,0x4cc5d4be);	# u64
+	&data_word(0xfc657e2a,0x597f299c);	# u64
+	&data_word(0x3ad6faec,0x5fcb6fab);	# u64
+	&data_word(0x4a475817,0x6c44198c);	# u64
+
+	&data_word(0x04050607,0x00010203);	# byte swap
+	&data_word(0x0c0d0e0f,0x08090a0b);	# mask
+&function_end_B("sha512_block_data_order");
+&asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/crypto/sha/asm/sha512-armv4.pl b/crypto/sha/asm/sha512-armv4.pl
new file mode 100644
index 0000000..ae2cbfc
--- /dev/null
+++ b/crypto/sha/asm/sha512-armv4.pl
@@ -0,0 +1,604 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# SHA512 block procedure for ARMv4. September 2007.
+
+# This code is ~4.5 (four and a half) times faster than code generated
+# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+# Xscale PXA250 core].
+#
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 6% improvement on
+# Cortex A8 core and ~40 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 7%
+# improvement on Cortex A8 core and ~38 cycles per byte.
+
+# March 2011.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is a disappointing result.
+# Technical writers asserted that the 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, and for NEON-only sequences IPC(*) was found to
+# be limited to 1:-( 0.33 and 0.66 were measured for sequences with
+# ILPs(*) of 1 and 2 respectively. This in turn means that one can
+# find oneself striving, as I did here, merely to match the IPC
+# delivered by Cortex A8 [for reference, it's 0.5 for an ILP of 1,
+# and 1 for higher ILPs].
+#
+# (*) ILP, instruction-level parallelism, how many instructions
+#     *can* execute at the same time. IPC, instructions per cycle,
+#     indicates how many instructions actually execute.
+
+# Byte order [in]dependence. =========================================
+#
+# Originally the caller was expected to maintain a specific *dword* order
+# in h[0-7], namely with the most significant dword at the *lower* address,
+# which was reflected in the two parameters below as 0 and 4. Now the
+# caller is expected to maintain native byte order for whole 64-bit values.
+$hi="HI";
+$lo="LO";
+# ====================================================================
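+# For exposition, a C sketch of the convention (assuming the usual
+# union trick): the 32-bit halves of each 64-bit state word sit at
+# byte offsets LO and HI, which swap with the host byte order:
+#
+#	union { uint64_t u; uint32_t w[2]; } h;
+#	uint32_t lo = *(uint32_t *)((char *)&h + LO);	/* LE: LO=0, HI=4 */
+#	uint32_t hi = *(uint32_t *)((char *)&h + HI);	/* BE: HI=0, LO=4 */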
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$ctx="r0";	# parameter block
+$inp="r1";
+$len="r2";
+
+$Tlo="r3";
+$Thi="r4";
+$Alo="r5";
+$Ahi="r6";
+$Elo="r7";
+$Ehi="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+############	r13 is stack pointer
+$Ktbl="r14";
+############	r15 is program counter
+
+$Aoff=8*0;
+$Boff=8*1;
+$Coff=8*2;
+$Doff=8*3;
+$Eoff=8*4;
+$Foff=8*5;
+$Goff=8*6;
+$Hoff=8*7;
+$Xoff=8*8;
+
+sub BODY_00_15() {
+my $magic = shift;
+$code.=<<___;
+	@ Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+	@ LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+	@ HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+	mov	$t0,$Elo,lsr#14
+	str	$Tlo,[sp,#$Xoff+0]
+	mov	$t1,$Ehi,lsr#14
+	str	$Thi,[sp,#$Xoff+4]
+	eor	$t0,$t0,$Ehi,lsl#18
+	ldr	$t2,[sp,#$Hoff+0]	@ h.lo
+	eor	$t1,$t1,$Elo,lsl#18
+	ldr	$t3,[sp,#$Hoff+4]	@ h.hi
+	eor	$t0,$t0,$Elo,lsr#18
+	eor	$t1,$t1,$Ehi,lsr#18
+	eor	$t0,$t0,$Ehi,lsl#14
+	eor	$t1,$t1,$Elo,lsl#14
+	eor	$t0,$t0,$Ehi,lsr#9
+	eor	$t1,$t1,$Elo,lsr#9
+	eor	$t0,$t0,$Elo,lsl#23
+	eor	$t1,$t1,$Ehi,lsl#23	@ Sigma1(e)
+	adds	$Tlo,$Tlo,$t0
+	ldr	$t0,[sp,#$Foff+0]	@ f.lo
+	adc	$Thi,$Thi,$t1		@ T += Sigma1(e)
+	ldr	$t1,[sp,#$Foff+4]	@ f.hi
+	adds	$Tlo,$Tlo,$t2
+	ldr	$t2,[sp,#$Goff+0]	@ g.lo
+	adc	$Thi,$Thi,$t3		@ T += h
+	ldr	$t3,[sp,#$Goff+4]	@ g.hi
+
+	eor	$t0,$t0,$t2
+	str	$Elo,[sp,#$Eoff+0]
+	eor	$t1,$t1,$t3
+	str	$Ehi,[sp,#$Eoff+4]
+	and	$t0,$t0,$Elo
+	str	$Alo,[sp,#$Aoff+0]
+	and	$t1,$t1,$Ehi
+	str	$Ahi,[sp,#$Aoff+4]
+	eor	$t0,$t0,$t2
+	ldr	$t2,[$Ktbl,#$lo]	@ K[i].lo
+	eor	$t1,$t1,$t3		@ Ch(e,f,g)
+	ldr	$t3,[$Ktbl,#$hi]	@ K[i].hi
+
+	adds	$Tlo,$Tlo,$t0
+	ldr	$Elo,[sp,#$Doff+0]	@ d.lo
+	adc	$Thi,$Thi,$t1		@ T += Ch(e,f,g)
+	ldr	$Ehi,[sp,#$Doff+4]	@ d.hi
+	adds	$Tlo,$Tlo,$t2
+	and	$t0,$t2,#0xff
+	adc	$Thi,$Thi,$t3		@ T += K[i]
+	adds	$Elo,$Elo,$Tlo
+	ldr	$t2,[sp,#$Boff+0]	@ b.lo
+	adc	$Ehi,$Ehi,$Thi		@ d += T
+	teq	$t0,#$magic
+
+	ldr	$t3,[sp,#$Coff+0]	@ c.lo
+	orreq	$Ktbl,$Ktbl,#1
+	@ Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+	@ LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+	@ HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+	mov	$t0,$Alo,lsr#28
+	mov	$t1,$Ahi,lsr#28
+	eor	$t0,$t0,$Ahi,lsl#4
+	eor	$t1,$t1,$Alo,lsl#4
+	eor	$t0,$t0,$Ahi,lsr#2
+	eor	$t1,$t1,$Alo,lsr#2
+	eor	$t0,$t0,$Alo,lsl#30
+	eor	$t1,$t1,$Ahi,lsl#30
+	eor	$t0,$t0,$Ahi,lsr#7
+	eor	$t1,$t1,$Alo,lsr#7
+	eor	$t0,$t0,$Alo,lsl#25
+	eor	$t1,$t1,$Ahi,lsl#25	@ Sigma0(a)
+	adds	$Tlo,$Tlo,$t0
+	and	$t0,$Alo,$t2
+	adc	$Thi,$Thi,$t1		@ T += Sigma0(a)
+
+	ldr	$t1,[sp,#$Boff+4]	@ b.hi
+	orr	$Alo,$Alo,$t2
+	ldr	$t2,[sp,#$Coff+4]	@ c.hi
+	and	$Alo,$Alo,$t3
+	and	$t3,$Ahi,$t1
+	orr	$Ahi,$Ahi,$t1
+	orr	$Alo,$Alo,$t0		@ Maj(a,b,c).lo
+	and	$Ahi,$Ahi,$t2
+	adds	$Alo,$Alo,$Tlo
+	orr	$Ahi,$Ahi,$t3		@ Maj(a,b,c).hi
+	sub	sp,sp,#8
+	adc	$Ahi,$Ahi,$Thi		@ h += T
+	tst	$Ktbl,#1
+	add	$Ktbl,$Ktbl,#8
+___
+}
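+
+# For exposition, a C sketch of the trick used in BODY_00_15 above:
+# each 64-bit rotate is synthesized from 32-bit shifts over the {hi,lo}
+# register pair, and three such rotates xor'ed together give Sigma1(e):
+#
+#	/* ROTR64(x,n) for 0 < n < 32, x carried as 32-bit halves: */
+#	r.lo = (x.lo >> n) | (x.hi << (32-n));
+#	r.hi = (x.hi >> n) | (x.lo << (32-n));
+#	/* for n > 32 the halves swap roles, e.g. ROTR64(x,41):
+#	   r.lo = (x.hi >> 9) | (x.lo << 23), as in the code above */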
+$code=<<___;
+#if defined(__arm__)
+#include "arm_arch.h"
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1)	.word	lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1)	.word	hi0,lo0, hi1,lo1
+#endif
+
+.text
+.code	32
+.type	K512,%object
+.align	5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size	K512,.-K512
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-sha512_block_data_order
+.skip	32-4
+
+.global	sha512_block_data_order
+.type	sha512_block_data_order,%function
+sha512_block_data_order:
+	sub	r3,pc,#8		@ sha512_block_data_order
+	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
+#if __ARM_ARCH__>=7
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+	tst	r12,#1
+	bne	.LNEON
+#endif
+	stmdb	sp!,{r4-r12,lr}
+	sub	$Ktbl,r3,#672		@ K512
+	sub	sp,sp,#9*8
+
+	ldr	$Elo,[$ctx,#$Eoff+$lo]
+	ldr	$Ehi,[$ctx,#$Eoff+$hi]
+	ldr	$t0, [$ctx,#$Goff+$lo]
+	ldr	$t1, [$ctx,#$Goff+$hi]
+	ldr	$t2, [$ctx,#$Hoff+$lo]
+	ldr	$t3, [$ctx,#$Hoff+$hi]
+.Loop:
+	str	$t0, [sp,#$Goff+0]
+	str	$t1, [sp,#$Goff+4]
+	str	$t2, [sp,#$Hoff+0]
+	str	$t3, [sp,#$Hoff+4]
+	ldr	$Alo,[$ctx,#$Aoff+$lo]
+	ldr	$Ahi,[$ctx,#$Aoff+$hi]
+	ldr	$Tlo,[$ctx,#$Boff+$lo]
+	ldr	$Thi,[$ctx,#$Boff+$hi]
+	ldr	$t0, [$ctx,#$Coff+$lo]
+	ldr	$t1, [$ctx,#$Coff+$hi]
+	ldr	$t2, [$ctx,#$Doff+$lo]
+	ldr	$t3, [$ctx,#$Doff+$hi]
+	str	$Tlo,[sp,#$Boff+0]
+	str	$Thi,[sp,#$Boff+4]
+	str	$t0, [sp,#$Coff+0]
+	str	$t1, [sp,#$Coff+4]
+	str	$t2, [sp,#$Doff+0]
+	str	$t3, [sp,#$Doff+4]
+	ldr	$Tlo,[$ctx,#$Foff+$lo]
+	ldr	$Thi,[$ctx,#$Foff+$hi]
+	str	$Tlo,[sp,#$Foff+0]
+	str	$Thi,[sp,#$Foff+4]
+
+.L00_15:
+#if __ARM_ARCH__<7
+	ldrb	$Tlo,[$inp,#7]
+	ldrb	$t0, [$inp,#6]
+	ldrb	$t1, [$inp,#5]
+	ldrb	$t2, [$inp,#4]
+	ldrb	$Thi,[$inp,#3]
+	ldrb	$t3, [$inp,#2]
+	orr	$Tlo,$Tlo,$t0,lsl#8
+	ldrb	$t0, [$inp,#1]
+	orr	$Tlo,$Tlo,$t1,lsl#16
+	ldrb	$t1, [$inp],#8
+	orr	$Tlo,$Tlo,$t2,lsl#24
+	orr	$Thi,$Thi,$t3,lsl#8
+	orr	$Thi,$Thi,$t0,lsl#16
+	orr	$Thi,$Thi,$t1,lsl#24
+#else
+	ldr	$Tlo,[$inp,#4]
+	ldr	$Thi,[$inp],#8
+#ifdef __ARMEL__
+	rev	$Tlo,$Tlo
+	rev	$Thi,$Thi
+#endif
+#endif
+___
+	&BODY_00_15(0x94);
+$code.=<<___;
+	tst	$Ktbl,#1
+	beq	.L00_15
+	ldr	$t0,[sp,#`$Xoff+8*(16-1)`+0]
+	ldr	$t1,[sp,#`$Xoff+8*(16-1)`+4]
+	bic	$Ktbl,$Ktbl,#1
+.L16_79:
+	@ sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
+	@ LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+	@ HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
+	mov	$Tlo,$t0,lsr#1
+	ldr	$t2,[sp,#`$Xoff+8*(16-14)`+0]
+	mov	$Thi,$t1,lsr#1
+	ldr	$t3,[sp,#`$Xoff+8*(16-14)`+4]
+	eor	$Tlo,$Tlo,$t1,lsl#31
+	eor	$Thi,$Thi,$t0,lsl#31
+	eor	$Tlo,$Tlo,$t0,lsr#8
+	eor	$Thi,$Thi,$t1,lsr#8
+	eor	$Tlo,$Tlo,$t1,lsl#24
+	eor	$Thi,$Thi,$t0,lsl#24
+	eor	$Tlo,$Tlo,$t0,lsr#7
+	eor	$Thi,$Thi,$t1,lsr#7
+	eor	$Tlo,$Tlo,$t1,lsl#25
+
+	@ sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+	@ LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+	@ HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+	mov	$t0,$t2,lsr#19
+	mov	$t1,$t3,lsr#19
+	eor	$t0,$t0,$t3,lsl#13
+	eor	$t1,$t1,$t2,lsl#13
+	eor	$t0,$t0,$t3,lsr#29
+	eor	$t1,$t1,$t2,lsr#29
+	eor	$t0,$t0,$t2,lsl#3
+	eor	$t1,$t1,$t3,lsl#3
+	eor	$t0,$t0,$t2,lsr#6
+	eor	$t1,$t1,$t3,lsr#6
+	ldr	$t2,[sp,#`$Xoff+8*(16-9)`+0]
+	eor	$t0,$t0,$t3,lsl#26
+
+	ldr	$t3,[sp,#`$Xoff+8*(16-9)`+4]
+	adds	$Tlo,$Tlo,$t0
+	ldr	$t0,[sp,#`$Xoff+8*16`+0]
+	adc	$Thi,$Thi,$t1
+
+	ldr	$t1,[sp,#`$Xoff+8*16`+4]
+	adds	$Tlo,$Tlo,$t2
+	adc	$Thi,$Thi,$t3
+	adds	$Tlo,$Tlo,$t0
+	adc	$Thi,$Thi,$t1
+___
+	&BODY_00_15(0x17);
+$code.=<<___;
+	ldreq	$t0,[sp,#`$Xoff+8*(16-1)`+0]
+	ldreq	$t1,[sp,#`$Xoff+8*(16-1)`+4]
+	beq	.L16_79
+	bic	$Ktbl,$Ktbl,#1
+
+	ldr	$Tlo,[sp,#$Boff+0]
+	ldr	$Thi,[sp,#$Boff+4]
+	ldr	$t0, [$ctx,#$Aoff+$lo]
+	ldr	$t1, [$ctx,#$Aoff+$hi]
+	ldr	$t2, [$ctx,#$Boff+$lo]
+	ldr	$t3, [$ctx,#$Boff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Aoff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Aoff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Boff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Boff+$hi]
+
+	ldr	$Alo,[sp,#$Coff+0]
+	ldr	$Ahi,[sp,#$Coff+4]
+	ldr	$Tlo,[sp,#$Doff+0]
+	ldr	$Thi,[sp,#$Doff+4]
+	ldr	$t0, [$ctx,#$Coff+$lo]
+	ldr	$t1, [$ctx,#$Coff+$hi]
+	ldr	$t2, [$ctx,#$Doff+$lo]
+	ldr	$t3, [$ctx,#$Doff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Coff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Coff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Doff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Doff+$hi]
+
+	ldr	$Tlo,[sp,#$Foff+0]
+	ldr	$Thi,[sp,#$Foff+4]
+	ldr	$t0, [$ctx,#$Eoff+$lo]
+	ldr	$t1, [$ctx,#$Eoff+$hi]
+	ldr	$t2, [$ctx,#$Foff+$lo]
+	ldr	$t3, [$ctx,#$Foff+$hi]
+	adds	$Elo,$Elo,$t0
+	str	$Elo,[$ctx,#$Eoff+$lo]
+	adc	$Ehi,$Ehi,$t1
+	str	$Ehi,[$ctx,#$Eoff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Foff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Foff+$hi]
+
+	ldr	$Alo,[sp,#$Goff+0]
+	ldr	$Ahi,[sp,#$Goff+4]
+	ldr	$Tlo,[sp,#$Hoff+0]
+	ldr	$Thi,[sp,#$Hoff+4]
+	ldr	$t0, [$ctx,#$Goff+$lo]
+	ldr	$t1, [$ctx,#$Goff+$hi]
+	ldr	$t2, [$ctx,#$Hoff+$lo]
+	ldr	$t3, [$ctx,#$Hoff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Goff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Goff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Hoff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Hoff+$hi]
+
+	add	sp,sp,#640
+	sub	$Ktbl,$Ktbl,#640
+
+	teq	$inp,$len
+	bne	.Loop
+
+	add	sp,sp,#8*9		@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia	sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+___
+
+{
+my @Sigma0=(28,34,39);
+my @Sigma1=(14,18,41);
+my @sigma0=(1, 8, 7);
+my @sigma1=(19,61,6);
+
+my $Ktbl="r3";
+my $cnt="r12";	# volatile register known as ip, intra-procedure-call scratch
+
+my @X=map("d$_",(0..15));
+my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));
+
+sub NEON_00_15() {
+my $i=shift;
+my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31));	# temps
+
+$code.=<<___ if ($i<16 || $i&1);
+	vshr.u64	$t0,$e,#@Sigma1[0]	@ $i
+#if $i<16
+	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
+#endif
+	vshr.u64	$t1,$e,#@Sigma1[1]
+#if $i>0
+	 vadd.i64	$a,$Maj			@ h+=Maj from the past
+#endif
+	vshr.u64	$t2,$e,#@Sigma1[2]
+___
+$code.=<<___;
+	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
+	vsli.64		$t0,$e,#`64-@Sigma1[0]`
+	vsli.64		$t1,$e,#`64-@Sigma1[1]`
+	vmov		$Ch,$e
+	vsli.64		$t2,$e,#`64-@Sigma1[2]`
+#if $i<16 && defined(__ARMEL__)
+	vrev64.8	@X[$i],@X[$i]
+#endif
+	veor		$t1,$t0
+	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
+	vshr.u64	$t0,$a,#@Sigma0[0]
+	veor		$t2,$t1			@ Sigma1(e)
+	vadd.i64	$T1,$Ch,$h
+	vshr.u64	$t1,$a,#@Sigma0[1]
+	vsli.64		$t0,$a,#`64-@Sigma0[0]`
+	vadd.i64	$T1,$t2
+	vshr.u64	$t2,$a,#@Sigma0[2]
+	vadd.i64	$K,@X[$i%16]
+	vsli.64		$t1,$a,#`64-@Sigma0[1]`
+	veor		$Maj,$a,$b
+	vsli.64		$t2,$a,#`64-@Sigma0[2]`
+	veor		$h,$t0,$t1
+	vadd.i64	$T1,$K
+	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
+	veor		$h,$t2			@ Sigma0(a)
+	vadd.i64	$d,$T1
+	vadd.i64	$Maj,$T1
+	@ vadd.i64	$h,$Maj
+___
+}
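+
+# For exposition: NEON lacks a 64-bit rotate, so each ROTR64(e,n) above
+# is a VSHR/VSLI pair -- the shift right produces e>>n and the
+# shift-left-and-insert deposits e<<(64-n) into the vacated bits:
+#
+#	t  = e >> n;		/* vshr.u64  t,e,#n      */
+#	t |= e << (64 - n);	/* vsli.64   t,e,#(64-n) */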
+
+sub NEON_16_79() {
+my $i=shift;
+
+if ($i&1)	{ &NEON_00_15($i,@_); return; }
+
+# 2x-vectorized, therefore runs every 2nd round
+my @X=map("q$_",(0..7));			# view @X as 128-bit vector
+my ($t0,$t1,$s0,$s1) = map("q$_",(12..15));	# temps
+my ($d0,$d1,$d2) = map("d$_",(24..26));		# temps from NEON_00_15
+my $e=@_[4];					# $e from NEON_00_15
+$i /= 2;
+$code.=<<___;
+	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
+	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
+	 vadd.i64	@_[0],d30			@ h+=Maj from the past
+	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
+	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
+	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
+	vsli.64		$t1,@X[($i+7)%8],#`64-@sigma1[1]`
+	veor		$s1,$t0
+	vshr.u64	$t0,$s0,#@sigma0[0]
+	veor		$s1,$t1				@ sigma1(X[i+14])
+	vshr.u64	$t1,$s0,#@sigma0[1]
+	vadd.i64	@X[$i%8],$s1
+	vshr.u64	$s1,$s0,#@sigma0[2]
+	vsli.64		$t0,$s0,#`64-@sigma0[0]`
+	vsli.64		$t1,$s0,#`64-@sigma0[1]`
+	vext.8		$s0,@X[($i+4)%8],@X[($i+5)%8],#8	@ X[i+9]
+	veor		$s1,$t0
+	vshr.u64	$d0,$e,#@Sigma1[0]		@ from NEON_00_15
+	vadd.i64	@X[$i%8],$s0
+	vshr.u64	$d1,$e,#@Sigma1[1]		@ from NEON_00_15
+	veor		$s1,$t1				@ sigma0(X[i+1])
+	vshr.u64	$d2,$e,#@Sigma1[2]		@ from NEON_00_15
+	vadd.i64	@X[$i%8],$s1
+___
+	&NEON_00_15(2*$i,@_);
+}
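+
+# For reference, a C sketch (exposition only) of the message schedule
+# that .L16_79 and NEON_16_79 expand; the NEON variant is 2x-vectorized
+# and therefore covers two rounds' worth of schedule per pass:
+#
+#	#define ROTR(x,n) (((x)>>(n)) | ((x)<<(64-(n))))
+#	#define sigma0(x) (ROTR(x,1)  ^ ROTR(x,8)  ^ ((x)>>7))
+#	#define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ ((x)>>6))
+#	for (t = 16; t < 80; t++)
+#		W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16];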
+
+$code.=<<___;
+#if __ARM_ARCH__>=7
+.fpu	neon
+
+.align	4
+.LNEON:
+	dmb				@ errata #451034 on early Cortex A8
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+	sub	$Ktbl,r3,#672		@ K512
+	vldmia	$ctx,{$A-$H}		@ load context
+.Loop_neon:
+___
+for($i=0;$i<16;$i++)	{ &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	mov		$cnt,#4
+.L16_79_neon:
+	subs		$cnt,#1
+___
+for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	bne		.L16_79_neon
+
+	 vadd.i64	$A,d30		@ h+=Maj from the past
+	vldmia		$ctx,{d24-d31}	@ load context to temp
+	vadd.i64	q8,q12		@ vectorized accumulate
+	vadd.i64	q9,q13
+	vadd.i64	q10,q14
+	vadd.i64	q11,q15
+	vstmia		$ctx,{$A-$H}	@ save context
+	teq		$inp,$len
+	sub		$Ktbl,#640	@ rewind K512
+	bne		.Loop_neon
+
+	vldmia	sp!,{d8-d15}		@ epilogue
+	bx	lr
+#endif
+___
+}
+$code.=<<___;
+.size	sha512_block_data_order,.-sha512_block_data_order
+.asciz	"SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+.comm	OPENSSL_armcap_P,4,4
+
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
new file mode 100644
index 0000000..0c1285f
--- /dev/null
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -0,0 +1,2152 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. Rights for redistribution and usage in source and binary
+# forms are granted according to the OpenSSL license.
+# ====================================================================
+#
+# sha256/512_block procedure for x86_64.
+#
+# 40% improvement over compiler-generated code on Opteron. On EM64T
+# sha256 was observed to run >80% faster and sha512 >40%. No magical
+# tricks, just straight implementation... I really wonder why gcc
+# [being armed with inline assembler] fails to generate equally fast
+# code. The only thing which is cool about this module is that the very
+# same instruction sequence is used for both SHA-256 and SHA-512. In
+# the former case the instructions operate on 32-bit operands, in the
+# latter on 64-bit ones. All I had to do was get one flavor right;
+# the other one passed the test right away:-)
+#
+# sha256_block runs in ~1005 cycles on Opteron, which gives you
+# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
+# frequency in GHz. sha512_block runs in ~1275 cycles, which results
+# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
+# Well, if you compare it to IA-64 implementation, which maintains
+# X[16] in register bank[!], tends to 4 instructions per CPU clock
+# cycle and runs in 1003 cycles, 1275 is a very good result for the
+# 3-way issue Opteron pipeline and X[16] maintained in memory. So *if*
+# there is a way to improve it, *then* the only way would be to try to
+# offload X[16] updates to SSE unit, but that would require "deeper"
+# loop unroll, which in turn would naturally cause size blow-up, not
+# to mention increased complexity! And once again, only *if* it's
+# actually possible to noticeably improve overall ILP, instruction
+# level parallelism, on a given CPU implementation in this case.
+#
+# Special note on Intel EM64T. While the Opteron CPU exhibits a perfect
+# performance ratio of 1.5 between the 64- and 32-bit flavors [see above],
+# [currently available] EM64T CPUs apparently are far from it. On the
+# contrary, the 64-bit version, sha512_block, is ~30% *slower* than the
+# 32-bit sha256_block:-( This is presumably because 64-bit shifts/rotates
+# are apparently not atomic instructions, but are implemented in microcode.
+#
+# May 2012.
+#
+# An optimization including one of Pavel Semjanov's ideas, alternative
+# Maj, resulted in >=5% improvement on most CPUs, +20% for SHA256 and
+# unfortunately -2% for SHA512 on P4 [which nobody should care about
+# that much].
+#
+# June 2012.
+#
+# Add SIMD code paths, see below for improvement coefficients. An SSSE3
+# code path was not attempted for SHA512, because the estimated
+# improvement, noticeably less than 9%, is not high enough to justify
+# the effort, at least not on pre-AVX processors. [Obviously excluding
+# VIA Nano, but it has a SHA512 instruction that is faster and
+# should be used instead.] For reference, the corresponding estimated
+# upper limit for improvement for SSSE3 SHA256 is 28%. The fact that
+# higher coefficients are observed on VIA Nano and Bulldozer has more
+# to do with specifics of their architecture [which is topic for
+# separate discussion].
+#
+# November 2012.
+#
+# Add AVX2 code path. Two consecutive input blocks are loaded into
+# 256-bit %ymm registers, with data from the first block in the least
+# significant 128-bit halves and data from the second in the most
+# significant. The data is then processed with the same SIMD instruction
+# sequence as for AVX, but with %ymm registers as operands. A side effect
+# is an increased stack frame, 448 additional bytes in SHA256 and 1152
+# in SHA512.
+
+######################################################################
+# Current performance in cycles per processed byte (less is better):
+#
+#		SHA256	SSSE3       AVX/XOP(*)	    SHA512  AVX/XOP(*)
+#
+# AMD K8	14.9	-	    -		    9.57    -
+# P4		17.3	-	    -		    30.8    -
+# Core 2	15.6	13.8(+13%)  -		    9.97    -
+# Westmere	14.8	12.3(+19%)  -		    9.58    -
+# Sandy Bridge	17.4	14.2(+23%)  11.6(+50%(**))  11.2    8.10(+38%(**))
+# Ivy Bridge	12.6	10.5(+20%)  10.3(+22%)	    8.17    7.22(+13%)
+# Haswell	12.2	9.28(+31%)  7.80(+56%)	    7.66    5.40(+42%)
+# Bulldozer	21.1	13.6(+54%)  13.6(+54%(***)) 13.5    8.58(+57%)
+# VIA Nano	23.0	16.5(+39%)  -		    14.7    -
+# Atom		23.0	18.9(+22%)  -		    14.7    -
+#
+# (*)	whichever is best applicable;
+# (**)	the switch from ror to shrd accounts for a fair share of the
+#	improvement;
+# (***)	execution time is fully determined by the remaining integer-only
+#	part, body_00_15; reducing the amount of SIMD instructions
+#	below a certain limit makes no difference/sense; to conserve
+#	space the SHA256 XOP code path is therefore omitted;
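+#
+# To make the "same sequence, two word sizes" point above concrete,
+# here is a C sketch (exposition only) of the round both flavors
+# implement; only the word type and the rotation counts, configured
+# below, differ:
+#
+#	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i];
+#	T2 = Sigma0(a) + Maj(a,b,c);
+#	h = g; g = f; f = e; e = d + T1;
+#	d = c; c = b; b = a; a = T1 + T2;
+#	/* SHA-256: uint32_t, Sigma0 rotates 2,13,22,  Sigma1 6,11,25  */
+#	/* SHA-512: uint64_t, Sigma0 rotates 28,34,39, Sigma1 14,18,41 */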
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
+
+open OUT,"| \"$^X\" $xlate $flavour";
+*STDOUT=*OUT;
+
+if ($output =~ /512/) {
+	$func="sha512_block_data_order";
+	$TABLE="K512";
+	$SZ=8;
+	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
+					"%r8", "%r9", "%r10","%r11");
+	($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
+	@Sigma0=(28,34,39);
+	@Sigma1=(14,18,41);
+	@sigma0=(1,  8, 7);
+	@sigma1=(19,61, 6);
+	$rounds=80;
+} else {
+	$func="sha256_block_data_order";
+	$TABLE="K256";
+	$SZ=4;
+	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
+					"%r8d","%r9d","%r10d","%r11d");
+	($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
+	@Sigma0=( 2,13,22);
+	@Sigma1=( 6,11,25);
+	@sigma0=( 7,18, 3);
+	@sigma1=(17,19,10);
+	$rounds=64;
+}
+
+$ctx="%rdi";	# 1st arg, zapped by $a3
+$inp="%rsi";	# 2nd arg
+$Tbl="%rbp";
+
+$_ctx="16*$SZ+0*8(%rsp)";
+$_inp="16*$SZ+1*8(%rsp)";
+$_end="16*$SZ+2*8(%rsp)";
+$_rsp="16*$SZ+3*8(%rsp)";
+$framesz="16*$SZ+4*8";
+
+
+sub ROUND_00_15()
+{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+  my $STRIDE=$SZ;
+     $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));
+
+$code.=<<___;
+	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
+	mov	$f,$a2
+
+	xor	$e,$a0
+	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
+	xor	$g,$a2			# f^g
+
+	mov	$T1,`$SZ*($i&0xf)`(%rsp)
+	xor	$a,$a1
+	and	$e,$a2			# (f^g)&e
+
+	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
+	add	$h,$T1			# T1+=h
+	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
+
+	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
+	xor	$e,$a0
+	add	$a2,$T1			# T1+=Ch(e,f,g)
+
+	mov	$a,$a2
+	add	($Tbl),$T1		# T1+=K[round]
+	xor	$a,$a1
+
+	xor	$b,$a2			# a^b, b^c in next round
+	ror	\$$Sigma1[0],$a0	# Sigma1(e)
+	mov	$b,$h
+
+	and	$a2,$a3
+	ror	\$$Sigma0[0],$a1	# Sigma0(a)
+	add	$a0,$T1			# T1+=Sigma1(e)
+
+	xor	$a3,$h			# h=Maj(a,b,c)=Ch(a^b,c,b)
+	add	$T1,$d			# d+=T1
+	add	$T1,$h			# h+=T1
+
+	lea	$STRIDE($Tbl),$Tbl	# round++
+___
+$code.=<<___ if ($i<15);
+	add	$a1,$h			# h+=Sigma0(a)
+___
+	($a2,$a3) = ($a3,$a2);
+}
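+
+# For exposition: the Ch() form used above, ((f^g)&e)^g, equals the
+# textbook (e&f)^(~e&g) -- a set bit of e selects f since (f^g)^g == f,
+# a clear bit selects g -- and needs only one scratch register:
+#
+#	ch = ((f ^ g) & e) ^ g;		/* == (e & f) ^ (~e & g) */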
+
+sub ROUND_16_XX()
+{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
+	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a2
+
+	mov	$a0,$T1
+	ror	\$`$sigma0[1]-$sigma0[0]`,$a0
+	add	$a1,$a			# modulo-scheduled h+=Sigma0(a)
+	mov	$a2,$a1
+	ror	\$`$sigma1[1]-$sigma1[0]`,$a2
+
+	xor	$T1,$a0
+	shr	\$$sigma0[2],$T1
+	ror	\$$sigma0[0],$a0
+	xor	$a1,$a2
+	shr	\$$sigma1[2],$a1
+
+	ror	\$$sigma1[0],$a2
+	xor	$a0,$T1			# sigma0(X[(i+1)&0xf])
+	xor	$a1,$a2			# sigma1(X[(i+14)&0xf])
+	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1
+
+	add	`$SZ*($i&0xf)`(%rsp),$T1
+	mov	$e,$a0
+	add	$a2,$T1
+	mov	$a,$a1
+___
+	&ROUND_00_15(@_);
+}
+
+$code=<<___;
+.text
+
+.extern	OPENSSL_ia32cap_P
+.globl	$func
+.type	$func,\@function,3
+.align	16
+$func:
+___
+$code.=<<___ if ($SZ==4 || $avx);
+	lea	OPENSSL_ia32cap_P(%rip),%r11
+	mov	0(%r11),%r9d
+	mov	4(%r11),%r10d
+	mov	8(%r11),%r11d
+___
+$code.=<<___ if ($avx && $SZ==8);
+	test	\$`1<<11`,%r10d		# check for XOP
+	jnz	.Lxop_shortcut
+___
+$code.=<<___ if ($avx>1);
+	and	\$`1<<8|1<<5|1<<3`,%r11d	# check for BMI2+AVX2+BMI1
+	cmp	\$`1<<8|1<<5|1<<3`,%r11d
+	je	.Lavx2_shortcut
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
+	and	\$`1<<28|1<<9`,%r10d	# mask AVX and SSSE3 bits
+	or	%r9d,%r10d
+	cmp	\$`1<<28|1<<9|1<<30`,%r10d
+	je	.Lavx_shortcut
+___
+$code.=<<___ if ($SZ==4);
+	test	\$`1<<9`,%r10d
+	jnz	.Lssse3_shortcut
+___
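+
+# For exposition, the dispatch emitted above as pseudo-C (cap[] being
+# the three OPENSSL_ia32cap_P words loaded into %r9d-%r11d; each branch
+# is only emitted when $avx and $SZ permit the path):
+#
+#	int intel = cap[0] & (1<<30);
+#	if (sha512 && (cap[1] & XOP))				goto xop;
+#	if ((cap[2] & (BMI1|AVX2|BMI2)) == (BMI1|AVX2|BMI2))	goto avx2;
+#	if (intel && (cap[1] & (AVX|SSSE3)) == (AVX|SSSE3))	goto avx;
+#	if (sha256 && (cap[1] & SSSE3))				goto ssse3;
+#	/* otherwise fall through to the integer-only path below */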
+$code.=<<___;
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	shl	\$4,%rdx		# num*16
+	sub	\$$framesz,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+.Lprologue:
+
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+	jmp	.Lloop
+
+.align	16
+.Lloop:
+	mov	$B,$a3
+	lea	$TABLE(%rip),$Tbl
+	xor	$C,$a3			# magic
+___
+	for($i=0;$i<16;$i++) {
+		$code.="	mov	$SZ*$i($inp),$T1\n";
+		$code.="	mov	@ROT[4],$a0\n";
+		$code.="	mov	@ROT[0],$a1\n";
+		$code.="	bswap	$T1\n";
+		&ROUND_00_15($i,@ROT);
+		unshift(@ROT,pop(@ROT));
+	}
+$code.=<<___;
+	jmp	.Lrounds_16_xx
+.align	16
+.Lrounds_16_xx:
+___
+	for(;$i<32;$i++) {
+		&ROUND_16_XX($i,@ROT);
+		unshift(@ROT,pop(@ROT));
+	}
+
+$code.=<<___;
+	cmpb	\$0,`$SZ-1`($Tbl)
+	jnz	.Lrounds_16_xx
+
+	mov	$_ctx,$ctx
+	add	$a1,$A			# modulo-scheduled h+=Sigma0(a)
+	lea	16*$SZ($inp),$inp
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop
+
+	mov	$_rsp,%rsi
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue:
+	ret
+.size	$func,.-$func
+___
+
+if ($SZ==4) {
+$code.=<<___;
+.align	64
+.type	$TABLE,\@object
+$TABLE:
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+	.asciz	"SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+} else {
+$code.=<<___;
+.align	64
+.type	$TABLE,\@object
+$TABLE:
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
+	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
+	.asciz	"SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+}
+
+######################################################################
+# SIMD code paths
+#
+{{{
+
+my $a4=$T1;
+my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
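+
+# For exposition: AUTOLOAD traps any call to an otherwise-undefined sub,
+# uses the sub's name as the mnemonic, prefixes a numeric last argument
+# with '$', and emits the operands reversed.  An assumed example, with
+# $a0 bound to %r13 in the SHA-512 flavor:
+#
+#	&ror($a0,5);		# appends "\tror\t$5,%r13\n" to $code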
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&ror	($a0,$Sigma1[2]-$Sigma1[1])',
+	'&mov	($a,$a1)',
+	'&mov	($a4,$f)',
+
+	'&ror	($a1,$Sigma0[2]-$Sigma0[1])',
+	'&xor	($a0,$e)',
+	'&xor	($a4,$g)',			# f^g
+
+	'&ror	($a0,$Sigma1[1]-$Sigma1[0])',
+	'&xor	($a1,$a)',
+	'&and	($a4,$e)',			# (f^g)&e
+
+	'&xor	($a0,$e)',
+	'&add	($h,$SZ*($i&15)."(%rsp)")',	# h+=X[i]+K[i]
+	'&mov	($a2,$a)',
+
+	'&xor	($a4,$g)',			# Ch(e,f,g)=((f^g)&e)^g
+	'&ror	($a1,$Sigma0[1]-$Sigma0[0])',
+	'&xor	($a2,$b)',			# a^b, b^c in next round
+
+	'&add	($h,$a4)',			# h+=Ch(e,f,g)
+	'&ror	($a0,$Sigma1[0])',		# Sigma1(e)
+	'&and	($a3,$a2)',			# (b^c)&(a^b)
+
+	'&xor	($a1,$a)',
+	'&add	($h,$a0)',			# h+=Sigma1(e)
+	'&xor	($a3,$b)',			# Maj(a,b,c)=Ch(a^b,c,b)
+
+	'&ror	($a1,$Sigma0[0])',		# Sigma0(a)
+	'&add	($d,$h)',			# d+=h
+	'&add	($h,$a3)',			# h+=Maj(a,b,c)
+
+	'&mov	($a0,$d)',
+	'&add	($a1,$h);'.			# h+=Sigma0(a)
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+}
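+
+# For exposition: the Maj() arrangement above is Pavel Semjanov's
+# alternative noted in the header -- Maj(a,b,c) is computed as
+# Ch(a^b,c,b) = ((c^b)&(a^b))^b, and because next round's (b,c) are
+# this round's (a,b), the a^b formed in $a2 doubles as next round's
+# b^c; the "xor $C,$a3 # magic" before each loop seeds that chain:
+#
+#	maj = ((c ^ b) & (a ^ b)) ^ b;	/* == (a&b) ^ (a&c) ^ (b&c) */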
+
+######################################################################
+# SSSE3 code path
+#
+if ($SZ==4) {	# SHA256 only
+my @X = map("%xmm$_",(0..3));
+my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+.type	${func}_ssse3,\@function,3
+.align	64
+${func}_ssse3:
+.Lssse3_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*4`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___;
+.Lprologue_ssse3:
+
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+
+$code.=<<___;
+	#movdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	#movdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Lloop_ssse3
+.align	16
+.Lloop_ssse3:
+	movdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	movdqu	0x00($inp),@X[0]
+	movdqu	0x10($inp),@X[1]
+	movdqu	0x20($inp),@X[2]
+	movdqu	0x30($inp),@X[3]
+	pshufb	$t3,@X[0]
+	lea	$TABLE(%rip),$Tbl
+	pshufb	$t3,@X[1]
+	movdqa	0x00($Tbl),$t0
+	pshufb	$t3,@X[2]
+	movdqa	0x20($Tbl),$t1
+	paddd	@X[0],$t0
+	movdqa	0x40($Tbl),$t2
+	pshufb	$t3,@X[3]
+	movdqa	0x60($Tbl),$t3
+	paddd	@X[1],$t1
+	paddd	@X[2],$t2
+	paddd	@X[3],$t3
+	movdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	movdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	movdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	movdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lssse3_00_47
+
+.align	16
+.Lssse3_00_47:
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+___
+sub Xupdate_256_SSSE3 () {
+	(
+	'&movdqa	($t0,@X[1]);',
+	'&movdqa	($t3,@X[3])',
+	'&palignr	($t0,@X[0],$SZ)',	# X[1..4]
+	 '&palignr	($t3,@X[2],$SZ);',	# X[9..12]
+	'&movdqa	($t1,$t0)',
+	'&movdqa	($t2,$t0);',
+	'&psrld		($t0,$sigma0[2])',
+	 '&paddd	(@X[0],$t3);',		# X[0..3] += X[9..12]
+	'&psrld		($t2,$sigma0[0])',
+	 '&pshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&pslld		($t1,8*$SZ-$sigma0[1]);'.
+	'&pxor		($t0,$t2)',
+	'&psrld		($t2,$sigma0[1]-$sigma0[0]);'.
+	'&pxor		($t0,$t1)',
+	'&pslld		($t1,$sigma0[1]-$sigma0[0]);'.
+	'&pxor		($t0,$t2);',
+	 '&movdqa	($t2,$t3)',
+	'&pxor		($t0,$t1);',		# sigma0(X[1..4])
+	 '&psrld	($t3,$sigma1[2])',
+	'&paddd		(@X[0],$t0);',		# X[0..3] += sigma0(X[1..4])
+	 '&psrlq	($t2,$sigma1[0])',
+	 '&pxor		($t3,$t2);',
+	 '&psrlq	($t2,$sigma1[1]-$sigma1[0])',
+	 '&pxor		($t3,$t2)',
+	 '&pshufb	($t3,$t4)',		# sigma1(X[14..15])
+	'&paddd		(@X[0],$t3)',		# X[0..1] += sigma1(X[14..15])
+	 '&pshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&movdqa	($t2,$t3);',
+	 '&psrld	($t3,$sigma1[2])',
+	 '&psrlq	($t2,$sigma1[0])',
+	 '&pxor		($t3,$t2);',
+	 '&psrlq	($t2,$sigma1[1]-$sigma1[0])',
+	 '&pxor		($t3,$t2);',
+	'&movdqa	($t2,16*2*$j."($Tbl)")',
+	 '&pshufb	($t3,$t5)',
+	'&paddd		(@X[0],$t3)'		# X[2..3] += sigma1(X[16..17])
+	);
+}
+
+sub SSSE3_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+    if (0) {
+	foreach (Xupdate_256_SSSE3()) {		# 36 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+    } else {			# squeeze extra 4% on Westmere and 19% on Atom
+	  eval(shift(@insns));	#@
+	&movdqa		($t0,@X[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t3,@X[3]);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	&palignr	($t0,@X[0],$SZ);	# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &palignr	($t3,@X[2],$SZ);	# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&movdqa		($t1,$t0);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t2,$t0);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	&psrld		($t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	&psrld		($t2,$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&pslld		($t1,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&psrld		($t2,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	&pxor		($t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pslld		($t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pxor		($t0,$t1);		# sigma0(X[1..4])
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrld		($t3,$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &psrlq		($t2,$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 &psrlq		($t2,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 #&pshufb	($t3,$t4);		# sigma1(X[14..15])
+	 &pshufd	($t3,$t3,0b10000000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrld		($t3,$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 &psrlq		($t2,$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &psrlq		($t2,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 #&pshufb	($t3,$t5);
+	 &pshufd	($t3,$t3,0b00001000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t2,16*2*$j."($Tbl)");
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &pslldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+    }
+	&paddd		($t2,@X[0]);
+	  foreach (@insns) { eval; }		# remaining instructions
+	&movdqa		(16*$j."(%rsp)",$t2);
+}
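+
+# For exposition: the interleaving device above queues the scalar
+# rounds from body_00_15() as Perl snippets in @insns and eval's a few
+# of them after each SIMD instruction, so integer and SSE operations
+# alternate in the emitted stream, roughly:
+#
+#	my @insns = (&$body,&$body,&$body,&$body);	# 4 scalar rounds
+#	&movdqa($t0,@X[1]);	eval(shift(@insns));	# SIMD, then scalar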
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&SSSE3_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lssse3_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_ssse3
+
+	mov	$_rsp,%rsi
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_ssse3:
+	ret
+.size	${func}_ssse3,.-${func}_ssse3
+___
+}
+
+if ($avx) {{
+######################################################################
+# XOP code path
+#
+if ($SZ==8) {	# SHA512 only
+$code.=<<___;
+.type	${func}_xop,\@function,3
+.align	64
+${func}_xop:
+.Lxop_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_xop:
+
+	vzeroupper
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+	jmp	.Lloop_xop
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+$code.=<<___;
+.align	16
+.Lloop_xop:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lxop_00_47
+
+.align	16
+.Lxop_00_47:
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+___
+sub XOP_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotd		($t1,$t0,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrld		($t0,$t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpaddd	(@X[0],@X[0],$t3);	# X[0..3] += X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotd		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t3,@X[3],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrld	($t2,@X[3],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrldq	($t3,$t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t3,@X[0],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrld	($t2,@X[0],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpslldq	($t3,$t3,8);		# 22 instructions
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&XOP_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lxop_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+					} else {	# SHA512
+    my @X = map("%xmm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+.align	16
+.Lloop_xop:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vpshufb	$t3,@X[0],@X[0]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[1],@X[1]
+	vmovdqu	0x40($inp),@X[4]
+	vpshufb	$t3,@X[2],@X[2]
+	vmovdqu	0x50($inp),@X[5]
+	vpshufb	$t3,@X[3],@X[3]
+	vmovdqu	0x60($inp),@X[6]
+	vpshufb	$t3,@X[4],@X[4]
+	vmovdqu	0x70($inp),@X[7]
+	vpshufb	$t3,@X[5],@X[5]
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t3,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x10(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x20(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x30(%rsp)
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x40(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x50(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x60(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x70(%rsp)
+	mov	$E,$a0
+	jmp	.Lxop_00_47
+
+.align	16
+.Lxop_00_47:
+	add	\$16*2*$SZ,$Tbl
+___
+sub XOP_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 52 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..2]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpalignr	($t3,@X[5],@X[4],$SZ);	# X[9..10]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotq		($t1,$t0,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrlq		($t0,$t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpaddq	(@X[0],@X[0],$t3);	# X[0..1] += X[9..10]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotq		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotq	($t3,@X[7],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..2])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrlq	($t2,@X[7],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t0);	# X[0..1] += sigma0(X[1..2])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotq	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<8; $j++) {
+	&XOP_512_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+	&jne	(".Lxop_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_xop
+
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_xop:
+	ret
+.size	${func}_xop,.-${func}_xop
+___
+}
+######################################################################
+# AVX+shrd code path
+#
+local *ror = sub { &shrd(@_[0],@_) };
+
+$code.=<<___;
+.type	${func}_avx,\@function,3
+.align	64
+${func}_avx:
+.Lavx_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx:
+
+	vzeroupper
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+___
+sub Xupdate_256_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
+	 '&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
+	'&vpsrld	($t2,$t0,$sigma0[0]);',
+	 '&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
+	'&vpsrld	($t3,$t0,$sigma0[2])',
+	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
+	'&vpxor		($t0,$t3,$t2)',
+	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t1)',
+	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t2)',
+	 '&vpsrld	($t2,$t3,$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..4])
+	 '&vpsrlq	($t3,$t3,$sigma1[0]);',
+	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
+	 '&vpxor	($t2,$t2,$t3);',
+	 '&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3)',
+	 '&vpshufb	($t2,$t2,$t4)',		# sigma1(X[14..15])
+	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
+	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&vpsrld	($t2,$t3,$sigma1[2])',
+	 '&vpsrlq	($t3,$t3,$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3);',
+	 '&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3)',
+	 '&vpshufb	($t2,$t2,$t5)',
+	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
+	);
+}
+
+sub AVX_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+					} else {	# SHA512
+    my @X = map("%xmm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vpshufb	$t3,@X[0],@X[0]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[1],@X[1]
+	vmovdqu	0x40($inp),@X[4]
+	vpshufb	$t3,@X[2],@X[2]
+	vmovdqu	0x50($inp),@X[5]
+	vpshufb	$t3,@X[3],@X[3]
+	vmovdqu	0x60($inp),@X[6]
+	vpshufb	$t3,@X[4],@X[4]
+	vmovdqu	0x70($inp),@X[7]
+	vpshufb	$t3,@X[5],@X[5]
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t3,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x10(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x20(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x30(%rsp)
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x40(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x50(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x60(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x70(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	add	\$16*2*$SZ,$Tbl
+___
+sub Xupdate_512_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..2]
+	 '&vpalignr	($t3,@X[5],@X[4],$SZ)',	# X[9..10]
+	'&vpsrlq	($t2,$t0,$sigma0[0])',
+	 '&vpaddq	(@X[0],@X[0],$t3);',	# X[0..1] += X[9..10]
+	'&vpsrlq	($t3,$t0,$sigma0[2])',
+	'&vpsllq	($t1,$t0,8*$SZ-$sigma0[1]);',
+	 '&vpxor	($t0,$t3,$t2)',
+	'&vpsrlq	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	 '&vpxor	($t0,$t0,$t1)',
+	'&vpsllq	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	 '&vpxor	($t0,$t0,$t2)',
+	 '&vpsrlq	($t3,@X[7],$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..2])
+	 '&vpsllq	($t2,@X[7],8*$SZ-$sigma1[1]);',
+	'&vpaddq	(@X[0],@X[0],$t0)',	# X[0..1] += sigma0(X[1..2])
+	 '&vpsrlq	($t1,@X[7],$sigma1[0]);',
+	 '&vpxor	($t3,$t3,$t2)',
+	 '&vpsllq	($t2,$t2,$sigma1[1]-$sigma1[0]);',
+	 '&vpxor	($t3,$t3,$t1)',
+	 '&vpsrlq	($t1,$t1,$sigma1[1]-$sigma1[0]);',
+	 '&vpxor	($t3,$t3,$t2)',
+	 '&vpxor	($t3,$t3,$t1)',		# sigma1(X[14..15])
+	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += sigma1(X[14..15])
+	);
+}
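+
+# (Same expansion as in the SHA-256 path, but on 64-bit words, two per xmm
+# register, with sigma0(x)=ROR(x,1)^ROR(x,8)^(x>>7) and
+# sigma1(x)=ROR(x,19)^ROR(x,61)^(x>>6). No second sigma1 pass is needed
+# here: @X[7] already holds both W[t-2] and W[t-1].)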
+
+sub AVX_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 52 instructions
+
+	foreach (Xupdate_512_AVX()) {		# 23 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<8; $j++) {
+	&AVX_512_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_avx
+
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_avx:
+	ret
+.size	${func}_avx,.-${func}_avx
+___
+
+if ($avx>1) {{
+######################################################################
+# AVX2+BMI code path
+#
+my $a5=$SZ==4?"%esi":"%rsi";	# zap $inp 
+my $PUSH8=8*2*$SZ;
+use integer;
+
+sub bodyx_00_15 () {
+	# at start $a1 should be zero, $a3 should hold $b^$c and $a4 a copy of $f
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&add	($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)',    # h+=X[i]+K[i]
+	'&and	($a4,$e)',		# f&e
+	'&rorx	($a0,$e,$Sigma1[2])',
+	'&rorx	($a2,$e,$Sigma1[1])',
+
+	'&lea	($a,"($a,$a1)")',	# h+=Sigma0(a) from the past
+	'&lea	($h,"($h,$a4)")',
+	'&andn	($a4,$e,$g)',		# ~e&g
+	'&xor	($a0,$a2)',
+
+	'&rorx	($a1,$e,$Sigma1[0])',
+	'&lea	($h,"($h,$a4)")',	# h+=Ch(e,f,g)=(e&f)+(~e&g)
+	'&xor	($a0,$a1)',		# Sigma1(e)
+	'&mov	($a2,$a)',
+
+	'&rorx	($a4,$a,$Sigma0[2])',
+	'&lea	($h,"($h,$a0)")',	# h+=Sigma1(e)
+	'&xor	($a2,$b)',		# a^b, b^c in next round
+	'&rorx	($a1,$a,$Sigma0[1])',
+
+	'&rorx	($a0,$a,$Sigma0[0])',
+	'&lea	($d,"($d,$h)")',	# d+=h
+	'&and	($a3,$a2)',		# (b^c)&(a^b)
+	'&xor	($a1,$a4)',
+
+	'&xor	($a3,$b)',		# Maj(a,b,c)=Ch(a^b,c,b)
+	'&xor	($a1,$a0)',		# Sigma0(a)
+	'&lea	($h,"($h,$a3)");'.	# h+=Maj(a,b,c)
+	'&mov	($a4,$e)',		# copy of f in future
+
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+	# and at the finish one has to $a+=$a1
+}
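+
+# (The "from the past" comments above describe the key trick: of the round
+# output a = h+Sigma1(e)+Ch(e,f,g)+K[i]+X[i]+Maj(a,b,c)+Sigma0(a), everything
+# except the Sigma0(a) term is accumulated during round i; Sigma0(a) is kept
+# in $a1 and folded in at the start of round i+1, once the register rotation
+# has turned `h' into `a'. This shortens the dependency chain, and is why
+# $a1 must be zero on entry and a final $a+=$a1 is needed after the last
+# round.)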
+
+$code.=<<___;
+.type	${func}_avx2,\@function,3
+.align	64
+${func}_avx2:
+.Lavx2_shortcut:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	mov	%rsp,%r11		# copy %rsp
+	sub	\$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
+	shl	\$4,%rdx		# num*16
+	and	\$-256*$SZ,%rsp		# align stack frame
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	add	\$`2*$SZ*($rounds-8)`,%rsp
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%r11,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx2:
+
+	vzeroupper
+	sub	\$-16*$SZ,$inp		# inp++, size optimization
+	mov	$SZ*0($ctx),$A
+	mov	$inp,%r12		# borrow $T1
+	mov	$SZ*1($ctx),$B
+	cmp	%rdx,$inp		# $_end
+	mov	$SZ*2($ctx),$C
+	cmove	%rsp,%r12		# next block or random data
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
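+
+# The AVX2 path works on two consecutive blocks at a time: the low 128-bit
+# lanes hold the current block while vinserti128 pulls the following block
+# (via %r12) into the high lanes, so one pass over the message schedule
+# serves both; the second block's rounds are later replayed from the stacked
+# data in .Lower_avx2. On the final block there is no successor, so %r12 is
+# pointed at the stack instead (the cmove above) and the high-lane results
+# are simply discarded.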
+					if ($SZ==4) {	# SHA256
+    my @X = map("%ymm$_",(0..3));
+    my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
+
+$code.=<<___;
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	-16*$SZ+0($inp),%xmm0
+	vmovdqu	-16*$SZ+16($inp),%xmm1
+	vmovdqu	-16*$SZ+32($inp),%xmm2
+	vmovdqu	-16*$SZ+48($inp),%xmm3
+	#mov		$inp,$_inp	# offload $inp
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	vpshufb		$t3,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	vpshufb		$t3,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+	lea	-$PUSH8(%rsp),%rsp
+	mov	$B,$a3
+	vmovdqa	$t2,0x00(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x20(%rsp)
+	mov	$F,$a4
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+___
+
+sub AVX2_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 96 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%2)==0);
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX2_256_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+					} else {	# SHA512
+    my @X = map("%ymm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
+
+$code.=<<___;
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqu	-16*$SZ($inp),%xmm0
+	vmovdqu	-16*$SZ+16($inp),%xmm1
+	vmovdqu	-16*$SZ+32($inp),%xmm2
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	-16*$SZ+48($inp),%xmm3
+	vmovdqu	-16*$SZ+64($inp),%xmm4
+	vmovdqu	-16*$SZ+80($inp),%xmm5
+	vmovdqu	-16*$SZ+96($inp),%xmm6
+	vmovdqu	-16*$SZ+112($inp),%xmm7
+	#mov	$inp,$_inp	# offload $inp
+	vmovdqa	`$SZ*2*$rounds-0x80`($Tbl),$t2
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	 vpshufb	$t2,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	 vpshufb	$t2,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+	 vpshufb	$t2,@X[2],@X[2]
+	vinserti128	\$1,64(%r12),@X[4],@X[4]
+	 vpshufb	$t2,@X[3],@X[3]
+	vinserti128	\$1,80(%r12),@X[5],@X[5]
+	 vpshufb	$t2,@X[4],@X[4]
+	vinserti128	\$1,96(%r12),@X[6],@X[6]
+	 vpshufb	$t2,@X[5],@X[5]
+	vinserti128	\$1,112(%r12),@X[7],@X[7]
+
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t2,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t2,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x20(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x40(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x60(%rsp)
+	lea	-$PUSH8(%rsp),%rsp
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x40(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x60(%rsp)
+	mov	$F,$a4
+	add	\$16*2*$SZ,$Tbl
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+___
+
+sub AVX2_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 48 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%4)==0);
+	foreach (Xupdate_512_AVX()) {		# 23 instructions
+	    eval;
+	    if ($_ !~ /\;$/) {
+		eval(shift(@insns));
+		eval(shift(@insns));
+		eval(shift(@insns));
+	    }
+	}
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<8; $j++) {
+	&AVX2_512_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1-0x80)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
+	add	$a1,$A
+	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
+	lea	`2*$SZ*($rounds-8)`(%rsp),$Tbl
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	cmp	`$PUSH8+2*8`($Tbl),$inp	# $_end
+	je	.Ldone_avx2
+
+	xor	$a1,$a1
+	mov	$B,$a3
+	xor	$C,$a3			# magic
+	mov	$F,$a4
+	jmp	.Lower_avx2
+.align	16
+.Lower_avx2:
+___
+    for ($i=0; $i<8; ) {
+	my $base="+16($Tbl)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+$code.=<<___;
+	lea	-$PUSH8($Tbl),$Tbl
+	cmp	%rsp,$Tbl
+	jae	.Lower_avx2
+
+	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
+	add	$a1,$A
+	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
+	lea	`2*$SZ*($rounds-8)`(%rsp),%rsp
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	lea	`2*16*$SZ`($inp),$inp	# inp+=2
+	add	$SZ*6($ctx),$G
+	mov	$inp,%r12
+	add	$SZ*7($ctx),$H
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	cmove	%rsp,%r12		# next block or stale data
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	jbe	.Loop_avx2
+	lea	(%rsp),$Tbl
+
+.Ldone_avx2:
+	lea	($Tbl),%rsp
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_avx2:
+	ret
+.size	${func}_avx2,.-${func}_avx2
+___
+}}
+}}}}}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+___
+$code.=<<___ if ($avx>1);
+	lea	.Lavx2_shortcut(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<avx2_shortcut
+	jb	.Lnot_in_avx2
+
+	and	\$-256*$SZ,%rax
+	add	\$`2*$SZ*($rounds-8)`,%rax
+.Lnot_in_avx2:
+___
+$code.=<<___;
+	mov	%rax,%rsi		# put aside Rsp
+	mov	16*$SZ+3*8(%rax),%rax	# pull $_rsp
+	lea	48(%rax),%rax
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx
+	jb	.Lin_prologue		# non-AVX code
+
+	lea	16*$SZ+4*8(%rsi),%rsi	# Xmm6- save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$`$SZ==4?8:12`,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_$func
+	.rva	.LSEH_end_$func
+	.rva	.LSEH_info_$func
+___
+$code.=<<___ if ($SZ==4);
+	.rva	.LSEH_begin_${func}_ssse3
+	.rva	.LSEH_end_${func}_ssse3
+	.rva	.LSEH_info_${func}_ssse3
+___
+$code.=<<___ if ($avx && $SZ==8);
+	.rva	.LSEH_begin_${func}_xop
+	.rva	.LSEH_end_${func}_xop
+	.rva	.LSEH_info_${func}_xop
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_${func}_avx
+	.rva	.LSEH_end_${func}_avx
+	.rva	.LSEH_info_${func}_avx
+___
+$code.=<<___ if ($avx>1);
+	.rva	.LSEH_begin_${func}_avx2
+	.rva	.LSEH_end_${func}_avx2
+	.rva	.LSEH_info_${func}_avx2
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_$func:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue,.Lepilogue			# HandlerData[]
+___
+$code.=<<___ if ($SZ==4);
+.LSEH_info_${func}_ssse3:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
+___
+$code.=<<___ if ($avx && $SZ==8);
+.LSEH_info_${func}_xop:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_xop,.Lepilogue_xop		# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_${func}_avx:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_${func}_avx2:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/sha/sha.h b/crypto/sha/sha.h
new file mode 100644
index 0000000..2eda284
--- /dev/null
+++ b/crypto/sha/sha.h
@@ -0,0 +1,237 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#ifndef OPENSSL_HEADER_SHA_H
+#define OPENSSL_HEADER_SHA_H
+
+#include <openssl/base.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/* SHA-1. */
+
+
+/* SHA_CBLOCK is the block size of SHA-1. */
+#define SHA_CBLOCK 64
+
+/* SHA_DIGEST_LENGTH is the length of a SHA-1 digest. */
+#define SHA_DIGEST_LENGTH 20
+
+/* TODO(fork): remove */
+#define SHA_LBLOCK 16
+#define SHA_LONG uint32_t
+
+/* SHA1_Init initialises |sha| and returns one. */
+int SHA1_Init(SHA_CTX *sha);
+
+/* SHA1_Update adds |len| bytes from |data| to |sha| and returns one. */
+int SHA1_Update(SHA_CTX *sha, const void *data, size_t len);
+
+/* SHA1_Final adds the final padding to |sha| and writes the resulting digest
+ * to |md|, which must have at least |SHA_DIGEST_LENGTH| bytes of space. It
+ * returns one. */
+int SHA1_Final(uint8_t *md, SHA_CTX *sha);
+
+/* SHA1 writes the digest of |len| bytes from |data| to |out| and returns
+ * |out|. There must be at least |SHA_DIGEST_LENGTH| bytes of space in
+ * |out|. */
+uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out);
+
+/* SHA1_Transform is a low-level function that performs a single SHA-1 block
+ * transformation using the state from |sha| and 64 bytes from |block|. */
+void SHA1_Transform(SHA_CTX *sha, const uint8_t *block);
+
+struct sha_state_st {
+  uint32_t h0, h1, h2, h3, h4;
+  uint32_t Nl, Nh;
+  uint32_t data[16];
+  unsigned int num;
+};
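+
+/* Example (illustrative only; |ctx| and |digest| are local names, not part
+ * of this header): hashing incrementally yields the same digest as the
+ * one-shot |SHA1| function above.
+ *
+ *   SHA_CTX ctx;
+ *   uint8_t digest[SHA_DIGEST_LENGTH];
+ *   SHA1_Init(&ctx);
+ *   SHA1_Update(&ctx, "hello ", 6);
+ *   SHA1_Update(&ctx, "world", 5);
+ *   SHA1_Final(digest, &ctx);
+ */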
+
+
+/* SHA-224. */
+
+/* SHA224_CBLOCK is the block size of SHA-224. */
+#define SHA224_CBLOCK 64
+
+/* SHA224_DIGEST_LENGTH is the length of a SHA-224 digest. */
+#define SHA224_DIGEST_LENGTH 28
+
+/* SHA224_Init initialises |sha| and returns 1. */
+int SHA224_Init(SHA256_CTX *sha);
+
+/* SHA224_Update adds |len| bytes from |data| to |sha|. */
+int SHA224_Update(SHA256_CTX *sha, const void *data, size_t len);
+
+/* SHA224_Final adds the final padding to |sha| and writes the resulting digest
+ * to |md|, which must have at least |SHA224_DIGEST_LENGTH| bytes of space. */
+int SHA224_Final(uint8_t *md, SHA256_CTX *sha);
+
+/* SHA224 writes the digest of |len| bytes from |data| to |out| and returns
+ * |out|. There must be at least |SHA224_DIGEST_LENGTH| bytes of space in
+ * |out|. */
+uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out);
+
+
+/* SHA-256. */
+
+/* SHA256_CBLOCK is the block size of SHA-256. */
+#define SHA256_CBLOCK 64
+
+/* SHA256_DIGEST_LENGTH is the length of a SHA-256 digest. */
+#define SHA256_DIGEST_LENGTH 32
+
+/* SHA256_Init initialises |sha| and returns 1. */
+int SHA256_Init(SHA256_CTX *sha);
+
+/* SHA256_Update adds |len| bytes from |data| to |sha|. */
+int SHA256_Update(SHA256_CTX *sha, const void *data, size_t len);
+
+/* SHA256_Final adds the final padding to |sha| and writes the resulting digest
+ * to |md|, which must have at least |SHA256_DIGEST_LENGTH| bytes of space. */
+int SHA256_Final(uint8_t *md, SHA256_CTX *sha);
+
+/* SHA256 writes the digest of |len| bytes from |data| to |out| and returns
+ * |out|. There must be at least |SHA256_DIGEST_LENGTH| bytes of space in
+ * |out|. */
+uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t *out);
+
+/* SHA256_Transform is a low-level function that performs a single SHA-256
+ * block transformation using the state from |sha| and 64 bytes from |data|. */
+void SHA256_Transform(SHA256_CTX *sha, const uint8_t *data);
+
+struct sha256_state_st {
+  uint32_t h[8];
+  uint32_t Nl, Nh;
+  uint32_t data[16];
+  unsigned int num, md_len;
+};
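+
+/* Example (illustrative only; |digest| is a local name): one-shot SHA-256 of
+ * an in-memory buffer.
+ *
+ *   uint8_t digest[SHA256_DIGEST_LENGTH];
+ *   SHA256((const uint8_t *)"abc", 3, digest);
+ */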
+
+
+/* SHA-384. */
+
+/* SHA384_CBLOCK is the block size of SHA-384. */
+#define SHA384_CBLOCK 128
+
+/* SHA384_DIGEST_LENGTH is the length of a SHA-384 digest. */
+#define SHA384_DIGEST_LENGTH 48
+
+/* SHA384_Init initialises |sha| and returns 1. */
+int SHA384_Init(SHA512_CTX *sha);
+
+/* SHA384_Update adds |len| bytes from |data| to |sha|. */
+int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len);
+
+/* SHA384_Final adds the final padding to |sha| and writes the resulting digest
+ * to |md|, which must have at least |SHA384_DIGEST_LENGTH| bytes of space. */
+int SHA384_Final(uint8_t *md, SHA512_CTX *sha);
+
+/* SHA384 writes the digest of |len| bytes from |data| to |out| and returns
+ * |out|. There must be at least |SHA384_DIGEST_LENGTH| bytes of space in
+ * |out|. */
+uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out);
+
+/* SHA384_Transform is a low-level function that performs a single SHA-384
+ * block transformation using the state from |sha| and 128 bytes from |data|. */
+void SHA384_Transform(SHA512_CTX *sha, const uint8_t *data);
+
+
+/* SHA-512. */
+
+/* SHA512_CBLOCK is the block size of SHA-512. */
+#define SHA512_CBLOCK 128
+
+/* SHA512_DIGEST_LENGTH is the length of a SHA-512 digest. */
+#define SHA512_DIGEST_LENGTH 64
+
+/* SHA512_Init initialises |sha| and returns 1. */
+int SHA512_Init(SHA512_CTX *sha);
+
+/* SHA512_Update adds |len| bytes from |data| to |sha|. */
+int SHA512_Update(SHA512_CTX *sha, const void *data, size_t len);
+
+/* SHA512_Final adds the final padding to |sha| and writes the resulting digest
+ * to |md|, which must have at least |SHA512_DIGEST_LENGTH| bytes of space. */
+int SHA512_Final(uint8_t *md, SHA512_CTX *sha);
+
+/* SHA512 writes the digest of |len| bytes from |data| to |out| and returns
+ * |out|. There must be at least |SHA512_DIGEST_LENGTH| bytes of space in
+ * |out|. */
+uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out);
+
+/* SHA512_Transform is a low-level function that performs a single SHA-512
+ * block transformation using the state from |sha| and 128 bytes from |data|. */
+void SHA512_Transform(SHA512_CTX *sha, const uint8_t *data);
+
+struct sha512_state_st {
+  uint64_t h[8];
+  uint64_t Nl, Nh;
+  union {
+    uint64_t d[16];
+    uint8_t p[128];
+  } u;
+  unsigned int num, md_len;
+};
+
+
+#if defined(__cplusplus)
+}  /* extern C */
+#endif
+
+#endif  /* OPENSSL_HEADER_SHA_H */
diff --git a/crypto/sha/sha1.c b/crypto/sha/sha1.c
new file mode 100644
index 0000000..3148739
--- /dev/null
+++ b/crypto/sha/sha1.c
@@ -0,0 +1,379 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/sha.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+
+#if !defined(OPENSSL_NO_ASM) && \
+    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM))
+#define SHA1_ASM
+#endif
+
+int SHA1_Init(SHA_CTX *sha) {
+  memset(sha, 0, sizeof(SHA_CTX));
+  sha->h0 = 0x67452301UL;
+  sha->h1 = 0xefcdab89UL;
+  sha->h2 = 0x98badcfeUL;
+  sha->h3 = 0x10325476UL;
+  sha->h4 = 0xc3d2e1f0UL;
+  return 1;
+}
+
+uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA_CTX ctx;
+  static uint8_t buf[SHA_DIGEST_LENGTH];
+
+  /* TODO(fork): remove this static buffer. */
+  if (out == NULL) {
+    out = buf;
+  }
+  if (!SHA1_Init(&ctx)) {
+    return NULL;
+  }
+  SHA1_Update(&ctx, data, len);
+  SHA1_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+#define DATA_ORDER_IS_BIG_ENDIAN
+
+#define HASH_LONG               uint32_t
+#define HASH_CTX                SHA_CTX
+#define HASH_CBLOCK             64
+#define HASH_MAKE_STRING(c, s) \
+  do {                         \
+    unsigned long ll;          \
+    ll = (c)->h0;              \
+    (void) HOST_l2c(ll, (s));  \
+    ll = (c)->h1;              \
+    (void) HOST_l2c(ll, (s));  \
+    ll = (c)->h2;              \
+    (void) HOST_l2c(ll, (s));  \
+    ll = (c)->h3;              \
+    (void) HOST_l2c(ll, (s));  \
+    ll = (c)->h4;              \
+    (void) HOST_l2c(ll, (s));  \
+  } while (0)
+
+#define HASH_UPDATE SHA1_Update
+#define HASH_TRANSFORM SHA1_Transform
+#define HASH_FINAL SHA1_Final
+#define HASH_BLOCK_DATA_ORDER sha1_block_data_order
+#define Xupdate(a, ix, ia, ib, ic, id) \
+  ((a) = (ia ^ ib ^ ic ^ id), ix = (a) = ROTATE((a), 1))
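+
+/* Xupdate implements the SHA-1 message expansion
+ *   W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]),  16 <= t <= 79,
+ * updating a 16-word window in place so that only the last 16 words are
+ * ever live. */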
+
+#ifndef SHA1_ASM
+static
+#endif
+void sha1_block_data_order(SHA_CTX *c, const void *p, size_t num);
+
+#include "../digest/md32_common.h"
+
+#define K_00_19 0x5a827999UL
+#define K_20_39 0x6ed9eba1UL
+#define K_40_59 0x8f1bbcdcUL
+#define K_60_79 0xca62c1d6UL
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
+ * to the code in F_00_19. Wei attributes these optimisations to Peter
+ * Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+ *
+ *   #define F(x, y, z) (((x) & (y)) | ((~(x)) & (z)))
+ *
+ * I've just become aware of another tweak to be made, again from Wei Dai, in
+ * F_40_59: (x&a)|(y&a) -> (x|y)&a. */
+#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
+#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
+#define F_60_79(b, c, d) F_20_39(b, c, d)
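+
+/* (A quick check of the F_00_19 identity from the comment above: for each
+ * bit, if b is set the result is c, otherwise d. When b=1,
+ * ((c^d)&b)^d = (c^d)^d = c; when b=0 it is d. The rewritten form needs one
+ * operation fewer and no NOT compared with (b&c)|(~b&d).) */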
+
+#define BODY_00_15(i, a, b, c, d, e, f, xi)                           \
+  (f) = xi + (e) + K_00_19 + ROTATE((a), 5) + F_00_19((b), (c), (d)); \
+  (b) = ROTATE((b), 30);
+
+#define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)       \
+  Xupdate(f, xi, xa, xb, xc, xd);                                 \
+  (f) += (e) + K_00_19 + ROTATE((a), 5) + F_00_19((b), (c), (d)); \
+  (b) = ROTATE((b), 30);
+
+#define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)       \
+  Xupdate(f, xi, xa, xb, xc, xd);                                 \
+  (f) += (e) + K_20_39 + ROTATE((a), 5) + F_20_39((b), (c), (d)); \
+  (b) = ROTATE((b), 30);
+
+#define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd)           \
+  Xupdate(f, xa, xa, xb, xc, xd);                                 \
+  (f) += (e) + K_20_39 + ROTATE((a), 5) + F_20_39((b), (c), (d)); \
+  (b) = ROTATE((b), 30);
+
+#define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd)           \
+  Xupdate(f, xa, xa, xb, xc, xd);                                 \
+  (f) += (e) + K_40_59 + ROTATE((a), 5) + F_40_59((b), (c), (d)); \
+  (b) = ROTATE((b), 30);
+
+#define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd)               \
+  Xupdate(f, xa, xa, xb, xc, xd);                                     \
+  (f) = xa + (e) + K_60_79 + ROTATE((a), 5) + F_60_79((b), (c), (d)); \
+  (b) = ROTATE((b), 30);
+
+#ifdef X
+#undef X
+#endif
+
+/* Originally X was an array. As it's automatic it's natural to expect a
+ * RISC compiler to accommodate at least part of it in the register bank,
+ * isn't it? Unfortunately not all compilers "find" this expectation
+ * reasonable:-( In order to make such compilers generate better code X[] is
+ * replaced with a bunch of X0, X1, etc. See the function body below...
+ *					<appro@fy.chalmers.se> */
+#define X(i)	XX##i
+
+#if !defined(SHA1_ASM)
+static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num) {
+  const uint8_t *data = p;
+  register unsigned MD32_REG_T A, B, C, D, E, T, l;
+  unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
+      XX11, XX12, XX13, XX14, XX15;
+
+  A = c->h0;
+  B = c->h1;
+  C = c->h2;
+  D = c->h3;
+  E = c->h4;
+
+  for (;;) {
+    const union {
+      long one;
+      char little;
+    } is_endian = {1};
+
+    if (!is_endian.little && ((size_t)p % 4) == 0) {
+      const uint32_t *W = (const uint32_t *)data;
+
+      X(0) = W[0];
+      X(1) = W[1];
+      BODY_00_15(0, A, B, C, D, E, T, X(0));
+      X(2) = W[2];
+      BODY_00_15(1, T, A, B, C, D, E, X(1));
+      X(3) = W[3];
+      BODY_00_15(2, E, T, A, B, C, D, X(2));
+      X(4) = W[4];
+      BODY_00_15(3, D, E, T, A, B, C, X(3));
+      X(5) = W[5];
+      BODY_00_15(4, C, D, E, T, A, B, X(4));
+      X(6) = W[6];
+      BODY_00_15(5, B, C, D, E, T, A, X(5));
+      X(7) = W[7];
+      BODY_00_15(6, A, B, C, D, E, T, X(6));
+      X(8) = W[8];
+      BODY_00_15(7, T, A, B, C, D, E, X(7));
+      X(9) = W[9];
+      BODY_00_15(8, E, T, A, B, C, D, X(8));
+      X(10) = W[10];
+      BODY_00_15(9, D, E, T, A, B, C, X(9));
+      X(11) = W[11];
+      BODY_00_15(10, C, D, E, T, A, B, X(10));
+      X(12) = W[12];
+      BODY_00_15(11, B, C, D, E, T, A, X(11));
+      X(13) = W[13];
+      BODY_00_15(12, A, B, C, D, E, T, X(12));
+      X(14) = W[14];
+      BODY_00_15(13, T, A, B, C, D, E, X(13));
+      X(15) = W[15];
+      BODY_00_15(14, E, T, A, B, C, D, X(14));
+      BODY_00_15(15, D, E, T, A, B, C, X(15));
+
+      data += HASH_CBLOCK;
+    } else {
+      (void)HOST_c2l(data, l);
+      X(0) = l;
+      (void)HOST_c2l(data, l);
+      X(1) = l;
+      BODY_00_15(0, A, B, C, D, E, T, X(0));
+      (void)HOST_c2l(data, l);
+      X(2) = l;
+      BODY_00_15(1, T, A, B, C, D, E, X(1));
+      (void)HOST_c2l(data, l);
+      X(3) = l;
+      BODY_00_15(2, E, T, A, B, C, D, X(2));
+      (void)HOST_c2l(data, l);
+      X(4) = l;
+      BODY_00_15(3, D, E, T, A, B, C, X(3));
+      (void)HOST_c2l(data, l);
+      X(5) = l;
+      BODY_00_15(4, C, D, E, T, A, B, X(4));
+      (void)HOST_c2l(data, l);
+      X(6) = l;
+      BODY_00_15(5, B, C, D, E, T, A, X(5));
+      (void)HOST_c2l(data, l);
+      X(7) = l;
+      BODY_00_15(6, A, B, C, D, E, T, X(6));
+      (void)HOST_c2l(data, l);
+      X(8) = l;
+      BODY_00_15(7, T, A, B, C, D, E, X(7));
+      (void)HOST_c2l(data, l);
+      X(9) = l;
+      BODY_00_15(8, E, T, A, B, C, D, X(8));
+      (void)HOST_c2l(data, l);
+      X(10) = l;
+      BODY_00_15(9, D, E, T, A, B, C, X(9));
+      (void)HOST_c2l(data, l);
+      X(11) = l;
+      BODY_00_15(10, C, D, E, T, A, B, X(10));
+      (void)HOST_c2l(data, l);
+      X(12) = l;
+      BODY_00_15(11, B, C, D, E, T, A, X(11));
+      (void)HOST_c2l(data, l);
+      X(13) = l;
+      BODY_00_15(12, A, B, C, D, E, T, X(12));
+      (void)HOST_c2l(data, l);
+      X(14) = l;
+      BODY_00_15(13, T, A, B, C, D, E, X(13));
+      (void)HOST_c2l(data, l);
+      X(15) = l;
+      BODY_00_15(14, E, T, A, B, C, D, X(14));
+      BODY_00_15(15, D, E, T, A, B, C, X(15));
+    }
+
+    BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));
+    BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));
+    BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));
+    BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));
+
+    BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));
+    BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));
+    BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));
+    BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));
+    BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));
+    BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));
+    BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7));
+    BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8));
+    BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9));
+    BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10));
+    BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11));
+    BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12));
+
+    BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13));
+    BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14));
+    BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15));
+    BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0));
+    BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1));
+    BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2));
+    BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3));
+    BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4));
+
+    BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5));
+    BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6));
+    BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7));
+    BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8));
+    BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9));
+    BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10));
+    BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11));
+    BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12));
+    BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13));
+    BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14));
+    BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15));
+    BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0));
+    BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1));
+    BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2));
+    BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3));
+    BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4));
+    BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5));
+    BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6));
+    BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7));
+    BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8));
+
+    BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9));
+    BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10));
+    BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11));
+    BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12));
+    BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13));
+    BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14));
+    BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15));
+    BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0));
+    BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1));
+    BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2));
+    BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3));
+    BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4));
+    BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5));
+    BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6));
+    BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7));
+    BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8));
+    BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9));
+    BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10));
+    BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
+    BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));
+
+    c->h0 = (c->h0 + E) & 0xffffffffL;
+    c->h1 = (c->h1 + T) & 0xffffffffL;
+    c->h2 = (c->h2 + A) & 0xffffffffL;
+    c->h3 = (c->h3 + B) & 0xffffffffL;
+    c->h4 = (c->h4 + C) & 0xffffffffL;
+
+    if (--num == 0)
+      break;
+
+    A = c->h0;
+    B = c->h1;
+    C = c->h2;
+    D = c->h3;
+    E = c->h4;
+  }
+}
+#endif
diff --git a/crypto/sha/sha1_test.c b/crypto/sha/sha1_test.c
new file mode 100644
index 0000000..72ef9e1
--- /dev/null
+++ b/crypto/sha/sha1_test.c
@@ -0,0 +1,125 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <openssl/sha.h>
+#include <openssl/digest.h>
+
+
+static const char *const test[] = {
+    "abc", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", NULL, };
+
+static const char *const expected[] = {
+    "a9993e364706816aba3e25717850c26c9cd0d89d",
+    "84983e441c3bd26ebaae4aa1f95129e5e54670f1", };
+
+static int test_incremental(void) {
+  EVP_MD_CTX ctx;
+  char buf[1000];
+  uint8_t md[SHA_DIGEST_LENGTH];
+  char md_hex[sizeof(md) * 2 + 1];
+  size_t i;
+  static const char expected[] = "34aa973cd4c4daa4f61eeb2bdbad27316534016f";
+
+  memset(buf, 'a', sizeof(buf));
+  EVP_MD_CTX_init(&ctx);
+  EVP_DigestInit_ex(&ctx, EVP_sha1(), NULL);
+  for (i = 0; i < 1000; i++) {
+    EVP_DigestUpdate(&ctx, buf, sizeof(buf));
+  }
+  EVP_DigestFinal_ex(&ctx, md, NULL);
+  EVP_MD_CTX_cleanup(&ctx);
+
+  for (i = 0; i < sizeof(md); i++) {
+    sprintf(&md_hex[i * 2], "%02x", md[i]);
+  }
+
+  if (strcmp(md_hex, expected) != 0) {
+    fprintf(stderr, "test_incremental: got %s, wanted %s\n", md_hex, expected);
+    return 0;
+  }
+
+  return 1;
+}
+
+int main(int argc, char **argv) {
+  size_t i, j;
+  uint8_t md[SHA_DIGEST_LENGTH];
+  char md_hex[sizeof(md) * 2 + 1];
+  int ok = 1;
+
+  for (i = 0; test[i] != NULL; i++) {
+    EVP_Digest(test[i], strlen(test[i]), md, NULL, EVP_sha1(), NULL);
+    for (j = 0; j < sizeof(md); j++) {
+      sprintf(&md_hex[j * 2], "%02x", md[j]);
+    }
+
+    if (strcmp(md_hex, expected[i]) != 0) {
+      fprintf(stderr, "#%u: got %s, wanted %s\n", (unsigned)i, md_hex,
+              expected[i]);
+      ok = 0;
+    }
+  }
+
+  ok &= test_incremental();
+
+  if (ok) {
+    printf("PASS\n");
+  }
+
+  return ok ? 0 : 1;
+}
diff --git a/crypto/sha/sha256.c b/crypto/sha/sha256.c
new file mode 100644
index 0000000..0685784
--- /dev/null
+++ b/crypto/sha/sha256.c
@@ -0,0 +1,365 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/sha.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+
+#if !defined(OPENSSL_NO_ASM) && \
+    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM))
+#define SHA256_ASM
+#endif
+
+int SHA224_Init(SHA256_CTX *sha) {
+  memset(sha, 0, sizeof(SHA256_CTX));
+  sha->h[0] = 0xc1059ed8UL;
+  sha->h[1] = 0x367cd507UL;
+  sha->h[2] = 0x3070dd17UL;
+  sha->h[3] = 0xf70e5939UL;
+  sha->h[4] = 0xffc00b31UL;
+  sha->h[5] = 0x68581511UL;
+  sha->h[6] = 0x64f98fa7UL;
+  sha->h[7] = 0xbefa4fa4UL;
+  sha->md_len = SHA224_DIGEST_LENGTH;
+  return 1;
+}
+
+int SHA256_Init(SHA256_CTX *sha) {
+  memset(sha, 0, sizeof(SHA256_CTX));
+  sha->h[0] = 0x6a09e667UL;
+  sha->h[1] = 0xbb67ae85UL;
+  sha->h[2] = 0x3c6ef372UL;
+  sha->h[3] = 0xa54ff53aUL;
+  sha->h[4] = 0x510e527fUL;
+  sha->h[5] = 0x9b05688cUL;
+  sha->h[6] = 0x1f83d9abUL;
+  sha->h[7] = 0x5be0cd19UL;
+  sha->md_len = SHA256_DIGEST_LENGTH;
+  return 1;
+}
+
+uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA256_CTX ctx;
+  static uint8_t buf[SHA224_DIGEST_LENGTH];
+
+  /* TODO(fork): remove this static buffer. */
+  if (out == NULL) {
+    out = buf;
+  }
+  SHA224_Init(&ctx);
+  SHA256_Update(&ctx, data, len);
+  SHA256_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA256_CTX ctx;
+  static uint8_t buf[SHA256_DIGEST_LENGTH];
+
+  /* TODO(fork): remove this static buffer. */
+  if (out == NULL) {
+    out = buf;
+  }
+  SHA256_Init(&ctx);
+  SHA256_Update(&ctx, data, len);
+  SHA256_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+int SHA224_Update(SHA256_CTX *ctx, const void *data, size_t len) {
+  return SHA256_Update(ctx, data, len);
+}
+
+int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) {
+  return SHA256_Final(md, ctx);
+}
+
+#define DATA_ORDER_IS_BIG_ENDIAN
+
+#define HASH_LONG uint32_t
+#define HASH_CTX SHA256_CTX
+#define HASH_CBLOCK 64
+
+/* Note that FIPS 180-2 discusses "Truncation of the Hash Function Output";
+ * the default: case below covers for it. It's not clear, however, whether
+ * it's permitted to truncate to a number of bytes not divisible by 4. I bet
+ * not, but if it is, then the default: case shall be extended. For
+ * reference, the idea behind separate cases for the pre-defined lengths is
+ * to let the compiler decide whether it's appropriate to unroll small
+ * loops. */
+#define HASH_MAKE_STRING(c, s)                              \
+  do {                                                      \
+    unsigned long ll;                                       \
+    unsigned int nn;                                        \
+    switch ((c)->md_len) {                                  \
+      case SHA224_DIGEST_LENGTH:                            \
+        for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) { \
+          ll = (c)->h[nn];                                  \
+          (void) HOST_l2c(ll, (s));                         \
+        }                                                   \
+        break;                                              \
+      case SHA256_DIGEST_LENGTH:                            \
+        for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) { \
+          ll = (c)->h[nn];                                  \
+          (void) HOST_l2c(ll, (s));                         \
+        }                                                   \
+        break;                                              \
+      default:                                              \
+        if ((c)->md_len > SHA256_DIGEST_LENGTH)             \
+          return 0;                                         \
+        for (nn = 0; nn < (c)->md_len / 4; nn++) {          \
+          ll = (c)->h[nn];                                  \
+          (void) HOST_l2c(ll, (s));                         \
+        }                                                   \
+        break;                                              \
+    }                                                       \
+  } while (0)
+
+
+#define HASH_UPDATE SHA256_Update
+#define HASH_TRANSFORM SHA256_Transform
+#define HASH_FINAL SHA256_Final
+#define HASH_BLOCK_DATA_ORDER sha256_block_data_order
+#ifndef SHA256_ASM
+static
+#endif
+void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num);
+
+#include "../digest/md32_common.h"
+
+#ifndef SHA256_ASM
+static const HASH_LONG K256[64] = {
+    0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
+    0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
+    0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
+    0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+    0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
+    0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
+    0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
+    0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+    0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
+    0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
+    0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
+    0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+    0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL};
+
+/* The FIPS specification refers to right rotations, while our ROTATE macro
+ * is a left one. This is why you might notice that the rotation coefficients
+ * differ from those in the FIPS document by 32-N... */
+#define Sigma0(x) (ROTATE((x), 30) ^ ROTATE((x), 19) ^ ROTATE((x), 10))
+#define Sigma1(x) (ROTATE((x), 26) ^ ROTATE((x), 21) ^ ROTATE((x), 7))
+#define sigma0(x) (ROTATE((x), 25) ^ ROTATE((x), 14) ^ ((x) >> 3))
+#define sigma1(x) (ROTATE((x), 15) ^ ROTATE((x), 13) ^ ((x) >> 10))
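+
+/* For example, FIPS 180 defines Sigma0(x) = ROTR(x,2) ^ ROTR(x,13) ^
+ * ROTR(x,22); since ROTR(x,n) == ROTL(x,32-n) on 32-bit words, that is
+ * exactly the ROTATE(x,30) ^ ROTATE(x,19) ^ ROTATE(x,10) above. */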
+
+#define Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+#define ROUND_00_15(i, a, b, c, d, e, f, g, h)   \
+  do {                                           \
+    T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i]; \
+    h = Sigma0(a) + Maj(a, b, c);                \
+    d += T1;                                     \
+    h += T1;                                     \
+  } while (0)
+
+#define ROUND_16_63(i, a, b, c, d, e, f, g, h, X)      \
+  do {                                                 \
+    s0 = X[(i + 1) & 0x0f];                            \
+    s0 = sigma0(s0);                                   \
+    s1 = X[(i + 14) & 0x0f];                           \
+    s1 = sigma1(s1);                                   \
+    T1 = X[(i) & 0x0f] += s0 + s1 + X[(i + 9) & 0x0f]; \
+    ROUND_00_15(i, a, b, c, d, e, f, g, h);            \
+  } while (0)
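+
+/* ROUND_16_63 computes the FIPS 180-4 message schedule
+ *
+ *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
+ *
+ * in place: only the 16 most recent schedule words are ever live, so X[] is
+ * treated as a 16-entry ring buffer and indices are reduced with "& 0x0f". */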
+
+static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
+                                    size_t num) {
+  unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1;
+  HASH_LONG X[16];
+  int i;
+  const uint8_t *data = in;
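+  /* Runtime endianness probe: the first byte of "one" is 1 exactly on a
+   * little-endian host, so "little" is non-zero there. */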
+  const union {
+    long one;
+    char little;
+  } is_endian = {1};
+
+  while (num--) {
+    a = ctx->h[0];
+    b = ctx->h[1];
+    c = ctx->h[2];
+    d = ctx->h[3];
+    e = ctx->h[4];
+    f = ctx->h[5];
+    g = ctx->h[6];
+    h = ctx->h[7];
+
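+    /* On a big-endian host with a 32-bit HASH_LONG and 4-byte-aligned input,
+     * the message words are already in the byte order the algorithm needs and
+     * can be loaded directly; otherwise HOST_c2l assembles each word from
+     * individual bytes. */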
+    if (!is_endian.little && sizeof(HASH_LONG) == 4 && ((size_t)in % 4) == 0) {
+      const HASH_LONG *W = (const HASH_LONG *)data;
+
+      T1 = X[0] = W[0];
+      ROUND_00_15(0, a, b, c, d, e, f, g, h);
+      T1 = X[1] = W[1];
+      ROUND_00_15(1, h, a, b, c, d, e, f, g);
+      T1 = X[2] = W[2];
+      ROUND_00_15(2, g, h, a, b, c, d, e, f);
+      T1 = X[3] = W[3];
+      ROUND_00_15(3, f, g, h, a, b, c, d, e);
+      T1 = X[4] = W[4];
+      ROUND_00_15(4, e, f, g, h, a, b, c, d);
+      T1 = X[5] = W[5];
+      ROUND_00_15(5, d, e, f, g, h, a, b, c);
+      T1 = X[6] = W[6];
+      ROUND_00_15(6, c, d, e, f, g, h, a, b);
+      T1 = X[7] = W[7];
+      ROUND_00_15(7, b, c, d, e, f, g, h, a);
+      T1 = X[8] = W[8];
+      ROUND_00_15(8, a, b, c, d, e, f, g, h);
+      T1 = X[9] = W[9];
+      ROUND_00_15(9, h, a, b, c, d, e, f, g);
+      T1 = X[10] = W[10];
+      ROUND_00_15(10, g, h, a, b, c, d, e, f);
+      T1 = X[11] = W[11];
+      ROUND_00_15(11, f, g, h, a, b, c, d, e);
+      T1 = X[12] = W[12];
+      ROUND_00_15(12, e, f, g, h, a, b, c, d);
+      T1 = X[13] = W[13];
+      ROUND_00_15(13, d, e, f, g, h, a, b, c);
+      T1 = X[14] = W[14];
+      ROUND_00_15(14, c, d, e, f, g, h, a, b);
+      T1 = X[15] = W[15];
+      ROUND_00_15(15, b, c, d, e, f, g, h, a);
+
+      data += HASH_CBLOCK;
+    } else {
+      HASH_LONG l;
+
+      HOST_c2l(data, l);
+      T1 = X[0] = l;
+      ROUND_00_15(0, a, b, c, d, e, f, g, h);
+      HOST_c2l(data, l);
+      T1 = X[1] = l;
+      ROUND_00_15(1, h, a, b, c, d, e, f, g);
+      HOST_c2l(data, l);
+      T1 = X[2] = l;
+      ROUND_00_15(2, g, h, a, b, c, d, e, f);
+      HOST_c2l(data, l);
+      T1 = X[3] = l;
+      ROUND_00_15(3, f, g, h, a, b, c, d, e);
+      HOST_c2l(data, l);
+      T1 = X[4] = l;
+      ROUND_00_15(4, e, f, g, h, a, b, c, d);
+      HOST_c2l(data, l);
+      T1 = X[5] = l;
+      ROUND_00_15(5, d, e, f, g, h, a, b, c);
+      HOST_c2l(data, l);
+      T1 = X[6] = l;
+      ROUND_00_15(6, c, d, e, f, g, h, a, b);
+      HOST_c2l(data, l);
+      T1 = X[7] = l;
+      ROUND_00_15(7, b, c, d, e, f, g, h, a);
+      HOST_c2l(data, l);
+      T1 = X[8] = l;
+      ROUND_00_15(8, a, b, c, d, e, f, g, h);
+      HOST_c2l(data, l);
+      T1 = X[9] = l;
+      ROUND_00_15(9, h, a, b, c, d, e, f, g);
+      HOST_c2l(data, l);
+      T1 = X[10] = l;
+      ROUND_00_15(10, g, h, a, b, c, d, e, f);
+      HOST_c2l(data, l);
+      T1 = X[11] = l;
+      ROUND_00_15(11, f, g, h, a, b, c, d, e);
+      HOST_c2l(data, l);
+      T1 = X[12] = l;
+      ROUND_00_15(12, e, f, g, h, a, b, c, d);
+      HOST_c2l(data, l);
+      T1 = X[13] = l;
+      ROUND_00_15(13, d, e, f, g, h, a, b, c);
+      HOST_c2l(data, l);
+      T1 = X[14] = l;
+      ROUND_00_15(14, c, d, e, f, g, h, a, b);
+      HOST_c2l(data, l);
+      T1 = X[15] = l;
+      ROUND_00_15(15, b, c, d, e, f, g, h, a);
+    }
+
+    for (i = 16; i < 64; i += 8) {
+      ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X);
+      ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X);
+      ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X);
+      ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X);
+      ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X);
+      ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X);
+      ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X);
+      ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X);
+    }
+
+    ctx->h[0] += a;
+    ctx->h[1] += b;
+    ctx->h[2] += c;
+    ctx->h[3] += d;
+    ctx->h[4] += e;
+    ctx->h[5] += f;
+    ctx->h[6] += g;
+    ctx->h[7] += h;
+  }
+}
+
+#endif /* SHA256_ASM */
diff --git a/crypto/sha/sha512.c b/crypto/sha/sha512.c
new file mode 100644
index 0000000..dcaac6f
--- /dev/null
+++ b/crypto/sha/sha512.c
@@ -0,0 +1,590 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/sha.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+
+/* IMPLEMENTATION NOTES.
+ *
+ * As you might have noticed, the 32-bit hash algorithms:
+ *
+ * - permit SHA_LONG to be wider than 32 bits (as is the case on CRAY);
+ * - in optimized versions, implement two transform functions: one operating
+ *   on [aligned] data in host byte order and one on data in input
+ *   stream byte order;
+ * - share a common, byte-order-neutral collector and padding function
+ *   implementation, ../md32_common.h.
+ *
+ * None of the above applies to this SHA-512 implementation. The reasons
+ * [in reverse order] are:
+ *
+ * - it is the only 64-bit hash algorithm at the time of this writing, so
+ *   there is no need for a common collector/padding implementation [yet];
+ * - by supporting only one transform function [which operates on *aligned*
+ *   data in input stream byte order, big-endian in this case] we minimize
+ *   the maintenance burden in two ways: a) the collector/padding function
+ *   is simpler; b) there is only one transform function to stare at;
+ * - SHA_LONG64 is required to be exactly 64 bits wide so that a number of
+ *   optimizations can be applied to mitigate potential performance
+ *   penalties caused by the previous design decision. */
+
+#if !defined(OPENSSL_NO_ASM) && \
+    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || defined(OPENSSL_ARM))
+#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+#define SHA512_ASM
+#endif
+
+int SHA384_Init(SHA512_CTX *sha) {
+  sha->h[0] = 0xcbbb9d5dc1059ed8;
+  sha->h[1] = 0x629a292a367cd507;
+  sha->h[2] = 0x9159015a3070dd17;
+  sha->h[3] = 0x152fecd8f70e5939;
+  sha->h[4] = 0x67332667ffc00b31;
+  sha->h[5] = 0x8eb44a8768581511;
+  sha->h[6] = 0xdb0c2e0d64f98fa7;
+  sha->h[7] = 0x47b5481dbefa4fa4;
+
+  sha->Nl = 0;
+  sha->Nh = 0;
+  sha->num = 0;
+  sha->md_len = SHA384_DIGEST_LENGTH;
+  return 1;
+}
+
+
+int SHA512_Init(SHA512_CTX *sha) {
+  sha->h[0] = 0x6a09e667f3bcc908;
+  sha->h[1] = 0xbb67ae8584caa73b;
+  sha->h[2] = 0x3c6ef372fe94f82b;
+  sha->h[3] = 0xa54ff53a5f1d36f1;
+  sha->h[4] = 0x510e527fade682d1;
+  sha->h[5] = 0x9b05688c2b3e6c1f;
+  sha->h[6] = 0x1f83d9abfb41bd6b;
+  sha->h[7] = 0x5be0cd19137e2179;
+
+  sha->Nl = 0;
+  sha->Nh = 0;
+  sha->num = 0;
+  sha->md_len = SHA512_DIGEST_LENGTH;
+  return 1;
+}
+
+uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA512_CTX ctx;
+  static uint8_t buf[SHA384_DIGEST_LENGTH];
+
+  /* TODO(fork): remove this static buffer. */
+  if (out == NULL) {
+    out = buf;
+  }
+
+  SHA384_Init(&ctx);
+  SHA512_Update(&ctx, data, len);
+  SHA512_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA512_CTX ctx;
+  static uint8_t buf[SHA512_DIGEST_LENGTH];
+
+  /* TODO(fork): remove this static buffer. */
+  if (out == NULL) {
+    out = buf;
+  }
+  SHA512_Init(&ctx);
+  SHA512_Update(&ctx, data, len);
+  SHA512_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
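+
+/* Because of the fallback static buffers noted in the TODOs above, calling
+ * SHA384() or SHA512() with out == NULL is not thread-safe. Passing a
+ * caller-owned buffer avoids the shared state (illustrative sketch):
+ *
+ *   uint8_t digest[SHA512_DIGEST_LENGTH];
+ *   SHA512((const uint8_t *)"abc", 3, digest);
+ */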
+
+#if !defined(SHA512_ASM)
+static
+#endif
+void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);
+
+
+int SHA384_Final(unsigned char *md, SHA512_CTX *sha) {
+  return SHA512_Final(md, sha);
+}
+
+int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len) {
+  return SHA512_Update(sha, data, len);
+}
+
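+/* If the block function cannot handle unaligned input, bounce a misaligned
+ * block through the context's aligned buffer first. */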
+void SHA512_Transform(SHA512_CTX *c, const unsigned char *data) {
+#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+  if ((size_t)data % sizeof(c->u.d[0]) != 0) {
+    memcpy(c->u.p, data, sizeof(c->u.p));
+    data = c->u.p;
+  }
+#endif
+  sha512_block_data_order(c, data, 1);
+}
+
+int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
+  uint64_t l;
+  uint8_t *p = c->u.p;
+  const uint8_t *data = (const uint8_t *)in_data;
+
+  if (len == 0) {
+    return 1;
+  }
+
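+  /* Nh:Nl together form a 128-bit count of message bits (stored big-endian
+   * at finalization): len bytes contribute len << 3 bits to Nl, with the
+   * carry and the bits shifted out of the top (len >> 61) added into Nh. */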
+  l = (c->Nl + (((uint64_t)len) << 3)) & 0xffffffffffffffff;
+  if (l < c->Nl) {
+    c->Nh++;
+  }
+  if (sizeof(len) >= 8) {
+    c->Nh += (((uint64_t)len) >> 61);
+  }
+  c->Nl = l;
+
+  if (c->num != 0) {
+    size_t n = sizeof(c->u) - c->num;
+
+    if (len < n) {
+      memcpy(p + c->num, data, len);
+      c->num += (unsigned int)len;
+      return 1;
+    } else {
+      memcpy(p + c->num, data, n);
+      c->num = 0;
+      len -= n;
+      data += n;
+      sha512_block_data_order(c, p, 1);
+    }
+  }
+
+  if (len >= sizeof(c->u)) {
+#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+    if ((size_t)data % sizeof(c->u.d[0]) != 0) {
+      while (len >= sizeof(c->u)) {
+        memcpy(p, data, sizeof(c->u));
+        sha512_block_data_order(c, p, 1);
+        len -= sizeof(c->u);
+        data += sizeof(c->u);
+      }
+    } else
+#endif
+    {
+      sha512_block_data_order(c, data, len / sizeof(c->u));
+      data += len;
+      len %= sizeof(c->u);
+      data -= len;
+    }
+  }
+
+  if (len != 0) {
+    memcpy(p, data, len);
+    c->num = (int)len;
+  }
+
+  return 1;
+}
+
+int SHA512_Final(unsigned char *md, SHA512_CTX *sha) {
+  uint8_t *p = (uint8_t *)sha->u.p;
+  size_t n = sha->num;
+
+  p[n] = 0x80; /* There is always room for one. */
+  n++;
+  if (n > (sizeof(sha->u) - 16)) {
+    memset(p + n, 0, sizeof(sha->u) - n);
+    n = 0;
+    sha512_block_data_order(sha, p, 1);
+  }
+
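+  /* Zero-pad up to the last 16 bytes, then store the 128-bit bit count Nh:Nl
+   * big-endian at the end of the block, per FIPS 180-4. */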
+  memset(p + n, 0, sizeof(sha->u) - 16 - n);
+  p[sizeof(sha->u) - 1] = (uint8_t)(sha->Nl);
+  p[sizeof(sha->u) - 2] = (uint8_t)(sha->Nl >> 8);
+  p[sizeof(sha->u) - 3] = (uint8_t)(sha->Nl >> 16);
+  p[sizeof(sha->u) - 4] = (uint8_t)(sha->Nl >> 24);
+  p[sizeof(sha->u) - 5] = (uint8_t)(sha->Nl >> 32);
+  p[sizeof(sha->u) - 6] = (uint8_t)(sha->Nl >> 40);
+  p[sizeof(sha->u) - 7] = (uint8_t)(sha->Nl >> 48);
+  p[sizeof(sha->u) - 8] = (uint8_t)(sha->Nl >> 56);
+  p[sizeof(sha->u) - 9] = (uint8_t)(sha->Nh);
+  p[sizeof(sha->u) - 10] = (uint8_t)(sha->Nh >> 8);
+  p[sizeof(sha->u) - 11] = (uint8_t)(sha->Nh >> 16);
+  p[sizeof(sha->u) - 12] = (uint8_t)(sha->Nh >> 24);
+  p[sizeof(sha->u) - 13] = (uint8_t)(sha->Nh >> 32);
+  p[sizeof(sha->u) - 14] = (uint8_t)(sha->Nh >> 40);
+  p[sizeof(sha->u) - 15] = (uint8_t)(sha->Nh >> 48);
+  p[sizeof(sha->u) - 16] = (uint8_t)(sha->Nh >> 56);
+
+  sha512_block_data_order(sha, p, 1);
+
+  if (md == NULL) {
+    return 0;
+  }
+
+  switch (sha->md_len) {
+    /* Let the compiler decide whether it's appropriate to unroll... */
+    case SHA384_DIGEST_LENGTH:
+      for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) {
+        uint64_t t = sha->h[n];
+
+        *(md++) = (uint8_t)(t >> 56);
+        *(md++) = (uint8_t)(t >> 48);
+        *(md++) = (uint8_t)(t >> 40);
+        *(md++) = (uint8_t)(t >> 32);
+        *(md++) = (uint8_t)(t >> 24);
+        *(md++) = (uint8_t)(t >> 16);
+        *(md++) = (uint8_t)(t >> 8);
+        *(md++) = (uint8_t)(t);
+      }
+      break;
+    case SHA512_DIGEST_LENGTH:
+      for (n = 0; n < SHA512_DIGEST_LENGTH / 8; n++) {
+        uint64_t t = sha->h[n];
+
+        *(md++) = (uint8_t)(t >> 56);
+        *(md++) = (uint8_t)(t >> 48);
+        *(md++) = (uint8_t)(t >> 40);
+        *(md++) = (uint8_t)(t >> 32);
+        *(md++) = (uint8_t)(t >> 24);
+        *(md++) = (uint8_t)(t >> 16);
+        *(md++) = (uint8_t)(t >> 8);
+        *(md++) = (uint8_t)(t);
+      }
+      break;
+    /* ... and make sure md_len is not abused. */
+    default:
+      return 0;
+  }
+
+  return 1;
+}
+
+#ifndef SHA512_ASM
+static const uint64_t K512[80] = {
+    0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f,
+    0xe9b5dba58189dbbc, 0x3956c25bf348b538, 0x59f111f1b605d019,
+    0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242,
+    0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
+    0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
+    0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
+    0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275,
+    0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
+    0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f,
+    0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
+    0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc,
+    0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
+    0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6,
+    0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001,
+    0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
+    0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
+    0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99,
+    0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
+    0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc,
+    0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
+    0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915,
+    0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207,
+    0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba,
+    0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
+    0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
+    0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
+    0x5fcb6fab3ad6faec, 0x6c44198c4a475817};
+
+#if defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM)
+#if defined(__x86_64) || defined(__x86_64__)
+#define ROTR(a, n)                                         \
+  ({                                                       \
+    uint64_t ret;                                          \
+    asm("rorq %1,%0" : "=r"(ret) : "J"(n), "0"(a) : "cc"); \
+    ret;                                                   \
+  })
+#define PULL64(x)                               \
+  ({                                            \
+    uint64_t ret = *((const uint64_t *)(&(x))); \
+    asm("bswapq	%0" : "=r"(ret) : "0"(ret));    \
+    ret;                                        \
+  })
+#elif (defined(__i386) || defined(__i386__))
+#define PULL64(x)                                                         \
+  ({                                                                      \
+    const unsigned int *p = (const unsigned int *)(&(x));                 \
+    unsigned int hi = p[0], lo = p[1];                                    \
+    asm("bswapl %0; bswapl %1;" : "=r"(lo), "=r"(hi) : "0"(lo), "1"(hi)); \
+    ((uint64_t)hi) << 32 | lo;                                            \
+  })
+#elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+#define ROTR(a, n)                                       \
+  ({                                                     \
+    uint64_t ret;                                        \
+    asm("rotrdi %0,%1,%2" : "=r"(ret) : "r"(a), "K"(n)); \
+    ret;                                                 \
+  })
+#elif defined(__aarch64__)
+#define ROTR(a, n)                                    \
+  ({                                                  \
+    uint64_t ret;                                     \
+    asm("ror %0,%1,%2" : "=r"(ret) : "r"(a), "I"(n)); \
+    ret;                                              \
+  })
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define PULL64(x)                                                       \
+  ({                                                                    \
+    uint64_t ret;                                                       \
+    asm("rev	%0,%1" : "=r"(ret) : "r"(*((const uint64_t *)(&(x))))); \
+    ret;                                                                \
+  })
+#endif
+#endif
+#elif defined(_MSC_VER)
+#if defined(_WIN64) /* applies to both IA-64 and AMD64 */
+#pragma intrinsic(_rotr64)
+#define ROTR(a, n) _rotr64((a), n)
+#endif
+#if defined(_M_IX86) && !defined(OPENSSL_NO_ASM)
+static uint64_t __fastcall __pull64be(const void *x) {
+  _asm mov edx, [ecx + 0]
+  _asm mov eax, [ecx + 4]
+  _asm bswap edx
+  _asm bswap eax
+}
+#define PULL64(x) __pull64be(&(x))
+#if _MSC_VER <= 1200
+#pragma inline_depth(0)
+#endif
+#endif
+#endif
+
+#ifndef PULL64
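+/* Portable fallback: assemble the big-endian 64-bit word one byte at a
+ * time. */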
+#define B(x, j) \
+  (((uint64_t)(*(((const unsigned char *)(&x)) + j))) << ((7 - j) * 8))
+#define PULL64(x)                                                        \
+  (B(x, 0) | B(x, 1) | B(x, 2) | B(x, 3) | B(x, 4) | B(x, 5) | B(x, 6) | \
+   B(x, 7))
+#endif
+
+#ifndef ROTR
+/* Portable fallback; modern compilers recognize this idiom as a rotate
+ * instruction where one is available. */
+#define ROTR(x, s) (((x) >> (s)) | ((x) << (64 - (s))))
+#endif
+
+#define Sigma0(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
+#define Sigma1(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
+#define sigma0(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ ((x) >> 7))
+#define sigma1(x) (ROTR((x), 19) ^ ROTR((x), 61) ^ ((x) >> 6))
+
+#define Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+/*
+ * This code should give better results on a 32-bit CPU with fewer than
+ * ~24 registers, in both size and performance...
+ */
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+                                    size_t num) {
+  const uint64_t *W = in;
+  uint64_t A, E, T;
+  uint64_t X[9 + 80], *F;
+  int i;
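+  /* X[] is used as a sliding window: F steps down one slot per round, so
+   * that F[1..8] always hold the previous round's working variables and
+   * F[9..24] hold the 16 most recent message-schedule words. Keeping this
+   * state in memory rather than in eight 64-bit register pairs eases
+   * register pressure on 32-bit targets. */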
+
+  while (num--) {
+    F = X + 80;
+    A = ctx->h[0];
+    F[1] = ctx->h[1];
+    F[2] = ctx->h[2];
+    F[3] = ctx->h[3];
+    E = ctx->h[4];
+    F[5] = ctx->h[5];
+    F[6] = ctx->h[6];
+    F[7] = ctx->h[7];
+
+    for (i = 0; i < 16; i++, F--) {
+      T = PULL64(W[i]);
+      F[0] = A;
+      F[4] = E;
+      F[8] = T;
+      T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i];
+      E = F[3] + T;
+      A = T + Sigma0(A) + Maj(A, F[1], F[2]);
+    }
+
+    for (; i < 80; i++, F--) {
+      T = sigma0(F[8 + 16 - 1]);
+      T += sigma1(F[8 + 16 - 14]);
+      T += F[8 + 16] + F[8 + 16 - 9];
+
+      F[0] = A;
+      F[4] = E;
+      F[8] = T;
+      T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i];
+      E = F[3] + T;
+      A = T + Sigma0(A) + Maj(A, F[1], F[2]);
+    }
+
+    ctx->h[0] += A;
+    ctx->h[1] += F[1];
+    ctx->h[2] += F[2];
+    ctx->h[3] += F[3];
+    ctx->h[4] += E;
+    ctx->h[5] += F[5];
+    ctx->h[6] += F[6];
+    ctx->h[7] += F[7];
+
+    W += 16;
+  }
+}
+
+#else
+
+#define ROUND_00_15(i, a, b, c, d, e, f, g, h)   \
+  do {                                           \
+    T1 += h + Sigma1(e) + Ch(e, f, g) + K512[i]; \
+    h = Sigma0(a) + Maj(a, b, c);                \
+    d += T1;                                     \
+    h += T1;                                     \
+  } while (0)
+
+#define ROUND_16_80(i, j, a, b, c, d, e, f, g, h, X)   \
+  do {                                                 \
+    s0 = X[(j + 1) & 0x0f];                            \
+    s0 = sigma0(s0);                                   \
+    s1 = X[(j + 14) & 0x0f];                           \
+    s1 = sigma1(s1);                                   \
+    T1 = X[(j) & 0x0f] += s0 + s1 + X[(j + 9) & 0x0f]; \
+    ROUND_00_15(i + j, a, b, c, d, e, f, g, h);        \
+  } while (0)
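+
+/* The same 16-word ring-buffer message schedule as in sha256.c, widened to
+ * 64-bit words and extended to 80 rounds. */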
+
+static void sha512_block_data_order(SHA512_CTX *ctx, const void *in,
+                                    size_t num) {
+  const uint64_t *W = in;
+  uint64_t a, b, c, d, e, f, g, h, s0, s1, T1;
+  uint64_t X[16];
+  int i;
+
+  while (num--) {
+
+    a = ctx->h[0];
+    b = ctx->h[1];
+    c = ctx->h[2];
+    d = ctx->h[3];
+    e = ctx->h[4];
+    f = ctx->h[5];
+    g = ctx->h[6];
+    h = ctx->h[7];
+
+    T1 = X[0] = PULL64(W[0]);
+    ROUND_00_15(0, a, b, c, d, e, f, g, h);
+    T1 = X[1] = PULL64(W[1]);
+    ROUND_00_15(1, h, a, b, c, d, e, f, g);
+    T1 = X[2] = PULL64(W[2]);
+    ROUND_00_15(2, g, h, a, b, c, d, e, f);
+    T1 = X[3] = PULL64(W[3]);
+    ROUND_00_15(3, f, g, h, a, b, c, d, e);
+    T1 = X[4] = PULL64(W[4]);
+    ROUND_00_15(4, e, f, g, h, a, b, c, d);
+    T1 = X[5] = PULL64(W[5]);
+    ROUND_00_15(5, d, e, f, g, h, a, b, c);
+    T1 = X[6] = PULL64(W[6]);
+    ROUND_00_15(6, c, d, e, f, g, h, a, b);
+    T1 = X[7] = PULL64(W[7]);
+    ROUND_00_15(7, b, c, d, e, f, g, h, a);
+    T1 = X[8] = PULL64(W[8]);
+    ROUND_00_15(8, a, b, c, d, e, f, g, h);
+    T1 = X[9] = PULL64(W[9]);
+    ROUND_00_15(9, h, a, b, c, d, e, f, g);
+    T1 = X[10] = PULL64(W[10]);
+    ROUND_00_15(10, g, h, a, b, c, d, e, f);
+    T1 = X[11] = PULL64(W[11]);
+    ROUND_00_15(11, f, g, h, a, b, c, d, e);
+    T1 = X[12] = PULL64(W[12]);
+    ROUND_00_15(12, e, f, g, h, a, b, c, d);
+    T1 = X[13] = PULL64(W[13]);
+    ROUND_00_15(13, d, e, f, g, h, a, b, c);
+    T1 = X[14] = PULL64(W[14]);
+    ROUND_00_15(14, c, d, e, f, g, h, a, b);
+    T1 = X[15] = PULL64(W[15]);
+    ROUND_00_15(15, b, c, d, e, f, g, h, a);
+
+    for (i = 16; i < 80; i += 16) {
+      ROUND_16_80(i, 0, a, b, c, d, e, f, g, h, X);
+      ROUND_16_80(i, 1, h, a, b, c, d, e, f, g, X);
+      ROUND_16_80(i, 2, g, h, a, b, c, d, e, f, X);
+      ROUND_16_80(i, 3, f, g, h, a, b, c, d, e, X);
+      ROUND_16_80(i, 4, e, f, g, h, a, b, c, d, X);
+      ROUND_16_80(i, 5, d, e, f, g, h, a, b, c, X);
+      ROUND_16_80(i, 6, c, d, e, f, g, h, a, b, X);
+      ROUND_16_80(i, 7, b, c, d, e, f, g, h, a, X);
+      ROUND_16_80(i, 8, a, b, c, d, e, f, g, h, X);
+      ROUND_16_80(i, 9, h, a, b, c, d, e, f, g, X);
+      ROUND_16_80(i, 10, g, h, a, b, c, d, e, f, X);
+      ROUND_16_80(i, 11, f, g, h, a, b, c, d, e, X);
+      ROUND_16_80(i, 12, e, f, g, h, a, b, c, d, X);
+      ROUND_16_80(i, 13, d, e, f, g, h, a, b, c, X);
+      ROUND_16_80(i, 14, c, d, e, f, g, h, a, b, X);
+      ROUND_16_80(i, 15, b, c, d, e, f, g, h, a, X);
+    }
+
+    ctx->h[0] += a;
+    ctx->h[1] += b;
+    ctx->h[2] += c;
+    ctx->h[3] += d;
+    ctx->h[4] += e;
+    ctx->h[5] += f;
+    ctx->h[6] += g;
+    ctx->h[7] += h;
+
+    W += 16;
+  }
+}
+
+#endif
+
+#endif /* SHA512_ASM */