Sync vendored copies of Go poly1305 and curve25519.

Taken from revision 3cb07270c9455e8ad27956a70891c962d121a228 of
go-crypto. Some of the changes look like they might fix some of the
crashes we've been seeing on the ARM bots.

Change-Id: I127fd358db553eae4805e777011a89124f595ff0
Reviewed-on: https://boringssl-review.googlesource.com/14544
Commit-Queue: David Benjamin <davidben@google.com>
Commit-Queue: Adam Langley <agl@google.com>
Reviewed-by: Adam Langley <agl@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
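
For reference, the poly1305 package being synced exposes a one-shot Sum
function (the updated tests below call it the same way). A minimal usage
sketch, assuming the upstream import path as a stand-in for the vendored
location:

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/poly1305" // stand-in for the vendored copy
)

func main() {
	// A poly1305 key must only ever be used to authenticate one message.
	var key [32]byte
	copy(key[:], "this is 32-byte key for Poly1305")

	var tag [16]byte
	poly1305.Sum(&tag, []byte("Hello world!"), &key)
	fmt.Println(hex.EncodeToString(tag[:]))
}
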
diff --git a/ssl/test/runner/curve25519/const_amd64.h b/ssl/test/runner/curve25519/const_amd64.h
new file mode 100644
index 0000000..80ad222
--- /dev/null
+++ b/ssl/test/runner/curve25519/const_amd64.h
@@ -0,0 +1,8 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+#define REDMASK51     0x0007FFFFFFFFFFFF
diff --git a/ssl/test/runner/curve25519/const_amd64.s b/ssl/test/runner/curve25519/const_amd64.s
index 797f9b0..0ad5398 100644
--- a/ssl/test/runner/curve25519/const_amd64.s
+++ b/ssl/test/runner/curve25519/const_amd64.s
@@ -7,8 +7,8 @@
 
 // +build amd64,!gccgo,!appengine
 
-DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF
-GLOBL ·REDMASK51(SB), 8, $8
+// These constants cannot be encoded in non-MOVQ immediates.
+// We access them directly from memory instead.
 
 DATA ·_121666_213(SB)/8, $996687872
 GLOBL ·_121666_213(SB), 8, $8
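
The REDMASK51 value that moves into const_amd64.h is 2^51 - 1, the
per-limb mask for the radix-2^51 representation of field elements mod
2^255 - 19 used throughout these files; the SHRQ $51 / IMUL3Q $19
sequences below are the matching carry chain, which folds overflow out
of the top limb back into the bottom one times 19 (since 2^255 = 19 mod
2^255 - 19). A rough Go sketch of that carry step, for orientation only;
the assembly interleaves it with the multiplications:

package main

import "fmt"

// redMask51 mirrors REDMASK51 from const_amd64.h: the low 51 bits of a
// limb in the radix-2^51 representation of GF(2^255-19) elements.
const redMask51 = 0x0007FFFFFFFFFFFF // 2^51 - 1

// carry propagates limb overflow upward and folds the carry out of the
// top limb back into the bottom one multiplied by 19, because
// 2^255 = 19 (mod 2^255-19).
func carry(h *[5]uint64) {
	var c uint64
	for i := 0; i < 5; i++ {
		c = h[i] >> 51
		h[i] &= redMask51
		if i < 4 {
			h[i+1] += c
		}
	}
	h[0] += 19 * c
}

func main() {
	h := [5]uint64{1 << 52, 0, 0, 0, 1 << 51}
	carry(&h)
	fmt.Println(h) // [19 2 0 0 0]: both overflows resolved
}
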
diff --git a/ssl/test/runner/curve25519/freeze_amd64.s b/ssl/test/runner/curve25519/freeze_amd64.s
index 37599fa..536479b 100644
--- a/ssl/test/runner/curve25519/freeze_amd64.s
+++ b/ssl/test/runner/curve25519/freeze_amd64.s
@@ -7,29 +7,18 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func freeze(inout *[5]uint64)
-TEXT ·freeze(SB),7,$96-8
+TEXT ·freeze(SB),7,$0-8
 	MOVQ inout+0(FP), DI
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32,SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
 	MOVQ 0(DI),SI
 	MOVQ 8(DI),DX
 	MOVQ 16(DI),CX
 	MOVQ 24(DI),R8
 	MOVQ 32(DI),R9
-	MOVQ ·REDMASK51(SB),AX
+	MOVQ $REDMASK51,AX
 	MOVQ AX,R10
 	SUBQ $18,R10
 	MOVQ $3,R11
@@ -81,14 +70,4 @@
 	MOVQ CX,16(DI)
 	MOVQ R8,24(DI)
 	MOVQ R9,32(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/ssl/test/runner/curve25519/ladderstep_amd64.s b/ssl/test/runner/curve25519/ladderstep_amd64.s
index 3949f9c..7074e5c 100644
--- a/ssl/test/runner/curve25519/ladderstep_amd64.s
+++ b/ssl/test/runner/curve25519/ladderstep_amd64.s
@@ -7,23 +7,12 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func ladderstep(inout *[5][5]uint64)
-TEXT ·ladderstep(SB),0,$384-8
+TEXT ·ladderstep(SB),0,$296-8
 	MOVQ inout+0(FP),DI
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32,SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
 	MOVQ 40(DI),SI
 	MOVQ 48(DI),DX
 	MOVQ 56(DI),CX
@@ -49,204 +38,89 @@
 	SUBQ 96(DI),R11
 	SUBQ 104(DI),R12
 	SUBQ 112(DI),R13
-	MOVQ SI,56(SP)
-	MOVQ DX,64(SP)
-	MOVQ CX,72(SP)
-	MOVQ R8,80(SP)
-	MOVQ R9,88(SP)
-	MOVQ AX,96(SP)
-	MOVQ R10,104(SP)
-	MOVQ R11,112(SP)
-	MOVQ R12,120(SP)
-	MOVQ R13,128(SP)
-	MOVQ 96(SP),AX
-	MULQ 96(SP)
+	MOVQ SI,0(SP)
+	MOVQ DX,8(SP)
+	MOVQ CX,16(SP)
+	MOVQ R8,24(SP)
+	MOVQ R9,32(SP)
+	MOVQ AX,40(SP)
+	MOVQ R10,48(SP)
+	MOVQ R11,56(SP)
+	MOVQ R12,64(SP)
+	MOVQ R13,72(SP)
+	MOVQ 40(SP),AX
+	MULQ 40(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 104(SP)
+	MULQ 48(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 112(SP)
+	MULQ 56(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 96(SP),AX
+	MOVQ 40(SP),AX
 	SHLQ $1,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 104(SP),AX
-	MULQ 104(SP)
+	MOVQ 48(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 104(SP),AX
+	MOVQ 48(SP),AX
 	SHLQ $1,AX
-	MULQ 112(SP)
+	MULQ 56(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 104(SP),AX
+	MOVQ 48(SP),AX
 	SHLQ $1,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 104(SP),DX
+	MOVQ 48(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 112(SP),AX
-	MULQ 112(SP)
-	ADDQ AX,R14
-	ADCQ DX,R15
-	MOVQ 112(SP),DX
-	IMUL3Q $38,DX,AX
-	MULQ 120(SP)
-	ADDQ AX,SI
-	ADCQ DX,CX
-	MOVQ 112(SP),DX
-	IMUL3Q $38,DX,AX
-	MULQ 128(SP)
-	ADDQ AX,R8
-	ADCQ DX,R9
-	MOVQ 120(SP),DX
-	IMUL3Q $19,DX,AX
-	MULQ 120(SP)
-	ADDQ AX,R8
-	ADCQ DX,R9
-	MOVQ 120(SP),DX
-	IMUL3Q $38,DX,AX
-	MULQ 128(SP)
-	ADDQ AX,R10
-	ADCQ DX,R11
-	MOVQ 128(SP),DX
-	IMUL3Q $19,DX,AX
-	MULQ 128(SP)
-	ADDQ AX,R12
-	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
-	SHLQ $13,CX:SI
-	ANDQ DX,SI
-	SHLQ $13,R9:R8
-	ANDQ DX,R8
-	ADDQ CX,R8
-	SHLQ $13,R11:R10
-	ANDQ DX,R10
-	ADDQ R9,R10
-	SHLQ $13,R13:R12
-	ANDQ DX,R12
-	ADDQ R11,R12
-	SHLQ $13,R15:R14
-	ANDQ DX,R14
-	ADDQ R13,R14
-	IMUL3Q $19,R15,CX
-	ADDQ CX,SI
-	MOVQ SI,CX
-	SHRQ $51,CX
-	ADDQ R8,CX
-	ANDQ DX,SI
-	MOVQ CX,R8
-	SHRQ $51,CX
-	ADDQ R10,CX
-	ANDQ DX,R8
-	MOVQ CX,R9
-	SHRQ $51,CX
-	ADDQ R12,CX
-	ANDQ DX,R9
-	MOVQ CX,AX
-	SHRQ $51,CX
-	ADDQ R14,CX
-	ANDQ DX,AX
-	MOVQ CX,R10
-	SHRQ $51,CX
-	IMUL3Q $19,CX,CX
-	ADDQ CX,SI
-	ANDQ DX,R10
-	MOVQ SI,136(SP)
-	MOVQ R8,144(SP)
-	MOVQ R9,152(SP)
-	MOVQ AX,160(SP)
-	MOVQ R10,168(SP)
 	MOVQ 56(SP),AX
 	MULQ 56(SP)
-	MOVQ AX,SI
-	MOVQ DX,CX
-	MOVQ 56(SP),AX
-	SHLQ $1,AX
-	MULQ 64(SP)
-	MOVQ AX,R8
-	MOVQ DX,R9
-	MOVQ 56(SP),AX
-	SHLQ $1,AX
-	MULQ 72(SP)
-	MOVQ AX,R10
-	MOVQ DX,R11
-	MOVQ 56(SP),AX
-	SHLQ $1,AX
-	MULQ 80(SP)
-	MOVQ AX,R12
-	MOVQ DX,R13
-	MOVQ 56(SP),AX
-	SHLQ $1,AX
-	MULQ 88(SP)
-	MOVQ AX,R14
-	MOVQ DX,R15
-	MOVQ 64(SP),AX
-	MULQ 64(SP)
-	ADDQ AX,R10
-	ADCQ DX,R11
-	MOVQ 64(SP),AX
-	SHLQ $1,AX
-	MULQ 72(SP)
-	ADDQ AX,R12
-	ADCQ DX,R13
-	MOVQ 64(SP),AX
-	SHLQ $1,AX
-	MULQ 80(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
+	MOVQ 56(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 56(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 64(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
 	MOVQ 64(SP),DX
 	IMUL3Q $38,DX,AX
-	MULQ 88(SP)
-	ADDQ AX,SI
-	ADCQ DX,CX
-	MOVQ 72(SP),AX
 	MULQ 72(SP)
-	ADDQ AX,R14
-	ADCQ DX,R15
-	MOVQ 72(SP),DX
-	IMUL3Q $38,DX,AX
-	MULQ 80(SP)
-	ADDQ AX,SI
-	ADCQ DX,CX
-	MOVQ 72(SP),DX
-	IMUL3Q $38,DX,AX
-	MULQ 88(SP)
-	ADDQ AX,R8
-	ADCQ DX,R9
-	MOVQ 80(SP),DX
-	IMUL3Q $19,DX,AX
-	MULQ 80(SP)
-	ADDQ AX,R8
-	ADCQ DX,R9
-	MOVQ 80(SP),DX
-	IMUL3Q $38,DX,AX
-	MULQ 88(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 88(SP),DX
+	MOVQ 72(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 88(SP)
+	MULQ 72(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -284,11 +158,126 @@
 	IMUL3Q $19,CX,CX
 	ADDQ CX,SI
 	ANDQ DX,R10
-	MOVQ SI,176(SP)
-	MOVQ R8,184(SP)
-	MOVQ R9,192(SP)
-	MOVQ AX,200(SP)
-	MOVQ R10,208(SP)
+	MOVQ SI,80(SP)
+	MOVQ R8,88(SP)
+	MOVQ R9,96(SP)
+	MOVQ AX,104(SP)
+	MOVQ R10,112(SP)
+	MOVQ 0(SP),AX
+	MULQ 0(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 8(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 16(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 24(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 32(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	SHLQ $1,AX
+	MULQ 16(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SP),AX
+	SHLQ $1,AX
+	MULQ 24(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 16(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 16(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 16(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 24(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 24(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 32(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,120(SP)
+	MOVQ R8,128(SP)
+	MOVQ R9,136(SP)
+	MOVQ AX,144(SP)
+	MOVQ R10,152(SP)
 	MOVQ SI,SI
 	MOVQ R8,DX
 	MOVQ R9,CX
@@ -299,16 +288,16 @@
 	ADDQ ·_2P1234(SB),CX
 	ADDQ ·_2P1234(SB),R8
 	ADDQ ·_2P1234(SB),R9
-	SUBQ 136(SP),SI
-	SUBQ 144(SP),DX
-	SUBQ 152(SP),CX
-	SUBQ 160(SP),R8
-	SUBQ 168(SP),R9
-	MOVQ SI,216(SP)
-	MOVQ DX,224(SP)
-	MOVQ CX,232(SP)
-	MOVQ R8,240(SP)
-	MOVQ R9,248(SP)
+	SUBQ 80(SP),SI
+	SUBQ 88(SP),DX
+	SUBQ 96(SP),CX
+	SUBQ 104(SP),R8
+	SUBQ 112(SP),R9
+	MOVQ SI,160(SP)
+	MOVQ DX,168(SP)
+	MOVQ CX,176(SP)
+	MOVQ R8,184(SP)
+	MOVQ R9,192(SP)
 	MOVQ 120(DI),SI
 	MOVQ 128(DI),DX
 	MOVQ 136(DI),CX
@@ -334,124 +323,124 @@
 	SUBQ 176(DI),R11
 	SUBQ 184(DI),R12
 	SUBQ 192(DI),R13
-	MOVQ SI,256(SP)
-	MOVQ DX,264(SP)
-	MOVQ CX,272(SP)
-	MOVQ R8,280(SP)
-	MOVQ R9,288(SP)
-	MOVQ AX,296(SP)
-	MOVQ R10,304(SP)
-	MOVQ R11,312(SP)
-	MOVQ R12,320(SP)
-	MOVQ R13,328(SP)
-	MOVQ 280(SP),SI
+	MOVQ SI,200(SP)
+	MOVQ DX,208(SP)
+	MOVQ CX,216(SP)
+	MOVQ R8,224(SP)
+	MOVQ R9,232(SP)
+	MOVQ AX,240(SP)
+	MOVQ R10,248(SP)
+	MOVQ R11,256(SP)
+	MOVQ R12,264(SP)
+	MOVQ R13,272(SP)
+	MOVQ 224(SP),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,336(SP)
-	MULQ 112(SP)
+	MOVQ AX,280(SP)
+	MULQ 56(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 288(SP),DX
+	MOVQ 232(SP),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,344(SP)
-	MULQ 104(SP)
+	MOVQ AX,288(SP)
+	MULQ 48(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 256(SP),AX
-	MULQ 96(SP)
+	MOVQ 200(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 256(SP),AX
-	MULQ 104(SP)
+	MOVQ 200(SP),AX
+	MULQ 48(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 256(SP),AX
-	MULQ 112(SP)
+	MOVQ 200(SP),AX
+	MULQ 56(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 256(SP),AX
-	MULQ 120(SP)
+	MOVQ 200(SP),AX
+	MULQ 64(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 256(SP),AX
-	MULQ 128(SP)
+	MOVQ 200(SP),AX
+	MULQ 72(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 264(SP),AX
-	MULQ 96(SP)
+	MOVQ 208(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 264(SP),AX
-	MULQ 104(SP)
+	MOVQ 208(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 264(SP),AX
-	MULQ 112(SP)
+	MOVQ 208(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 264(SP),AX
-	MULQ 120(SP)
+	MOVQ 208(SP),AX
+	MULQ 64(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 264(SP),DX
+	MOVQ 208(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 272(SP),AX
-	MULQ 96(SP)
+	MOVQ 216(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 272(SP),AX
-	MULQ 104(SP)
+	MOVQ 216(SP),AX
+	MULQ 48(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 272(SP),AX
-	MULQ 112(SP)
+	MOVQ 216(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 272(SP),DX
+	MOVQ 216(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 120(SP)
+	MULQ 64(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 272(SP),DX
+	MOVQ 216(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 128(SP)
+	MULQ 72(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 224(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 224(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 280(SP),AX
+	MULQ 64(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
 	MOVQ 280(SP),AX
-	MULQ 96(SP)
-	ADDQ AX,R12
-	ADCQ DX,R13
-	MOVQ 280(SP),AX
-	MULQ 104(SP)
+	MULQ 72(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 232(SP),AX
+	MULQ 40(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 336(SP),AX
-	MULQ 120(SP)
+	MOVQ 288(SP),AX
+	MULQ 56(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 336(SP),AX
-	MULQ 128(SP)
+	MOVQ 288(SP),AX
+	MULQ 64(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 288(SP),AX
-	MULQ 96(SP)
-	ADDQ AX,R14
-	ADCQ DX,R15
-	MOVQ 344(SP),AX
-	MULQ 112(SP)
-	ADDQ AX,R8
-	ADCQ DX,R9
-	MOVQ 344(SP),AX
-	MULQ 120(SP)
-	ADDQ AX,R10
-	ADCQ DX,R11
-	MOVQ 344(SP),AX
-	MULQ 128(SP)
+	MULQ 72(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -489,119 +478,119 @@
 	IMUL3Q $19,CX,CX
 	ADDQ CX,SI
 	ANDQ DX,R10
-	MOVQ SI,96(SP)
-	MOVQ R8,104(SP)
-	MOVQ R9,112(SP)
-	MOVQ AX,120(SP)
-	MOVQ R10,128(SP)
-	MOVQ 320(SP),SI
+	MOVQ SI,40(SP)
+	MOVQ R8,48(SP)
+	MOVQ R9,56(SP)
+	MOVQ AX,64(SP)
+	MOVQ R10,72(SP)
+	MOVQ 264(SP),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,256(SP)
-	MULQ 72(SP)
+	MOVQ AX,200(SP)
+	MULQ 16(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 328(SP),DX
+	MOVQ 272(SP),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,264(SP)
-	MULQ 64(SP)
+	MOVQ AX,208(SP)
+	MULQ 8(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 296(SP),AX
-	MULQ 56(SP)
+	MOVQ 240(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 296(SP),AX
-	MULQ 64(SP)
+	MOVQ 240(SP),AX
+	MULQ 8(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 296(SP),AX
-	MULQ 72(SP)
+	MOVQ 240(SP),AX
+	MULQ 16(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 296(SP),AX
-	MULQ 80(SP)
+	MOVQ 240(SP),AX
+	MULQ 24(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 296(SP),AX
-	MULQ 88(SP)
+	MOVQ 240(SP),AX
+	MULQ 32(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 304(SP),AX
-	MULQ 56(SP)
+	MOVQ 248(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 304(SP),AX
-	MULQ 64(SP)
+	MOVQ 248(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 304(SP),AX
-	MULQ 72(SP)
+	MOVQ 248(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 304(SP),AX
-	MULQ 80(SP)
+	MOVQ 248(SP),AX
+	MULQ 24(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 304(SP),DX
+	MOVQ 248(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 312(SP),AX
-	MULQ 56(SP)
+	MOVQ 256(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 312(SP),AX
-	MULQ 64(SP)
+	MOVQ 256(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 312(SP),AX
-	MULQ 72(SP)
+	MOVQ 256(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 312(SP),DX
+	MOVQ 256(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 80(SP)
+	MULQ 24(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 312(SP),DX
+	MOVQ 256(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 88(SP)
+	MULQ 32(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 320(SP),AX
-	MULQ 56(SP)
+	MOVQ 264(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 320(SP),AX
-	MULQ 64(SP)
+	MOVQ 264(SP),AX
+	MULQ 8(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 256(SP),AX
-	MULQ 80(SP)
+	MOVQ 200(SP),AX
+	MULQ 24(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 256(SP),AX
-	MULQ 88(SP)
+	MOVQ 200(SP),AX
+	MULQ 32(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 328(SP),AX
-	MULQ 56(SP)
+	MOVQ 272(SP),AX
+	MULQ 0(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 264(SP),AX
-	MULQ 72(SP)
+	MOVQ 208(SP),AX
+	MULQ 16(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 264(SP),AX
-	MULQ 80(SP)
+	MOVQ 208(SP),AX
+	MULQ 24(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 264(SP),AX
-	MULQ 88(SP)
+	MOVQ 208(SP),AX
+	MULQ 32(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -649,16 +638,16 @@
 	ADDQ ·_2P1234(SB),R11
 	ADDQ ·_2P1234(SB),R12
 	ADDQ ·_2P1234(SB),R13
-	ADDQ 96(SP),SI
-	ADDQ 104(SP),R8
-	ADDQ 112(SP),R9
-	ADDQ 120(SP),AX
-	ADDQ 128(SP),R10
-	SUBQ 96(SP),DX
-	SUBQ 104(SP),CX
-	SUBQ 112(SP),R11
-	SUBQ 120(SP),R12
-	SUBQ 128(SP),R13
+	ADDQ 40(SP),SI
+	ADDQ 48(SP),R8
+	ADDQ 56(SP),R9
+	ADDQ 64(SP),AX
+	ADDQ 72(SP),R10
+	SUBQ 40(SP),DX
+	SUBQ 48(SP),CX
+	SUBQ 56(SP),R11
+	SUBQ 64(SP),R12
+	SUBQ 72(SP),R13
 	MOVQ SI,120(DI)
 	MOVQ R8,128(DI)
 	MOVQ R9,136(DI)
@@ -741,7 +730,7 @@
 	MULQ 152(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -856,7 +845,7 @@
 	MULQ 192(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -901,13 +890,13 @@
 	MOVQ R10,192(DI)
 	MOVQ 184(DI),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,56(SP)
+	MOVQ AX,0(SP)
 	MULQ 16(DI)
 	MOVQ AX,SI
 	MOVQ DX,CX
 	MOVQ 192(DI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
+	MOVQ AX,8(SP)
 	MULQ 8(DI)
 	ADDQ AX,SI
 	ADCQ DX,CX
@@ -982,11 +971,11 @@
 	MULQ 8(DI)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 24(DI)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 56(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 32(DI)
 	ADDQ AX,R10
 	ADCQ DX,R11
@@ -994,19 +983,19 @@
 	MULQ 0(DI)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 16(DI)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 24(DI)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 32(DI)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -1049,114 +1038,114 @@
 	MOVQ R9,176(DI)
 	MOVQ AX,184(DI)
 	MOVQ R10,192(DI)
-	MOVQ 200(SP),SI
+	MOVQ 144(SP),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,56(SP)
-	MULQ 152(SP)
+	MOVQ AX,0(SP)
+	MULQ 96(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 208(SP),DX
+	MOVQ 152(SP),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
-	MULQ 144(SP)
+	MOVQ AX,8(SP)
+	MULQ 88(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 176(SP),AX
-	MULQ 136(SP)
+	MOVQ 120(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 176(SP),AX
-	MULQ 144(SP)
+	MOVQ 120(SP),AX
+	MULQ 88(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
-	MOVQ 176(SP),AX
-	MULQ 152(SP)
+	MOVQ 120(SP),AX
+	MULQ 96(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
-	MOVQ 176(SP),AX
-	MULQ 160(SP)
+	MOVQ 120(SP),AX
+	MULQ 104(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
-	MOVQ 176(SP),AX
-	MULQ 168(SP)
+	MOVQ 120(SP),AX
+	MULQ 112(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
-	MOVQ 184(SP),AX
-	MULQ 136(SP)
+	MOVQ 128(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 184(SP),AX
-	MULQ 144(SP)
+	MOVQ 128(SP),AX
+	MULQ 88(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 184(SP),AX
-	MULQ 152(SP)
+	MOVQ 128(SP),AX
+	MULQ 96(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 184(SP),AX
-	MULQ 160(SP)
+	MOVQ 128(SP),AX
+	MULQ 104(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 184(SP),DX
+	MOVQ 128(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 168(SP)
+	MULQ 112(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 192(SP),AX
-	MULQ 136(SP)
+	MOVQ 136(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 192(SP),AX
-	MULQ 144(SP)
+	MOVQ 136(SP),AX
+	MULQ 88(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 192(SP),AX
-	MULQ 152(SP)
+	MOVQ 136(SP),AX
+	MULQ 96(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 192(SP),DX
+	MOVQ 136(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 160(SP)
+	MULQ 104(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
-	MOVQ 192(SP),DX
+	MOVQ 136(SP),DX
 	IMUL3Q $19,DX,AX
-	MULQ 168(SP)
+	MULQ 112(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 200(SP),AX
-	MULQ 136(SP)
+	MOVQ 144(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 200(SP),AX
-	MULQ 144(SP)
+	MOVQ 144(SP),AX
+	MULQ 88(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 56(SP),AX
-	MULQ 160(SP)
+	MOVQ 0(SP),AX
+	MULQ 104(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 56(SP),AX
-	MULQ 168(SP)
+	MOVQ 0(SP),AX
+	MULQ 112(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 208(SP),AX
-	MULQ 136(SP)
+	MOVQ 152(SP),AX
+	MULQ 80(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),AX
-	MULQ 152(SP)
+	MOVQ 8(SP),AX
+	MULQ 96(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 64(SP),AX
-	MULQ 160(SP)
+	MOVQ 8(SP),AX
+	MULQ 104(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
-	MULQ 168(SP)
+	MOVQ 8(SP),AX
+	MULQ 112(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -1199,37 +1188,37 @@
 	MOVQ R9,56(DI)
 	MOVQ AX,64(DI)
 	MOVQ R10,72(DI)
-	MOVQ 216(SP),AX
+	MOVQ 160(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	MOVQ AX,SI
 	MOVQ DX,CX
-	MOVQ 224(SP),AX
+	MOVQ 168(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,CX
 	MOVQ DX,R8
-	MOVQ 232(SP),AX
+	MOVQ 176(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,R8
 	MOVQ DX,R9
-	MOVQ 240(SP),AX
+	MOVQ 184(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,R9
 	MOVQ DX,R10
-	MOVQ 248(SP),AX
+	MOVQ 192(SP),AX
 	MULQ ·_121666_213(SB)
 	SHRQ $13,AX
 	ADDQ AX,R10
 	IMUL3Q $19,DX,DX
 	ADDQ DX,SI
-	ADDQ 136(SP),SI
-	ADDQ 144(SP),CX
-	ADDQ 152(SP),R8
-	ADDQ 160(SP),R9
-	ADDQ 168(SP),R10
+	ADDQ 80(SP),SI
+	ADDQ 88(SP),CX
+	ADDQ 96(SP),R8
+	ADDQ 104(SP),R9
+	ADDQ 112(SP),R10
 	MOVQ SI,80(DI)
 	MOVQ CX,88(DI)
 	MOVQ R8,96(DI)
@@ -1237,112 +1226,112 @@
 	MOVQ R10,112(DI)
 	MOVQ 104(DI),SI
 	IMUL3Q $19,SI,AX
-	MOVQ AX,56(SP)
-	MULQ 232(SP)
+	MOVQ AX,0(SP)
+	MULQ 176(SP)
 	MOVQ AX,SI
 	MOVQ DX,CX
 	MOVQ 112(DI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
-	MULQ 224(SP)
+	MOVQ AX,8(SP)
+	MULQ 168(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 80(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 80(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	MOVQ AX,R8
 	MOVQ DX,R9
 	MOVQ 80(DI),AX
-	MULQ 232(SP)
+	MULQ 176(SP)
 	MOVQ AX,R10
 	MOVQ DX,R11
 	MOVQ 80(DI),AX
-	MULQ 240(SP)
+	MULQ 184(SP)
 	MOVQ AX,R12
 	MOVQ DX,R13
 	MOVQ 80(DI),AX
-	MULQ 248(SP)
+	MULQ 192(SP)
 	MOVQ AX,R14
 	MOVQ DX,R15
 	MOVQ 88(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
 	MOVQ 88(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 88(DI),AX
-	MULQ 232(SP)
+	MULQ 176(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ 88(DI),AX
-	MULQ 240(SP)
+	MULQ 184(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
 	MOVQ 88(DI),DX
 	IMUL3Q $19,DX,AX
-	MULQ 248(SP)
+	MULQ 192(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 96(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 96(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ 96(DI),AX
-	MULQ 232(SP)
+	MULQ 176(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
 	MOVQ 96(DI),DX
 	IMUL3Q $19,DX,AX
-	MULQ 240(SP)
+	MULQ 184(SP)
 	ADDQ AX,SI
 	ADCQ DX,CX
 	MOVQ 96(DI),DX
 	IMUL3Q $19,DX,AX
-	MULQ 248(SP)
+	MULQ 192(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
 	MOVQ 104(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
 	MOVQ 104(DI),AX
-	MULQ 224(SP)
+	MULQ 168(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 56(SP),AX
-	MULQ 240(SP)
+	MOVQ 0(SP),AX
+	MULQ 184(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 56(SP),AX
-	MULQ 248(SP)
+	MOVQ 0(SP),AX
+	MULQ 192(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
 	MOVQ 112(DI),AX
-	MULQ 216(SP)
+	MULQ 160(SP)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ 64(SP),AX
-	MULQ 232(SP)
+	MOVQ 8(SP),AX
+	MULQ 176(SP)
 	ADDQ AX,R8
 	ADCQ DX,R9
-	MOVQ 64(SP),AX
-	MULQ 240(SP)
+	MOVQ 8(SP),AX
+	MULQ 184(SP)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
-	MULQ 248(SP)
+	MOVQ 8(SP),AX
+	MULQ 192(SP)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ ·REDMASK51(SB),DX
+	MOVQ $REDMASK51,DX
 	SHLQ $13,CX:SI
 	ANDQ DX,SI
 	SHLQ $13,R9:R8
@@ -1385,14 +1374,4 @@
 	MOVQ R9,96(DI)
 	MOVQ AX,104(DI)
 	MOVQ R10,112(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/ssl/test/runner/curve25519/mul_amd64.s b/ssl/test/runner/curve25519/mul_amd64.s
index e48d183..b162e65 100644
--- a/ssl/test/runner/curve25519/mul_amd64.s
+++ b/ssl/test/runner/curve25519/mul_amd64.s
@@ -7,36 +7,24 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func mul(dest, a, b *[5]uint64)
-TEXT ·mul(SB),0,$128-24
+TEXT ·mul(SB),0,$16-24
 	MOVQ dest+0(FP), DI
 	MOVQ a+8(FP), SI
 	MOVQ b+16(FP), DX
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32,SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
-	MOVQ DI,56(SP)
 	MOVQ DX,CX
 	MOVQ 24(SI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,64(SP)
+	MOVQ AX,0(SP)
 	MULQ 16(CX)
 	MOVQ AX,R8
 	MOVQ DX,R9
 	MOVQ 32(SI),DX
 	IMUL3Q $19,DX,AX
-	MOVQ AX,72(SP)
+	MOVQ AX,8(SP)
 	MULQ 8(CX)
 	ADDQ AX,R8
 	ADCQ DX,R9
@@ -111,11 +99,11 @@
 	MULQ 8(CX)
 	ADDQ AX,BX
 	ADCQ DX,BP
-	MOVQ 64(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 24(CX)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 64(SP),AX
+	MOVQ 0(SP),AX
 	MULQ 32(CX)
 	ADDQ AX,R12
 	ADCQ DX,R13
@@ -123,19 +111,19 @@
 	MULQ 0(CX)
 	ADDQ AX,BX
 	ADCQ DX,BP
-	MOVQ 72(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 16(CX)
 	ADDQ AX,R10
 	ADCQ DX,R11
-	MOVQ 72(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 24(CX)
 	ADDQ AX,R12
 	ADCQ DX,R13
-	MOVQ 72(SP),AX
+	MOVQ 8(SP),AX
 	MULQ 32(CX)
 	ADDQ AX,R14
 	ADCQ DX,R15
-	MOVQ ·REDMASK51(SB),SI
+	MOVQ $REDMASK51,SI
 	SHLQ $13,R9:R8
 	ANDQ SI,R8
 	SHLQ $13,R11:R10
@@ -178,14 +166,4 @@
 	MOVQ R9,16(DI)
 	MOVQ AX,24(DI)
 	MOVQ R10,32(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/ssl/test/runner/curve25519/square_amd64.s b/ssl/test/runner/curve25519/square_amd64.s
index 78d1a50..4e864a8 100644
--- a/ssl/test/runner/curve25519/square_amd64.s
+++ b/ssl/test/runner/curve25519/square_amd64.s
@@ -7,24 +7,13 @@
 
 // +build amd64,!gccgo,!appengine
 
+#include "const_amd64.h"
+
 // func square(out, in *[5]uint64)
-TEXT ·square(SB),7,$96-16
+TEXT ·square(SB),7,$0-16
 	MOVQ out+0(FP), DI
 	MOVQ in+8(FP), SI
 
-	MOVQ SP,R11
-	MOVQ $31,CX
-	NOTQ CX
-	ANDQ CX,SP
-	ADDQ $32, SP
-
-	MOVQ R11,0(SP)
-	MOVQ R12,8(SP)
-	MOVQ R13,16(SP)
-	MOVQ R14,24(SP)
-	MOVQ R15,32(SP)
-	MOVQ BX,40(SP)
-	MOVQ BP,48(SP)
 	MOVQ 0(SI),AX
 	MULQ 0(SI)
 	MOVQ AX,CX
@@ -97,7 +86,7 @@
 	MULQ 32(SI)
 	ADDQ AX,R13
 	ADCQ DX,R14
-	MOVQ ·REDMASK51(SB),SI
+	MOVQ $REDMASK51,SI
 	SHLQ $13,R8:CX
 	ANDQ SI,CX
 	SHLQ $13,R10:R9
@@ -140,14 +129,4 @@
 	MOVQ R9,16(DI)
 	MOVQ AX,24(DI)
 	MOVQ R10,32(DI)
-	MOVQ 0(SP),R11
-	MOVQ 8(SP),R12
-	MOVQ 16(SP),R13
-	MOVQ 24(SP),R14
-	MOVQ 32(SP),R15
-	MOVQ 40(SP),BX
-	MOVQ 48(SP),BP
-	MOVQ R11,SP
-	MOVQ DI,AX
-	MOVQ SI,DX
 	RET
diff --git a/ssl/test/runner/poly1305/const_amd64.s b/ssl/test/runner/poly1305/const_amd64.s
deleted file mode 100644
index 8e861f3..0000000
--- a/ssl/test/runner/poly1305/const_amd64.s
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-DATA ·SCALE(SB)/8, $0x37F4000000000000
-GLOBL ·SCALE(SB), 8, $8
-DATA ·TWO32(SB)/8, $0x41F0000000000000
-GLOBL ·TWO32(SB), 8, $8
-DATA ·TWO64(SB)/8, $0x43F0000000000000
-GLOBL ·TWO64(SB), 8, $8
-DATA ·TWO96(SB)/8, $0x45F0000000000000
-GLOBL ·TWO96(SB), 8, $8
-DATA ·ALPHA32(SB)/8, $0x45E8000000000000
-GLOBL ·ALPHA32(SB), 8, $8
-DATA ·ALPHA64(SB)/8, $0x47E8000000000000
-GLOBL ·ALPHA64(SB), 8, $8
-DATA ·ALPHA96(SB)/8, $0x49E8000000000000
-GLOBL ·ALPHA96(SB), 8, $8
-DATA ·ALPHA130(SB)/8, $0x4C08000000000000
-GLOBL ·ALPHA130(SB), 8, $8
-DATA ·DOFFSET0(SB)/8, $0x4330000000000000
-GLOBL ·DOFFSET0(SB), 8, $8
-DATA ·DOFFSET1(SB)/8, $0x4530000000000000
-GLOBL ·DOFFSET1(SB), 8, $8
-DATA ·DOFFSET2(SB)/8, $0x4730000000000000
-GLOBL ·DOFFSET2(SB), 8, $8
-DATA ·DOFFSET3(SB)/8, $0x4930000000000000
-GLOBL ·DOFFSET3(SB), 8, $8
-DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000
-GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8
-DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB
-GLOBL ·HOFFSET0(SB), 8, $8
-DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE
-GLOBL ·HOFFSET1(SB), 8, $8
-DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE
-GLOBL ·HOFFSET2(SB), 8, $8
-DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE
-GLOBL ·HOFFSET3(SB), 8, $8
-DATA ·ROUNDING(SB)/2, $0x137f
-GLOBL ·ROUNDING(SB), 8, $2
diff --git a/ssl/test/runner/poly1305/poly1305_amd64.s b/ssl/test/runner/poly1305/poly1305_amd64.s
deleted file mode 100644
index f8d4ee9..0000000
--- a/ssl/test/runner/poly1305/poly1305_amd64.s
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key)
-TEXT ·poly1305(SB),0,$224-32
-	MOVQ out+0(FP),DI
-	MOVQ m+8(FP),SI
-	MOVQ mlen+16(FP),DX
-	MOVQ key+24(FP),CX
-
-	MOVQ SP,R11
-	MOVQ $31,R9
-	NOTQ R9
-	ANDQ R9,SP
-	ADDQ $32,SP
-
-	MOVQ R11,32(SP)
-	MOVQ R12,40(SP)
-	MOVQ R13,48(SP)
-	MOVQ R14,56(SP)
-	MOVQ R15,64(SP)
-	MOVQ BX,72(SP)
-	MOVQ BP,80(SP)
-	FLDCW ·ROUNDING(SB)
-	MOVL 0(CX),R8
-	MOVL 4(CX),R9
-	MOVL 8(CX),AX
-	MOVL 12(CX),R10
-	MOVQ DI,88(SP)
-	MOVQ CX,96(SP)
-	MOVL $0X43300000,108(SP)
-	MOVL $0X45300000,116(SP)
-	MOVL $0X47300000,124(SP)
-	MOVL $0X49300000,132(SP)
-	ANDL $0X0FFFFFFF,R8
-	ANDL $0X0FFFFFFC,R9
-	ANDL $0X0FFFFFFC,AX
-	ANDL $0X0FFFFFFC,R10
-	MOVL R8,104(SP)
-	MOVL R9,112(SP)
-	MOVL AX,120(SP)
-	MOVL R10,128(SP)
-	FMOVD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	FMOVD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FMOVD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FMOVD 128(SP), F0
-	FSUBD ·DOFFSET3(SB), F0
-	FXCHD F0, F3
-	FMOVDP F0, 136(SP)
-	FXCHD F0, F1
-	FMOVD F0, 144(SP)
-	FMULD ·SCALE(SB), F0
-	FMOVDP F0, 152(SP)
-	FMOVD F0, 160(SP)
-	FMULD ·SCALE(SB), F0
-	FMOVDP F0, 168(SP)
-	FMOVD F0, 176(SP)
-	FMULD ·SCALE(SB), F0
-	FMOVDP F0, 184(SP)
-	FLDZ
-	FLDZ
-	FLDZ
-	FLDZ
-	CMPQ DX,$16
-	JB ADDATMOST15BYTES
-	INITIALATLEAST16BYTES:
-	MOVL 12(SI),DI
-	MOVL 8(SI),CX
-	MOVL 4(SI),R8
-	MOVL 0(SI),R9
-	MOVL DI,128(SP)
-	MOVL CX,120(SP)
-	MOVL R8,112(SP)
-	MOVL R9,104(SP)
-	ADDQ $16,SI
-	SUBQ $16,DX
-	FXCHD F0, F3
-	FADDD 128(SP), F0
-	FSUBD ·DOFFSET3MINUSTWO128(SB), F0
-	FXCHD F0, F1
-	FADDD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FXCHD F0, F2
-	FADDD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FXCHD F0, F3
-	FADDD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	CMPQ DX,$16
-	JB MULTIPLYADDATMOST15BYTES
-	MULTIPLYADDATLEAST16BYTES:
-	MOVL 12(SI),DI
-	MOVL 8(SI),CX
-	MOVL 4(SI),R8
-	MOVL 0(SI),R9
-	MOVL DI,128(SP)
-	MOVL CX,120(SP)
-	MOVL R8,112(SP)
-	MOVL R9,104(SP)
-	ADDQ $16,SI
-	SUBQ $16,DX
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F2
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FXCHD F0, F2
-	FADDDP F0,F1
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F4,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F4
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F6,F0
-	FSUBD ·ALPHA96(SB), F0
-	FSUBD F0,F6
-	FXCHD F0, F6
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FXCHD F0, F3
-	FADDDP F0,F1
-	FMOVD 176(SP), F0
-	FMULD F3,F0
-	FMOVD 160(SP), F0
-	FMULD F4,F0
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FMOVD 136(SP), F0
-	FMULDP F0,F6
-	FMOVD 160(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULDP F0,F4
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FMOVD 144(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F3
-	FMOVD 168(SP), F0
-	FMULDP F0,F4
-	FXCHD F0, F3
-	FADDDP F0,F4
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FMOVD 184(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FXCHD F0, F1
-	FMOVD 168(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 152(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F1
-	CMPQ DX,$16
-	FXCHD F0, F2
-	FMOVD 128(SP), F0
-	FSUBD ·DOFFSET3MINUSTWO128(SB), F0
-	FADDDP F0,F1
-	FXCHD F0, F1
-	FMOVD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FMOVD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FADDDP F0,F1
-	FXCHD F0, F2
-	FMOVD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	FADDDP F0,F1
-	JAE MULTIPLYADDATLEAST16BYTES
-	MULTIPLYADDATMOST15BYTES:
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F2
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F5,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F5
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F7,F0
-	FSUBD ·ALPHA96(SB), F0
-	FSUBD F0,F7
-	FXCHD F0, F7
-	FADDDP F0,F1
-	FXCHD F0, F5
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FADDDP F0,F1
-	FMOVD 176(SP), F0
-	FMULD F1,F0
-	FMOVD 160(SP), F0
-	FMULD F2,F0
-	FMOVD 144(SP), F0
-	FMULD F3,F0
-	FMOVD 136(SP), F0
-	FMULDP F0,F4
-	FMOVD 160(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F4
-	FMOVD 168(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F4
-	FMOVD 168(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FMOVD 152(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F1
-	ADDATMOST15BYTES:
-	CMPQ DX,$0
-	JE NOMOREBYTES
-	MOVL $0,0(SP)
-	MOVL $0, 4 (SP)
-	MOVL $0, 8 (SP)
-	MOVL $0, 12 (SP)
-	LEAQ 0(SP),DI
-	MOVQ DX,CX
-	REP; MOVSB
-	MOVB $1,0(DI)
-	MOVL  12 (SP),DI
-	MOVL  8 (SP),SI
-	MOVL  4 (SP),DX
-	MOVL 0(SP),CX
-	MOVL DI,128(SP)
-	MOVL SI,120(SP)
-	MOVL DX,112(SP)
-	MOVL CX,104(SP)
-	FXCHD F0, F3
-	FADDD 128(SP), F0
-	FSUBD ·DOFFSET3(SB), F0
-	FXCHD F0, F2
-	FADDD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FXCHD F0, F1
-	FADDD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FXCHD F0, F3
-	FADDD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F3,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F3
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F6,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F6
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F5,F0
-	FSUBD ·ALPHA96(SB), F0
-	FSUBD F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FXCHD F0, F6
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FXCHD F0, F3
-	FADDDP F0,F1
-	FMOVD 176(SP), F0
-	FMULD F3,F0
-	FMOVD 160(SP), F0
-	FMULD F4,F0
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FMOVD 136(SP), F0
-	FMULDP F0,F6
-	FMOVD 160(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F5
-	FMOVD 144(SP), F0
-	FMULD F6,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F6,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F6,F0
-	FADDDP F0,F4
-	FMOVD 168(SP), F0
-	FMULDP F0,F6
-	FXCHD F0, F5
-	FADDDP F0,F4
-	FMOVD 136(SP), F0
-	FMULD F2,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F2,F0
-	FADDDP F0,F5
-	FMOVD 168(SP), F0
-	FMULD F2,F0
-	FADDDP F0,F3
-	FMOVD 152(SP), F0
-	FMULDP F0,F2
-	FXCHD F0, F1
-	FADDDP F0,F3
-	FXCHD F0, F3
-	FXCHD F0, F2
-	NOMOREBYTES:
-	MOVL $0,R10
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F4,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F4
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F4,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F4
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F6,F0
-	FSUBD ·ALPHA96(SB), F0
-	FXCHD F0, F6
-	FSUBD F6,F0
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FXCHD F0, F4
-	FADDDP F0,F1
-	FXCHD F0, F2
-	FADDDP F0,F3
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FXCHD F0, F3
-	FADDD ·HOFFSET0(SB), F0
-	FXCHD F0, F3
-	FADDD ·HOFFSET1(SB), F0
-	FXCHD F0, F1
-	FADDD ·HOFFSET2(SB), F0
-	FXCHD F0, F2
-	FADDD ·HOFFSET3(SB), F0
-	FXCHD F0, F3
-	FMOVDP F0, 104(SP)
-	FMOVDP F0, 112(SP)
-	FMOVDP F0, 120(SP)
-	FMOVDP F0, 128(SP)
-	MOVL 108(SP),DI
-	ANDL $63,DI
-	MOVL 116(SP),SI
-	ANDL $63,SI
-	MOVL 124(SP),DX
-	ANDL $63,DX
-	MOVL 132(SP),CX
-	ANDL $63,CX
-	MOVL 112(SP),R8
-	ADDL DI,R8
-	MOVQ R8,112(SP)
-	MOVL 120(SP),DI
-	ADCL SI,DI
-	MOVQ DI,120(SP)
-	MOVL 128(SP),DI
-	ADCL DX,DI
-	MOVQ DI,128(SP)
-	MOVL R10,DI
-	ADCL CX,DI
-	MOVQ DI,136(SP)
-	MOVQ $5,DI
-	MOVL 104(SP),SI
-	ADDL SI,DI
-	MOVQ DI,104(SP)
-	MOVL R10,DI
-	MOVQ 112(SP),DX
-	ADCL DX,DI
-	MOVQ DI,112(SP)
-	MOVL R10,DI
-	MOVQ 120(SP),CX
-	ADCL CX,DI
-	MOVQ DI,120(SP)
-	MOVL R10,DI
-	MOVQ 128(SP),R8
-	ADCL R8,DI
-	MOVQ DI,128(SP)
-	MOVQ $0XFFFFFFFC,DI
-	MOVQ 136(SP),R9
-	ADCL R9,DI
-	SARL $16,DI
-	MOVQ DI,R9
-	XORL $0XFFFFFFFF,R9
-	ANDQ DI,SI
-	MOVQ 104(SP),AX
-	ANDQ R9,AX
-	ORQ AX,SI
-	ANDQ DI,DX
-	MOVQ 112(SP),AX
-	ANDQ R9,AX
-	ORQ AX,DX
-	ANDQ DI,CX
-	MOVQ 120(SP),AX
-	ANDQ R9,AX
-	ORQ AX,CX
-	ANDQ DI,R8
-	MOVQ 128(SP),DI
-	ANDQ R9,DI
-	ORQ DI,R8
-	MOVQ 88(SP),DI
-	MOVQ 96(SP),R9
-	ADDL 16(R9),SI
-	ADCL 20(R9),DX
-	ADCL 24(R9),CX
-	ADCL 28(R9),R8
-	MOVL SI,0(DI)
-	MOVL DX,4(DI)
-	MOVL CX,8(DI)
-	MOVL R8,12(DI)
-	MOVQ 32(SP),R11
-	MOVQ 40(SP),R12
-	MOVQ 48(SP),R13
-	MOVQ 56(SP),R14
-	MOVQ 64(SP),R15
-	MOVQ 72(SP),BX
-	MOVQ 80(SP),BP
-	MOVQ R11,SP
-	RET
diff --git a/ssl/test/runner/poly1305/poly1305_arm.s b/ssl/test/runner/poly1305/poly1305_arm.s
deleted file mode 100644
index c153867..0000000
--- a/ssl/test/runner/poly1305/poly1305_arm.s
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 5a from the public
-// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
-
-// +build arm,!gccgo,!appengine
-
-DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
-DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
-DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
-DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
-DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
-GLOBL poly1305_init_constants_armv6<>(SB), 8, $20
-
-// Warning: the linker may use R11 to synthesize certain instructions. Please
-// take care and verify that no synthetic instructions use it.
-
-TEXT poly1305_init_ext_armv6<>(SB),4,$-4
-  MOVM.DB.W [R4-R11], (R13)
-  MOVM.IA.W (R1), [R2-R5]
-  MOVW $poly1305_init_constants_armv6<>(SB), R7
-  MOVW R2, R8
-  MOVW R2>>26, R9
-  MOVW R3>>20, g
-  MOVW R4>>14, R11
-  MOVW R5>>8, R12
-  ORR R3<<6, R9, R9
-  ORR R4<<12, g, g
-  ORR R5<<18, R11, R11
-  MOVM.IA (R7), [R2-R6]
-  AND R8, R2, R2
-  AND R9, R3, R3
-  AND g, R4, R4
-  AND R11, R5, R5
-  AND R12, R6, R6
-  MOVM.IA.W [R2-R6], (R0)
-  EOR R2, R2, R2
-  EOR R3, R3, R3
-  EOR R4, R4, R4
-  EOR R5, R5, R5
-  EOR R6, R6, R6
-  MOVM.IA.W [R2-R6], (R0)
-  MOVM.IA.W (R1), [R2-R5]
-  MOVM.IA [R2-R6], (R0)
-  MOVM.IA.W (R13), [R4-R11]
-  RET
-
-#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
-  MOVBU (offset+0)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+0)(Rdst); \
-  MOVBU (offset+1)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+1)(Rdst); \
-  MOVBU (offset+2)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+2)(Rdst); \
-  MOVBU (offset+3)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+3)(Rdst)
-
-TEXT poly1305_blocks_armv6<>(SB),4,$-4
-  MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
-  SUB $128, R13
-  MOVW R0, 36(R13)
-  MOVW R1, 40(R13)
-  MOVW R2, 44(R13)
-  MOVW R1, R14
-  MOVW R2, R12
-  MOVW 56(R0), R8
-  WORD $0xe1180008 // TST R8, R8 not working see issue 5921
-  EOR R6, R6, R6
-  MOVW.EQ $(1<<24), R6
-  MOVW R6, 32(R13)
-  ADD $64, R13, g
-  MOVM.IA (R0), [R0-R9]
-  MOVM.IA [R0-R4], (g)
-  CMP $16, R12
-  BLO poly1305_blocks_armv6_done
-poly1305_blocks_armv6_mainloop:
-  WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
-  BEQ poly1305_blocks_armv6_mainloop_aligned
-  ADD $48, R13, g
-  MOVW_UNALIGNED(R14, g, R0, 0)
-  MOVW_UNALIGNED(R14, g, R0, 4)
-  MOVW_UNALIGNED(R14, g, R0, 8)
-  MOVW_UNALIGNED(R14, g, R0, 12)
-  MOVM.IA (g), [R0-R3]
-  ADD $16, R14
-  B poly1305_blocks_armv6_mainloop_loaded
-poly1305_blocks_armv6_mainloop_aligned:
-  MOVM.IA.W (R14), [R0-R3]
-poly1305_blocks_armv6_mainloop_loaded:
-  MOVW R0>>26, g
-  MOVW R1>>20, R11
-  MOVW R2>>14, R12
-  MOVW R14, 40(R13)
-  MOVW R3>>8, R4
-  ORR R1<<6, g, g
-  ORR R2<<12, R11, R11
-  ORR R3<<18, R12, R12
-  BIC $0xfc000000, R0, R0
-  BIC $0xfc000000, g, g
-  MOVW 32(R13), R3
-  BIC $0xfc000000, R11, R11
-  BIC $0xfc000000, R12, R12
-  ADD R0, R5, R5
-  ADD g, R6, R6
-  ORR R3, R4, R4
-  ADD R11, R7, R7
-  ADD $64, R13, R14
-  ADD R12, R8, R8
-  ADD R4, R9, R9
-  MOVM.IA (R14), [R0-R4]
-  MULLU R4, R5, (R11, g)
-  MULLU R3, R5, (R14, R12)
-  MULALU R3, R6, (R11, g)
-  MULALU R2, R6, (R14, R12)
-  MULALU R2, R7, (R11, g)
-  MULALU R1, R7, (R14, R12)
-  ADD R4<<2, R4, R4
-  ADD R3<<2, R3, R3
-  MULALU R1, R8, (R11, g)
-  MULALU R0, R8, (R14, R12)
-  MULALU R0, R9, (R11, g)
-  MULALU R4, R9, (R14, R12)
-  MOVW g, 24(R13)
-  MOVW R11, 28(R13)
-  MOVW R12, 16(R13)
-  MOVW R14, 20(R13)
-  MULLU R2, R5, (R11, g)
-  MULLU R1, R5, (R14, R12)
-  MULALU R1, R6, (R11, g)
-  MULALU R0, R6, (R14, R12)
-  MULALU R0, R7, (R11, g)
-  MULALU R4, R7, (R14, R12)
-  ADD R2<<2, R2, R2
-  ADD R1<<2, R1, R1
-  MULALU R4, R8, (R11, g)
-  MULALU R3, R8, (R14, R12)
-  MULALU R3, R9, (R11, g)
-  MULALU R2, R9, (R14, R12)
-  MOVW g, 8(R13)
-  MOVW R11, 12(R13)
-  MOVW R12, 0(R13)
-  MOVW R14, w+4(SP)
-  MULLU R0, R5, (R11, g)
-  MULALU R4, R6, (R11, g)
-  MULALU R3, R7, (R11, g)
-  MULALU R2, R8, (R11, g)
-  MULALU R1, R9, (R11, g)
-  MOVM.IA (R13), [R0-R7]
-  MOVW g>>26, R12
-  MOVW R4>>26, R14
-  ORR R11<<6, R12, R12
-  ORR R5<<6, R14, R14
-  BIC $0xfc000000, g, g
-  BIC $0xfc000000, R4, R4
-  ADD.S R12, R0, R0
-  ADC $0, R1, R1
-  ADD.S R14, R6, R6
-  ADC $0, R7, R7
-  MOVW R0>>26, R12
-  MOVW R6>>26, R14
-  ORR R1<<6, R12, R12
-  ORR R7<<6, R14, R14
-  BIC $0xfc000000, R0, R0
-  BIC $0xfc000000, R6, R6
-  ADD R14<<2, R14, R14
-  ADD.S R12, R2, R2
-  ADC $0, R3, R3
-  ADD R14, g, g
-  MOVW R2>>26, R12
-  MOVW g>>26, R14
-  ORR R3<<6, R12, R12
-  BIC $0xfc000000, g, R5
-  BIC $0xfc000000, R2, R7
-  ADD R12, R4, R4
-  ADD R14, R0, R0
-  MOVW R4>>26, R12
-  BIC $0xfc000000, R4, R8
-  ADD R12, R6, R9
-  MOVW w+44(SP), R12
-  MOVW w+40(SP), R14
-  MOVW R0, R6
-  CMP $32, R12
-  SUB $16, R12, R12
-  MOVW R12, 44(R13)
-  BHS poly1305_blocks_armv6_mainloop
-poly1305_blocks_armv6_done:
-  MOVW 36(R13), R12
-  MOVW R5, 20(R12)
-  MOVW R6, 24(R12)
-  MOVW R7, 28(R12)
-  MOVW R8, 32(R12)
-  MOVW R9, 36(R12)
-  ADD $128, R13, R13
-  MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
-  RET
-
-#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
-  MOVBU.P 1(Rsrc), Rtmp; \
-  MOVBU.P Rtmp, 1(Rdst); \
-  MOVBU.P 1(Rsrc), Rtmp; \
-  MOVBU.P Rtmp, 1(Rdst)
-
-#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
-  MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
-  MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
-
-TEXT poly1305_finish_ext_armv6<>(SB),4,$-4
-  MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
-  SUB $16, R13, R13
-  MOVW R0, R5
-  MOVW R1, R6
-  MOVW R2, R7
-  MOVW R3, R8
-  AND.S R2, R2, R2
-  BEQ poly1305_finish_ext_armv6_noremaining
-  EOR R0, R0
-  MOVW R13, R9
-  MOVW R0, 0(R13)
-  MOVW R0, 4(R13)
-  MOVW R0, 8(R13)
-  MOVW R0, 12(R13)
-  WORD $0xe3110003 // TST R1, #3 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_aligned
-  WORD $0xe3120008 // TST R2, #8 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip8
-  MOVWP_UNALIGNED(R1, R9, g)
-  MOVWP_UNALIGNED(R1, R9, g)
-poly1305_finish_ext_armv6_skip8:
-  WORD $0xe3120004 // TST $4, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip4
-  MOVWP_UNALIGNED(R1, R9, g)
-poly1305_finish_ext_armv6_skip4:
-  WORD $0xe3120002 // TST $2, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip2
-  MOVHUP_UNALIGNED(R1, R9, g)
-  B poly1305_finish_ext_armv6_skip2
-poly1305_finish_ext_armv6_aligned:
-  WORD $0xe3120008 // TST R2, #8 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip8_aligned
-  MOVM.IA.W (R1), [g-R11]
-  MOVM.IA.W [g-R11], (R9)
-poly1305_finish_ext_armv6_skip8_aligned:
-  WORD $0xe3120004 // TST $4, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip4_aligned
-  MOVW.P 4(R1), g
-  MOVW.P g, 4(R9)
-poly1305_finish_ext_armv6_skip4_aligned:
-  WORD $0xe3120002 // TST $2, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip2
-  MOVHU.P 2(R1), g
-  MOVH.P g, 2(R9)
-poly1305_finish_ext_armv6_skip2:
-  WORD $0xe3120001 // TST $1, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip1
-  MOVBU.P 1(R1), g
-  MOVBU.P g, 1(R9)
-poly1305_finish_ext_armv6_skip1:
-  MOVW $1, R11
-  MOVBU R11, 0(R9)
-  MOVW R11, 56(R5)
-  MOVW R5, R0
-  MOVW R13, R1
-  MOVW $16, R2
-  BL poly1305_blocks_armv6<>(SB)
-poly1305_finish_ext_armv6_noremaining:
-  MOVW 20(R5), R0
-  MOVW 24(R5), R1
-  MOVW 28(R5), R2
-  MOVW 32(R5), R3
-  MOVW 36(R5), R4
-  MOVW R4>>26, R12
-  BIC $0xfc000000, R4, R4
-  ADD R12<<2, R12, R12
-  ADD R12, R0, R0
-  MOVW R0>>26, R12
-  BIC $0xfc000000, R0, R0
-  ADD R12, R1, R1
-  MOVW R1>>26, R12
-  BIC $0xfc000000, R1, R1
-  ADD R12, R2, R2
-  MOVW R2>>26, R12
-  BIC $0xfc000000, R2, R2
-  ADD R12, R3, R3
-  MOVW R3>>26, R12
-  BIC $0xfc000000, R3, R3
-  ADD R12, R4, R4
-  ADD $5, R0, R6
-  MOVW R6>>26, R12
-  BIC $0xfc000000, R6, R6
-  ADD R12, R1, R7
-  MOVW R7>>26, R12
-  BIC $0xfc000000, R7, R7
-  ADD R12, R2, g
-  MOVW g>>26, R12
-  BIC $0xfc000000, g, g
-  ADD R12, R3, R11
-  MOVW $-(1<<26), R12
-  ADD R11>>26, R12, R12
-  BIC $0xfc000000, R11, R11
-  ADD R12, R4, R14
-  MOVW R14>>31, R12
-  SUB $1, R12
-  AND R12, R6, R6
-  AND R12, R7, R7
-  AND R12, g, g
-  AND R12, R11, R11
-  AND R12, R14, R14
-  MVN R12, R12
-  AND R12, R0, R0
-  AND R12, R1, R1
-  AND R12, R2, R2
-  AND R12, R3, R3
-  AND R12, R4, R4
-  ORR R6, R0, R0
-  ORR R7, R1, R1
-  ORR g, R2, R2
-  ORR R11, R3, R3
-  ORR R14, R4, R4
-  ORR R1<<26, R0, R0
-  MOVW R1>>6, R1
-  ORR R2<<20, R1, R1
-  MOVW R2>>12, R2
-  ORR R3<<14, R2, R2
-  MOVW R3>>18, R3
-  ORR R4<<8, R3, R3
-  MOVW 40(R5), R6
-  MOVW 44(R5), R7
-  MOVW 48(R5), g
-  MOVW 52(R5), R11
-  ADD.S R6, R0, R0
-  ADC.S R7, R1, R1
-  ADC.S g, R2, R2
-  ADC.S R11, R3, R3
-  MOVM.IA [R0-R3], (R8)
-  MOVW R5, R12
-  EOR R0, R0, R0
-  EOR R1, R1, R1
-  EOR R2, R2, R2
-  EOR R3, R3, R3
-  EOR R4, R4, R4
-  EOR R5, R5, R5
-  EOR R6, R6, R6
-  EOR R7, R7, R7
-  MOVM.IA.W [R0-R7], (R12)
-  MOVM.IA [R0-R7], (R12)
-  ADD $16, R13, R13
-  MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
-  RET
-
-// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key)
-TEXT ·poly1305_auth_armv6(SB),0,$280-16
-  MOVW  out+0(FP), R4
-  MOVW  m+4(FP), R5
-  MOVW  mlen+8(FP), R6
-  MOVW  key+12(FP), R7
-
-  MOVW R13, R8
-  BIC $63, R13
-  SUB $64, R13, R13
-  MOVW  R13, R0
-  MOVW  R7, R1
-  BL poly1305_init_ext_armv6<>(SB)
-  BIC.S $15, R6, R2
-  BEQ poly1305_auth_armv6_noblocks
-  MOVW R13, R0
-  MOVW R5, R1
-  ADD R2, R5, R5
-  SUB R2, R6, R6
-  BL poly1305_blocks_armv6<>(SB)
-poly1305_auth_armv6_noblocks:
-  MOVW R13, R0
-  MOVW R5, R1
-  MOVW R6, R2
-  MOVW R4, R3
-  BL poly1305_finish_ext_armv6<>(SB)
-  MOVW R8, R13
-  RET
diff --git a/ssl/test/runner/poly1305/poly1305_test.go b/ssl/test/runner/poly1305/poly1305_test.go
index b3e9231..017027f 100644
--- a/ssl/test/runner/poly1305/poly1305_test.go
+++ b/ssl/test/runner/poly1305/poly1305_test.go
@@ -6,10 +6,14 @@
 
 import (
 	"bytes"
+	"encoding/hex"
+	"flag"
 	"testing"
 	"unsafe"
 )
 
+var stressFlag = flag.Bool("stress", false, "run slow stress tests")
+
 var testData = []struct {
 	in, k, correct []byte
 }{
@@ -33,6 +37,42 @@
 		make([]byte, 32),
 		make([]byte, 16),
 	},
+	{
+		// This test triggers an edge-case. See https://go-review.googlesource.com/#/c/30101/.
+		[]byte{0x81, 0xd8, 0xb2, 0xe4, 0x6a, 0x25, 0x21, 0x3b, 0x58, 0xfe, 0xe4, 0x21, 0x3a, 0x2a, 0x28, 0xe9, 0x21, 0xc1, 0x2a, 0x96, 0x32, 0x51, 0x6d, 0x3b, 0x73, 0x27, 0x27, 0x27, 0xbe, 0xcf, 0x21, 0x29},
+		[]byte{0x3b, 0x3a, 0x29, 0xe9, 0x3b, 0x21, 0x3a, 0x5c, 0x5c, 0x3b, 0x3b, 0x05, 0x3a, 0x3a, 0x8c, 0x0d},
+		[]byte{0x6d, 0xc1, 0x8b, 0x8c, 0x34, 0x4c, 0xd7, 0x99, 0x27, 0x11, 0x8b, 0xbe, 0x84, 0xb7, 0xf3, 0x14},
+	},
+	{
+		// This test generates a result of (2^130-1) % (2^130-5).
+		[]byte{
+			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		},
+		[]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		[]byte{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+	},
+	{
+		// This test generates a result of (2^130-6) % (2^130-5).
+		[]byte{
+			0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		},
+		[]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		[]byte{0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+	},
+	{
+		// This test generates a result of (2^130-5) % (2^130-5).
+		[]byte{
+			0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		},
+		[]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+		[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+	},
 }
 
 func testSum(t *testing.T, unaligned bool) {
@@ -52,6 +92,39 @@
 	}
 }
 
+func TestBurnin(t *testing.T) {
+	// This test can be used to sanity-check significant changes. It can
+	// take many minutes to run, even on fast machines. It's disabled
+	// by default.
+	if !*stressFlag {
+		t.Skip("skipping without -stress")
+	}
+
+	var key [32]byte
+	var input [25]byte
+	var output [16]byte
+
+	for i := range key {
+		key[i] = 1
+	}
+	for i := range input {
+		input[i] = 2
+	}
+
+	for i := uint64(0); i < 1e10; i++ {
+		Sum(&output, input[:], &key)
+		copy(key[0:], output[:])
+		copy(key[16:], output[:])
+		copy(input[:], output[:])
+		copy(input[16:], output[:])
+	}
+
+	const expected = "5e3b866aea0b636d240c83c428f84bfa"
+	if got := hex.EncodeToString(output[:]); got != expected {
+		t.Errorf("expected %s, got %s", expected, got)
+	}
+}
+
 func TestSum(t *testing.T)          { testSum(t, false) }
 func TestSumUnaligned(t *testing.T) { testSum(t, true) }
 
diff --git a/ssl/test/runner/poly1305/sum_amd64.go b/ssl/test/runner/poly1305/sum_amd64.go
index 6775c70..4dd72fe 100644
--- a/ssl/test/runner/poly1305/sum_amd64.go
+++ b/ssl/test/runner/poly1305/sum_amd64.go
@@ -6,10 +6,8 @@
 
 package poly1305
 
-// This function is implemented in poly1305_amd64.s
-
+// This function is implemented in sum_amd64.s
 //go:noescape
-
 func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
 
 // Sum generates an authenticator for m using a one-time key and puts the
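
The blank-line removal in this hunk is a functional fix, not just
tidying: a //go: compiler directive applies only to the declaration
immediately following it, so with the blank line in place //go:noescape
was silently ignored and the assembly function's pointer arguments could
be treated as escaping. The binding rule, sketched with hypothetical
names (bodiless declarations like these only build alongside a matching
.s file):

package sketch

//go:noescape
func adjacent(p *byte) // directive applies: nothing separates them

//go:noescape

func detached(p *byte) // directive ignored: the blank line breaks it
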
diff --git a/ssl/test/runner/poly1305/sum_amd64.s b/ssl/test/runner/poly1305/sum_amd64.s
new file mode 100644
index 0000000..2edae63
--- /dev/null
+++ b/ssl/test/runner/poly1305/sum_amd64.s
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+#define POLY1305_ADD(msg, h0, h1, h2) \
+	ADDQ 0(msg), h0;  \
+	ADCQ 8(msg), h1;  \
+	ADCQ $1, h2;      \
+	LEAQ 16(msg), msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
+	MOVQ  r0, AX;                  \
+	MULQ  h0;                      \
+	MOVQ  AX, t0;                  \
+	MOVQ  DX, t1;                  \
+	MOVQ  r0, AX;                  \
+	MULQ  h1;                      \
+	ADDQ  AX, t1;                  \
+	ADCQ  $0, DX;                  \
+	MOVQ  r0, t2;                  \
+	IMULQ h2, t2;                  \
+	ADDQ  DX, t2;                  \
+	                               \
+	MOVQ  r1, AX;                  \
+	MULQ  h0;                      \
+	ADDQ  AX, t1;                  \
+	ADCQ  $0, DX;                  \
+	MOVQ  DX, h0;                  \
+	MOVQ  r1, t3;                  \
+	IMULQ h2, t3;                  \
+	MOVQ  r1, AX;                  \
+	MULQ  h1;                      \
+	ADDQ  AX, t2;                  \
+	ADCQ  DX, t3;                  \
+	ADDQ  h0, t2;                  \
+	ADCQ  $0, t3;                  \
+	                               \
+	MOVQ  t0, h0;                  \
+	MOVQ  t1, h1;                  \
+	MOVQ  t2, h2;                  \
+	ANDQ  $3, h2;                  \
+	MOVQ  t2, t0;                  \
+	ANDQ  $0xFFFFFFFFFFFFFFFC, t0; \
+	ADDQ  t0, h0;                  \
+	ADCQ  t3, h1;                  \
+	ADCQ  $0, h2;                  \
+	SHRQ  $2, t3, t2;              \
+	SHRQ  $2, t3;                  \
+	ADDQ  t2, h0;                  \
+	ADCQ  t3, h1;                  \
+	ADCQ  $0, h2
+
+DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+GLOBL ·poly1305Mask<>(SB), RODATA, $16
+
+// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+TEXT ·poly1305(SB), $0-32
+	MOVQ out+0(FP), DI
+	MOVQ m+8(FP), SI
+	MOVQ mlen+16(FP), R15
+	MOVQ key+24(FP), AX
+
+	MOVQ 0(AX), R11
+	MOVQ 8(AX), R12
+	ANDQ ·poly1305Mask<>(SB), R11   // r0
+	ANDQ ·poly1305Mask<>+8(SB), R12 // r1
+	XORQ R8, R8                    // h0
+	XORQ R9, R9                    // h1
+	XORQ R10, R10                  // h2
+
+	CMPQ R15, $16
+	JB   bytes_between_0_and_15
+
+loop:
+	POLY1305_ADD(SI, R8, R9, R10)
+
+multiply:
+	POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
+	SUBQ $16, R15
+	CMPQ R15, $16
+	JAE  loop
+
+bytes_between_0_and_15:
+	TESTQ R15, R15
+	JZ    done
+	MOVQ  $1, BX
+	XORQ  CX, CX
+	XORQ  R13, R13
+	ADDQ  R15, SI
+
+flush_buffer:
+	SHLQ $8, BX, CX
+	SHLQ $8, BX
+	MOVB -1(SI), R13
+	XORQ R13, BX
+	DECQ SI
+	DECQ R15
+	JNZ  flush_buffer
+
+	ADDQ BX, R8
+	ADCQ CX, R9
+	ADCQ $0, R10
+	MOVQ $16, R15
+	JMP  multiply
+
+done:
+	MOVQ    R8, AX
+	MOVQ    R9, BX
+	SUBQ    $0xFFFFFFFFFFFFFFFB, AX
+	SBBQ    $0xFFFFFFFFFFFFFFFF, BX
+	SBBQ    $3, R10
+	CMOVQCS R8, AX
+	CMOVQCS R9, BX
+	MOVQ    key+24(FP), R8
+	ADDQ    16(R8), AX
+	ADCQ    24(R8), BX
+
+	MOVQ AX, 0(DI)
+	MOVQ BX, 8(DI)
+	RET
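
The new amd64 implementation keeps the accumulator h in three 64-bit
words, with h2 holding only the bits at and above 2^128, and reduces
modulo 2^130 - 5 in the tail of POLY1305_MUL: the ANDQ $3 / SHRQ $2
sequence computes h = (h mod 2^130) + 5*(h >> 130), adding the carry
once times 4 and once times 1. A hedged Go sketch of just that folding
step, where t3:t2:t1:t0 stands for the 256-bit product of h and r:

package main

import (
	"fmt"
	"math/bits"
)

// reduce folds the bits above 2^130 back into h, using the identity
// 2^130 = 5 (mod 2^130-5): h = (h mod 2^130) + 5*(h >> 130).
// It mirrors the ANDQ $3 / SHRQ $2 tail of the POLY1305_MUL macro
// above; the assembly uses ADD/ADC directly where this uses math/bits.
func reduce(t0, t1, t2, t3 uint64) (h0, h1, h2 uint64) {
	h0, h1 = t0, t1
	h2 = t2 & 3 // the two bits of h that still lie below 2^130

	cc0 := t2 &^ 3 // (h >> 130) << 2: four times the carry, low word
	cc1 := t3      // four times the carry, high word

	var c uint64
	h0, c = bits.Add64(h0, cc0, 0) // + 4*carry
	h1, c = bits.Add64(h1, cc1, c)
	h2 += c

	h0, c = bits.Add64(h0, cc0>>2|cc1<<62, 0) // + 1*carry
	h1, c = bits.Add64(h1, cc1>>2, c)
	h2 += c
	return
}

func main() {
	fmt.Println(reduce(1, 2, 7, 9))
}
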
diff --git a/ssl/test/runner/poly1305/sum_arm.go b/ssl/test/runner/poly1305/sum_arm.go
index 50b979c..5dc321c 100644
--- a/ssl/test/runner/poly1305/sum_arm.go
+++ b/ssl/test/runner/poly1305/sum_arm.go
@@ -2,14 +2,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build arm,!gccgo,!appengine
+// +build arm,!gccgo,!appengine,!nacl
 
 package poly1305
 
-// This function is implemented in poly1305_arm.s
-
+// This function is implemented in sum_arm.s
 //go:noescape
-
 func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
 
 // Sum generates an authenticator for m using a one-time key and puts the
diff --git a/ssl/test/runner/poly1305/sum_arm.s b/ssl/test/runner/poly1305/sum_arm.s
new file mode 100644
index 0000000..f70b4ac
--- /dev/null
+++ b/ssl/test/runner/poly1305/sum_arm.s
@@ -0,0 +1,427 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine,!nacl
+
+#include "textflag.h"
+
+// This code was translated into a form compatible with 5a from the public
+// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
+
+DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
+DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
+DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
+DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
+DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
+GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
+
+// Warning: the linker may use R11 to synthesize certain instructions. Please
+// take care and verify that no synthetic instructions use it.
+
+TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
+	// Needs 16 bytes of stack and 64 bytes of space pointed to by R0.  (It
+	// might look like it's only 60 bytes of space but the final four bytes
+	// will be written by another function.) We need to skip over four
+	// bytes of stack because that's saving the value of 'g'.
+	ADD       $4, R13, R8
+	MOVM.IB   [R4-R7], (R8)
+	MOVM.IA.W (R1), [R2-R5]
+	MOVW      $·poly1305_init_constants_armv6<>(SB), R7
+	MOVW      R2, R8
+	MOVW      R2>>26, R9
+	MOVW      R3>>20, g
+	MOVW      R4>>14, R11
+	MOVW      R5>>8, R12
+	ORR       R3<<6, R9, R9
+	ORR       R4<<12, g, g
+	ORR       R5<<18, R11, R11
+	MOVM.IA   (R7), [R2-R6]
+	AND       R8, R2, R2
+	AND       R9, R3, R3
+	AND       g, R4, R4
+	AND       R11, R5, R5
+	AND       R12, R6, R6
+	MOVM.IA.W [R2-R6], (R0)
+	EOR       R2, R2, R2
+	EOR       R3, R3, R3
+	EOR       R4, R4, R4
+	EOR       R5, R5, R5
+	EOR       R6, R6, R6
+	MOVM.IA.W [R2-R6], (R0)
+	MOVM.IA.W (R1), [R2-R5]
+	MOVM.IA   [R2-R6], (R0)
+	ADD       $20, R13, R0
+	MOVM.DA   (R0), [R4-R7]
+	RET
+
+#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
+	MOVBU (offset+0)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+0)(Rdst); \
+	MOVBU (offset+1)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+1)(Rdst); \
+	MOVBU (offset+2)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+2)(Rdst); \
+	MOVBU (offset+3)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+3)(Rdst)
+
+TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
+	// Needs 24 bytes of stack for saved registers and then 88 bytes of
+	// scratch space after that. We assume that 24 bytes at (R13) have
+	// already been used: four bytes for the link register saved in the
+	// prelude of poly1305_auth_armv6, four bytes for saving the value of g
+	// in that function and 16 bytes of scratch space used around
+	// poly1305_finish_ext_armv6_skip1.
+	ADD     $24, R13, R12
+	MOVM.IB [R4-R8, R14], (R12)
+	MOVW    R0, 88(R13)
+	MOVW    R1, 92(R13)
+	MOVW    R2, 96(R13)
+	MOVW    R1, R14
+	MOVW    R2, R12
+	MOVW    56(R0), R8
+	WORD    $0xe1180008                // TST R8, R8 not working see issue 5921
+	EOR     R6, R6, R6
+	MOVW.EQ $(1<<24), R6
+	MOVW    R6, 84(R13)
+	ADD     $116, R13, g
+	MOVM.IA (R0), [R0-R9]
+	MOVM.IA [R0-R4], (g)
+	CMP     $16, R12
+	BLO     poly1305_blocks_armv6_done
+
+poly1305_blocks_armv6_mainloop:
+	WORD    $0xe31e0003                            // TST R14, #3 not working see issue 5921
+	BEQ     poly1305_blocks_armv6_mainloop_aligned
+	ADD     $100, R13, g
+	MOVW_UNALIGNED(R14, g, R0, 0)
+	MOVW_UNALIGNED(R14, g, R0, 4)
+	MOVW_UNALIGNED(R14, g, R0, 8)
+	MOVW_UNALIGNED(R14, g, R0, 12)
+	MOVM.IA (g), [R0-R3]
+	ADD     $16, R14
+	B       poly1305_blocks_armv6_mainloop_loaded
+
+poly1305_blocks_armv6_mainloop_aligned:
+	MOVM.IA.W (R14), [R0-R3]
+
+poly1305_blocks_armv6_mainloop_loaded:
+	MOVW    R0>>26, g
+	MOVW    R1>>20, R11
+	MOVW    R2>>14, R12
+	MOVW    R14, 92(R13)
+	MOVW    R3>>8, R4
+	ORR     R1<<6, g, g
+	ORR     R2<<12, R11, R11
+	ORR     R3<<18, R12, R12
+	BIC     $0xfc000000, R0, R0
+	BIC     $0xfc000000, g, g
+	MOVW    84(R13), R3
+	BIC     $0xfc000000, R11, R11
+	BIC     $0xfc000000, R12, R12
+	ADD     R0, R5, R5
+	ADD     g, R6, R6
+	ORR     R3, R4, R4
+	ADD     R11, R7, R7
+	ADD     $116, R13, R14
+	ADD     R12, R8, R8
+	ADD     R4, R9, R9
+	MOVM.IA (R14), [R0-R4]
+	MULLU   R4, R5, (R11, g)
+	MULLU   R3, R5, (R14, R12)
+	MULALU  R3, R6, (R11, g)
+	MULALU  R2, R6, (R14, R12)
+	MULALU  R2, R7, (R11, g)
+	MULALU  R1, R7, (R14, R12)
+	ADD     R4<<2, R4, R4
+	ADD     R3<<2, R3, R3
+	MULALU  R1, R8, (R11, g)
+	MULALU  R0, R8, (R14, R12)
+	MULALU  R0, R9, (R11, g)
+	MULALU  R4, R9, (R14, R12)
+	MOVW    g, 76(R13)
+	MOVW    R11, 80(R13)
+	MOVW    R12, 68(R13)
+	MOVW    R14, 72(R13)
+	MULLU   R2, R5, (R11, g)
+	MULLU   R1, R5, (R14, R12)
+	MULALU  R1, R6, (R11, g)
+	MULALU  R0, R6, (R14, R12)
+	MULALU  R0, R7, (R11, g)
+	MULALU  R4, R7, (R14, R12)
+	ADD     R2<<2, R2, R2
+	ADD     R1<<2, R1, R1
+	MULALU  R4, R8, (R11, g)
+	MULALU  R3, R8, (R14, R12)
+	MULALU  R3, R9, (R11, g)
+	MULALU  R2, R9, (R14, R12)
+	MOVW    g, 60(R13)
+	MOVW    R11, 64(R13)
+	MOVW    R12, 52(R13)
+	MOVW    R14, 56(R13)
+	MULLU   R0, R5, (R11, g)
+	MULALU  R4, R6, (R11, g)
+	MULALU  R3, R7, (R11, g)
+	MULALU  R2, R8, (R11, g)
+	MULALU  R1, R9, (R11, g)
+	ADD     $52, R13, R0
+	MOVM.IA (R0), [R0-R7]
+	MOVW    g>>26, R12
+	MOVW    R4>>26, R14
+	ORR     R11<<6, R12, R12
+	ORR     R5<<6, R14, R14
+	BIC     $0xfc000000, g, g
+	BIC     $0xfc000000, R4, R4
+	ADD.S   R12, R0, R0
+	ADC     $0, R1, R1
+	ADD.S   R14, R6, R6
+	ADC     $0, R7, R7
+	MOVW    R0>>26, R12
+	MOVW    R6>>26, R14
+	ORR     R1<<6, R12, R12
+	ORR     R7<<6, R14, R14
+	BIC     $0xfc000000, R0, R0
+	BIC     $0xfc000000, R6, R6
+	ADD     R14<<2, R14, R14
+	ADD.S   R12, R2, R2
+	ADC     $0, R3, R3
+	ADD     R14, g, g
+	MOVW    R2>>26, R12
+	MOVW    g>>26, R14
+	ORR     R3<<6, R12, R12
+	BIC     $0xfc000000, g, R5
+	BIC     $0xfc000000, R2, R7
+	ADD     R12, R4, R4
+	ADD     R14, R0, R0
+	MOVW    R4>>26, R12
+	BIC     $0xfc000000, R4, R8
+	ADD     R12, R6, R9
+	MOVW    96(R13), R12
+	MOVW    92(R13), R14
+	MOVW    R0, R6
+	CMP     $32, R12
+	SUB     $16, R12, R12
+	MOVW    R12, 96(R13)
+	BHS     poly1305_blocks_armv6_mainloop
+
+poly1305_blocks_armv6_done:
+	MOVW    88(R13), R12
+	MOVW    R5, 20(R12)
+	MOVW    R6, 24(R12)
+	MOVW    R7, 28(R12)
+	MOVW    R8, 32(R12)
+	MOVW    R9, 36(R12)
+	ADD     $48, R13, R0
+	MOVM.DA (R0), [R4-R8, R14]
+	RET
+
+#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+	MOVBU.P 1(Rsrc), Rtmp; \
+	MOVBU.P Rtmp, 1(Rdst); \
+	MOVBU.P 1(Rsrc), Rtmp; \
+	MOVBU.P Rtmp, 1(Rdst)
+
+#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
+	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
+
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+TEXT ·poly1305_auth_armv6(SB), $196-16
+	// The value 196, just above, is the sum of 64 (the size of the context
+	// structure) and 132 (the amount of stack needed).
+	//
+	// At this point, the stack pointer (R13) has been moved down. It
+	// points to the saved link register and there's 196 bytes of free
+	// space above it.
+	//
+	// The stack for this function looks like:
+	//
+	// +---------------------
+	// |
+	// | 64 bytes of context structure
+	// |
+	// +---------------------
+	// |
+	// | 112 bytes for poly1305_blocks_armv6
+	// |
+	// +---------------------
+	// | 16 bytes of final block, constructed at
+	// | poly1305_finish_ext_armv6_skip8
+	// +---------------------
+	// | four bytes of saved 'g'
+	// +---------------------
+	// | lr, saved by prelude    <- R13 points here
+	// +---------------------
+	MOVW g, 4(R13)
+
+	MOVW out+0(FP), R4
+	MOVW m+4(FP), R5
+	MOVW mlen+8(FP), R6
+	MOVW key+12(FP), R7
+
+	ADD  $136, R13, R0 // 136 = 4 + 4 + 16 + 112
+	MOVW R7, R1
+
+	// poly1305_init_ext_armv6 will write to the stack from R13+4, but
+	// that's ok because none of the other values have been written yet.
+	BL    poly1305_init_ext_armv6<>(SB)
+	BIC.S $15, R6, R2
+	BEQ   poly1305_auth_armv6_noblocks
+	ADD   $136, R13, R0
+	MOVW  R5, R1
+	ADD   R2, R5, R5
+	SUB   R2, R6, R6
+	BL    poly1305_blocks_armv6<>(SB)
+
+poly1305_auth_armv6_noblocks:
+	ADD  $136, R13, R0
+	MOVW R5, R1
+	MOVW R6, R2
+	MOVW R4, R3
+
+	MOVW  R0, R5
+	MOVW  R1, R6
+	MOVW  R2, R7
+	MOVW  R3, R8
+	AND.S R2, R2, R2
+	BEQ   poly1305_finish_ext_armv6_noremaining
+	EOR   R0, R0
+	ADD   $8, R13, R9                           // 8 = offset to 16 byte scratch space
+	MOVW  R0, (R9)
+	MOVW  R0, 4(R9)
+	MOVW  R0, 8(R9)
+	MOVW  R0, 12(R9)
+	WORD  $0xe3110003                           // TST R1, #3 not working see issue 5921
+	BEQ   poly1305_finish_ext_armv6_aligned
+	WORD  $0xe3120008                           // TST R2, #8 not working see issue 5921
+	BEQ   poly1305_finish_ext_armv6_skip8
+	MOVWP_UNALIGNED(R1, R9, g)
+	MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip8:
+	WORD $0xe3120004                     // TST $4, R2 not working see issue 5921
+	BEQ  poly1305_finish_ext_armv6_skip4
+	MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip4:
+	WORD $0xe3120002                     // TST $2, R2 not working see issue 5921
+	BEQ  poly1305_finish_ext_armv6_skip2
+	MOVHUP_UNALIGNED(R1, R9, g)
+	B    poly1305_finish_ext_armv6_skip2
+
+poly1305_finish_ext_armv6_aligned:
+	WORD      $0xe3120008                             // TST R2, #8 not working see issue 5921
+	BEQ       poly1305_finish_ext_armv6_skip8_aligned
+	MOVM.IA.W (R1), [g-R11]
+	MOVM.IA.W [g-R11], (R9)
+
+poly1305_finish_ext_armv6_skip8_aligned:
+	WORD   $0xe3120004                             // TST $4, R2 not working see issue 5921
+	BEQ    poly1305_finish_ext_armv6_skip4_aligned
+	MOVW.P 4(R1), g
+	MOVW.P g, 4(R9)
+
+poly1305_finish_ext_armv6_skip4_aligned:
+	WORD    $0xe3120002                     // TST $2, R2 not working see issue 5921
+	BEQ     poly1305_finish_ext_armv6_skip2
+	MOVHU.P 2(R1), g
+	MOVH.P  g, 2(R9)
+
+poly1305_finish_ext_armv6_skip2:
+	WORD    $0xe3120001                     // TST $1, R2 not working see issue 5921
+	BEQ     poly1305_finish_ext_armv6_skip1
+	MOVBU.P 1(R1), g
+	MOVBU.P g, 1(R9)
+
+poly1305_finish_ext_armv6_skip1:
+	MOVW  $1, R11
+	MOVBU R11, 0(R9)
+	MOVW  R11, 56(R5)
+	MOVW  R5, R0
+	ADD   $8, R13, R1
+	MOVW  $16, R2
+	BL    poly1305_blocks_armv6<>(SB)
+
+poly1305_finish_ext_armv6_noremaining:
+	MOVW      20(R5), R0
+	MOVW      24(R5), R1
+	MOVW      28(R5), R2
+	MOVW      32(R5), R3
+	MOVW      36(R5), R4
+	MOVW      R4>>26, R12
+	BIC       $0xfc000000, R4, R4
+	ADD       R12<<2, R12, R12
+	ADD       R12, R0, R0
+	MOVW      R0>>26, R12
+	BIC       $0xfc000000, R0, R0
+	ADD       R12, R1, R1
+	MOVW      R1>>26, R12
+	BIC       $0xfc000000, R1, R1
+	ADD       R12, R2, R2
+	MOVW      R2>>26, R12
+	BIC       $0xfc000000, R2, R2
+	ADD       R12, R3, R3
+	MOVW      R3>>26, R12
+	BIC       $0xfc000000, R3, R3
+	ADD       R12, R4, R4
+	ADD       $5, R0, R6
+	MOVW      R6>>26, R12
+	BIC       $0xfc000000, R6, R6
+	ADD       R12, R1, R7
+	MOVW      R7>>26, R12
+	BIC       $0xfc000000, R7, R7
+	ADD       R12, R2, g
+	MOVW      g>>26, R12
+	BIC       $0xfc000000, g, g
+	ADD       R12, R3, R11
+	MOVW      $-(1<<26), R12
+	ADD       R11>>26, R12, R12
+	BIC       $0xfc000000, R11, R11
+	ADD       R12, R4, R9
+	MOVW      R9>>31, R12
+	SUB       $1, R12
+	AND       R12, R6, R6
+	AND       R12, R7, R7
+	AND       R12, g, g
+	AND       R12, R11, R11
+	AND       R12, R9, R9
+	MVN       R12, R12
+	AND       R12, R0, R0
+	AND       R12, R1, R1
+	AND       R12, R2, R2
+	AND       R12, R3, R3
+	AND       R12, R4, R4
+	ORR       R6, R0, R0
+	ORR       R7, R1, R1
+	ORR       g, R2, R2
+	ORR       R11, R3, R3
+	ORR       R9, R4, R4
+	ORR       R1<<26, R0, R0
+	MOVW      R1>>6, R1
+	ORR       R2<<20, R1, R1
+	MOVW      R2>>12, R2
+	ORR       R3<<14, R2, R2
+	MOVW      R3>>18, R3
+	ORR       R4<<8, R3, R3
+	MOVW      40(R5), R6
+	MOVW      44(R5), R7
+	MOVW      48(R5), g
+	MOVW      52(R5), R11
+	ADD.S     R6, R0, R0
+	ADC.S     R7, R1, R1
+	ADC.S     g, R2, R2
+	ADC.S     R11, R3, R3
+	MOVM.IA   [R0-R3], (R8)
+	MOVW      R5, R12
+	EOR       R0, R0, R0
+	EOR       R1, R1, R1
+	EOR       R2, R2, R2
+	EOR       R3, R3, R3
+	EOR       R4, R4, R4
+	EOR       R5, R5, R5
+	EOR       R6, R6, R6
+	EOR       R7, R7, R7
+	MOVM.IA.W [R0-R7], (R12)
+	MOVM.IA   [R0-R7], (R12)
+	MOVW      4(R13), g
+	RET
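
The five masks in poly1305_init_constants_armv6 are the usual Poly1305 clamp on
the r half of the key, expressed in the 26-bit limb radix this file works in:
five overlapping little-endian 32-bit loads, shifted and masked. A Go sketch of
the same computation (illustrative only; the portable sum_ref.go below performs
these exact loads):

    package poly1305

    import "encoding/binary"

    // clampR loads r from the first half of the key as five 26-bit limbs,
    // with the clamp (r &= 0x0ffffffc0ffffffc0ffffffc0fffffff) folded into
    // the per-limb masks.
    func clampR(key *[32]byte) (r0, r1, r2, r3, r4 uint32) {
        r0 = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff
        r1 = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03
        r2 = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff
        r3 = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff
        r4 = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff
        return
    }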
diff --git a/ssl/test/runner/poly1305/sum_ref.go b/ssl/test/runner/poly1305/sum_ref.go
index 0b24fc7..b2805a5 100644
--- a/ssl/test/runner/poly1305/sum_ref.go
+++ b/ssl/test/runner/poly1305/sum_ref.go
@@ -2,1530 +2,140 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !amd64,!arm gccgo appengine
+// +build !amd64,!arm gccgo appengine nacl
 
 package poly1305
 
-// Based on original, public domain implementation from NaCl by D. J.
-// Bernstein.
+import "encoding/binary"
 
-import "math"
-
-const (
-	alpham80 = 0.00000000558793544769287109375
-	alpham48 = 24.0
-	alpham16 = 103079215104.0
-	alpha0   = 6755399441055744.0
-	alpha18  = 1770887431076116955136.0
-	alpha32  = 29014219670751100192948224.0
-	alpha50  = 7605903601369376408980219232256.0
-	alpha64  = 124615124604835863084731911901282304.0
-	alpha82  = 32667107224410092492483962313449748299776.0
-	alpha96  = 535217884764734955396857238543560676143529984.0
-	alpha112 = 35076039295941670036888435985190792471742381031424.0
-	alpha130 = 9194973245195333150150082162901855101712434733101613056.0
-	scale    = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125
-	offset0  = 6755408030990331.0
-	offset1  = 29014256564239239022116864.0
-	offset2  = 124615283061160854719918951570079744.0
-	offset3  = 535219245894202480694386063513315216128475136.0
-)
-
-// Sum generates an authenticator for m using a one-time key and puts the
+// Sum generates an authenticator for msg using a one-time key and puts the
 // 16-byte result into out. Authenticating two different messages with the same
 // key allows an attacker to forge messages at will.
-func Sum(out *[16]byte, m []byte, key *[32]byte) {
-	r := key
-	s := key[16:]
+func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
 	var (
-		y7        float64
-		y6        float64
-		y1        float64
-		y0        float64
-		y5        float64
-		y4        float64
-		x7        float64
-		x6        float64
-		x1        float64
-		x0        float64
-		y3        float64
-		y2        float64
-		x5        float64
-		r3lowx0   float64
-		x4        float64
-		r0lowx6   float64
-		x3        float64
-		r3highx0  float64
-		x2        float64
-		r0highx6  float64
-		r0lowx0   float64
-		sr1lowx6  float64
-		r0highx0  float64
-		sr1highx6 float64
-		sr3low    float64
-		r1lowx0   float64
-		sr2lowx6  float64
-		r1highx0  float64
-		sr2highx6 float64
-		r2lowx0   float64
-		sr3lowx6  float64
-		r2highx0  float64
-		sr3highx6 float64
-		r1highx4  float64
-		r1lowx4   float64
-		r0highx4  float64
-		r0lowx4   float64
-		sr3highx4 float64
-		sr3lowx4  float64
-		sr2highx4 float64
-		sr2lowx4  float64
-		r0lowx2   float64
-		r0highx2  float64
-		r1lowx2   float64
-		r1highx2  float64
-		r2lowx2   float64
-		r2highx2  float64
-		sr3lowx2  float64
-		sr3highx2 float64
-		z0        float64
-		z1        float64
-		z2        float64
-		z3        float64
-		m0        int64
-		m1        int64
-		m2        int64
-		m3        int64
-		m00       uint32
-		m01       uint32
-		m02       uint32
-		m03       uint32
-		m10       uint32
-		m11       uint32
-		m12       uint32
-		m13       uint32
-		m20       uint32
-		m21       uint32
-		m22       uint32
-		m23       uint32
-		m30       uint32
-		m31       uint32
-		m32       uint32
-		m33       uint64
-		lbelow2   int32
-		lbelow3   int32
-		lbelow4   int32
-		lbelow5   int32
-		lbelow6   int32
-		lbelow7   int32
-		lbelow8   int32
-		lbelow9   int32
-		lbelow10  int32
-		lbelow11  int32
-		lbelow12  int32
-		lbelow13  int32
-		lbelow14  int32
-		lbelow15  int32
-		s00       uint32
-		s01       uint32
-		s02       uint32
-		s03       uint32
-		s10       uint32
-		s11       uint32
-		s12       uint32
-		s13       uint32
-		s20       uint32
-		s21       uint32
-		s22       uint32
-		s23       uint32
-		s30       uint32
-		s31       uint32
-		s32       uint32
-		s33       uint32
-		bits32    uint64
-		f         uint64
-		f0        uint64
-		f1        uint64
-		f2        uint64
-		f3        uint64
-		f4        uint64
-		g         uint64
-		g0        uint64
-		g1        uint64
-		g2        uint64
-		g3        uint64
-		g4        uint64
+		h0, h1, h2, h3, h4 uint32 // the hash accumulators
+		r0, r1, r2, r3, r4 uint64 // the r part of the key
 	)
 
-	var p int32
+	r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff)
+	r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03)
+	r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff)
+	r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff)
+	r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff)
 
-	l := int32(len(m))
+	R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5
 
-	r00 := uint32(r[0])
+	for len(msg) >= TagSize {
+		// h += msg
+		h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff
+		h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff
+		h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff
+		h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff
+		h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24)
 
-	r01 := uint32(r[1])
+		// h *= r
+		d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
+		d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
+		d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
+		d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
+		d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
 
-	r02 := uint32(r[2])
-	r0 := int64(2151)
+		// h %= p
+		h0 = uint32(d0) & 0x3ffffff
+		h1 = uint32(d1) & 0x3ffffff
+		h2 = uint32(d2) & 0x3ffffff
+		h3 = uint32(d3) & 0x3ffffff
+		h4 = uint32(d4) & 0x3ffffff
 
-	r03 := uint32(r[3])
-	r03 &= 15
-	r0 <<= 51
+		h0 += uint32(d4>>26) * 5
+		h1 += h0 >> 26
+		h0 = h0 & 0x3ffffff
 
-	r10 := uint32(r[4])
-	r10 &= 252
-	r01 <<= 8
-	r0 += int64(r00)
-
-	r11 := uint32(r[5])
-	r02 <<= 16
-	r0 += int64(r01)
-
-	r12 := uint32(r[6])
-	r03 <<= 24
-	r0 += int64(r02)
-
-	r13 := uint32(r[7])
-	r13 &= 15
-	r1 := int64(2215)
-	r0 += int64(r03)
-
-	d0 := r0
-	r1 <<= 51
-	r2 := int64(2279)
-
-	r20 := uint32(r[8])
-	r20 &= 252
-	r11 <<= 8
-	r1 += int64(r10)
-
-	r21 := uint32(r[9])
-	r12 <<= 16
-	r1 += int64(r11)
-
-	r22 := uint32(r[10])
-	r13 <<= 24
-	r1 += int64(r12)
-
-	r23 := uint32(r[11])
-	r23 &= 15
-	r2 <<= 51
-	r1 += int64(r13)
-
-	d1 := r1
-	r21 <<= 8
-	r2 += int64(r20)
-
-	r30 := uint32(r[12])
-	r30 &= 252
-	r22 <<= 16
-	r2 += int64(r21)
-
-	r31 := uint32(r[13])
-	r23 <<= 24
-	r2 += int64(r22)
-
-	r32 := uint32(r[14])
-	r2 += int64(r23)
-	r3 := int64(2343)
-
-	d2 := r2
-	r3 <<= 51
-
-	r33 := uint32(r[15])
-	r33 &= 15
-	r31 <<= 8
-	r3 += int64(r30)
-
-	r32 <<= 16
-	r3 += int64(r31)
-
-	r33 <<= 24
-	r3 += int64(r32)
-
-	r3 += int64(r33)
-	h0 := alpha32 - alpha32
-
-	d3 := r3
-	h1 := alpha32 - alpha32
-
-	h2 := alpha32 - alpha32
-
-	h3 := alpha32 - alpha32
-
-	h4 := alpha32 - alpha32
-
-	r0low := math.Float64frombits(uint64(d0))
-	h5 := alpha32 - alpha32
-
-	r1low := math.Float64frombits(uint64(d1))
-	h6 := alpha32 - alpha32
-
-	r2low := math.Float64frombits(uint64(d2))
-	h7 := alpha32 - alpha32
-
-	r0low -= alpha0
-
-	r1low -= alpha32
-
-	r2low -= alpha64
-
-	r0high := r0low + alpha18
-
-	r3low := math.Float64frombits(uint64(d3))
-
-	r1high := r1low + alpha50
-	sr1low := scale * r1low
-
-	r2high := r2low + alpha82
-	sr2low := scale * r2low
-
-	r0high -= alpha18
-	r0high_stack := r0high
-
-	r3low -= alpha96
-
-	r1high -= alpha50
-	r1high_stack := r1high
-
-	sr1high := sr1low + alpham80
-
-	r0low -= r0high
-
-	r2high -= alpha82
-	sr3low = scale * r3low
-
-	sr2high := sr2low + alpham48
-
-	r1low -= r1high
-	r1low_stack := r1low
-
-	sr1high -= alpham80
-	sr1high_stack := sr1high
-
-	r2low -= r2high
-	r2low_stack := r2low
-
-	sr2high -= alpham48
-	sr2high_stack := sr2high
-
-	r3high := r3low + alpha112
-	r0low_stack := r0low
-
-	sr1low -= sr1high
-	sr1low_stack := sr1low
-
-	sr3high := sr3low + alpham16
-	r2high_stack := r2high
-
-	sr2low -= sr2high
-	sr2low_stack := sr2low
-
-	r3high -= alpha112
-	r3high_stack := r3high
-
-	sr3high -= alpham16
-	sr3high_stack := sr3high
-
-	r3low -= r3high
-	r3low_stack := r3low
-
-	sr3low -= sr3high
-	sr3low_stack := sr3low
-
-	if l < 16 {
-		goto addatmost15bytes
+		msg = msg[TagSize:]
 	}
 
-	m00 = uint32(m[p+0])
-	m0 = 2151
+	if len(msg) > 0 {
+		var block [TagSize]byte
+		off := copy(block[:], msg)
+		block[off] = 0x01
 
-	m0 <<= 51
-	m1 = 2215
-	m01 = uint32(m[p+1])
+		// h += msg
+		h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff
+		h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff
+		h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff
+		h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff
+		h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8)
 
-	m1 <<= 51
-	m2 = 2279
-	m02 = uint32(m[p+2])
+		// h *= r
+		d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
+		d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
+		d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
+		d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
+		d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
 
-	m2 <<= 51
-	m3 = 2343
-	m03 = uint32(m[p+3])
+		// h %= p
+		h0 = uint32(d0) & 0x3ffffff
+		h1 = uint32(d1) & 0x3ffffff
+		h2 = uint32(d2) & 0x3ffffff
+		h3 = uint32(d3) & 0x3ffffff
+		h4 = uint32(d4) & 0x3ffffff
 
-	m10 = uint32(m[p+4])
-	m01 <<= 8
-	m0 += int64(m00)
-
-	m11 = uint32(m[p+5])
-	m02 <<= 16
-	m0 += int64(m01)
-
-	m12 = uint32(m[p+6])
-	m03 <<= 24
-	m0 += int64(m02)
-
-	m13 = uint32(m[p+7])
-	m3 <<= 51
-	m0 += int64(m03)
-
-	m20 = uint32(m[p+8])
-	m11 <<= 8
-	m1 += int64(m10)
-
-	m21 = uint32(m[p+9])
-	m12 <<= 16
-	m1 += int64(m11)
-
-	m22 = uint32(m[p+10])
-	m13 <<= 24
-	m1 += int64(m12)
-
-	m23 = uint32(m[p+11])
-	m1 += int64(m13)
-
-	m30 = uint32(m[p+12])
-	m21 <<= 8
-	m2 += int64(m20)
-
-	m31 = uint32(m[p+13])
-	m22 <<= 16
-	m2 += int64(m21)
-
-	m32 = uint32(m[p+14])
-	m23 <<= 24
-	m2 += int64(m22)
-
-	m33 = uint64(m[p+15])
-	m2 += int64(m23)
-
-	d0 = m0
-	m31 <<= 8
-	m3 += int64(m30)
-
-	d1 = m1
-	m32 <<= 16
-	m3 += int64(m31)
-
-	d2 = m2
-	m33 += 256
-
-	m33 <<= 24
-	m3 += int64(m32)
-
-	m3 += int64(m33)
-	d3 = m3
-
-	p += 16
-	l -= 16
-
-	z0 = math.Float64frombits(uint64(d0))
-
-	z1 = math.Float64frombits(uint64(d1))
-
-	z2 = math.Float64frombits(uint64(d2))
-
-	z3 = math.Float64frombits(uint64(d3))
-
-	z0 -= alpha0
-
-	z1 -= alpha32
-
-	z2 -= alpha64
-
-	z3 -= alpha96
-
-	h0 += z0
-
-	h1 += z1
-
-	h3 += z2
-
-	h5 += z3
-
-	if l < 16 {
-		goto multiplyaddatmost15bytes
+		h0 += uint32(d4>>26) * 5
+		h1 += h0 >> 26
+		h0 = h0 & 0x3ffffff
 	}
 
-multiplyaddatleast16bytes:
+	// h %= p reduction
+	h2 += h1 >> 26
+	h1 &= 0x3ffffff
+	h3 += h2 >> 26
+	h2 &= 0x3ffffff
+	h4 += h3 >> 26
+	h3 &= 0x3ffffff
+	h0 += 5 * (h4 >> 26)
+	h4 &= 0x3ffffff
+	h1 += h0 >> 26
+	h0 &= 0x3ffffff
 
-	m2 = 2279
-	m20 = uint32(m[p+8])
-	y7 = h7 + alpha130
+	// h - p
+	t0 := h0 + 5
+	t1 := h1 + (t0 >> 26)
+	t2 := h2 + (t1 >> 26)
+	t3 := h3 + (t2 >> 26)
+	t4 := h4 + (t3 >> 26) - (1 << 26)
+	t0 &= 0x3ffffff
+	t1 &= 0x3ffffff
+	t2 &= 0x3ffffff
+	t3 &= 0x3ffffff
 
-	m2 <<= 51
-	m3 = 2343
-	m21 = uint32(m[p+9])
-	y6 = h6 + alpha130
+	// select h if h < p else h - p
+	t_mask := (t4 >> 31) - 1
+	h_mask := ^t_mask
+	h0 = (h0 & h_mask) | (t0 & t_mask)
+	h1 = (h1 & h_mask) | (t1 & t_mask)
+	h2 = (h2 & h_mask) | (t2 & t_mask)
+	h3 = (h3 & h_mask) | (t3 & t_mask)
+	h4 = (h4 & h_mask) | (t4 & t_mask)
 
-	m3 <<= 51
-	m0 = 2151
-	m22 = uint32(m[p+10])
-	y1 = h1 + alpha32
+	// h %= 2^128
+	h0 |= h1 << 26
+	h1 = ((h1 >> 6) | (h2 << 20))
+	h2 = ((h2 >> 12) | (h3 << 14))
+	h3 = ((h3 >> 18) | (h4 << 8))
 
-	m0 <<= 51
-	m1 = 2215
-	m23 = uint32(m[p+11])
-	y0 = h0 + alpha32
+	// s: the s part of the key
+	// tag = (h + s) % (2^128)
+	t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:]))
+	h0 = uint32(t)
+	t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32)
+	h1 = uint32(t)
+	t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32)
+	h2 = uint32(t)
+	t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32)
+	h3 = uint32(t)
 
-	m1 <<= 51
-	m30 = uint32(m[p+12])
-	y7 -= alpha130
-
-	m21 <<= 8
-	m2 += int64(m20)
-	m31 = uint32(m[p+13])
-	y6 -= alpha130
-
-	m22 <<= 16
-	m2 += int64(m21)
-	m32 = uint32(m[p+14])
-	y1 -= alpha32
-
-	m23 <<= 24
-	m2 += int64(m22)
-	m33 = uint64(m[p+15])
-	y0 -= alpha32
-
-	m2 += int64(m23)
-	m00 = uint32(m[p+0])
-	y5 = h5 + alpha96
-
-	m31 <<= 8
-	m3 += int64(m30)
-	m01 = uint32(m[p+1])
-	y4 = h4 + alpha96
-
-	m32 <<= 16
-	m02 = uint32(m[p+2])
-	x7 = h7 - y7
-	y7 *= scale
-
-	m33 += 256
-	m03 = uint32(m[p+3])
-	x6 = h6 - y6
-	y6 *= scale
-
-	m33 <<= 24
-	m3 += int64(m31)
-	m10 = uint32(m[p+4])
-	x1 = h1 - y1
-
-	m01 <<= 8
-	m3 += int64(m32)
-	m11 = uint32(m[p+5])
-	x0 = h0 - y0
-
-	m3 += int64(m33)
-	m0 += int64(m00)
-	m12 = uint32(m[p+6])
-	y5 -= alpha96
-
-	m02 <<= 16
-	m0 += int64(m01)
-	m13 = uint32(m[p+7])
-	y4 -= alpha96
-
-	m03 <<= 24
-	m0 += int64(m02)
-	d2 = m2
-	x1 += y7
-
-	m0 += int64(m03)
-	d3 = m3
-	x0 += y6
-
-	m11 <<= 8
-	m1 += int64(m10)
-	d0 = m0
-	x7 += y5
-
-	m12 <<= 16
-	m1 += int64(m11)
-	x6 += y4
-
-	m13 <<= 24
-	m1 += int64(m12)
-	y3 = h3 + alpha64
-
-	m1 += int64(m13)
-	d1 = m1
-	y2 = h2 + alpha64
-
-	x0 += x1
-
-	x6 += x7
-
-	y3 -= alpha64
-	r3low = r3low_stack
-
-	y2 -= alpha64
-	r0low = r0low_stack
-
-	x5 = h5 - y5
-	r3lowx0 = r3low * x0
-	r3high = r3high_stack
-
-	x4 = h4 - y4
-	r0lowx6 = r0low * x6
-	r0high = r0high_stack
-
-	x3 = h3 - y3
-	r3highx0 = r3high * x0
-	sr1low = sr1low_stack
-
-	x2 = h2 - y2
-	r0highx6 = r0high * x6
-	sr1high = sr1high_stack
-
-	x5 += y3
-	r0lowx0 = r0low * x0
-	r1low = r1low_stack
-
-	h6 = r3lowx0 + r0lowx6
-	sr1lowx6 = sr1low * x6
-	r1high = r1high_stack
-
-	x4 += y2
-	r0highx0 = r0high * x0
-	sr2low = sr2low_stack
-
-	h7 = r3highx0 + r0highx6
-	sr1highx6 = sr1high * x6
-	sr2high = sr2high_stack
-
-	x3 += y1
-	r1lowx0 = r1low * x0
-	r2low = r2low_stack
-
-	h0 = r0lowx0 + sr1lowx6
-	sr2lowx6 = sr2low * x6
-	r2high = r2high_stack
-
-	x2 += y0
-	r1highx0 = r1high * x0
-	sr3low = sr3low_stack
-
-	h1 = r0highx0 + sr1highx6
-	sr2highx6 = sr2high * x6
-	sr3high = sr3high_stack
-
-	x4 += x5
-	r2lowx0 = r2low * x0
-	z2 = math.Float64frombits(uint64(d2))
-
-	h2 = r1lowx0 + sr2lowx6
-	sr3lowx6 = sr3low * x6
-
-	x2 += x3
-	r2highx0 = r2high * x0
-	z3 = math.Float64frombits(uint64(d3))
-
-	h3 = r1highx0 + sr2highx6
-	sr3highx6 = sr3high * x6
-
-	r1highx4 = r1high * x4
-	z2 -= alpha64
-
-	h4 = r2lowx0 + sr3lowx6
-	r1lowx4 = r1low * x4
-
-	r0highx4 = r0high * x4
-	z3 -= alpha96
-
-	h5 = r2highx0 + sr3highx6
-	r0lowx4 = r0low * x4
-
-	h7 += r1highx4
-	sr3highx4 = sr3high * x4
-
-	h6 += r1lowx4
-	sr3lowx4 = sr3low * x4
-
-	h5 += r0highx4
-	sr2highx4 = sr2high * x4
-
-	h4 += r0lowx4
-	sr2lowx4 = sr2low * x4
-
-	h3 += sr3highx4
-	r0lowx2 = r0low * x2
-
-	h2 += sr3lowx4
-	r0highx2 = r0high * x2
-
-	h1 += sr2highx4
-	r1lowx2 = r1low * x2
-
-	h0 += sr2lowx4
-	r1highx2 = r1high * x2
-
-	h2 += r0lowx2
-	r2lowx2 = r2low * x2
-
-	h3 += r0highx2
-	r2highx2 = r2high * x2
-
-	h4 += r1lowx2
-	sr3lowx2 = sr3low * x2
-
-	h5 += r1highx2
-	sr3highx2 = sr3high * x2
-
-	p += 16
-	l -= 16
-	h6 += r2lowx2
-
-	h7 += r2highx2
-
-	z1 = math.Float64frombits(uint64(d1))
-	h0 += sr3lowx2
-
-	z0 = math.Float64frombits(uint64(d0))
-	h1 += sr3highx2
-
-	z1 -= alpha32
-
-	z0 -= alpha0
-
-	h5 += z3
-
-	h3 += z2
-
-	h1 += z1
-
-	h0 += z0
-
-	if l >= 16 {
-		goto multiplyaddatleast16bytes
-	}
-
-multiplyaddatmost15bytes:
-
-	y7 = h7 + alpha130
-
-	y6 = h6 + alpha130
-
-	y1 = h1 + alpha32
-
-	y0 = h0 + alpha32
-
-	y7 -= alpha130
-
-	y6 -= alpha130
-
-	y1 -= alpha32
-
-	y0 -= alpha32
-
-	y5 = h5 + alpha96
-
-	y4 = h4 + alpha96
-
-	x7 = h7 - y7
-	y7 *= scale
-
-	x6 = h6 - y6
-	y6 *= scale
-
-	x1 = h1 - y1
-
-	x0 = h0 - y0
-
-	y5 -= alpha96
-
-	y4 -= alpha96
-
-	x1 += y7
-
-	x0 += y6
-
-	x7 += y5
-
-	x6 += y4
-
-	y3 = h3 + alpha64
-
-	y2 = h2 + alpha64
-
-	x0 += x1
-
-	x6 += x7
-
-	y3 -= alpha64
-	r3low = r3low_stack
-
-	y2 -= alpha64
-	r0low = r0low_stack
-
-	x5 = h5 - y5
-	r3lowx0 = r3low * x0
-	r3high = r3high_stack
-
-	x4 = h4 - y4
-	r0lowx6 = r0low * x6
-	r0high = r0high_stack
-
-	x3 = h3 - y3
-	r3highx0 = r3high * x0
-	sr1low = sr1low_stack
-
-	x2 = h2 - y2
-	r0highx6 = r0high * x6
-	sr1high = sr1high_stack
-
-	x5 += y3
-	r0lowx0 = r0low * x0
-	r1low = r1low_stack
-
-	h6 = r3lowx0 + r0lowx6
-	sr1lowx6 = sr1low * x6
-	r1high = r1high_stack
-
-	x4 += y2
-	r0highx0 = r0high * x0
-	sr2low = sr2low_stack
-
-	h7 = r3highx0 + r0highx6
-	sr1highx6 = sr1high * x6
-	sr2high = sr2high_stack
-
-	x3 += y1
-	r1lowx0 = r1low * x0
-	r2low = r2low_stack
-
-	h0 = r0lowx0 + sr1lowx6
-	sr2lowx6 = sr2low * x6
-	r2high = r2high_stack
-
-	x2 += y0
-	r1highx0 = r1high * x0
-	sr3low = sr3low_stack
-
-	h1 = r0highx0 + sr1highx6
-	sr2highx6 = sr2high * x6
-	sr3high = sr3high_stack
-
-	x4 += x5
-	r2lowx0 = r2low * x0
-
-	h2 = r1lowx0 + sr2lowx6
-	sr3lowx6 = sr3low * x6
-
-	x2 += x3
-	r2highx0 = r2high * x0
-
-	h3 = r1highx0 + sr2highx6
-	sr3highx6 = sr3high * x6
-
-	r1highx4 = r1high * x4
-
-	h4 = r2lowx0 + sr3lowx6
-	r1lowx4 = r1low * x4
-
-	r0highx4 = r0high * x4
-
-	h5 = r2highx0 + sr3highx6
-	r0lowx4 = r0low * x4
-
-	h7 += r1highx4
-	sr3highx4 = sr3high * x4
-
-	h6 += r1lowx4
-	sr3lowx4 = sr3low * x4
-
-	h5 += r0highx4
-	sr2highx4 = sr2high * x4
-
-	h4 += r0lowx4
-	sr2lowx4 = sr2low * x4
-
-	h3 += sr3highx4
-	r0lowx2 = r0low * x2
-
-	h2 += sr3lowx4
-	r0highx2 = r0high * x2
-
-	h1 += sr2highx4
-	r1lowx2 = r1low * x2
-
-	h0 += sr2lowx4
-	r1highx2 = r1high * x2
-
-	h2 += r0lowx2
-	r2lowx2 = r2low * x2
-
-	h3 += r0highx2
-	r2highx2 = r2high * x2
-
-	h4 += r1lowx2
-	sr3lowx2 = sr3low * x2
-
-	h5 += r1highx2
-	sr3highx2 = sr3high * x2
-
-	h6 += r2lowx2
-
-	h7 += r2highx2
-
-	h0 += sr3lowx2
-
-	h1 += sr3highx2
-
-addatmost15bytes:
-
-	if l == 0 {
-		goto nomorebytes
-	}
-
-	lbelow2 = l - 2
-
-	lbelow3 = l - 3
-
-	lbelow2 >>= 31
-	lbelow4 = l - 4
-
-	m00 = uint32(m[p+0])
-	lbelow3 >>= 31
-	p += lbelow2
-
-	m01 = uint32(m[p+1])
-	lbelow4 >>= 31
-	p += lbelow3
-
-	m02 = uint32(m[p+2])
-	p += lbelow4
-	m0 = 2151
-
-	m03 = uint32(m[p+3])
-	m0 <<= 51
-	m1 = 2215
-
-	m0 += int64(m00)
-	m01 &^= uint32(lbelow2)
-
-	m02 &^= uint32(lbelow3)
-	m01 -= uint32(lbelow2)
-
-	m01 <<= 8
-	m03 &^= uint32(lbelow4)
-
-	m0 += int64(m01)
-	lbelow2 -= lbelow3
-
-	m02 += uint32(lbelow2)
-	lbelow3 -= lbelow4
-
-	m02 <<= 16
-	m03 += uint32(lbelow3)
-
-	m03 <<= 24
-	m0 += int64(m02)
-
-	m0 += int64(m03)
-	lbelow5 = l - 5
-
-	lbelow6 = l - 6
-	lbelow7 = l - 7
-
-	lbelow5 >>= 31
-	lbelow8 = l - 8
-
-	lbelow6 >>= 31
-	p += lbelow5
-
-	m10 = uint32(m[p+4])
-	lbelow7 >>= 31
-	p += lbelow6
-
-	m11 = uint32(m[p+5])
-	lbelow8 >>= 31
-	p += lbelow7
-
-	m12 = uint32(m[p+6])
-	m1 <<= 51
-	p += lbelow8
-
-	m13 = uint32(m[p+7])
-	m10 &^= uint32(lbelow5)
-	lbelow4 -= lbelow5
-
-	m10 += uint32(lbelow4)
-	lbelow5 -= lbelow6
-
-	m11 &^= uint32(lbelow6)
-	m11 += uint32(lbelow5)
-
-	m11 <<= 8
-	m1 += int64(m10)
-
-	m1 += int64(m11)
-	m12 &^= uint32(lbelow7)
-
-	lbelow6 -= lbelow7
-	m13 &^= uint32(lbelow8)
-
-	m12 += uint32(lbelow6)
-	lbelow7 -= lbelow8
-
-	m12 <<= 16
-	m13 += uint32(lbelow7)
-
-	m13 <<= 24
-	m1 += int64(m12)
-
-	m1 += int64(m13)
-	m2 = 2279
-
-	lbelow9 = l - 9
-	m3 = 2343
-
-	lbelow10 = l - 10
-	lbelow11 = l - 11
-
-	lbelow9 >>= 31
-	lbelow12 = l - 12
-
-	lbelow10 >>= 31
-	p += lbelow9
-
-	m20 = uint32(m[p+8])
-	lbelow11 >>= 31
-	p += lbelow10
-
-	m21 = uint32(m[p+9])
-	lbelow12 >>= 31
-	p += lbelow11
-
-	m22 = uint32(m[p+10])
-	m2 <<= 51
-	p += lbelow12
-
-	m23 = uint32(m[p+11])
-	m20 &^= uint32(lbelow9)
-	lbelow8 -= lbelow9
-
-	m20 += uint32(lbelow8)
-	lbelow9 -= lbelow10
-
-	m21 &^= uint32(lbelow10)
-	m21 += uint32(lbelow9)
-
-	m21 <<= 8
-	m2 += int64(m20)
-
-	m2 += int64(m21)
-	m22 &^= uint32(lbelow11)
-
-	lbelow10 -= lbelow11
-	m23 &^= uint32(lbelow12)
-
-	m22 += uint32(lbelow10)
-	lbelow11 -= lbelow12
-
-	m22 <<= 16
-	m23 += uint32(lbelow11)
-
-	m23 <<= 24
-	m2 += int64(m22)
-
-	m3 <<= 51
-	lbelow13 = l - 13
-
-	lbelow13 >>= 31
-	lbelow14 = l - 14
-
-	lbelow14 >>= 31
-	p += lbelow13
-	lbelow15 = l - 15
-
-	m30 = uint32(m[p+12])
-	lbelow15 >>= 31
-	p += lbelow14
-
-	m31 = uint32(m[p+13])
-	p += lbelow15
-	m2 += int64(m23)
-
-	m32 = uint32(m[p+14])
-	m30 &^= uint32(lbelow13)
-	lbelow12 -= lbelow13
-
-	m30 += uint32(lbelow12)
-	lbelow13 -= lbelow14
-
-	m3 += int64(m30)
-	m31 &^= uint32(lbelow14)
-
-	m31 += uint32(lbelow13)
-	m32 &^= uint32(lbelow15)
-
-	m31 <<= 8
-	lbelow14 -= lbelow15
-
-	m3 += int64(m31)
-	m32 += uint32(lbelow14)
-	d0 = m0
-
-	m32 <<= 16
-	m33 = uint64(lbelow15 + 1)
-	d1 = m1
-
-	m33 <<= 24
-	m3 += int64(m32)
-	d2 = m2
-
-	m3 += int64(m33)
-	d3 = m3
-
-	z3 = math.Float64frombits(uint64(d3))
-
-	z2 = math.Float64frombits(uint64(d2))
-
-	z1 = math.Float64frombits(uint64(d1))
-
-	z0 = math.Float64frombits(uint64(d0))
-
-	z3 -= alpha96
-
-	z2 -= alpha64
-
-	z1 -= alpha32
-
-	z0 -= alpha0
-
-	h5 += z3
-
-	h3 += z2
-
-	h1 += z1
-
-	h0 += z0
-
-	y7 = h7 + alpha130
-
-	y6 = h6 + alpha130
-
-	y1 = h1 + alpha32
-
-	y0 = h0 + alpha32
-
-	y7 -= alpha130
-
-	y6 -= alpha130
-
-	y1 -= alpha32
-
-	y0 -= alpha32
-
-	y5 = h5 + alpha96
-
-	y4 = h4 + alpha96
-
-	x7 = h7 - y7
-	y7 *= scale
-
-	x6 = h6 - y6
-	y6 *= scale
-
-	x1 = h1 - y1
-
-	x0 = h0 - y0
-
-	y5 -= alpha96
-
-	y4 -= alpha96
-
-	x1 += y7
-
-	x0 += y6
-
-	x7 += y5
-
-	x6 += y4
-
-	y3 = h3 + alpha64
-
-	y2 = h2 + alpha64
-
-	x0 += x1
-
-	x6 += x7
-
-	y3 -= alpha64
-	r3low = r3low_stack
-
-	y2 -= alpha64
-	r0low = r0low_stack
-
-	x5 = h5 - y5
-	r3lowx0 = r3low * x0
-	r3high = r3high_stack
-
-	x4 = h4 - y4
-	r0lowx6 = r0low * x6
-	r0high = r0high_stack
-
-	x3 = h3 - y3
-	r3highx0 = r3high * x0
-	sr1low = sr1low_stack
-
-	x2 = h2 - y2
-	r0highx6 = r0high * x6
-	sr1high = sr1high_stack
-
-	x5 += y3
-	r0lowx0 = r0low * x0
-	r1low = r1low_stack
-
-	h6 = r3lowx0 + r0lowx6
-	sr1lowx6 = sr1low * x6
-	r1high = r1high_stack
-
-	x4 += y2
-	r0highx0 = r0high * x0
-	sr2low = sr2low_stack
-
-	h7 = r3highx0 + r0highx6
-	sr1highx6 = sr1high * x6
-	sr2high = sr2high_stack
-
-	x3 += y1
-	r1lowx0 = r1low * x0
-	r2low = r2low_stack
-
-	h0 = r0lowx0 + sr1lowx6
-	sr2lowx6 = sr2low * x6
-	r2high = r2high_stack
-
-	x2 += y0
-	r1highx0 = r1high * x0
-	sr3low = sr3low_stack
-
-	h1 = r0highx0 + sr1highx6
-	sr2highx6 = sr2high * x6
-	sr3high = sr3high_stack
-
-	x4 += x5
-	r2lowx0 = r2low * x0
-
-	h2 = r1lowx0 + sr2lowx6
-	sr3lowx6 = sr3low * x6
-
-	x2 += x3
-	r2highx0 = r2high * x0
-
-	h3 = r1highx0 + sr2highx6
-	sr3highx6 = sr3high * x6
-
-	r1highx4 = r1high * x4
-
-	h4 = r2lowx0 + sr3lowx6
-	r1lowx4 = r1low * x4
-
-	r0highx4 = r0high * x4
-
-	h5 = r2highx0 + sr3highx6
-	r0lowx4 = r0low * x4
-
-	h7 += r1highx4
-	sr3highx4 = sr3high * x4
-
-	h6 += r1lowx4
-	sr3lowx4 = sr3low * x4
-
-	h5 += r0highx4
-	sr2highx4 = sr2high * x4
-
-	h4 += r0lowx4
-	sr2lowx4 = sr2low * x4
-
-	h3 += sr3highx4
-	r0lowx2 = r0low * x2
-
-	h2 += sr3lowx4
-	r0highx2 = r0high * x2
-
-	h1 += sr2highx4
-	r1lowx2 = r1low * x2
-
-	h0 += sr2lowx4
-	r1highx2 = r1high * x2
-
-	h2 += r0lowx2
-	r2lowx2 = r2low * x2
-
-	h3 += r0highx2
-	r2highx2 = r2high * x2
-
-	h4 += r1lowx2
-	sr3lowx2 = sr3low * x2
-
-	h5 += r1highx2
-	sr3highx2 = sr3high * x2
-
-	h6 += r2lowx2
-
-	h7 += r2highx2
-
-	h0 += sr3lowx2
-
-	h1 += sr3highx2
-
-nomorebytes:
-
-	y7 = h7 + alpha130
-
-	y0 = h0 + alpha32
-
-	y1 = h1 + alpha32
-
-	y2 = h2 + alpha64
-
-	y7 -= alpha130
-
-	y3 = h3 + alpha64
-
-	y4 = h4 + alpha96
-
-	y5 = h5 + alpha96
-
-	x7 = h7 - y7
-	y7 *= scale
-
-	y0 -= alpha32
-
-	y1 -= alpha32
-
-	y2 -= alpha64
-
-	h6 += x7
-
-	y3 -= alpha64
-
-	y4 -= alpha96
-
-	y5 -= alpha96
-
-	y6 = h6 + alpha130
-
-	x0 = h0 - y0
-
-	x1 = h1 - y1
-
-	x2 = h2 - y2
-
-	y6 -= alpha130
-
-	x0 += y7
-
-	x3 = h3 - y3
-
-	x4 = h4 - y4
-
-	x5 = h5 - y5
-
-	x6 = h6 - y6
-
-	y6 *= scale
-
-	x2 += y0
-
-	x3 += y1
-
-	x4 += y2
-
-	x0 += y6
-
-	x5 += y3
-
-	x6 += y4
-
-	x2 += x3
-
-	x0 += x1
-
-	x4 += x5
-
-	x6 += y5
-
-	x2 += offset1
-	d1 = int64(math.Float64bits(x2))
-
-	x0 += offset0
-	d0 = int64(math.Float64bits(x0))
-
-	x4 += offset2
-	d2 = int64(math.Float64bits(x4))
-
-	x6 += offset3
-	d3 = int64(math.Float64bits(x6))
-
-	f0 = uint64(d0)
-
-	f1 = uint64(d1)
-	bits32 = math.MaxUint64
-
-	f2 = uint64(d2)
-	bits32 >>= 32
-
-	f3 = uint64(d3)
-	f = f0 >> 32
-
-	f0 &= bits32
-	f &= 255
-
-	f1 += f
-	g0 = f0 + 5
-
-	g = g0 >> 32
-	g0 &= bits32
-
-	f = f1 >> 32
-	f1 &= bits32
-
-	f &= 255
-	g1 = f1 + g
-
-	g = g1 >> 32
-	f2 += f
-
-	f = f2 >> 32
-	g1 &= bits32
-
-	f2 &= bits32
-	f &= 255
-
-	f3 += f
-	g2 = f2 + g
-
-	g = g2 >> 32
-	g2 &= bits32
-
-	f4 = f3 >> 32
-	f3 &= bits32
-
-	f4 &= 255
-	g3 = f3 + g
-
-	g = g3 >> 32
-	g3 &= bits32
-
-	g4 = f4 + g
-
-	g4 = g4 - 4
-	s00 = uint32(s[0])
-
-	f = uint64(int64(g4) >> 63)
-	s01 = uint32(s[1])
-
-	f0 &= f
-	g0 &^= f
-	s02 = uint32(s[2])
-
-	f1 &= f
-	f0 |= g0
-	s03 = uint32(s[3])
-
-	g1 &^= f
-	f2 &= f
-	s10 = uint32(s[4])
-
-	f3 &= f
-	g2 &^= f
-	s11 = uint32(s[5])
-
-	g3 &^= f
-	f1 |= g1
-	s12 = uint32(s[6])
-
-	f2 |= g2
-	f3 |= g3
-	s13 = uint32(s[7])
-
-	s01 <<= 8
-	f0 += uint64(s00)
-	s20 = uint32(s[8])
-
-	s02 <<= 16
-	f0 += uint64(s01)
-	s21 = uint32(s[9])
-
-	s03 <<= 24
-	f0 += uint64(s02)
-	s22 = uint32(s[10])
-
-	s11 <<= 8
-	f1 += uint64(s10)
-	s23 = uint32(s[11])
-
-	s12 <<= 16
-	f1 += uint64(s11)
-	s30 = uint32(s[12])
-
-	s13 <<= 24
-	f1 += uint64(s12)
-	s31 = uint32(s[13])
-
-	f0 += uint64(s03)
-	f1 += uint64(s13)
-	s32 = uint32(s[14])
-
-	s21 <<= 8
-	f2 += uint64(s20)
-	s33 = uint32(s[15])
-
-	s22 <<= 16
-	f2 += uint64(s21)
-
-	s23 <<= 24
-	f2 += uint64(s22)
-
-	s31 <<= 8
-	f3 += uint64(s30)
-
-	s32 <<= 16
-	f3 += uint64(s31)
-
-	s33 <<= 24
-	f3 += uint64(s32)
-
-	f2 += uint64(s23)
-	f3 += uint64(s33)
-
-	out[0] = byte(f0)
-	f0 >>= 8
-	out[1] = byte(f0)
-	f0 >>= 8
-	out[2] = byte(f0)
-	f0 >>= 8
-	out[3] = byte(f0)
-	f0 >>= 8
-	f1 += f0
-
-	out[4] = byte(f1)
-	f1 >>= 8
-	out[5] = byte(f1)
-	f1 >>= 8
-	out[6] = byte(f1)
-	f1 >>= 8
-	out[7] = byte(f1)
-	f1 >>= 8
-	f2 += f1
-
-	out[8] = byte(f2)
-	f2 >>= 8
-	out[9] = byte(f2)
-	f2 >>= 8
-	out[10] = byte(f2)
-	f2 >>= 8
-	out[11] = byte(f2)
-	f2 >>= 8
-	f3 += f2
-
-	out[12] = byte(f3)
-	f3 >>= 8
-	out[13] = byte(f3)
-	f3 >>= 8
-	out[14] = byte(f3)
-	f3 >>= 8
-	out[15] = byte(f3)
+	binary.LittleEndian.PutUint32(out[0:], h0)
+	binary.LittleEndian.PutUint32(out[4:], h1)
+	binary.LittleEndian.PutUint32(out[8:], h2)
+	binary.LittleEndian.PutUint32(out[12:], h3)
 }
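
Taken together, a minimal usage sketch of the package's public API (shown
against golang.org/x/crypto/poly1305, the upstream home of this vendored code;
Verify is part of the same package):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/poly1305"
    )

    func main() {
        // A Poly1305 key must only ever authenticate a single message;
        // the all-zero key here is purely for illustration.
        var key [32]byte
        msg := []byte("example message")

        var tag [16]byte
        poly1305.Sum(&tag, msg, &key)
        fmt.Printf("tag = %x\n", tag)

        fmt.Println("verified:", poly1305.Verify(&tag, msg, &key))
    }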