#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the amount of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As fully unrolled loop body is almost
# 15x larger, 8KB vs. 560B, it's fired only for longer inputs. But not
# on P4, where it kills performance, nor Sandy Bridge, where folded
# loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# P4		41	38	28		-	17.3
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;

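# For reference, the round functions implemented below are the standard
# FIPS 180-4 SHA-256 primitives (all operations on 32-bit words):
#
#	Ch(e,f,g)  = (e AND f) XOR ((NOT e) AND g)
#	Maj(a,b,c) = (a AND b) XOR (a AND c) XOR (b AND c)
#	Sigma0(a)  = ROTR(a,2) XOR ROTR(a,13) XOR ROTR(a,22)
#	Sigma1(e)  = ROTR(e,6) XOR ROTR(e,11) XOR ROTR(e,25)
#	sigma0(x)  = ROTR(x,7) XOR ROTR(x,18) XOR SHR(x,3)
#	sigma1(x)  = ROTR(x,17) XOR ROTR(x,19) XOR SHR(x,10)
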
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

$unroll_after = 64*4;	# If pre-evicted from L1P cache, the first spin
			# of the fully unrolled loop was measured to run
			# about 3-4x slower. If the slowdown coefficient
			# is N and the unrolled loop is m times faster,
			# you break even at (N-1)/(m-1) blocks. This then
			# needs to be adjusted for the probability of the
			# code being evicted, code size/cache size=1/4.
			# Typical m is 1.15...
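			# As a worked example (with assumed, representative
			# numbers): N~3.5 and m=1.15 break even at
			# 2.5/0.15 ~ 17 blocks; scaled by the ~1/4 eviction
			# probability that is ~4 blocks, i.e. 64*4 bytes,
			# matching the threshold above.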

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

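# Compact-loop stack frame, as implied by the offsets above (a sketch,
# not authoritative): esp+0 holds the scheduled b^c word ("magic"),
# esp+4..32 hold the working variables A..H, the 16 message words start
# at esp+36 ($Xoff), and the saved ctx/inp/end/sp pointers live beyond
# the X[] area. A and E stay in registers (eax/edx); each round executes
# lea esp,-4, sliding the window so the same offsets keep addressing the
# current round's values.
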
sub BODY_16_63() {
	&mov	($T,"ecx");			# "ecx" is preloaded
	&mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&ror	("ecx",18-7);
	&mov	("edi","esi");
	&ror	("esi",19-17);
	&xor	("ecx",$T);
	&shr	($T,3);
	&ror	("ecx",7);
	&xor	("esi","edi");
	&xor	($T,"ecx");			# T = sigma0(X[-15])
	&ror	("esi",17);
	&add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&shr	("edi",10);
	&add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&BODY_00_15(1);
}
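
# BODY_16_63 implements the SHA-256 message schedule,
# W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], with each
# rotate split into ror/shr pairs so the two xor chains can be
# interleaved. A plain-Perl model of sigma0/sigma1 for cross-checking
# the shift counts (illustrative helpers only; nothing in the generator
# calls them):
sub _ror32  { my($x,$n)=@_; (($x>>$n)|($x<<(32-$n)))&0xffffffff; }
sub _sigma0 { my $x=shift; _ror32($x,7)^_ror32($x,18)^($x>>3);   }
sub _sigma1 { my $x=shift; _ror32($x,17)^_ror32($x,19)^($x>>10); }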
sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	("ecx",$E);
	&xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	&mov	("esi",$Foff);
	&ror	("ecx",25-11);
	&add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	&mov	("edi",$Goff);
	&xor	("ecx",$E);
	&xor	("esi","edi");
	&mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	&mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&ror	("ecx",11-6);
	&and	("esi",$E);
	&mov	($Eoff,$E);		# modulo-scheduled
	&xor	($E,"ecx");
	&add	($T,$Hoff);		# T += h
	&xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	&mov	("ecx",$A);
	&add	($T,"esi");		# T += Ch(e,f,g)

	&ror	("ecx",22-13);
	&add	($T,$E);		# T += Sigma1(e)
	&mov	("edi",$Boff);
	&xor	("ecx",$A);
	&mov	($Aoff,$A);		# modulo-scheduled
	&lea	("esp",&DWP(-4,"esp"));
	&ror	("ecx",13-2);
	&mov	("esi",&DWP(0,$K256));
	&xor	("ecx",$A);
	&mov	($E,$Eoff);		# e in next iteration, d in this one
	&xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)

	&add	($T,"esi");		# T += K[i]
	&mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	&and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	&xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($K256,4);
	&add	($A,$T);		# h += T
}
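
# A note on the "alternative Maj" above (one of the two Pavel Semjanov
# ideas credited in the header): Maj(a,b,c) = Ch(a^b,c,b)
# = (((a^b)&(c^b))^b), so a round needs only the cached b^c word at
# esp+0 plus one AND and two XORs, and the a^b it computes doubles as
# the next round's b^c. The identity is easy to check bit-by-bit: when
# a==b it yields b (the majority), and when a!=b it yields c.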

&external_label("OPENSSL_ia32cap_P")		if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
						if (!$i386) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
						if ($xmm) {
	&je	(&label("AVX"))		if ($avx);
	&test	("ebx",1<<9);		# check for SSSE3
	&jnz	(&label("SSSE3"));
						} else {
	&je	(&label("loop_shrd"));
						}
						if ($unroll_after) {
	&sub	("eax","edi");
	&cmp	("eax",$unroll_after);
	&jae	(&label("unrolled"));
						} }
	&jmp	(&label("loop"));
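
# To summarize the dispatch above: P4 (which the header notes regresses
# on the unrolled code) always takes the compact loop; Intel CPUs
# advertising AVX take the AVX (or AVX+BMI) path; SSSE3-capable CPUs
# take the SSSE3 path; when SSE2 code generation is disabled, AVX-class
# Intel CPUs get the shrd flavour of the compact loop; any other CPU
# runs the fully unrolled loop for inputs of at least $unroll_after
# bytes and the compact loop otherwise. Per the OPENSSL_ia32cap_P
# conventions, word 0 is CPUID.1:EDX with bit 30 repurposed as an
# "Intel CPU" flag, and word 1 is CPUID.1:ECX, where bit 9 is SSSE3
# and bit 28 is AVX.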

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
	# copy input block to stack reversing byte and dword order
	for($i=0;$i<4;$i++) {
		&mov	("eax",&DWP($i*16+0,"edi"));
		&mov	("ebx",&DWP($i*16+4,"edi"));
		&mov	("ecx",&DWP($i*16+8,"edi"));
		&bswap	("eax");
		&mov	("edx",&DWP($i*16+12,"edi"));
		&bswap	("ebx");
		&push	("eax");
		&bswap	("ecx");
		&push	("ebx");
		&bswap	("edx");
		&push	("ecx");
		&push	("edx");
	}
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));	# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	# &mov	($Aoff,$A);
	&mov	($Boff,"ebx");
	&xor	("ebx","ecx");
	&mov	($Coff,"ecx");
	&mov	($Doff,"edi");
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	# &mov	($Eoff,$E);
	&mov	($Foff,"ebx");
	&mov	($Goff,"ecx");
	&mov	($Hoff,"edi");

&set_label("00_15$suffix",16);

	&BODY_00_15();

	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);

	&BODY_16_63();

	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));

	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));	# ctx
	# &mov	($A,$Aoff);
	&mov	("ebx",$Boff);
	# &mov	("edi",$Coff);
	&mov	("ecx",$Doff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	# &mov	($E,$Eoff);
	&mov	("eax",$Foff);
	&mov	("ebx",$Goff);
	&mov	("ecx",$Hoff);
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));	# inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");

	&lea	("esp",&DWP(4*(9+16+64),"esp"));	# destroy frame
	&sub	($K256,4*64);				# rewind K

	&cmp	("edi",&DWP(8,"esp"));			# are we done yet?
	&jb	(&label("loop$suffix"));
}
	&COMPACT_LOOP();
	&mov	("esp",&DWP(12,"esp"));	# restore sp
&function_end_A();
						if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local	*ror = sub { &shrd($_[0],@_) };
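	# (shrd with identical source and destination operands acts as a
	# rotate; substituting it for ror is what buys the ~20% on Sandy
	# Bridge noted above.)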
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));	# restore sp
&function_end_A();
						}

&set_label("K256",64);	# Yes! I keep it in the code segment!
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
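# (The mask above is the pshufb/vpshufb control the SIMD paths load as
# QWP(256,$K256): the 64 K256 words occupy exactly 256 bytes, so it sits
# immediately after the table. The K256 constants themselves are the
# first 32 bits of the fractional parts of the cube roots of the first
# 64 primes, per FIPS 180-4.)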
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
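
# In the unrolled and SIMD paths the eight working variables are not
# physically rotated between rounds; instead off() maps variable n of
# round $i to the fixed slot esp+4*((n-$i)&7), so the per-round
# "rotation" of a..h is folded into the addressing.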

						if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));

&set_label("grand_loop",16);
	# copy input block to stack reversing byte order
	for($i=0;$i<5;$i++) {
		&mov	("ebx",&DWP(12*$i+0,"edi"));
		&mov	("ecx",&DWP(12*$i+4,"edi"));
		&bswap	("ebx");
		&mov	("esi",&DWP(12*$i+8,"edi"));
		&bswap	("ecx");
		&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
		&bswap	("esi");
		&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
		&mov	(&DWP(32+12*$i+8,"esp"),"esi");
	}
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");

    my ($t1,$t2) = ("ecx","esi");

    for ($i=0;$i<64;$i++) {

	if ($i>=16) {
		&mov	($T,$t1);	# $t1 is preloaded
		# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
		&ror	($t1,18-7);
		&mov	("edi",$t2);
		&ror	($t2,19-17);
		&xor	($t1,$T);
		&shr	($T,3);
		&ror	($t1,7);
		&xor	($t2,"edi");
		&xor	($T,$t1);	# T = sigma0(X[-15])
		&ror	($t2,17);
		&add	($T,&DWP(32+4*($i&15),"esp"));		# T += X[-16]
		&shr	("edi",10);
		&add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
		#&xor	("edi",$t2)	# sigma1(X[-2])
		# &add	($T,"edi");	# T += sigma1(X[-2])
		# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
	}
	&mov	($t1,$E);
	&xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	&mov	($t2,&off($f));
	&ror	($E,25-11);
	&add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	&mov	("edi",&off($g));
	&xor	($E,$t1);
	&mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	&mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
	&xor	($t2,"edi");
	&ror	($E,11-6);
	&and	($t2,$t1);
	&mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&xor	($E,$t1);
	&add	($T,&off($h));		# T += h
	&xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	&mov	($t1,$AH[0]);
	&add	($T,"edi");		# T += Ch(e,f,g)

	&ror	($t1,22-13);
	&mov	($t2,$AH[0]);
	&mov	("edi",&off($b));
	&xor	($t1,$AH[0]);
	&mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	&xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&ror	($t1,13-2);
	&and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	&lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($t1,$t2);
	&xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)

	&add	($AH[1],$E);		# h += T
	&add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	&mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
    }
	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi",&DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ebx");
	&mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						}
						if (!$i386 && $xmm) {{{
my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
	&add	($K256,64);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions

	eval(shift(@insns));
	&movdqa	($t0,@X[1]);
	eval(shift(@insns));		# @
	eval(shift(@insns));
	&movdqa	($t3,@X[3]);
	eval(shift(@insns));
	eval(shift(@insns));
	&palignr ($t0,@X[0],4);		# X[1..4]
	eval(shift(@insns));
	eval(shift(@insns));		# @
	eval(shift(@insns));
	&palignr ($t3,@X[2],4);		# X[9..12]
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa	($t1,$t0);
	eval(shift(@insns));		# @
	eval(shift(@insns));
	&movdqa	($t2,$t0);
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld	($t0,3);
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&paddd	(@X[0],$t3);		# X[0..3] += X[9..12]
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld	($t2,7);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	eval(shift(@insns));
	&pshufd	($t3,@X[3],0b11111010);	# X[14..15]
	eval(shift(@insns));
	eval(shift(@insns));
	&pslld	($t1,32-18);
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t0,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld	($t2,18-7);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t0,$t1);
	eval(shift(@insns));
	eval(shift(@insns));
	&pslld	($t1,18-7);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t0,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa	($t2,$t3);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t0,$t1);		# sigma0(X[1..4])
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld	($t3,10);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&paddd	(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	eval(shift(@insns));
	eval(shift(@insns));
	&psrlq	($t2,17);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	&psrlq	($t2,19-17);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	&pshufd	($t3,$t3,0b10000000);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	eval(shift(@insns));
	&psrldq	($t3,8);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd	(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	eval(shift(@insns));		# @
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	eval(shift(@insns));
	&pshufd	($t3,@X[0],0b01010000);	# X[16..17]
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa	($t2,$t3);
	eval(shift(@insns));		# @
	&psrld	($t3,10);
	eval(shift(@insns));
	&psrlq	($t2,17);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	&psrlq	($t2,19-17);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&pxor	($t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&pshufd	($t3,$t3,0b00001000);
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&movdqa	($t2,&QWP(16*$j,$K256));
	eval(shift(@insns));
	eval(shift(@insns));
	&pslldq	($t3,8);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# @
	&paddd	(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd	($t2,@X[0]);
	eval(shift(@insns));		# @

	foreach (@insns) { eval; }	# remaining instructions

	&movdqa	(&QWP(32+16*$j,"esp"),$t2);
}
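
# The eval(shift(@insns)) pattern above interleaves the four scalar
# round bodies produced by &$body (roughly 120 instructions) with the
# ~30-instruction vector message-schedule update, a few scalar
# instructions per vector one, so the integer and SIMD pipes run in
# parallel instead of serializing on each other.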

sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	'&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	'&mov	("edi",&off($g));',
	'&xor	("esi","edi");',
	'&ror	($E,11-6);',
	'&and	("esi","ecx");',
	'&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	'&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	'&mov	("ecx",$AH[0]);',
	'&add	($E,"edi");',		# T += Ch(e,f,g)
	'&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',

	'&ror	("ecx",22-13);',
	'&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	'&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	'&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	'&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	'&add	($AH[1],$E);',		# h += T
	'&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}

	for ($i=0,$j=0; $j<4; $j++) {
	  &SSSE3_00_47($j,\&body_00_15,@X);
	  push(@X,shift(@X));		# rotate(@X)
	}
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

	for ($i=0; $i<16; ) {
	  foreach(body_00_15()) { eval; }
	}

	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi",&DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						if ($avx) {
&set_label("AVX",32);
						if ($avx>1) {
	&mov	("edx",&DWP(8,"edx"));
	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
						}
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd	($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd	($t1,@X[1],&QWP(16,$K256));
	&vpaddd	($t2,@X[2],&QWP(32,$K256));
	&vpaddd	($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_00_47"));

&set_label("avx_00_47",16);
	&add	($K256,64);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor	($t0,$t3,$t2);',
	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor	($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor	($t0,$t0,$t2);',
	'&vpsrld	($t2,$t3,10);',
	'&vpxor	($t0,$t0,$t1);',	# sigma0(X[1..4])
	'&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	'&vpxor	($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
	'&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&vpsrld	($t2,$t3,10);',
	'&vpsrlq	($t1,$t3,17);',
	'&vpxor	($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
	'&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}
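
# A note on the vector sigma1 above: pre-AVX2 SIMD has no 32-bit rotate,
# and sigma1 is only needed for two words at a time, so vpshufd first
# duplicates each of X[14] and X[15] (later X[16] and X[17]) into both
# halves of a 64-bit lane; a 64-bit vpsrlq by 17 or 19 then leaves the
# corresponding 32-bit rotate in the low dword, and the results are
# shuffled back into place and added in. Each Xupdate pass advances the
# schedule by four message words.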

local *ror = sub { &shrd($_[0],@_) };
sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {	# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && $insns[0] =~ /rorx/);
	}
	&vpaddd	($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }	# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}

	for ($i=0,$j=0; $j<4; $j++) {
	  &AVX_00_47($j,\&body_00_15,@X);
	  push(@X,shift(@X));		# rotate(@X)
	}
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

	for ($i=0; $i<16; ) {
	  foreach(body_00_15()) { eval; }
	}

	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi",&DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						if ($avx>1) {
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	'&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	'&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	'&and	($E,&off($f))',
	'&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	'&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	'&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	'&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h))',		# T += h
	'&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	'&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	'&add	("ecx",$E)',		# h += T
	'&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
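
# bodyx_00_15 leans on BMI1/BMI2: rorx is a non-destructive rotate that
# leaves the flags untouched, so the three Sigma rotates carry no false
# dependencies, and andn computes (~e)&g directly, so Ch(e,f,g) falls
# out as (e&f)|(~e&g) -- the OR is safe because the two terms can never
# both have a bit set. The lea-based adds likewise avoid touching flags.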

&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd	($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd	($t1,@X[1],&QWP(16,$K256));
	&vpaddd	($t2,@X[2],&QWP(32,$K256));
	&vpaddd	($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
	&add	($K256,64);

	for ($i=0,$j=0; $j<4; $j++) {
	  &AVX_00_47($j,\&bodyx_00_15,@X);
	  push(@X,shift(@X));		# rotate(@X)
	}
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

	for ($i=0; $i<16; ) {
	  foreach(bodyx_00_15()) { eval; }
	}

	&mov	("esi",&DWP(96,"esp"));	# ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi",&DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						}
						}
						}}}
&function_end_B("sha256_block_data_order");

&asm_finish();