#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives modest
# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common case, rsa1024 sign, is improved
# by a respectable 50%. It remains to be seen whether loop unrolling
# and a dedicated squaring routine can provide further improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule the inner loops in such a manner that
# they "fall through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.

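# For reference, below is a word-level sketch of the algorithm all of
# these routines implement (illustrative pseudo-code only, never
# emitted into the assembly; R = 2^(64*num), and n0 = -np[0]^-1 mod
# 2^64 is precomputed by the caller):
#
#	t[0..num] = 0;
#	for (i = 0; i < num; i++) {		# one pass per word of bp
#		t[] += ap[] * bp[i];		# num+1-word accumulator
#		m = (t[0] * n0) mod 2^64;
#		t[] += np[] * m;		# makes t[0] zero ...
#		t[] >>= 64;			# ... so shift it out
#	}
#	return t >= np ? t - np : t;		# == a*b*R^-1 mod np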
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable this option after testing. $addx goes up to 1.
$addx = 0;

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

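# Usage note (informational): with R = 2^(64*num) the routine computes
# rp[] = ap[]*bp[]*R^-1 mod np[], so callers keep operands in
# Montgomery form, a' = a*R mod n.  Then a'*b'*R^-1 == (a*b)*R mod n,
# i.e. products stay in Montgomery form, and a final Montgomery
# multiplication by 1 converts back out of it.
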
$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
___
$code.=<<___ if ($addx);
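	# OPENSSL_ia32cap_P+8 holds EBX from CPUID leaf 7; bit 8 there is
	# BMI2 (MULX) and bit 19 is ADX (ADCX/ADOX), hence the 0x80100
	# mask tested before the MULX paths below.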
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	test	\$7,${num}d
	jz	.Lsqr8x_enter
	jmp	.Lmul4x_enter

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	# Some OSes (Windows) insist on the stack being "wired" to
	# physical memory in a strictly sequential manner, i.e. if a
	# stack allocation spans two pages, then a reference to the
	# farthest one can be punished with SEGV. But page walking does
	# good even on other OSes, because it guarantees that a villain
	# thread hits the guard page before it can do damage to an
	# innocent one...
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lmul_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x66,0x2e		# predict non-taken
	jnc	.Lmul_page_walk

	mov	$bp,%r12		# reassign $bp
___
				$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jb	.Louter

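	# Final reduction: store tp[] - np[] into rp[] with a running
	# borrow, then select the result in constant time below.  If the
	# subtraction borrows (tp < np), %rax becomes an all-ones mask
	# and the original tp[] is copied back over rp[]; otherwise %rax
	# is zero and the difference is kept.  No secret-dependent branch
	# is taken either way.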
	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	mov	$num,$j			# j=num
.align	16
.Lcopy:					# copy or in-place refresh
	mov	(%rsp,$i,8),$ap
	mov	($rp,$i,8),$np
	xor	$np,$ap			# conditional select:
	and	%rax,$ap		# ((ap ^ np) & %rax) ^ np
	xor	$np,$ap			# ap = borrow?tp:rp
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	$ap,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
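# Informational: @A and @N are rotating register pairs carrying the
# two carry chains of the 4-way unrolled loop; @A holds the high
# halves of the ap[j]*bp[i] products and @N those of the np[j]*m1
# reduction products, the two registers in each pair swapping roles
# from one limb to the next.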
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lmulx4x_enter
___
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lmul4x_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x2e			# predict non-taken
	jnc	.Lmul4x_page_walk

	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
				$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j+=4
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j+=4
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jb	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jb	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i+=4
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	movq	@ri[0],%xmm0
	punpcklqdq %xmm0,%xmm0		# extend mask to 128 bits
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0

	mov	$num,$j
	pxor	%xmm5,%xmm5
	jmp	.Lcopy4x
.align	16
.Lcopy4x:				# copy or in-place refresh
	movdqu	(%rsp,$i),%xmm2
	movdqu	16(%rsp,$i),%xmm4
	movdqu	($rp,$i),%xmm1
	movdqu	16($rp,$i),%xmm3
	pxor	%xmm1,%xmm2		# conditional select
	pxor	%xmm3,%xmm4
	pand	%xmm0,%xmm2
	pand	%xmm0,%xmm4
	pxor	%xmm1,%xmm2
	pxor	%xmm3,%xmm4
	movdqu	%xmm2,($rp,$i)
	movdqu	%xmm4,16($rp,$i)
	movdqa	%xmm5,(%rsp,$i)		# zap temporary vectors
	movdqa	%xmm5,16(%rsp,$i)

	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

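# Why a separate squaring path pays off (informational): in a*a every
# cross product a[i]*a[j] with i != j occurs twice, so the
# off-diagonal terms can be computed once and doubled, with the
# a[i]^2 terms added along the diagonal afterwards.  That is roughly
# half the multiplications of a general product; the actual kernels
# live in x86_64-mont5.pl (bn_sqr8x_internal/bn_sqrx8x_internal).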
$code.=<<___ if ($addx);
.extern	bn_sqrx8x_internal		# see x86_64-mont5 module
___
$code.=<<___;
.extern	bn_sqr8x_internal		# see x86_64-mont5 module

.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.Lsqr8x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,%r10d
	shl	\$3,${num}d		# convert $num to bytes
	shl	\$3+2,%r10		# 4*$num
	neg	$num

	##############################################################
	# Ensure that the stack frame doesn't alias with $aptr modulo
	# 4096. This is done to allow the memory disambiguation logic
	# to do its job.
	#
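	# (Rationale, as I understand it: if %rsp and $aptr were equal
	# modulo 4096, loads from $aptr and stores to the frame would
	# share their low 12 address bits, and the store-to-load
	# disambiguation hardware, which compares those bits first, could
	# falsely predict aliasing and serialize the accesses.)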
	lea	-64(%rsp,$num,2),%r11
	mov	($n0),$n0		# *n0
	sub	$aptr,%r11
	and	\$4095,%r11
	cmp	%r11,%r10
	jb	.Lsqr8x_sp_alt
	sub	%r11,%rsp		# align with $aptr
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	jmp	.Lsqr8x_sp_done

.align	32
.Lsqr8x_sp_alt:
	lea	4096-64(,$num,2),%r10	# 4096-frame-2*$num
	lea	-64(%rsp,$num,2),%rsp	# alloca(frame+2*$num)
	sub	%r10,%r11
	mov	\$0,%r10
	cmovc	%r10,%r11
	sub	%r11,%rsp
.Lsqr8x_sp_done:
	and	\$-64,%rsp
	mov	%rax,%r11
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lsqr8x_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x2e			# predict non-taken
	jnc	.Lsqr8x_page_walk

	mov	$num,%r10
	neg	$num

	mov	$n0,  32(%rsp)
	mov	%rax, 40(%rsp)		# save original %rsp
.Lsqr8x_body:

	movq	$nptr, %xmm2		# save pointer to modulus
	pxor	%xmm0,%xmm0
	movq	$rptr,%xmm1		# save $rptr
	movq	%r10, %xmm3		# -$num
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%eax
	and	\$0x80100,%eax
	cmp	\$0x80100,%eax
	jne	.Lsqr8x_nox

	call	bn_sqrx8x_internal	# see x86_64-mont5 module
					# %rax	top-most carry
					# %rbp	nptr
					# %rcx	-8*num
					# %r8	end of tp[2*num]
	lea	(%r8,%rcx),%rbx
	mov	%rcx,$num
	mov	%rcx,%rdx
	movq	%xmm1,$rptr
	sar	\$3+2,%rcx		# %cf=0
	jmp	.Lsqr8x_sub

.align	32
.Lsqr8x_nox:
___
$code.=<<___;
	call	bn_sqr8x_internal	# see x86_64-mont5 module
					# %rax	top-most carry
					# %rbp	nptr
					# %r8	-8*num
					# %rdi	end of tp[2*num]
	lea	(%rdi,$num),%rbx
	mov	$num,%rcx
	mov	$num,%rdx
	movq	%xmm1,$rptr
	sar	\$3+2,%rcx		# %cf=0
	jmp	.Lsqr8x_sub

.align	32
.Lsqr8x_sub:
	mov	8*0(%rbx),%r12
	mov	8*1(%rbx),%r13
	mov	8*2(%rbx),%r14
	mov	8*3(%rbx),%r15
	lea	8*4(%rbx),%rbx
	sbb	8*0(%rbp),%r12
	sbb	8*1(%rbp),%r13
	sbb	8*2(%rbp),%r14
	sbb	8*3(%rbp),%r15
	lea	8*4(%rbp),%rbp
	mov	%r12,8*0($rptr)
	mov	%r13,8*1($rptr)
	mov	%r14,8*2($rptr)
	mov	%r15,8*3($rptr)
	lea	8*4($rptr),$rptr
	inc	%rcx			# preserves %cf
	jnz	.Lsqr8x_sub

	sbb	\$0,%rax		# top-most carry
	lea	(%rbx,$num),%rbx	# rewind
	lea	($rptr,$num),$rptr	# rewind

	movq	%rax,%xmm1
	pxor	%xmm0,%xmm0
	pshufd	\$0,%xmm1,%xmm1
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lsqr8x_cond_copy

.align	32
.Lsqr8x_cond_copy:
	movdqa	16*0(%rbx),%xmm2
	movdqa	16*1(%rbx),%xmm3
	lea	16*2(%rbx),%rbx
	movdqu	16*0($rptr),%xmm4
	movdqu	16*1($rptr),%xmm5
	lea	16*2($rptr),$rptr
	movdqa	%xmm0,-16*2(%rbx)	# zero tp
	movdqa	%xmm0,-16*1(%rbx)
	movdqa	%xmm0,-16*2(%rbx,%rdx)
	movdqa	%xmm0,-16*1(%rbx,%rdx)
	pcmpeqd	%xmm1,%xmm0
	pand	%xmm1,%xmm2
	pand	%xmm1,%xmm3
	pand	%xmm0,%xmm4
	pand	%xmm0,%xmm5
	pxor	%xmm0,%xmm0
	por	%xmm2,%xmm4
	por	%xmm3,%xmm5
	movdqu	%xmm4,-16*2($rptr)
	movdqu	%xmm5,-16*1($rptr)
	add	\$32,$num
	jnz	.Lsqr8x_cond_copy

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lsqr8x_epilogue:
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

$code.=<<___;
.type	bn_mulx4x_mont,\@function,6
.align	32
bn_mulx4x_mont:
.Lmulx4x_enter:
	mov	%rsp,%rax
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	.byte	0x67
	xor	%r10,%r10
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
	and	\$-128,%rsp
	mov	%rax,%r11
	sub	%rsp,%r11
	and	\$-4096,%r11
.Lmulx4x_page_walk:
	mov	(%rsp,%r11),%r10
	sub	\$4096,%r11
	.byte	0x66,0x2e		# predict non-taken
	jnc	.Lmulx4x_page_walk

	lea	($bp,$num),%r10
	##############################################################
	# Stack layout
	# +0	num
	# +8	off-loaded &b[i]
	# +16	end of b[num]
	# +24	saved n0
	# +32	saved rp
	# +40	saved %rsp
	# +48	inner counter
	# +56
	# +64	tmp[num+1]
	#
	mov	$num,0(%rsp)		# save $num
	shr	\$5,$num
	mov	%r10,16(%rsp)		# end of b[num]
	sub	\$1,$num
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	%rax,40(%rsp)		# save original %rsp
	mov	$num,48(%rsp)		# inner counter
	jmp	.Lmulx4x_body

.align	32
.Lmulx4x_body:
___
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $rptr=$bptr;
$code.=<<___;
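	# Note on the instruction mix below: MULX computes a 64x64->128
	# product without touching flags, ADCX adds with carry through CF
	# only, and ADOX through OF only.  That provides two independent
	# carry chains, letting the multiplication and the Montgomery
	# reduction interleave without saving and restoring flags.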
	lea	8($bp),$bptr
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	add	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adc	%r14,%r12
	adc	\$0,%r13

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	lea	4*8($aptr),$aptr
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx 2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_1st

.align	32
.Lmulx4x_1st:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	adcx	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	.byte	0x67,0x67
	mov	$mi,%rdx
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	mov	%r11,-4*8($tptr)
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_1st

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	add	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	jmp	.Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr		# b++
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	lea	64+4*8(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr

	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	%ebp,%ebp		# xor $zero,$zero	# cf=0, of=0
	mov	%rdx,$bi
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi
	adcx	%r14,%r11
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	adcx	%r15,%r12
	adox	-2*8($tptr),%r12
	adcx	$zero,%r13
	adox	$zero,%r13

	mov	$bptr,8(%rsp)		# off-load &b[i]
	mov	$mi,%r15
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	%ebp,%ebp		# xor $zero,$zero	# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	mov	$mi,%rdx
	adcx	%rax,%r13
	adox	-1*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	lea	4*8($nptr),$nptr
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	lea	64(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr
	neg	%r15
	mov	$num,%rdx
	shr	\$3+2,$num		# %cf=0
	mov	32(%rsp),$rptr		# restore rp
	jmp	.Lmulx4x_sub

.align	32
.Lmulx4x_sub:
	mov	8*0($tptr),%r11
	mov	8*1($tptr),%r12
	mov	8*2($tptr),%r13
	mov	8*3($tptr),%r14
	lea	8*4($tptr),$tptr
	sbb	8*0($nptr),%r11
	sbb	8*1($nptr),%r12
	sbb	8*2($nptr),%r13
	sbb	8*3($nptr),%r14
	lea	8*4($nptr),$nptr
	mov	%r11,8*0($rptr)
	mov	%r12,8*1($rptr)
	mov	%r13,8*2($rptr)
	mov	%r14,8*3($rptr)
	lea	8*4($rptr),$rptr
	dec	$num			# preserves %cf
	jnz	.Lmulx4x_sub

	sbb	\$0,%r15		# top-most carry
	lea	64(%rsp),$tptr
	sub	%rdx,$rptr		# rewind

	movq	%r15,%xmm1
	pxor	%xmm0,%xmm0
	pshufd	\$0,%xmm1,%xmm1
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lmulx4x_cond_copy

.align	32
.Lmulx4x_cond_copy:
	movdqa	16*0($tptr),%xmm2
	movdqa	16*1($tptr),%xmm3
	lea	16*2($tptr),$tptr
	movdqu	16*0($rptr),%xmm4
	movdqu	16*1($rptr),%xmm5
	lea	16*2($rptr),$rptr
	movdqa	%xmm0,-16*2($tptr)	# zero tp
	movdqa	%xmm0,-16*1($tptr)
	pcmpeqd	%xmm1,%xmm0
	pand	%xmm1,%xmm2
	pand	%xmm1,%xmm3
	pand	%xmm0,%xmm4
	pand	%xmm0,%xmm5
	pxor	%xmm0,%xmm0
	por	%xmm2,%xmm4
	por	%xmm3,%xmm5
	movdqu	%xmm4,-16*2($rptr)
	movdqu	%xmm5,-16*1($rptr)
	sub	\$32,%rdx
	jnz	.Lmulx4x_cond_copy

	mov	%rdx,($tptr)

	mov	\$1,%rax
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lmulx4x_epilogue:
	ret
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
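
# Both handlers follow the usual perlasm SEH pattern: the HandlerData[]
# entries emitted below bracket each function between its body and
# epilogue labels; if context->Rip lies inside that range, the
# non-volatile registers are recovered from the function's stack frame
# before unwinding resumes via RtlVirtualUnwind.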

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<.Lsqr_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lsqr_epilogue
	jae	.Lcommon_seh_tail

	mov	40(%rax),%rax		# pull saved stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
___
}

print $code;
close STDOUT;