#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
#
# ====================================================================
# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
# project.
# ====================================================================
#
# This module implements Poly1305 hash for ARMv8.
#
# June 2015
#
# Numbers are cycles per processed byte with poly1305_blocks alone.
#
#		IALU/gcc-4.9	NEON
#
# Apple A7	1.86/+5%	0.72
# Cortex-A53	2.69/+58%	1.47
# Cortex-A57	2.70/+7%	1.14
# Denver	1.64/+50%	1.18(*)
# X-Gene	2.13/+68%	2.27
# Mongoose	1.77/+75%	1.12
# Kryo		2.70/+55%	1.13
# ThunderX2	1.17/+95%	1.36
#
# (*) estimate based on resource availability is less than 1.0,
#     i.e. the measured result is worse than expected, presumably
#     because the binary translator is not almighty;

$flavour=shift;
$output=shift;

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}

my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3));
my ($mac,$nonce)=($inp,$len);
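# (poly1305_emit(ctx,mac,nonce) arrives with its second and third
#  arguments in x1/x2, hence the aliases above)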

my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14));

$code.=<<___;
#ifndef __KERNEL__
# include "arm_arch.h"
.extern	OPENSSL_armcap_P
#else
# define poly1305_init poly1305_block_init
# define poly1305_blocks poly1305_blocks_arm64
#endif

.text

// forward "declarations" are required for Apple
.globl	poly1305_blocks
.globl	poly1305_emit

.globl	poly1305_init
.type	poly1305_init,%function
.align	5
poly1305_init:
	cmp	$inp,xzr
	stp	xzr,xzr,[$ctx]		// zero hash value
	stp	xzr,xzr,[$ctx,#16]	// [along with is_base2_26]

	csel	x0,xzr,x0,eq
	b.eq	.Lno_key

#ifndef __KERNEL__
	adrp	x17,OPENSSL_armcap_P
	ldr	w17,[x17,#:lo12:OPENSSL_armcap_P]
#endif

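	// "clamp" the key as required by the Poly1305 spec:
	// r &= 0x0ffffffc_0ffffffc_0ffffffc_0fffffff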
	ldp	$r0,$r1,[$inp]		// load key
	mov	$s1,#0xfffffffc0fffffff
	movk	$s1,#0x0fff,lsl#48
#ifdef __AARCH64EB__
	rev	$r0,$r0			// flip bytes
	rev	$r1,$r1
#endif
	and	$r0,$r0,$s1		// &=0ffffffc0fffffff
	and	$s1,$s1,#-4
	and	$r1,$r1,$s1		// &=0ffffffc0ffffffc
	mov	w#$s1,#-1
	stp	$r0,$r1,[$ctx,#32]	// save key value
	str	w#$s1,[$ctx,#48]	// impossible key power value
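	// (-1 can never occur as a genuine 26-bit limb of a key power,
	//  so the NEON code below uses it to detect an uninitialized
	//  r^n table)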

#ifndef __KERNEL__
	tst	w17,#ARMV7_NEON

	adr	$d0,.Lpoly1305_blocks
	adr	$r0,.Lpoly1305_blocks_neon
	adr	$d1,.Lpoly1305_emit

	csel	$d0,$d0,$r0,eq

# ifdef __ILP32__
	stp	w#$d0,w#$d1,[$len]
# else
	stp	$d0,$d1,[$len]
# endif
#endif
	mov	x0,#1
.Lno_key:
	ret
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,%function
.align	5
poly1305_blocks:
.Lpoly1305_blocks:
	ands	$len,$len,#-16
	b.eq	.Lno_data

	ldp	$h0,$h1,[$ctx]		// load hash value
	ldp	$h2,x17,[$ctx,#16]	// [along with is_base2_26]
	ldp	$r0,$r1,[$ctx,#32]	// load key value

#ifdef __AARCH64EB__
	lsr	$d0,$h0,#32
	mov	w#$d1,w#$h0
	lsr	$d2,$h1,#32
	mov	w15,w#$h1
	lsr	x16,$h2,#32
#else
	mov	w#$d0,w#$h0
	lsr	$d1,$h0,#32
	mov	w#$d2,w#$h1
	lsr	x15,$h1,#32
	mov	w16,w#$h2
#endif

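	// h = h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104, where the
	// h_i are the five 26-bit limbs just loaded; repack into the
	// three 64-bit words d2:d1:d0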
	add	$d0,$d0,$d1,lsl#26	// base 2^26 -> base 2^64
	lsr	$d1,$d2,#12
	adds	$d0,$d0,$d2,lsl#52
	add	$d1,$d1,x15,lsl#14
	adc	$d1,$d1,xzr
	lsr	$d2,x16,#24
	adds	$d1,$d1,x16,lsl#40
	adc	$d2,$d2,xzr

	cmp	x17,#0			// is_base2_26?
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
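	// (r1's low two bits are zero after clamping, so s1 = 5*(r1>>2);
	//  this is what lets the h1*r1 term at 2^128 fold down via
	//  2^130 = 5 mod 2^130-5)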
	csel	$h0,$h0,$d0,eq		// choose between radixes
	csel	$h1,$h1,$d1,eq
	csel	$h2,$h2,$d2,eq

.Loop:
	ldp	$t0,$t1,[$inp],#16	// load input
	sub	$len,$len,#16
#ifdef __AARCH64EB__
	rev	$t0,$t0
	rev	$t1,$t1
#endif
	adds	$h0,$h0,$t0		// accumulate input
	adcs	$h1,$h1,$t1

	mul	$d0,$h0,$r0		// h0*r0
	adc	$h2,$h2,$padbit
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

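	// 2^130 mod (2^130-5) = 5, so the bits at 2^130 and above,
	// i.e. d2>>2, fold back in as 5*(d2>>2) = (d2&-4) + (d2>>2)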
	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adcs	$h1,$d1,xzr
	adc	$h2,$h2,xzr

	cbnz	$len,.Loop

	stp	$h0,$h1,[$ctx]		// store hash value
	stp	$h2,xzr,[$ctx,#16]	// [and clear is_base2_26]

.Lno_data:
	ret
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,%function
.align	5
poly1305_emit:
.Lpoly1305_emit:
	ldp	$h0,$h1,[$ctx]		// load hash base 2^64
	ldp	$h2,$r0,[$ctx,#16]	// [along with is_base2_26]
	ldp	$t0,$t1,[$nonce]	// load nonce

#ifdef __AARCH64EB__
	lsr	$d0,$h0,#32
	mov	w#$d1,w#$h0
	lsr	$d2,$h1,#32
	mov	w15,w#$h1
	lsr	x16,$h2,#32
#else
	mov	w#$d0,w#$h0
	lsr	$d1,$h0,#32
	mov	w#$d2,w#$h1
	lsr	x15,$h1,#32
	mov	w16,w#$h2
#endif

	add	$d0,$d0,$d1,lsl#26	// base 2^26 -> base 2^64
	lsr	$d1,$d2,#12
	adds	$d0,$d0,$d2,lsl#52
	add	$d1,$d1,x15,lsl#14
	adc	$d1,$d1,xzr
	lsr	$d2,x16,#24
	adds	$d1,$d1,x16,lsl#40
	adc	$d2,$d2,xzr

	cmp	$r0,#0			// is_base2_26?
	csel	$h0,$h0,$d0,eq		// choose between radixes
	csel	$h1,$h1,$d1,eq
	csel	$h2,$h2,$d2,eq

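	// constant-time final reduction: if h+5 carries into bit 130,
	// then h >= p = 2^130-5 and the low 128 bits of h+5 equal h-p,
	// which is the value to emit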
	adds	$d0,$h0,#5		// compare to modulus
	adcs	$d1,$h1,xzr
	adc	$d2,$h2,xzr

	tst	$d2,#-4			// see if it's carried/borrowed

	csel	$h0,$h0,$d0,eq
	csel	$h1,$h1,$d1,eq

#ifdef __AARCH64EB__
	ror	$t0,$t0,#32		// flip nonce words
	ror	$t1,$t1,#32
#endif
	adds	$h0,$h0,$t0		// accumulate nonce
	adc	$h1,$h1,$t1
#ifdef __AARCH64EB__
	rev	$h0,$h0			// flip output bytes
	rev	$h1,$h1
#endif
	stp	$h0,$h1,[$mac]		// write result

	ret
.size	poly1305_emit,.-poly1305_emit
___
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8));
my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13));
my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18));
my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23));
my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
my ($T0,$T1,$MASK) = map("v$_",(29..31));

my ($in2,$zeros)=("x16","x17");
my $is_base2_26 = $zeros;		# borrow

$code.=<<___;
.type	poly1305_mult,%function
.align	5
poly1305_mult:
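	// h2:h1:h0 = (h2:h1:h0 * r1:r0) mod 2^130-5, possibly partially
	// reduced; expects s1 = r1 + (r1>>2) precomputed by the caller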
	mul	$d0,$h0,$r0		// h0*r0
	umulh	$d1,$h0,$r0

	mul	$t0,$h1,$s1		// h1*5*r1
	umulh	$t1,$h1,$s1

	adds	$d0,$d0,$t0
	mul	$t0,$h0,$r1		// h0*r1
	adc	$d1,$d1,$t1
	umulh	$d2,$h0,$r1

	adds	$d1,$d1,$t0
	mul	$t0,$h1,$r0		// h1*r0
	adc	$d2,$d2,xzr
	umulh	$t1,$h1,$r0

	adds	$d1,$d1,$t0
	mul	$t0,$h2,$s1		// h2*5*r1
	adc	$d2,$d2,$t1
	mul	$t1,$h2,$r0		// h2*r0

	adds	$d1,$d1,$t0
	adc	$d2,$d2,$t1

	and	$t0,$d2,#-4		// final reduction
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$d0,$t0
	adcs	$h1,$d1,xzr
	adc	$h2,$h2,xzr

	ret
.size	poly1305_mult,.-poly1305_mult

.type	poly1305_splat,%function
.align	4
poly1305_splat:
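	// split r^n (in h2:h1:h0) into five 26-bit limbs and store them,
	// interleaved with 5*r_i, at a 16-byte stride: each call fills
	// one 32-bit lane of the nine table vectors used by the NEON code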
	and	x12,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x13,$h0,#26,#26
	extr	x14,$h1,$h0,#52
	and	x14,x14,#0x03ffffff
	ubfx	x15,$h1,#14,#26
	extr	x16,$h2,$h1,#40

	str	w12,[$ctx,#16*0]	// r0
	add	w12,w13,w13,lsl#2	// r1*5
	str	w13,[$ctx,#16*1]	// r1
	add	w13,w14,w14,lsl#2	// r2*5
	str	w12,[$ctx,#16*2]	// s1
	str	w14,[$ctx,#16*3]	// r2
	add	w14,w15,w15,lsl#2	// r3*5
	str	w13,[$ctx,#16*4]	// s2
	str	w15,[$ctx,#16*5]	// r3
	add	w15,w16,w16,lsl#2	// r4*5
	str	w14,[$ctx,#16*6]	// s3
	str	w16,[$ctx,#16*7]	// r4
	str	w15,[$ctx,#16*8]	// s4

	ret
.size	poly1305_splat,.-poly1305_splat

#ifdef __KERNEL__
.globl	poly1305_blocks_neon
#endif
.type	poly1305_blocks_neon,%function
.align	5
poly1305_blocks_neon:
.Lpoly1305_blocks_neon:
	ldr	$is_base2_26,[$ctx,#24]
	cmp	$len,#128
	b.lo	.Lpoly1305_blocks

	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-80]!
	add	x29,sp,#0

	stp	d8,d9,[sp,#16]		// meet ABI requirements
	stp	d10,d11,[sp,#32]
	stp	d12,d13,[sp,#48]
	stp	d14,d15,[sp,#64]

	cbz	$is_base2_26,.Lbase2_64_neon

	ldp	w10,w11,[$ctx]		// load hash value base 2^26
	ldp	w12,w13,[$ctx,#8]
	ldr	w14,[$ctx,#16]

	tst	$len,#31
	b.eq	.Leven_neon

	ldp	$r0,$r1,[$ctx,#32]	// load key value

	add	$h0,x10,x11,lsl#26	// base 2^26 -> base 2^64
	lsr	$h1,x12,#12
	adds	$h0,$h0,x12,lsl#52
	add	$h1,$h1,x13,lsl#14
	adc	$h1,$h1,xzr
	lsr	$h2,x14,#24
	adds	$h1,$h1,x14,lsl#40
	adc	$d2,$h2,xzr		// can be partially reduced...

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)

	and	$t0,$d2,#-4		// ... so reduce
	and	$h2,$d2,#3
	add	$t0,$t0,$d2,lsr#2
	adds	$h0,$h0,$t0
	adcs	$h1,$h1,xzr
	adc	$h2,$h2,xzr

#ifdef __AARCH64EB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult

	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	b	.Leven_neon

.align	4
.Lbase2_64_neon:
	ldp	$r0,$r1,[$ctx,#32]	// load key value

	ldp	$h0,$h1,[$ctx]		// load hash value base 2^64
	ldr	$h2,[$ctx,#16]

	tst	$len,#31
	b.eq	.Linit_neon

	ldp	$d0,$d1,[$inp],#16	// load input
	sub	$len,$len,#16
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
#ifdef __AARCH64EB__
	rev	$d0,$d0
	rev	$d1,$d1
#endif
	adds	$h0,$h0,$d0		// accumulate input
	adcs	$h1,$h1,$d1
	adc	$h2,$h2,$padbit

	bl	poly1305_mult

.Linit_neon:
	ldr	w17,[$ctx,#48]		// first table element
	and	x10,$h0,#0x03ffffff	// base 2^64 -> base 2^26
	ubfx	x11,$h0,#26,#26
	extr	x12,$h1,$h0,#52
	and	x12,x12,#0x03ffffff
	ubfx	x13,$h1,#14,#26
	extr	x14,$h2,$h1,#40

	cmp	w17,#-1			// is value impossible?
	b.ne	.Leven_neon

	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

	////////////////////////////////// initialize r^n table
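	// (powers are written right to left, so lane 3 of each table
	//  vector ends up holding r^1 and lane 0 holding r^4; lane [2]
	//  used below is therefore r^2 and lane [0] is r^4)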
	mov	$h0,$r0			// r^1
	add	$s1,$r1,$r1,lsr#2	// s1 = r1 + (r1 >> 2)
	mov	$h1,$r1
	mov	$h2,xzr
	add	$ctx,$ctx,#48+12
	bl	poly1305_splat

	bl	poly1305_mult		// r^2
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^3
	sub	$ctx,$ctx,#4
	bl	poly1305_splat

	bl	poly1305_mult		// r^4
	sub	$ctx,$ctx,#4
	bl	poly1305_splat
	sub	$ctx,$ctx,#48		// restore original $ctx
	b	.Ldo_neon

.align	4
.Leven_neon:
	fmov	${H0},x10
	fmov	${H1},x11
	fmov	${H2},x12
	fmov	${H3},x13
	fmov	${H4},x14

.Ldo_neon:
	ldp	x8,x12,[$inp,#32]	// inp[2:3]
	subs	$len,$len,#64
	ldp	x9,x13,[$inp,#48]
	add	$in2,$inp,#96
	adrp	$zeros,.Lzeros
	add	$zeros,$zeros,#:lo12:.Lzeros

	lsl	$padbit,$padbit,#24
	add	x15,$ctx,#48

#ifdef __AARCH64EB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
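	// split the two 128-bit blocks into five 26-bit limbs each and
	// pack the corresponding limbs of both blocks pairwise into the
	// 64-bit halves of the .2s input vectors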
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	fmov	$IN23_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	fmov	$IN23_1,x6
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	fmov	$IN23_2,x8
	fmov	$IN23_3,x10
	fmov	$IN23_4,x12

	ldp	x8,x12,[$inp],#16	// inp[0:1]
	ldp	x9,x13,[$inp],#48

	ld1	{$R0,$R1,$S1,$R2},[x15],#64
	ld1	{$S2,$R3,$S3,$R4},[x15],#64
	ld1	{$S4},[x15]

#ifdef __AARCH64EB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	and	x5,x9,#0x03ffffff
	ubfx	x6,x8,#26,#26
	ubfx	x7,x9,#26,#26
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32
	extr	x8,x12,x8,#52
	extr	x9,x13,x9,#52
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	fmov	$IN01_0,x4
	and	x8,x8,#0x03ffffff
	and	x9,x9,#0x03ffffff
	ubfx	x10,x12,#14,#26
	ubfx	x11,x13,#14,#26
	add	x12,$padbit,x12,lsr#40
	add	x13,$padbit,x13,lsr#40
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	fmov	$IN01_1,x6
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	movi	$MASK.2d,#-1
	fmov	$IN01_2,x8
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12
	ushr	$MASK.2d,$MASK.2d,#38

	b.ls	.Lskip_loop

.align	4
.Loop_neon:
	////////////////////////////////////////////////////////////////
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	// \___________________/
	// ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	// ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	// \___________________/ \____________________/
	//
	// Note that we start with inp[2:3]*r^2. This is because it
	// doesn't depend on reduction in previous iteration.
	////////////////////////////////////////////////////////////////
	// d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
	// d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
	// d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
	// d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
	// d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1

	subs	$len,$len,#64
	umull	$ACC4,$IN23_0,${R4}[2]
	csel	$in2,$zeros,$in2,lo
	umull	$ACC3,$IN23_0,${R3}[2]
	umull	$ACC2,$IN23_0,${R2}[2]
	ldp	x8,x12,[$in2],#16	// inp[2:3] (or zero)
	umull	$ACC1,$IN23_0,${R1}[2]
	ldp	x9,x13,[$in2],#48
	umull	$ACC0,$IN23_0,${R0}[2]
#ifdef __AARCH64EB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	umlal	$ACC4,$IN23_1,${R3}[2]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC3,$IN23_1,${R2}[2]
	and	x5,x9,#0x03ffffff
	umlal	$ACC2,$IN23_1,${R1}[2]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN23_1,${R0}[2]
	ubfx	x7,x9,#26,#26
	umlal	$ACC0,$IN23_1,${S4}[2]
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32

	umlal	$ACC4,$IN23_2,${R2}[2]
	extr	x8,x12,x8,#52
	umlal	$ACC3,$IN23_2,${R1}[2]
	extr	x9,x13,x9,#52
	umlal	$ACC2,$IN23_2,${R0}[2]
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	umlal	$ACC1,$IN23_2,${S4}[2]
	fmov	$IN23_0,x4
	umlal	$ACC0,$IN23_2,${S3}[2]
	and	x8,x8,#0x03ffffff

	umlal	$ACC4,$IN23_3,${R1}[2]
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN23_3,${R0}[2]
	ubfx	x10,x12,#14,#26
	umlal	$ACC2,$IN23_3,${S4}[2]
	ubfx	x11,x13,#14,#26
	umlal	$ACC1,$IN23_3,${S3}[2]
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	umlal	$ACC0,$IN23_3,${S2}[2]
	fmov	$IN23_1,x6

	add	$IN01_2,$IN01_2,$H2
	add	x12,$padbit,x12,lsr#40
	umlal	$ACC4,$IN23_4,${R0}[2]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC3,$IN23_4,${S4}[2]
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	umlal	$ACC2,$IN23_4,${S3}[2]
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	umlal	$ACC1,$IN23_4,${S2}[2]
	fmov	$IN23_2,x8
	umlal	$ACC0,$IN23_4,${S1}[2]
	fmov	$IN23_3,x10

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4 and accumulate

	add	$IN01_0,$IN01_0,$H0
	fmov	$IN23_4,x12
	umlal	$ACC3,$IN01_2,${R1}[0]
	ldp	x8,x12,[$inp],#16	// inp[0:1]
	umlal	$ACC0,$IN01_2,${S3}[0]
	ldp	x9,x13,[$inp],#48
	umlal	$ACC4,$IN01_2,${R2}[0]
	umlal	$ACC1,$IN01_2,${S4}[0]
	umlal	$ACC2,$IN01_2,${R0}[0]
#ifdef __AARCH64EB__
	rev	x8,x8
	rev	x12,x12
	rev	x9,x9
	rev	x13,x13
#endif

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}[0]
	umlal	$ACC4,$IN01_0,${R4}[0]
	and	x4,x8,#0x03ffffff	// base 2^64 -> base 2^26
	umlal	$ACC2,$IN01_0,${R2}[0]
	and	x5,x9,#0x03ffffff
	umlal	$ACC0,$IN01_0,${R0}[0]
	ubfx	x6,x8,#26,#26
	umlal	$ACC1,$IN01_0,${R1}[0]
	ubfx	x7,x9,#26,#26

	add	$IN01_3,$IN01_3,$H3
	add	x4,x4,x5,lsl#32		// bfi x4,x5,#32,#32
	umlal	$ACC3,$IN01_1,${R2}[0]
	extr	x8,x12,x8,#52
	umlal	$ACC4,$IN01_1,${R3}[0]
	extr	x9,x13,x9,#52
	umlal	$ACC0,$IN01_1,${S4}[0]
	add	x6,x6,x7,lsl#32		// bfi x6,x7,#32,#32
	umlal	$ACC2,$IN01_1,${R1}[0]
	fmov	$IN01_0,x4
	umlal	$ACC1,$IN01_1,${R0}[0]
	and	x8,x8,#0x03ffffff

	add	$IN01_4,$IN01_4,$H4
	and	x9,x9,#0x03ffffff
	umlal	$ACC3,$IN01_3,${R0}[0]
	ubfx	x10,x12,#14,#26
	umlal	$ACC0,$IN01_3,${S2}[0]
	ubfx	x11,x13,#14,#26
	umlal	$ACC4,$IN01_3,${R1}[0]
	add	x8,x8,x9,lsl#32		// bfi x8,x9,#32,#32
	umlal	$ACC1,$IN01_3,${S3}[0]
	fmov	$IN01_1,x6
	umlal	$ACC2,$IN01_3,${S4}[0]
	add	x12,$padbit,x12,lsr#40

	umlal	$ACC3,$IN01_4,${S4}[0]
	add	x13,$padbit,x13,lsr#40
	umlal	$ACC0,$IN01_4,${S1}[0]
	add	x10,x10,x11,lsl#32	// bfi x10,x11,#32,#32
	umlal	$ACC4,$IN01_4,${R0}[0]
	add	x12,x12,x13,lsl#32	// bfi x12,x13,#32,#32
	umlal	$ACC1,$IN01_4,${S2}[0]
	fmov	$IN01_2,x8
	umlal	$ACC2,$IN01_4,${S3}[0]
	fmov	$IN01_3,x10
	fmov	$IN01_4,x12

	/////////////////////////////////////////////////////////////////
	// lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	// and P. Schwabe
	//
	// [see discussion in poly1305-armv4 module]
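	//
	// carries are propagated h3->h4 and h0->h1->h2->h3->h4 in two
	// interleaved chains; the h4->h0 carry is multiplied by 5 on
	// the way, as c + (c<<2)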

	ushr	$T0.2d,$ACC3,#26
	xtn	$H3,$ACC3
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	bic	$H3,#0xfc,lsl#24	// &=0x03ffffff
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	xtn	$H4,$ACC4
	ushr	$T1.2d,$ACC1,#26
	xtn	$H1,$ACC1
	bic	$H4,#0xfc,lsl#24
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	shrn	$T1.2s,$ACC2,#26
	xtn	$H2,$ACC2
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	bic	$H1,#0xfc,lsl#24
	add	$H3,$H3,$T1.2s		// h2 -> h3
	bic	$H2,#0xfc,lsl#24

	shrn	$T0.2s,$ACC0,#26
	xtn	$H0,$ACC0
	ushr	$T1.2s,$H3,#26
	bic	$H3,#0xfc,lsl#24
	bic	$H0,#0xfc,lsl#24
	add	$H1,$H1,$T0.2s		// h0 -> h1
	add	$H4,$H4,$T1.2s		// h3 -> h4

	b.hi	.Loop_neon

.Lskip_loop:
	dup	$IN23_2,${IN23_2}[0]
	add	$IN01_2,$IN01_2,$H2

	////////////////////////////////////////////////////////////////
	// multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	adds	$len,$len,#32
	b.ne	.Long_tail

	dup	$IN23_2,${IN01_2}[0]
	add	$IN23_0,$IN01_0,$H0
	add	$IN23_3,$IN01_3,$H3
	add	$IN23_1,$IN01_1,$H1
	add	$IN23_4,$IN01_4,$H4

.Long_tail:
	dup	$IN23_0,${IN23_0}[0]
	umull2	$ACC0,$IN23_2,${S3}
	umull2	$ACC3,$IN23_2,${R1}
	umull2	$ACC4,$IN23_2,${R2}
	umull2	$ACC2,$IN23_2,${R0}
	umull2	$ACC1,$IN23_2,${S4}

	dup	$IN23_1,${IN23_1}[0]
	umlal2	$ACC0,$IN23_0,${R0}
	umlal2	$ACC2,$IN23_0,${R2}
	umlal2	$ACC3,$IN23_0,${R3}
	umlal2	$ACC4,$IN23_0,${R4}
	umlal2	$ACC1,$IN23_0,${R1}

	dup	$IN23_3,${IN23_3}[0]
	umlal2	$ACC0,$IN23_1,${S4}
	umlal2	$ACC3,$IN23_1,${R2}
	umlal2	$ACC2,$IN23_1,${R1}
	umlal2	$ACC4,$IN23_1,${R3}
	umlal2	$ACC1,$IN23_1,${R0}

	dup	$IN23_4,${IN23_4}[0]
	umlal2	$ACC3,$IN23_3,${R0}
	umlal2	$ACC4,$IN23_3,${R1}
	umlal2	$ACC0,$IN23_3,${S2}
	umlal2	$ACC1,$IN23_3,${S3}
	umlal2	$ACC2,$IN23_3,${S4}

	umlal2	$ACC3,$IN23_4,${S4}
	umlal2	$ACC0,$IN23_4,${S1}
	umlal2	$ACC4,$IN23_4,${R0}
	umlal2	$ACC1,$IN23_4,${S2}
	umlal2	$ACC2,$IN23_4,${S3}

	b.eq	.Lshort_tail

	////////////////////////////////////////////////////////////////
	// (hash+inp[0:1])*r^4:r^3 and accumulate

	add	$IN01_0,$IN01_0,$H0
	umlal	$ACC3,$IN01_2,${R1}
	umlal	$ACC0,$IN01_2,${S3}
	umlal	$ACC4,$IN01_2,${R2}
	umlal	$ACC1,$IN01_2,${S4}
	umlal	$ACC2,$IN01_2,${R0}

	add	$IN01_1,$IN01_1,$H1
	umlal	$ACC3,$IN01_0,${R3}
	umlal	$ACC0,$IN01_0,${R0}
	umlal	$ACC4,$IN01_0,${R4}
	umlal	$ACC1,$IN01_0,${R1}
	umlal	$ACC2,$IN01_0,${R2}

	add	$IN01_3,$IN01_3,$H3
	umlal	$ACC3,$IN01_1,${R2}
	umlal	$ACC0,$IN01_1,${S4}
	umlal	$ACC4,$IN01_1,${R3}
	umlal	$ACC1,$IN01_1,${R0}
	umlal	$ACC2,$IN01_1,${R1}

	add	$IN01_4,$IN01_4,$H4
	umlal	$ACC3,$IN01_3,${R0}
	umlal	$ACC0,$IN01_3,${S2}
	umlal	$ACC4,$IN01_3,${R1}
	umlal	$ACC1,$IN01_3,${S3}
	umlal	$ACC2,$IN01_3,${S4}

	umlal	$ACC3,$IN01_4,${S4}
	umlal	$ACC0,$IN01_4,${S1}
	umlal	$ACC4,$IN01_4,${R0}
	umlal	$ACC1,$IN01_4,${S2}
	umlal	$ACC2,$IN01_4,${S3}

.Lshort_tail:
	////////////////////////////////////////////////////////////////
	// horizontal add

	addp	$ACC3,$ACC3,$ACC3
	ldp	d8,d9,[sp,#16]		// meet ABI requirements
	addp	$ACC0,$ACC0,$ACC0
	ldp	d10,d11,[sp,#32]
	addp	$ACC4,$ACC4,$ACC4
	ldp	d12,d13,[sp,#48]
	addp	$ACC1,$ACC1,$ACC1
	ldp	d14,d15,[sp,#64]
	addp	$ACC2,$ACC2,$ACC2
	ldr	x30,[sp,#8]

	////////////////////////////////////////////////////////////////
	// lazy reduction, but without narrowing

	ushr	$T0.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	ushr	$T1.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d

	add	$ACC4,$ACC4,$T0.2d	// h3 -> h4
	add	$ACC1,$ACC1,$T1.2d	// h0 -> h1

	ushr	$T0.2d,$ACC4,#26
	and	$ACC4,$ACC4,$MASK.2d
	ushr	$T1.2d,$ACC1,#26
	and	$ACC1,$ACC1,$MASK.2d
	add	$ACC2,$ACC2,$T1.2d	// h1 -> h2

	add	$ACC0,$ACC0,$T0.2d
	shl	$T0.2d,$T0.2d,#2
	ushr	$T1.2d,$ACC2,#26
	and	$ACC2,$ACC2,$MASK.2d
	add	$ACC0,$ACC0,$T0.2d	// h4 -> h0
	add	$ACC3,$ACC3,$T1.2d	// h2 -> h3

	ushr	$T0.2d,$ACC0,#26
	and	$ACC0,$ACC0,$MASK.2d
	ushr	$T1.2d,$ACC3,#26
	and	$ACC3,$ACC3,$MASK.2d
	add	$ACC1,$ACC1,$T0.2d	// h0 -> h1
	add	$ACC4,$ACC4,$T1.2d	// h3 -> h4

	////////////////////////////////////////////////////////////////
	// write the result, can be partially reduced

	st4	{$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16
	mov	x4,#1
	st1	{$ACC4}[0],[$ctx]
	str	x4,[$ctx,#8]		// set is_base2_26

	ldr	x29,[sp],#80
	.inst	0xd50323bf		// autiasp
	ret
.size	poly1305_blocks_neon,.-poly1305_blocks_neon

.pushsection .rodata
.align	5
.Lzeros:
.long	0,0,0,0,0,0,0,0
.asciz	"Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm"
.popsection

.align	2
#if !defined(__KERNEL__) && !defined(_WIN64)
.comm	OPENSSL_armcap_P,4,4
.hidden	OPENSSL_armcap_P
#endif
___

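# post-process $code before printing: the vector code above is written
# in a unified notation, and the substitutions below rewrite it into
# valid AArch64 syntax (scalar fmov destinations, .2d dup operands,
# .16b logic ops, .2s/.4s widening multiplies, lane-indexed stores)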
foreach (split("\n",$code)) {
	s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/			or
	s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/	or
	(m/\bdup\b/ and (s/\.[24]s/.2d/g or 1))			or
	(m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1))	or
	(m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1))		or
	(m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1))		or
	(m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1));

	s/\.[124]([sd])\[/.$1\[/;
	s/w#x([0-9]+)/w$1/g;

	print $_,"\n";
}
close STDOUT;