1 @ SPDX-License-Identifier: GPL-2.0
2
3 @ This code is taken from the OpenSSL project but the author (Andy Polyakov)
4 @ has relicensed it under the GPLv2. Therefore this program is free software;
5 @ you can redistribute it and/or modify it under the terms of the GNU General
6 @ Public License version 2 as published by the Free Software Foundation.
7 @
8 @ The original headers, including the original license headers, are
9 @ included below for completeness.
10
11 @ ====================================================================
12 @ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
13 @ project. The module is, however, dual licensed under OpenSSL and
14 @ CRYPTOGAMS licenses depending on where you obtain it. For further
15 @ details see http://www.openssl.org/~appro/cryptogams/.
16 @ ====================================================================
17
18 @ SHA512 block procedure for ARMv4. September 2007.
19
20 @ This code is ~4.5 (four and a half) times faster than code generated
21 @ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
22 @ Xscale PXA250 core].
23 @
24 @ July 2010.
25 @
26 @ Rescheduling for dual-issue pipeline resulted in 6% improvement on
27 @ Cortex A8 core and ~40 cycles per processed byte.
28
29 @ February 2011.
30 @
31 @ Profiler-assisted and platform-specific optimization resulted in 7%
32 @ improvement on Cortex A8 core and ~38 cycles per byte.
33
34 @ March 2011.
35 @
36 @ Add NEON implementation. On Cortex A8 it was measured to process
37 @ one byte in 23.3 cycles or ~60% faster than integer-only code.
38
39 @ August 2012.
40 @
41 @ Improve NEON performance by 12% on Snapdragon S4. In absolute
42 @ terms it's 22.6 cycles per byte, which is a disappointing result.
43 @ Technical writers asserted that the 3-way S4 pipeline can sustain
44 @ multiple NEON instructions per cycle, but dual NEON issue could
45 @ not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
46 @ for further details. On a side note, Cortex-A15 processes one byte in
47 @ 16 cycles.
48
49 @ Byte order [in]dependence. =========================================
50 @
51 @ Originally the caller was expected to maintain a specific *dword* order
52 @ in h[0-7], namely with the most significant dword at the *lower* address,
53 @ which was reflected in the two parameters below as 0 and 4. Now the
54 @ caller is expected to maintain native byte order for whole 64-bit values.
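@ (With the LO/HI offsets defined further below this simply means that each
@ 64-bit h[i] is stored as a native-endian value: its low 32 bits at
@ offset LO and its high 32 bits at offset HI within the 8-byte slot.)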
55 #ifndef __KERNEL__
56 # include "arm_arch.h"
57 # define VFP_ABI_PUSH   vstmdb  sp!,{d8-d15}
58 # define VFP_ABI_POP    vldmia  sp!,{d8-d15}
59 #else
60 # define __ARM_ARCH__ __LINUX_ARM_ARCH__
61 # define __ARM_MAX_ARCH__ 7
62 # define VFP_ABI_PUSH
63 # define VFP_ABI_POP
64 #endif
65
66 #ifdef __ARMEL__
67 # define LO 0
68 # define HI 4
69 # define WORD64(hi0,lo0,hi1,lo1)        .word   lo0,hi0, lo1,hi1
70 #else
71 # define HI 0
72 # define LO 4
73 # define WORD64(hi0,lo0,hi1,lo1)        .word   hi0,lo0, hi1,lo1
74 #endif
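@ Note: WORD64(hi,lo,...) emits each 64-bit K512 constant as two 32-bit
@ words in native order, e.g. the first entry below is the 64-bit value
@ 0x428a2f98d728ae22, stored low-word-first on little-endian and
@ high-word-first on big-endian.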
75
76 .text
77 #if __ARM_ARCH__<7
78 .code   32
79 #else
80 .syntax unified
81 # ifdef __thumb2__
82 #  define adrl adr
83 .thumb
84 # else
85 .code   32
86 # endif
87 #endif
88
89 .type   K512,%object
90 .align  5
91 K512:
92 WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
93 WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
94 WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
95 WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
96 WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
97 WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
98 WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
99 WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
100 WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
101 WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
102 WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
103 WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
104 WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
105 WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
106 WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
107 WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
108 WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
109 WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
110 WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
111 WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
112 WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
113 WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
114 WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
115 WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
116 WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
117 WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
118 WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
119 WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
120 WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
121 WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
122 WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
123 WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
124 WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
125 WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
126 WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
127 WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
128 WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
129 WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
130 WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
131 WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
132 .size   K512,.-K512
133 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
134 .LOPENSSL_armcap:
135 .word   OPENSSL_armcap_P-sha512_block_data_order
136 .skip   32-4
137 #else
138 .skip   32
139 #endif
140
141 .global sha512_block_data_order
142 .type   sha512_block_data_order,%function
143 sha512_block_data_order:
144 #if __ARM_ARCH__<7
145         sub     r3,pc,#8                @ sha512_block_data_order
146 #else
147         adr     r3,sha512_block_data_order
148 #endif
149 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
150         ldr     r12,.LOPENSSL_armcap
151         ldr     r12,[r3,r12]            @ OPENSSL_armcap_P
152         tst     r12,#1
153         bne     .LNEON
154 #endif
155         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
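        @ (r2 arrives as the number of 128-byte SHA-512 blocks, hence lsl#7)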
156         stmdb   sp!,{r4-r12,lr}
157         sub     r14,r3,#672             @ K512
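        @ (K512 sits 672 bytes before the function entry: 80 8-byte round
        @ constants plus the 32-byte gap reserved after the table above)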
158         sub     sp,sp,#9*8
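        @ The 72-byte frame holds working copies of a..h at sp+0..sp+63 and
        @ the current input word at sp+64; every round below then drops sp by
        @ another 8, so older schedule words stay at fixed offsets (sp+184 etc.)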
159
160         ldr     r7,[r0,#32+LO]
161         ldr     r8,[r0,#32+HI]
162         ldr     r9, [r0,#48+LO]
163         ldr     r10, [r0,#48+HI]
164         ldr     r11, [r0,#56+LO]
165         ldr     r12, [r0,#56+HI]
166 .Loop:
167         str     r9, [sp,#48+0]
168         str     r10, [sp,#48+4]
169         str     r11, [sp,#56+0]
170         str     r12, [sp,#56+4]
171         ldr     r5,[r0,#0+LO]
172         ldr     r6,[r0,#0+HI]
173         ldr     r3,[r0,#8+LO]
174         ldr     r4,[r0,#8+HI]
175         ldr     r9, [r0,#16+LO]
176         ldr     r10, [r0,#16+HI]
177         ldr     r11, [r0,#24+LO]
178         ldr     r12, [r0,#24+HI]
179         str     r3,[sp,#8+0]
180         str     r4,[sp,#8+4]
181         str     r9, [sp,#16+0]
182         str     r10, [sp,#16+4]
183         str     r11, [sp,#24+0]
184         str     r12, [sp,#24+4]
185         ldr     r3,[r0,#40+LO]
186         ldr     r4,[r0,#40+HI]
187         str     r3,[sp,#40+0]
188         str     r4,[sp,#40+4]
189
190 .L00_15:
191 #if __ARM_ARCH__<7
192         ldrb    r3,[r1,#7]
193         ldrb    r9, [r1,#6]
194         ldrb    r10, [r1,#5]
195         ldrb    r11, [r1,#4]
196         ldrb    r4,[r1,#3]
197         ldrb    r12, [r1,#2]
198         orr     r3,r3,r9,lsl#8
199         ldrb    r9, [r1,#1]
200         orr     r3,r3,r10,lsl#16
201         ldrb    r10, [r1],#8
202         orr     r3,r3,r11,lsl#24
203         orr     r4,r4,r12,lsl#8
204         orr     r4,r4,r9,lsl#16
205         orr     r4,r4,r10,lsl#24
206 #else
207         ldr     r3,[r1,#4]
208         ldr     r4,[r1],#8
209 #ifdef __ARMEL__
210         rev     r3,r3
211         rev     r4,r4
212 #endif
213 #endif
214         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
215         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
216         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
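        @ (A 64-bit ROTR by n<32 contributes lo>>n ^ hi<<(32-n) to the low
        @ word and hi>>n ^ lo<<(32-n) to the high word; for n>32, e.g. the
        @ rotate by 41, the halves swap roles, giving the hi>>9 ^ lo<<23 term.)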
217         mov     r9,r7,lsr#14
218         str     r3,[sp,#64+0]
219         mov     r10,r8,lsr#14
220         str     r4,[sp,#64+4]
221         eor     r9,r9,r8,lsl#18
222         ldr     r11,[sp,#56+0]  @ h.lo
223         eor     r10,r10,r7,lsl#18
224         ldr     r12,[sp,#56+4]  @ h.hi
225         eor     r9,r9,r7,lsr#18
226         eor     r10,r10,r8,lsr#18
227         eor     r9,r9,r8,lsl#14
228         eor     r10,r10,r7,lsl#14
229         eor     r9,r9,r8,lsr#9
230         eor     r10,r10,r7,lsr#9
231         eor     r9,r9,r7,lsl#23
232         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
233         adds    r3,r3,r9
234         ldr     r9,[sp,#40+0]   @ f.lo
235         adc     r4,r4,r10               @ T += Sigma1(e)
236         ldr     r10,[sp,#40+4]  @ f.hi
237         adds    r3,r3,r11
238         ldr     r11,[sp,#48+0]  @ g.lo
239         adc     r4,r4,r12               @ T += h
240         ldr     r12,[sp,#48+4]  @ g.hi
241
242         eor     r9,r9,r11
243         str     r7,[sp,#32+0]
244         eor     r10,r10,r12
245         str     r8,[sp,#32+4]
246         and     r9,r9,r7
247         str     r5,[sp,#0+0]
248         and     r10,r10,r8
249         str     r6,[sp,#0+4]
250         eor     r9,r9,r11
251         ldr     r11,[r14,#LO]   @ K[i].lo
252         eor     r10,r10,r12             @ Ch(e,f,g)
253         ldr     r12,[r14,#HI]   @ K[i].hi
254
255         adds    r3,r3,r9
256         ldr     r7,[sp,#24+0]   @ d.lo
257         adc     r4,r4,r10               @ T += Ch(e,f,g)
258         ldr     r8,[sp,#24+4]   @ d.hi
259         adds    r3,r3,r11
260         and     r9,r11,#0xff
261         adc     r4,r4,r12               @ T += K[i]
262         adds    r7,r7,r3
263         ldr     r11,[sp,#8+0]   @ b.lo
264         adc     r8,r8,r4                @ d += T
265         teq     r9,#148
266
267         ldr     r12,[sp,#16+0]  @ c.lo
268 #if __ARM_ARCH__>=7
269         it      eq                      @ Thumb2 thing, sanity check in ARM
270 #endif
271         orreq   r14,r14,#1
272         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
273         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
274         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
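        @ (Same splitting as for Sigma1: the rotate by 28 stays in-half,
        @ while the rotates by 34 and 39 exceed 32 bits and swap halves,
        @ hence the hi>>2 and hi>>7 terms on the LO line.)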
275         mov     r9,r5,lsr#28
276         mov     r10,r6,lsr#28
277         eor     r9,r9,r6,lsl#4
278         eor     r10,r10,r5,lsl#4
279         eor     r9,r9,r6,lsr#2
280         eor     r10,r10,r5,lsr#2
281         eor     r9,r9,r5,lsl#30
282         eor     r10,r10,r6,lsl#30
283         eor     r9,r9,r6,lsr#7
284         eor     r10,r10,r5,lsr#7
285         eor     r9,r9,r5,lsl#25
286         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
287         adds    r3,r3,r9
288         and     r9,r5,r11
289         adc     r4,r4,r10               @ T += Sigma0(a)
290
291         ldr     r10,[sp,#8+4]   @ b.hi
292         orr     r5,r5,r11
293         ldr     r11,[sp,#16+4]  @ c.hi
294         and     r5,r5,r12
295         and     r12,r6,r10
296         orr     r6,r6,r10
297         orr     r5,r5,r9                @ Maj(a,b,c).lo
298         and     r6,r6,r11
299         adds    r5,r5,r3
300         orr     r6,r6,r12               @ Maj(a,b,c).hi
301         sub     sp,sp,#8
302         adc     r6,r6,r4                @ h += T
303         tst     r14,#1
304         add     r14,r14,#8
305         tst     r14,#1
306         beq     .L00_15
307         ldr     r9,[sp,#184+0]
308         ldr     r10,[sp,#184+4]
309         bic     r14,r14,#1
310 .L16_79:
311         @ sigma0(x)     (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
312         @ LO            lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
313         @ HI            hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
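        @ (The third term is a plain 64-bit shift right by 7, not a rotate,
        @ so the high word gets only hi>>7 with nothing shifted in from lo.)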
314         mov     r3,r9,lsr#1
315         ldr     r11,[sp,#80+0]
316         mov     r4,r10,lsr#1
317         ldr     r12,[sp,#80+4]
318         eor     r3,r3,r10,lsl#31
319         eor     r4,r4,r9,lsl#31
320         eor     r3,r3,r9,lsr#8
321         eor     r4,r4,r10,lsr#8
322         eor     r3,r3,r10,lsl#24
323         eor     r4,r4,r9,lsl#24
324         eor     r3,r3,r9,lsr#7
325         eor     r4,r4,r10,lsr#7
326         eor     r3,r3,r10,lsl#25
327
328         @ sigma1(x)     (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
329         @ LO            lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
330         @ HI            hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
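        @ (The rotate by 61 exceeds 32 bits, hence the swapped hi>>29^lo<<3
        @ term on the LO line; the final >>6 is a plain shift, so the high
        @ word ends with just hi>>6.)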
331         mov     r9,r11,lsr#19
332         mov     r10,r12,lsr#19
333         eor     r9,r9,r12,lsl#13
334         eor     r10,r10,r11,lsl#13
335         eor     r9,r9,r12,lsr#29
336         eor     r10,r10,r11,lsr#29
337         eor     r9,r9,r11,lsl#3
338         eor     r10,r10,r12,lsl#3
339         eor     r9,r9,r11,lsr#6
340         eor     r10,r10,r12,lsr#6
341         ldr     r11,[sp,#120+0]
342         eor     r9,r9,r12,lsl#26
343
344         ldr     r12,[sp,#120+4]
345         adds    r3,r3,r9
346         ldr     r9,[sp,#192+0]
347         adc     r4,r4,r10
348
349         ldr     r10,[sp,#192+4]
350         adds    r3,r3,r11
351         adc     r4,r4,r12
352         adds    r3,r3,r9
353         adc     r4,r4,r10
354         @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
355         @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
356         @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
357         mov     r9,r7,lsr#14
358         str     r3,[sp,#64+0]
359         mov     r10,r8,lsr#14
360         str     r4,[sp,#64+4]
361         eor     r9,r9,r8,lsl#18
362         ldr     r11,[sp,#56+0]  @ h.lo
363         eor     r10,r10,r7,lsl#18
364         ldr     r12,[sp,#56+4]  @ h.hi
365         eor     r9,r9,r7,lsr#18
366         eor     r10,r10,r8,lsr#18
367         eor     r9,r9,r8,lsl#14
368         eor     r10,r10,r7,lsl#14
369         eor     r9,r9,r8,lsr#9
370         eor     r10,r10,r7,lsr#9
371         eor     r9,r9,r7,lsl#23
372         eor     r10,r10,r8,lsl#23       @ Sigma1(e)
373         adds    r3,r3,r9
374         ldr     r9,[sp,#40+0]   @ f.lo
375         adc     r4,r4,r10               @ T += Sigma1(e)
376         ldr     r10,[sp,#40+4]  @ f.hi
377         adds    r3,r3,r11
378         ldr     r11,[sp,#48+0]  @ g.lo
379         adc     r4,r4,r12               @ T += h
380         ldr     r12,[sp,#48+4]  @ g.hi
381
382         eor     r9,r9,r11
383         str     r7,[sp,#32+0]
384         eor     r10,r10,r12
385         str     r8,[sp,#32+4]
386         and     r9,r9,r7
387         str     r5,[sp,#0+0]
388         and     r10,r10,r8
389         str     r6,[sp,#0+4]
390         eor     r9,r9,r11
391         ldr     r11,[r14,#LO]   @ K[i].lo
392         eor     r10,r10,r12             @ Ch(e,f,g)
393         ldr     r12,[r14,#HI]   @ K[i].hi
394
395         adds    r3,r3,r9
396         ldr     r7,[sp,#24+0]   @ d.lo
397         adc     r4,r4,r10               @ T += Ch(e,f,g)
398         ldr     r8,[sp,#24+4]   @ d.hi
399         adds    r3,r3,r11
400         and     r9,r11,#0xff
401         adc     r4,r4,r12               @ T += K[i]
402         adds    r7,r7,r3
403         ldr     r11,[sp,#8+0]   @ b.lo
404         adc     r8,r8,r4                @ d += T
405         teq     r9,#23
406
407         ldr     r12,[sp,#16+0]  @ c.lo
408 #if __ARM_ARCH__>=7
409         it      eq                      @ Thumb2 thing, sanity check in ARM
410 #endif
411         orreq   r14,r14,#1
412         @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
413         @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
414         @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
415         mov     r9,r5,lsr#28
416         mov     r10,r6,lsr#28
417         eor     r9,r9,r6,lsl#4
418         eor     r10,r10,r5,lsl#4
419         eor     r9,r9,r6,lsr#2
420         eor     r10,r10,r5,lsr#2
421         eor     r9,r9,r5,lsl#30
422         eor     r10,r10,r6,lsl#30
423         eor     r9,r9,r6,lsr#7
424         eor     r10,r10,r5,lsr#7
425         eor     r9,r9,r5,lsl#25
426         eor     r10,r10,r6,lsl#25       @ Sigma0(a)
427         adds    r3,r3,r9
428         and     r9,r5,r11
429         adc     r4,r4,r10               @ T += Sigma0(a)
430
431         ldr     r10,[sp,#8+4]   @ b.hi
432         orr     r5,r5,r11
433         ldr     r11,[sp,#16+4]  @ c.hi
434         and     r5,r5,r12
435         and     r12,r6,r10
436         orr     r6,r6,r10
437         orr     r5,r5,r9                @ Maj(a,b,c).lo
438         and     r6,r6,r11
439         adds    r5,r5,r3
440         orr     r6,r6,r12               @ Maj(a,b,c).hi
441         sub     sp,sp,#8
442         adc     r6,r6,r4                @ h += T
443         tst     r14,#1
444         add     r14,r14,#8
445 #if __ARM_ARCH__>=7
446         ittt    eq                      @ Thumb2 thing, sanity check in ARM
447 #endif
448         ldreq   r9,[sp,#184+0]
449         ldreq   r10,[sp,#184+4]
450         beq     .L16_79
451         bic     r14,r14,#1
452
453         ldr     r3,[sp,#8+0]
454         ldr     r4,[sp,#8+4]
455         ldr     r9, [r0,#0+LO]
456         ldr     r10, [r0,#0+HI]
457         ldr     r11, [r0,#8+LO]
458         ldr     r12, [r0,#8+HI]
459         adds    r9,r5,r9
460         str     r9, [r0,#0+LO]
461         adc     r10,r6,r10
462         str     r10, [r0,#0+HI]
463         adds    r11,r3,r11
464         str     r11, [r0,#8+LO]
465         adc     r12,r4,r12
466         str     r12, [r0,#8+HI]
467
468         ldr     r5,[sp,#16+0]
469         ldr     r6,[sp,#16+4]
470         ldr     r3,[sp,#24+0]
471         ldr     r4,[sp,#24+4]
472         ldr     r9, [r0,#16+LO]
473         ldr     r10, [r0,#16+HI]
474         ldr     r11, [r0,#24+LO]
475         ldr     r12, [r0,#24+HI]
476         adds    r9,r5,r9
477         str     r9, [r0,#16+LO]
478         adc     r10,r6,r10
479         str     r10, [r0,#16+HI]
480         adds    r11,r3,r11
481         str     r11, [r0,#24+LO]
482         adc     r12,r4,r12
483         str     r12, [r0,#24+HI]
484
485         ldr     r3,[sp,#40+0]
486         ldr     r4,[sp,#40+4]
487         ldr     r9, [r0,#32+LO]
488         ldr     r10, [r0,#32+HI]
489         ldr     r11, [r0,#40+LO]
490         ldr     r12, [r0,#40+HI]
491         adds    r7,r7,r9
492         str     r7,[r0,#32+LO]
493         adc     r8,r8,r10
494         str     r8,[r0,#32+HI]
495         adds    r11,r3,r11
496         str     r11, [r0,#40+LO]
497         adc     r12,r4,r12
498         str     r12, [r0,#40+HI]
499
500         ldr     r5,[sp,#48+0]
501         ldr     r6,[sp,#48+4]
502         ldr     r3,[sp,#56+0]
503         ldr     r4,[sp,#56+4]
504         ldr     r9, [r0,#48+LO]
505         ldr     r10, [r0,#48+HI]
506         ldr     r11, [r0,#56+LO]
507         ldr     r12, [r0,#56+HI]
508         adds    r9,r5,r9
509         str     r9, [r0,#48+LO]
510         adc     r10,r6,r10
511         str     r10, [r0,#48+HI]
512         adds    r11,r3,r11
513         str     r11, [r0,#56+LO]
514         adc     r12,r4,r12
515         str     r12, [r0,#56+HI]
516
517         add     sp,sp,#640
518         sub     r14,r14,#640
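        @ 80 rounds moved sp down by 80*8=640 bytes and advanced r14 by the
        @ same amount past K512; both are rewound here before the next block.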
519
520         teq     r1,r2
521         bne     .Loop
522
523         add     sp,sp,#8*9              @ destroy frame
524 #if __ARM_ARCH__>=5
525         ldmia   sp!,{r4-r12,pc}
526 #else
527         ldmia   sp!,{r4-r12,lr}
528         tst     lr,#1
529         moveq   pc,lr                   @ be binary compatible with V4, yet
530         .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
531 #endif
532 .size   sha512_block_data_order,.-sha512_block_data_order
533 #if __ARM_MAX_ARCH__>=7
534 .arch   armv7-a
535 .fpu    neon
536
537 .global sha512_block_data_order_neon
538 .type   sha512_block_data_order_neon,%function
539 .align  4
540 sha512_block_data_order_neon:
541 .LNEON:
542         dmb                             @ errata #451034 on early Cortex A8
543         add     r2,r1,r2,lsl#7  @ len to point at the end of inp
544         VFP_ABI_PUSH
545         adrl    r3,K512
546         vldmia  r0,{d16-d23}            @ load context
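        @ Register use in the NEON rounds below: d16-d23 hold the state
        @ a,b,c,d,e,f,g,h, q0-q7 hold the 16 message words, d28 holds K[i],
        @ d24-d26 are Sigma temporaries, d29/d30 the Ch/Maj results and
        @ d27 accumulates T1.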
547 .Loop_neon:
548         vshr.u64        d24,d20,#14     @ 0
549 #if 0<16
550         vld1.64         {d0},[r1]!      @ handles unaligned
551 #endif
552         vshr.u64        d25,d20,#18
553 #if 0>0
554          vadd.i64       d16,d30                 @ h+=Maj from the past
555 #endif
556         vshr.u64        d26,d20,#41
557         vld1.64         {d28},[r3,:64]! @ K[i++]
558         vsli.64         d24,d20,#50
559         vsli.64         d25,d20,#46
560         vmov            d29,d20
561         vsli.64         d26,d20,#23
562 #if 0<16 && defined(__ARMEL__)
563         vrev64.8        d0,d0
564 #endif
565         veor            d25,d24
566         vbsl            d29,d21,d22             @ Ch(e,f,g)
567         vshr.u64        d24,d16,#28
568         veor            d26,d25                 @ Sigma1(e)
569         vadd.i64        d27,d29,d23
570         vshr.u64        d25,d16,#34
571         vsli.64         d24,d16,#36
572         vadd.i64        d27,d26
573         vshr.u64        d26,d16,#39
574         vadd.i64        d28,d0
575         vsli.64         d25,d16,#30
576         veor            d30,d16,d17
577         vsli.64         d26,d16,#25
578         veor            d23,d24,d25
579         vadd.i64        d27,d28
580         vbsl            d30,d18,d17             @ Maj(a,b,c)
581         veor            d23,d26                 @ Sigma0(a)
582         vadd.i64        d19,d27
583         vadd.i64        d30,d27
584         @ vadd.i64      d23,d30
585         vshr.u64        d24,d19,#14     @ 1
586 #if 1<16
587         vld1.64         {d1},[r1]!      @ handles unaligned
588 #endif
589         vshr.u64        d25,d19,#18
590 #if 1>0
591          vadd.i64       d23,d30                 @ h+=Maj from the past
592 #endif
593         vshr.u64        d26,d19,#41
594         vld1.64         {d28},[r3,:64]! @ K[i++]
595         vsli.64         d24,d19,#50
596         vsli.64         d25,d19,#46
597         vmov            d29,d19
598         vsli.64         d26,d19,#23
599 #if 1<16 && defined(__ARMEL__)
600         vrev64.8        d1,d1
601 #endif
602         veor            d25,d24
603         vbsl            d29,d20,d21             @ Ch(e,f,g)
604         vshr.u64        d24,d23,#28
605         veor            d26,d25                 @ Sigma1(e)
606         vadd.i64        d27,d29,d22
607         vshr.u64        d25,d23,#34
608         vsli.64         d24,d23,#36
609         vadd.i64        d27,d26
610         vshr.u64        d26,d23,#39
611         vadd.i64        d28,d1
612         vsli.64         d25,d23,#30
613         veor            d30,d23,d16
614         vsli.64         d26,d23,#25
615         veor            d22,d24,d25
616         vadd.i64        d27,d28
617         vbsl            d30,d17,d16             @ Maj(a,b,c)
618         veor            d22,d26                 @ Sigma0(a)
619         vadd.i64        d18,d27
620         vadd.i64        d30,d27
621         @ vadd.i64      d22,d30
622         vshr.u64        d24,d18,#14     @ 2
623 #if 2<16
624         vld1.64         {d2},[r1]!      @ handles unaligned
625 #endif
626         vshr.u64        d25,d18,#18
627 #if 2>0
628          vadd.i64       d22,d30                 @ h+=Maj from the past
629 #endif
630         vshr.u64        d26,d18,#41
631         vld1.64         {d28},[r3,:64]! @ K[i++]
632         vsli.64         d24,d18,#50
633         vsli.64         d25,d18,#46
634         vmov            d29,d18
635         vsli.64         d26,d18,#23
636 #if 2<16 && defined(__ARMEL__)
637         vrev64.8        d2,d2
638 #endif
639         veor            d25,d24
640         vbsl            d29,d19,d20             @ Ch(e,f,g)
641         vshr.u64        d24,d22,#28
642         veor            d26,d25                 @ Sigma1(e)
643         vadd.i64        d27,d29,d21
644         vshr.u64        d25,d22,#34
645         vsli.64         d24,d22,#36
646         vadd.i64        d27,d26
647         vshr.u64        d26,d22,#39
648         vadd.i64        d28,d2
649         vsli.64         d25,d22,#30
650         veor            d30,d22,d23
651         vsli.64         d26,d22,#25
652         veor            d21,d24,d25
653         vadd.i64        d27,d28
654         vbsl            d30,d16,d23             @ Maj(a,b,c)
655         veor            d21,d26                 @ Sigma0(a)
656         vadd.i64        d17,d27
657         vadd.i64        d30,d27
658         @ vadd.i64      d21,d30
659         vshr.u64        d24,d17,#14     @ 3
660 #if 3<16
661         vld1.64         {d3},[r1]!      @ handles unaligned
662 #endif
663         vshr.u64        d25,d17,#18
664 #if 3>0
665          vadd.i64       d21,d30                 @ h+=Maj from the past
666 #endif
667         vshr.u64        d26,d17,#41
668         vld1.64         {d28},[r3,:64]! @ K[i++]
669         vsli.64         d24,d17,#50
670         vsli.64         d25,d17,#46
671         vmov            d29,d17
672         vsli.64         d26,d17,#23
673 #if 3<16 && defined(__ARMEL__)
674         vrev64.8        d3,d3
675 #endif
676         veor            d25,d24
677         vbsl            d29,d18,d19             @ Ch(e,f,g)
678         vshr.u64        d24,d21,#28
679         veor            d26,d25                 @ Sigma1(e)
680         vadd.i64        d27,d29,d20
681         vshr.u64        d25,d21,#34
682         vsli.64         d24,d21,#36
683         vadd.i64        d27,d26
684         vshr.u64        d26,d21,#39
685         vadd.i64        d28,d3
686         vsli.64         d25,d21,#30
687         veor            d30,d21,d22
688         vsli.64         d26,d21,#25
689         veor            d20,d24,d25
690         vadd.i64        d27,d28
691         vbsl            d30,d23,d22             @ Maj(a,b,c)
692         veor            d20,d26                 @ Sigma0(a)
693         vadd.i64        d16,d27
694         vadd.i64        d30,d27
695         @ vadd.i64      d20,d30
696         vshr.u64        d24,d16,#14     @ 4
697 #if 4<16
698         vld1.64         {d4},[r1]!      @ handles unaligned
699 #endif
700         vshr.u64        d25,d16,#18
701 #if 4>0
702          vadd.i64       d20,d30                 @ h+=Maj from the past
703 #endif
704         vshr.u64        d26,d16,#41
705         vld1.64         {d28},[r3,:64]! @ K[i++]
706         vsli.64         d24,d16,#50
707         vsli.64         d25,d16,#46
708         vmov            d29,d16
709         vsli.64         d26,d16,#23
710 #if 4<16 && defined(__ARMEL__)
711         vrev64.8        d4,d4
712 #endif
713         veor            d25,d24
714         vbsl            d29,d17,d18             @ Ch(e,f,g)
715         vshr.u64        d24,d20,#28
716         veor            d26,d25                 @ Sigma1(e)
717         vadd.i64        d27,d29,d19
718         vshr.u64        d25,d20,#34
719         vsli.64         d24,d20,#36
720         vadd.i64        d27,d26
721         vshr.u64        d26,d20,#39
722         vadd.i64        d28,d4
723         vsli.64         d25,d20,#30
724         veor            d30,d20,d21
725         vsli.64         d26,d20,#25
726         veor            d19,d24,d25
727         vadd.i64        d27,d28
728         vbsl            d30,d22,d21             @ Maj(a,b,c)
729         veor            d19,d26                 @ Sigma0(a)
730         vadd.i64        d23,d27
731         vadd.i64        d30,d27
732         @ vadd.i64      d19,d30
733         vshr.u64        d24,d23,#14     @ 5
734 #if 5<16
735         vld1.64         {d5},[r1]!      @ handles unaligned
736 #endif
737         vshr.u64        d25,d23,#18
738 #if 5>0
739          vadd.i64       d19,d30                 @ h+=Maj from the past
740 #endif
741         vshr.u64        d26,d23,#41
742         vld1.64         {d28},[r3,:64]! @ K[i++]
743         vsli.64         d24,d23,#50
744         vsli.64         d25,d23,#46
745         vmov            d29,d23
746         vsli.64         d26,d23,#23
747 #if 5<16 && defined(__ARMEL__)
748         vrev64.8        d5,d5
749 #endif
750         veor            d25,d24
751         vbsl            d29,d16,d17             @ Ch(e,f,g)
752         vshr.u64        d24,d19,#28
753         veor            d26,d25                 @ Sigma1(e)
754         vadd.i64        d27,d29,d18
755         vshr.u64        d25,d19,#34
756         vsli.64         d24,d19,#36
757         vadd.i64        d27,d26
758         vshr.u64        d26,d19,#39
759         vadd.i64        d28,d5
760         vsli.64         d25,d19,#30
761         veor            d30,d19,d20
762         vsli.64         d26,d19,#25
763         veor            d18,d24,d25
764         vadd.i64        d27,d28
765         vbsl            d30,d21,d20             @ Maj(a,b,c)
766         veor            d18,d26                 @ Sigma0(a)
767         vadd.i64        d22,d27
768         vadd.i64        d30,d27
769         @ vadd.i64      d18,d30
770         vshr.u64        d24,d22,#14     @ 6
771 #if 6<16
772         vld1.64         {d6},[r1]!      @ handles unaligned
773 #endif
774         vshr.u64        d25,d22,#18
775 #if 6>0
776          vadd.i64       d18,d30                 @ h+=Maj from the past
777 #endif
778         vshr.u64        d26,d22,#41
779         vld1.64         {d28},[r3,:64]! @ K[i++]
780         vsli.64         d24,d22,#50
781         vsli.64         d25,d22,#46
782         vmov            d29,d22
783         vsli.64         d26,d22,#23
784 #if 6<16 && defined(__ARMEL__)
785         vrev64.8        d6,d6
786 #endif
787         veor            d25,d24
788         vbsl            d29,d23,d16             @ Ch(e,f,g)
789         vshr.u64        d24,d18,#28
790         veor            d26,d25                 @ Sigma1(e)
791         vadd.i64        d27,d29,d17
792         vshr.u64        d25,d18,#34
793         vsli.64         d24,d18,#36
794         vadd.i64        d27,d26
795         vshr.u64        d26,d18,#39
796         vadd.i64        d28,d6
797         vsli.64         d25,d18,#30
798         veor            d30,d18,d19
799         vsli.64         d26,d18,#25
800         veor            d17,d24,d25
801         vadd.i64        d27,d28
802         vbsl            d30,d20,d19             @ Maj(a,b,c)
803         veor            d17,d26                 @ Sigma0(a)
804         vadd.i64        d21,d27
805         vadd.i64        d30,d27
806         @ vadd.i64      d17,d30
807         vshr.u64        d24,d21,#14     @ 7
808 #if 7<16
809         vld1.64         {d7},[r1]!      @ handles unaligned
810 #endif
811         vshr.u64        d25,d21,#18
812 #if 7>0
813          vadd.i64       d17,d30                 @ h+=Maj from the past
814 #endif
815         vshr.u64        d26,d21,#41
816         vld1.64         {d28},[r3,:64]! @ K[i++]
817         vsli.64         d24,d21,#50
818         vsli.64         d25,d21,#46
819         vmov            d29,d21
820         vsli.64         d26,d21,#23
821 #if 7<16 && defined(__ARMEL__)
822         vrev64.8        d7,d7
823 #endif
824         veor            d25,d24
825         vbsl            d29,d22,d23             @ Ch(e,f,g)
826         vshr.u64        d24,d17,#28
827         veor            d26,d25                 @ Sigma1(e)
828         vadd.i64        d27,d29,d16
829         vshr.u64        d25,d17,#34
830         vsli.64         d24,d17,#36
831         vadd.i64        d27,d26
832         vshr.u64        d26,d17,#39
833         vadd.i64        d28,d7
834         vsli.64         d25,d17,#30
835         veor            d30,d17,d18
836         vsli.64         d26,d17,#25
837         veor            d16,d24,d25
838         vadd.i64        d27,d28
839         vbsl            d30,d19,d18             @ Maj(a,b,c)
840         veor            d16,d26                 @ Sigma0(a)
841         vadd.i64        d20,d27
842         vadd.i64        d30,d27
843         @ vadd.i64      d16,d30
844         vshr.u64        d24,d20,#14     @ 8
845 #if 8<16
846         vld1.64         {d8},[r1]!      @ handles unaligned
847 #endif
848         vshr.u64        d25,d20,#18
849 #if 8>0
850          vadd.i64       d16,d30                 @ h+=Maj from the past
851 #endif
852         vshr.u64        d26,d20,#41
853         vld1.64         {d28},[r3,:64]! @ K[i++]
854         vsli.64         d24,d20,#50
855         vsli.64         d25,d20,#46
856         vmov            d29,d20
857         vsli.64         d26,d20,#23
858 #if 8<16 && defined(__ARMEL__)
859         vrev64.8        d8,d8
860 #endif
861         veor            d25,d24
862         vbsl            d29,d21,d22             @ Ch(e,f,g)
863         vshr.u64        d24,d16,#28
864         veor            d26,d25                 @ Sigma1(e)
865         vadd.i64        d27,d29,d23
866         vshr.u64        d25,d16,#34
867         vsli.64         d24,d16,#36
868         vadd.i64        d27,d26
869         vshr.u64        d26,d16,#39
870         vadd.i64        d28,d8
871         vsli.64         d25,d16,#30
872         veor            d30,d16,d17
873         vsli.64         d26,d16,#25
874         veor            d23,d24,d25
875         vadd.i64        d27,d28
876         vbsl            d30,d18,d17             @ Maj(a,b,c)
877         veor            d23,d26                 @ Sigma0(a)
878         vadd.i64        d19,d27
879         vadd.i64        d30,d27
880         @ vadd.i64      d23,d30
881         vshr.u64        d24,d19,#14     @ 9
882 #if 9<16
883         vld1.64         {d9},[r1]!      @ handles unaligned
884 #endif
885         vshr.u64        d25,d19,#18
886 #if 9>0
887          vadd.i64       d23,d30                 @ h+=Maj from the past
888 #endif
889         vshr.u64        d26,d19,#41
890         vld1.64         {d28},[r3,:64]! @ K[i++]
891         vsli.64         d24,d19,#50
892         vsli.64         d25,d19,#46
893         vmov            d29,d19
894         vsli.64         d26,d19,#23
895 #if 9<16 && defined(__ARMEL__)
896         vrev64.8        d9,d9
897 #endif
898         veor            d25,d24
899         vbsl            d29,d20,d21             @ Ch(e,f,g)
900         vshr.u64        d24,d23,#28
901         veor            d26,d25                 @ Sigma1(e)
902         vadd.i64        d27,d29,d22
903         vshr.u64        d25,d23,#34
904         vsli.64         d24,d23,#36
905         vadd.i64        d27,d26
906         vshr.u64        d26,d23,#39
907         vadd.i64        d28,d9
908         vsli.64         d25,d23,#30
909         veor            d30,d23,d16
910         vsli.64         d26,d23,#25
911         veor            d22,d24,d25
912         vadd.i64        d27,d28
913         vbsl            d30,d17,d16             @ Maj(a,b,c)
914         veor            d22,d26                 @ Sigma0(a)
915         vadd.i64        d18,d27
916         vadd.i64        d30,d27
917         @ vadd.i64      d22,d30
918         vshr.u64        d24,d18,#14     @ 10
919 #if 10<16
920         vld1.64         {d10},[r1]!     @ handles unaligned
921 #endif
922         vshr.u64        d25,d18,#18
923 #if 10>0
924          vadd.i64       d22,d30                 @ h+=Maj from the past
925 #endif
926         vshr.u64        d26,d18,#41
927         vld1.64         {d28},[r3,:64]! @ K[i++]
928         vsli.64         d24,d18,#50
929         vsli.64         d25,d18,#46
930         vmov            d29,d18
931         vsli.64         d26,d18,#23
932 #if 10<16 && defined(__ARMEL__)
933         vrev64.8        d10,d10
934 #endif
935         veor            d25,d24
936         vbsl            d29,d19,d20             @ Ch(e,f,g)
937         vshr.u64        d24,d22,#28
938         veor            d26,d25                 @ Sigma1(e)
939         vadd.i64        d27,d29,d21
940         vshr.u64        d25,d22,#34
941         vsli.64         d24,d22,#36
942         vadd.i64        d27,d26
943         vshr.u64        d26,d22,#39
944         vadd.i64        d28,d10
945         vsli.64         d25,d22,#30
946         veor            d30,d22,d23
947         vsli.64         d26,d22,#25
948         veor            d21,d24,d25
949         vadd.i64        d27,d28
950         vbsl            d30,d16,d23             @ Maj(a,b,c)
951         veor            d21,d26                 @ Sigma0(a)
952         vadd.i64        d17,d27
953         vadd.i64        d30,d27
954         @ vadd.i64      d21,d30
955         vshr.u64        d24,d17,#14     @ 11
956 #if 11<16
957         vld1.64         {d11},[r1]!     @ handles unaligned
958 #endif
959         vshr.u64        d25,d17,#18
960 #if 11>0
961          vadd.i64       d21,d30                 @ h+=Maj from the past
962 #endif
963         vshr.u64        d26,d17,#41
964         vld1.64         {d28},[r3,:64]! @ K[i++]
965         vsli.64         d24,d17,#50
966         vsli.64         d25,d17,#46
967         vmov            d29,d17
968         vsli.64         d26,d17,#23
969 #if 11<16 && defined(__ARMEL__)
970         vrev64.8        d11,d11
971 #endif
972         veor            d25,d24
973         vbsl            d29,d18,d19             @ Ch(e,f,g)
974         vshr.u64        d24,d21,#28
975         veor            d26,d25                 @ Sigma1(e)
976         vadd.i64        d27,d29,d20
977         vshr.u64        d25,d21,#34
978         vsli.64         d24,d21,#36
979         vadd.i64        d27,d26
980         vshr.u64        d26,d21,#39
981         vadd.i64        d28,d11
982         vsli.64         d25,d21,#30
983         veor            d30,d21,d22
984         vsli.64         d26,d21,#25
985         veor            d20,d24,d25
986         vadd.i64        d27,d28
987         vbsl            d30,d23,d22             @ Maj(a,b,c)
988         veor            d20,d26                 @ Sigma0(a)
989         vadd.i64        d16,d27
990         vadd.i64        d30,d27
991         @ vadd.i64      d20,d30
992         vshr.u64        d24,d16,#14     @ 12
993 #if 12<16
994         vld1.64         {d12},[r1]!     @ handles unaligned
995 #endif
996         vshr.u64        d25,d16,#18
997 #if 12>0
998          vadd.i64       d20,d30                 @ h+=Maj from the past
999 #endif
1000         vshr.u64        d26,d16,#41
1001         vld1.64         {d28},[r3,:64]! @ K[i++]
1002         vsli.64         d24,d16,#50
1003         vsli.64         d25,d16,#46
1004         vmov            d29,d16
1005         vsli.64         d26,d16,#23
1006 #if 12<16 && defined(__ARMEL__)
1007         vrev64.8        d12,d12
1008 #endif
1009         veor            d25,d24
1010         vbsl            d29,d17,d18             @ Ch(e,f,g)
1011         vshr.u64        d24,d20,#28
1012         veor            d26,d25                 @ Sigma1(e)
1013         vadd.i64        d27,d29,d19
1014         vshr.u64        d25,d20,#34
1015         vsli.64         d24,d20,#36
1016         vadd.i64        d27,d26
1017         vshr.u64        d26,d20,#39
1018         vadd.i64        d28,d12
1019         vsli.64         d25,d20,#30
1020         veor            d30,d20,d21
1021         vsli.64         d26,d20,#25
1022         veor            d19,d24,d25
1023         vadd.i64        d27,d28
1024         vbsl            d30,d22,d21             @ Maj(a,b,c)
1025         veor            d19,d26                 @ Sigma0(a)
1026         vadd.i64        d23,d27
1027         vadd.i64        d30,d27
1028         @ vadd.i64      d19,d30
1029         vshr.u64        d24,d23,#14     @ 13
1030 #if 13<16
1031         vld1.64         {d13},[r1]!     @ handles unaligned
1032 #endif
1033         vshr.u64        d25,d23,#18
1034 #if 13>0
1035          vadd.i64       d19,d30                 @ h+=Maj from the past
1036 #endif
1037         vshr.u64        d26,d23,#41
1038         vld1.64         {d28},[r3,:64]! @ K[i++]
1039         vsli.64         d24,d23,#50
1040         vsli.64         d25,d23,#46
1041         vmov            d29,d23
1042         vsli.64         d26,d23,#23
1043 #if 13<16 && defined(__ARMEL__)
1044         vrev64.8        d13,d13
1045 #endif
1046         veor            d25,d24
1047         vbsl            d29,d16,d17             @ Ch(e,f,g)
1048         vshr.u64        d24,d19,#28
1049         veor            d26,d25                 @ Sigma1(e)
1050         vadd.i64        d27,d29,d18
1051         vshr.u64        d25,d19,#34
1052         vsli.64         d24,d19,#36
1053         vadd.i64        d27,d26
1054         vshr.u64        d26,d19,#39
1055         vadd.i64        d28,d13
1056         vsli.64         d25,d19,#30
1057         veor            d30,d19,d20
1058         vsli.64         d26,d19,#25
1059         veor            d18,d24,d25
1060         vadd.i64        d27,d28
1061         vbsl            d30,d21,d20             @ Maj(a,b,c)
1062         veor            d18,d26                 @ Sigma0(a)
1063         vadd.i64        d22,d27
1064         vadd.i64        d30,d27
1065         @ vadd.i64      d18,d30
1066         vshr.u64        d24,d22,#14     @ 14
1067 #if 14<16
1068         vld1.64         {d14},[r1]!     @ handles unaligned
1069 #endif
1070         vshr.u64        d25,d22,#18
1071 #if 14>0
1072          vadd.i64       d18,d30                 @ h+=Maj from the past
1073 #endif
1074         vshr.u64        d26,d22,#41
1075         vld1.64         {d28},[r3,:64]! @ K[i++]
1076         vsli.64         d24,d22,#50
1077         vsli.64         d25,d22,#46
1078         vmov            d29,d22
1079         vsli.64         d26,d22,#23
1080 #if 14<16 && defined(__ARMEL__)
1081         vrev64.8        d14,d14
1082 #endif
1083         veor            d25,d24
1084         vbsl            d29,d23,d16             @ Ch(e,f,g)
1085         vshr.u64        d24,d18,#28
1086         veor            d26,d25                 @ Sigma1(e)
1087         vadd.i64        d27,d29,d17
1088         vshr.u64        d25,d18,#34
1089         vsli.64         d24,d18,#36
1090         vadd.i64        d27,d26
1091         vshr.u64        d26,d18,#39
1092         vadd.i64        d28,d14
1093         vsli.64         d25,d18,#30
1094         veor            d30,d18,d19
1095         vsli.64         d26,d18,#25
1096         veor            d17,d24,d25
1097         vadd.i64        d27,d28
1098         vbsl            d30,d20,d19             @ Maj(a,b,c)
1099         veor            d17,d26                 @ Sigma0(a)
1100         vadd.i64        d21,d27
1101         vadd.i64        d30,d27
1102         @ vadd.i64      d17,d30
1103         vshr.u64        d24,d21,#14     @ 15
1104 #if 15<16
1105         vld1.64         {d15},[r1]!     @ handles unaligned
1106 #endif
1107         vshr.u64        d25,d21,#18
1108 #if 15>0
1109          vadd.i64       d17,d30                 @ h+=Maj from the past
1110 #endif
1111         vshr.u64        d26,d21,#41
1112         vld1.64         {d28},[r3,:64]! @ K[i++]
1113         vsli.64         d24,d21,#50
1114         vsli.64         d25,d21,#46
1115         vmov            d29,d21
1116         vsli.64         d26,d21,#23
1117 #if 15<16 && defined(__ARMEL__)
1118         vrev64.8        d15,d15
1119 #endif
1120         veor            d25,d24
1121         vbsl            d29,d22,d23             @ Ch(e,f,g)
1122         vshr.u64        d24,d17,#28
1123         veor            d26,d25                 @ Sigma1(e)
1124         vadd.i64        d27,d29,d16
1125         vshr.u64        d25,d17,#34
1126         vsli.64         d24,d17,#36
1127         vadd.i64        d27,d26
1128         vshr.u64        d26,d17,#39
1129         vadd.i64        d28,d15
1130         vsli.64         d25,d17,#30
1131         veor            d30,d17,d18
1132         vsli.64         d26,d17,#25
1133         veor            d16,d24,d25
1134         vadd.i64        d27,d28
1135         vbsl            d30,d19,d18             @ Maj(a,b,c)
1136         veor            d16,d26                 @ Sigma0(a)
1137         vadd.i64        d20,d27
1138         vadd.i64        d30,d27
1139         @ vadd.i64      d16,d30
1140         mov             r12,#4
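        @ r12 counts the remaining four 16-round passes (rounds 16..79); each
        @ pass also computes 16 new schedule words with sigma0/sigma1 below.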
1141 .L16_79_neon:
1142         subs            r12,#1
1143         vshr.u64        q12,q7,#19
1144         vshr.u64        q13,q7,#61
1145          vadd.i64       d16,d30                 @ h+=Maj from the past
1146         vshr.u64        q15,q7,#6
1147         vsli.64         q12,q7,#45
1148         vext.8          q14,q0,q1,#8    @ X[i+1]
1149         vsli.64         q13,q7,#3
1150         veor            q15,q12
1151         vshr.u64        q12,q14,#1
1152         veor            q15,q13                         @ sigma1(X[i+14])
1153         vshr.u64        q13,q14,#8
1154         vadd.i64        q0,q15
1155         vshr.u64        q15,q14,#7
1156         vsli.64         q12,q14,#63
1157         vsli.64         q13,q14,#56
1158         vext.8          q14,q4,q5,#8    @ X[i+9]
1159         veor            q15,q12
1160         vshr.u64        d24,d20,#14             @ from NEON_00_15
1161         vadd.i64        q0,q14
1162         vshr.u64        d25,d20,#18             @ from NEON_00_15
1163         veor            q15,q13                         @ sigma0(X[i+1])
1164         vshr.u64        d26,d20,#41             @ from NEON_00_15
1165         vadd.i64        q0,q15
1166         vld1.64         {d28},[r3,:64]! @ K[i++]
1167         vsli.64         d24,d20,#50
1168         vsli.64         d25,d20,#46
1169         vmov            d29,d20
1170         vsli.64         d26,d20,#23
1171 #if 16<16 && defined(__ARMEL__)
1172         vrev64.8        ,
1173 #endif
1174         veor            d25,d24
1175         vbsl            d29,d21,d22             @ Ch(e,f,g)
1176         vshr.u64        d24,d16,#28
1177         veor            d26,d25                 @ Sigma1(e)
1178         vadd.i64        d27,d29,d23
1179         vshr.u64        d25,d16,#34
1180         vsli.64         d24,d16,#36
1181         vadd.i64        d27,d26
1182         vshr.u64        d26,d16,#39
1183         vadd.i64        d28,d0
1184         vsli.64         d25,d16,#30
1185         veor            d30,d16,d17
1186         vsli.64         d26,d16,#25
1187         veor            d23,d24,d25
1188         vadd.i64        d27,d28
1189         vbsl            d30,d18,d17             @ Maj(a,b,c)
1190         veor            d23,d26                 @ Sigma0(a)
1191         vadd.i64        d19,d27
1192         vadd.i64        d30,d27
1193         @ vadd.i64      d23,d30
1194         vshr.u64        d24,d19,#14     @ 17
1195 #if 17<16
1196         vld1.64         {d1},[r1]!      @ handles unaligned
1197 #endif
1198         vshr.u64        d25,d19,#18
1199 #if 17>0
1200          vadd.i64       d23,d30                 @ h+=Maj from the past
1201 #endif
1202         vshr.u64        d26,d19,#41
1203         vld1.64         {d28},[r3,:64]! @ K[i++]
1204         vsli.64         d24,d19,#50
1205         vsli.64         d25,d19,#46
1206         vmov            d29,d19
1207         vsli.64         d26,d19,#23
1208 #if 17<16 && defined(__ARMEL__)
1209         vrev64.8        ,
1210 #endif
1211         veor            d25,d24
1212         vbsl            d29,d20,d21             @ Ch(e,f,g)
1213         vshr.u64        d24,d23,#28
1214         veor            d26,d25                 @ Sigma1(e)
1215         vadd.i64        d27,d29,d22
1216         vshr.u64        d25,d23,#34
1217         vsli.64         d24,d23,#36
1218         vadd.i64        d27,d26
1219         vshr.u64        d26,d23,#39
1220         vadd.i64        d28,d1
1221         vsli.64         d25,d23,#30
1222         veor            d30,d23,d16
1223         vsli.64         d26,d23,#25
1224         veor            d22,d24,d25
1225         vadd.i64        d27,d28
1226         vbsl            d30,d17,d16             @ Maj(a,b,c)
1227         veor            d22,d26                 @ Sigma0(a)
1228         vadd.i64        d18,d27
1229         vadd.i64        d30,d27
1230         @ vadd.i64      d22,d30
1231         vshr.u64        q12,q0,#19
1232         vshr.u64        q13,q0,#61
1233          vadd.i64       d22,d30                 @ h+=Maj from the past
1234         vshr.u64        q15,q0,#6
1235         vsli.64         q12,q0,#45
1236         vext.8          q14,q1,q2,#8    @ X[i+1]
1237         vsli.64         q13,q0,#3
1238         veor            q15,q12
1239         vshr.u64        q12,q14,#1
1240         veor            q15,q13                         @ sigma1(X[i+14])
1241         vshr.u64        q13,q14,#8
1242         vadd.i64        q1,q15
1243         vshr.u64        q15,q14,#7
1244         vsli.64         q12,q14,#63
1245         vsli.64         q13,q14,#56
1246         vext.8          q14,q5,q6,#8    @ X[i+9]
1247         veor            q15,q12
1248         vshr.u64        d24,d18,#14             @ from NEON_00_15
1249         vadd.i64        q1,q14
1250         vshr.u64        d25,d18,#18             @ from NEON_00_15
1251         veor            q15,q13                         @ sigma0(X[i+1])
1252         vshr.u64        d26,d18,#41             @ from NEON_00_15
1253         vadd.i64        q1,q15
1254         vld1.64         {d28},[r3,:64]! @ K[i++]
1255         vsli.64         d24,d18,#50
1256         vsli.64         d25,d18,#46
1257         vmov            d29,d18
1258         vsli.64         d26,d18,#23
1259 #if 18<16 && defined(__ARMEL__)
1260         vrev64.8        ,
1261 #endif
1262         veor            d25,d24
1263         vbsl            d29,d19,d20             @ Ch(e,f,g)
1264         vshr.u64        d24,d22,#28
1265         veor            d26,d25                 @ Sigma1(e)
1266         vadd.i64        d27,d29,d21
1267         vshr.u64        d25,d22,#34
1268         vsli.64         d24,d22,#36
1269         vadd.i64        d27,d26
1270         vshr.u64        d26,d22,#39
1271         vadd.i64        d28,d2
1272         vsli.64         d25,d22,#30
1273         veor            d30,d22,d23
1274         vsli.64         d26,d22,#25
1275         veor            d21,d24,d25
1276         vadd.i64        d27,d28
1277         vbsl            d30,d16,d23             @ Maj(a,b,c)
1278         veor            d21,d26                 @ Sigma0(a)
1279         vadd.i64        d17,d27
1280         vadd.i64        d30,d27
1281         @ vadd.i64      d21,d30
1282         vshr.u64        d24,d17,#14     @ 19
1283 #if 19<16
1284         vld1.64         {d3},[r1]!      @ handles unaligned
1285 #endif
1286         vshr.u64        d25,d17,#18
1287 #if 19>0
1288          vadd.i64       d21,d30                 @ h+=Maj from the past
1289 #endif
1290         vshr.u64        d26,d17,#41
1291         vld1.64         {d28},[r3,:64]! @ K[i++]
1292         vsli.64         d24,d17,#50
1293         vsli.64         d25,d17,#46
1294         vmov            d29,d17
1295         vsli.64         d26,d17,#23
1296 #if 19<16 && defined(__ARMEL__)
1297         vrev64.8        ,
1298 #endif
1299         veor            d25,d24
1300         vbsl            d29,d18,d19             @ Ch(e,f,g)
1301         vshr.u64        d24,d21,#28
1302         veor            d26,d25                 @ Sigma1(e)
1303         vadd.i64        d27,d29,d20
1304         vshr.u64        d25,d21,#34
1305         vsli.64         d24,d21,#36
1306         vadd.i64        d27,d26
1307         vshr.u64        d26,d21,#39
1308         vadd.i64        d28,d3
1309         vsli.64         d25,d21,#30
1310         veor            d30,d21,d22
1311         vsli.64         d26,d21,#25
1312         veor            d20,d24,d25
1313         vadd.i64        d27,d28
1314         vbsl            d30,d23,d22             @ Maj(a,b,c)
1315         veor            d20,d26                 @ Sigma0(a)
1316         vadd.i64        d16,d27
1317         vadd.i64        d30,d27
1318         @ vadd.i64      d20,d30
1319         vshr.u64        q12,q1,#19
1320         vshr.u64        q13,q1,#61
1321          vadd.i64       d20,d30                 @ h+=Maj from the past
1322         vshr.u64        q15,q1,#6
1323         vsli.64         q12,q1,#45
1324         vext.8          q14,q2,q3,#8    @ X[i+1]
1325         vsli.64         q13,q1,#3
1326         veor            q15,q12
1327         vshr.u64        q12,q14,#1
1328         veor            q15,q13                         @ sigma1(X[i+14])
1329         vshr.u64        q13,q14,#8
1330         vadd.i64        q2,q15
1331         vshr.u64        q15,q14,#7
1332         vsli.64         q12,q14,#63
1333         vsli.64         q13,q14,#56
1334         vext.8          q14,q6,q7,#8    @ X[i+9]
1335         veor            q15,q12
1336         vshr.u64        d24,d16,#14             @ from NEON_00_15
1337         vadd.i64        q2,q14
1338         vshr.u64        d25,d16,#18             @ from NEON_00_15
1339         veor            q15,q13                         @ sigma0(X[i+1])
1340         vshr.u64        d26,d16,#41             @ from NEON_00_15
1341         vadd.i64        q2,q15
1342         vld1.64         {d28},[r3,:64]! @ K[i++]
1343         vsli.64         d24,d16,#50
1344         vsli.64         d25,d16,#46
1345         vmov            d29,d16
1346         vsli.64         d26,d16,#23
1347 #if 20<16 && defined(__ARMEL__)
1348         vrev64.8        ,
1349 #endif
1350         veor            d25,d24
1351         vbsl            d29,d17,d18             @ Ch(e,f,g)
1352         vshr.u64        d24,d20,#28
1353         veor            d26,d25                 @ Sigma1(e)
1354         vadd.i64        d27,d29,d19
1355         vshr.u64        d25,d20,#34
1356         vsli.64         d24,d20,#36
1357         vadd.i64        d27,d26
1358         vshr.u64        d26,d20,#39
1359         vadd.i64        d28,d4
1360         vsli.64         d25,d20,#30
1361         veor            d30,d20,d21
1362         vsli.64         d26,d20,#25
1363         veor            d19,d24,d25
1364         vadd.i64        d27,d28
1365         vbsl            d30,d22,d21             @ Maj(a,b,c)
1366         veor            d19,d26                 @ Sigma0(a)
1367         vadd.i64        d23,d27
1368         vadd.i64        d30,d27
1369         @ vadd.i64      d19,d30
1370         vshr.u64        d24,d23,#14     @ 21
1371 #if 21<16
1372         vld1.64         {d5},[r1]!      @ handles unaligned
1373 #endif
1374         vshr.u64        d25,d23,#18
1375 #if 21>0
1376          vadd.i64       d19,d30                 @ h+=Maj from the past
1377 #endif
1378         vshr.u64        d26,d23,#41
1379         vld1.64         {d28},[r3,:64]! @ K[i++]
1380         vsli.64         d24,d23,#50
1381         vsli.64         d25,d23,#46
1382         vmov            d29,d23
1383         vsli.64         d26,d23,#23
1384 #if 21<16 && defined(__ARMEL__)
1385         vrev64.8        ,
1386 #endif
1387         veor            d25,d24
1388         vbsl            d29,d16,d17             @ Ch(e,f,g)
1389         vshr.u64        d24,d19,#28
1390         veor            d26,d25                 @ Sigma1(e)
1391         vadd.i64        d27,d29,d18
1392         vshr.u64        d25,d19,#34
1393         vsli.64         d24,d19,#36
1394         vadd.i64        d27,d26
1395         vshr.u64        d26,d19,#39
1396         vadd.i64        d28,d5
1397         vsli.64         d25,d19,#30
1398         veor            d30,d19,d20
1399         vsli.64         d26,d19,#25
1400         veor            d18,d24,d25
1401         vadd.i64        d27,d28
1402         vbsl            d30,d21,d20             @ Maj(a,b,c)
1403         veor            d18,d26                 @ Sigma0(a)
1404         vadd.i64        d22,d27
1405         vadd.i64        d30,d27
1406         @ vadd.i64      d18,d30
1407         vshr.u64        q12,q2,#19
1408         vshr.u64        q13,q2,#61
1409          vadd.i64       d18,d30                 @ h+=Maj from the past
1410         vshr.u64        q15,q2,#6
1411         vsli.64         q12,q2,#45
1412         vext.8          q14,q3,q4,#8    @ X[i+1]
1413         vsli.64         q13,q2,#3
1414         veor            q15,q12
1415         vshr.u64        q12,q14,#1
1416         veor            q15,q13                         @ sigma1(X[i+14])
1417         vshr.u64        q13,q14,#8
1418         vadd.i64        q3,q15
1419         vshr.u64        q15,q14,#7
1420         vsli.64         q12,q14,#63
1421         vsli.64         q13,q14,#56
1422         vext.8          q14,q7,q0,#8    @ X[i+9]
1423         veor            q15,q12
1424         vshr.u64        d24,d22,#14             @ from NEON_00_15
1425         vadd.i64        q3,q14
1426         vshr.u64        d25,d22,#18             @ from NEON_00_15
1427         veor            q15,q13                         @ sigma0(X[i+1])
1428         vshr.u64        d26,d22,#41             @ from NEON_00_15
1429         vadd.i64        q3,q15
1430         vld1.64         {d28},[r3,:64]! @ K[i++]
1431         vsli.64         d24,d22,#50
1432         vsli.64         d25,d22,#46
1433         vmov            d29,d22
1434         vsli.64         d26,d22,#23
1435 #if 22<16 && defined(__ARMEL__)
1436         vrev64.8        ,
1437 #endif
1438         veor            d25,d24
1439         vbsl            d29,d23,d16             @ Ch(e,f,g)
1440         vshr.u64        d24,d18,#28
1441         veor            d26,d25                 @ Sigma1(e)
1442         vadd.i64        d27,d29,d17
1443         vshr.u64        d25,d18,#34
1444         vsli.64         d24,d18,#36
1445         vadd.i64        d27,d26
1446         vshr.u64        d26,d18,#39
1447         vadd.i64        d28,d6
1448         vsli.64         d25,d18,#30
1449         veor            d30,d18,d19
1450         vsli.64         d26,d18,#25
1451         veor            d17,d24,d25
1452         vadd.i64        d27,d28
1453         vbsl            d30,d20,d19             @ Maj(a,b,c)
1454         veor            d17,d26                 @ Sigma0(a)
1455         vadd.i64        d21,d27
1456         vadd.i64        d30,d27
1457         @ vadd.i64      d17,d30
1458         vshr.u64        d24,d21,#14     @ round 23
1459 #if 23<16
1460         vld1.64         {d7},[r1]!      @ handles unaligned
1461 #endif
1462         vshr.u64        d25,d21,#18
1463 #if 23>0
1464          vadd.i64       d17,d30                 @ h+=Maj from the past
1465 #endif
1466         vshr.u64        d26,d21,#41
1467         vld1.64         {d28},[r3,:64]! @ K[i++]
1468         vsli.64         d24,d21,#50
1469         vsli.64         d25,d21,#46
1470         vmov            d29,d21
1471         vsli.64         d26,d21,#23
1472 #if 23<16 && defined(__ARMEL__)
1473         vrev64.8        ,
1474 #endif
1475         veor            d25,d24
1476         vbsl            d29,d22,d23             @ Ch(e,f,g)
1477         vshr.u64        d24,d17,#28
1478         veor            d26,d25                 @ Sigma1(e)
1479         vadd.i64        d27,d29,d16
1480         vshr.u64        d25,d17,#34
1481         vsli.64         d24,d17,#36
1482         vadd.i64        d27,d26
1483         vshr.u64        d26,d17,#39
1484         vadd.i64        d28,d7
1485         vsli.64         d25,d17,#30
1486         veor            d30,d17,d18
1487         vsli.64         d26,d17,#25
1488         veor            d16,d24,d25
1489         vadd.i64        d27,d28
1490         vbsl            d30,d19,d18             @ Maj(a,b,c)
1491         veor            d16,d26                 @ Sigma0(a)
1492         vadd.i64        d20,d27
1493         vadd.i64        d30,d27
1494         @ vadd.i64      d16,d30
1495         vshr.u64        q12,q3,#19
1496         vshr.u64        q13,q3,#61
1497          vadd.i64       d16,d30                 @ h+=Maj from the past
1498         vshr.u64        q15,q3,#6
1499         vsli.64         q12,q3,#45
1500         vext.8          q14,q4,q5,#8    @ X[i+1]
1501         vsli.64         q13,q3,#3
1502         veor            q15,q12
1503         vshr.u64        q12,q14,#1
1504         veor            q15,q13                         @ sigma1(X[i+14])
1505         vshr.u64        q13,q14,#8
1506         vadd.i64        q4,q15
1507         vshr.u64        q15,q14,#7
1508         vsli.64         q12,q14,#63
1509         vsli.64         q13,q14,#56
1510         vext.8          q14,q0,q1,#8    @ X[i+9]
1511         veor            q15,q12
1512         vshr.u64        d24,d20,#14             @ from NEON_00_15
1513         vadd.i64        q4,q14
1514         vshr.u64        d25,d20,#18             @ from NEON_00_15
1515         veor            q15,q13                         @ sigma0(X[i+1])
1516         vshr.u64        d26,d20,#41             @ from NEON_00_15
1517         vadd.i64        q4,q15
1518         vld1.64         {d28},[r3,:64]! @ K[i++]
1519         vsli.64         d24,d20,#50
1520         vsli.64         d25,d20,#46
1521         vmov            d29,d20
1522         vsli.64         d26,d20,#23
1523 #if 24<16 && defined(__ARMEL__)
1524         vrev64.8        ,
1525 #endif
1526         veor            d25,d24
1527         vbsl            d29,d21,d22             @ Ch(e,f,g)
1528         vshr.u64        d24,d16,#28
1529         veor            d26,d25                 @ Sigma1(e)
1530         vadd.i64        d27,d29,d23
1531         vshr.u64        d25,d16,#34
1532         vsli.64         d24,d16,#36
1533         vadd.i64        d27,d26
1534         vshr.u64        d26,d16,#39
1535         vadd.i64        d28,d8
1536         vsli.64         d25,d16,#30
1537         veor            d30,d16,d17
1538         vsli.64         d26,d16,#25
1539         veor            d23,d24,d25
1540         vadd.i64        d27,d28
1541         vbsl            d30,d18,d17             @ Maj(a,b,c)
1542         veor            d23,d26                 @ Sigma0(a)
1543         vadd.i64        d19,d27
1544         vadd.i64        d30,d27
1545         @ vadd.i64      d23,d30
1546         vshr.u64        d24,d19,#14     @ round 25
1547 #if 25<16
1548         vld1.64         {d9},[r1]!      @ handles unaligned
1549 #endif
1550         vshr.u64        d25,d19,#18
1551 #if 25>0
1552          vadd.i64       d23,d30                 @ h+=Maj from the past
1553 #endif
1554         vshr.u64        d26,d19,#41
1555         vld1.64         {d28},[r3,:64]! @ K[i++]
1556         vsli.64         d24,d19,#50
1557         vsli.64         d25,d19,#46
1558         vmov            d29,d19
1559         vsli.64         d26,d19,#23
1560 #if 25<16 && defined(__ARMEL__)
1561         vrev64.8        ,
1562 #endif
1563         veor            d25,d24
1564         vbsl            d29,d20,d21             @ Ch(e,f,g)
1565         vshr.u64        d24,d23,#28
1566         veor            d26,d25                 @ Sigma1(e)
1567         vadd.i64        d27,d29,d22
1568         vshr.u64        d25,d23,#34
1569         vsli.64         d24,d23,#36
1570         vadd.i64        d27,d26
1571         vshr.u64        d26,d23,#39
1572         vadd.i64        d28,d9
1573         vsli.64         d25,d23,#30
1574         veor            d30,d23,d16
1575         vsli.64         d26,d23,#25
1576         veor            d22,d24,d25
1577         vadd.i64        d27,d28
1578         vbsl            d30,d17,d16             @ Maj(a,b,c)
1579         veor            d22,d26                 @ Sigma0(a)
1580         vadd.i64        d18,d27
1581         vadd.i64        d30,d27
1582         @ vadd.i64      d22,d30
1583         vshr.u64        q12,q4,#19
1584         vshr.u64        q13,q4,#61
1585          vadd.i64       d22,d30                 @ h+=Maj from the past
1586         vshr.u64        q15,q4,#6
1587         vsli.64         q12,q4,#45
1588         vext.8          q14,q5,q6,#8    @ X[i+1]
1589         vsli.64         q13,q4,#3
1590         veor            q15,q12
1591         vshr.u64        q12,q14,#1
1592         veor            q15,q13                         @ sigma1(X[i+14])
1593         vshr.u64        q13,q14,#8
1594         vadd.i64        q5,q15
1595         vshr.u64        q15,q14,#7
1596         vsli.64         q12,q14,#63
1597         vsli.64         q13,q14,#56
1598         vext.8          q14,q1,q2,#8    @ X[i+9]
1599         veor            q15,q12
1600         vshr.u64        d24,d18,#14             @ from NEON_00_15
1601         vadd.i64        q5,q14
1602         vshr.u64        d25,d18,#18             @ from NEON_00_15
1603         veor            q15,q13                         @ sigma0(X[i+1])
1604         vshr.u64        d26,d18,#41             @ from NEON_00_15
1605         vadd.i64        q5,q15
1606         vld1.64         {d28},[r3,:64]! @ K[i++]
1607         vsli.64         d24,d18,#50
1608         vsli.64         d25,d18,#46
1609         vmov            d29,d18
1610         vsli.64         d26,d18,#23
1611 #if 26<16 && defined(__ARMEL__)
1612         vrev64.8        ,
1613 #endif
1614         veor            d25,d24
1615         vbsl            d29,d19,d20             @ Ch(e,f,g)
1616         vshr.u64        d24,d22,#28
1617         veor            d26,d25                 @ Sigma1(e)
1618         vadd.i64        d27,d29,d21
1619         vshr.u64        d25,d22,#34
1620         vsli.64         d24,d22,#36
1621         vadd.i64        d27,d26
1622         vshr.u64        d26,d22,#39
1623         vadd.i64        d28,d10
1624         vsli.64         d25,d22,#30
1625         veor            d30,d22,d23
1626         vsli.64         d26,d22,#25
1627         veor            d21,d24,d25
1628         vadd.i64        d27,d28
1629         vbsl            d30,d16,d23             @ Maj(a,b,c)
1630         veor            d21,d26                 @ Sigma0(a)
1631         vadd.i64        d17,d27
1632         vadd.i64        d30,d27
1633         @ vadd.i64      d21,d30
1634         vshr.u64        d24,d17,#14     @ round 27
1635 #if 27<16
1636         vld1.64         {d11},[r1]!     @ handles unaligned
1637 #endif
1638         vshr.u64        d25,d17,#18
1639 #if 27>0
1640          vadd.i64       d21,d30                 @ h+=Maj from the past
1641 #endif
1642         vshr.u64        d26,d17,#41
1643         vld1.64         {d28},[r3,:64]! @ K[i++]
1644         vsli.64         d24,d17,#50
1645         vsli.64         d25,d17,#46
1646         vmov            d29,d17
1647         vsli.64         d26,d17,#23
1648 #if 27<16 && defined(__ARMEL__)
1649         vrev64.8        ,
1650 #endif
1651         veor            d25,d24
1652         vbsl            d29,d18,d19             @ Ch(e,f,g)
1653         vshr.u64        d24,d21,#28
1654         veor            d26,d25                 @ Sigma1(e)
1655         vadd.i64        d27,d29,d20
1656         vshr.u64        d25,d21,#34
1657         vsli.64         d24,d21,#36
1658         vadd.i64        d27,d26
1659         vshr.u64        d26,d21,#39
1660         vadd.i64        d28,d11
1661         vsli.64         d25,d21,#30
1662         veor            d30,d21,d22
1663         vsli.64         d26,d21,#25
1664         veor            d20,d24,d25
1665         vadd.i64        d27,d28
1666         vbsl            d30,d23,d22             @ Maj(a,b,c)
1667         veor            d20,d26                 @ Sigma0(a)
1668         vadd.i64        d16,d27
1669         vadd.i64        d30,d27
1670         @ vadd.i64      d20,d30
1671         vshr.u64        q12,q5,#19
1672         vshr.u64        q13,q5,#61
1673          vadd.i64       d20,d30                 @ h+=Maj from the past
1674         vshr.u64        q15,q5,#6
1675         vsli.64         q12,q5,#45
1676         vext.8          q14,q6,q7,#8    @ X[i+1]
1677         vsli.64         q13,q5,#3
1678         veor            q15,q12
1679         vshr.u64        q12,q14,#1
1680         veor            q15,q13                         @ sigma1(X[i+14])
1681         vshr.u64        q13,q14,#8
1682         vadd.i64        q6,q15
1683         vshr.u64        q15,q14,#7
1684         vsli.64         q12,q14,#63
1685         vsli.64         q13,q14,#56
1686         vext.8          q14,q2,q3,#8    @ X[i+9]
1687         veor            q15,q12
1688         vshr.u64        d24,d16,#14             @ from NEON_00_15
1689         vadd.i64        q6,q14
1690         vshr.u64        d25,d16,#18             @ from NEON_00_15
1691         veor            q15,q13                         @ sigma0(X[i+1])
1692         vshr.u64        d26,d16,#41             @ from NEON_00_15
1693         vadd.i64        q6,q15
1694         vld1.64         {d28},[r3,:64]! @ K[i++]
1695         vsli.64         d24,d16,#50
1696         vsli.64         d25,d16,#46
1697         vmov            d29,d16
1698         vsli.64         d26,d16,#23
1699 #if 28<16 && defined(__ARMEL__)
1700         vrev64.8        ,
1701 #endif
1702         veor            d25,d24
1703         vbsl            d29,d17,d18             @ Ch(e,f,g)
1704         vshr.u64        d24,d20,#28
1705         veor            d26,d25                 @ Sigma1(e)
1706         vadd.i64        d27,d29,d19
1707         vshr.u64        d25,d20,#34
1708         vsli.64         d24,d20,#36
1709         vadd.i64        d27,d26
1710         vshr.u64        d26,d20,#39
1711         vadd.i64        d28,d12
1712         vsli.64         d25,d20,#30
1713         veor            d30,d20,d21
1714         vsli.64         d26,d20,#25
1715         veor            d19,d24,d25
1716         vadd.i64        d27,d28
1717         vbsl            d30,d22,d21             @ Maj(a,b,c)
1718         veor            d19,d26                 @ Sigma0(a)
1719         vadd.i64        d23,d27
1720         vadd.i64        d30,d27
1721         @ vadd.i64      d19,d30
1722         vshr.u64        d24,d23,#14     @ round 29
1723 #if 29<16
1724         vld1.64         {d13},[r1]!     @ handles unaligned
1725 #endif
1726         vshr.u64        d25,d23,#18
1727 #if 29>0
1728          vadd.i64       d19,d30                 @ h+=Maj from the past
1729 #endif
1730         vshr.u64        d26,d23,#41
1731         vld1.64         {d28},[r3,:64]! @ K[i++]
1732         vsli.64         d24,d23,#50
1733         vsli.64         d25,d23,#46
1734         vmov            d29,d23
1735         vsli.64         d26,d23,#23
1736 #if 29<16 && defined(__ARMEL__)
1737         vrev64.8        ,
1738 #endif
1739         veor            d25,d24
1740         vbsl            d29,d16,d17             @ Ch(e,f,g)
1741         vshr.u64        d24,d19,#28
1742         veor            d26,d25                 @ Sigma1(e)
1743         vadd.i64        d27,d29,d18
1744         vshr.u64        d25,d19,#34
1745         vsli.64         d24,d19,#36
1746         vadd.i64        d27,d26
1747         vshr.u64        d26,d19,#39
1748         vadd.i64        d28,d13
1749         vsli.64         d25,d19,#30
1750         veor            d30,d19,d20
1751         vsli.64         d26,d19,#25
1752         veor            d18,d24,d25
1753         vadd.i64        d27,d28
1754         vbsl            d30,d21,d20             @ Maj(a,b,c)
1755         veor            d18,d26                 @ Sigma0(a)
1756         vadd.i64        d22,d27
1757         vadd.i64        d30,d27
1758         @ vadd.i64      d18,d30
1759         vshr.u64        q12,q6,#19
1760         vshr.u64        q13,q6,#61
1761          vadd.i64       d18,d30                 @ h+=Maj from the past
1762         vshr.u64        q15,q6,#6
1763         vsli.64         q12,q6,#45
1764         vext.8          q14,q7,q0,#8    @ X[i+1]
1765         vsli.64         q13,q6,#3
1766         veor            q15,q12
1767         vshr.u64        q12,q14,#1
1768         veor            q15,q13                         @ sigma1(X[i+14])
1769         vshr.u64        q13,q14,#8
1770         vadd.i64        q7,q15
1771         vshr.u64        q15,q14,#7
1772         vsli.64         q12,q14,#63
1773         vsli.64         q13,q14,#56
1774         vext.8          q14,q3,q4,#8    @ X[i+9]
1775         veor            q15,q12
1776         vshr.u64        d24,d22,#14             @ from NEON_00_15
1777         vadd.i64        q7,q14
1778         vshr.u64        d25,d22,#18             @ from NEON_00_15
1779         veor            q15,q13                         @ sigma0(X[i+1])
1780         vshr.u64        d26,d22,#41             @ from NEON_00_15
1781         vadd.i64        q7,q15
1782         vld1.64         {d28},[r3,:64]! @ K[i++]
1783         vsli.64         d24,d22,#50
1784         vsli.64         d25,d22,#46
1785         vmov            d29,d22
1786         vsli.64         d26,d22,#23
1787 #if 30<16 && defined(__ARMEL__)
1788         vrev64.8        ,
1789 #endif
1790         veor            d25,d24
1791         vbsl            d29,d23,d16             @ Ch(e,f,g)
1792         vshr.u64        d24,d18,#28
1793         veor            d26,d25                 @ Sigma1(e)
1794         vadd.i64        d27,d29,d17
1795         vshr.u64        d25,d18,#34
1796         vsli.64         d24,d18,#36
1797         vadd.i64        d27,d26
1798         vshr.u64        d26,d18,#39
1799         vadd.i64        d28,d14
1800         vsli.64         d25,d18,#30
1801         veor            d30,d18,d19
1802         vsli.64         d26,d18,#25
1803         veor            d17,d24,d25
1804         vadd.i64        d27,d28
1805         vbsl            d30,d20,d19             @ Maj(a,b,c)
1806         veor            d17,d26                 @ Sigma0(a)
1807         vadd.i64        d21,d27
1808         vadd.i64        d30,d27
1809         @ vadd.i64      d17,d30
1810         vshr.u64        d24,d21,#14     @ round 31
1811 #if 31<16
1812         vld1.64         {d15},[r1]!     @ handles unaligned
1813 #endif
1814         vshr.u64        d25,d21,#18
1815 #if 31>0
1816          vadd.i64       d17,d30                 @ h+=Maj from the past
1817 #endif
1818         vshr.u64        d26,d21,#41
1819         vld1.64         {d28},[r3,:64]! @ K[i++]
1820         vsli.64         d24,d21,#50
1821         vsli.64         d25,d21,#46
1822         vmov            d29,d21
1823         vsli.64         d26,d21,#23
1824 #if 31<16 && defined(__ARMEL__)
1825         vrev64.8        ,
1826 #endif
1827         veor            d25,d24
1828         vbsl            d29,d22,d23             @ Ch(e,f,g)
1829         vshr.u64        d24,d17,#28
1830         veor            d26,d25                 @ Sigma1(e)
1831         vadd.i64        d27,d29,d16
1832         vshr.u64        d25,d17,#34
1833         vsli.64         d24,d17,#36
1834         vadd.i64        d27,d26
1835         vshr.u64        d26,d17,#39
1836         vadd.i64        d28,d15
1837         vsli.64         d25,d17,#30
1838         veor            d30,d17,d18
1839         vsli.64         d26,d17,#25
1840         veor            d16,d24,d25
1841         vadd.i64        d27,d28
1842         vbsl            d30,d19,d18             @ Maj(a,b,c)
1843         veor            d16,d26                 @ Sigma0(a)
1844         vadd.i64        d20,d27
1845         vadd.i64        d30,d27
1846         @ vadd.i64      d16,d30
1847         bne             .L16_79_neon
1848
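@
@ Epilogue of the NEON block loop: apply the last deferred Maj() addition,
@ reload the previous hash state from [r0] into q12-q15, accumulate the
@ working variables a..h (kept in d16-d23, i.e. q8-q11), and store the
@ updated state back. r1 is then compared with the end-of-input pointer r2,
@ the K512 pointer r3 is rewound by 80 entries * 8 bytes = 640, and the
@ routine loops to .Loop_neon for the next 128-byte block. Roughly, as a
@ sketch only:
@
@       do {
@               a..h = state; run rounds 0..79 on block;
@               state += a..h; block += 128;
@       } while (block != end);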
1849          vadd.i64       d16,d30         @ h+=Maj from the past
1850         vldmia          r0,{d24-d31}    @ load context to temp
1851         vadd.i64        q8,q12          @ vectorized accumulate
1852         vadd.i64        q9,q13
1853         vadd.i64        q10,q14
1854         vadd.i64        q11,q15
1855         vstmia          r0,{d16-d23}    @ save context
1856         teq             r1,r2
1857         sub             r3,#640 @ rewind K512
1858         bne             .Loop_neon
1859
1860         VFP_ABI_POP
1861         bx      lr                              @ .word 0xe12fff1e
1862 .size   sha512_block_data_order_neon,.-sha512_block_data_order_neon
1863 #endif
1864 .asciz  "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
1865 .align  2
1866 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
1867 .comm   OPENSSL_armcap_P,4,4
1868 #endif
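@
@ OPENSSL_armcap_P above is OpenSSL's runtime capability word, declared only
@ for non-kernel builds; there it is presumably what the integer entry point
@ consults to decide whether this NEON path may be taken. Kernel builds omit
@ it and call sha512_block_data_order_neon directly from the glue code.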