/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from armv4-gf2m.pl. */
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif
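@ mul_1x1_ialu: 32x32->64-bit carry-less (GF(2)[x]) multiplication.
@ Inputs:  r1 = a, r0 = b, r12 = 7<<2 (index mask), sp -> 32-byte tab[8].
@ Output:  product in r4:r5 (r4 = high word, r5 = low word).
@ The top two bits of a are masked off while tab[] is built and are
@ folded back into the result near the end of the routine.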
.type	mul_1x1_ialu,%function
.align	5
mul_1x1_ialu:
	mov	r4,#0
	bic	r5,r1,#3<<30	@ a1=a&0x3fffffff
	str	r4,[sp,#0]	@ tab[0]=0
	add	r6,r5,r5	@ a2=a1<<1
	str	r5,[sp,#4]	@ tab[1]=a1
	eor	r7,r5,r6	@ a1^a2
	str	r6,[sp,#8]	@ tab[2]=a2
	mov	r8,r5,lsl#2	@ a4=a1<<2
	str	r7,[sp,#12]	@ tab[3]=a1^a2
	eor	r9,r5,r8	@ a1^a4
	str	r8,[sp,#16]	@ tab[4]=a4
	eor	r4,r6,r8	@ a2^a4
	str	r9,[sp,#20]	@ tab[5]=a1^a4
	eor	r7,r7,r8	@ a1^a2^a4
	str	r4,[sp,#24]	@ tab[6]=a2^a4
	and	r8,r12,r0,lsl#2
	str	r7,[sp,#28]	@ tab[7]=a1^a2^a4
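@ tab[i] now holds the GF(2)[x] product of (a & 0x3fffffff) and i, for
@ i = 0..7; b is consumed three bits at a time by the lookups below.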

	and	r9,r12,r0,lsr#1
	ldr	r5,[sp,r8]	@ tab[b & 0x7]
	and	r8,r12,r0,lsr#4
	ldr	r7,[sp,r9]	@ tab[b >> 3 & 0x7]
	and	r9,r12,r0,lsr#7
	ldr	r6,[sp,r8]	@ tab[b >> 6 & 0x7]
	eor	r5,r5,r7,lsl#3	@ stall
	mov	r4,r7,lsr#29
	ldr	r7,[sp,r9]	@ tab[b >> 9 & 0x7]

	and	r8,r12,r0,lsr#10
	eor	r5,r5,r6,lsl#6
	eor	r4,r4,r6,lsr#26
	ldr	r6,[sp,r8]	@ tab[b >> 12 & 0x7]

	and	r9,r12,r0,lsr#13
	eor	r5,r5,r7,lsl#9
	eor	r4,r4,r7,lsr#23
	ldr	r7,[sp,r9]	@ tab[b >> 15 & 0x7]

	and	r8,r12,r0,lsr#16
	eor	r5,r5,r6,lsl#12
	eor	r4,r4,r6,lsr#20
	ldr	r6,[sp,r8]	@ tab[b >> 18 & 0x7]

	and	r9,r12,r0,lsr#19
	eor	r5,r5,r7,lsl#15
	eor	r4,r4,r7,lsr#17
	ldr	r7,[sp,r9]	@ tab[b >> 21 & 0x7]

	and	r8,r12,r0,lsr#22
	eor	r5,r5,r6,lsl#18
	eor	r4,r4,r6,lsr#14
	ldr	r6,[sp,r8]	@ tab[b >> 24 & 0x7]

	and	r9,r12,r0,lsr#25
	eor	r5,r5,r7,lsl#21
	eor	r4,r4,r7,lsr#11
	ldr	r7,[sp,r9]	@ tab[b >> 27 & 0x7]

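@ Bits 30 and 31 of a were cleared when tab[] was built; their
@ contributions (b shifted left by 30 and by 31) are xored in
@ conditionally below.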
	tst	r1,#1<<30
	and	r8,r12,r0,lsr#28
	eor	r5,r5,r6,lsl#24
	eor	r4,r4,r6,lsr#8
	ldr	r6,[sp,r8]	@ tab[b >> 30]

#ifdef	__thumb2__
	itt	ne
#endif
	eorne	r5,r5,r0,lsl#30
	eorne	r4,r4,r0,lsr#2
	tst	r1,#1<<31
	eor	r5,r5,r7,lsl#27
	eor	r4,r4,r7,lsr#5
#ifdef	__thumb2__
	itt	ne
#endif
	eorne	r5,r5,r0,lsl#31
	eorne	r4,r4,r0,lsr#1
	eor	r5,r5,r6,lsl#30
	eor	r4,r4,r6,lsr#2

	mov	pc,lr
.size	mul_1x1_ialu,.-mul_1x1_ialu
.globl	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
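@ bn_GF2m_mul_2x2: 64x64->128-bit carry-less multiplication,
@ r[3:0] = (a1:a0) * (b1:b0) in GF(2)[x].
@ Arguments: r0 = r (four output words), r1 = a1, r2 = a0, r3 = b1,
@ and b0 is the fifth argument, passed on the stack.
@ The integer path uses Karatsuba: three mul_1x1_ialu calls compute
@ a1*b1, a0*b0 and (a1^a0)*(b1^b0), which are then combined below.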
bn_GF2m_mul_2x2:
#if __ARM_MAX_ARCH__>=7
	stmdb	sp!,{r10,lr}
	ldr	r12,.LOPENSSL_armcap
	adr	r10,.LOPENSSL_armcap
	ldr	r12,[r12,r10]
#ifdef	__APPLE__
	ldr	r12,[r12]
#endif
	tst	r12,#ARMV7_NEON
	itt	ne
	ldrne	r10,[sp],#8
	bne	.LNEON
	stmdb	sp!,{r4,r5,r6,r7,r8,r9}
#else
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
#endif
	mov	r10,r0			@ reassign 1st argument
	mov	r0,r3			@ r0=b1
	sub	r7,sp,#36
	mov	r8,sp
	and	r7,r7,#-32
	ldr	r3,[sp,#32]		@ load b0
	mov	r12,#7<<2
	mov	sp,r7			@ allocate tab[8]
	str	r8,[r7,#32]
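@ sp now points at a 32-byte-aligned scratch frame holding tab[8];
@ the caller's sp is kept at [sp,#32] and restored after the last
@ multiplication.  r12 = 7<<2 is the index mask mul_1x1_ialu expects.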

	bl	mul_1x1_ialu		@ a1·b1
	str	r5,[r10,#8]
	str	r4,[r10,#12]

	eor	r0,r0,r3		@ flip b0 and b1
	eor	r1,r1,r2		@ flip a0 and a1
	eor	r3,r3,r0
	eor	r2,r2,r1
	eor	r0,r0,r3
	eor	r1,r1,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	r5,[r10]
	str	r4,[r10,#4]

	eor	r1,r1,r2
	eor	r0,r0,r3
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
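@ Karatsuba fix-up: xor the middle product together with a1·b1 and
@ a0·b0 into the two middle result words r[1] and r[2].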
	ldmia	r10,{r6,r7,r8,r9}
	eor	r5,r5,r4
	ldr	sp,[sp,#32]		@ destroy tab[8]
	eor	r4,r4,r7
	eor	r5,r5,r6
	eor	r4,r4,r8
	eor	r5,r5,r9
	eor	r4,r4,r9
	str	r4,[r10,#8]
	eor	r5,r5,r4
	str	r5,[r10,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.align	5
.LNEON:
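@ NEON path: the 64x64-bit carry-less product is built from vmull.p8
@ (8x8-bit polynomial) multiplies of byte-rotated copies of the operands;
@ the partial products are masked, realigned and xored together.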
	ldr	r12, [sp]		@ 5th argument
	vmov	d26, r2, r1
	vmov	d27, r12, r3
	vmov.i64	d28, #0x0000ffffffffffff
	vmov.i64	d29, #0x00000000ffffffff
	vmov.i64	d30, #0x000000000000ffff

	vext.8	d2, d26, d26, #1	@ A1
	vmull.p8	q1, d2, d27	@ F = A1*B
	vext.8	d0, d27, d27, #1	@ B1
	vmull.p8	q0, d26, d0	@ E = A*B1
	vext.8	d4, d26, d26, #2	@ A2
	vmull.p8	q2, d4, d27	@ H = A2*B
	vext.8	d16, d27, d27, #2	@ B2
	vmull.p8	q8, d26, d16	@ G = A*B2
	vext.8	d6, d26, d26, #3	@ A3
	veor	q1, q1, q0		@ L = E + F
	vmull.p8	q3, d6, d27	@ J = A3*B
	vext.8	d0, d27, d27, #3	@ B3
	veor	q2, q2, q8		@ M = G + H
	vmull.p8	q0, d26, d0	@ I = A*B3
	veor	d2, d2, d3		@ t0 = (L) (P0 + P1) << 8
	vand	d3, d3, d28
	vext.8	d16, d27, d27, #4	@ B4
	veor	d4, d4, d5		@ t1 = (M) (P2 + P3) << 16
	vand	d5, d5, d29
	vmull.p8	q8, d26, d16	@ K = A*B4
	veor	q3, q3, q0		@ N = I + J
	veor	d2, d2, d3
	veor	d4, d4, d5
	veor	d6, d6, d7		@ t2 = (N) (P4 + P5) << 24
	vand	d7, d7, d30
	vext.8	q1, q1, q1, #15
	veor	d16, d16, d17		@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d17, #0
	vext.8	q2, q2, q2, #14
	veor	d6, d6, d7
	vmull.p8	q0, d26, d27	@ D = A*B
	vext.8	q8, q8, q8, #12
	vext.8	q3, q3, q3, #13
	veor	q1, q1, q2
	veor	q3, q3, q8
	veor	q0, q0, q1
	veor	q0, q0, q3

	vst1.32	{q0}, [r0]
	bx	lr			@ bx lr
#endif
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.
#endif
.byte	71,70,40,50,94,109,41,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
@ "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align	2
.align	5

#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif