FreeBSD/Linux Kernel Cross Reference
sys/crypto/openssl/arm/ghashv8-armx.S


/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from ghashv8-armx.pl. */
#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
.text
.fpu    neon
.code   32
#undef  __thumb2__
.globl  gcm_init_v8
.type   gcm_init_v8,%function
.align  4
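@ gcm_init_v8(Htable,H) -- precompute the hash-key table from the raw
@ GHASH key H: r0 points at the Htable output, r1 at the 128-bit H.
@ The routine stores the "twisted" H in Htable[0], then squares it and
@ stores the packed Karatsuba value and H^2 in Htable[1..2].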
gcm_init_v8:
        vld1.64 {q9},[r1]               @ load input H
        vmov.i8 q11,#0xe1
        vshl.i64        q11,q11,#57             @ 0xc2.0
        vext.8  q3,q9,q9,#8
        vshr.u64        q10,q11,#63
        vdup.32 q9,d18[1]
        vext.8  q8,q10,q11,#8           @ t0=0xc2....01
        vshr.u64        q10,q3,#63
        vshr.s32        q9,q9,#31               @ broadcast carry bit
        vand    q10,q10,q8
        vshl.i64        q3,q3,#1
        vext.8  q10,q10,q10,#8
        vand    q8,q8,q9
        vorr    q3,q3,q10               @ H<<<=1
        veor    q12,q3,q8               @ twisted H
        vst1.64 {q12},[r0]!             @ store Htable[0]

        @ calculate H^2
        vext.8  q8,q12,q12,#8           @ Karatsuba pre-processing
.byte   0xa8,0x0e,0xa8,0xf2     @ pmull q0,q12,q12
        veor    q8,q8,q12
.byte   0xa9,0x4e,0xa9,0xf2     @ pmull2 q2,q12,q12
.byte   0xa0,0x2e,0xa0,0xf2     @ pmull q1,q8,q8

        vext.8  q9,q0,q2,#8             @ Karatsuba post-processing
        veor    q10,q0,q2
        veor    q1,q1,q9
        veor    q1,q1,q10
.byte   0x26,0x4e,0xe0,0xf2     @ pmull q10,q0,q11              @ 1st phase

        vmov    d4,d3           @ Xh|Xm - 256-bit result
        vmov    d3,d0           @ Xm is rotated Xl
        veor    q0,q1,q10

        vext.8  q10,q0,q0,#8            @ 2nd phase
.byte   0x26,0x0e,0xa0,0xf2     @ pmull q0,q0,q11
        veor    q10,q10,q2
        veor    q14,q0,q10

        vext.8  q9,q14,q14,#8           @ Karatsuba pre-processing
        veor    q9,q9,q14
        vext.8  q13,q8,q9,#8            @ pack Karatsuba pre-processed
        vst1.64 {q13,q14},[r0]! @ store Htable[1..2]
        bx      lr
.size   gcm_init_v8,.-gcm_init_v8
.globl  gcm_gmult_v8
.type   gcm_gmult_v8,%function
.align  4
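@ gcm_gmult_v8(Xi,Htable) -- one GF(2^128) multiplication of Xi by the
@ twisted H, i.e. a single GHASH step: r0 points at the 16-byte Xi block
@ (read, updated and written back), r1 at the Htable filled in by
@ gcm_init_v8.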
gcm_gmult_v8:
        vld1.64 {q9},[r0]               @ load Xi
        vmov.i8 q11,#0xe1
        vld1.64 {q12,q13},[r1]  @ load twisted H, ...
        vshl.u64        q11,q11,#57
#ifndef __ARMEB__
        vrev64.8        q9,q9
#endif
        vext.8  q3,q9,q9,#8

.byte   0x86,0x0e,0xa8,0xf2     @ pmull q0,q12,q3               @ H.lo·Xi.lo
        veor    q9,q9,q3                @ Karatsuba pre-processing
.byte   0x87,0x4e,0xa9,0xf2     @ pmull2 q2,q12,q3              @ H.hi·Xi.hi
.byte   0xa2,0x2e,0xaa,0xf2     @ pmull q1,q13,q9               @ (H.lo+H.hi)·(Xi.lo+Xi.hi)

        vext.8  q9,q0,q2,#8             @ Karatsuba post-processing
        veor    q10,q0,q2
        veor    q1,q1,q9
        veor    q1,q1,q10
.byte   0x26,0x4e,0xe0,0xf2     @ pmull q10,q0,q11              @ 1st phase of reduction

        vmov    d4,d3           @ Xh|Xm - 256-bit result
        vmov    d3,d0           @ Xm is rotated Xl
        veor    q0,q1,q10

        vext.8  q10,q0,q0,#8            @ 2nd phase of reduction
.byte   0x26,0x0e,0xa0,0xf2     @ pmull q0,q0,q11
        veor    q10,q10,q2
        veor    q0,q0,q10

#ifndef __ARMEB__
        vrev64.8        q0,q0
#endif
        vext.8  q0,q0,q0,#8
        vst1.64 {q0},[r0]               @ write out Xi

        bx      lr
.size   gcm_gmult_v8,.-gcm_gmult_v8
.globl  gcm_ghash_v8
.type   gcm_ghash_v8,%function
.align  4
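@ gcm_ghash_v8(Xi,Htable,inp,len) -- hash len bytes of input into Xi:
@ r0 points at Xi, r1 at the Htable (twisted H, packed Karatsuba value,
@ H^2), r2 at the input stream, r3 holds the byte count.  The main loop
@ consumes two 16-byte blocks per iteration; a trailing single block is
@ handled at .Lodd_tail_v8.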
gcm_ghash_v8:
        vstmdb  sp!,{d8,d9,d10,d11,d12,d13,d14,d15}             @ 32-bit ABI says so
        vld1.64 {q0},[r0]               @ load [rotated] Xi
                                        @ "[rotated]" means that
                                        @ loaded value would have
                                        @ to be rotated in order to
                                        @ make it appear as in
                                        @ algorithm specification
        subs    r3,r3,#32               @ see if r3 is 32 or larger
        mov     r12,#16         @ r12 is used as post-
                                        @ increment for input pointer;
                                        @ as loop is modulo-scheduled
                                        @ r12 is zeroed just in time
                                        @ to preclude overstepping
                                        @ inp[len], which means that
                                        @ last block[s] are actually
                                        @ loaded twice, but last
                                        @ copy is not processed
        vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
        vmov.i8 q11,#0xe1
        vld1.64 {q14},[r1]
        moveq   r12,#0                  @ is it time to zero r12?
        vext.8  q0,q0,q0,#8             @ rotate Xi
        vld1.64 {q8},[r2]!      @ load [rotated] I[0]
        vshl.u64        q11,q11,#57             @ compose 0xc2.0 constant
#ifndef __ARMEB__
        vrev64.8        q8,q8
        vrev64.8        q0,q0
#endif
        vext.8  q3,q8,q8,#8             @ rotate I[0]
        blo     .Lodd_tail_v8           @ r3 was less than 32
        vld1.64 {q9},[r2],r12   @ load [rotated] I[1]
#ifndef __ARMEB__
        vrev64.8        q9,q9
#endif
        vext.8  q7,q9,q9,#8
        veor    q3,q3,q0                @ I[i]^=Xi
.byte   0x8e,0x8e,0xa8,0xf2     @ pmull q4,q12,q7               @ H·Ii+1
        veor    q9,q9,q7                @ Karatsuba pre-processing
.byte   0x8f,0xce,0xa9,0xf2     @ pmull2 q6,q12,q7
        b       .Loop_mod2x_v8

.align  4
.Loop_mod2x_v8:
        vext.8  q10,q3,q3,#8
        subs    r3,r3,#32               @ is there more data?
.byte   0x86,0x0e,0xac,0xf2     @ pmull q0,q14,q3               @ H^2.lo·Xi.lo
        movlo   r12,#0                  @ is it time to zero r12?

.byte   0xa2,0xae,0xaa,0xf2     @ pmull q5,q13,q9
        veor    q10,q10,q3              @ Karatsuba pre-processing
.byte   0x87,0x4e,0xad,0xf2     @ pmull2 q2,q14,q3              @ H^2.hi·Xi.hi
        veor    q0,q0,q4                @ accumulate
.byte   0xa5,0x2e,0xab,0xf2     @ pmull2 q1,q13,q10             @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
        vld1.64 {q8},[r2],r12   @ load [rotated] I[i+2]

        veor    q2,q2,q6
        moveq   r12,#0                  @ is it time to zero r12?
        veor    q1,q1,q5

        vext.8  q9,q0,q2,#8             @ Karatsuba post-processing
        veor    q10,q0,q2
        veor    q1,q1,q9
        vld1.64 {q9},[r2],r12   @ load [rotated] I[i+3]
#ifndef __ARMEB__
        vrev64.8        q8,q8
#endif
        veor    q1,q1,q10
.byte   0x26,0x4e,0xe0,0xf2     @ pmull q10,q0,q11              @ 1st phase of reduction

#ifndef __ARMEB__
        vrev64.8        q9,q9
#endif
        vmov    d4,d3           @ Xh|Xm - 256-bit result
        vmov    d3,d0           @ Xm is rotated Xl
        vext.8  q7,q9,q9,#8
        vext.8  q3,q8,q8,#8
        veor    q0,q1,q10
.byte   0x8e,0x8e,0xa8,0xf2     @ pmull q4,q12,q7               @ H·Ii+1
        veor    q3,q3,q2                @ accumulate q3 early

        vext.8  q10,q0,q0,#8            @ 2nd phase of reduction
.byte   0x26,0x0e,0xa0,0xf2     @ pmull q0,q0,q11
        veor    q3,q3,q10
        veor    q9,q9,q7                @ Karatsuba pre-processing
        veor    q3,q3,q0
.byte   0x8f,0xce,0xa9,0xf2     @ pmull2 q6,q12,q7
        bhs     .Loop_mod2x_v8          @ there was at least 32 more bytes

        veor    q2,q2,q10
        vext.8  q3,q8,q8,#8             @ re-construct q3
        adds    r3,r3,#32               @ re-construct r3
        veor    q0,q0,q2                @ re-construct q0
        beq     .Ldone_v8               @ is r3 zero?
.Lodd_tail_v8:
        vext.8  q10,q0,q0,#8
        veor    q3,q3,q0                @ inp^=Xi
        veor    q9,q8,q10               @ q9 is rotated inp^Xi

.byte   0x86,0x0e,0xa8,0xf2     @ pmull q0,q12,q3               @ H.lo·Xi.lo
        veor    q9,q9,q3                @ Karatsuba pre-processing
.byte   0x87,0x4e,0xa9,0xf2     @ pmull2 q2,q12,q3              @ H.hi·Xi.hi
.byte   0xa2,0x2e,0xaa,0xf2     @ pmull q1,q13,q9               @ (H.lo+H.hi)·(Xi.lo+Xi.hi)

        vext.8  q9,q0,q2,#8             @ Karatsuba post-processing
        veor    q10,q0,q2
        veor    q1,q1,q9
        veor    q1,q1,q10
.byte   0x26,0x4e,0xe0,0xf2     @ pmull q10,q0,q11              @ 1st phase of reduction

        vmov    d4,d3           @ Xh|Xm - 256-bit result
        vmov    d3,d0           @ Xm is rotated Xl
        veor    q0,q1,q10

        vext.8  q10,q0,q0,#8            @ 2nd phase of reduction
.byte   0x26,0x0e,0xa0,0xf2     @ pmull q0,q0,q11
        veor    q10,q10,q2
        veor    q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
        vrev64.8        q0,q0
#endif
        vext.8  q0,q0,q0,#8
        vst1.64 {q0},[r0]               @ write out Xi

        vldmia  sp!,{d8,d9,d10,d11,d12,d13,d14,d15}             @ 32-bit ABI says so
        bx      lr
.size   gcm_ghash_v8,.-gcm_ghash_v8
.byte   71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align  2
.align  2
#endif
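
For reference, the C-level view of the three entry points above, as consumed by OpenSSL's GCM glue code (gcm128.c), is roughly as follows. The typedefs below are minimal stand-ins for OpenSSL's internal u8/u64/u128 types, and the exact prototypes should be treated as an approximation rather than part of this file:

    #include <stddef.h>
    #include <stdint.h>

    /* Minimal stand-ins for OpenSSL's internal typedefs (assumption). */
    typedef uint8_t  u8;
    typedef uint64_t u64;
    typedef struct { uint64_t hi, lo; } u128;

    /* Entry points implemented by this file. */
    void gcm_init_v8(u128 Htable[16], const u64 H[2]);   /* precompute table   */
    void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]); /* Xi = Xi * H        */
    void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16],
                      const u8 *inp, size_t len);        /* hash len bytes     */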
