;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;;
;;; gf_5vect_dot_prod_avx(len, vec, *g_tbls, **buffs, **dests);
;;;
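;;; Computes five GF(2^8) dot products over the same set of source buffers
;;; in a single pass (e.g. five parity rows of an erasure code).  The
;;; parameter meanings below follow ISA-L's other gf_*vect_dot_prod
;;; kernels and are noted here for reference (assumed, not part of the
;;; original header):
;;;   len    - length of each source/dest buffer in bytes (must be >= 16)
;;;   vec    - number of source buffers
;;;   g_tbls - 5*32*vec bytes of pre-expanded multiply tables
;;;            (e.g. as produced by ec_init_tables)
;;;   buffs  - array of vec source buffer pointers
;;;   dests  - array of 5 destination buffer pointers
;;; Returns 0 on success, 1 if len < 16.
;;;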

%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0  rdi
 %define arg1  rsi
 %define arg2  rdx
 %define arg3  rcx
 %define arg4  r8
 %define arg5  r9

 %define tmp   r11
 %define tmp2  r10
 %define tmp3  r13              ; must be saved and restored
 %define tmp4  r12              ; must be saved and restored
 %define tmp5  r14              ; must be saved and restored
 %define tmp6  r15              ; must be saved and restored
 %define return rax
 %define PS 8
 %define LOG_PS 3

 %define func(x) x: endbranch
 %macro FUNC_SAVE 0
        push    r12
        push    r13
        push    r14
        push    r15
 %endmacro
 %macro FUNC_RESTORE 0
        pop     r15
        pop     r14
        pop     r13
        pop     r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0  rcx
 %define arg1  rdx
 %define arg2  r8
 %define arg3  r9

 %define arg4  r12              ; must be saved, loaded and restored
 %define arg5  r15              ; must be saved and restored
 %define tmp   r11
 %define tmp2  r10
 %define tmp3  r13              ; must be saved and restored
 %define tmp4  r14              ; must be saved and restored
 %define tmp5  rdi              ; must be saved and restored
 %define tmp6  rsi              ; must be saved and restored
 %define return rax
 %define PS 8
 %define LOG_PS 3
 %define stack_size 10*16 + 7*8 ; must be an odd multiple of 8
 %define arg(x) [rsp + stack_size + PS + PS*x]

 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
        alloc_stack     stack_size
        vmovdqa [rsp + 0*16], xmm6
        vmovdqa [rsp + 1*16], xmm7
        vmovdqa [rsp + 2*16], xmm8
        vmovdqa [rsp + 3*16], xmm9
        vmovdqa [rsp + 4*16], xmm10
        vmovdqa [rsp + 5*16], xmm11
        vmovdqa [rsp + 6*16], xmm12
        vmovdqa [rsp + 7*16], xmm13
        vmovdqa [rsp + 8*16], xmm14
        vmovdqa [rsp + 9*16], xmm15
        save_reg        r12, 10*16 + 0*8
        save_reg        r13, 10*16 + 1*8
        save_reg        r14, 10*16 + 2*8
        save_reg        r15, 10*16 + 3*8
        save_reg        rdi, 10*16 + 4*8
        save_reg        rsi, 10*16 + 5*8
        end_prolog
        mov     arg4, arg(4)    ;5th argument is passed on the stack in win64
 %endmacro

 %macro FUNC_RESTORE 0
        vmovdqa xmm6, [rsp + 0*16]
        vmovdqa xmm7, [rsp + 1*16]
        vmovdqa xmm8, [rsp + 2*16]
        vmovdqa xmm9, [rsp + 3*16]
        vmovdqa xmm10, [rsp + 4*16]
        vmovdqa xmm11, [rsp + 5*16]
        vmovdqa xmm12, [rsp + 6*16]
        vmovdqa xmm13, [rsp + 7*16]
        vmovdqa xmm14, [rsp + 8*16]
        vmovdqa xmm15, [rsp + 9*16]
        mov     r12, [rsp + 10*16 + 0*8]
        mov     r13, [rsp + 10*16 + 1*8]
        mov     r14, [rsp + 10*16 + 2*8]
        mov     r15, [rsp + 10*16 + 3*8]
        mov     rdi, [rsp + 10*16 + 4*8]
        mov     rsi, [rsp + 10*16 + 5*8]
        add     rsp, stack_size
 %endmacro
%endif

%define len       arg0
%define vec       arg1
%define mul_array arg2
%define src       arg3
%define dest      arg4
%define ptr       arg5
%define vec_i     tmp2
%define dest1     tmp3
%define dest2     tmp4
%define vskip1    tmp5
%define vskip3    tmp6
%define pos       return


%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Use non-temporal load/store
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif

default rel

[bits 64]
section .text

%define xmask0f  xmm15
%define xgft1_lo xmm14
%define xgft1_hi xmm13
%define xgft2_lo xmm12
%define xgft2_hi xmm11
%define xgft3_lo xmm10
%define xgft3_hi xmm9
%define xgft4_lo xmm8
%define xgft4_hi xmm7


%define x0    xmm0
%define xtmpa xmm1
%define xp1   xmm2
%define xp2   xmm3
%define xp3   xmm4
%define xp4   xmm5
%define xp5   xmm6

align 16
mk_global gf_5vect_dot_prod_avx, function
func(gf_5vect_dot_prod_avx)
        FUNC_SAVE
        sub     len, 16
        jl      .return_fail
        xor     pos, pos
        vmovdqa xmask0f, [mask0f]       ;Load mask of lower nibble in each byte
        mov     vskip1, vec
        imul    vskip1, 32
        mov     vskip3, vec
        imul    vskip3, 96
        sal     vec, LOG_PS             ;vec *= PS. Make vec_i count by PS
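        ;; g_tbls layout implied by the strides computed above (noted here
        ;; for readability; not part of the original source): each of the
        ;; five outputs owns a contiguous block of 32*vec bytes, one
        ;; 32-byte entry per source vector -- 16 bytes of products of the
        ;; coefficient with every low nibble, then 16 bytes with every high
        ;; nibble.  vskip1 (32*vec) steps to the next output's block and
        ;; vskip3 (96*vec) addresses the 4th block directly.  The inner
        ;; loop multiplies each source byte by its coefficient with two
        ;; vpshufb nibble lookups, xors the two partial products together,
        ;; and xors the result into the running parity, since addition in
        ;; GF(2^8) is xor.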
        mov     dest1, [dest]
        mov     dest2, [dest+PS]


.loop16:
        mov     tmp, mul_array
        xor     vec_i, vec_i
        vpxor   xp1, xp1
        vpxor   xp2, xp2
        vpxor   xp3, xp3
        vpxor   xp4, xp4
        vpxor   xp5, xp5


.next_vect:
        mov     ptr, [src+vec_i]
        add     vec_i, PS
        XLDR    x0, [ptr+pos]           ;Get next source vector

        vmovdqu xgft1_lo, [tmp]                 ;Load array Ax{00}, Ax{01}, ..., Ax{0f}
        vmovdqu xgft1_hi, [tmp+16]              ; "     "  Ax{00}, Ax{10}, ..., Ax{f0}
        vmovdqu xgft2_lo, [tmp+vskip1*1]        ;Load array Bx{00}, Bx{01}, ..., Bx{0f}
        vmovdqu xgft2_hi, [tmp+vskip1*1+16]     ; "     "  Bx{00}, Bx{10}, ..., Bx{f0}
        vmovdqu xgft3_lo, [tmp+vskip1*2]        ;Load array Cx{00}, Cx{01}, ..., Cx{0f}
        vmovdqu xgft3_hi, [tmp+vskip1*2+16]     ; "     "  Cx{00}, Cx{10}, ..., Cx{f0}
        vmovdqu xgft4_lo, [tmp+vskip3]          ;Load array Dx{00}, Dx{01}, ..., Dx{0f}
        vmovdqu xgft4_hi, [tmp+vskip3+16]       ; "     "  Dx{00}, Dx{10}, ..., Dx{f0}

        vpand   xtmpa, x0, xmask0f      ;Mask low src nibble in bits 4-0
        vpsraw  x0, x0, 4               ;Shift to put high nibble into bits 4-0
        vpand   x0, x0, xmask0f         ;Mask high src nibble in bits 4-0

        vpshufb xgft1_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft1_hi, xgft1_lo      ;GF add high and low partials
        vpxor   xp1, xgft1_hi           ;xp1 += partial

        vpshufb xgft2_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft2_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft2_hi, xgft2_lo      ;GF add high and low partials
        vpxor   xp2, xgft2_hi           ;xp2 += partial

        vmovdqu xgft1_lo, [tmp+vskip1*4]        ;Load array Ex{00}, Ex{01}, ..., Ex{0f}
        vmovdqu xgft1_hi, [tmp+vskip1*4+16]     ; "     "  Ex{00}, Ex{10}, ..., Ex{f0}
        add     tmp, 32

        vpshufb xgft3_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft3_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft3_hi, xgft3_lo      ;GF add high and low partials
        vpxor   xp3, xgft3_hi           ;xp3 += partial

        vpshufb xgft4_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft4_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft4_hi, xgft4_lo      ;GF add high and low partials
        vpxor   xp4, xgft4_hi           ;xp4 += partial

        vpshufb xgft1_hi, x0            ;Lookup mul table of high nibble
        vpshufb xgft1_lo, xtmpa         ;Lookup mul table of low nibble
        vpxor   xgft1_hi, xgft1_lo      ;GF add high and low partials
        vpxor   xp5, xgft1_hi           ;xp5 += partial

        cmp     vec_i, vec
        jl      .next_vect

        mov     tmp, [dest+2*PS]
        mov     ptr, [dest+3*PS]
        mov     vec_i, [dest+4*PS]

        XSTR    [dest1+pos], xp1
        XSTR    [dest2+pos], xp2
        XSTR    [tmp+pos], xp3
        XSTR    [ptr+pos], xp4
        XSTR    [vec_i+pos], xp5

        add     pos, 16                 ;Loop on 16 bytes at a time
        cmp     pos, len
        jle     .loop16

        lea     tmp, [len + 16]
        cmp     pos, tmp
        je      .return_pass

        ;; Tail len
        mov     pos, len                ;Overlapped offset length-16
        jmp     .loop16                 ;Do one more overlap pass

.return_pass:
        FUNC_RESTORE
        mov     return, 0
        ret

.return_fail:
        FUNC_RESTORE
        mov     return, 1
        ret

endproc_frame

section .data

align 16
mask0f: dq 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f

;;;       func                   core, ver, snum
slversion gf_5vect_dot_prod_avx, 02,   04,  0194
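
;;; For reference, a minimal C caller sketch.  It assumes the prototypes
;;; declared in ISA-L's erasure_code.h (ec_init_tables and
;;; gf_5vect_dot_prod_avx); the function and variable names below are
;;; illustrative only.
;;;
;;;   #include "erasure_code.h"
;;;
;;;   /* Produce 5 parity buffers from k data buffers of len bytes each,
;;;    * using the 5 x k coefficient matrix 'a'.  len must be >= 16. */
;;;   static void encode_5_rows(int len, int k, unsigned char *a,
;;;                             unsigned char **data, unsigned char **parity)
;;;   {
;;;           unsigned char gftbls[5 * 32 * k];  /* expanded multiply tables */
;;;           ec_init_tables(k, 5, a, gftbls);
;;;           gf_5vect_dot_prod_avx(len, k, gftbls, data, parity);
;;;   }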