
Searched refs:VecXor (Results 1–25 of 50), sorted by relevance
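
For context: VecXor is Crypto++'s thin wrapper over the AltiVec/VSX vec_xor builtin, declared in ppc_simd.h for PowerPC builds. A paraphrased sketch of the wrapper (from memory of the header; treat the exact casts and qualifiers as approximate):

    // Sketch of VecXor as declared in Crypto++'s ppc_simd.h (PowerPC only).
    // The cast lets mixed vector types (uint32x4_p, uint64x2_p, ...) combine.
    template <class T1, class T2>
    inline T1 VecXor(const T1 vec1, const T2 vec2)
    {
        return (T1)vec_xor(vec1, (T1)vec2);  // vec_xor is the AltiVec builtin
    }

The non-Crypto++ hits at the bottom (aes-ncbi.c) use their own VecXor helper with the same meaning: lane-wise XOR of two SIMD registers.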


/dports/security/cryptopp/cryptopp-8.6.0/
chacha_simd.cpp
874 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
875 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
876 r2_3 = VecXor(r2_3, r2_0); in ChaCha_OperateKeystream_CORE()
877 r3_3 = VecXor(r3_3, r3_0); in ChaCha_OperateKeystream_CORE()
889 r0_1 = VecXor(r0_1, r0_2); in ChaCha_OperateKeystream_CORE()
890 r1_1 = VecXor(r1_1, r1_2); in ChaCha_OperateKeystream_CORE()
891 r2_1 = VecXor(r2_1, r2_2); in ChaCha_OperateKeystream_CORE()
892 r3_1 = VecXor(r3_1, r3_2); in ChaCha_OperateKeystream_CORE()
904 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
905 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
[all …]
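
These hits are the XOR halves of ChaCha quarter-rounds, run on four rows of state at once: rN_3 = VecXor(rN_3, rN_0) is the d ^= a step and rN_1 = VecXor(rN_1, rN_2) is b ^= c. For reference, a minimal scalar sketch of one standard ChaCha quarter-round (the vectorized code performs the same steps on whole rows):

    #include <cstdint>

    static inline uint32_t rotl32(uint32_t v, unsigned n) {
        return (v << n) | (v >> (32 - n));  // n is always 7..16 here, never 0
    }

    // One ChaCha quarter-round on scalar words. The VecXor hits above map
    // to the two ^= lines: d ^= a (rows 3 and 0) and b ^= c (rows 1 and 2).
    static void quarter_round(uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d) {
        a += b; d ^= a; d = rotl32(d, 16);
        c += d; b ^= c; b = rotl32(b, 12);
        a += b; d ^= a; d = rotl32(d, 8);
        c += d; b ^= c; b = rotl32(b, 7);
    }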
gcm_simd.cpp
575 c1 = VecXor(c1, VecShiftRightOctet<8>(c0)); in GCM_Reduce_VMULL()
576 c1 = VecXor(c1, VecIntelMultiply10(c0, r)); in GCM_Reduce_VMULL()
577 c0 = VecXor(c1, VecShiftLeftOctet<8>(c0)); in GCM_Reduce_VMULL()
579 c2 = VecXor(c2, c0); in GCM_Reduce_VMULL()
580 c2 = VecXor(c2, VecShiftLeftOctet<8>(c1)); in GCM_Reduce_VMULL()
584 return VecXor(c2, c1); in GCM_Reduce_VMULL()
685 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
696 d2 = VecXor(d2, d1); in GCM_AuthenticateBlocks_VMULL()
702 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
713 d1 = VecXor(d1, d2); in GCM_AuthenticateBlocks_VMULL()
[all …]
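
GCM_Reduce_VMULL folds a 256-bit carry-less product back to 128 bits modulo the GHASH polynomial; every fold step is an XOR, which is why VecXor dominates the snippet. Portable stand-ins for the two helpers it combines, assuming a 16-byte block with big-endian lane order (index 0 most significant; the real lane order is target-dependent):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    using Block16 = std::array<uint8_t, 16>;

    // Lane-wise XOR: the portable meaning of VecXor on a 16-byte block.
    static Block16 xor_block(const Block16& a, const Block16& b) {
        Block16 r{};
        for (std::size_t i = 0; i < 16; ++i)
            r[i] = static_cast<uint8_t>(a[i] ^ b[i]);
        return r;
    }

    // Whole-byte shift toward the most significant octet, as in
    // VecShiftLeftOctet<N>; bytes shifted out are dropped, zeros shift in.
    template <unsigned N>
    static Block16 shift_left_octet(const Block16& a) {
        static_assert(N <= 16, "octet shift out of range");
        Block16 r{};
        std::memcpy(r.data(), a.data() + N, 16 - N);
        return r;
    }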
gf2n_simd.cpp
322 using CryptoPP::VecXor;
344 t1 = VecXor(a, t1); in F2N_Multiply_128x128_POWER8()
346 t2 = VecXor(b, t2); in F2N_Multiply_128x128_POWER8()
348 t1 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
349 t1 = VecXor(c1, t1); in F2N_Multiply_128x128_POWER8()
353 c0 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
354 c1 = VecXor(c1, t2); in F2N_Multiply_128x128_POWER8()
368 x0 = VecXor(x0, x1); in F2N_Multiply_256x256_POWER8()
369 y0 = VecXor(y0, y1); in F2N_Multiply_256x256_POWER8()
373 c4 = VecXor(c4, c0); in F2N_Multiply_256x256_POWER8()
[all …]
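
In gf2n_simd.cpp, VecXor is addition in GF(2): F2N_Multiply_256x256_POWER8's x0 = VecXor(x0, x1) and y0 = VecXor(y0, y1) form Karatsuba's (x0 + x1)(y0 + y1) middle term with XOR standing in for +. The products themselves come from the POWER8 polynomial-multiply instruction; the scalar loop below only illustrates the carry-less arithmetic being combined (clmul64 is a hypothetical helper, not library code):

    #include <cstdint>

    // Carry-less (GF(2)[x]) multiply, 64x64 -> 128 bits: partial products
    // are folded with XOR instead of +, exactly like the VecXor folds above.
    static void clmul64(uint64_t a, uint64_t b, uint64_t& hi, uint64_t& lo) {
        hi = lo = 0;
        for (int i = 0; i < 64; ++i) {
            if ((b >> i) & 1) {
                lo ^= a << i;
                if (i) hi ^= a >> (64 - i);
            }
        }
    }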
adv_simd.h
1044 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1046 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1048 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1050 block3 = VecXor(block3, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1058 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1060 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1062 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1086 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1094 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1256 block = VecXor(block, VecLoadBE(xorBlocks)); in AdvancedProcessBlocks128_6x1_ALTIVEC()
[all …]
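
The adv_simd.h hits are the xorBlocks path of the AdvancedProcessBlocks templates: when the caller passes an xorBlocks buffer (e.g. for CBC-style chaining), each cipher block is XORed with it via VecXor(block, VecLoadBE(xorBlocks)). A rough scalar sketch of that control flow (process_blocks and encrypt_block are hypothetical names, not the library's API):

    #include <cstddef>
    #include <cstdint>

    static void xor16(uint8_t* dst, const uint8_t* src) {
        for (int j = 0; j < 16; ++j) dst[j] ^= src[j];
    }

    // If xorBlocks is non-null, XOR it into each output block -- the scalar
    // analogue of block = VecXor(block, VecLoadBE(xorBlocks)) in the header.
    static void process_blocks(const uint8_t* in, const uint8_t* xorBlocks,
                               uint8_t* out, std::size_t blocks,
                               void (*encrypt_block)(const uint8_t*, uint8_t*)) {
        for (std::size_t i = 0; i < blocks; ++i, in += 16, out += 16) {
            encrypt_block(in, out);
            if (xorBlocks) {
                xor16(out, xorBlocks);
                xorBlocks += 16;
            }
        }
    }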
rijndael_simd.cpp
560 block = VecXor(block, k); in POWER8_Enc_Block()
580 block0 = VecXor(block0, k); in POWER8_Enc_6_Blocks()
581 block1 = VecXor(block1, k); in POWER8_Enc_6_Blocks()
582 block2 = VecXor(block2, k); in POWER8_Enc_6_Blocks()
583 block3 = VecXor(block3, k); in POWER8_Enc_6_Blocks()
584 block4 = VecXor(block4, k); in POWER8_Enc_6_Blocks()
585 block5 = VecXor(block5, k); in POWER8_Enc_6_Blocks()
613 block = VecXor(block, k); in POWER8_Dec_Block()
633 block0 = VecXor(block0, k); in POWER8_Dec_6_Blocks()
634 block1 = VecXor(block1, k); in POWER8_Dec_6_Blocks()
[all …]
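
block = VecXor(block, k) is AES AddRoundKey: the whole step is one 128-bit XOR with the round key, and the _6_Blocks variants simply apply it to six blocks in flight to keep the POWER8 pipeline busy. Scalar equivalent:

    #include <cstdint>

    // AddRoundKey: XOR the 16-byte AES state with the round key.
    static void add_round_key(uint8_t state[16], const uint8_t roundkey[16]) {
        for (int i = 0; i < 16; ++i)
            state[i] ^= roundkey[i];
    }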
blake2b_simd.cpp
1113 row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \ in BLAKE2_Compress64_POWER8()
1116 row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \ in BLAKE2_Compress64_POWER8()
1124 row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \ in BLAKE2_Compress64_POWER8()
1127 row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \ in BLAKE2_Compress64_POWER8()
1253 row4l = VecXor(VecLoad64(BLAKE2B_IV+4), VecLoad64(state.t()+0)); in BLAKE2_Compress64_POWER8()
1254 row4h = VecXor(VecLoad64(BLAKE2B_IV+6), VecLoad64(state.f()+0)); in BLAKE2_Compress64_POWER8()
1269 VecStore64LE(state.h()+0, VecXor(h0, VecXor(row1l, row3l)), le_mask); in BLAKE2_Compress64_POWER8()
1270 VecStore64LE(state.h()+2, VecXor(h1, VecXor(row1h, row3h)), le_mask); in BLAKE2_Compress64_POWER8()
1271 VecStore64LE(state.h()+4, VecXor(h2, VecXor(row2l, row4l)), le_mask); in BLAKE2_Compress64_POWER8()
1272 VecStore64LE(state.h()+6, VecXor(h3, VecXor(row2h, row4h)), le_mask); in BLAKE2_Compress64_POWER8()
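
The blake2b_simd.cpp hits are the XOR steps of BLAKE2b's G function, vectorized two columns at a time (row4 ^= row1, row2 ^= row3), plus the h[i] ^= v[i] ^ v[i+8] finalization XORs in the VecStore64LE lines. A scalar sketch of G with the standard rotation constants:

    #include <cstdint>

    static inline uint64_t rotr64(uint64_t v, unsigned n) {
        return (v >> n) | (v << (64 - n));  // n is 16/24/32/63 here, never 0
    }

    // BLAKE2b G function; each VecXor above pairs with one ^ below,
    // applied to two lanes (l/h halves) at once in the vector code.
    static void G(uint64_t& a, uint64_t& b, uint64_t& c, uint64_t& d,
                  uint64_t x, uint64_t y) {
        a = a + b + x; d = rotr64(d ^ a, 32);
        c = c + d;     b = rotr64(b ^ c, 24);
        a = a + b + y; d = rotr64(d ^ a, 16);
        c = c + d;     b = rotr64(b ^ c, 63);
    }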
/dports/emulators/citra-qt5/citra-ac98458e0/externals/cryptopp/cryptopp/
chacha_simd.cpp
874 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
875 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
876 r2_3 = VecXor(r2_3, r2_0); in ChaCha_OperateKeystream_CORE()
877 r3_3 = VecXor(r3_3, r3_0); in ChaCha_OperateKeystream_CORE()
889 r0_1 = VecXor(r0_1, r0_2); in ChaCha_OperateKeystream_CORE()
890 r1_1 = VecXor(r1_1, r1_2); in ChaCha_OperateKeystream_CORE()
891 r2_1 = VecXor(r2_1, r2_2); in ChaCha_OperateKeystream_CORE()
892 r3_1 = VecXor(r3_1, r3_2); in ChaCha_OperateKeystream_CORE()
904 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
905 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
[all …]
gcm_simd.cpp
585 c1 = VecXor(c1, VecShiftRightOctet<8>(c0)); in GCM_Reduce_VMULL()
586 c1 = VecXor(c1, VecIntelMultiply10(c0, r)); in GCM_Reduce_VMULL()
587 c0 = VecXor(c1, VecShiftLeftOctet<8>(c0)); in GCM_Reduce_VMULL()
589 c2 = VecXor(c2, c0); in GCM_Reduce_VMULL()
590 c2 = VecXor(c2, VecShiftLeftOctet<8>(c1)); in GCM_Reduce_VMULL()
594 return VecXor(c2, c1); in GCM_Reduce_VMULL()
695 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
706 d2 = VecXor(d2, d1); in GCM_AuthenticateBlocks_VMULL()
712 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
723 d1 = VecXor(d1, d2); in GCM_AuthenticateBlocks_VMULL()
[all …]
gf2n_simd.cpp
322 using CryptoPP::VecXor;
344 t1 = VecXor(a, t1); in F2N_Multiply_128x128_POWER8()
346 t2 = VecXor(b, t2); in F2N_Multiply_128x128_POWER8()
348 t1 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
349 t1 = VecXor(c1, t1); in F2N_Multiply_128x128_POWER8()
353 c0 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
354 c1 = VecXor(c1, t2); in F2N_Multiply_128x128_POWER8()
368 x0 = VecXor(x0, x1); in F2N_Multiply_256x256_POWER8()
369 y0 = VecXor(y0, y1); in F2N_Multiply_256x256_POWER8()
373 c4 = VecXor(c4, c0); in F2N_Multiply_256x256_POWER8()
[all …]
adv_simd.h
1044 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1046 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1048 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1050 block3 = VecXor(block3, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1058 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1060 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1062 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1086 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1094 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1256 block = VecXor(block, VecLoadBE(xorBlocks)); in AdvancedProcessBlocks128_6x1_ALTIVEC()
[all …]
rijndael_simd.cpp
563 block = VecXor(block, k); in POWER8_Enc_Block()
583 block0 = VecXor(block0, k); in POWER8_Enc_6_Blocks()
584 block1 = VecXor(block1, k); in POWER8_Enc_6_Blocks()
585 block2 = VecXor(block2, k); in POWER8_Enc_6_Blocks()
586 block3 = VecXor(block3, k); in POWER8_Enc_6_Blocks()
587 block4 = VecXor(block4, k); in POWER8_Enc_6_Blocks()
588 block5 = VecXor(block5, k); in POWER8_Enc_6_Blocks()
616 block = VecXor(block, k); in POWER8_Dec_Block()
636 block0 = VecXor(block0, k); in POWER8_Dec_6_Blocks()
637 block1 = VecXor(block1, k); in POWER8_Dec_6_Blocks()
[all …]
blake2b_simd.cpp
1113 row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \ in BLAKE2_Compress64_POWER8()
1116 row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \ in BLAKE2_Compress64_POWER8()
1124 row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \ in BLAKE2_Compress64_POWER8()
1127 row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \ in BLAKE2_Compress64_POWER8()
1253 row4l = VecXor(VecLoad64(BLAKE2B_IV+4), VecLoad64(state.t()+0)); in BLAKE2_Compress64_POWER8()
1254 row4h = VecXor(VecLoad64(BLAKE2B_IV+6), VecLoad64(state.f()+0)); in BLAKE2_Compress64_POWER8()
1269 VecStore64LE(state.h()+0, VecXor(h0, VecXor(row1l, row3l)), le_mask); in BLAKE2_Compress64_POWER8()
1270 VecStore64LE(state.h()+2, VecXor(h1, VecXor(row1h, row3h)), le_mask); in BLAKE2_Compress64_POWER8()
1271 VecStore64LE(state.h()+4, VecXor(h2, VecXor(row2l, row4l)), le_mask); in BLAKE2_Compress64_POWER8()
1272 VecStore64LE(state.h()+6, VecXor(h3, VecXor(row2h, row4h)), le_mask); in BLAKE2_Compress64_POWER8()
/dports/emulators/citra/citra-ac98458e0/externals/cryptopp/cryptopp/
chacha_simd.cpp
874 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
875 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
876 r2_3 = VecXor(r2_3, r2_0); in ChaCha_OperateKeystream_CORE()
877 r3_3 = VecXor(r3_3, r3_0); in ChaCha_OperateKeystream_CORE()
889 r0_1 = VecXor(r0_1, r0_2); in ChaCha_OperateKeystream_CORE()
890 r1_1 = VecXor(r1_1, r1_2); in ChaCha_OperateKeystream_CORE()
891 r2_1 = VecXor(r2_1, r2_2); in ChaCha_OperateKeystream_CORE()
892 r3_1 = VecXor(r3_1, r3_2); in ChaCha_OperateKeystream_CORE()
904 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
905 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
[all …]
gcm_simd.cpp
585 c1 = VecXor(c1, VecShiftRightOctet<8>(c0)); in GCM_Reduce_VMULL()
586 c1 = VecXor(c1, VecIntelMultiply10(c0, r)); in GCM_Reduce_VMULL()
587 c0 = VecXor(c1, VecShiftLeftOctet<8>(c0)); in GCM_Reduce_VMULL()
589 c2 = VecXor(c2, c0); in GCM_Reduce_VMULL()
590 c2 = VecXor(c2, VecShiftLeftOctet<8>(c1)); in GCM_Reduce_VMULL()
594 return VecXor(c2, c1); in GCM_Reduce_VMULL()
695 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
706 d2 = VecXor(d2, d1); in GCM_AuthenticateBlocks_VMULL()
712 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
723 d1 = VecXor(d1, d2); in GCM_AuthenticateBlocks_VMULL()
[all …]
gf2n_simd.cpp
322 using CryptoPP::VecXor;
344 t1 = VecXor(a, t1); in F2N_Multiply_128x128_POWER8()
346 t2 = VecXor(b, t2); in F2N_Multiply_128x128_POWER8()
348 t1 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
349 t1 = VecXor(c1, t1); in F2N_Multiply_128x128_POWER8()
353 c0 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
354 c1 = VecXor(c1, t2); in F2N_Multiply_128x128_POWER8()
368 x0 = VecXor(x0, x1); in F2N_Multiply_256x256_POWER8()
369 y0 = VecXor(y0, y1); in F2N_Multiply_256x256_POWER8()
373 c4 = VecXor(c4, c0); in F2N_Multiply_256x256_POWER8()
[all …]
adv_simd.h
1044 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1046 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1048 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1050 block3 = VecXor(block3, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1058 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1060 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1062 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1086 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1094 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1256 block = VecXor(block, VecLoadBE(xorBlocks)); in AdvancedProcessBlocks128_6x1_ALTIVEC()
[all …]
rijndael_simd.cpp
563 block = VecXor(block, k); in POWER8_Enc_Block()
583 block0 = VecXor(block0, k); in POWER8_Enc_6_Blocks()
584 block1 = VecXor(block1, k); in POWER8_Enc_6_Blocks()
585 block2 = VecXor(block2, k); in POWER8_Enc_6_Blocks()
586 block3 = VecXor(block3, k); in POWER8_Enc_6_Blocks()
587 block4 = VecXor(block4, k); in POWER8_Enc_6_Blocks()
588 block5 = VecXor(block5, k); in POWER8_Enc_6_Blocks()
616 block = VecXor(block, k); in POWER8_Dec_Block()
636 block0 = VecXor(block0, k); in POWER8_Dec_6_Blocks()
637 block1 = VecXor(block1, k); in POWER8_Dec_6_Blocks()
[all …]
blake2b_simd.cpp
1113 row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \ in BLAKE2_Compress64_POWER8()
1116 row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \ in BLAKE2_Compress64_POWER8()
1124 row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \ in BLAKE2_Compress64_POWER8()
1127 row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \ in BLAKE2_Compress64_POWER8()
1253 row4l = VecXor(VecLoad64(BLAKE2B_IV+4), VecLoad64(state.t()+0)); in BLAKE2_Compress64_POWER8()
1254 row4h = VecXor(VecLoad64(BLAKE2B_IV+6), VecLoad64(state.f()+0)); in BLAKE2_Compress64_POWER8()
1269 VecStore64LE(state.h()+0, VecXor(h0, VecXor(row1l, row3l)), le_mask); in BLAKE2_Compress64_POWER8()
1270 VecStore64LE(state.h()+2, VecXor(h1, VecXor(row1h, row3h)), le_mask); in BLAKE2_Compress64_POWER8()
1271 VecStore64LE(state.h()+4, VecXor(h2, VecXor(row2l, row4l)), le_mask); in BLAKE2_Compress64_POWER8()
1272 VecStore64LE(state.h()+6, VecXor(h3, VecXor(row2h, row4h)), le_mask); in BLAKE2_Compress64_POWER8()
/dports/sysutils/fusefs-securefs/securefs-0.12.0/external/cryptopp/
chacha_simd.cpp
874 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
875 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
876 r2_3 = VecXor(r2_3, r2_0); in ChaCha_OperateKeystream_CORE()
877 r3_3 = VecXor(r3_3, r3_0); in ChaCha_OperateKeystream_CORE()
889 r0_1 = VecXor(r0_1, r0_2); in ChaCha_OperateKeystream_CORE()
890 r1_1 = VecXor(r1_1, r1_2); in ChaCha_OperateKeystream_CORE()
891 r2_1 = VecXor(r2_1, r2_2); in ChaCha_OperateKeystream_CORE()
892 r3_1 = VecXor(r3_1, r3_2); in ChaCha_OperateKeystream_CORE()
904 r0_3 = VecXor(r0_3, r0_0); in ChaCha_OperateKeystream_CORE()
905 r1_3 = VecXor(r1_3, r1_0); in ChaCha_OperateKeystream_CORE()
[all …]
gcm_simd.cpp
585 c1 = VecXor(c1, VecShiftRightOctet<8>(c0)); in GCM_Reduce_VMULL()
586 c1 = VecXor(c1, VecIntelMultiply10(c0, r)); in GCM_Reduce_VMULL()
587 c0 = VecXor(c1, VecShiftLeftOctet<8>(c0)); in GCM_Reduce_VMULL()
589 c2 = VecXor(c2, c0); in GCM_Reduce_VMULL()
590 c2 = VecXor(c2, VecShiftLeftOctet<8>(c1)); in GCM_Reduce_VMULL()
594 return VecXor(c2, c1); in GCM_Reduce_VMULL()
695 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
706 d2 = VecXor(d2, d1); in GCM_AuthenticateBlocks_VMULL()
712 d1 = VecXor(d1, x); in GCM_AuthenticateBlocks_VMULL()
723 d1 = VecXor(d1, d2); in GCM_AuthenticateBlocks_VMULL()
[all …]
gf2n_simd.cpp
322 using CryptoPP::VecXor;
344 t1 = VecXor(a, t1); in F2N_Multiply_128x128_POWER8()
346 t2 = VecXor(b, t2); in F2N_Multiply_128x128_POWER8()
348 t1 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
349 t1 = VecXor(c1, t1); in F2N_Multiply_128x128_POWER8()
353 c0 = VecXor(c0, t1); in F2N_Multiply_128x128_POWER8()
354 c1 = VecXor(c1, t2); in F2N_Multiply_128x128_POWER8()
368 x0 = VecXor(x0, x1); in F2N_Multiply_256x256_POWER8()
369 y0 = VecXor(y0, y1); in F2N_Multiply_256x256_POWER8()
373 c4 = VecXor(c4, c0); in F2N_Multiply_256x256_POWER8()
[all …]
adv_simd.h
1044 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1046 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1048 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1050 block3 = VecXor(block3, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1058 block0 = VecXor(block0, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1060 block1 = VecXor(block1, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1062 block2 = VecXor(block2, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1086 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1094 block = VecXor(block, VecLoadBE(xorBlocks)); in NAMESPACE_BEGIN()
1256 block = VecXor(block, VecLoadBE(xorBlocks)); in AdvancedProcessBlocks128_6x1_ALTIVEC()
[all …]
rijndael_simd.cpp
563 block = VecXor(block, k); in POWER8_Enc_Block()
583 block0 = VecXor(block0, k); in POWER8_Enc_6_Blocks()
584 block1 = VecXor(block1, k); in POWER8_Enc_6_Blocks()
585 block2 = VecXor(block2, k); in POWER8_Enc_6_Blocks()
586 block3 = VecXor(block3, k); in POWER8_Enc_6_Blocks()
587 block4 = VecXor(block4, k); in POWER8_Enc_6_Blocks()
588 block5 = VecXor(block5, k); in POWER8_Enc_6_Blocks()
616 block = VecXor(block, k); in POWER8_Dec_Block()
636 block0 = VecXor(block0, k); in POWER8_Dec_6_Blocks()
637 block1 = VecXor(block1, k); in POWER8_Dec_6_Blocks()
[all …]
/dports/biology/sra-tools/sra-tools-2.11.0/ncbi-vdb/libs/krypto/
aes-ncbi.c
939 return AESBCMEMBER(VecXor)(sl, sr); in AESBCMEMBER()
1050 c0 = AESBCMEMBER(VecXor) (state, c1); in AESBCMEMBER()
1067 c1 = AESBCMEMBER(VecXor) (c1, c2); in AESBCMEMBER()
1068 c1 = AESBCMEMBER(VecXor) (c1, c3); in AESBCMEMBER()
1069 c1 = AESBCMEMBER(VecXor) (c1, c0); in AESBCMEMBER()
1110 f4 = AESBCMEMBER(VecXor) (state, r2); in AESBCMEMBER()
1111 f2 = AESBCMEMBER(VecXor) (state, r1); in AESBCMEMBER()
1112 f8 = AESBCMEMBER(VecXor) (r2, r3); in AESBCMEMBER()
1113 state = AESBCMEMBER(VecXor) (f8, r1); in AESBCMEMBER()
1114 f8 = AESBCMEMBER(VecXor) (f8, f2); in AESBCMEMBER()
[all …]
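
The f2/f4/f8 temporaries in aes-ncbi.c look like the xtime-based decomposition of AES MixColumns, where every combining step is an XOR; whether the file factors it exactly this way is an assumption on my part. For comparison, the standard scalar formulation of one MixColumns column:

    #include <cstdint>

    // Multiply by x (i.e. 2) in GF(2^8) with the AES polynomial 0x11B.
    static inline uint8_t xtime(uint8_t x) {
        return static_cast<uint8_t>((x << 1) ^ ((x >> 7) * 0x1B));
    }

    // One MixColumns column via the well-known xtime identity; note that
    // every combining operation is XOR, matching the VecXor calls above.
    static void mix_one_column(uint8_t c[4]) {
        const uint8_t t = static_cast<uint8_t>(c[0] ^ c[1] ^ c[2] ^ c[3]);
        const uint8_t c0 = c[0];
        c[0] ^= static_cast<uint8_t>(t ^ xtime(static_cast<uint8_t>(c[0] ^ c[1])));
        c[1] ^= static_cast<uint8_t>(t ^ xtime(static_cast<uint8_t>(c[1] ^ c[2])));
        c[2] ^= static_cast<uint8_t>(t ^ xtime(static_cast<uint8_t>(c[2] ^ c[3])));
        c[3] ^= static_cast<uint8_t>(t ^ xtime(static_cast<uint8_t>(c[3] ^ c0)));
    }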
/dports/biology/ncbi-vdb/ncbi-vdb-2.11.0/libs/krypto/
aes-ncbi.c
939 return AESBCMEMBER(VecXor)(sl, sr); in AESBCMEMBER()
1050 c0 = AESBCMEMBER(VecXor) (state, c1); in AESBCMEMBER()
1067 c1 = AESBCMEMBER(VecXor) (c1, c2); in AESBCMEMBER()
1068 c1 = AESBCMEMBER(VecXor) (c1, c3); in AESBCMEMBER()
1069 c1 = AESBCMEMBER(VecXor) (c1, c0); in AESBCMEMBER()
1110 f4 = AESBCMEMBER(VecXor) (state, r2); in AESBCMEMBER()
1111 f2 = AESBCMEMBER(VecXor) (state, r1); in AESBCMEMBER()
1112 f8 = AESBCMEMBER(VecXor) (r2, r3); in AESBCMEMBER()
1113 state = AESBCMEMBER(VecXor) (f8, r1); in AESBCMEMBER()
1114 f8 = AESBCMEMBER(VecXor) (f8, f2); in AESBCMEMBER()
[all …]
