Searched refs: b1 (Results 1 – 25 of 180) sorted by relevance

/linux/arch/arm64/crypto/
sm4-ce-asm.h
46 rev64 b1.4s, b1.4s; \
48 ext b1.16b, b1.16b, b1.16b, #8; \
50 rev32 b1.16b, b1.16b; \
54 rev32 b1.16b, b1.16b; \
91 rev64 b1.4s, b1.4s; \
95 ext b1.16b, b1.16b, b1.16b, #8; \
99 rev32 b1.16b, b1.16b; \
105 rev32 b1.16b, b1.16b; \
176 rev64 b1.4s, b1.4s; \
184 ext b1.16b, b1.16b, b1.16b, #8; \
[all …]
sm4-neon-core.S
137 ROUND4(0, b0, b1, b2, b3); \
138 ROUND4(1, b1, b2, b3, b0); \
139 ROUND4(2, b2, b3, b0, b1); \
140 ROUND4(3, b3, b0, b1, b2); \
145 rev32 b1.16b, b1.16b; \
149 rotate_clockwise_4x4(b0, b1, b2, b3); \
156 rev32 b1.16b, b1.16b; \
159 SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
222 rev32 b1.16b, b1.16b; \
235 ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \
[all …]
aes-neonbs-core.S
27 eor \b2, \b2, \b1
38 eor \b3, \b3, \b1
39 eor \b1, \b1, \b5
44 eor \b1, \b1, \b4
47 eor \b6, \b6, \b1
48 eor \b1, \b1, \b5
57 eor \b1, \b1, \b7
60 eor \b1, \b1, \b3
68 eor \b1, \b1, \b4
72 eor \b1, \b1, \b5
[all …]
/linux/crypto/
aes_generic.c
1179 u32 b0[4], b1[4]; in crypto_aes_encrypt() local
1198 f_nround(b1, b0, kp); in crypto_aes_encrypt()
1199 f_nround(b0, b1, kp); in crypto_aes_encrypt()
1200 f_nround(b1, b0, kp); in crypto_aes_encrypt()
1201 f_nround(b0, b1, kp); in crypto_aes_encrypt()
1202 f_nround(b1, b0, kp); in crypto_aes_encrypt()
1203 f_nround(b0, b1, kp); in crypto_aes_encrypt()
1204 f_nround(b1, b0, kp); in crypto_aes_encrypt()
1205 f_nround(b0, b1, kp); in crypto_aes_encrypt()
1206 f_nround(b1, b0, kp); in crypto_aes_encrypt()
[all …]
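The aes_generic.c hits above show the cipher state bouncing between two four-word buffers, b0 and b1, each round reading one and writing the other so it never clobbers its own input. A minimal sketch of that ping-pong pattern, with a hypothetical round_fn() standing in for the kernel's f_nround() macro:

#include <stdint.h>
#include <string.h>

/* Hypothetical round: reads `in`, writes `out`, consumes four key words.
 * A placeholder for the kernel's f_nround()/f_lround() macros. */
static void round_fn(uint32_t out[4], const uint32_t in[4], const uint32_t *rk)
{
	for (int i = 0; i < 4; i++)
		out[i] = in[i] ^ rk[i];
}

/* Run an even number of rounds, alternating the two state buffers. */
static void run_rounds(uint32_t state[4], const uint32_t *rk, int rounds)
{
	uint32_t b0[4], b1[4];

	memcpy(b0, state, sizeof(b0));
	for (int r = 0; r < rounds; r += 2) {
		round_fn(b1, b0, rk); rk += 4;
		round_fn(b0, b1, rk); rk += 4;
	}
	memcpy(state, b0, sizeof(b0));
}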
xor.c
83 do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2) in do_xor_speed() argument
99 tmpl->do_2(BENCH_SIZE, b1, b2); in do_xor_speed()
121 void *b1, *b2; in calibrate_xor_blocks() local
133 b1 = (void *) __get_free_pages(GFP_KERNEL, 2); in calibrate_xor_blocks()
134 if (!b1) { in calibrate_xor_blocks()
138 b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE; in calibrate_xor_blocks()
145 #define xor_speed(templ) do_xor_speed((templ), b1, b2) in calibrate_xor_blocks()
160 free_pages((unsigned long)b1, 2); in calibrate_xor_blocks()
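calibrate_xor_blocks() above times each xor_block_template against a pair of scratch buffers and keeps the fastest one. A rough userspace analogue of that calibration loop, with a hypothetical template struct and clock()-based timing in place of the kernel's jiffies loop:

#include <time.h>

#define BENCH_SIZE 4096

struct xor_template {
	const char *name;
	void (*do_2)(unsigned long bytes, unsigned long *p1, const unsigned long *p2);
};

/* Run one candidate for ~0.1 s over buffers b1/b2 and return how many
 * BENCH_SIZE-byte passes it completed; more passes means a faster template. */
static unsigned long bench_one(const struct xor_template *t,
			       unsigned long *b1, unsigned long *b2)
{
	clock_t end = clock() + CLOCKS_PER_SEC / 10;
	unsigned long passes = 0;

	while (clock() < end) {
		t->do_2(BENCH_SIZE, b1, b2);
		passes++;
	}
	return passes;
}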
/linux/drivers/atm/
fore200e.h
71 #define BITFIELD2(b1, b2) b1; b2; argument
72 #define BITFIELD3(b1, b2, b3) b1; b2; b3; argument
73 #define BITFIELD4(b1, b2, b3, b4) b1; b2; b3; b4; argument
74 #define BITFIELD5(b1, b2, b3, b4, b5) b1; b2; b3; b4; b5; argument
75 #define BITFIELD6(b1, b2, b3, b4, b5, b6) b1; b2; b3; b4; b5; b6; argument
77 #define BITFIELD2(b1, b2) b2; b1; argument
78 #define BITFIELD3(b1, b2, b3) b3; b2; b1; argument
79 #define BITFIELD4(b1, b2, b3, b4) b4; b3; b2; b1; argument
80 #define BITFIELD5(b1, b2, b3, b4, b5) b5; b4; b3; b2; b1; argument
81 #define BITFIELD6(b1, b2, b3, b4, b5, b6) b6; b5; b4; b3; b2; b1; argument
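The fore200e.h BITFIELDn macros above emit their arguments in opposite order under the two bitfield layouts, so a single struct definition describes the adapter's registers on both big- and little-endian hosts. A standalone sketch of the same trick; the field names and the _BIG_ENDIAN_BITFIELDS switch here are illustrative, not the driver's:

#include <stdint.h>

#ifdef _BIG_ENDIAN_BITFIELDS
#define BITFIELD2(b1, b2)  b1; b2;
#else
#define BITFIELD2(b1, b2)  b2; b1;
#endif

/* One definition serves both layouts; the macro flips the declaration order. */
struct hw_status_word {
	BITFIELD2(uint32_t length : 24,
		  uint32_t flags  :  8)
};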
/linux/Documentation/arch/arm64/
elf_hwcaps.rst
252 Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.
267 Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.
300 Functionality implied by ID_AA64SMFR0_EL1.B16B16 == 0b1
303 Functionality implied by ID_AA64SMFR0_EL1.F16F16 == 0b1
330 Functionality implied by ID_AA64FPFR0_EL1.F8CVT == 0b1.
333 Functionality implied by ID_AA64FPFR0_EL1.F8FMA == 0b1.
336 Functionality implied by ID_AA64FPFR0_EL1.F8DP4 == 0b1.
339 Functionality implied by ID_AA64FPFR0_EL1.F8DP2 == 0b1.
348 Functionality implied by ID_AA64SMFR0_EL1.LUTv2 == 0b1.
351 Functionality implied by ID_AA64SMFR0_EL1.F8F16 == 0b1.
[all …]
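Each elf_hwcaps.rst entry ties a userspace hwcap bit to an ID register field reading 0b1. A small sketch of how a program might probe one of them on arm64, assuming the HWCAP2_SME_F64F64 name from the arm64 uapi hwcap header:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* arm64 uapi HWCAP2_* definitions */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Set by the kernel only when ID_AA64SMFR0_EL1.F64F64 == 0b1
	 * and the feature is usable from EL0. */
	if (hwcap2 & HWCAP2_SME_F64F64)
		puts("SME F64F64 supported");
	else
		puts("SME F64F64 not supported");
	return 0;
}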
booting.rst
221 - SCR_EL3.HCE (bit 8) must be initialised to 0b1.
256 - SCR_EL3.APK (bit 16) must be initialised to 0b1
257 - SCR_EL3.API (bit 17) must be initialised to 0b1
261 - HCR_EL2.APK (bit 40) must be initialised to 0b1
262 - HCR_EL2.API (bit 41) must be initialised to 0b1
292 - SCR_EL3.HXEn (bit 38) must be initialised to 0b1.
308 - CPTR_EL3.EZ (bit 8) must be initialised to 0b1.
326 - CPTR_EL3.ESM (bit 12) must be initialised to 0b1.
366 - SCR_EL3.ATA (bit 26) must be initialised to 0b1.
370 - HCR_EL2.ATA (bit 56) must be initialised to 0b1.
[all …]
/linux/drivers/isdn/mISDN/
dsp_biquad.h
19 int32_t b1; member
27 int32_t gain, int32_t a1, int32_t a2, int32_t b1, int32_t b2) in biquad2_init() argument
32 bq->b1 = b1; in biquad2_init()
45 y = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2; in biquad2()
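The dsp_biquad.h lines show a second-order IIR (biquad) section: a new intermediate value z0 is formed from the input and the two delayed states via gain/a1/a2, and the output adds the same delays scaled by b1 and b2 (an implicit b0 of 1). A floating-point sketch of that structure; the mISDN code does the same thing in fixed point with shifts:

struct biquad2 {
	double gain, a1, a2;	/* feedback coefficients */
	double b1, b2;		/* feedforward coefficients */
	double z1, z2;		/* delayed intermediate states */
};

/* Push one sample through the section and return the filtered output. */
static double biquad2_step(struct biquad2 *bq, double x)
{
	double z0 = x * bq->gain + bq->z1 * bq->a1 + bq->z2 * bq->a2;
	double y  = z0 + bq->z1 * bq->b1 + bq->z2 * bq->b2;

	bq->z2 = bq->z1;
	bq->z1 = z0;
	return y;
}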
/linux/block/
blk-integrity.c
123 struct blk_integrity *b1 = &gd1->queue->integrity; in blk_integrity_compare() local
126 if (!b1->profile && !b2->profile) in blk_integrity_compare()
129 if (!b1->profile || !b2->profile) in blk_integrity_compare()
132 if (b1->interval_exp != b2->interval_exp) { in blk_integrity_compare()
135 1 << b1->interval_exp, 1 << b2->interval_exp); in blk_integrity_compare()
139 if (b1->tuple_size != b2->tuple_size) { in blk_integrity_compare()
142 b1->tuple_size, b2->tuple_size); in blk_integrity_compare()
146 if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { in blk_integrity_compare()
149 b1->tag_size, b2->tag_size); in blk_integrity_compare()
153 if (b1->profile != b2->profile) { in blk_integrity_compare()
[all …]
/linux/fs/f2fs/
hash.c
28 __u32 b0 = buf[0], b1 = buf[1]; in TEA_transform() local
34 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); in TEA_transform()
35 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); in TEA_transform()
39 buf[1] += b1; in TEA_transform()
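TEA_transform() folds the name buffer through rounds of the Tiny Encryption Algorithm's mixing function; the two += lines above are the classic TEA round. A standalone C sketch reconstructed from the lines shown, with the round count left as a parameter:

#include <stdint.h>

#define TEA_DELTA 0x9E3779B9U

static void tea_transform(uint32_t buf[2], const uint32_t in[4], int rounds)
{
	uint32_t sum = 0;
	uint32_t b0 = buf[0], b1 = buf[1];
	uint32_t a = in[0], b = in[1], c = in[2], d = in[3];

	while (rounds-- > 0) {
		sum += TEA_DELTA;
		b0 += ((b1 << 4) + a) ^ (b1 + sum) ^ ((b1 >> 5) + b);
		b1 += ((b0 << 4) + c) ^ (b0 + sum) ^ ((b0 >> 5) + d);
	}

	/* Feed the mixed words back into the running hash state. */
	buf[0] += b0;
	buf[1] += b1;
}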
/linux/arch/arm/include/asm/
xor.h
26 : "=r" (src), "=r" (b1), "=r" (b2) \
28 __XOR(a1, b1); __XOR(a2, b2);
32 : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
34 __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
55 register unsigned int b1 __asm__("r8"); in xor_arm4regs_2()
77 register unsigned int b1 __asm__("r8"); in xor_arm4regs_3()
99 register unsigned int b1 __asm__("ip"); in xor_arm4regs_4()
121 register unsigned int b1 __asm__("ip"); in xor_arm4regs_5()
/linux/fs/reiserfs/
hashes.c
28 u32 b0, b1; \
31 b1 = h1; \
36 b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); \
37 b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); \
41 h1 += b1; \
/linux/drivers/crypto/nx/
nx-aes-ccm.c
164 u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; in generate_pat() local
192 b1 = nx_ctx->priv.ccm.iauth_tag; in generate_pat()
199 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; in generate_pat()
203 b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; in generate_pat()
216 if (b1) { in generate_pat()
217 memset(b1, 0, 16); in generate_pat()
219 *(u16 *)b1 = assoclen; in generate_pat()
220 scatterwalk_map_and_copy(b1 + 2, req->src, 0, in generate_pat()
223 *(u16 *)b1 = (u16)(0xfffe); in generate_pat()
224 *(u32 *)&b1[2] = assoclen; in generate_pat()
[all …]
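generate_pat() above builds CCM's B1 block, which prefixes the associated data with a length encoding: short lengths take two bytes, longer ones a 0xff 0xfe marker followed by a four-byte length (RFC 3610). A byte-wise sketch of that encoding; lengths are big-endian on the wire, whereas the nx driver can rely on the host byte order:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Write the associated-data length at the start of B1 and return how many
 * bytes it used. The 2^32-and-up form (0xff 0xff + 8 bytes) is omitted. */
static size_t ccm_encode_adata_len(uint8_t b1[16], uint64_t assoclen)
{
	memset(b1, 0, 16);

	if (assoclen < 0xff00) {		/* below 2^16 - 2^8 */
		b1[0] = assoclen >> 8;
		b1[1] = assoclen & 0xff;
		return 2;
	}

	b1[0] = 0xff;
	b1[1] = 0xfe;
	b1[2] = (assoclen >> 24) & 0xff;
	b1[3] = (assoclen >> 16) & 0xff;
	b1[4] = (assoclen >> 8) & 0xff;
	b1[5] = assoclen & 0xff;
	return 6;
}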
/linux/arch/arm/nwfpe/
softfloat-macros
350 z1 = a1 + b1;
371 bits64 b1,
383 z1 = a1 + b1;
409 *z1Ptr = a1 - b1;
410 *z0Ptr = a0 - b0 - ( a1 < b1 );
429 bits64 b1,
441 z1 = a1 - b1;
442 borrow0 = ( a1 < b1 );
525 bits64 b1,
562 bits64 b0, b1;
[all …]
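softfloat-macros builds 128-bit arithmetic from two 64-bit halves: the carry out of the low addition is recovered by comparing the low sum with one of its operands, and the borrow by comparing the operands directly. The same idea in plain C:

#include <stdint.h>

/* 128-bit add; a0/b0 are the high halves, a1/b1 the low halves. */
static void add128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
		   uint64_t *z0, uint64_t *z1)
{
	uint64_t lo = a1 + b1;

	*z1 = lo;
	*z0 = a0 + b0 + (lo < a1);	/* carry iff the low sum wrapped */
}

/* 128-bit subtract, same half ordering. */
static void sub128(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1,
		   uint64_t *z0, uint64_t *z1)
{
	*z1 = a1 - b1;
	*z0 = a0 - b0 - (a1 < b1);	/* borrow iff the low half underflows */
}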
/linux/arch/riscv/crypto/
chacha-riscv64-zvkb.S
76 .macro chacha_round a0, b0, c0, d0, a1, b1, c1, d1, \
80 vadd.vv \a1, \a1, \b1
98 vxor.vv \b1, \b1, \c1
102 vror.vi \b1, \b1, 32 - 12
108 vadd.vv \a1, \a1, \b1
126 vxor.vv \b1, \b1, \c1
130 vror.vi \b1, \b1, 32 - 7
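chacha-riscv64-zvkb.S runs several ChaCha quarter rounds in parallel across vector registers; the `vror.vi \b1, \b1, 32 - 12` lines express left-rotations as right-rotations by the complement. A scalar C sketch of one quarter round for reference:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* One ChaCha quarter round over four words of the 4x4 state. */
static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}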
/linux/tools/mm/
slabinfo.c
999 b1, b2, b3, b4); in totals()
1004 b1, b2, b3, b4); in totals()
1009 b1, b2, b3, b4); in totals()
1014 b1, b2, b3, b4); in totals()
1020 b1, b2, b3, b4); in totals()
1026 b1, b2, b3, b4); in totals()
1031 b1, b2, b3, b4); in totals()
1036 b1, b2, b3, b4); in totals()
1052 b1, b2, b3); in totals()
1056 b1, b2, b3); in totals()
[all …]
/linux/arch/arm/crypto/
aes-neonbs-core.S
81 veor \b2, \b2, \b1
92 veor \b3, \b3, \b1
93 veor \b1, \b1, \b5
98 veor \b1, \b1, \b4
101 veor \b6, \b6, \b1
102 veor \b1, \b1, \b5
111 veor \b1, \b1, \b7
114 veor \b1, \b1, \b3
122 veor \b1, \b1, \b4
126 veor \b1, \b1, \b5
[all …]
blake2s-core.S
68 .macro _blake2s_quarterround a0, b0, c0, d0, a1, b1, c1, d1, s0, s1, s2, s3
75 add \a1, \a1, \b1, ror #brot
89 eor \b1, \c1, \b1, ror #brot
96 add \a1, \a1, \b1, ror #12
110 eor \b1, \c1, \b1, ror#12
/linux/arch/xtensa/platforms/iss/include/platform/
simcall-iss.h
61 register int b1 asm("a3") = b; in __simc()
66 : "+r"(a1), "+r"(b1) in __simc()
69 errno = b1; in __simc()
/linux/arch/s390/net/
bpf_jit_comp.c
119 u32 r1 = reg2hex[b1]; in reg_set_seen()
130 #define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]] argument
146 REG_SET_SEEN(b1); \
160 REG_SET_SEEN(b1); \
167 REG_SET_SEEN(b1); \
182 REG_SET_SEEN(b1); \
190 REG_SET_SEEN(b1); \
232 REG_SET_SEEN(b1); \
242 REG_SET_SEEN(b1); \
251 REG_SET_SEEN(b1); \
[all …]
/linux/scripts/
parse-maintainers.pl
79 my $b1 = uc(substr($b, 0, 1));
82 my $b_index = index($preferred_order, $b1);
87 if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||
88 ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
/linux/drivers/mtd/nand/
ecc-sw-hamming.c
378 unsigned char b0, b1, b2, bit_addr; in ecc_sw_hamming_correct() local
388 b1 = read_ecc[1] ^ calc_ecc[1]; in ecc_sw_hamming_correct()
391 b1 = read_ecc[0] ^ calc_ecc[0]; in ecc_sw_hamming_correct()
401 if ((b0 | b1 | b2) == 0) in ecc_sw_hamming_correct()
405 (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) && in ecc_sw_hamming_correct()
426 byte_addr = (addressbits[b1] << 4) + addressbits[b0]; in ecc_sw_hamming_correct()
429 (addressbits[b1] << 4) + addressbits[b0]; in ecc_sw_hamming_correct()
437 if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) in ecc_sw_hamming_correct()
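ecc-sw-hamming.c classifies the syndrome (read ECC xor recalculated ECC): all zero means clean data, every even/odd bit pair complementary means a correctable single-bit data error, exactly one set bit means the stored ECC bytes themselves were hit, anything else is uncorrectable. A compact sketch of that classification, leaving out the driver's addressbits[] tables that turn the syndrome into a byte/bit address and its 256- vs 512-byte mask variations:

#include <stdint.h>

enum ecc_result { ECC_OK, ECC_SINGLE_BIT, ECC_ECC_ERROR, ECC_UNCORRECTABLE };

/* A correctable single data-bit flip leaves every even/odd bit pair of the
 * syndrome holding complementary values. */
static int pairs_complementary(uint8_t b)
{
	return ((b ^ (b >> 1)) & 0x55) == 0x55;
}

static enum ecc_result classify_syndrome(uint8_t b0, uint8_t b1, uint8_t b2)
{
	if ((b0 | b1 | b2) == 0)
		return ECC_OK;

	if (pairs_complementary(b0) && pairs_complementary(b1) &&
	    pairs_complementary(b2))
		return ECC_SINGLE_BIT;		/* flip one data bit back */

	if (__builtin_popcount(b0) + __builtin_popcount(b1) +
	    __builtin_popcount(b2) == 1)
		return ECC_ECC_ERROR;		/* the stored ECC took the hit */

	return ECC_UNCORRECTABLE;
}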
/linux/arch/x86/crypto/
cast6-avx-x86_64-asm_64.S
129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument
130 F_head(b1, RX, RGI1, RGI2, op0); \
133 F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
139 #define F1_2(a1, b1, a2, b2) \ argument
140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
141 #define F2_2(a1, b1, a2, b2) \ argument
142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
143 #define F3_2(a1, b1, a2, b2) \ argument
144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
cast5-avx-x86_64-asm_64.S
129 #define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \ argument
130 F_head(b1, RX, RGI1, RGI2, op0); \
133 F_tail(b1, RX, RGI1, RGI2, op1, op2, op3); \
139 #define F1_2(a1, b1, a2, b2) \ argument
140 F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
141 #define F2_2(a1, b1, a2, b2) \ argument
142 F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
143 #define F3_2(a1, b1, a2, b2) \ argument
144 F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
146 #define subround(a1, b1, a2, b2, f) \ argument
[all …]
