Searched refs:t5 (Results 1 – 25 of 78) sorted by relevance

/linux/arch/alpha/lib/
ev67-strrchr.S
37 insbl a1, 2, t5 # U : 0000000000ch0000
42 sll t5, 8, t3 # U : 00000000ch000000
46 or t5, t3, t3 # E : 00000000chch0000
53 lda t5, -1 # E : build garbage mask
56 mskqh t5, a0, t4 # E : Complete garbage mask
86 subq t4, 1, t5 # E : build a mask of the bytes up to...
87 or t4, t5, t4 # E : ... and including the null
102 lda t5, 0x3f($31) # E :
103 subq t5, t2, t5 # E : Normalize leading zero count
105 addq t6, t5, v0 # E : and add to quadword address
strrchr.S
24 sll a1, 8, t5 # e0 : replicate our test character
26 or t5, a1, a1 # e0 :
28 sll a1, 16, t5 # e0 :
30 or t5, a1, a1 # e0 :
32 sll a1, 32, t5 # e0 :
35 or t5, a1, a1 # .. e1 : character replication complete
58 subq t4, 1, t5 # e0 : build a mask of the bytes up to...
59 or t4, t5, t4 # e1 : ... and including the null
strchr.S
24 sll a1, 8, t5 # e0 : replicate the search character
26 or t5, a1, a1 # e0 :
28 sll a1, 16, t5 # e0 :
31 or t5, a1, a1 # .. e1 :
32 sll a1, 32, t5 # e0 :
34 or t5, a1, a1 # e0 :
ev67-strchr.S
34 insbl a1, 1, t5 # U : 000000000000ch00
38 or t5, t3, a1 # E : 000000000000chch
44 inswl a1, 2, t5 # E : 00000000chch0000
48 or a3, t5, t5 # E : 0000chchchch0000
53 or t5, a1, a1 # E : chchchchchchchch
stxcpy.S
239 and a1, 7, t5 # e0 : find src misalignment
256 cmplt t4, t5, t12 # e0 :
260 mskqh t2, t5, t2 # e0 :
275 and a1, 7, t5 # .. e1 :
278 srl t12, t5, t12 # e0 : adjust final null return value
ev6-stxcpy.S
269 and a1, 7, t5 # E : find src misalignment
287 cmplt t4, t5, t12 # E :
291 mskqh t2, t5, t2 # U :
304 and a1, 7, t5 # E :
308 srl t12, t5, t12 # U : adjust final null return value
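The strchr.S / strrchr.S hits above show the character-replication step these routines perform before scanning a quadword at a time. A minimal C sketch of what that sll/or sequence computes (illustrative only, not the kernel code):

#include <stdint.h>

/* Smear the search character into every byte of a 64-bit word,
 * mirroring the sll/or pairs in the listing above. */
static uint64_t replicate_byte(unsigned char c)
{
        uint64_t v = c;         /* 00000000000000ch */
        v |= v << 8;            /* 000000000000chch */
        v |= v << 16;           /* 00000000chchchch */
        v |= v << 32;           /* chchchchchchchch */
        return v;
}

The replicated word can then be XORed against each loaded quadword, so a zero byte in the result marks a match position.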
/linux/arch/riscv/lib/
memmove.S
67 andi t5, t3, -SZREG
78 beq t5, t3, 1f
79 addi t5, t5, SZREG
164 addi a2, t5, -SZREG /* The other breakpoint for the unrolled loop*/
212 bne t4, t5, 1b
214 mv t4, t5 /* Fix the dest pointer in case the loop was broken */
247 bne t4, t5, 1b
261 beq t3, t5, 2f
267 bne t3, t5, 1b
strncmp.S
69 li t5, -1
84 bne t3, t5, 2f
86 bne t3, t5, 2f
memcpy.S
52 REG_L t5, 9*SZREG(a1)
62 REG_S t5, 9*SZREG(t6)
/linux/arch/arm64/crypto/
crct10dif-ce-core.S
85 t5 .req v19
137 ext t5.8b, ad.8b, ad.8b, #2 // A2
142 pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B
151 tbl t5.16b, {ad.16b}, perm2.16b // A2
156 pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
163 eor t5.16b, t5.16b, t7.16b // M = G + H
166 uzp1 t8.2d, t4.2d, t5.2d
167 uzp2 t4.2d, t4.2d, t5.2d
184 zip2 t5.2d, t8.2d, t4.2d
190 ext t5.16b, t5.16b, t5.16b, #14
[all …]
ghash-ce-core.S
27 t5 .req v12
73 ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2
81 tbl t5.16b, {\ad\().16b}, perm2.16b // A2
102 pmull\t t5.8h, t5.\nb, \bd // H = A2*B
110 eor t5.16b, t5.16b, t6.16b // M = G + H
113 uzp1 t4.2d, t3.2d, t5.2d
114 uzp2 t3.2d, t3.2d, t5.2d
131 zip2 t5.2d, t4.2d, t3.2d
137 ext t5.16b, t5.16b, t5.16b, #14
141 eor t3.16b, t3.16b, t5.16b
/linux/lib/zlib_dfltcc/
dfltcc_util.h
39 size_t t5 = len2 ? *len2 : 0; in dfltcc() local
45 register size_t r5 __asm__("r5") = t5; in dfltcc()
60 t2 = r2; t3 = r3; t4 = r4; t5 = r5; in dfltcc()
69 *len2 = t5; in dfltcc()
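The dfltcc_util.h hits show t5 as a local shadow of an optional length that is pinned to a hardware register around the inline-asm call and written back afterwards. A rough sketch of that pattern, assuming an s390-style named register and with a placeholder asm statement standing in for the real instruction:

#include <stddef.h>

static void update_len(size_t *len2)
{
        size_t t5 = len2 ? *len2 : 0;           /* local copy (line 39 above) */
        register size_t r5 __asm__("r5") = t5;  /* pin the value to register r5 */

        __asm__ volatile("" : "+r" (r5));       /* placeholder for the real insn */

        t5 = r5;                                /* copy back out (line 60 above) */
        if (len2)
                *len2 = t5;                     /* propagate to the caller (line 69) */
}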
/linux/arch/powerpc/crypto/
ghashp10-ppc.pl
60 my ($t4,$t5,$t6) = ($Hl,$H,$Hh);
196 vsldoi $t5,$zero,$Xm1,8
200 vxor $Xh1,$Xh1,$t5
208 vsldoi $t5,$Xl1,$Xl1,8 # 2nd reduction phase
212 vxor $t5,$t5,$Xh1
214 vxor $Xl1,$Xl1,$t5
/linux/crypto/
ecc.c
1134 u64 t5[ECC_MAX_DIGITS]; in ecc_point_double_jacobian() local
1182 vli_mod_sub(t5, t5, z1, curve_prime, ndigits); in ecc_point_double_jacobian()
1235 u64 t5[ECC_MAX_DIGITS]; in xycz_add() local
1242 vli_mod_square_fast(t5, t5, curve); in xycz_add()
1250 vli_mod_square_fast(t5, y2, curve); in xycz_add()
1253 vli_mod_sub(t5, t5, x1, curve_prime, ndigits); in xycz_add()
1255 vli_mod_sub(t5, t5, x2, curve_prime, ndigits); in xycz_add()
1267 vli_set(x2, t5, ndigits); in xycz_add()
1278 u64 t5[ECC_MAX_DIGITS]; in xycz_add_c() local
1287 vli_mod_square_fast(t5, t5, curve); in xycz_add_c()
[all …]
/linux/arch/loongarch/mm/
page.S
49 ld.d t5, a1, 40
62 st.d t5, a0, 40
64 ld.d t5, a1, 104
77 st.d t5, a0, -24
/linux/arch/riscv/include/asm/
compat.h
70 compat_ulong_t t5; member
107 cregs->t5 = (compat_ulong_t) regs->t5; in regs_to_cregs()
144 regs->t5 = (unsigned long) cregs->t5; in cregs_to_regs()
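In the riscv compat.h hits, t5 is a struct member that is narrowed or widened when converting between the native and compat register layouts. A stripped-down sketch of those two conversions (struct and type definitions here are assumptions, not the kernel's):

#include <stdint.h>

typedef uint32_t compat_ulong_t;        /* assumed 32-bit compat type */

struct regs        { unsigned long t5; };
struct compat_regs { compat_ulong_t t5; };

/* Narrowing copy, as in regs_to_cregs() above */
static void regs_to_cregs(struct compat_regs *c, const struct regs *r)
{
        c->t5 = (compat_ulong_t)r->t5;
}

/* Widening copy, as in cregs_to_regs() above */
static void cregs_to_regs(struct regs *r, const struct compat_regs *c)
{
        r->t5 = (unsigned long)c->t5;
}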
/linux/scripts/
makelst
28 t5=`field 1 $t1`
29 t6=`printf "%lu" $((0x$t4 - 0x$t5))`
/linux/arch/mips/kernel/
scall32-o32.S
61 load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
67 sw t5, 16(sp) # argument #5 to ksp
157 li t5, 0
196 lw t5, 24(sp)
199 sw t5, 20(sp)
/linux/arch/riscv/kernel/
mcount.S
96 REG_L t5, 0(t3)
97 bne t5, t4, .Ldo_trace
126 jalr t5
/linux/arch/x86/crypto/
camellia-aesni-avx2-asm_64.S
86 filter_8bit(x0, t5, t6, t7, t4); \
87 filter_8bit(x7, t5, t6, t7, t4); \
94 filter_8bit(x2, t5, t6, t7, t4); \
95 filter_8bit(x5, t5, t6, t7, t4); \
96 filter_8bit(x1, t5, t6, t7, t4); \
103 vextracti128 $1, x5, t5##_x; \
124 vaesenclast t4##_x, t5##_x, t5##_x; \
125 vinserti128 $1, t5##_x, x5, x5; \
168 vpsrldq $5, t0, t5; \
177 vpshufb t7, t5, t5; \
[all …]
/linux/arch/x86/include/asm/
syscall_wrapper.h
63 #define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \ argument
64 SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
65 #define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \ argument
66 SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
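The syscall_wrapper.h hits show the recursive SYSCALL_PT_ARGn macros: each level expands the previous one and appends a single "apply m to (type, pt_regs field)" term. A simplified, self-contained two-argument analogue of that expansion (field names and types are illustrative, not the kernel's actual mapping):

/* Toy pt_regs with just the two fields used below. */
struct pt_regs { unsigned long di; unsigned long bp; };

#define PT_ARG1(m, t1)          m(t1, (regs->di))
#define PT_ARG2(m, t1, t2)      PT_ARG1(m, t1), m(t2, (regs->bp))
#define ARG_CAST(t, reg)        ((t)(reg))

static long demo_impl(long a, long b) { return a + b; }

static long demo_stub(struct pt_regs *regs)
{
        /* PT_ARG2(ARG_CAST, long, long) expands to
         * ((long)(regs->di)), ((long)(regs->bp)) */
        return demo_impl(PT_ARG2(ARG_CAST, long, long));
}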
/linux/arch/riscv/kvm/
vcpu_switch.S
49 REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0)
67 csrw CSR_SEPC, t5
105 REG_L t5, (KVM_ARCH_GUEST_T5)(a0)
149 REG_S t5, (KVM_ARCH_GUEST_T5)(a0)
157 REG_L t5, (KVM_ARCH_HOST_SSTATUS)(a0)
175 csrrw t5, CSR_SSTATUS, t5
182 REG_S t5, (KVM_ARCH_GUEST_SSTATUS)(a0)
/linux/arch/loongarch/kernel/
rethook_trampoline.S
23 cfi_st t5, PT_R17
58 cfi_ld t5, PT_R17
/linux/arch/arm/crypto/
aes-neonbs-core.S
297 t0, t1, t2, t3, t4, t5, t6, t7, inv
307 vext.8 \t5, \x5, \x5, #12
310 veor \x5, \x5, \t5
320 veor \t5, \t5, \x4
335 veor \x7, \t1, \t5
353 t0, t1, t2, t3, t4, t5, t6, t7
358 vld1.8 {\t4-\t5}, [bskey, :256]!
364 veor \x5, \x5, \t5
379 vext.8 \t5, \x5, \x5, #8
382 veor \t5, \t5, \x5
[all …]
/linux/lib/
crc32.c
65 t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
70 # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
80 const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; local
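In lib/crc32.c, t4 through t7 are the upper slices of the slice-by-8 lookup tables, combined per 32-bit chunk by the DO_CRC8 macro shown above. A minimal slicing-by-4 analogue of the same idea (standard reflected CRC-32 polynomial, little-endian host assumed; a sketch, not the kernel implementation):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t tab[4][256];

static void crc32_tables_init(void)
{
        for (uint32_t i = 0; i < 256; i++) {
                uint32_t c = i;
                for (int k = 0; k < 8; k++)
                        c = (c & 1) ? (c >> 1) ^ 0xEDB88320u : c >> 1;
                tab[0][i] = c;          /* byte-at-a-time table */
        }
        for (int t = 1; t < 4; t++)     /* tab[t] advances a byte t extra steps */
                for (uint32_t i = 0; i < 256; i++)
                        tab[t][i] = (tab[t - 1][i] >> 8) ^ tab[0][tab[t - 1][i] & 0xff];
}

static uint32_t crc32_slice4(uint32_t crc, const unsigned char *p, size_t len)
{
        crc = ~crc;
        while (len >= 4) {
                uint32_t q;
                memcpy(&q, p, 4);       /* little-endian 32-bit load */
                q ^= crc;
                crc = tab[3][q & 0xff] ^ tab[2][(q >> 8) & 0xff] ^
                      tab[1][(q >> 16) & 0xff] ^ tab[0][q >> 24];
                p += 4;
                len -= 4;
        }
        while (len--)                   /* leftover bytes, one at a time */
                crc = (crc >> 8) ^ tab[0][(crc ^ *p++) & 0xff];
        return ~crc;
}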
