
Searched refs:a0123 (Results 1 – 25 of 53) sorted by relevance


/dports/graphics/vapoursynth-waifu2x-ncnn-vulkan/vapoursynth-waifu2x-ncnn-vulkan-r4/deps/ncnn/src/layer/vulkan/shader/
interp_bicubic_pack4.comp
82 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
115 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
141 afpvec4 a = a0123 * alpha;
/dports/graphics/waifu2x-ncnn-vulkan/waifu2x-ncnn-vulkan-20210521/src/ncnn/src/layer/vulkan/shader/
interp_bicubic_pack4.comp
82 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
115 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
141 afpvec4 a = a0123 * alpha;
/dports/benchmarks/vkpeak/vkpeak-20210430/ncnn/src/layer/vulkan/shader/
interp_bicubic_pack4.comp
82 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
115 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
141 afpvec4 a = a0123 * alpha;
/dports/graphics/realsr-ncnn-vulkan/realsr-ncnn-vulkan-20210210/src/ncnn/src/layer/vulkan/shader/
interp_bicubic_pack4.comp
82 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
115 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
141 afpvec4 a = a0123 * alpha;
/dports/misc/ncnn/ncnn-20211208/src/layer/vulkan/shader/
interp_bicubic_pack4.comp
118 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
151 afpmat4 a0123 = afpmat4(a0, a1, a2, a3);
177 afpvec4 a = a0123 * alpha;
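
The interp_bicubic_pack4.comp matches above pack four neighboring rows into a 4x4 matrix and multiply it by a cubic weight vector ("a0123 * alpha"). A minimal scalar sketch of that step, assuming Keys cubic-convolution weights with A = -0.75; the shader's actual coefficients and half-precision types may differ:

    #include <array>

    // Cubic convolution weights for fractional offset t in [0,1); A = -0.75 is an assumption.
    std::array<float, 4> cubic_weights(float t, float A = -0.75f) {
        std::array<float, 4> w;
        w[0] = ((A * (t + 1) - 5 * A) * (t + 1) + 8 * A) * (t + 1) - 4 * A;
        w[1] = ((A + 2) * t - (A + 3)) * t * t + 1;
        w[2] = ((A + 2) * (1 - t) - (A + 3)) * (1 - t) * (1 - t) + 1;
        w[3] = 1.0f - w[0] - w[1] - w[2];
        return w;
    }

    // Equivalent of "a0123 * alpha" with a0..a3 as the matrix columns (GLSL order):
    // out = a0*alpha[0] + a1*alpha[1] + a2*alpha[2] + a3*alpha[3].
    std::array<float, 4> weight_rows(const std::array<std::array<float, 4>, 4>& a,
                                     const std::array<float, 4>& alpha) {
        std::array<float, 4> out{};
        for (int j = 0; j < 4; ++j)      // which of a0..a3
            for (int i = 0; i < 4; ++i)  // component within each vector
                out[i] += a[j][i] * alpha[j];
        return out;
    }
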
/dports/lang/halide/Halide-release_2019_08_27-2654-g664dc4993/src/
HexagonOptimize.cpp
598 …Expr a0123 = Shuffle::make_interleave({mpys[0].first, mpys[1].first, mpys[2].first, mpys[3].first}… in visit() local
599 a0123 = simplify(a0123); in visit()
603 if (op->type.bits() == 32 || !a0123.as<Shuffle>()) { in visit()
607 Expr new_expr = halide_hexagon_add_4mpy(op->type, suffix, a0123, b0123); in visit()
643 …Expr a0123 = Shuffle::make_interleave({mpys[0].first, mpys[1].first, mpys[2].first, mpys[3].first}… in visit() local
645 a0123 = simplify(a0123); in visit()
649 if (op->type.bits() == 32 || (!a0123.as<Shuffle>() && !b0123.as<Shuffle>())) { in visit()
650 Expr new_expr = halide_hexagon_add_4mpy(op->type, suffix, a0123, b0123); in visit()
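
The HexagonOptimize.cpp lines above interleave four multiply operands into a0123/b0123 so the pattern can be lowered to a single Hexagon 4-way multiply-accumulate. A hedged scalar model of what that fused operation computes (names here are illustrative, not Halide's):

    #include <cstdint>
    #include <vector>

    // Each output lane is the dot product of one interleaved group of four operands,
    // accumulated at 32-bit width (vrmpy-style).
    std::vector<int32_t> add_4mpy_model(const std::vector<int16_t>& a0123,
                                        const std::vector<int16_t>& b0123) {
        std::vector<int32_t> out(a0123.size() / 4, 0);
        for (size_t i = 0; i < out.size(); ++i)
            for (size_t k = 0; k < 4; ++k)
                out[i] += int32_t(a0123[4 * i + k]) * int32_t(b0123[4 * i + k]);
        return out;
    }
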
/dports/devel/aarch64-none-elf-gcc/gcc-8.4.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
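
The vmx2spu.h matches (the same header appears under each of the gcc ports below) emulate AltiVec vec_sum4s with SPU intrinsics: a0123 collects partial sums of adjacent elements, then vec_adds folds them into b with saturation. A rough scalar model of the signed-short variant at lines 2181-2184, assuming the shift/extend pair recovers the high and low halfword of each 32-bit lane:

    #include <cstdint>
    #include <limits>

    // Saturating 32-bit addition.
    int32_t sat_add_i32(int64_t x, int64_t y) {
        int64_t s = x + y;
        if (s > std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
        if (s < std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
        return static_cast<int32_t>(s);
    }

    // One 32-bit lane: hi and lo are the two signed shorts packed in that word
    // (roughly what spu_rlmaska(a, -16) and spu_extend(a) produce); their sum is
    // then added to the accumulator lane b with saturation, as vec_adds does.
    int32_t vec_sum4s_short_lane(int16_t hi, int16_t lo, int32_t b) {
        return sat_add_i32(int32_t(hi) + int32_t(lo), b);
    }
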
/dports/lang/gcc9/gcc-9.4.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/devel/arm-none-eabi-gcc492/gcc-4.9.2/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/lang/gnat_util/gcc-6-20180516/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/devel/riscv64-gcc/gcc-8.3.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/lang/gcc48/gcc-4.8.5/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/devel/mingw32-gcc/gcc-4.8.1/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/devel/riscv32-unknown-elf-gcc/gcc-8.4.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/devel/arm-none-eabi-gcc/gcc-8.4.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/devel/riscv64-none-elf-gcc/gcc-8.4.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/lang/gcc9-aux/gcc-9.1.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/lang/gcc9-devel/gcc-9-20211007/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/lang/gcc8/gcc-8.5.0/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/lang/gcc6-aux/gcc-6-20180516/gcc/config/spu/
vmx2spu.h
2161 vec_uint4 a01_23, a0123; in vec_sum4s() local
2165 a0123 = spu_add(spu_rlmask(a01_23, -16), spu_and(a01_23, 0x1FF)); in vec_sum4s()
2166 return (vec_adds(a0123, b)); in vec_sum4s()
2171 vec_int4 a01_23, a0123; in vec_sum4s() local
2175 a0123 = spu_add(spu_rlmaska(a01_23, -16), spu_extend((vec_short8)(a01_23))); in vec_sum4s()
2176 return (vec_adds(a0123, b)); in vec_sum4s()
2181 vec_int4 a0123; in vec_sum4s() local
2183 a0123 = spu_add(spu_rlmaska((vec_int4)(a), -16), spu_extend(a)); in vec_sum4s()
2184 return (vec_adds(a0123, b)); in vec_sum4s()
/dports/dns/idnkit2/idnkit-2.3/test/undoiferr/
undoiferr.def
313 to: a0123
452 to: a0123
/dports/math/kfr/kfr-4.2.1/include/kfr/dft/impl/
ft.hpp
707 KFR_INTRINSIC void butterfly4(cvec<T, N * 4>& a0123) in butterfly4() argument
713 split<T, N * 4 * 2>(a0123, a0, a1, a2, a3); in butterfly4()
715 a0123 = concat(a0, a1, a2, a3); in butterfly4()
818 KFR_INTRINSIC void apply_twiddles4(cvec<T, N * 4>& __restrict a0123) in apply_twiddles4() argument
824 split<T, 2 * N * 4>(a0123, a0, a1, a2, a3); in apply_twiddles4()
832 a0123 = concat(a0, a1, a2, a3); in apply_twiddles4()
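
In the kfr ft.hpp hits, a0123 packs four complex values that butterfly4() splits, combines, and re-concatenates. Below is the textbook radix-4 DFT butterfly over plain std::complex, as a hedged sketch of the arithmetic only; KFR's SIMD layout and twiddle handling differ:

    #include <array>
    #include <complex>

    void butterfly4_model(std::array<std::complex<float>, 4>& a0123, bool inverse = false) {
        auto a0 = a0123[0], a1 = a0123[1], a2 = a0123[2], a3 = a0123[3];
        auto t0 = a0 + a2, t1 = a0 - a2;   // even/odd partial sums
        auto t2 = a1 + a3, t3 = a1 - a3;
        // Forward transform multiplies t3 by -j, inverse by +j.
        std::complex<float> jt3 = inverse ? std::complex<float>(-t3.imag(), t3.real())
                                          : std::complex<float>( t3.imag(), -t3.real());
        a0123 = { t0 + t2, t1 + jt3, t0 - t2, t1 - jt3 };
    }
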
/dports/devel/simde/simde-0.7.2/simde/x86/avx512/
permutex2var.h
1438 __m256i t0, t1, index, select0x10, select0x20, select0x40, t01, t23, a0123, b0123; in simde_mm512_permutex2var_epi8()
1460 a0123 = _mm256_blendv_epi8(t01, t23, select0x20); in simde_mm512_permutex2var_epi8()
1469 r_.m256i[i] = _mm256_blendv_epi8(a0123, b0123, select0x40); in simde_mm512_permutex2var_epi8()
/dports/biology/bowtie2/simde-no-tests-f6a0b3b/x86/avx512/
permutex2var.h
1438 __m256i t0, t1, index, select0x10, select0x20, select0x40, t01, t23, a0123, b0123; in simde_mm512_permutex2var_epi8()
1460 a0123 = _mm256_blendv_epi8(t01, t23, select0x20); in simde_mm512_permutex2var_epi8()
1469 r_.m256i[i] = _mm256_blendv_epi8(a0123, b0123, select0x40); in simde_mm512_permutex2var_epi8()
/dports/biology/mmseqs2/MMseqs2-13-45111/lib/simde/simde/x86/avx512/
permutex2var.h
1438 __m256i t0, t1, index, select0x10, select0x20, select0x40, t01, t23, a0123, b0123; in simde_mm512_permutex2var_epi8()
1460 a0123 = _mm256_blendv_epi8(t01, t23, select0x20); in simde_mm512_permutex2var_epi8()
1469 r_.m256i[i] = _mm256_blendv_epi8(a0123, b0123, select0x40); in simde_mm512_permutex2var_epi8()
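
The simde permutex2var.h matches use a tree of byte blends (t01, t23, then a0123, then an a-vs-b select on index bit 0x40) to emulate the AVX-512 VBMI two-table byte permute. A hedged scalar model of what the intrinsic computes per 64-byte lane group:

    #include <array>
    #include <cstdint>

    // Each output byte picks from the 128-byte concatenation of a (indices 0-63)
    // and b (indices 64-127), using the low 7 bits of the corresponding index byte.
    std::array<uint8_t, 64> permutex2var_epi8_model(const std::array<uint8_t, 64>& a,
                                                    const std::array<uint8_t, 64>& idx,
                                                    const std::array<uint8_t, 64>& b) {
        std::array<uint8_t, 64> r{};
        for (int i = 0; i < 64; ++i) {
            const int sel = idx[i] & 0x7F;   // bit 0x40 selects table b
            r[i] = (sel < 64) ? a[sel] : b[sel - 64];
        }
        return r;
    }
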
