
Searched refs:_k04 (Results 1 – 25 of 35) sorted by relevance

/dports/graphics/vapoursynth-waifu2x-ncnn-vulkan/vapoursynth-waifu2x-ncnn-vulkan-r4/deps/ncnn/src/layer/x86/
convolution_2x2_pack8.h
74 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
80 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
101 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
117 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
123 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
141 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
267 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
273 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
300 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
306 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
convolution_2x2_pack8_fp16.h
255 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
261 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
282 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
298 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
304 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
322 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
448 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
454 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
481 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
487 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
convolutiondepthwise_5x5_pack8.h
61 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s1_pack8_avx() local
68 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s1_pack8_avx()
215 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s2_pack8_avx() local
222 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s2_pack8_avx()
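
All of the x86 hits above follow one pattern: _k04 is a single packed kernel tap (8 floats in pack8 layout, e.g. loaded from k0 + 32 in the 5x5 depthwise case), fetched once with _mm256_loadu_ps (or ncnn's loadfp16 helper in the fp16 variants) and then folded into each running sum with a fused multiply-add. A minimal, self-contained sketch of that accumulation step follows; the function and pointer names (accumulate_taps_pack8, kptr, rptr, out, ntaps) are illustrative, not ncnn's.

#include <immintrin.h>

/* Sketch only: fold ntaps packed kernel/input pairs into one 8-wide sum.
   Build with AVX2 + FMA (-mavx2 -mfma). */
static void accumulate_taps_pack8(const float* kptr, const float* rptr,
                                  float* out, int ntaps)
{
    __m256 _sum = _mm256_loadu_ps(out);            /* running accumulator, 8 floats */
    for (int i = 0; i < ntaps; i++)
    {
        __m256 _k = _mm256_loadu_ps(kptr + i * 8); /* kernel tap, like _k04 above */
        __m256 _r = _mm256_loadu_ps(rptr + i * 8); /* matching packed input values */
        _sum = _mm256_fmadd_ps(_k, _r, _sum);      /* _sum += _k * _r */
    }
    _mm256_storeu_ps(out, _sum);
}
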
/dports/graphics/waifu2x-ncnn-vulkan/waifu2x-ncnn-vulkan-20210521/src/ncnn/src/layer/x86/
convolution_2x2_pack8.h
74 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
80 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
101 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
117 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
123 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
141 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
267 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
273 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
300 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
306 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
convolution_2x2_pack8_fp16.h
255 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
261 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
282 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
298 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
304 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
322 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
448 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
454 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
481 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
487 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
convolutiondepthwise_5x5_pack8.h
61 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s1_pack8_avx() local
68 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s1_pack8_avx()
215 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s2_pack8_avx() local
222 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s2_pack8_avx()
/dports/misc/ncnn/ncnn-20211208/src/layer/x86/
convolution_2x2_pack8.h
74 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
80 _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
101 _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
117 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
123 _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
141 _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
267 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
273 _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
300 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
306 _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
convolution_2x2_pack8_fp16.h
255 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
261 _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
282 _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
298 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
304 _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
322 _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
448 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
454 _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
481 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
487 _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
convolutiondepthwise_5x5_pack8.h
61 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s1_pack8_avx() local
68 _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s1_pack8_avx()
215 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s2_pack8_avx() local
222 _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s2_pack8_avx()
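
Unlike the other trees above, the ncnn-20211208 hits call _mm256_comp_fmadd_ps instead of _mm256_fmadd_ps directly; that name is an ncnn compatibility wrapper so the same kernels can also be built for AVX targets without FMA support. The sketch below shows a plausible shape for such a wrapper; it is an assumption about the idea, not a copy of ncnn's actual definition.

#include <immintrin.h>

/* Assumed shape of a compatibility fmadd: use the real FMA instruction when
   the compiler targets FMA, otherwise fall back to separate multiply + add. */
static inline __m256 comp_fmadd_ps(__m256 a, __m256 b, __m256 c)
{
#ifdef __FMA__
    return _mm256_fmadd_ps(a, b, c);              /* a * b + c, single rounding */
#else
    return _mm256_add_ps(_mm256_mul_ps(a, b), c); /* two instructions, double rounding */
#endif
}
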
/dports/benchmarks/vkpeak/vkpeak-20210430/ncnn/src/layer/x86/
convolution_2x2_pack8.h
74 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
80 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
101 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
117 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
123 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
141 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
267 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
273 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
300 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
306 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
convolution_2x2_pack8_fp16.h
255 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
261 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
282 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
298 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
304 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
322 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
448 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
454 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
481 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
487 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
convolutiondepthwise_5x5_pack8.h
61 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s1_pack8_avx() local
68 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s1_pack8_avx()
215 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s2_pack8_avx() local
222 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s2_pack8_avx()
/dports/graphics/realsr-ncnn-vulkan/realsr-ncnn-vulkan-20210210/src/ncnn/src/layer/x86/
convolution_2x2_pack8.h
74 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
80 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
101 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
117 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
123 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_pack8_avx()
141 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_pack8_avx()
267 __m256 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx() local
273 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
300 _k04 = _mm256_loadu_ps(kptr); in conv2x2s1_pack8_avx()
306 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_pack8_avx()
convolution_2x2_pack8_fp16.h
255 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
261 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
282 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
298 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
304 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in conv2x2s1_fp16_pack8_avx()
322 _sum1 = _mm256_fmadd_ps(_k04, _r04, _sum1); in conv2x2s1_fp16_pack8_avx()
448 __m256 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx() local
454 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
481 _k04 = loadfp16(kptr); in conv2x2s1_fp16_pack8_avx()
487 _sum = _mm256_fmadd_ps(_k04, _r04, _sum); in conv2x2s1_fp16_pack8_avx()
convolutiondepthwise_5x5_pack8.h
61 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s1_pack8_avx() local
68 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s1_pack8_avx()
215 __m256 _k04 = _mm256_loadu_ps(k0 + 32); in convdw5x5s2_pack8_avx() local
222 _sum0 = _mm256_fmadd_ps(_k04, _r04, _sum0); in convdw5x5s2_pack8_avx()
/dports/graphics/vapoursynth-waifu2x-ncnn-vulkan/vapoursynth-waifu2x-ncnn-vulkan-r4/deps/ncnn/src/layer/arm/
convolutiondepthwise_5x5_pack4.h
81 float32x4_t _k04 = vld1q_f32(k0 + 16); in convdw5x5s1_pack4_neon() local
88 _sum00 = vmlaq_f32(_sum00, _k04, _r04); in convdw5x5s1_pack4_neon()
93 _sum01 = vmlaq_f32(_sum01, _k04, _r05); in convdw5x5s1_pack4_neon()
98 _sum02 = vmlaq_f32(_sum02, _k04, _r06); in convdw5x5s1_pack4_neon()
103 _sum03 = vmlaq_f32(_sum03, _k04, _r07); in convdw5x5s1_pack4_neon()
125 _sum10 = vmlaq_f32(_sum10, _k04, _r14); in convdw5x5s1_pack4_neon()
130 _sum11 = vmlaq_f32(_sum11, _k04, _r15); in convdw5x5s1_pack4_neon()
135 _sum12 = vmlaq_f32(_sum12, _k04, _r16); in convdw5x5s1_pack4_neon()
140 _sum13 = vmlaq_f32(_sum13, _k04, _r17); in convdw5x5s1_pack4_neon()
615 _sum0 = vmlaq_f32(_sum0, _k04, _r04); in convdw5x5s1_pack4_neon()
[all …]
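
The ARM hits are the pack4 counterpart of the same accumulation: _k04 is a float32x4_t loaded with vld1q_f32(k0 + 16), and vmlaq_f32(acc, _k04, _r0x) adds _k04 * _r0x into each running sum. A minimal NEON sketch follows; the names (accumulate_taps_pack4, kptr, rptr, out, ntaps) are illustrative, not ncnn's.

#include <arm_neon.h>

/* Sketch only: fold ntaps packed kernel/input pairs into one 4-wide sum. */
static void accumulate_taps_pack4(const float* kptr, const float* rptr,
                                  float* out, int ntaps)
{
    float32x4_t _sum = vld1q_f32(out);            /* running accumulator, 4 floats */
    for (int i = 0; i < ntaps; i++)
    {
        float32x4_t _k = vld1q_f32(kptr + i * 4); /* kernel tap, like _k04 above */
        float32x4_t _r = vld1q_f32(rptr + i * 4); /* matching packed input values */
        _sum = vmlaq_f32(_sum, _k, _r);           /* _sum += _k * _r */
    }
    vst1q_f32(out, _sum);
}
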
/dports/graphics/waifu2x-ncnn-vulkan/waifu2x-ncnn-vulkan-20210521/src/ncnn/src/layer/arm/
convolutiondepthwise_5x5_pack4.h
81 float32x4_t _k04 = vld1q_f32(k0 + 16); in convdw5x5s1_pack4_neon() local
88 _sum00 = vmlaq_f32(_sum00, _k04, _r04); in convdw5x5s1_pack4_neon()
93 _sum01 = vmlaq_f32(_sum01, _k04, _r05); in convdw5x5s1_pack4_neon()
98 _sum02 = vmlaq_f32(_sum02, _k04, _r06); in convdw5x5s1_pack4_neon()
103 _sum03 = vmlaq_f32(_sum03, _k04, _r07); in convdw5x5s1_pack4_neon()
125 _sum10 = vmlaq_f32(_sum10, _k04, _r14); in convdw5x5s1_pack4_neon()
130 _sum11 = vmlaq_f32(_sum11, _k04, _r15); in convdw5x5s1_pack4_neon()
135 _sum12 = vmlaq_f32(_sum12, _k04, _r16); in convdw5x5s1_pack4_neon()
140 _sum13 = vmlaq_f32(_sum13, _k04, _r17); in convdw5x5s1_pack4_neon()
615 _sum0 = vmlaq_f32(_sum0, _k04, _r04); in convdw5x5s1_pack4_neon()
[all …]
/dports/benchmarks/vkpeak/vkpeak-20210430/ncnn/src/layer/arm/
convolutiondepthwise_5x5_pack4.h
81 float32x4_t _k04 = vld1q_f32(k0 + 16); in convdw5x5s1_pack4_neon() local
88 _sum00 = vmlaq_f32(_sum00, _k04, _r04); in convdw5x5s1_pack4_neon()
93 _sum01 = vmlaq_f32(_sum01, _k04, _r05); in convdw5x5s1_pack4_neon()
98 _sum02 = vmlaq_f32(_sum02, _k04, _r06); in convdw5x5s1_pack4_neon()
103 _sum03 = vmlaq_f32(_sum03, _k04, _r07); in convdw5x5s1_pack4_neon()
125 _sum10 = vmlaq_f32(_sum10, _k04, _r14); in convdw5x5s1_pack4_neon()
130 _sum11 = vmlaq_f32(_sum11, _k04, _r15); in convdw5x5s1_pack4_neon()
135 _sum12 = vmlaq_f32(_sum12, _k04, _r16); in convdw5x5s1_pack4_neon()
140 _sum13 = vmlaq_f32(_sum13, _k04, _r17); in convdw5x5s1_pack4_neon()
615 _sum0 = vmlaq_f32(_sum0, _k04, _r04); in convdw5x5s1_pack4_neon()
[all …]
/dports/misc/ncnn/ncnn-20211208/src/layer/arm/
convolutiondepthwise_5x5_pack4.h
81 float32x4_t _k04 = vld1q_f32(k0 + 16); in convdw5x5s1_pack4_neon() local
88 _sum00 = vmlaq_f32(_sum00, _k04, _r04); in convdw5x5s1_pack4_neon()
93 _sum01 = vmlaq_f32(_sum01, _k04, _r05); in convdw5x5s1_pack4_neon()
98 _sum02 = vmlaq_f32(_sum02, _k04, _r06); in convdw5x5s1_pack4_neon()
103 _sum03 = vmlaq_f32(_sum03, _k04, _r07); in convdw5x5s1_pack4_neon()
125 _sum10 = vmlaq_f32(_sum10, _k04, _r14); in convdw5x5s1_pack4_neon()
130 _sum11 = vmlaq_f32(_sum11, _k04, _r15); in convdw5x5s1_pack4_neon()
135 _sum12 = vmlaq_f32(_sum12, _k04, _r16); in convdw5x5s1_pack4_neon()
140 _sum13 = vmlaq_f32(_sum13, _k04, _r17); in convdw5x5s1_pack4_neon()
615 _sum0 = vmlaq_f32(_sum0, _k04, _r04); in convdw5x5s1_pack4_neon()
[all …]
/dports/graphics/realsr-ncnn-vulkan/realsr-ncnn-vulkan-20210210/src/ncnn/src/layer/arm/
convolutiondepthwise_5x5_pack4.h
81 float32x4_t _k04 = vld1q_f32(k0 + 16); in convdw5x5s1_pack4_neon() local
88 _sum00 = vmlaq_f32(_sum00, _k04, _r04); in convdw5x5s1_pack4_neon()
93 _sum01 = vmlaq_f32(_sum01, _k04, _r05); in convdw5x5s1_pack4_neon()
98 _sum02 = vmlaq_f32(_sum02, _k04, _r06); in convdw5x5s1_pack4_neon()
103 _sum03 = vmlaq_f32(_sum03, _k04, _r07); in convdw5x5s1_pack4_neon()
125 _sum10 = vmlaq_f32(_sum10, _k04, _r14); in convdw5x5s1_pack4_neon()
130 _sum11 = vmlaq_f32(_sum11, _k04, _r15); in convdw5x5s1_pack4_neon()
135 _sum12 = vmlaq_f32(_sum12, _k04, _r16); in convdw5x5s1_pack4_neon()
140 _sum13 = vmlaq_f32(_sum13, _k04, _r17); in convdw5x5s1_pack4_neon()
615 _sum0 = vmlaq_f32(_sum0, _k04, _r04); in convdw5x5s1_pack4_neon()
[all …]
/dports/misc/ncnn/ncnn-20211208/src/layer/mips/
convolutiondepthwise_5x5_pack4.h
75 v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); in convdw5x5s1_pack4_msa() local
82 _sum0 = __msa_fmadd_w(_sum0, _k04, _r04); in convdw5x5s1_pack4_msa()
94 _sum1 = __msa_fmadd_w(_sum1, _k04, _r14); in convdw5x5s1_pack4_msa()
245 v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); in convdw5x5s1_pack4_msa() local
252 _sum0 = __msa_fmadd_w(_sum0, _k04, _r04); in convdw5x5s1_pack4_msa()
408 v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0); in convdw5x5s2_pack4_msa() local
415 _sum0 = __msa_fmadd_w(_sum0, _k04, _r04); in convdw5x5s2_pack4_msa()
/dports/misc/ncnn/ncnn-20211208/src/layer/riscv/
convolutiondepthwise_5x5_packn.h
69 vfloat32m1_t _k04 = vle32_v_f32m1(k0 + packn * 4, vl); in convdw5x5s1_packn_rvv() local
76 _sum0 = vfmacc_vv_f32m1(_sum0, _k04, _r04, vl); in convdw5x5s1_packn_rvv()
88 _sum1 = vfmacc_vv_f32m1(_sum1, _k04, _r14, vl); in convdw5x5s1_packn_rvv()
231 vfloat32m1_t _k04 = vle32_v_f32m1(k0 + packn * 4, vl); in convdw5x5s1_packn_rvv() local
238 _sum0 = vfmacc_vv_f32m1(_sum0, _k04, _r04, vl); in convdw5x5s1_packn_rvv()
389 vfloat32m1_t _k04 = vle32_v_f32m1(k0 + packn * 4, vl); in convdw5x5s2_packn_rvv() local
396 _sum0 = vfmacc_vv_f32m1(_sum0, _k04, _r04, vl); in convdw5x5s2_packn_rvv()
convolutiondepthwise_5x5_packn_fp16s.h
69 vfloat16m1_t _k04 = vle16_v_f16m1(k0 + packn * 4, vl); in convdw5x5s1_packn_fp16sa_rvv() local
76 _sum0 = vfmacc_vv_f16m1(_sum0, _k04, _r04, vl); in convdw5x5s1_packn_fp16sa_rvv()
88 _sum1 = vfmacc_vv_f16m1(_sum1, _k04, _r14, vl); in convdw5x5s1_packn_fp16sa_rvv()
231 vfloat16m1_t _k04 = vle16_v_f16m1(k0 + packn * 4, vl); in convdw5x5s1_packn_fp16sa_rvv() local
238 _sum0 = vfmacc_vv_f16m1(_sum0, _k04, _r04, vl); in convdw5x5s1_packn_fp16sa_rvv()
389 vfloat16m1_t _k04 = vle16_v_f16m1(k0 + packn * 4, vl); in convdw5x5s2_packn_fp16sa_rvv() local
396 _sum0 = vfmacc_vv_f16m1(_sum0, _k04, _r04, vl); in convdw5x5s2_packn_fp16sa_rvv()
convolution_7x7_pack1ton.h
113 _sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl); in conv7x7s2_pack1ton_rvv()
114 _sum1 = vfmacc_vf_f32m1(_sum1, r0[6], _k04, vl); in conv7x7s2_pack1ton_rvv()
115 _sum2 = vfmacc_vf_f32m1(_sum2, r0[8], _k04, vl); in conv7x7s2_pack1ton_rvv()
116 _sum3 = vfmacc_vf_f32m1(_sum3, r0[10], _k04, vl); in conv7x7s2_pack1ton_rvv()
117 _sum4 = vfmacc_vf_f32m1(_sum4, r0[12], _k04, vl); in conv7x7s2_pack1ton_rvv()
118 _sum5 = vfmacc_vf_f32m1(_sum5, r0[14], _k04, vl); in conv7x7s2_pack1ton_rvv()
119 _sum6 = vfmacc_vf_f32m1(_sum6, r0[16], _k04, vl); in conv7x7s2_pack1ton_rvv()
120 _sum7 = vfmacc_vf_f32m1(_sum7, r0[18], _k04, vl); in conv7x7s2_pack1ton_rvv()
592 _sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl); in conv7x7s2_pack1ton_rvv()
593 _sum1 = vfmacc_vf_f32m1(_sum1, r0[6], _k04, vl); in conv7x7s2_pack1ton_rvv()
[all …]
convolution_7x7_pack1ton_fp16s.h
113 _sum0 = vfmacc_vf_f16m1(_sum0, r0[4], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
114 _sum1 = vfmacc_vf_f16m1(_sum1, r0[6], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
115 _sum2 = vfmacc_vf_f16m1(_sum2, r0[8], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
116 _sum3 = vfmacc_vf_f16m1(_sum3, r0[10], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
117 _sum4 = vfmacc_vf_f16m1(_sum4, r0[12], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
118 _sum5 = vfmacc_vf_f16m1(_sum5, r0[14], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
119 _sum6 = vfmacc_vf_f16m1(_sum6, r0[16], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
120 _sum7 = vfmacc_vf_f16m1(_sum7, r0[18], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
592 _sum0 = vfmacc_vf_f16m1(_sum0, r0[4], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
593 _sum1 = vfmacc_vf_f16m1(_sum1, r0[6], _k04, vl); in conv7x7s2_pack1ton_fp16sa_rvv()
[all …]
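
The RISC-V hits express the same idea in vector-length-agnostic form: vle32_v_f32m1 / vle16_v_f16m1 load one packn-wide group under an active vector length vl, and vfmacc_vv_f32m1 / vfmacc_vv_f16m1 accumulate it; the 7x7 pack1ton kernels instead use the vector-scalar form vfmacc_vf_*, broadcasting a scalar tap such as _k04 against a vector of input values. Below is a minimal float32 sketch, assuming the un-prefixed v0.10-era intrinsic names used by this ncnn snapshot (newer toolchains spell them with a __riscv_ prefix); the function and pointer names are illustrative.

#include <riscv_vector.h>

/* Sketch only: fold ntaps packn-wide kernel/input pairs into one running sum. */
static void accumulate_taps_packn(const float* kptr, const float* rptr,
                                  float* out, int packn, int ntaps)
{
    size_t vl = vsetvl_e32m1(packn);                           /* elements per packed group */
    vfloat32m1_t _sum = vle32_v_f32m1(out, vl);                /* running accumulator */
    for (int i = 0; i < ntaps; i++)
    {
        vfloat32m1_t _k = vle32_v_f32m1(kptr + i * packn, vl); /* kernel tap, like _k04 */
        vfloat32m1_t _r = vle32_v_f32m1(rptr + i * packn, vl); /* packed input values */
        _sum = vfmacc_vv_f32m1(_sum, _k, _r, vl);              /* _sum += _k * _r */
    }
    vse32_v_f32m1(out, _sum, vl);
}
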
