//
//  NEON_MNNConvRunForUnitDepthWise_BF16.S
//  MNN
//
//  Created by MNN on 2021/03/09.
//  Copyright © 2018-2021 Alibaba Group Holding Limited
//

#ifdef __aarch64__

#include "MNNAsmGlobal.h"

.text
.align 5

//-----------------------------------------------------------------------------
// void NEON_MNNConvRunForUnitDepthWise_BF16(float* dst, const float* src,
//         const float* weight, size_t fw, size_t fh, size_t weight_y_step,
//         size_t dilate_x_step, size_t dilate_y_step)
//
// Accumulates a single depthwise-conv output unit (4 channels) over an
// fw x fh kernel window. src/weight hold bf16 (stored as int16); each tap is
// widened to fp32 via a 16-bit left shift-long, accumulated with fmla, and
// the result is narrowed back to bf16 by truncation before the store.
//
// In:     x0 = dst, x1 = src, x2 = weight, x3 = fw
//         x4 = fh, x5 = weight_y_step, x6 = dilate_x_step, x7 = dilate_y_step
//         (the three step arguments arrive in elements; converted to bytes)
// Temps:  x9 (inner-loop counter), x10 (one-shot scratch) — caller-saved
// NEON:   v0 = fp32 accumulator, v1/v2 = widened src/weight taps
//-----------------------------------------------------------------------------
asm_function NEON_MNNConvRunForUnitDepthWise_BF16

movi    v0.4s, #0                       // accumulator = 0 (also the stored
cbz     x3, .LUnitDone                  //  result when fw == 0 ...)
cbz     x4, .LUnitDone                  //  ... or fh == 0

// Element counts -> byte offsets: sizeof(int16_t) == 2, i.e. << 1.
lsl     x5, x5, #1                      // weight_y_step in bytes
lsl     x6, x6, #1                      // dilate_x_step in bytes
lsl     x7, x7, #1                      // dilate_y_step in bytes

// The inner loop post-increments x1 by x6 per tap, so per row the src
// pointer has already advanced by fw*dilate_x_step; fold that rewind into
// the per-row step.
mul     x10, x3, x6
sub     x7, x7, x10                     // dilate_y_step -= fw * dilate_x_step

// Likewise x2 advances 8 bytes (4 * sizeof(int16_t)) per tap.
lsl     x10, x3, #3
sub     x5, x5, x10                     // weight_y_step -= fw * 8

.LUnitLoopFH:                           // one iteration per kernel row
mov     x9, x3                          // x9 = taps remaining in this row

.LUnitLoopFW:                           // one iteration per kernel tap
ld1     {v1.4h}, [x1], x6               // next 4-channel bf16 src tap
ld1     {v2.4h}, [x2], #8               // next 4-channel bf16 weight tap
shll    v1.4s, v1.4h, #16               // bf16 -> fp32 (widen into high bits)
shll    v2.4s, v2.4h, #16
fmla    v0.4s, v1.4s, v2.4s             // acc += src * weight
subs    x9, x9, #1
bne     .LUnitLoopFW

subs    x4, x4, #1                      // flags survive the flag-neutral adds
add     x1, x1, x7                      // advance src to next kernel row
add     x2, x2, x5                      // advance weight to next kernel row
bne     .LUnitLoopFH

.LUnitDone:
shrn    v0.4h, v0.4s, #16               // fp32 -> bf16 (truncate low 16 bits)
st1     {v0.4h}, [x0]                   // write the 4-channel result

ret

#endif