;******************************************************************************
;* VP9 IDCT SIMD optimizations
;*
;* Copyright (C) 2013 Clément Bœsch <u pkh me>
;* Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

; One 1-D pass of the inverse Walsh-Hadamard transform (VP9 lossless path).
; In/out: m0-m3 (packed words); temporaries: m4, m5 (clobbered).
; Uses word adds/subs and one >>1 only -- no rounding constants needed.
%macro VP9_IWHT4_1D 0
    SWAP                 1, 2, 3
    paddw               m0, m2
    psubw               m3, m1
    psubw               m4, m0, m3          ; 3-operand form, emulated pre-AVX by x86inc
    psraw               m4, 1
    psubw               m5, m4, m1
    SWAP                 5, 1
    psubw               m4, m2
    SWAP                 4, 2
    psubw               m0, m1
    paddw               m3, m2
    SWAP                 3, 2, 1
%endmacro

; (a*x + b*y + round) >> shift
; Dual pmaddwd with rounding on dword lanes:
;   m%1 = (m%2 . %4 + %3) >> 14
;   m%2 = (m%2 . %5 + %3) >> 14   (in place)
; %4/%5 are memory operands holding interleaved word coefficient pairs
; for pmaddwd; %3 is a register/memory dword rounding constant.
%macro VP9_MULSUB_2W_2X 5 ; dst1, dst2/src, round, coefs1, coefs2
    pmaddwd            m%1, m%2, %4
    pmaddwd            m%2,  %5
    paddd              m%1,  %3
    paddd              m%2,  %3
    psrad              m%1,  14
    psrad              m%2,  14
%endmacro

; Full-width variant of VP9_MULSUB_2W_2X: runs the rounded multiply on the
; high-half source in m%6 (into m%7/m%6) and the low-half source in m%2
; (into m%1/m%2), then repacks the dword results back to words.
; Out: m%1, m%2; clobbers m%6, m%7. %5 = rounding constant.
; Coefficient tables pw_m<coef1>_<coef2> / pw_<coef2>_<coef1> are declared
; elsewhere in this file.
%macro VP9_MULSUB_2W_4X 7 ; dst1, dst2, coef1, coef2, rnd, tmp1/src, tmp2
    VP9_MULSUB_2W_2X    %7,  %6,  %5, [pw_m%3_%4], [pw_%4_%3]
    VP9_MULSUB_2W_2X    %1,  %2,  %5, [pw_m%3_%4], [pw_%4_%3]
    packssdw           m%1, m%7
    packssdw           m%2, m%6
%endmacro

; Interleave two word sources and rotate them by the (coef1, coef2) pair
; via VP9_MULSUB_2W_4X. 7-arg form works in place on m%1/m%2; 9-arg form
; reads separate sources m%3/m%4 and leaves m%1/m%2 as destinations only.
; Results: m%1 (dst1), m%2 (dst2); the tmp registers are clobbered.
; NOTE(review): with the usual pw_mA_B word-pair layout this computes
; dst1 = (src1*coef2 - src2*coef1 + rnd) >> 14 and
; dst2 = (src1*coef1 + src2*coef2 + rnd) >> 14 -- confirm against the
; constant tables declared elsewhere in this file.
%macro VP9_UNPACK_MULSUB_2W_4X 7-9 ; dst1, dst2, (src1, src2,) coef1, coef2, rnd, tmp1, tmp2
%if %0 == 7
    punpckhwd          m%6, m%2, m%1
    punpcklwd          m%2, m%1
    VP9_MULSUB_2W_4X   %1, %2, %3, %4, %5, %6, %7
%else
    punpckhwd          m%8, m%4, m%3
    punpcklwd          m%2, m%4, m%3
    VP9_MULSUB_2W_4X   %1, %2, %5, %6, %7, %8, %9
%endif
%endmacro

; Final butterfly of the 4x4 IDCT: combines t0..t3 (m0..m3) into the four
; outputs and reorders them into m0..m3. Clobbers m4 (SUMSUB_BA temp).
%macro VP9_IDCT4_1D_FINALIZE 0
    SUMSUB_BA            w, 3, 2, 4                         ; m3=t3+t0, m2=-t3+t0
    SUMSUB_BA            w, 1, 0, 4                         ; m1=t2+t1, m0=-t2+t1
    SWAP                 0, 3, 2                            ; 3102 -> 0123
%endmacro

; One 1-D pass of the 4x4 IDCT on the coefficients in m0-m3.
; Preconditions set by the caller (confirm at the call sites):
;   SSSE3 path: m6 holds the pmulhrsw scaling constant (presumably
;               a pw_*x2 cos(pi/4) table).
;   SSE2 path:  m7 holds the dword rounding constant.
; Clobbers m4, m5.
%macro VP9_IDCT4_1D 0
%if cpuflag(ssse3)
    SUMSUB_BA            w, 2, 0, 4                         ; m2=IN(0)+IN(2) m0=IN(0)-IN(2)
    pmulhrsw            m2, m6                              ; m2=t0
    pmulhrsw            m0, m6                              ; m0=t1
%else ; <= sse2
    VP9_UNPACK_MULSUB_2W_4X 0, 2, 11585, 11585, m7, 4, 5    ; m0=t1, m2=t0
%endif
    VP9_UNPACK_MULSUB_2W_4X 1, 3, 15137, 6270, m7, 4, 5     ; m1=t2, m3=t3
    VP9_IDCT4_1D_FINALIZE
%endmacro

; One 1-D pass of the 4x4 IADST on the rows in m0-m3 (mmx registers).
; The dword accumulation is done in xmm registers via movq2dq/movdq2q.
; Precondition: xmm5 holds the dword rounding constant (set by the
; caller -- confirm at the call sites).
; Outputs out0..out3 land in m0..m3 after the final SWAP.
; Clobbers xmm0-xmm4 (always) and xmm6/xmm7 (non-SSSE3 path only);
; on SSSE3, out2 is produced cheaply in m3 via pmulhrsw instead.
%macro VP9_IADST4_1D 0
    movq2dq           xmm0, m0
    movq2dq           xmm1, m1
    movq2dq           xmm2, m2
    movq2dq           xmm3, m3
%if cpuflag(ssse3)
    paddw               m3, m0                  ; start of SSSE3 out2 = (in0+in3-in2)*13377
%endif
    punpcklwd         xmm0, xmm1
    punpcklwd         xmm2, xmm3
    pmaddwd           xmm1, xmm0, [pw_5283_13377]
    pmaddwd           xmm4, xmm0, [pw_9929_13377]
%if notcpuflag(ssse3)
    pmaddwd           xmm6, xmm0, [pw_13377_0]
%endif
    pmaddwd           xmm0, [pw_15212_m13377]
    pmaddwd           xmm3, xmm2, [pw_15212_9929]
%if notcpuflag(ssse3)
    pmaddwd           xmm7, xmm2, [pw_m13377_13377]
%endif
    pmaddwd           xmm2, [pw_m5283_m15212]
%if cpuflag(ssse3)
    psubw               m3, m2
%else
    paddd             xmm6, xmm7
%endif
    paddd             xmm0, xmm2
    paddd             xmm3, xmm5              ; xmm5 = rounding constant (see header)
    paddd             xmm2, xmm5
%if notcpuflag(ssse3)
    paddd             xmm6, xmm5
%endif
    paddd             xmm1, xmm3
    paddd             xmm0, xmm3
    paddd             xmm4, xmm2
    psrad             xmm1, 14
    psrad             xmm0, 14
    psrad             xmm4, 14
%if cpuflag(ssse3)
    pmulhrsw            m3, [pw_13377x2]        ; out2
%else
    psrad             xmm6, 14
%endif
    packssdw          xmm0, xmm0
    packssdw          xmm1, xmm1
    packssdw          xmm4, xmm4
%if notcpuflag(ssse3)
    packssdw          xmm6, xmm6
%endif
    movdq2q             m0, xmm0                ; out3
    movdq2q             m1, xmm1                ; out0
    movdq2q             m2, xmm4                ; out1
%if notcpuflag(ssse3)
    movdq2q             m3, xmm6                ; out2
%endif
    SWAP                 0, 1, 2, 3              ; reorder to out0..out3 in m0..m3
%endmacro
