; Chinese AVS video (AVS1-P2, JiZhun profile) decoder
; Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
;
; MMX-optimized DSP functions, based on H.264 optimizations by
; Michael Niedermayer and Loren Merritt
; Conversion from gcc syntax to x264asm syntax with modifications
; by Ronald S. Bultje <rsbultje@gmail.com>
;
; This file is part of FFmpeg.
;
; FFmpeg is free software; you can redistribute it and/or
; modify it under the terms of the GNU Lesser General Public
; License as published by the Free Software Foundation; either
; version 2.1 of the License, or (at your option) any later version.
;
; FFmpeg is distributed in the hope that it will be useful,
; but WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
; Lesser General Public License for more details.
;
; You should have received a copy of the GNU Lesser General Public License
; along with FFmpeg; if not, write to the Free Software Foundation,
; Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

%include "libavutil/x86/x86util.asm"

cextern pw_4
cextern pw_64

SECTION .text

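; CAVS_IDCT8_1D: one 1-D pass of the 8-point CAVS inverse transform,
; applied to an mmsize-wide slice of eight coefficient rows (16-byte
; stride) at %1, with rounding bias %2 added to the even part.  With
; init_load (%3) == 0, src1/src3/src5/src7 are taken from the registers
; left by the previous pass instead of being reloaded.  The odd half
; computes
;     a0 = 3*src1 - 2*src7        a1 = 3*src3 + 2*src5
;     a2 = 2*src3 - 3*src5        a3 = 2*src1 + 3*src7
; and the even half
;     a6 = 10*src2 + 4*src6       a7 = 4*src2 - 10*src6
; the results dst0..dst7 are left in m7, m5, m3, m1, m0, m2, m4, m6.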
%macro CAVS_IDCT8_1D 2-3 1 ; source, round, init_load
%if %3 == 1
    mova            m4, [%1+7*16]       ; m4 = src7
    mova            m5, [%1+1*16]       ; m5 = src1
    mova            m2, [%1+5*16]       ; m2 = src5
    mova            m7, [%1+3*16]       ; m7 = src3
%else
    SWAP             1, 7
    SWAP             4, 6
%endif
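    ; duplicate the four odd-index sources; the 2*src and 3*src terms
    ; below are built with paddw instead of multiplies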
    mova            m0, m4
    mova            m3, m5
    mova            m6, m2
    mova            m1, m7

    paddw           m4, m4              ; m4 = 2*src7
    paddw           m3, m3              ; m3 = 2*src1
    paddw           m6, m6              ; m6 = 2*src5
    paddw           m1, m1              ; m1 = 2*src3
    paddw           m0, m4              ; m0 = 3*src7
    paddw           m5, m3              ; m5 = 3*src1
    paddw           m2, m6              ; m2 = 3*src5
    paddw           m7, m1              ; m7 = 3*src3
    psubw           m5, m4              ; m5 = 3*src1 - 2*src7 = a0
    paddw           m7, m6              ; m7 = 3*src3 + 2*src5 = a1
    psubw           m1, m2              ; m1 = 2*src3 - 3*src5 = a2
    paddw           m3, m0              ; m3 = 2*src1 + 3*src7 = a3
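    ; SUMSUB_BA (x86util) leaves the sum in its first register argument
    ; and the difference in the second; it drives all butterflies below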

    mova            m4, m5
    mova            m6, m7
    mova            m0, m3
    mova            m2, m1
    SUMSUB_BA     w, 7, 5               ; m7 = a0 + a1, m5 = a0 - a1
    paddw           m7, m3              ; m7 = a0 + a1 + a3
    paddw           m5, m1              ; m5 = a0 - a1 + a2
    paddw           m7, m7              ; m7 = 2*(a0 + a1 + a3)
    paddw           m5, m5              ; m5 = 2*(a0 - a1 + a2)
    paddw           m7, m6              ; m7 = b4
    paddw           m5, m4              ; m5 = b5

    SUMSUB_BA     w, 1, 3               ; m1 = a3 + a2, m3 = a3 - a2
    psubw           m4, m1              ; m4 = a0 - a2 - a3
    mova            m1, m4              ; m1 = a0 - a2 - a3
    psubw           m3, m6              ; m3 = a3 - a2 - a1
    paddw           m1, m1              ; m1 = 2*(a0 - a2 - a3)
    paddw           m3, m3              ; m3 = 2*(a3 - a2 - a1)
    psubw           m1, m2              ; m1 = b7
    paddw           m3, m0              ; m3 = b6
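    ; odd half done: b4 = 2*(a0+a1+a3) + a1, b5 = 2*(a0-a1+a2) + a0,
    ; b6 = 2*(a3-a2-a1) + a3, b7 = 2*(a0-a2-a3) - a2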

    mova            m2, [%1+2*16]       ; m2 = src2
    mova            m6, [%1+6*16]       ; m6 = src6
    mova            m4, m2
    mova            m0, m6
    psllw           m4, 2               ; m4 = 4*src2
    psllw           m6, 2               ; m6 = 4*src6
    paddw           m2, m4              ; m2 = 5*src2
    paddw           m0, m6              ; m0 = 5*src6
    paddw           m2, m2              ; m2 = 10*src2
    paddw           m0, m0              ; m0 = 10*src6
    psubw           m4, m0              ; m4 = 4*src2 - 10*src6 = a7
    paddw           m6, m2              ; m6 = 4*src6 + 10*src2 = a6
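    ; even-half cross terms: weights 10 and 4 on src2/src6, built with
    ; shift-and-add (psllw/paddw) rather than pmullw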

    mova            m2, [%1+0*16]       ; m2 = src0
    mova            m0, [%1+4*16]       ; m0 = src4
    SUMSUB_BA     w, 0, 2               ; m0 = src0 + src4, m2 = src0 - src4
    psllw           m0, 3               ; m0 = 8*(src0 + src4)
    psllw           m2, 3               ; m2 = 8*(src0 - src4)
    paddw           m0, %2              ; add rounding bias
    paddw           m2, %2              ; add rounding bias
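    ; %2 is pw_4 before the >> 3 of pass 1 and pw_64 before the >> 7 of
    ; pass 2, giving round-to-nearest in both passes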

    SUMSUB_BA     w, 6, 0               ; m6 = a4 + a6, m0 = a4 - a6
    SUMSUB_BA     w, 4, 2               ; m4 = a5 + a7, m2 = a5 - a7
    SUMSUB_BA     w, 7, 6               ; m7 = dst0, m6 = dst7
    SUMSUB_BA     w, 5, 4               ; m5 = dst1, m4 = dst6
    SUMSUB_BA     w, 3, 2               ; m3 = dst2, m2 = dst5
    SUMSUB_BA     w, 1, 0               ; m1 = dst3, m0 = dst4
%endmacro

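; MMX: mmsize is 8, so the 8x8 block is processed as two slices of four
; columns.  Pass 1 transforms a slice and transposes it into a
; 16-byte-per-row temporary on the stack; pass 2 transforms the
; transposed data and writes the final rows to outq.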
INIT_MMX mmx
cglobal cavs_idct8, 2, 4, 8, 8 * 16, out, in, cnt, tmp
    mov           cntd, 2
    mov           tmpq, rsp

.loop_1:
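    ; transform four columns and drop the 3 fractional bits (the pw_4
    ; bias was added inside the macro), then transpose the two 4x4
    ; quadrants: even 8-byte offsets take dst0..3, odd offsets dst4..7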
    CAVS_IDCT8_1D  inq, [pw_4]
    psraw           m7, 3
    psraw           m6, 3
    psraw           m5, 3
    psraw           m4, 3
    psraw           m3, 3
    psraw           m2, 3
    psraw           m1, 3
    psraw           m0, 3
    mova        [tmpq], m7
    TRANSPOSE4x4W    0, 2, 4, 6, 7
    mova    [tmpq+1*8], m0
    mova    [tmpq+3*8], m2
    mova    [tmpq+5*8], m4
    mova    [tmpq+7*8], m6
    mova            m7, [tmpq]
    TRANSPOSE4x4W    7, 5, 3, 1, 0
    mova    [tmpq+0*8], m7
    mova    [tmpq+2*8], m5
    mova    [tmpq+4*8], m3
    mova    [tmpq+6*8], m1

    add            inq, mmsize         ; advance to columns 4-7
    add           tmpq, 64             ; advance four 16-byte rows
    dec           cntd
    jg .loop_1

    mov           cntd, 2
    mov           tmpq, rsp
.loop_2:
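    ; second pass over the transposed halves: >> 7 finishes the pw_64
    ; rounding, and the stores put dst0..dst7 (held in m7, m5, m3, m1,
    ; m0, m2, m4, m6) back into row order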
    CAVS_IDCT8_1D tmpq, [pw_64]
    psraw           m7, 7
    psraw           m6, 7
    psraw           m5, 7
    psraw           m4, 7
    psraw           m3, 7
    psraw           m2, 7
    psraw           m1, 7
    psraw           m0, 7

    mova   [outq+0*16], m7
    mova   [outq+1*16], m5
    mova   [outq+2*16], m3
    mova   [outq+3*16], m1
    mova   [outq+4*16], m0
    mova   [outq+5*16], m2
    mova   [outq+6*16], m4
    mova   [outq+7*16], m6

    add           outq, mmsize         ; step to the right half of each row
    add           tmpq, mmsize
    dec           cntd
    jg .loop_2

    RET

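; SSE2: a whole 8-word row fits in one xmm register, so a single 8x8
; transpose replaces the MMX 4x4 shuffling.  x86-64 uses a ninth xmm
; register as the transpose temporary; x86-32 spills through the stack.
; Only rows 0/2/4/6 are stored for the second pass; the odd rows stay
; live in registers via the init_load=0 path of the macro.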
INIT_XMM sse2
cglobal cavs_idct8, 2, 2, 8 + ARCH_X86_64, 0 - 8 * 16, out, in
    CAVS_IDCT8_1D  inq, [pw_4]
    psraw           m7, 3
    psraw           m6, 3
    psraw           m5, 3
    psraw           m4, 3
    psraw           m3, 3
    psraw           m2, 3
    psraw           m1, 3
    psraw           m0, 3
%if ARCH_X86_64
    TRANSPOSE8x8W    7, 5, 3, 1, 0, 2, 4, 6, 8
    mova    [rsp+4*16], m0
%else
    mova    [rsp+0*16], m4
    TRANSPOSE8x8W    7, 5, 3, 1, 0, 2, 4, 6, [rsp+0*16], [rsp+4*16], 1
%endif
    mova    [rsp+0*16], m7
    mova    [rsp+2*16], m3
    mova    [rsp+6*16], m4
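    ; rows 1/3/5/7 enter the second pass in registers (init_load = 0);
    ; rows 0/2/4/6 are reloaded from the stack stores above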
    CAVS_IDCT8_1D  rsp, [pw_64], 0
    psraw           m7, 7
    psraw           m6, 7
    psraw           m5, 7
    psraw           m4, 7
    psraw           m3, 7
    psraw           m2, 7
    psraw           m1, 7
    psraw           m0, 7

    mova   [outq+0*16], m7
    mova   [outq+1*16], m5
    mova   [outq+2*16], m3
    mova   [outq+3*16], m1
    mova   [outq+4*16], m0
    mova   [outq+5*16], m2
    mova   [outq+6*16], m4
    mova   [outq+7*16], m6
    RET