1 /*
2  * Loongson MMI optimizations for libjpeg-turbo
3  *
4  * Copyright (C) 2014-2015, 2018-2019, D. R. Commander.  All Rights Reserved.
5  * Copyright (C) 2016-2018, Loongson Technology Corporation Limited, BeiJing.
6  *                          All Rights Reserved.
7  * Authors:  LiuQingfa <liuqingfa-hf@loongson.cn>
8  *
9  * Based on the x86 SIMD extension for IJG JPEG library
10  * Copyright (C) 1999-2006, MIYASAKA Masaru.
11  *
12  * This software is provided 'as-is', without any express or implied
13  * warranty.  In no event will the authors be held liable for any damages
14  * arising from the use of this software.
15  *
16  * Permission is granted to anyone to use this software for any purpose,
17  * including commercial applications, and to alter it and redistribute it
18  * freely, subject to the following restrictions:
19  *
20  * 1. The origin of this software must not be misrepresented; you must not
21  *    claim that you wrote the original software. If you use this software
22  *    in a product, an acknowledgment in the product documentation would be
23  *    appreciated but is not required.
24  * 2. Altered source versions must be plainly marked as such, and must not be
25  *    misrepresented as being the original software.
26  * 3. This notice may not be removed or altered from any source distribution.
27  */
28 
29 /* FAST INTEGER INVERSE DCT */
30 
31 #include "jsimd_mmi.h"
32 
33 
34 #define CONST_BITS  8
35 #define PASS1_BITS  2
36 
37 #define FIX_1_082  ((short)277)                   /* FIX(1.082392200) */
38 #define FIX_1_414  ((short)362)                   /* FIX(1.414213562) */
39 #define FIX_1_847  ((short)473)                   /* FIX(1.847759065) */
40 #define FIX_2_613  ((short)669)                   /* FIX(2.613125930) */
41 #define FIX_1_613  ((short)(FIX_2_613 - 256 * 3)) /* FIX(2.613125930) - FIX(1) */
42 
43 #define PRE_MULTIPLY_SCALE_BITS  2
44 #define CONST_SHIFT  (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)
45 
46 enum const_index {
47   index_PW_F1082,
48   index_PW_F1414,
49   index_PW_F1847,
50   index_PW_MF1613,
51   index_PB_CENTERJSAMP
52 };
53 
54 static uint64_t const_value[] = {
55   _uint64_set1_pi16(FIX_1_082 << CONST_SHIFT),
56   _uint64_set1_pi16(FIX_1_414 << CONST_SHIFT),
57   _uint64_set1_pi16(FIX_1_847 << CONST_SHIFT),
58   _uint64_set1_pi16(-FIX_1_613 << CONST_SHIFT),
59   _uint64_set1_pi8(CENTERJSAMPLE)
60 };
61 
62 #define PW_F1414        get_const_value(index_PW_F1414)
63 #define PW_F1847        get_const_value(index_PW_F1847)
64 #define PW_MF1613       get_const_value(index_PW_MF1613)
65 #define PW_F1082        get_const_value(index_PW_F1082)
66 #define PB_CENTERJSAMP  get_const_value(index_PB_CENTERJSAMP)
67 
68 
69 #define test_m32_zero(mm32)  (!(*(uint32_t *)&mm32))
70 #define test_m64_zero(mm64)  (!(*(uint64_t *)&mm64))
71 
72 
73 #define DO_IDCT_COMMON() { \
74   tmp7 = _mm_add_pi16(z11, z13); \
75   \
76   tmp11 = _mm_sub_pi16(z11, z13); \
77   tmp11 = _mm_slli_pi16(tmp11, PRE_MULTIPLY_SCALE_BITS); \
78   tmp11 = _mm_mulhi_pi16(tmp11, PW_F1414); \
79   \
80   tmp10 = _mm_slli_pi16(z12, PRE_MULTIPLY_SCALE_BITS); \
81   tmp12 = _mm_slli_pi16(z10, PRE_MULTIPLY_SCALE_BITS); \
82   \
83   /* To avoid overflow... \
84    * \
85    * (Original) \
86    * tmp12 = -2.613125930 * z10 + z5; \
87    * \
88    * (This implementation) \
89    * tmp12 = (-1.613125930 - 1) * z10 + z5; \
90    *       = -1.613125930 * z10 - z10 + z5; \
91    */ \
92   \
93   z5 = _mm_add_pi16(tmp10, tmp12); \
94   z5 = _mm_mulhi_pi16(z5, PW_F1847); \
95   \
96   tmp10 = _mm_mulhi_pi16(tmp10, PW_F1082); \
97   tmp10 = _mm_sub_pi16(tmp10, z5); \
98   tmp12 = _mm_mulhi_pi16(tmp12, PW_MF1613); \
99   tmp12 = _mm_sub_pi16(tmp12, z10); \
100   tmp12 = _mm_sub_pi16(tmp12, z10); \
101   tmp12 = _mm_sub_pi16(tmp12, z10); \
102   tmp12 = _mm_add_pi16(tmp12, z5); \
103   \
104   /* Final output stage */ \
105   \
106   tmp6 = _mm_sub_pi16(tmp12, tmp7); \
107   tmp5 = _mm_sub_pi16(tmp11, tmp6); \
108   tmp4 = _mm_add_pi16(tmp10, tmp5); \
109   \
110   out0 = _mm_add_pi16(tmp0, tmp7); \
111   out7 = _mm_sub_pi16(tmp0, tmp7); \
112   out1 = _mm_add_pi16(tmp1, tmp6); \
113   out6 = _mm_sub_pi16(tmp1, tmp6); \
114   \
115   out2 = _mm_add_pi16(tmp2, tmp5); \
116   out5 = _mm_sub_pi16(tmp2, tmp5); \
117   out4 = _mm_add_pi16(tmp3, tmp4); \
118   out3 = _mm_sub_pi16(tmp3, tmp4); \
119 }
120 
121 #define DO_IDCT_PASS1(iter) { \
122   __m64 col0l, col1l, col2l, col3l, col4l, col5l, col6l, col7l; \
123   __m64 quant0l, quant1l, quant2l, quant3l; \
124   __m64 quant4l, quant5l, quant6l, quant7l; \
125   __m64 row01a, row01b, row01c, row01d, row23a, row23b, row23c, row23d; \
126   __m64 row0l, row0h, row1l, row1h, row2l, row2h, row3l, row3h; \
127   __m32 col0a, col1a, mm0; \
128   \
129   col0a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 1]); \
130   col1a = _mm_load_si32((__m32 *)&inptr[DCTSIZE * 2]); \
131   mm0 = _mm_or_si32(col0a, col1a); \
132   \
133   if (test_m32_zero(mm0)) { \
134     __m64 mm1, mm2; \
135     \
136     col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]); \
137     col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]); \
138     col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]); \
139     col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]); \
140     col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]); \
141     col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]); \
142     col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]); \
143     col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]); \
144     \
145     mm1 = _mm_or_si64(col1l, col3l); \
146     mm2 = _mm_or_si64(col2l, col4l); \
147     mm1 = _mm_or_si64(mm1, col5l); \
148     mm2 = _mm_or_si64(mm2, col6l); \
149     mm1 = _mm_or_si64(mm1, col7l); \
150     mm1 = _mm_or_si64(mm1, mm2); \
151     \
152     if (test_m64_zero(mm1)) { \
153       __m64 dcval, dcvall, dcvalh, row0, row1, row2, row3; \
154       \
155       /* AC terms all zero */ \
156       \
157       quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
158       \
159       dcval = _mm_mullo_pi16(col0l, quant0l);    /* dcval=(00 10 20 30) */ \
160       \
161       dcvall = _mm_unpacklo_pi16(dcval, dcval);  /* dcvall=(00 00 10 10) */ \
162       dcvalh = _mm_unpackhi_pi16(dcval, dcval);  /* dcvalh=(20 20 30 30) */ \
163       \
164       row0 = _mm_unpacklo_pi32(dcvall, dcvall);  /* row0=(00 00 00 00) */ \
165       row1 = _mm_unpackhi_pi32(dcvall, dcvall);  /* row1=(10 10 10 10) */ \
166       row2 = _mm_unpacklo_pi32(dcvalh, dcvalh);  /* row2=(20 20 20 20) */ \
167       row3 = _mm_unpackhi_pi32(dcvalh, dcvalh);  /* row3=(30 30 30 30) */ \
168       \
169       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0); \
170       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0); \
171       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1); \
172       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1); \
173       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2); \
174       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2); \
175       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3); \
176       _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3); \
177       \
178       goto nextcolumn##iter; \
179     } \
180   } \
181   \
182   /* Even part */ \
183   \
184   col0l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 0]);  /* (00 10 20 30) */ \
185   col2l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 2]);  /* (02 12 22 32) */ \
186   col4l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 4]);  /* (04 14 24 34) */ \
187   col6l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 6]);  /* (06 16 26 36) */ \
188   \
189   quant0l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 0]); \
190   quant2l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 2]); \
191   quant4l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 4]); \
192   quant6l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 6]); \
193   \
194   tmp0 = _mm_mullo_pi16(col0l, quant0l); \
195   tmp1 = _mm_mullo_pi16(col2l, quant2l); \
196   tmp2 = _mm_mullo_pi16(col4l, quant4l); \
197   tmp3 = _mm_mullo_pi16(col6l, quant6l); \
198   \
199   tmp10 = _mm_add_pi16(tmp0, tmp2); \
200   tmp11 = _mm_sub_pi16(tmp0, tmp2); \
201   tmp13 = _mm_add_pi16(tmp1, tmp3); \
202   \
203   tmp12 = _mm_sub_pi16(tmp1, tmp3); \
204   tmp12 = _mm_slli_pi16(tmp12, PRE_MULTIPLY_SCALE_BITS); \
205   tmp12 = _mm_mulhi_pi16(tmp12, PW_F1414); \
206   tmp12 = _mm_sub_pi16(tmp12, tmp13); \
207   \
208   tmp0 = _mm_add_pi16(tmp10, tmp13); \
209   tmp3 = _mm_sub_pi16(tmp10, tmp13); \
210   tmp1 = _mm_add_pi16(tmp11, tmp12); \
211   tmp2 = _mm_sub_pi16(tmp11, tmp12); \
212   \
213   /* Odd part */ \
214   \
215   col1l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 1]);  /* (01 11 21 31) */ \
216   col3l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 3]);  /* (03 13 23 33) */ \
217   col5l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 5]);  /* (05 15 25 35) */ \
218   col7l = _mm_load_si64((__m64 *)&inptr[DCTSIZE * 7]);  /* (07 17 27 37) */ \
219   \
220   quant1l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 1]); \
221   quant3l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 3]); \
222   quant5l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 5]); \
223   quant7l = _mm_load_si64((__m64 *)&quantptr[DCTSIZE * 7]); \
224   \
225   tmp4 = _mm_mullo_pi16(col1l, quant1l); \
226   tmp5 = _mm_mullo_pi16(col3l, quant3l); \
227   tmp6 = _mm_mullo_pi16(col5l, quant5l); \
228   tmp7 = _mm_mullo_pi16(col7l, quant7l); \
229   \
230   z13 = _mm_add_pi16(tmp6, tmp5); \
231   z10 = _mm_sub_pi16(tmp6, tmp5); \
232   z11 = _mm_add_pi16(tmp4, tmp7); \
233   z12 = _mm_sub_pi16(tmp4, tmp7); \
234   \
235   DO_IDCT_COMMON() \
236   \
237   /* out0=(00 10 20 30), out1=(01 11 21 31) */ \
238   /* out2=(02 12 22 32), out3=(03 13 23 33) */ \
239   /* out4=(04 14 24 34), out5=(05 15 25 35) */ \
240   /* out6=(06 16 26 36), out7=(07 17 27 37) */ \
241   \
242   /* Transpose coefficients */ \
243   \
244   row01a = _mm_unpacklo_pi16(out0, out1);     /* row01a=(00 01 10 11) */ \
245   row23a = _mm_unpackhi_pi16(out0, out1);     /* row23a=(20 21 30 31) */ \
246   row01d = _mm_unpacklo_pi16(out6, out7);     /* row01d=(06 07 16 17) */ \
247   row23d = _mm_unpackhi_pi16(out6, out7);     /* row23d=(26 27 36 37) */ \
248   \
249   row01b = _mm_unpacklo_pi16(out2, out3);     /* row01b=(02 03 12 13) */ \
250   row23b = _mm_unpackhi_pi16(out2, out3);     /* row23b=(22 23 32 33) */ \
251   row01c = _mm_unpacklo_pi16(out4, out5);     /* row01c=(04 05 14 15) */ \
252   row23c = _mm_unpackhi_pi16(out4, out5);     /* row23c=(24 25 34 35) */ \
253   \
254   row0l = _mm_unpacklo_pi32(row01a, row01b);  /* row0l=(00 01 02 03) */ \
255   row1l = _mm_unpackhi_pi32(row01a, row01b);  /* row1l=(10 11 12 13) */ \
256   row2l = _mm_unpacklo_pi32(row23a, row23b);  /* row2l=(20 21 22 23) */ \
257   row3l = _mm_unpackhi_pi32(row23a, row23b);  /* row3l=(30 31 32 33) */ \
258   \
259   row0h = _mm_unpacklo_pi32(row01c, row01d);  /* row0h=(04 05 06 07) */ \
260   row1h = _mm_unpackhi_pi32(row01c, row01d);  /* row1h=(14 15 16 17) */ \
261   row2h = _mm_unpacklo_pi32(row23c, row23d);  /* row2h=(24 25 26 27) */ \
262   row3h = _mm_unpackhi_pi32(row23c, row23d);  /* row3h=(34 35 36 37) */ \
263   \
264   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0], row0l); \
265   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 0 + 4], row0h); \
266   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1], row1l); \
267   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 1 + 4], row1h); \
268   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2], row2l); \
269   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 2 + 4], row2h); \
270   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3], row3l); \
271   _mm_store_si64((__m64 *)&wsptr[DCTSIZE * 3 + 4], row3h); \
272 }
273 
274 #define DO_IDCT_PASS2(ctr) { \
275   __m64 row0l, row1l, row2l, row3l, row4l, row5l, row6l, row7l; \
276   __m64 col0123a, col0123b, col0123c, col0123d; \
277   __m64 col01l, col01h, col23l, col23h; \
278   __m64 col0, col1, col2, col3; \
279   __m64 row06, row17, row24, row35; \
280   \
281   row0l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 0]);  /* (00 01 02 03) */ \
282   row1l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 1]);  /* (10 11 12 13) */ \
283   row2l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 2]);  /* (20 21 22 23) */ \
284   row3l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 3]);  /* (30 31 32 33) */ \
285   row4l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 4]);  /* (40 41 42 43) */ \
286   row5l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 5]);  /* (50 51 52 53) */ \
287   row6l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 6]);  /* (60 61 62 63) */ \
288   row7l = _mm_load_si64((__m64 *)&wsptr[DCTSIZE * 7]);  /* (70 71 72 73) */ \
289   \
290   /* Even part */ \
291   \
292   tmp10 = _mm_add_pi16(row0l, row4l); \
293   tmp11 = _mm_sub_pi16(row0l, row4l); \
294   tmp13 = _mm_add_pi16(row2l, row6l); \
295   \
296   tmp12 = _mm_sub_pi16(row2l, row6l); \
297   tmp12 = _mm_slli_pi16(tmp12, PRE_MULTIPLY_SCALE_BITS); \
298   tmp12 = _mm_mulhi_pi16(tmp12, PW_F1414); \
299   tmp12 = _mm_sub_pi16(tmp12, tmp13); \
300   \
301   tmp0 = _mm_add_pi16(tmp10, tmp13); \
302   tmp3 = _mm_sub_pi16(tmp10, tmp13); \
303   tmp1 = _mm_add_pi16(tmp11, tmp12); \
304   tmp2 = _mm_sub_pi16(tmp11, tmp12); \
305   \
306   /* Odd part */ \
307   \
308   z13 = _mm_add_pi16(row5l, row3l); \
309   z10 = _mm_sub_pi16(row5l, row3l); \
310   z11 = _mm_add_pi16(row1l, row7l); \
311   z12 = _mm_sub_pi16(row1l, row7l); \
312   \
313   DO_IDCT_COMMON() \
314   \
315   /* out0=(00 01 02 03), out1=(10 11 12 13) */ \
316   /* out2=(20 21 22 23), out3=(30 31 32 33) */ \
317   /* out4=(40 41 42 43), out5=(50 51 52 53) */ \
318   /* out6=(60 61 62 63), out7=(70 71 72 73) */ \
319   \
320   out0 = _mm_srai_pi16(out0, PASS1_BITS + 3); \
321   out1 = _mm_srai_pi16(out1, PASS1_BITS + 3); \
322   out2 = _mm_srai_pi16(out2, PASS1_BITS + 3); \
323   out3 = _mm_srai_pi16(out3, PASS1_BITS + 3); \
324   out4 = _mm_srai_pi16(out4, PASS1_BITS + 3); \
325   out5 = _mm_srai_pi16(out5, PASS1_BITS + 3); \
326   out6 = _mm_srai_pi16(out6, PASS1_BITS + 3); \
327   out7 = _mm_srai_pi16(out7, PASS1_BITS + 3); \
328   \
329   row06 = _mm_packs_pi16(out0, out6);  /* row06=(00 01 02 03 60 61 62 63) */ \
330   row17 = _mm_packs_pi16(out1, out7);  /* row17=(10 11 12 13 70 71 72 73) */ \
331   row24 = _mm_packs_pi16(out2, out4);  /* row24=(20 21 22 23 40 41 42 43) */ \
332   row35 = _mm_packs_pi16(out3, out5);  /* row35=(30 31 32 33 50 51 52 53) */ \
333   \
334   row06 = _mm_add_pi8(row06, PB_CENTERJSAMP); \
335   row17 = _mm_add_pi8(row17, PB_CENTERJSAMP); \
336   row24 = _mm_add_pi8(row24, PB_CENTERJSAMP); \
337   row35 = _mm_add_pi8(row35, PB_CENTERJSAMP); \
338   \
339   /* Transpose coefficients */ \
340   \
341   col0123a = _mm_unpacklo_pi8(row06, row17);  /* col0123a=(00 10 01 11 02 12 03 13) */ \
342   col0123d = _mm_unpackhi_pi8(row06, row17);  /* col0123d=(60 70 61 71 62 72 63 73) */ \
343   col0123b = _mm_unpacklo_pi8(row24, row35);  /* col0123b=(20 30 21 31 22 32 23 33) */ \
344   col0123c = _mm_unpackhi_pi8(row24, row35);  /* col0123c=(40 50 41 51 42 52 43 53) */ \
345   \
346   col01l = _mm_unpacklo_pi16(col0123a, col0123b);  /* col01l=(00 10 20 30 01 11 21 31) */ \
347   col23l = _mm_unpackhi_pi16(col0123a, col0123b);  /* col23l=(02 12 22 32 03 13 23 33) */ \
348   col01h = _mm_unpacklo_pi16(col0123c, col0123d);  /* col01h=(40 50 60 70 41 51 61 71) */ \
349   col23h = _mm_unpackhi_pi16(col0123c, col0123d);  /* col23h=(42 52 62 72 43 53 63 73) */ \
350   \
351   col0 = _mm_unpacklo_pi32(col01l, col01h);   /* col0=(00 10 20 30 40 50 60 70) */ \
352   col1 = _mm_unpackhi_pi32(col01l, col01h);   /* col1=(01 11 21 31 41 51 61 71) */ \
353   col2 = _mm_unpacklo_pi32(col23l, col23h);   /* col2=(02 12 22 32 42 52 62 72) */ \
354   col3 = _mm_unpackhi_pi32(col23l, col23h);   /* col3=(03 13 23 33 43 53 63 73) */ \
355   \
356   _mm_store_si64((__m64 *)(output_buf[ctr + 0] + output_col), col0); \
357   _mm_store_si64((__m64 *)(output_buf[ctr + 1] + output_col), col1); \
358   _mm_store_si64((__m64 *)(output_buf[ctr + 2] + output_col), col2); \
359   _mm_store_si64((__m64 *)(output_buf[ctr + 3] + output_col), col3); \
360 }
361 
jsimd_idct_ifast_mmi(void * dct_table,JCOEFPTR coef_block,JSAMPARRAY output_buf,JDIMENSION output_col)362 void jsimd_idct_ifast_mmi(void *dct_table, JCOEFPTR coef_block,
363                           JSAMPARRAY output_buf, JDIMENSION output_col)
364 {
365   __m64 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
366   __m64 tmp10, tmp11, tmp12, tmp13;
367   __m64 out0, out1, out2, out3, out4, out5, out6, out7;
368   __m64 z5, z10, z11, z12, z13;
369   JCOEFPTR inptr;
370   ISLOW_MULT_TYPE *quantptr;
371   JCOEF *wsptr;
372   JCOEF workspace[DCTSIZE2];  /* buffers data between passes */
373 
374   /* Pass 1: process columns. */
375 
376   inptr = coef_block;
377   quantptr = (ISLOW_MULT_TYPE *)dct_table;
378   wsptr = workspace;
379 
380   DO_IDCT_PASS1(1)
381 nextcolumn1:
382   inptr += 4;
383   quantptr += 4;
384   wsptr += DCTSIZE * 4;
385   DO_IDCT_PASS1(2)
386 nextcolumn2:
387 
388   /* Pass 2: process rows. */
389 
390   wsptr = workspace;
391 
392   DO_IDCT_PASS2(0)
393   wsptr += 4;
394   DO_IDCT_PASS2(4)
395 }
396