1 /*
2 * Copyright (C) 2008 Nicolai Haehnle.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28 /**
29 * @file
30 *
31 * Shareable transformations that transform "special" ALU instructions
32 * into ALU instructions that are supported by hardware.
33 *
34 */
35
36 #include "radeon_program_alu.h"
37
38 #include "radeon_compiler.h"
39 #include "radeon_compiler_util.h"
40
41
/**
 * Insert a new one-source instruction after @p after.
 *
 * If @p base is non-NULL, its fields are copied into the new instruction
 * first (saturation, write-relation, etc.); opcode, destination and the
 * single source operand are then overwritten with the given values.
 */
static struct rc_instruction *emit1(
	struct radeon_compiler * c, struct rc_instruction * after,
	rc_opcode Opcode, struct rc_sub_instruction * base,
	struct rc_dst_register DstReg, struct rc_src_register SrcReg)
{
	struct rc_instruction *new_inst = rc_insert_new_instruction(c, after);

	if (base)
		new_inst->U.I = *base;

	new_inst->U.I.Opcode = Opcode;
	new_inst->U.I.DstReg = DstReg;
	new_inst->U.I.SrcReg[0] = SrcReg;
	return new_inst;
}
58
/**
 * Insert a new two-source instruction after @p after.
 *
 * Like emit1(), but fills both SrcReg[0] and SrcReg[1].
 */
static struct rc_instruction *emit2(
	struct radeon_compiler * c, struct rc_instruction * after,
	rc_opcode Opcode, struct rc_sub_instruction * base,
	struct rc_dst_register DstReg,
	struct rc_src_register SrcReg0, struct rc_src_register SrcReg1)
{
	struct rc_instruction *new_inst = rc_insert_new_instruction(c, after);

	if (base)
		new_inst->U.I = *base;

	new_inst->U.I.Opcode = Opcode;
	new_inst->U.I.DstReg = DstReg;
	new_inst->U.I.SrcReg[0] = SrcReg0;
	new_inst->U.I.SrcReg[1] = SrcReg1;
	return new_inst;
}
77
/**
 * Insert a new three-source instruction after @p after.
 *
 * Like emit1(), but fills SrcReg[0..2].
 */
static struct rc_instruction *emit3(
	struct radeon_compiler * c, struct rc_instruction * after,
	rc_opcode Opcode, struct rc_sub_instruction * base,
	struct rc_dst_register DstReg,
	struct rc_src_register SrcReg0, struct rc_src_register SrcReg1,
	struct rc_src_register SrcReg2)
{
	struct rc_instruction *new_inst = rc_insert_new_instruction(c, after);

	if (base)
		new_inst->U.I = *base;

	new_inst->U.I.Opcode = Opcode;
	new_inst->U.I.DstReg = DstReg;
	new_inst->U.I.SrcReg[0] = SrcReg0;
	new_inst->U.I.SrcReg[1] = SrcReg1;
	new_inst->U.I.SrcReg[2] = SrcReg2;
	return new_inst;
}
98
dstregtmpmask(int index,int mask)99 static struct rc_dst_register dstregtmpmask(int index, int mask)
100 {
101 struct rc_dst_register dst = {0, 0, 0};
102 dst.File = RC_FILE_TEMPORARY;
103 dst.Index = index;
104 dst.WriteMask = mask;
105 return dst;
106 }
107
/* Immediate-style source operands: RC_FILE_NONE with an all-constant
 * swizzle, so no actual register is read. */

/* (0, 0, 0, 0) */
static const struct rc_src_register builtin_zero = {
	.File = RC_FILE_NONE,
	.Index = 0,
	.Swizzle = RC_SWIZZLE_0000
};

/* (1, 1, 1, 1) */
static const struct rc_src_register builtin_one = {
	.File = RC_FILE_NONE,
	.Index = 0,
	.Swizzle = RC_SWIZZLE_1111
};

/* (0.5, 0.5, 0.5, 0.5) */
static const struct rc_src_register builtin_half = {
	.File = RC_FILE_NONE,
	.Index = 0,
	.Swizzle = RC_SWIZZLE_HHHH
};

/* Template for srcreg()/srcregswz(); identity swizzle, file/index filled in
 * by the caller. */
static const struct rc_src_register srcreg_undefined = {
	.File = RC_FILE_NONE,
	.Index = 0,
	.Swizzle = RC_SWIZZLE_XYZW
};
130
srcreg(int file,int index)131 static struct rc_src_register srcreg(int file, int index)
132 {
133 struct rc_src_register src = srcreg_undefined;
134 src.File = file;
135 src.Index = index;
136 return src;
137 }
138
srcregswz(int file,int index,int swz)139 static struct rc_src_register srcregswz(int file, int index, int swz)
140 {
141 struct rc_src_register src = srcreg_undefined;
142 src.File = file;
143 src.Index = index;
144 src.Swizzle = swz;
145 return src;
146 }
147
absolute(struct rc_src_register reg)148 static struct rc_src_register absolute(struct rc_src_register reg)
149 {
150 struct rc_src_register newreg = reg;
151 newreg.Abs = 1;
152 newreg.Negate = RC_MASK_NONE;
153 return newreg;
154 }
155
negate(struct rc_src_register reg)156 static struct rc_src_register negate(struct rc_src_register reg)
157 {
158 struct rc_src_register newreg = reg;
159 newreg.Negate = newreg.Negate ^ RC_MASK_XYZW;
160 return newreg;
161 }
162
/** Return @p reg with the given per-component swizzle applied on top of its own. */
static struct rc_src_register swizzle(struct rc_src_register reg,
		rc_swizzle x, rc_swizzle y, rc_swizzle z, rc_swizzle w)
{
	reg.Swizzle = combine_swizzles4(reg.Swizzle, x, y, z, w);
	return reg;
}
170
/** Return @p reg with a single component @p comp replicated to all four channels. */
static struct rc_src_register swizzle_smear(struct rc_src_register reg,
		rc_swizzle comp)
{
	return swizzle(reg, comp, comp, comp, comp);
}
176
swizzle_xxxx(struct rc_src_register reg)177 static struct rc_src_register swizzle_xxxx(struct rc_src_register reg)
178 {
179 return swizzle_smear(reg, RC_SWIZZLE_X);
180 }
181
swizzle_yyyy(struct rc_src_register reg)182 static struct rc_src_register swizzle_yyyy(struct rc_src_register reg)
183 {
184 return swizzle_smear(reg, RC_SWIZZLE_Y);
185 }
186
swizzle_zzzz(struct rc_src_register reg)187 static struct rc_src_register swizzle_zzzz(struct rc_src_register reg)
188 {
189 return swizzle_smear(reg, RC_SWIZZLE_Z);
190 }
191
swizzle_wwww(struct rc_src_register reg)192 static struct rc_src_register swizzle_wwww(struct rc_src_register reg)
193 {
194 return swizzle_smear(reg, RC_SWIZZLE_W);
195 }
196
is_dst_safe_to_reuse(struct rc_instruction * inst)197 static int is_dst_safe_to_reuse(struct rc_instruction *inst)
198 {
199 const struct rc_opcode_info *info = rc_get_opcode_info(inst->U.I.Opcode);
200 unsigned i;
201
202 assert(info->HasDstReg);
203
204 if (inst->U.I.DstReg.File != RC_FILE_TEMPORARY)
205 return 0;
206
207 for (i = 0; i < info->NumSrcRegs; i++) {
208 if (inst->U.I.SrcReg[i].File == RC_FILE_TEMPORARY &&
209 inst->U.I.SrcReg[i].Index == inst->U.I.DstReg.Index)
210 return 0;
211 }
212
213 return 1;
214 }
215
try_to_reuse_dst(struct radeon_compiler * c,struct rc_instruction * inst)216 static struct rc_dst_register try_to_reuse_dst(struct radeon_compiler *c,
217 struct rc_instruction *inst)
218 {
219 unsigned tmp;
220
221 if (is_dst_safe_to_reuse(inst))
222 tmp = inst->U.I.DstReg.Index;
223 else
224 tmp = rc_find_free_temporary(c);
225
226 return dstregtmpmask(tmp, inst->U.I.DstReg.WriteMask);
227 }
228
transform_CEIL(struct radeon_compiler * c,struct rc_instruction * inst)229 static void transform_CEIL(struct radeon_compiler* c,
230 struct rc_instruction* inst)
231 {
232 /* Assuming:
233 * ceil(x) = -floor(-x)
234 *
235 * After inlining floor:
236 * ceil(x) = -(-x-frac(-x))
237 *
238 * After simplification:
239 * ceil(x) = x+frac(-x)
240 */
241
242 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
243 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dst, negate(inst->U.I.SrcReg[0]));
244 emit2(c, inst->Prev, RC_OPCODE_ADD, &inst->U.I, inst->U.I.DstReg,
245 inst->U.I.SrcReg[0], srcreg(RC_FILE_TEMPORARY, dst.Index));
246 rc_remove_instruction(inst);
247 }
248
transform_DP2(struct radeon_compiler * c,struct rc_instruction * inst)249 static void transform_DP2(struct radeon_compiler* c,
250 struct rc_instruction* inst)
251 {
252 struct rc_src_register src0 = inst->U.I.SrcReg[0];
253 struct rc_src_register src1 = inst->U.I.SrcReg[1];
254 src0.Negate &= ~(RC_MASK_Z | RC_MASK_W);
255 src0.Swizzle &= ~(63 << (3 * 2));
256 src0.Swizzle |= (RC_SWIZZLE_ZERO << (3 * 2)) | (RC_SWIZZLE_ZERO << (3 * 3));
257 src1.Negate &= ~(RC_MASK_Z | RC_MASK_W);
258 src1.Swizzle &= ~(63 << (3 * 2));
259 src1.Swizzle |= (RC_SWIZZLE_ZERO << (3 * 2)) | (RC_SWIZZLE_ZERO << (3 * 3));
260 emit2(c, inst->Prev, RC_OPCODE_DP3, &inst->U.I, inst->U.I.DstReg, src0, src1);
261 rc_remove_instruction(inst);
262 }
263
264 /**
265 * [1, src0.y*src1.y, src0.z, src1.w]
266 * So basically MUL with lotsa swizzling.
267 */
transform_DST(struct radeon_compiler * c,struct rc_instruction * inst)268 static void transform_DST(struct radeon_compiler* c,
269 struct rc_instruction* inst)
270 {
271 emit2(c, inst->Prev, RC_OPCODE_MUL, &inst->U.I, inst->U.I.DstReg,
272 swizzle(inst->U.I.SrcReg[0], RC_SWIZZLE_ONE, RC_SWIZZLE_Y, RC_SWIZZLE_Z, RC_SWIZZLE_ONE),
273 swizzle(inst->U.I.SrcReg[1], RC_SWIZZLE_ONE, RC_SWIZZLE_Y, RC_SWIZZLE_ONE, RC_SWIZZLE_W));
274 rc_remove_instruction(inst);
275 }
276
transform_FLR(struct radeon_compiler * c,struct rc_instruction * inst)277 static void transform_FLR(struct radeon_compiler* c,
278 struct rc_instruction* inst)
279 {
280 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
281 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dst, inst->U.I.SrcReg[0]);
282 emit2(c, inst->Prev, RC_OPCODE_ADD, &inst->U.I, inst->U.I.DstReg,
283 inst->U.I.SrcReg[0], negate(srcreg(RC_FILE_TEMPORARY, dst.Index)));
284 rc_remove_instruction(inst);
285 }
286
transform_TRUNC(struct radeon_compiler * c,struct rc_instruction * inst)287 static void transform_TRUNC(struct radeon_compiler* c,
288 struct rc_instruction* inst)
289 {
290 /* Definition of trunc:
291 * trunc(x) = (abs(x) - fract(abs(x))) * sgn(x)
292 *
293 * The multiplication by sgn(x) can be simplified using CMP:
294 * y * sgn(x) = (x < 0 ? -y : y)
295 */
296 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
297 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dst, absolute(inst->U.I.SrcReg[0]));
298 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, absolute(inst->U.I.SrcReg[0]),
299 negate(srcreg(RC_FILE_TEMPORARY, dst.Index)));
300 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg, inst->U.I.SrcReg[0],
301 negate(srcreg(RC_FILE_TEMPORARY, dst.Index)), srcreg(RC_FILE_TEMPORARY, dst.Index));
302 rc_remove_instruction(inst);
303 }
304
305 /**
306 * Definition of LIT (from ARB_fragment_program):
307 *
308 * tmp = VectorLoad(op0);
309 * if (tmp.x < 0) tmp.x = 0;
310 * if (tmp.y < 0) tmp.y = 0;
311 * if (tmp.w < -(128.0-epsilon)) tmp.w = -(128.0-epsilon);
312 * else if (tmp.w > 128-epsilon) tmp.w = 128-epsilon;
313 * result.x = 1.0;
314 * result.y = tmp.x;
315 * result.z = (tmp.x > 0) ? RoughApproxPower(tmp.y, tmp.w) : 0.0;
316 * result.w = 1.0;
317 *
318 * The longest path of computation is the one leading to result.z,
319 * consisting of 5 operations. This implementation of LIT takes
320 * 5 slots, if the subsequent optimization passes are clever enough
321 * to pair instructions correctly.
322 */
transform_LIT(struct radeon_compiler * c,struct rc_instruction * inst)323 static void transform_LIT(struct radeon_compiler* c,
324 struct rc_instruction* inst)
325 {
326 unsigned int constant;
327 unsigned int constant_swizzle;
328 unsigned int temp;
329 struct rc_src_register srctemp;
330
331 constant = rc_constants_add_immediate_scalar(&c->Program.Constants, -127.999999, &constant_swizzle);
332
333 if (inst->U.I.DstReg.WriteMask != RC_MASK_XYZW || inst->U.I.DstReg.File != RC_FILE_TEMPORARY) {
334 struct rc_instruction * inst_mov;
335
336 inst_mov = emit1(c, inst,
337 RC_OPCODE_MOV, NULL, inst->U.I.DstReg,
338 srcreg(RC_FILE_TEMPORARY, rc_find_free_temporary(c)));
339
340 inst->U.I.DstReg.File = RC_FILE_TEMPORARY;
341 inst->U.I.DstReg.Index = inst_mov->U.I.SrcReg[0].Index;
342 inst->U.I.DstReg.WriteMask = RC_MASK_XYZW;
343 }
344
345 temp = inst->U.I.DstReg.Index;
346 srctemp = srcreg(RC_FILE_TEMPORARY, temp);
347
348 /* tmp.x = max(0.0, Src.x); */
349 /* tmp.y = max(0.0, Src.y); */
350 /* tmp.w = clamp(Src.z, -128+eps, 128-eps); */
351 emit2(c, inst->Prev, RC_OPCODE_MAX, NULL,
352 dstregtmpmask(temp, RC_MASK_XYW),
353 inst->U.I.SrcReg[0],
354 swizzle(srcreg(RC_FILE_CONSTANT, constant),
355 RC_SWIZZLE_ZERO, RC_SWIZZLE_ZERO, RC_SWIZZLE_ZERO, constant_swizzle&3));
356 emit2(c, inst->Prev, RC_OPCODE_MIN, NULL,
357 dstregtmpmask(temp, RC_MASK_Z),
358 swizzle_wwww(srctemp),
359 negate(srcregswz(RC_FILE_CONSTANT, constant, constant_swizzle)));
360
361 /* tmp.w = Pow(tmp.y, tmp.w) */
362 emit1(c, inst->Prev, RC_OPCODE_LG2, NULL,
363 dstregtmpmask(temp, RC_MASK_W),
364 swizzle_yyyy(srctemp));
365 emit2(c, inst->Prev, RC_OPCODE_MUL, NULL,
366 dstregtmpmask(temp, RC_MASK_W),
367 swizzle_wwww(srctemp),
368 swizzle_zzzz(srctemp));
369 emit1(c, inst->Prev, RC_OPCODE_EX2, NULL,
370 dstregtmpmask(temp, RC_MASK_W),
371 swizzle_wwww(srctemp));
372
373 /* tmp.z = (tmp.x > 0) ? tmp.w : 0.0 */
374 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I,
375 dstregtmpmask(temp, RC_MASK_Z),
376 negate(swizzle_xxxx(srctemp)),
377 swizzle_wwww(srctemp),
378 builtin_zero);
379
380 /* tmp.x, tmp.y, tmp.w = 1.0, tmp.x, 1.0 */
381 emit1(c, inst->Prev, RC_OPCODE_MOV, &inst->U.I,
382 dstregtmpmask(temp, RC_MASK_XYW),
383 swizzle(srctemp, RC_SWIZZLE_ONE, RC_SWIZZLE_X, RC_SWIZZLE_ONE, RC_SWIZZLE_ONE));
384
385 rc_remove_instruction(inst);
386 }
387
transform_LRP(struct radeon_compiler * c,struct rc_instruction * inst)388 static void transform_LRP(struct radeon_compiler* c,
389 struct rc_instruction* inst)
390 {
391 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
392
393 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL,
394 dst,
395 negate(inst->U.I.SrcReg[0]), inst->U.I.SrcReg[2], inst->U.I.SrcReg[2]);
396 emit3(c, inst->Prev, RC_OPCODE_MAD, &inst->U.I,
397 inst->U.I.DstReg,
398 inst->U.I.SrcReg[0], inst->U.I.SrcReg[1], srcreg(RC_FILE_TEMPORARY, dst.Index));
399
400 rc_remove_instruction(inst);
401 }
402
transform_POW(struct radeon_compiler * c,struct rc_instruction * inst)403 static void transform_POW(struct radeon_compiler* c,
404 struct rc_instruction* inst)
405 {
406 struct rc_dst_register tempdst = try_to_reuse_dst(c, inst);
407 struct rc_src_register tempsrc = srcreg(RC_FILE_TEMPORARY, tempdst.Index);
408 tempdst.WriteMask = RC_MASK_W;
409 tempsrc.Swizzle = RC_SWIZZLE_WWWW;
410
411 emit1(c, inst->Prev, RC_OPCODE_LG2, NULL, tempdst, swizzle_xxxx(inst->U.I.SrcReg[0]));
412 emit2(c, inst->Prev, RC_OPCODE_MUL, NULL, tempdst, tempsrc, swizzle_xxxx(inst->U.I.SrcReg[1]));
413 emit1(c, inst->Prev, RC_OPCODE_EX2, &inst->U.I, inst->U.I.DstReg, tempsrc);
414
415 rc_remove_instruction(inst);
416 }
417
418 /* dst = ROUND(src) :
419 * add = src + .5
420 * frac = FRC(add)
421 * dst = add - frac
422 *
423 * According to the GLSL spec, the implementor can decide which way to round
424 * when the fraction is .5. We round down for .5.
425 *
426 */
transform_ROUND(struct radeon_compiler * c,struct rc_instruction * inst)427 static void transform_ROUND(struct radeon_compiler* c,
428 struct rc_instruction* inst)
429 {
430 unsigned int mask = inst->U.I.DstReg.WriteMask;
431 unsigned int frac_index, add_index;
432 struct rc_dst_register frac_dst, add_dst;
433 struct rc_src_register frac_src, add_src;
434
435 /* add = src + .5 */
436 add_index = rc_find_free_temporary(c);
437 add_dst = dstregtmpmask(add_index, mask);
438 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, add_dst, inst->U.I.SrcReg[0],
439 builtin_half);
440 add_src = srcreg(RC_FILE_TEMPORARY, add_dst.Index);
441
442
443 /* frac = FRC(add) */
444 frac_index = rc_find_free_temporary(c);
445 frac_dst = dstregtmpmask(frac_index, mask);
446 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, frac_dst, add_src);
447 frac_src = srcreg(RC_FILE_TEMPORARY, frac_dst.Index);
448
449 /* dst = add - frac */
450 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, inst->U.I.DstReg,
451 add_src, negate(frac_src));
452 rc_remove_instruction(inst);
453 }
454
/* Lower RSQ by folding an explicit absolute value into the operand;
 * the instruction itself is kept. */
static void transform_RSQ(struct radeon_compiler* c,
	struct rc_instruction* inst)
{
	inst->U.I.SrcReg[0] = absolute(inst->U.I.SrcReg[0]);
}
460
transform_SEQ(struct radeon_compiler * c,struct rc_instruction * inst)461 static void transform_SEQ(struct radeon_compiler* c,
462 struct rc_instruction* inst)
463 {
464 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
465
466 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, inst->U.I.SrcReg[0], negate(inst->U.I.SrcReg[1]));
467 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg,
468 negate(absolute(srcreg(RC_FILE_TEMPORARY, dst.Index))), builtin_zero, builtin_one);
469
470 rc_remove_instruction(inst);
471 }
472
transform_SGE(struct radeon_compiler * c,struct rc_instruction * inst)473 static void transform_SGE(struct radeon_compiler* c,
474 struct rc_instruction* inst)
475 {
476 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
477
478 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, inst->U.I.SrcReg[0], negate(inst->U.I.SrcReg[1]));
479 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg,
480 srcreg(RC_FILE_TEMPORARY, dst.Index), builtin_zero, builtin_one);
481
482 rc_remove_instruction(inst);
483 }
484
transform_SGT(struct radeon_compiler * c,struct rc_instruction * inst)485 static void transform_SGT(struct radeon_compiler* c,
486 struct rc_instruction* inst)
487 {
488 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
489
490 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, negate(inst->U.I.SrcReg[0]), inst->U.I.SrcReg[1]);
491 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg,
492 srcreg(RC_FILE_TEMPORARY, dst.Index), builtin_one, builtin_zero);
493
494 rc_remove_instruction(inst);
495 }
496
transform_SLE(struct radeon_compiler * c,struct rc_instruction * inst)497 static void transform_SLE(struct radeon_compiler* c,
498 struct rc_instruction* inst)
499 {
500 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
501
502 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, negate(inst->U.I.SrcReg[0]), inst->U.I.SrcReg[1]);
503 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg,
504 srcreg(RC_FILE_TEMPORARY, dst.Index), builtin_zero, builtin_one);
505
506 rc_remove_instruction(inst);
507 }
508
transform_SLT(struct radeon_compiler * c,struct rc_instruction * inst)509 static void transform_SLT(struct radeon_compiler* c,
510 struct rc_instruction* inst)
511 {
512 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
513
514 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, inst->U.I.SrcReg[0], negate(inst->U.I.SrcReg[1]));
515 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg,
516 srcreg(RC_FILE_TEMPORARY, dst.Index), builtin_one, builtin_zero);
517
518 rc_remove_instruction(inst);
519 }
520
transform_SNE(struct radeon_compiler * c,struct rc_instruction * inst)521 static void transform_SNE(struct radeon_compiler* c,
522 struct rc_instruction* inst)
523 {
524 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
525
526 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL, dst, inst->U.I.SrcReg[0], negate(inst->U.I.SrcReg[1]));
527 emit3(c, inst->Prev, RC_OPCODE_CMP, &inst->U.I, inst->U.I.DstReg,
528 negate(absolute(srcreg(RC_FILE_TEMPORARY, dst.Index))), builtin_one, builtin_zero);
529
530 rc_remove_instruction(inst);
531 }
532
transform_SSG(struct radeon_compiler * c,struct rc_instruction * inst)533 static void transform_SSG(struct radeon_compiler* c,
534 struct rc_instruction* inst)
535 {
536 /* result = sign(x)
537 *
538 * CMP tmp0, -x, 1, 0
539 * CMP tmp1, x, 1, 0
540 * ADD result, tmp0, -tmp1;
541 */
542 struct rc_dst_register dst0;
543 unsigned tmp1;
544
545 /* 0 < x */
546 dst0 = try_to_reuse_dst(c, inst);
547 emit3(c, inst->Prev, RC_OPCODE_CMP, NULL,
548 dst0,
549 negate(inst->U.I.SrcReg[0]),
550 builtin_one,
551 builtin_zero);
552
553 /* x < 0 */
554 tmp1 = rc_find_free_temporary(c);
555 emit3(c, inst->Prev, RC_OPCODE_CMP, NULL,
556 dstregtmpmask(tmp1, inst->U.I.DstReg.WriteMask),
557 inst->U.I.SrcReg[0],
558 builtin_one,
559 builtin_zero);
560
561 /* Either both are zero, or one of them is one and the other is zero. */
562 /* result = tmp0 - tmp1 */
563 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL,
564 inst->U.I.DstReg,
565 srcreg(RC_FILE_TEMPORARY, dst0.Index),
566 negate(srcreg(RC_FILE_TEMPORARY, tmp1)));
567
568 rc_remove_instruction(inst);
569 }
570
transform_SUB(struct radeon_compiler * c,struct rc_instruction * inst)571 static void transform_SUB(struct radeon_compiler* c,
572 struct rc_instruction* inst)
573 {
574 inst->U.I.Opcode = RC_OPCODE_ADD;
575 inst->U.I.SrcReg[1] = negate(inst->U.I.SrcReg[1]);
576 }
577
578 /**
579 * Can be used as a transformation for @ref radeonClauseLocalTransform,
580 * no userData necessary.
581 *
582 * Eliminates the following ALU instructions:
583 * CEIL, DST, FLR, LIT, LRP, POW, SEQ, SGE, SGT, SLE, SLT, SNE, SUB
584 * using:
585 * MOV, ADD, MUL, MAD, FRC, DP3, LG2, EX2, CMP
586 *
587 * Transforms RSQ to Radeon's native RSQ by explicitly setting
588 * absolute value.
589 *
590 * @note should be applicable to R300 and R500 fragment programs.
591 */
radeonTransformALU(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)592 int radeonTransformALU(
593 struct radeon_compiler * c,
594 struct rc_instruction* inst,
595 void* unused)
596 {
597 switch(inst->U.I.Opcode) {
598 case RC_OPCODE_CEIL: transform_CEIL(c, inst); return 1;
599 case RC_OPCODE_DP2: transform_DP2(c, inst); return 1;
600 case RC_OPCODE_DST: transform_DST(c, inst); return 1;
601 case RC_OPCODE_FLR: transform_FLR(c, inst); return 1;
602 case RC_OPCODE_LIT: transform_LIT(c, inst); return 1;
603 case RC_OPCODE_LRP: transform_LRP(c, inst); return 1;
604 case RC_OPCODE_POW: transform_POW(c, inst); return 1;
605 case RC_OPCODE_ROUND: transform_ROUND(c, inst); return 1;
606 case RC_OPCODE_RSQ: transform_RSQ(c, inst); return 1;
607 case RC_OPCODE_SEQ: transform_SEQ(c, inst); return 1;
608 case RC_OPCODE_SGE: transform_SGE(c, inst); return 1;
609 case RC_OPCODE_SGT: transform_SGT(c, inst); return 1;
610 case RC_OPCODE_SLE: transform_SLE(c, inst); return 1;
611 case RC_OPCODE_SLT: transform_SLT(c, inst); return 1;
612 case RC_OPCODE_SNE: transform_SNE(c, inst); return 1;
613 case RC_OPCODE_SSG: transform_SSG(c, inst); return 1;
614 case RC_OPCODE_SUB: transform_SUB(c, inst); return 1;
615 case RC_OPCODE_TRUNC: transform_TRUNC(c, inst); return 1;
616 default:
617 return 0;
618 }
619 }
620
transform_r300_vertex_CMP(struct radeon_compiler * c,struct rc_instruction * inst)621 static void transform_r300_vertex_CMP(struct radeon_compiler* c,
622 struct rc_instruction* inst)
623 {
624 /* There is no decent CMP available, so let's rig one up.
625 * CMP is defined as dst = src0 < 0.0 ? src1 : src2
626 * The following sequence consumes zero to two temps and two extra slots
627 * (the second temp and the second slot is consumed by transform_LRP),
628 * but should be equivalent:
629 *
630 * SLT tmp0, src0, 0.0
631 * LRP dst, tmp0, src1, src2
632 *
633 * Yes, I know, I'm a mad scientist. ~ C. & M. */
634 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
635
636 /* SLT tmp0, src0, 0.0 */
637 emit2(c, inst->Prev, RC_OPCODE_SLT, NULL,
638 dst,
639 inst->U.I.SrcReg[0], builtin_zero);
640
641 /* LRP dst, tmp0, src1, src2 */
642 transform_LRP(c,
643 emit3(c, inst->Prev, RC_OPCODE_LRP, NULL,
644 inst->U.I.DstReg,
645 srcreg(RC_FILE_TEMPORARY, dst.Index), inst->U.I.SrcReg[1], inst->U.I.SrcReg[2]));
646
647 rc_remove_instruction(inst);
648 }
649
transform_r300_vertex_DP2(struct radeon_compiler * c,struct rc_instruction * inst)650 static void transform_r300_vertex_DP2(struct radeon_compiler* c,
651 struct rc_instruction* inst)
652 {
653 struct rc_instruction *next_inst = inst->Next;
654 transform_DP2(c, inst);
655 next_inst->Prev->U.I.Opcode = RC_OPCODE_DP4;
656 }
657
transform_r300_vertex_DP3(struct radeon_compiler * c,struct rc_instruction * inst)658 static void transform_r300_vertex_DP3(struct radeon_compiler* c,
659 struct rc_instruction* inst)
660 {
661 struct rc_src_register src0 = inst->U.I.SrcReg[0];
662 struct rc_src_register src1 = inst->U.I.SrcReg[1];
663 src0.Negate &= ~RC_MASK_W;
664 src0.Swizzle &= ~(7 << (3 * 3));
665 src0.Swizzle |= RC_SWIZZLE_ZERO << (3 * 3);
666 src1.Negate &= ~RC_MASK_W;
667 src1.Swizzle &= ~(7 << (3 * 3));
668 src1.Swizzle |= RC_SWIZZLE_ZERO << (3 * 3);
669 emit2(c, inst->Prev, RC_OPCODE_DP4, &inst->U.I, inst->U.I.DstReg, src0, src1);
670 rc_remove_instruction(inst);
671 }
672
transform_r300_vertex_fix_LIT(struct radeon_compiler * c,struct rc_instruction * inst)673 static void transform_r300_vertex_fix_LIT(struct radeon_compiler* c,
674 struct rc_instruction* inst)
675 {
676 struct rc_dst_register dst = try_to_reuse_dst(c, inst);
677 unsigned constant_swizzle;
678 int constant = rc_constants_add_immediate_scalar(&c->Program.Constants,
679 0.0000000000000000001,
680 &constant_swizzle);
681
682 /* MOV dst, src */
683 dst.WriteMask = RC_MASK_XYZW;
684 emit1(c, inst->Prev, RC_OPCODE_MOV, NULL,
685 dst,
686 inst->U.I.SrcReg[0]);
687
688 /* MAX dst.y, src, 0.00...001 */
689 emit2(c, inst->Prev, RC_OPCODE_MAX, NULL,
690 dstregtmpmask(dst.Index, RC_MASK_Y),
691 srcreg(RC_FILE_TEMPORARY, dst.Index),
692 srcregswz(RC_FILE_CONSTANT, constant, constant_swizzle));
693
694 inst->U.I.SrcReg[0] = srcreg(RC_FILE_TEMPORARY, dst.Index);
695 }
696
transform_r300_vertex_SEQ(struct radeon_compiler * c,struct rc_instruction * inst)697 static void transform_r300_vertex_SEQ(struct radeon_compiler *c,
698 struct rc_instruction *inst)
699 {
700 /* x = y <==> x >= y && y >= x */
701 int tmp = rc_find_free_temporary(c);
702
703 /* x <= y */
704 emit2(c, inst->Prev, RC_OPCODE_SGE, NULL,
705 dstregtmpmask(tmp, inst->U.I.DstReg.WriteMask),
706 inst->U.I.SrcReg[0],
707 inst->U.I.SrcReg[1]);
708
709 /* y <= x */
710 emit2(c, inst->Prev, RC_OPCODE_SGE, NULL,
711 inst->U.I.DstReg,
712 inst->U.I.SrcReg[1],
713 inst->U.I.SrcReg[0]);
714
715 /* x && y = x * y */
716 emit2(c, inst->Prev, RC_OPCODE_MUL, NULL,
717 inst->U.I.DstReg,
718 srcreg(RC_FILE_TEMPORARY, tmp),
719 srcreg(inst->U.I.DstReg.File, inst->U.I.DstReg.Index));
720
721 rc_remove_instruction(inst);
722 }
723
transform_r300_vertex_SNE(struct radeon_compiler * c,struct rc_instruction * inst)724 static void transform_r300_vertex_SNE(struct radeon_compiler *c,
725 struct rc_instruction *inst)
726 {
727 /* x != y <==> x < y || y < x */
728 int tmp = rc_find_free_temporary(c);
729
730 /* x < y */
731 emit2(c, inst->Prev, RC_OPCODE_SLT, NULL,
732 dstregtmpmask(tmp, inst->U.I.DstReg.WriteMask),
733 inst->U.I.SrcReg[0],
734 inst->U.I.SrcReg[1]);
735
736 /* y < x */
737 emit2(c, inst->Prev, RC_OPCODE_SLT, NULL,
738 inst->U.I.DstReg,
739 inst->U.I.SrcReg[1],
740 inst->U.I.SrcReg[0]);
741
742 /* x || y = max(x, y) */
743 emit2(c, inst->Prev, RC_OPCODE_MAX, NULL,
744 inst->U.I.DstReg,
745 srcreg(RC_FILE_TEMPORARY, tmp),
746 srcreg(inst->U.I.DstReg.File, inst->U.I.DstReg.Index));
747
748 rc_remove_instruction(inst);
749 }
750
transform_r300_vertex_SGT(struct radeon_compiler * c,struct rc_instruction * inst)751 static void transform_r300_vertex_SGT(struct radeon_compiler* c,
752 struct rc_instruction* inst)
753 {
754 /* x > y <==> -x < -y */
755 inst->U.I.Opcode = RC_OPCODE_SLT;
756 inst->U.I.SrcReg[0].Negate ^= RC_MASK_XYZW;
757 inst->U.I.SrcReg[1].Negate ^= RC_MASK_XYZW;
758 }
759
transform_r300_vertex_SLE(struct radeon_compiler * c,struct rc_instruction * inst)760 static void transform_r300_vertex_SLE(struct radeon_compiler* c,
761 struct rc_instruction* inst)
762 {
763 /* x <= y <==> -x >= -y */
764 inst->U.I.Opcode = RC_OPCODE_SGE;
765 inst->U.I.SrcReg[0].Negate ^= RC_MASK_XYZW;
766 inst->U.I.SrcReg[1].Negate ^= RC_MASK_XYZW;
767 }
768
transform_r300_vertex_SSG(struct radeon_compiler * c,struct rc_instruction * inst)769 static void transform_r300_vertex_SSG(struct radeon_compiler* c,
770 struct rc_instruction* inst)
771 {
772 /* result = sign(x)
773 *
774 * SLT tmp0, 0, x;
775 * SLT tmp1, x, 0;
776 * ADD result, tmp0, -tmp1;
777 */
778 struct rc_dst_register dst0;
779 unsigned tmp1;
780
781 /* 0 < x */
782 dst0 = try_to_reuse_dst(c, inst);
783 emit2(c, inst->Prev, RC_OPCODE_SLT, NULL,
784 dst0,
785 builtin_zero,
786 inst->U.I.SrcReg[0]);
787
788 /* x < 0 */
789 tmp1 = rc_find_free_temporary(c);
790 emit2(c, inst->Prev, RC_OPCODE_SLT, NULL,
791 dstregtmpmask(tmp1, inst->U.I.DstReg.WriteMask),
792 inst->U.I.SrcReg[0],
793 builtin_zero);
794
795 /* Either both are zero, or one of them is one and the other is zero. */
796 /* result = tmp0 - tmp1 */
797 emit2(c, inst->Prev, RC_OPCODE_ADD, NULL,
798 inst->U.I.DstReg,
799 srcreg(RC_FILE_TEMPORARY, dst0.Index),
800 negate(srcreg(RC_FILE_TEMPORARY, tmp1)));
801
802 rc_remove_instruction(inst);
803 }
804
transform_vertex_TRUNC(struct radeon_compiler * c,struct rc_instruction * inst)805 static void transform_vertex_TRUNC(struct radeon_compiler* c,
806 struct rc_instruction* inst)
807 {
808 struct rc_instruction *next = inst->Next;
809
810 /* next->Prev is removed after each transformation and replaced
811 * by a new instruction. */
812 transform_TRUNC(c, next->Prev);
813 transform_r300_vertex_CMP(c, next->Prev);
814 }
815
816 /**
817 * For use with rc_local_transform, this transforms non-native ALU
818 * instructions of the r300 up to r500 vertex engine.
819 */
r300_transform_vertex_alu(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)820 int r300_transform_vertex_alu(
821 struct radeon_compiler * c,
822 struct rc_instruction* inst,
823 void* unused)
824 {
825 switch(inst->U.I.Opcode) {
826 case RC_OPCODE_CEIL: transform_CEIL(c, inst); return 1;
827 case RC_OPCODE_CMP: transform_r300_vertex_CMP(c, inst); return 1;
828 case RC_OPCODE_DP2: transform_r300_vertex_DP2(c, inst); return 1;
829 case RC_OPCODE_DP3: transform_r300_vertex_DP3(c, inst); return 1;
830 case RC_OPCODE_FLR: transform_FLR(c, inst); return 1;
831 case RC_OPCODE_LIT: transform_r300_vertex_fix_LIT(c, inst); return 1;
832 case RC_OPCODE_LRP: transform_LRP(c, inst); return 1;
833 case RC_OPCODE_SEQ:
834 if (!c->is_r500) {
835 transform_r300_vertex_SEQ(c, inst);
836 return 1;
837 }
838 return 0;
839 case RC_OPCODE_SGT: transform_r300_vertex_SGT(c, inst); return 1;
840 case RC_OPCODE_SLE: transform_r300_vertex_SLE(c, inst); return 1;
841 case RC_OPCODE_SNE:
842 if (!c->is_r500) {
843 transform_r300_vertex_SNE(c, inst);
844 return 1;
845 }
846 return 0;
847 case RC_OPCODE_SSG: transform_r300_vertex_SSG(c, inst); return 1;
848 case RC_OPCODE_SUB: transform_SUB(c, inst); return 1;
849 case RC_OPCODE_TRUNC: transform_vertex_TRUNC(c, inst); return 1;
850 default:
851 return 0;
852 }
853 }
854
sincos_constants(struct radeon_compiler * c,unsigned int * constants)855 static void sincos_constants(struct radeon_compiler* c, unsigned int *constants)
856 {
857 static const float SinCosConsts[2][4] = {
858 {
859 1.273239545, /* 4/PI */
860 -0.405284735, /* -4/(PI*PI) */
861 3.141592654, /* PI */
862 0.2225 /* weight */
863 },
864 {
865 0.75,
866 0.5,
867 0.159154943, /* 1/(2*PI) */
868 6.283185307 /* 2*PI */
869 }
870 };
871 int i;
872
873 for(i = 0; i < 2; ++i)
874 constants[i] = rc_constants_add_immediate_vec4(&c->Program.Constants, SinCosConsts[i]);
875 }
876
877 /**
878 * Approximate sin(x), where x is clamped to (-pi/2, pi/2).
879 *
880 * MUL tmp.xy, src, { 4/PI, -4/(PI^2) }
881 * MAD tmp.x, tmp.y, |src|, tmp.x
882 * MAD tmp.y, tmp.x, |tmp.x|, -tmp.x
883 * MAD dest, tmp.y, weight, tmp.x
884 */
sin_approx(struct radeon_compiler * c,struct rc_instruction * inst,struct rc_dst_register dst,struct rc_src_register src,const unsigned int * constants)885 static void sin_approx(
886 struct radeon_compiler* c, struct rc_instruction * inst,
887 struct rc_dst_register dst, struct rc_src_register src, const unsigned int* constants)
888 {
889 unsigned int tempreg = rc_find_free_temporary(c);
890
891 emit2(c, inst->Prev, RC_OPCODE_MUL, NULL, dstregtmpmask(tempreg, RC_MASK_XY),
892 swizzle_xxxx(src),
893 srcreg(RC_FILE_CONSTANT, constants[0]));
894 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_X),
895 swizzle_yyyy(srcreg(RC_FILE_TEMPORARY, tempreg)),
896 absolute(swizzle_xxxx(src)),
897 swizzle_xxxx(srcreg(RC_FILE_TEMPORARY, tempreg)));
898 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_Y),
899 swizzle_xxxx(srcreg(RC_FILE_TEMPORARY, tempreg)),
900 absolute(swizzle_xxxx(srcreg(RC_FILE_TEMPORARY, tempreg))),
901 negate(swizzle_xxxx(srcreg(RC_FILE_TEMPORARY, tempreg))));
902 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dst,
903 swizzle_yyyy(srcreg(RC_FILE_TEMPORARY, tempreg)),
904 swizzle_wwww(srcreg(RC_FILE_CONSTANT, constants[0])),
905 swizzle_xxxx(srcreg(RC_FILE_TEMPORARY, tempreg)));
906 }
907
908 /**
909 * Translate the trigonometric functions COS and SIN
910 * using only the basic instructions
911 * MOV, ADD, MUL, MAD, FRC
912 */
r300_transform_trig_simple(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)913 int r300_transform_trig_simple(struct radeon_compiler* c,
914 struct rc_instruction* inst,
915 void* unused)
916 {
917 unsigned int constants[2];
918 unsigned int tempreg;
919
920 if (inst->U.I.Opcode != RC_OPCODE_COS &&
921 inst->U.I.Opcode != RC_OPCODE_SIN)
922 return 0;
923
924 tempreg = rc_find_free_temporary(c);
925
926 sincos_constants(c, constants);
927
928 if (inst->U.I.Opcode == RC_OPCODE_COS) {
929 /* MAD tmp.x, src, 1/(2*PI), 0.75 */
930 /* FRC tmp.x, tmp.x */
931 /* MAD tmp.z, tmp.x, 2*PI, -PI */
932 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_W),
933 swizzle_xxxx(inst->U.I.SrcReg[0]),
934 swizzle_zzzz(srcreg(RC_FILE_CONSTANT, constants[1])),
935 swizzle_xxxx(srcreg(RC_FILE_CONSTANT, constants[1])));
936 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dstregtmpmask(tempreg, RC_MASK_W),
937 swizzle_wwww(srcreg(RC_FILE_TEMPORARY, tempreg)));
938 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_W),
939 swizzle_wwww(srcreg(RC_FILE_TEMPORARY, tempreg)),
940 swizzle_wwww(srcreg(RC_FILE_CONSTANT, constants[1])),
941 negate(swizzle_zzzz(srcreg(RC_FILE_CONSTANT, constants[0]))));
942
943 sin_approx(c, inst, inst->U.I.DstReg,
944 swizzle_wwww(srcreg(RC_FILE_TEMPORARY, tempreg)),
945 constants);
946 } else if (inst->U.I.Opcode == RC_OPCODE_SIN) {
947 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_W),
948 swizzle_xxxx(inst->U.I.SrcReg[0]),
949 swizzle_zzzz(srcreg(RC_FILE_CONSTANT, constants[1])),
950 swizzle_yyyy(srcreg(RC_FILE_CONSTANT, constants[1])));
951 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dstregtmpmask(tempreg, RC_MASK_W),
952 swizzle_wwww(srcreg(RC_FILE_TEMPORARY, tempreg)));
953 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_W),
954 swizzle_wwww(srcreg(RC_FILE_TEMPORARY, tempreg)),
955 swizzle_wwww(srcreg(RC_FILE_CONSTANT, constants[1])),
956 negate(swizzle_zzzz(srcreg(RC_FILE_CONSTANT, constants[0]))));
957
958 sin_approx(c, inst, inst->U.I.DstReg,
959 swizzle_wwww(srcreg(RC_FILE_TEMPORARY, tempreg)),
960 constants);
961 } else {
962 struct rc_dst_register dst;
963
964 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_XY),
965 swizzle_xxxx(inst->U.I.SrcReg[0]),
966 swizzle_zzzz(srcreg(RC_FILE_CONSTANT, constants[1])),
967 swizzle(srcreg(RC_FILE_CONSTANT, constants[1]), RC_SWIZZLE_X, RC_SWIZZLE_Y, RC_SWIZZLE_Z, RC_SWIZZLE_W));
968 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dstregtmpmask(tempreg, RC_MASK_XY),
969 srcreg(RC_FILE_TEMPORARY, tempreg));
970 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(tempreg, RC_MASK_XY),
971 srcreg(RC_FILE_TEMPORARY, tempreg),
972 swizzle_wwww(srcreg(RC_FILE_CONSTANT, constants[1])),
973 negate(swizzle_zzzz(srcreg(RC_FILE_CONSTANT, constants[0]))));
974
975 dst = inst->U.I.DstReg;
976
977 dst.WriteMask = inst->U.I.DstReg.WriteMask & RC_MASK_X;
978 sin_approx(c, inst, dst,
979 swizzle_xxxx(srcreg(RC_FILE_TEMPORARY, tempreg)),
980 constants);
981
982 dst.WriteMask = inst->U.I.DstReg.WriteMask & RC_MASK_Y;
983 sin_approx(c, inst, dst,
984 swizzle_yyyy(srcreg(RC_FILE_TEMPORARY, tempreg)),
985 constants);
986 }
987
988 rc_remove_instruction(inst);
989
990 return 1;
991 }
992
r300_transform_SIN_COS(struct radeon_compiler * c,struct rc_instruction * inst,unsigned srctmp)993 static void r300_transform_SIN_COS(struct radeon_compiler *c,
994 struct rc_instruction *inst,
995 unsigned srctmp)
996 {
997 if (inst->U.I.Opcode == RC_OPCODE_COS) {
998 emit1(c, inst->Prev, RC_OPCODE_COS, &inst->U.I, inst->U.I.DstReg,
999 srcregswz(RC_FILE_TEMPORARY, srctmp, RC_SWIZZLE_WWWW));
1000 } else if (inst->U.I.Opcode == RC_OPCODE_SIN) {
1001 emit1(c, inst->Prev, RC_OPCODE_SIN, &inst->U.I,
1002 inst->U.I.DstReg, srcregswz(RC_FILE_TEMPORARY, srctmp, RC_SWIZZLE_WWWW));
1003 }
1004
1005 rc_remove_instruction(inst);
1006 }
1007
1008
1009 /**
1010 * Transform the trigonometric functions COS and SIN
1011 * to include pre-scaling by 1/(2*PI) and taking the fractional
1012 * part, so that the input to COS and SIN is always in the range [0,1).
1013 *
1014 * @warning This transformation implicitly changes the semantics of SIN and COS!
1015 */
radeonTransformTrigScale(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)1016 int radeonTransformTrigScale(struct radeon_compiler* c,
1017 struct rc_instruction* inst,
1018 void* unused)
1019 {
1020 static const float RCP_2PI = 0.15915494309189535;
1021 unsigned int temp;
1022 unsigned int constant;
1023 unsigned int constant_swizzle;
1024
1025 if (inst->U.I.Opcode != RC_OPCODE_COS &&
1026 inst->U.I.Opcode != RC_OPCODE_SIN)
1027 return 0;
1028
1029 if (!c->needs_trig_input_transform)
1030 return 1;
1031
1032 temp = rc_find_free_temporary(c);
1033 constant = rc_constants_add_immediate_scalar(&c->Program.Constants, RCP_2PI, &constant_swizzle);
1034
1035 emit2(c, inst->Prev, RC_OPCODE_MUL, NULL, dstregtmpmask(temp, RC_MASK_W),
1036 swizzle_xxxx(inst->U.I.SrcReg[0]),
1037 srcregswz(RC_FILE_CONSTANT, constant, constant_swizzle));
1038 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dstregtmpmask(temp, RC_MASK_W),
1039 srcreg(RC_FILE_TEMPORARY, temp));
1040
1041 r300_transform_SIN_COS(c, inst, temp);
1042 return 1;
1043 }
1044
1045 /**
1046 * Transform the trigonometric functions COS and SIN
1047 * so that the input to COS and SIN is always in the range [-PI, PI].
1048 */
r300_transform_trig_scale_vertex(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)1049 int r300_transform_trig_scale_vertex(struct radeon_compiler *c,
1050 struct rc_instruction *inst,
1051 void *unused)
1052 {
1053 static const float cons[4] = {0.15915494309189535, 0.5, 6.28318530717959, -3.14159265358979};
1054 unsigned int temp;
1055 unsigned int constant;
1056
1057 if (inst->U.I.Opcode != RC_OPCODE_COS &&
1058 inst->U.I.Opcode != RC_OPCODE_SIN)
1059 return 0;
1060
1061 if (!c->needs_trig_input_transform)
1062 return 1;
1063
1064 /* Repeat x in the range [-PI, PI]:
1065 *
1066 * repeat(x) = frac(x / 2PI + 0.5) * 2PI - PI
1067 */
1068
1069 temp = rc_find_free_temporary(c);
1070 constant = rc_constants_add_immediate_vec4(&c->Program.Constants, cons);
1071
1072 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(temp, RC_MASK_W),
1073 swizzle_xxxx(inst->U.I.SrcReg[0]),
1074 srcregswz(RC_FILE_CONSTANT, constant, RC_SWIZZLE_XXXX),
1075 srcregswz(RC_FILE_CONSTANT, constant, RC_SWIZZLE_YYYY));
1076 emit1(c, inst->Prev, RC_OPCODE_FRC, NULL, dstregtmpmask(temp, RC_MASK_W),
1077 srcreg(RC_FILE_TEMPORARY, temp));
1078 emit3(c, inst->Prev, RC_OPCODE_MAD, NULL, dstregtmpmask(temp, RC_MASK_W),
1079 srcreg(RC_FILE_TEMPORARY, temp),
1080 srcregswz(RC_FILE_CONSTANT, constant, RC_SWIZZLE_ZZZZ),
1081 srcregswz(RC_FILE_CONSTANT, constant, RC_SWIZZLE_WWWW));
1082
1083 r300_transform_SIN_COS(c, inst, temp);
1084 return 1;
1085 }
1086
1087 /**
1088 * Replaces DDX/DDY instructions with MOV 0 to avoid using dummy shaders on r300/r400.
1089 *
1090 * @warning This explicitly changes the form of DDX and DDY!
1091 */
1092
radeonStubDeriv(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)1093 int radeonStubDeriv(struct radeon_compiler* c,
1094 struct rc_instruction* inst,
1095 void* unused)
1096 {
1097 if (inst->U.I.Opcode != RC_OPCODE_DDX && inst->U.I.Opcode != RC_OPCODE_DDY)
1098 return 0;
1099
1100 inst->U.I.Opcode = RC_OPCODE_MOV;
1101 inst->U.I.SrcReg[0].Swizzle = RC_SWIZZLE_0000;
1102
1103 return 1;
1104 }
1105
1106 /**
1107 * Rewrite DDX/DDY instructions to properly work with r5xx shaders.
1108 * The r5xx MDH/MDV instruction provides per-quad partial derivatives.
1109 * It takes the form A*B+C. A and C are set by setting src0. B should be -1.
1110 *
1111 * @warning This explicitly changes the form of DDX and DDY!
1112 */
1113
radeonTransformDeriv(struct radeon_compiler * c,struct rc_instruction * inst,void * unused)1114 int radeonTransformDeriv(struct radeon_compiler* c,
1115 struct rc_instruction* inst,
1116 void* unused)
1117 {
1118 if (inst->U.I.Opcode != RC_OPCODE_DDX && inst->U.I.Opcode != RC_OPCODE_DDY)
1119 return 0;
1120
1121 inst->U.I.SrcReg[1].Swizzle = RC_SWIZZLE_1111;
1122 inst->U.I.SrcReg[1].Negate = RC_MASK_XYZW;
1123
1124 return 1;
1125 }
1126
1127 /**
1128 * IF Temp[0].x -> IF Temp[0].x
1129 * ... -> ...
1130 * KILL -> KIL -abs(Temp[0].x)
1131 * ... -> ...
1132 * ENDIF -> ENDIF
1133 *
1134 * === OR ===
1135 *
1136 * IF Temp[0].x -> IF Temp[0].x
1137 * ... -> ...
1138 * ELSE -> ELSE
1139 * ... -> ...
1140 * KILL -> KIL -abs(Temp[0].x)
1141 * ... -> ...
1142 * ENDIF -> ENDIF
1143 *
1144 * === OR ===
1145 *
1146 * KILL -> KIL -none.1111
1147 *
1148 * This needs to be done in its own pass, because it might modify the
1149 * instructions before and after KILL.
1150 */
rc_transform_KILL(struct radeon_compiler * c,void * user)1151 void rc_transform_KILL(struct radeon_compiler * c, void *user)
1152 {
1153 struct rc_instruction * inst;
1154 for (inst = c->Program.Instructions.Next;
1155 inst != &c->Program.Instructions; inst = inst->Next) {
1156 struct rc_instruction * if_inst;
1157 unsigned in_if = 0;
1158
1159 if (inst->U.I.Opcode != RC_OPCODE_KILP)
1160 continue;
1161
1162 for (if_inst = inst->Prev; if_inst != &c->Program.Instructions;
1163 if_inst = if_inst->Prev) {
1164
1165 if (if_inst->U.I.Opcode == RC_OPCODE_IF) {
1166 in_if = 1;
1167 break;
1168 }
1169 }
1170
1171 inst->U.I.Opcode = RC_OPCODE_KIL;
1172
1173 if (!in_if) {
1174 inst->U.I.SrcReg[0] = negate(builtin_one);
1175 } else {
1176 /* This should work even if the KILP is inside the ELSE
1177 * block, because -0.0 is considered negative. */
1178 inst->U.I.SrcReg[0] =
1179 negate(absolute(if_inst->U.I.SrcReg[0]));
1180 }
1181 }
1182 }
1183
rc_force_output_alpha_to_one(struct radeon_compiler * c,struct rc_instruction * inst,void * data)1184 int rc_force_output_alpha_to_one(struct radeon_compiler *c,
1185 struct rc_instruction *inst, void *data)
1186 {
1187 struct r300_fragment_program_compiler *fragc = (struct r300_fragment_program_compiler*)c;
1188 const struct rc_opcode_info *info = rc_get_opcode_info(inst->U.I.Opcode);
1189 unsigned tmp;
1190
1191 if (!info->HasDstReg || inst->U.I.DstReg.File != RC_FILE_OUTPUT ||
1192 inst->U.I.DstReg.Index == fragc->OutputDepth)
1193 return 1;
1194
1195 tmp = rc_find_free_temporary(c);
1196
1197 /* Insert MOV after inst, set alpha to 1. */
1198 emit1(c, inst, RC_OPCODE_MOV, NULL, inst->U.I.DstReg,
1199 srcregswz(RC_FILE_TEMPORARY, tmp, RC_SWIZZLE_XYZ1));
1200
1201 /* Re-route the destination of inst to the source of mov. */
1202 inst->U.I.DstReg.File = RC_FILE_TEMPORARY;
1203 inst->U.I.DstReg.Index = tmp;
1204
1205 /* Move the saturate output modifier to the MOV instruction
1206 * (for better copy propagation). */
1207 inst->Next->U.I.SaturateMode = inst->U.I.SaturateMode;
1208 inst->U.I.SaturateMode = RC_SATURATE_NONE;
1209 return 1;
1210 }
1211