1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25 #include <linux/module.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <asm/unaligned.h>
29
30 #define ATOM_DEBUG
31
32 #include "atom.h"
33 #include "atom-names.h"
34 #include "atom-bits.h"
35 #include "amdgpu.h"
36
37 #define ATOM_COND_ABOVE 0
38 #define ATOM_COND_ABOVEOREQUAL 1
39 #define ATOM_COND_ALWAYS 2
40 #define ATOM_COND_BELOW 3
41 #define ATOM_COND_BELOWOREQUAL 4
42 #define ATOM_COND_EQUAL 5
43 #define ATOM_COND_NOTEQUAL 6
44
45 #define ATOM_PORT_ATI 0
46 #define ATOM_PORT_PCI 1
47 #define ATOM_PORT_SYSIO 2
48
49 #define ATOM_UNIT_MICROSEC 0
50 #define ATOM_UNIT_MILLISEC 1
51
52 #define PLL_INDEX 2
53 #define PLL_DATA 3
54
/* Per-invocation execution state for one command-table run. */
typedef struct {
	struct atom_context *ctx;	/* owning ATOM interpreter context */
	uint32_t *ps, *ws;		/* parameter space / workspace arrays */
	int ps_shift;			/* ps offset handed to nested table calls */
	uint16_t start;			/* byte offset of this table in the BIOS image */
	unsigned last_jump;		/* target of the last taken jump (loop detection) */
	unsigned long last_jump_jiffies; /* when that jump was taken (10s watchdog) */
	bool abort;			/* set to abort execution (error or stuck loop) */
} atom_exec_context;
64
/* Runtime switch for the DEBUG()/SDEBUG() trace output below. */
int amdgpu_atom_debug = 0;
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);

/*
 * Mask/shift pairs for extracting a byte/word/dword field from a 32-bit
 * value, indexed by the ATOM_SRC_* alignment code (0..7).
 */
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
	0xFF000000 };
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
/* default destination-select bits for each source alignment code */
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
85
static int debug_depth = 0;	/* current table-call nesting level for SDEBUG indent */
#ifdef ATOM_DEBUG
/* Emit n spaces so nested table execution is visually indented in the log. */
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

/* DEBUG: raw trace line; SDEBUG: trace line indented by call depth. */
#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif
100
/*
 * atom_iio_execute - interpret one indirect-IO (IIO) program from the BIOS.
 * @ctx:   ATOM context (supplies the card ioreg_read/ioreg_write hooks)
 * @base:  byte offset of the IIO program within the BIOS image
 * @index: value substituted by ATOM_IIO_MOVE_INDEX opcodes
 * @data:  value substituted by ATOM_IIO_MOVE_DATA opcodes
 *
 * The program operates on a single 32-bit accumulator ("temp") and returns
 * it at ATOM_IIO_END; an unknown opcode aborts and returns 0.
 */
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			/* load accumulator from the ioreg named in the program */
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			/* clear CU8(base+1) bits starting at bit CU8(base+2) */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			/* set CU8(base+1) bits starting at bit CU8(base+2) */
			temp |=
			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
									2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			/* copy a bit field from @index into the accumulator */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
									  3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			/* copy a bit field from @data into the accumulator */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
									  3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			/* copy a bit field from ctx->io_attr into the accumulator */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 3));
			temp |=
			    ((ctx->
			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
									  CU8
									  (base
									   +
									   1))))
			    << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}
172
/*
 * atom_get_src_int - fetch one source operand of a table instruction.
 * @ctx:   execution context
 * @attr:  operand attribute byte; low 3 bits select the argument type
 *         (ATOM_ARG_*), bits 3-5 the alignment/size code (ATOM_SRC_*)
 * @ptr:   in/out instruction pointer, advanced past the operand bytes
 * @saved: if non-NULL, receives the full unmasked 32-bit value (used by
 *         atom_put_dst() for read-modify-write)
 * @print: non-zero to emit debug trace output
 *
 * Returns the operand value masked and shifted down per the alignment code
 * (immediates are returned as read, without masking).
 */
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;
	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		/* register accesses are relative to the current register block */
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			/* anything else must be an indirect-IO mode (flag 0x80) */
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val =
			    atom_iio_execute(gctx,
					     gctx->iio[gctx->io_mode & 0x7F],
					     idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		/* the top workspace slots are aliases for interpreter state */
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		/* read a dword from the data tables, relative to data_block */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		/* bounds-check the scratch (framebuffer window) access */
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		/* immediates return directly, skipping the mask/shift below */
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	/* extract the byte/word field selected by the alignment code */
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}
357
/*
 * Advance *ptr past one encoded source operand without evaluating it.
 * The operand layout mirrors atom_get_src_int(): REG/ID carry a 16-bit
 * offset, the indexed types a single byte, and immediates one, two or
 * four bytes depending on the ATOM_SRC_* alignment code.
 */
static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t arg_type = attr & 7;
	uint32_t align_code = (attr >> 3) & 7;

	switch (arg_type) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		/* 16-bit register / data-table offset */
		*ptr += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		/* single index byte */
		*ptr += 1;
		break;
	case ATOM_ARG_IMM:
		/* immediate width is dictated by the alignment code */
		if (align_code == ATOM_SRC_DWORD)
			*ptr += 4;
		else if (align_code == ATOM_SRC_WORD0 ||
			 align_code == ATOM_SRC_WORD8 ||
			 align_code == ATOM_SRC_WORD16)
			*ptr += 2;
		else
			*ptr += 1;
		break;
	}
}
393
/* Fetch a source operand with debug tracing enabled (no saved value). */
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
398
atom_get_src_direct(atom_exec_context * ctx,uint8_t align,int * ptr)399 static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
400 {
401 uint32_t val = 0xCDCDCDCD;
402
403 switch (align) {
404 case ATOM_SRC_DWORD:
405 val = U32(*ptr);
406 (*ptr) += 4;
407 break;
408 case ATOM_SRC_WORD0:
409 case ATOM_SRC_WORD8:
410 case ATOM_SRC_WORD16:
411 val = U16(*ptr);
412 (*ptr) += 2;
413 break;
414 case ATOM_SRC_BYTE0:
415 case ATOM_SRC_BYTE8:
416 case ATOM_SRC_BYTE16:
417 case ATOM_SRC_BYTE24:
418 val = U8(*ptr);
419 (*ptr)++;
420 break;
421 }
422 return val;
423 }
424
/*
 * Fetch the destination operand by translating its alignment field
 * (attr bits 3-5 and 6-7) into the source encoding via atom_dst_to_src,
 * then reusing the source-fetch path.
 */
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) &
						      7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}
433
/* Skip a destination operand, using the same dst->src alignment mapping. */
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
								 3] << 3, ptr);
}
440
/*
 * atom_put_dst - store @val into the destination operand of an instruction.
 * @ctx:   execution context
 * @arg:   destination argument type (ATOM_ARG_*)
 * @attr:  attribute byte (alignment in bits 3-5, dst-size select in 6-7)
 * @ptr:   in/out instruction pointer, advanced past the operand bytes
 * @val:   new value for the addressed byte/word/dword field
 * @saved: full original 32-bit destination from atom_get_dst(); the bits
 *         outside the addressed field are preserved from it
 */
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align =
	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
	    val, idx;
	struct atom_context *gctx = ctx->ctx;
	/* keep the in-field value for the debug trace at the bottom */
	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	/* position the new field and merge with the untouched bits */
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx,
						      val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return;
			}
			/*
			 * Mask with 0x7F (not 0xFF) so the indirect-IO flag
			 * bit (0x80, verified set just above) is stripped
			 * from the iio table index, matching the read path
			 * in atom_get_src_int().
			 */
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO write method %d\n",
					gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		/* parameter space is kept little-endian in memory */
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		/* the top workspace slots alias interpreter state */
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			/* derived read-only views of gctx->shift */
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		/* bounds-check the scratch (framebuffer window) access */
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}
576
/* ADD opcode: dst = dst + src, preserving bits outside the dst field. */
static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	int dst_ptr = *ptr;	/* re-decode position for the write-back */
	uint32_t saved, lhs, rhs;

	SDEBUG(" dst: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	rhs = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dst_ptr, lhs + rhs, saved);
}
590
/* AND opcode: dst = dst & src, preserving bits outside the dst field. */
static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	int dst_ptr = *ptr;	/* re-decode position for the write-back */
	uint32_t saved, lhs, rhs;

	SDEBUG(" dst: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	rhs = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dst_ptr, lhs & rhs, saved);
}
604
/* BEEP opcode: no hardware beeper here, just log that it fired. */
static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	/* pr_info supplies the KERN_ level the bare printk() lacked,
	 * matching the logging style used elsewhere in this file */
	pr_info("ATOM BIOS beeped!\n");
}
609
/*
 * CALL_TABLE opcode: recursively execute another command table, passing
 * the tail of our parameter space (ps + ps_shift) as its parameters.
 * A failure in the callee aborts the caller as well.
 */
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	/* only call if the table actually exists (non-zero offset) */
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}
625
/* CLEAR opcode: write 0 into the destination field. */
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;
	/* keep only the alignment bits and force the default dst select */
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	/* read the old value (untraced) solely to get the saved bits */
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}
637
/* COMPARE opcode: set the cs_equal/cs_above flags from src1 vs src2. */
static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t lhs, rhs;

	SDEBUG(" src1: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	rhs = atom_get_src(ctx, attr, ptr);
	/* unsigned comparison; the flags feed the conditional jumps */
	ctx->ctx->cs_equal = (lhs == rhs);
	ctx->ctx->cs_above = (lhs > rhs);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}
651
/*
 * DELAY opcode: wait @count units; @arg selects micro- vs milliseconds.
 * Millisecond delays sleep when the context allows, else busy-wait.
 */
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);
	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}
663
/* DIV opcode: divmul[0] = src1 / src2, divmul[1] = src1 % src2. */
static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dividend, divisor;

	SDEBUG(" src1: ");
	dividend = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	divisor = atom_get_src(ctx, attr, ptr);
	if (divisor == 0) {
		/* a BIOS divide-by-zero yields 0 quotient and remainder */
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
		return;
	}
	ctx->ctx->divmul[0] = dividend / divisor;
	ctx->ctx->divmul[1] = dividend % divisor;
}
680
/*
 * DIV32 opcode: 64/32-bit divide.  The 64-bit dividend is assembled from
 * src1 (low word) and divmul[1] (high word); quotient goes to divmul[0]
 * and remainder to divmul[1].
 */
static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		val64 = dst;
		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
		/* do_div() divides in place and leaves the remainder behind */
		do_div(val64, src);
		ctx->ctx->divmul[0] = lower_32_bits(val64);
		ctx->ctx->divmul[1] = upper_32_bits(val64);
	} else {
		/* divide-by-zero yields 0 quotient and remainder */
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}
701
/* EOT (end of table) opcode. */
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}
706
/*
 * JUMP opcode family: conditionally move the instruction pointer to
 * @target (a table-relative offset), based on the cs_equal/cs_above
 * flags set by COMPARE/TEST.  A watchdog aborts execution if the same
 * jump target is taken repeatedly for more than 10 seconds, which
 * catches BIOS tables stuck in a polling loop.
 */
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			/* same target as last time: check the 10s watchdog */
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 10000)) {
					DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrap around we will just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			/* new target: restart the watchdog */
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}
759
/* MASK opcode: dst = (dst & mask) | src, with an inline immediate mask. */
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	int dst_ptr = *ptr;	/* re-decode position for the write-back */
	uint32_t saved, dst_val, mask_val, src_val;

	SDEBUG(" dst: ");
	dst_val = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the mask is a raw immediate of the destination's width */
	mask_val = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask_val);
	SDEBUG(" src: ");
	src_val = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dst_ptr, (dst_val & mask_val) | src_val,
		     saved);
}
776
/*
 * MOVE opcode: dst = src.  For a full-DWORD destination the old value is
 * irrelevant (no bits are preserved), so the read is skipped to avoid a
 * potentially side-effecting register access.
 */
static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;
	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}
793
/* MUL opcode: divmul[0] = low 32 bits of src1 * src2. */
static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t multiplicand, multiplier;

	SDEBUG(" src1: ");
	multiplicand = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	multiplier = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = multiplicand * multiplier;
}
804
/*
 * MUL32 opcode: full 32x32->64 multiply; the low word goes to divmul[0]
 * and the high word to divmul[1].
 */
static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
{
	uint64_t val64;
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;
	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	/* widen before multiplying to keep the upper 32 bits */
	val64 = (uint64_t)dst * (uint64_t)src;
	ctx->ctx->divmul[0] = lower_32_bits(val64);
	ctx->ctx->divmul[1] = upper_32_bits(val64);
}
818
/* NOP opcode. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}
823
/* OR opcode: dst = dst | src, preserving bits outside the dst field. */
static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	int dst_ptr = *ptr;	/* re-decode position for the write-back */
	uint32_t saved, lhs, rhs;

	SDEBUG(" dst: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	rhs = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dst_ptr, lhs | rhs, saved);
}
837
/* POST_CARD opcode: trace the POST code byte; no hardware port here. */
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}
843
/* REPEAT opcode: not implemented; logs and continues. */
static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}
848
/* RESTORE_REG opcode: not implemented; logs and continues. */
static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}
853
/* SAVE_REG opcode: not implemented; logs and continues. */
static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}
858
/*
 * SET_DATA_BLOCK opcode: select the base for ATOM_ARG_ID reads.
 * Index 0 clears the base, 255 points at the current table, anything
 * else looks up the data-table directory.
 */
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}
872
/* SET_FB_BASE opcode: set the scratch/framebuffer window base. */
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}
879
/*
 * SET_PORT opcode family: select the IO mode used by register operands.
 * ATI ports map port 0 to MMIO and non-zero ports to indirect IO.
 */
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		/* not implemented in the register access paths */
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		/* not implemented in the register access paths */
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}
906
/* SET_REG_BLOCK opcode: set the offset added to all register operands. */
static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
913
/* SHIFT_LEFT opcode: dst <<= immediate byte shift count. */
static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	/* keep only the alignment bits and force the default dst select */
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
929
/* SHIFT_RIGHT opcode: dst >>= immediate byte shift count. */
static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	/* keep only the alignment bits and force the default dst select */
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
945
/*
 * SHL opcode: shift the full 32-bit destination left by a source-operand
 * count, then re-extract the destination field.
 */
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the op needs the full (unmasked) dst value, not just the field */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	/* re-extract the addressed field from the shifted dword */
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
964
/*
 * SHR opcode: shift the full 32-bit destination right by a source-operand
 * count, then re-extract the destination field.
 */
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* the op needs the full (unmasked) dst value, not just the field */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	/* re-extract the addressed field from the shifted dword */
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
983
/* SUB opcode: dst = dst - src, preserving bits outside the dst field. */
static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	int dst_ptr = *ptr;	/* re-decode position for the write-back */
	uint32_t saved, lhs, rhs;

	SDEBUG(" dst: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	rhs = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dst_ptr, lhs - rhs, saved);
}
997
/*
 * SWITCH opcode: compare the source against a list of immediate cases;
 * on a match, jump to the case's table-relative target.  Each case is
 * prefixed with ATOM_CASE_MAGIC and the list ends with ATOM_CASE_END.
 */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			/* case value is an immediate of the source's width */
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	/* no case matched: skip the ATOM_CASE_END marker */
	(*ptr) += 2;
}
1024
/* TEST opcode: cs_equal = ((src1 & src2) == 0); cs_above is untouched. */
static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t lhs, rhs;

	SDEBUG(" src1: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	rhs = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((lhs & rhs) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}
1036
/* XOR opcode: dst = dst ^ src, preserving bits outside the dst field. */
static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	int dst_ptr = *ptr;	/* re-decode position for the write-back */
	uint32_t saved, lhs, rhs;

	SDEBUG(" dst: ");
	lhs = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	rhs = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dst_ptr, lhs ^ rhs, saved);
}
1050
/* DEBUG opcode: trace the debug byte; no further action. */
static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("DEBUG output: 0x%02X\n", val);
}
1056
/* PROCESS_DS opcode: skip over an embedded data block of @val bytes
 * (plus the 16-bit length field itself). */
static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
{
	uint16_t val = U16(*ptr);
	(*ptr) += val + 2;
	SDEBUG("PROCESSDS output: 0x%02X\n", val);
}
1063
1064 static struct {
1065 void (*func) (atom_exec_context *, int *, int);
1066 int arg;
1067 } opcode_table[ATOM_OP_CNT] = {
1068 {
1069 NULL, 0}, {
1070 atom_op_move, ATOM_ARG_REG}, {
1071 atom_op_move, ATOM_ARG_PS}, {
1072 atom_op_move, ATOM_ARG_WS}, {
1073 atom_op_move, ATOM_ARG_FB}, {
1074 atom_op_move, ATOM_ARG_PLL}, {
1075 atom_op_move, ATOM_ARG_MC}, {
1076 atom_op_and, ATOM_ARG_REG}, {
1077 atom_op_and, ATOM_ARG_PS}, {
1078 atom_op_and, ATOM_ARG_WS}, {
1079 atom_op_and, ATOM_ARG_FB}, {
1080 atom_op_and, ATOM_ARG_PLL}, {
1081 atom_op_and, ATOM_ARG_MC}, {
1082 atom_op_or, ATOM_ARG_REG}, {
1083 atom_op_or, ATOM_ARG_PS}, {
1084 atom_op_or, ATOM_ARG_WS}, {
1085 atom_op_or, ATOM_ARG_FB}, {
1086 atom_op_or, ATOM_ARG_PLL}, {
1087 atom_op_or, ATOM_ARG_MC}, {
1088 atom_op_shift_left, ATOM_ARG_REG}, {
1089 atom_op_shift_left, ATOM_ARG_PS}, {
1090 atom_op_shift_left, ATOM_ARG_WS}, {
1091 atom_op_shift_left, ATOM_ARG_FB}, {
1092 atom_op_shift_left, ATOM_ARG_PLL}, {
1093 atom_op_shift_left, ATOM_ARG_MC}, {
1094 atom_op_shift_right, ATOM_ARG_REG}, {
1095 atom_op_shift_right, ATOM_ARG_PS}, {
1096 atom_op_shift_right, ATOM_ARG_WS}, {
1097 atom_op_shift_right, ATOM_ARG_FB}, {
1098 atom_op_shift_right, ATOM_ARG_PLL}, {
1099 atom_op_shift_right, ATOM_ARG_MC}, {
1100 atom_op_mul, ATOM_ARG_REG}, {
1101 atom_op_mul, ATOM_ARG_PS}, {
1102 atom_op_mul, ATOM_ARG_WS}, {
1103 atom_op_mul, ATOM_ARG_FB}, {
1104 atom_op_mul, ATOM_ARG_PLL}, {
1105 atom_op_mul, ATOM_ARG_MC}, {
1106 atom_op_div, ATOM_ARG_REG}, {
1107 atom_op_div, ATOM_ARG_PS}, {
1108 atom_op_div, ATOM_ARG_WS}, {
1109 atom_op_div, ATOM_ARG_FB}, {
1110 atom_op_div, ATOM_ARG_PLL}, {
1111 atom_op_div, ATOM_ARG_MC}, {
1112 atom_op_add, ATOM_ARG_REG}, {
1113 atom_op_add, ATOM_ARG_PS}, {
1114 atom_op_add, ATOM_ARG_WS}, {
1115 atom_op_add, ATOM_ARG_FB}, {
1116 atom_op_add, ATOM_ARG_PLL}, {
1117 atom_op_add, ATOM_ARG_MC}, {
1118 atom_op_sub, ATOM_ARG_REG}, {
1119 atom_op_sub, ATOM_ARG_PS}, {
1120 atom_op_sub, ATOM_ARG_WS}, {
1121 atom_op_sub, ATOM_ARG_FB}, {
1122 atom_op_sub, ATOM_ARG_PLL}, {
1123 atom_op_sub, ATOM_ARG_MC}, {
1124 atom_op_setport, ATOM_PORT_ATI}, {
1125 atom_op_setport, ATOM_PORT_PCI}, {
1126 atom_op_setport, ATOM_PORT_SYSIO}, {
1127 atom_op_setregblock, 0}, {
1128 atom_op_setfbbase, 0}, {
1129 atom_op_compare, ATOM_ARG_REG}, {
1130 atom_op_compare, ATOM_ARG_PS}, {
1131 atom_op_compare, ATOM_ARG_WS}, {
1132 atom_op_compare, ATOM_ARG_FB}, {
1133 atom_op_compare, ATOM_ARG_PLL}, {
1134 atom_op_compare, ATOM_ARG_MC}, {
1135 atom_op_switch, 0}, {
1136 atom_op_jump, ATOM_COND_ALWAYS}, {
1137 atom_op_jump, ATOM_COND_EQUAL}, {
1138 atom_op_jump, ATOM_COND_BELOW}, {
1139 atom_op_jump, ATOM_COND_ABOVE}, {
1140 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1141 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1142 atom_op_jump, ATOM_COND_NOTEQUAL}, {
1143 atom_op_test, ATOM_ARG_REG}, {
1144 atom_op_test, ATOM_ARG_PS}, {
1145 atom_op_test, ATOM_ARG_WS}, {
1146 atom_op_test, ATOM_ARG_FB}, {
1147 atom_op_test, ATOM_ARG_PLL}, {
1148 atom_op_test, ATOM_ARG_MC}, {
1149 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1150 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1151 atom_op_calltable, 0}, {
1152 atom_op_repeat, 0}, {
1153 atom_op_clear, ATOM_ARG_REG}, {
1154 atom_op_clear, ATOM_ARG_PS}, {
1155 atom_op_clear, ATOM_ARG_WS}, {
1156 atom_op_clear, ATOM_ARG_FB}, {
1157 atom_op_clear, ATOM_ARG_PLL}, {
1158 atom_op_clear, ATOM_ARG_MC}, {
1159 atom_op_nop, 0}, {
1160 atom_op_eot, 0}, {
1161 atom_op_mask, ATOM_ARG_REG}, {
1162 atom_op_mask, ATOM_ARG_PS}, {
1163 atom_op_mask, ATOM_ARG_WS}, {
1164 atom_op_mask, ATOM_ARG_FB}, {
1165 atom_op_mask, ATOM_ARG_PLL}, {
1166 atom_op_mask, ATOM_ARG_MC}, {
1167 atom_op_postcard, 0}, {
1168 atom_op_beep, 0}, {
1169 atom_op_savereg, 0}, {
1170 atom_op_restorereg, 0}, {
1171 atom_op_setdatablock, 0}, {
1172 atom_op_xor, ATOM_ARG_REG}, {
1173 atom_op_xor, ATOM_ARG_PS}, {
1174 atom_op_xor, ATOM_ARG_WS}, {
1175 atom_op_xor, ATOM_ARG_FB}, {
1176 atom_op_xor, ATOM_ARG_PLL}, {
1177 atom_op_xor, ATOM_ARG_MC}, {
1178 atom_op_shl, ATOM_ARG_REG}, {
1179 atom_op_shl, ATOM_ARG_PS}, {
1180 atom_op_shl, ATOM_ARG_WS}, {
1181 atom_op_shl, ATOM_ARG_FB}, {
1182 atom_op_shl, ATOM_ARG_PLL}, {
1183 atom_op_shl, ATOM_ARG_MC}, {
1184 atom_op_shr, ATOM_ARG_REG}, {
1185 atom_op_shr, ATOM_ARG_PS}, {
1186 atom_op_shr, ATOM_ARG_WS}, {
1187 atom_op_shr, ATOM_ARG_FB}, {
1188 atom_op_shr, ATOM_ARG_PLL}, {
1189 atom_op_shr, ATOM_ARG_MC}, {
1190 atom_op_debug, 0}, {
1191 atom_op_processds, 0}, {
1192 atom_op_mul32, ATOM_ARG_PS}, {
1193 atom_op_mul32, ATOM_ARG_WS}, {
1194 atom_op_div32, ATOM_ARG_PS}, {
1195 atom_op_div32, ATOM_ARG_WS},
1196 };
1197
/*
 * Execute one ATOM command table with ctx->mutex already held.
 *
 * @index selects the table via the 16-bit offset array at cmd_table + 4;
 * a zero offset means the table does not exist.  @params is the caller's
 * parameter space (PS); a zeroed workspace (WS) is allocated if the table
 * header asks for one.  Interpretation runs until an EOT opcode, an
 * out-of-range opcode, or an opcode handler sets ectx.abort.
 *
 * Returns 0 on success, -EINVAL on a missing table or aborted execution,
 * -ENOMEM if the workspace allocation fails.
 *
 * Fixes vs. previous version:
 *  - kcalloc() failure is now detected instead of letting WS-writing
 *    opcodes dereference a NULL ectx.ws;
 *  - debug_depth is decremented on the abort path too (it was leaked
 *    before, permanently skewing SDEBUG indentation).
 */
static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws) {
		/* workspace entries are 32-bit, hence 4 bytes each */
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
		if (!ectx.ws)
			return -ENOMEM;
	} else {
		ectx.ws = NULL;
	}

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			break;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

	/* kfree(NULL) is a no-op, so no need to guard on ws */
	kfree(ectx.ws);
	return ret;
}
1258
/*
 * Public entry point: execute an ATOM command table.
 *
 * Serializes on ctx->mutex and resets the per-execution interpreter
 * state (data block, register block, framebuffer window, I/O mode and
 * the div/mul scratch registers) before running the table.
 */
int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int ret;

	mutex_lock(&ctx->mutex);
	/* reset data block, reg block, fb window, io mode and divmul */
	ctx->data_block = 0;
	ctx->reg_block = 0;
	ctx->fb_base = 0;
	ctx->io_mode = ATOM_IO_MM;
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	ret = amdgpu_atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);

	return ret;
}
1279
/* Byte length of each IIO instruction, indexed by its opcode; used by
 * atom_index_iio() to step over instructions while scanning for
 * ATOM_IIO_END. */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1281
atom_index_iio(struct atom_context * ctx,int base)1282 static void atom_index_iio(struct atom_context *ctx, int base)
1283 {
1284 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1285 if (!ctx->iio)
1286 return;
1287 while (CU8(base) == ATOM_IIO_START) {
1288 ctx->iio[CU8(base + 1)] = base + 2;
1289 base += 2;
1290 while (CU8(base) != ATOM_IIO_END)
1291 base += atom_iio_len[CU8(base)];
1292 base += 3;
1293 }
1294 }
1295
amdgpu_atom_parse(struct card_info * card,void * bios)1296 struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1297 {
1298 int base;
1299 struct atom_context *ctx =
1300 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1301 char *str;
1302 u16 idx;
1303
1304 if (!ctx)
1305 return NULL;
1306
1307 ctx->card = card;
1308 ctx->bios = bios;
1309
1310 if (CU16(0) != ATOM_BIOS_MAGIC) {
1311 pr_info("Invalid BIOS magic\n");
1312 kfree(ctx);
1313 return NULL;
1314 }
1315 if (strncmp
1316 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1317 strlen(ATOM_ATI_MAGIC))) {
1318 pr_info("Invalid ATI magic\n");
1319 kfree(ctx);
1320 return NULL;
1321 }
1322
1323 base = CU16(ATOM_ROM_TABLE_PTR);
1324 if (strncmp
1325 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1326 strlen(ATOM_ROM_MAGIC))) {
1327 pr_info("Invalid ATOM magic\n");
1328 kfree(ctx);
1329 return NULL;
1330 }
1331
1332 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1333 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1334 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1335 if (!ctx->iio) {
1336 amdgpu_atom_destroy(ctx);
1337 return NULL;
1338 }
1339
1340 idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
1341 if (idx == 0)
1342 idx = 0x80;
1343
1344 str = CSTR(idx);
1345 if (*str != '\0') {
1346 pr_info("ATOM BIOS: %s\n", str);
1347 strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
1348 }
1349
1350
1351 return ctx;
1352 }
1353
amdgpu_atom_asic_init(struct atom_context * ctx)1354 int amdgpu_atom_asic_init(struct atom_context *ctx)
1355 {
1356 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1357 uint32_t ps[16];
1358 int ret;
1359
1360 memset(ps, 0, 64);
1361
1362 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1363 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1364 if (!ps[0] || !ps[1])
1365 return 1;
1366
1367 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1368 return 1;
1369 ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1370 if (ret)
1371 return ret;
1372
1373 memset(ps, 0, 64);
1374
1375 return ret;
1376 }
1377
/* Free a context created by amdgpu_atom_parse(): the IIO index table and
 * the context itself.  The BIOS image (ctx->bios) was supplied by the
 * caller and is not freed here. */
void amdgpu_atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}
1383
amdgpu_atom_parse_data_header(struct atom_context * ctx,int index,uint16_t * size,uint8_t * frev,uint8_t * crev,uint16_t * data_start)1384 bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1385 uint16_t * size, uint8_t * frev, uint8_t * crev,
1386 uint16_t * data_start)
1387 {
1388 int offset = index * 2 + 4;
1389 int idx = CU16(ctx->data_table + offset);
1390 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1391
1392 if (!mdt[index])
1393 return false;
1394
1395 if (size)
1396 *size = CU16(idx);
1397 if (frev)
1398 *frev = CU8(idx + 2);
1399 if (crev)
1400 *crev = CU8(idx + 3);
1401 *data_start = idx;
1402 return true;
1403 }
1404
amdgpu_atom_parse_cmd_header(struct atom_context * ctx,int index,uint8_t * frev,uint8_t * crev)1405 bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1406 uint8_t * crev)
1407 {
1408 int offset = index * 2 + 4;
1409 int idx = CU16(ctx->cmd_table + offset);
1410 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1411
1412 if (!mct[index])
1413 return false;
1414
1415 if (frev)
1416 *frev = CU8(idx + 2);
1417 if (crev)
1418 *crev = CU8(idx + 3);
1419 return true;
1420 }
1421
1422