drivers/gpu/drm/radeon/atom.c
1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25 #include <linux/module.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <asm/unaligned.h>
29
30 #define ATOM_DEBUG
31
32 #include "atom.h"
33 #include "atom-names.h"
34 #include "atom-bits.h"
35 #include "radeon.h"
36
37 #define ATOM_COND_ABOVE 0
38 #define ATOM_COND_ABOVEOREQUAL 1
39 #define ATOM_COND_ALWAYS 2
40 #define ATOM_COND_BELOW 3
41 #define ATOM_COND_BELOWOREQUAL 4
42 #define ATOM_COND_EQUAL 5
43 #define ATOM_COND_NOTEQUAL 6
44
45 #define ATOM_PORT_ATI 0
46 #define ATOM_PORT_PCI 1
47 #define ATOM_PORT_SYSIO 2
48
49 #define ATOM_UNIT_MICROSEC 0
50 #define ATOM_UNIT_MILLISEC 1
51
52 #define PLL_INDEX 2
53 #define PLL_DATA 3
54
55 typedef struct {
56 struct atom_context *ctx;
57 uint32_t *ps, *ws;
58 int ps_shift;
59 uint16_t start;
60 unsigned last_jump;
61 unsigned long last_jump_jiffies;
62 bool abort;
63 } atom_exec_context;
64
65 int atom_debug = 0;
66 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
67 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
68
69 static uint32_t atom_arg_mask[8] = {
70 0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
71 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
72 };
73 static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
74
75 static int atom_dst_to_src[8][4] = {
76 /* translate destination alignment field to the source alignment encoding */
77 {0, 0, 0, 0},
78 {1, 2, 3, 0},
79 {1, 2, 3, 0},
80 {1, 2, 3, 0},
81 {4, 5, 6, 7},
82 {4, 5, 6, 7},
83 {4, 5, 6, 7},
84 {4, 5, 6, 7},
85 };
86 static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
87
88 static int debug_depth = 0;
89 #ifdef ATOM_DEBUG
90 static void debug_print_spaces(int n)
91 {
92 while (n--)
93 printk(" ");
94 }
95
96 #define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
97 #define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
98 #else
99 #define DEBUG(...) do { } while (0)
100 #define SDEBUG(...) do { } while (0)
101 #endif
102
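/*
 * atom_iio_execute - run one indirect IO (IIO) program from the BIOS image.
 * @base points at the first IIO opcode; @index and @data are the values
 * supplied by the register access that triggered the program.  The program
 * builds up a temporary value with bit-field set/clear/move operations and
 * real register accesses through the card's ioreg_read/ioreg_write
 * callbacks, and ATOM_IIO_END returns that value to the caller.
 */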
103 static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
104 uint32_t index, uint32_t data)
105 {
106 struct radeon_device *rdev = ctx->card->dev->dev_private;
107 uint32_t temp = 0xCDCDCDCD;
108
109 while (1)
110 switch (CU8(base)) {
111 case ATOM_IIO_NOP:
112 base++;
113 break;
114 case ATOM_IIO_READ:
115 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
116 base += 3;
117 break;
118 case ATOM_IIO_WRITE:
119 if (rdev->family == CHIP_RV515)
120 (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
121 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
122 base += 3;
123 break;
124 case ATOM_IIO_CLEAR:
125 temp &=
126 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
127 CU8(base + 2));
128 base += 3;
129 break;
130 case ATOM_IIO_SET:
131 temp |=
132 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
133 2);
134 base += 3;
135 break;
136 case ATOM_IIO_MOVE_INDEX:
137 temp &=
138 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
139 CU8(base + 3));
140 temp |=
141 ((index >> CU8(base + 2)) &
142 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
143 3);
144 base += 4;
145 break;
146 case ATOM_IIO_MOVE_DATA:
147 temp &=
148 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
149 CU8(base + 3));
150 temp |=
151 ((data >> CU8(base + 2)) &
152 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
153 3);
154 base += 4;
155 break;
156 case ATOM_IIO_MOVE_ATTR:
157 temp &=
158 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
159 CU8(base + 3));
160 temp |=
161 ((ctx->
162 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
163 CU8
164 (base
165 +
166 1))))
167 << CU8(base + 3);
168 base += 4;
169 break;
170 case ATOM_IIO_END:
171 return temp;
172 default:
173 printk(KERN_INFO "Unknown IIO opcode.\n");
174 return 0;
175 }
176 }
177
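/*
 * Decode one source operand.  The low three bits of @attr select the
 * argument type (register, parameter space, workspace, data table, frame
 * buffer scratch, immediate, PLL or MC); bits 3-5 select which byte, word
 * or dword of the 32-bit value is used.  The full 32-bit value is
 * optionally stored through @saved (so a later write back can preserve the
 * untouched bits) before it is masked and shifted for the alignment.
 */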
178 static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
179 int *ptr, uint32_t *saved, int print)
180 {
181 uint32_t idx, val = 0xCDCDCDCD, align, arg;
182 struct atom_context *gctx = ctx->ctx;
183 arg = attr & 7;
184 align = (attr >> 3) & 7;
185 switch (arg) {
186 case ATOM_ARG_REG:
187 idx = U16(*ptr);
188 (*ptr) += 2;
189 if (print)
190 DEBUG("REG[0x%04X]", idx);
191 idx += gctx->reg_block;
192 switch (gctx->io_mode) {
193 case ATOM_IO_MM:
194 val = gctx->card->reg_read(gctx->card, idx);
195 break;
196 case ATOM_IO_PCI:
197 printk(KERN_INFO
198 "PCI registers are not implemented.\n");
199 return 0;
200 case ATOM_IO_SYSIO:
201 printk(KERN_INFO
202 "SYSIO registers are not implemented.\n");
203 return 0;
204 default:
205 if (!(gctx->io_mode & 0x80)) {
206 printk(KERN_INFO "Bad IO mode.\n");
207 return 0;
208 }
209 if (!gctx->iio[gctx->io_mode & 0x7F]) {
210 printk(KERN_INFO
211 "Undefined indirect IO read method %d.\n",
212 gctx->io_mode & 0x7F);
213 return 0;
214 }
215 val =
216 atom_iio_execute(gctx,
217 gctx->iio[gctx->io_mode & 0x7F],
218 idx, 0);
219 }
220 break;
221 case ATOM_ARG_PS:
222 idx = U8(*ptr);
223 (*ptr)++;
224 /* get_unaligned_le32 avoids unaligned accesses from atombios
225 * tables, noticed on a DEC Alpha. */
226 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
227 if (print)
228 DEBUG("PS[0x%02X,0x%04X]", idx, val);
229 break;
230 case ATOM_ARG_WS:
231 idx = U8(*ptr);
232 (*ptr)++;
233 if (print)
234 DEBUG("WS[0x%02X]", idx);
235 switch (idx) {
236 case ATOM_WS_QUOTIENT:
237 val = gctx->divmul[0];
238 break;
239 case ATOM_WS_REMAINDER:
240 val = gctx->divmul[1];
241 break;
242 case ATOM_WS_DATAPTR:
243 val = gctx->data_block;
244 break;
245 case ATOM_WS_SHIFT:
246 val = gctx->shift;
247 break;
248 case ATOM_WS_OR_MASK:
249 val = 1 << gctx->shift;
250 break;
251 case ATOM_WS_AND_MASK:
252 val = ~(1 << gctx->shift);
253 break;
254 case ATOM_WS_FB_WINDOW:
255 val = gctx->fb_base;
256 break;
257 case ATOM_WS_ATTRIBUTES:
258 val = gctx->io_attr;
259 break;
260 case ATOM_WS_REGPTR:
261 val = gctx->reg_block;
262 break;
263 default:
264 val = ctx->ws[idx];
265 }
266 break;
267 case ATOM_ARG_ID:
268 idx = U16(*ptr);
269 (*ptr) += 2;
270 if (print) {
271 if (gctx->data_block)
272 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
273 else
274 DEBUG("ID[0x%04X]", idx);
275 }
276 val = U32(idx + gctx->data_block);
277 break;
278 case ATOM_ARG_FB:
279 idx = U8(*ptr);
280 (*ptr)++;
281 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
282 DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
283 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
284 val = 0;
285 } else
286 val = gctx->scratch[(gctx->fb_base / 4) + idx];
287 if (print)
288 DEBUG("FB[0x%02X]", idx);
289 break;
290 case ATOM_ARG_IMM:
291 switch (align) {
292 case ATOM_SRC_DWORD:
293 val = U32(*ptr);
294 (*ptr) += 4;
295 if (print)
296 DEBUG("IMM 0x%08X\n", val);
297 return val;
298 case ATOM_SRC_WORD0:
299 case ATOM_SRC_WORD8:
300 case ATOM_SRC_WORD16:
301 val = U16(*ptr);
302 (*ptr) += 2;
303 if (print)
304 DEBUG("IMM 0x%04X\n", val);
305 return val;
306 case ATOM_SRC_BYTE0:
307 case ATOM_SRC_BYTE8:
308 case ATOM_SRC_BYTE16:
309 case ATOM_SRC_BYTE24:
310 val = U8(*ptr);
311 (*ptr)++;
312 if (print)
313 DEBUG("IMM 0x%02X\n", val);
314 return val;
315 }
316 return 0;
317 case ATOM_ARG_PLL:
318 idx = U8(*ptr);
319 (*ptr)++;
320 if (print)
321 DEBUG("PLL[0x%02X]", idx);
322 val = gctx->card->pll_read(gctx->card, idx);
323 break;
324 case ATOM_ARG_MC:
325 idx = U8(*ptr);
326 (*ptr)++;
327 if (print)
328 DEBUG("MC[0x%02X]", idx);
329 val = gctx->card->mc_read(gctx->card, idx);
330 break;
331 }
332 if (saved)
333 *saved = val;
334 val &= atom_arg_mask[align];
335 val >>= atom_arg_shift[align];
336 if (print)
337 switch (align) {
338 case ATOM_SRC_DWORD:
339 DEBUG(".[31:0] -> 0x%08X\n", val);
340 break;
341 case ATOM_SRC_WORD0:
342 DEBUG(".[15:0] -> 0x%04X\n", val);
343 break;
344 case ATOM_SRC_WORD8:
345 DEBUG(".[23:8] -> 0x%04X\n", val);
346 break;
347 case ATOM_SRC_WORD16:
348 DEBUG(".[31:16] -> 0x%04X\n", val);
349 break;
350 case ATOM_SRC_BYTE0:
351 DEBUG(".[7:0] -> 0x%02X\n", val);
352 break;
353 case ATOM_SRC_BYTE8:
354 DEBUG(".[15:8] -> 0x%02X\n", val);
355 break;
356 case ATOM_SRC_BYTE16:
357 DEBUG(".[23:16] -> 0x%02X\n", val);
358 break;
359 case ATOM_SRC_BYTE24:
360 DEBUG(".[31:24] -> 0x%02X\n", val);
361 break;
362 }
363 return val;
364 }
365
366 static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
367 {
368 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
369 switch (arg) {
370 case ATOM_ARG_REG:
371 case ATOM_ARG_ID:
372 (*ptr) += 2;
373 break;
374 case ATOM_ARG_PLL:
375 case ATOM_ARG_MC:
376 case ATOM_ARG_PS:
377 case ATOM_ARG_WS:
378 case ATOM_ARG_FB:
379 (*ptr)++;
380 break;
381 case ATOM_ARG_IMM:
382 switch (align) {
383 case ATOM_SRC_DWORD:
384 (*ptr) += 4;
385 return;
386 case ATOM_SRC_WORD0:
387 case ATOM_SRC_WORD8:
388 case ATOM_SRC_WORD16:
389 (*ptr) += 2;
390 return;
391 case ATOM_SRC_BYTE0:
392 case ATOM_SRC_BYTE8:
393 case ATOM_SRC_BYTE16:
394 case ATOM_SRC_BYTE24:
395 (*ptr)++;
396 return;
397 }
398 return;
399 }
400 }
401
402 static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
403 {
404 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
405 }
406
407 static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
408 {
409 uint32_t val = 0xCDCDCDCD;
410
411 switch (align) {
412 case ATOM_SRC_DWORD:
413 val = U32(*ptr);
414 (*ptr) += 4;
415 break;
416 case ATOM_SRC_WORD0:
417 case ATOM_SRC_WORD8:
418 case ATOM_SRC_WORD16:
419 val = U16(*ptr);
420 (*ptr) += 2;
421 break;
422 case ATOM_SRC_BYTE0:
423 case ATOM_SRC_BYTE8:
424 case ATOM_SRC_BYTE16:
425 case ATOM_SRC_BYTE24:
426 val = U8(*ptr);
427 (*ptr)++;
428 break;
429 }
430 return val;
431 }
432
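/*
 * Destination operands encode their alignment differently (bits 3-5 plus
 * bits 6-7 of @attr); atom_dst_to_src[] translates that pair into the
 * source alignment encoding so the source helpers above can be reused.
 */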
433 static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
434 int *ptr, uint32_t *saved, int print)
435 {
436 return atom_get_src_int(ctx,
437 arg | atom_dst_to_src[(attr >> 3) &
438 7][(attr >> 6) & 3] << 3,
439 ptr, saved, print);
440 }
441
442 static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
443 {
444 atom_skip_src_int(ctx,
445 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
446 3] << 3, ptr);
447 }
448
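/*
 * Write @val back to a destination operand.  The value is shifted and
 * masked into the field selected by the alignment, merged with @saved so
 * that bits outside the field keep their previous contents, and then
 * stored to the register, parameter space, workspace, frame buffer
 * scratch, PLL or MC location named by @arg.
 */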
449 static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
450 int *ptr, uint32_t val, uint32_t saved)
451 {
452 uint32_t align =
453 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
454 val, idx;
455 struct atom_context *gctx = ctx->ctx;
456 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
457 val <<= atom_arg_shift[align];
458 val &= atom_arg_mask[align];
459 saved &= ~atom_arg_mask[align];
460 val |= saved;
461 switch (arg) {
462 case ATOM_ARG_REG:
463 idx = U16(*ptr);
464 (*ptr) += 2;
465 DEBUG("REG[0x%04X]", idx);
466 idx += gctx->reg_block;
467 switch (gctx->io_mode) {
468 case ATOM_IO_MM:
469 if (idx == 0)
470 gctx->card->reg_write(gctx->card, idx,
471 val << 2);
472 else
473 gctx->card->reg_write(gctx->card, idx, val);
474 break;
475 case ATOM_IO_PCI:
476 printk(KERN_INFO
477 "PCI registers are not implemented.\n");
478 return;
479 case ATOM_IO_SYSIO:
480 printk(KERN_INFO
481 "SYSIO registers are not implemented.\n");
482 return;
483 default:
484 if (!(gctx->io_mode & 0x80)) {
485 printk(KERN_INFO "Bad IO mode.\n");
486 return;
487 }
488 if (!gctx->iio[gctx->io_mode & 0xFF]) {
489 printk(KERN_INFO
490 "Undefined indirect IO write method %d.\n",
491 gctx->io_mode & 0x7F);
492 return;
493 }
494 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
495 idx, val);
496 }
497 break;
498 case ATOM_ARG_PS:
499 idx = U8(*ptr);
500 (*ptr)++;
501 DEBUG("PS[0x%02X]", idx);
502 ctx->ps[idx] = cpu_to_le32(val);
503 break;
504 case ATOM_ARG_WS:
505 idx = U8(*ptr);
506 (*ptr)++;
507 DEBUG("WS[0x%02X]", idx);
508 switch (idx) {
509 case ATOM_WS_QUOTIENT:
510 gctx->divmul[0] = val;
511 break;
512 case ATOM_WS_REMAINDER:
513 gctx->divmul[1] = val;
514 break;
515 case ATOM_WS_DATAPTR:
516 gctx->data_block = val;
517 break;
518 case ATOM_WS_SHIFT:
519 gctx->shift = val;
520 break;
521 case ATOM_WS_OR_MASK:
522 case ATOM_WS_AND_MASK:
523 break;
524 case ATOM_WS_FB_WINDOW:
525 gctx->fb_base = val;
526 break;
527 case ATOM_WS_ATTRIBUTES:
528 gctx->io_attr = val;
529 break;
530 case ATOM_WS_REGPTR:
531 gctx->reg_block = val;
532 break;
533 default:
534 ctx->ws[idx] = val;
535 }
536 break;
537 case ATOM_ARG_FB:
538 idx = U8(*ptr);
539 (*ptr)++;
540 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
541 DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
542 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
543 } else
544 gctx->scratch[(gctx->fb_base / 4) + idx] = val;
545 DEBUG("FB[0x%02X]", idx);
546 break;
547 case ATOM_ARG_PLL:
548 idx = U8(*ptr);
549 (*ptr)++;
550 DEBUG("PLL[0x%02X]", idx);
551 gctx->card->pll_write(gctx->card, idx, val);
552 break;
553 case ATOM_ARG_MC:
554 idx = U8(*ptr);
555 (*ptr)++;
556 DEBUG("MC[0x%02X]", idx);
557 gctx->card->mc_write(gctx->card, idx, val);
558 return;
559 }
560 switch (align) {
561 case ATOM_SRC_DWORD:
562 DEBUG(".[31:0] <- 0x%08X\n", old_val);
563 break;
564 case ATOM_SRC_WORD0:
565 DEBUG(".[15:0] <- 0x%04X\n", old_val);
566 break;
567 case ATOM_SRC_WORD8:
568 DEBUG(".[23:8] <- 0x%04X\n", old_val);
569 break;
570 case ATOM_SRC_WORD16:
571 DEBUG(".[31:16] <- 0x%04X\n", old_val);
572 break;
573 case ATOM_SRC_BYTE0:
574 DEBUG(".[7:0] <- 0x%02X\n", old_val);
575 break;
576 case ATOM_SRC_BYTE8:
577 DEBUG(".[15:8] <- 0x%02X\n", old_val);
578 break;
579 case ATOM_SRC_BYTE16:
580 DEBUG(".[23:16] <- 0x%02X\n", old_val);
581 break;
582 case ATOM_SRC_BYTE24:
583 DEBUG(".[31:24] <- 0x%02X\n", old_val);
584 break;
585 }
586 }
587
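/*
 * Opcode handlers.  On entry *ptr points just past the opcode byte; each
 * handler consumes its attribute byte and operands.  dptr remembers where
 * the destination operand starts so atom_put_dst() can re-decode it after
 * the source has been fetched.
 */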
588 static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
589 {
590 uint8_t attr = U8((*ptr)++);
591 uint32_t dst, src, saved;
592 int dptr = *ptr;
593 SDEBUG(" dst: ");
594 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
595 SDEBUG(" src: ");
596 src = atom_get_src(ctx, attr, ptr);
597 dst += src;
598 SDEBUG(" dst: ");
599 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
600 }
601
602 static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
603 {
604 uint8_t attr = U8((*ptr)++);
605 uint32_t dst, src, saved;
606 int dptr = *ptr;
607 SDEBUG(" dst: ");
608 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
609 SDEBUG(" src: ");
610 src = atom_get_src(ctx, attr, ptr);
611 dst &= src;
612 SDEBUG(" dst: ");
613 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
614 }
615
616 static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
617 {
618 printk("ATOM BIOS beeped!\n");
619 }
620
621 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
622 {
623 int idx = U8((*ptr)++);
624 int r = 0;
625
626 if (idx < ATOM_TABLE_NAMES_CNT)
627 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
628 else
629 SDEBUG(" table: %d\n", idx);
630 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
631 r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
632 if (r) {
633 ctx->abort = true;
634 }
635 }
636
637 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
638 {
639 uint8_t attr = U8((*ptr)++);
640 uint32_t saved;
641 int dptr = *ptr;
642 attr &= 0x38;
643 attr |= atom_def_dst[attr >> 3] << 6;
644 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
645 SDEBUG(" dst: ");
646 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
647 }
648
649 static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
650 {
651 uint8_t attr = U8((*ptr)++);
652 uint32_t dst, src;
653 SDEBUG(" src1: ");
654 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
655 SDEBUG(" src2: ");
656 src = atom_get_src(ctx, attr, ptr);
657 ctx->ctx->cs_equal = (dst == src);
658 ctx->ctx->cs_above = (dst > src);
659 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
660 ctx->ctx->cs_above ? "GT" : "LE");
661 }
662
663 static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
664 {
665 unsigned count = U8((*ptr)++);
666 SDEBUG(" count: %d\n", count);
667 if (arg == ATOM_UNIT_MICROSEC)
668 udelay(count);
669 else if (!drm_can_sleep())
670 mdelay(count);
671 else
672 msleep(count);
673 }
674
675 static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
676 {
677 uint8_t attr = U8((*ptr)++);
678 uint32_t dst, src;
679 SDEBUG(" src1: ");
680 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
681 SDEBUG(" src2: ");
682 src = atom_get_src(ctx, attr, ptr);
683 if (src != 0) {
684 ctx->ctx->divmul[0] = dst / src;
685 ctx->ctx->divmul[1] = dst % src;
686 } else {
687 ctx->ctx->divmul[0] = 0;
688 ctx->ctx->divmul[1] = 0;
689 }
690 }
691
692 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
693 {
694 /* functionally, a nop */
695 }
696
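/*
 * Conditional and unconditional jumps.  Targets are offsets relative to
 * the start of the command table entry.  A jiffies-based watchdog aborts
 * the table if the interpreter keeps branching to the same target for more
 * than five seconds, so a buggy BIOS cannot hang the kernel.
 */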
697 static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
698 {
699 int execute = 0, target = U16(*ptr);
700 unsigned long cjiffies;
701
702 (*ptr) += 2;
703 switch (arg) {
704 case ATOM_COND_ABOVE:
705 execute = ctx->ctx->cs_above;
706 break;
707 case ATOM_COND_ABOVEOREQUAL:
708 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
709 break;
710 case ATOM_COND_ALWAYS:
711 execute = 1;
712 break;
713 case ATOM_COND_BELOW:
714 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
715 break;
716 case ATOM_COND_BELOWOREQUAL:
717 execute = !ctx->ctx->cs_above;
718 break;
719 case ATOM_COND_EQUAL:
720 execute = ctx->ctx->cs_equal;
721 break;
722 case ATOM_COND_NOTEQUAL:
723 execute = !ctx->ctx->cs_equal;
724 break;
725 }
726 if (arg != ATOM_COND_ALWAYS)
727 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
728 SDEBUG(" target: 0x%04X\n", target);
729 if (execute) {
730 if (ctx->last_jump == (ctx->start + target)) {
731 cjiffies = jiffies;
732 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
733 cjiffies -= ctx->last_jump_jiffies;
734 if ((jiffies_to_msecs(cjiffies) > 5000)) {
735 DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
736 ctx->abort = true;
737 }
738 } else {
739 /* jiffies wrapped around; just wait a little longer */
740 ctx->last_jump_jiffies = jiffies;
741 }
742 } else {
743 ctx->last_jump = ctx->start + target;
744 ctx->last_jump_jiffies = jiffies;
745 }
746 *ptr = ctx->start + target;
747 }
748 }
749
750 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
751 {
752 uint8_t attr = U8((*ptr)++);
753 uint32_t dst, mask, src, saved;
754 int dptr = *ptr;
755 SDEBUG(" dst: ");
756 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
757 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
758 SDEBUG(" mask: 0x%08x", mask);
759 SDEBUG(" src: ");
760 src = atom_get_src(ctx, attr, ptr);
761 dst &= mask;
762 dst |= src;
763 SDEBUG(" dst: ");
764 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
765 }
766
767 static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
768 {
769 uint8_t attr = U8((*ptr)++);
770 uint32_t src, saved;
771 int dptr = *ptr;
772 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
773 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
774 else {
775 atom_skip_dst(ctx, arg, attr, ptr);
776 saved = 0xCDCDCDCD;
777 }
778 SDEBUG(" src: ");
779 src = atom_get_src(ctx, attr, ptr);
780 SDEBUG(" dst: ");
781 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
782 }
783
784 static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
785 {
786 uint8_t attr = U8((*ptr)++);
787 uint32_t dst, src;
788 SDEBUG(" src1: ");
789 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
790 SDEBUG(" src2: ");
791 src = atom_get_src(ctx, attr, ptr);
792 ctx->ctx->divmul[0] = dst * src;
793 }
794
795 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
796 {
797 /* nothing */
798 }
799
800 static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
801 {
802 uint8_t attr = U8((*ptr)++);
803 uint32_t dst, src, saved;
804 int dptr = *ptr;
805 SDEBUG(" dst: ");
806 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
807 SDEBUG(" src: ");
808 src = atom_get_src(ctx, attr, ptr);
809 dst |= src;
810 SDEBUG(" dst: ");
811 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
812 }
813
814 static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
815 {
816 uint8_t val = U8((*ptr)++);
817 SDEBUG("POST card output: 0x%02X\n", val);
818 }
819
820 static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
821 {
822 printk(KERN_INFO "unimplemented!\n");
823 }
824
825 static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
826 {
827 printk(KERN_INFO "unimplemented!\n");
828 }
829
830 static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
831 {
832 printk(KERN_INFO "unimplemented!\n");
833 }
834
835 static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
836 {
837 int idx = U8(*ptr);
838 (*ptr)++;
839 SDEBUG(" block: %d\n", idx);
840 if (!idx)
841 ctx->ctx->data_block = 0;
842 else if (idx == 255)
843 ctx->ctx->data_block = ctx->start;
844 else
845 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
846 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
847 }
848
849 static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
850 {
851 uint8_t attr = U8((*ptr)++);
852 SDEBUG(" fb_base: ");
853 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
854 }
855
856 static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
857 {
858 int port;
859 switch (arg) {
860 case ATOM_PORT_ATI:
861 port = U16(*ptr);
862 if (port < ATOM_IO_NAMES_CNT)
863 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
864 else
865 SDEBUG(" port: %d\n", port);
866 if (!port)
867 ctx->ctx->io_mode = ATOM_IO_MM;
868 else
869 ctx->ctx->io_mode = ATOM_IO_IIO | port;
870 (*ptr) += 2;
871 break;
872 case ATOM_PORT_PCI:
873 ctx->ctx->io_mode = ATOM_IO_PCI;
874 (*ptr)++;
875 break;
876 case ATOM_PORT_SYSIO:
877 ctx->ctx->io_mode = ATOM_IO_SYSIO;
878 (*ptr)++;
879 break;
880 }
881 }
882
883 static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
884 {
885 ctx->ctx->reg_block = U16(*ptr);
886 (*ptr) += 2;
887 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
888 }
889
890 static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
891 {
892 uint8_t attr = U8((*ptr)++), shift;
893 uint32_t saved, dst;
894 int dptr = *ptr;
895 attr &= 0x38;
896 attr |= atom_def_dst[attr >> 3] << 6;
897 SDEBUG(" dst: ");
898 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
899 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
900 SDEBUG(" shift: %d\n", shift);
901 dst <<= shift;
902 SDEBUG(" dst: ");
903 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
904 }
905
906 static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
907 {
908 uint8_t attr = U8((*ptr)++), shift;
909 uint32_t saved, dst;
910 int dptr = *ptr;
911 attr &= 0x38;
912 attr |= atom_def_dst[attr >> 3] << 6;
913 SDEBUG(" dst: ");
914 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
915 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
916 SDEBUG(" shift: %d\n", shift);
917 dst >>= shift;
918 SDEBUG(" dst: ");
919 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
920 }
921
922 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
923 {
924 uint8_t attr = U8((*ptr)++), shift;
925 uint32_t saved, dst;
926 int dptr = *ptr;
927 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
928 SDEBUG(" dst: ");
929 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
930 /* op needs the full dst value */
931 dst = saved;
932 shift = atom_get_src(ctx, attr, ptr);
933 SDEBUG(" shift: %d\n", shift);
934 dst <<= shift;
935 dst &= atom_arg_mask[dst_align];
936 dst >>= atom_arg_shift[dst_align];
937 SDEBUG(" dst: ");
938 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
939 }
940
941 static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
942 {
943 uint8_t attr = U8((*ptr)++), shift;
944 uint32_t saved, dst;
945 int dptr = *ptr;
946 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
947 SDEBUG(" dst: ");
948 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
949 /* op needs the full dst value */
950 dst = saved;
951 shift = atom_get_src(ctx, attr, ptr);
952 SDEBUG(" shift: %d\n", shift);
953 dst >>= shift;
954 dst &= atom_arg_mask[dst_align];
955 dst >>= atom_arg_shift[dst_align];
956 SDEBUG(" dst: ");
957 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
958 }
959
960 static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
961 {
962 uint8_t attr = U8((*ptr)++);
963 uint32_t dst, src, saved;
964 int dptr = *ptr;
965 SDEBUG(" dst: ");
966 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
967 SDEBUG(" src: ");
968 src = atom_get_src(ctx, attr, ptr);
969 dst -= src;
970 SDEBUG(" dst: ");
971 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
972 }
973
974 static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
975 {
976 uint8_t attr = U8((*ptr)++);
977 uint32_t src, val, target;
978 SDEBUG(" switch: ");
979 src = atom_get_src(ctx, attr, ptr);
980 while (U16(*ptr) != ATOM_CASE_END)
981 if (U8(*ptr) == ATOM_CASE_MAGIC) {
982 (*ptr)++;
983 SDEBUG(" case: ");
984 val =
985 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
986 ptr);
987 target = U16(*ptr);
988 if (val == src) {
989 SDEBUG(" target: %04X\n", target);
990 *ptr = ctx->start + target;
991 return;
992 }
993 (*ptr) += 2;
994 } else {
995 printk(KERN_INFO "Bad case.\n");
996 return;
997 }
998 (*ptr) += 2;
999 }
1000
1001 static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1002 {
1003 uint8_t attr = U8((*ptr)++);
1004 uint32_t dst, src;
1005 SDEBUG(" src1: ");
1006 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1007 SDEBUG(" src2: ");
1008 src = atom_get_src(ctx, attr, ptr);
1009 ctx->ctx->cs_equal = ((dst & src) == 0);
1010 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1011 }
1012
1013 static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1014 {
1015 uint8_t attr = U8((*ptr)++);
1016 uint32_t dst, src, saved;
1017 int dptr = *ptr;
1018 SDEBUG(" dst: ");
1019 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1020 SDEBUG(" src: ");
1021 src = atom_get_src(ctx, attr, ptr);
1022 dst ^= src;
1023 SDEBUG(" dst: ");
1024 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1025 }
1026
1027 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1028 {
1029 printk(KERN_INFO "unimplemented!\n");
1030 }
1031
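/*
 * Opcode dispatch table, indexed by the opcode byte (entry 0 is unused).
 * The second field is passed to the handler as @arg: the destination
 * argument type for ALU/move ops, the port for the set-port ops, the
 * condition code for jumps, or the unit for delays.
 */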
1032 static struct {
1033 void (*func) (atom_exec_context *, int *, int);
1034 int arg;
1035 } opcode_table[ATOM_OP_CNT] = {
1036 {
1037 NULL, 0}, {
1038 atom_op_move, ATOM_ARG_REG}, {
1039 atom_op_move, ATOM_ARG_PS}, {
1040 atom_op_move, ATOM_ARG_WS}, {
1041 atom_op_move, ATOM_ARG_FB}, {
1042 atom_op_move, ATOM_ARG_PLL}, {
1043 atom_op_move, ATOM_ARG_MC}, {
1044 atom_op_and, ATOM_ARG_REG}, {
1045 atom_op_and, ATOM_ARG_PS}, {
1046 atom_op_and, ATOM_ARG_WS}, {
1047 atom_op_and, ATOM_ARG_FB}, {
1048 atom_op_and, ATOM_ARG_PLL}, {
1049 atom_op_and, ATOM_ARG_MC}, {
1050 atom_op_or, ATOM_ARG_REG}, {
1051 atom_op_or, ATOM_ARG_PS}, {
1052 atom_op_or, ATOM_ARG_WS}, {
1053 atom_op_or, ATOM_ARG_FB}, {
1054 atom_op_or, ATOM_ARG_PLL}, {
1055 atom_op_or, ATOM_ARG_MC}, {
1056 atom_op_shift_left, ATOM_ARG_REG}, {
1057 atom_op_shift_left, ATOM_ARG_PS}, {
1058 atom_op_shift_left, ATOM_ARG_WS}, {
1059 atom_op_shift_left, ATOM_ARG_FB}, {
1060 atom_op_shift_left, ATOM_ARG_PLL}, {
1061 atom_op_shift_left, ATOM_ARG_MC}, {
1062 atom_op_shift_right, ATOM_ARG_REG}, {
1063 atom_op_shift_right, ATOM_ARG_PS}, {
1064 atom_op_shift_right, ATOM_ARG_WS}, {
1065 atom_op_shift_right, ATOM_ARG_FB}, {
1066 atom_op_shift_right, ATOM_ARG_PLL}, {
1067 atom_op_shift_right, ATOM_ARG_MC}, {
1068 atom_op_mul, ATOM_ARG_REG}, {
1069 atom_op_mul, ATOM_ARG_PS}, {
1070 atom_op_mul, ATOM_ARG_WS}, {
1071 atom_op_mul, ATOM_ARG_FB}, {
1072 atom_op_mul, ATOM_ARG_PLL}, {
1073 atom_op_mul, ATOM_ARG_MC}, {
1074 atom_op_div, ATOM_ARG_REG}, {
1075 atom_op_div, ATOM_ARG_PS}, {
1076 atom_op_div, ATOM_ARG_WS}, {
1077 atom_op_div, ATOM_ARG_FB}, {
1078 atom_op_div, ATOM_ARG_PLL}, {
1079 atom_op_div, ATOM_ARG_MC}, {
1080 atom_op_add, ATOM_ARG_REG}, {
1081 atom_op_add, ATOM_ARG_PS}, {
1082 atom_op_add, ATOM_ARG_WS}, {
1083 atom_op_add, ATOM_ARG_FB}, {
1084 atom_op_add, ATOM_ARG_PLL}, {
1085 atom_op_add, ATOM_ARG_MC}, {
1086 atom_op_sub, ATOM_ARG_REG}, {
1087 atom_op_sub, ATOM_ARG_PS}, {
1088 atom_op_sub, ATOM_ARG_WS}, {
1089 atom_op_sub, ATOM_ARG_FB}, {
1090 atom_op_sub, ATOM_ARG_PLL}, {
1091 atom_op_sub, ATOM_ARG_MC}, {
1092 atom_op_setport, ATOM_PORT_ATI}, {
1093 atom_op_setport, ATOM_PORT_PCI}, {
1094 atom_op_setport, ATOM_PORT_SYSIO}, {
1095 atom_op_setregblock, 0}, {
1096 atom_op_setfbbase, 0}, {
1097 atom_op_compare, ATOM_ARG_REG}, {
1098 atom_op_compare, ATOM_ARG_PS}, {
1099 atom_op_compare, ATOM_ARG_WS}, {
1100 atom_op_compare, ATOM_ARG_FB}, {
1101 atom_op_compare, ATOM_ARG_PLL}, {
1102 atom_op_compare, ATOM_ARG_MC}, {
1103 atom_op_switch, 0}, {
1104 atom_op_jump, ATOM_COND_ALWAYS}, {
1105 atom_op_jump, ATOM_COND_EQUAL}, {
1106 atom_op_jump, ATOM_COND_BELOW}, {
1107 atom_op_jump, ATOM_COND_ABOVE}, {
1108 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1109 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1110 atom_op_jump, ATOM_COND_NOTEQUAL}, {
1111 atom_op_test, ATOM_ARG_REG}, {
1112 atom_op_test, ATOM_ARG_PS}, {
1113 atom_op_test, ATOM_ARG_WS}, {
1114 atom_op_test, ATOM_ARG_FB}, {
1115 atom_op_test, ATOM_ARG_PLL}, {
1116 atom_op_test, ATOM_ARG_MC}, {
1117 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1118 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1119 atom_op_calltable, 0}, {
1120 atom_op_repeat, 0}, {
1121 atom_op_clear, ATOM_ARG_REG}, {
1122 atom_op_clear, ATOM_ARG_PS}, {
1123 atom_op_clear, ATOM_ARG_WS}, {
1124 atom_op_clear, ATOM_ARG_FB}, {
1125 atom_op_clear, ATOM_ARG_PLL}, {
1126 atom_op_clear, ATOM_ARG_MC}, {
1127 atom_op_nop, 0}, {
1128 atom_op_eot, 0}, {
1129 atom_op_mask, ATOM_ARG_REG}, {
1130 atom_op_mask, ATOM_ARG_PS}, {
1131 atom_op_mask, ATOM_ARG_WS}, {
1132 atom_op_mask, ATOM_ARG_FB}, {
1133 atom_op_mask, ATOM_ARG_PLL}, {
1134 atom_op_mask, ATOM_ARG_MC}, {
1135 atom_op_postcard, 0}, {
1136 atom_op_beep, 0}, {
1137 atom_op_savereg, 0}, {
1138 atom_op_restorereg, 0}, {
1139 atom_op_setdatablock, 0}, {
1140 atom_op_xor, ATOM_ARG_REG}, {
1141 atom_op_xor, ATOM_ARG_PS}, {
1142 atom_op_xor, ATOM_ARG_WS}, {
1143 atom_op_xor, ATOM_ARG_FB}, {
1144 atom_op_xor, ATOM_ARG_PLL}, {
1145 atom_op_xor, ATOM_ARG_MC}, {
1146 atom_op_shl, ATOM_ARG_REG}, {
1147 atom_op_shl, ATOM_ARG_PS}, {
1148 atom_op_shl, ATOM_ARG_WS}, {
1149 atom_op_shl, ATOM_ARG_FB}, {
1150 atom_op_shl, ATOM_ARG_PLL}, {
1151 atom_op_shl, ATOM_ARG_MC}, {
1152 atom_op_shr, ATOM_ARG_REG}, {
1153 atom_op_shr, ATOM_ARG_PS}, {
1154 atom_op_shr, ATOM_ARG_WS}, {
1155 atom_op_shr, ATOM_ARG_FB}, {
1156 atom_op_shr, ATOM_ARG_PLL}, {
1157 atom_op_shr, ATOM_ARG_MC}, {
1158 atom_op_debug, 0},};
1159
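/*
 * Execute one command table with ctx->mutex already held.  A missing table
 * entry fails with -EINVAL.  The table header supplies its length, the
 * workspace size (in longs) and the parameter space size; a private
 * workspace is allocated for the run and opcodes are dispatched until
 * ATOM_OP_EOT, an unknown opcode, or an abort requested by a handler
 * (also reported as -EINVAL).
 */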
1160 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1161 {
1162 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1163 int len, ws, ps, ptr;
1164 unsigned char op;
1165 atom_exec_context ectx;
1166 int ret = 0;
1167
1168 if (!base)
1169 return -EINVAL;
1170
1171 len = CU16(base + ATOM_CT_SIZE_PTR);
1172 ws = CU8(base + ATOM_CT_WS_PTR);
1173 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1174 ptr = base + ATOM_CT_CODE_PTR;
1175
1176 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1177
1178 ectx.ctx = ctx;
1179 ectx.ps_shift = ps / 4;
1180 ectx.start = base;
1181 ectx.ps = params;
1182 ectx.abort = false;
1183 ectx.last_jump = 0;
1184 if (ws)
1185 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1186 else
1187 ectx.ws = NULL;
1188
1189 debug_depth++;
1190 while (1) {
1191 op = CU8(ptr++);
1192 if (op < ATOM_OP_NAMES_CNT)
1193 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1194 else
1195 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1196 if (ectx.abort) {
1197 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1198 base, len, ws, ps, ptr - 1);
1199 ret = -EINVAL;
1200 goto free;
1201 }
1202
1203 if (op < ATOM_OP_CNT && op > 0)
1204 opcode_table[op].func(&ectx, &ptr,
1205 opcode_table[op].arg);
1206 else
1207 break;
1208
1209 if (op == ATOM_OP_EOT)
1210 break;
1211 }
1212 debug_depth--;
1213 SDEBUG("<<\n");
1214
1215 free:
1216 if (ws)
1217 kfree(ectx.ws);
1218 return ret;
1219 }
1220
1221 int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
1222 {
1223 int r;
1224
1225 mutex_lock(&ctx->mutex);
1226 /* reset data block */
1227 ctx->data_block = 0;
1228 /* reset reg block */
1229 ctx->reg_block = 0;
1230 /* reset fb window */
1231 ctx->fb_base = 0;
1232 /* reset io mode */
1233 ctx->io_mode = ATOM_IO_MM;
1234 /* reset divmul */
1235 ctx->divmul[0] = 0;
1236 ctx->divmul[1] = 0;
1237 r = atom_execute_table_locked(ctx, index, params);
1238 mutex_unlock(&ctx->mutex);
1239 return r;
1240 }
1241
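/*
 * Public entry point: serializes callers on scratch_mutex, then takes
 * ctx->mutex in the scratch_unlocked variant above.  Callers build the
 * parameter-space structure for the table and pass it as @params.  An
 * illustrative sketch only - the table and argument structure below are
 * merely one example of how radeon callers typically use this:
 *
 *	ENABLE_CRTC_PS_ALLOCATION args;
 *	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
 *
 *	memset(&args, 0, sizeof(args));
 *	args.ucCRTC = crtc_id;
 *	args.ucEnable = ATOM_ENABLE;
 *	atom_execute_table(rdev->mode_info.atom_context, index,
 *			   (uint32_t *)&args);
 */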
1242 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1243 {
1244 int r;
1245 mutex_lock(&ctx->scratch_mutex);
1246 r = atom_execute_table_scratch_unlocked(ctx, index, params);
1247 mutex_unlock(&ctx->scratch_mutex);
1248 return r;
1249 }
1250
1251 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1252
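/*
 * Scan the indirect IO data block and record, for every port id, the
 * offset of its IIO program so atom_iio_execute() can find it later.  Each
 * program starts with ATOM_IIO_START followed by the port id; atom_iio_len[]
 * gives the length of each opcode so the scan can skip to ATOM_IIO_END.
 */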
1253 static void atom_index_iio(struct atom_context *ctx, int base)
1254 {
1255 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1256 if (!ctx->iio)
1257 return;
1258 while (CU8(base) == ATOM_IIO_START) {
1259 ctx->iio[CU8(base + 1)] = base + 2;
1260 base += 2;
1261 while (CU8(base) != ATOM_IIO_END)
1262 base += atom_iio_len[CU8(base)];
1263 base += 3;
1264 }
1265 }
1266
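/*
 * Validate an ATOM BIOS image and build the interpreter context: check the
 * BIOS magic, the ATI magic string and the ROM table magic, record the
 * offsets of the master command and data tables, index the indirect IO
 * programs, and print the (not always NUL-terminated) BIOS name string.
 */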
1267 struct atom_context *atom_parse(struct card_info *card, void *bios)
1268 {
1269 int base;
1270 struct atom_context *ctx =
1271 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1272 char *str;
1273 char name[512];
1274 int i;
1275
1276 if (!ctx)
1277 return NULL;
1278
1279 ctx->card = card;
1280 ctx->bios = bios;
1281
1282 if (CU16(0) != ATOM_BIOS_MAGIC) {
1283 printk(KERN_INFO "Invalid BIOS magic.\n");
1284 kfree(ctx);
1285 return NULL;
1286 }
1287 if (strncmp
1288 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1289 strlen(ATOM_ATI_MAGIC))) {
1290 printk(KERN_INFO "Invalid ATI magic.\n");
1291 kfree(ctx);
1292 return NULL;
1293 }
1294
1295 base = CU16(ATOM_ROM_TABLE_PTR);
1296 if (strncmp
1297 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1298 strlen(ATOM_ROM_MAGIC))) {
1299 printk(KERN_INFO "Invalid ATOM magic.\n");
1300 kfree(ctx);
1301 return NULL;
1302 }
1303
1304 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1305 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1306 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1307 if (!ctx->iio) {
1308 atom_destroy(ctx);
1309 return NULL;
1310 }
1311
1312 str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
1313 while (*str && ((*str == '\n') || (*str == '\r')))
1314 str++;
1315 /* name string isn't always 0 terminated */
1316 for (i = 0; i < 511; i++) {
1317 name[i] = str[i];
1318 if (name[i] < '.' || name[i] > 'z') {
1319 name[i] = 0;
1320 break;
1321 }
1322 }
1323 printk(KERN_INFO "ATOM BIOS: %s\n", name);
1324
1325 return ctx;
1326 }
1327
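/*
 * Run the ASIC_Init command table with the default engine and memory
 * clocks taken from the firmware info data table; pre-R600 parts also get
 * the SpeedFanControl table executed afterwards.
 */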
1328 int atom_asic_init(struct atom_context *ctx)
1329 {
1330 struct radeon_device *rdev = ctx->card->dev->dev_private;
1331 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1332 uint32_t ps[16];
1333 int ret;
1334
1335 memset(ps, 0, 64);
1336
1337 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1338 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1339 if (!ps[0] || !ps[1])
1340 return 1;
1341
1342 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1343 return 1;
1344 ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1345 if (ret)
1346 return ret;
1347
1348 memset(ps, 0, 64);
1349
1350 if (rdev->family < CHIP_R600) {
1351 if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
1352 atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
1353 }
1354 return ret;
1355 }
1356
1357 void atom_destroy(struct atom_context *ctx)
1358 {
1359 kfree(ctx->iio);
1360 kfree(ctx);
1361 }
1362
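/*
 * Look up one entry of the master data table: report whether it exists and
 * return its size, format/content revisions and offset inside the BIOS
 * image.  A typical caller (sketch only; the table and structure names are
 * just an example) casts the offset to the matching atombios structure:
 *
 *	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
 *	uint16_t data_offset;
 *	uint8_t frev, crev;
 *
 *	if (atom_parse_data_header(ctx, index, NULL, &frev, &crev,
 *				   &data_offset)) {
 *		ATOM_FIRMWARE_INFO *info =
 *			(ATOM_FIRMWARE_INFO *)(ctx->bios + data_offset);
 *		...
 *	}
 */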
1363 bool atom_parse_data_header(struct atom_context *ctx, int index,
1364 uint16_t * size, uint8_t * frev, uint8_t * crev,
1365 uint16_t * data_start)
1366 {
1367 int offset = index * 2 + 4;
1368 int idx = CU16(ctx->data_table + offset);
1369 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1370
1371 if (!mdt[index])
1372 return false;
1373
1374 if (size)
1375 *size = CU16(idx);
1376 if (frev)
1377 *frev = CU8(idx + 2);
1378 if (crev)
1379 *crev = CU8(idx + 3);
1380 *data_start = idx;
1381 return true;
1382 }
1383
1384 bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1385 uint8_t * crev)
1386 {
1387 int offset = index * 2 + 4;
1388 int idx = CU16(ctx->cmd_table + offset);
1389 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1390
1391 if (!mct[index])
1392 return false;
1393
1394 if (frev)
1395 *frev = CU8(idx + 2);
1396 if (crev)
1397 *crev = CU8(idx + 3);
1398 return true;
1399 }
1400
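/*
 * Size the scratch buffer used for ATOM_ARG_FB accesses from the
 * VRAM_UsageByFirmware data table (falling back to 20 KiB when the table
 * is absent or reports zero) and allocate it.
 */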
1401 int atom_allocate_fb_scratch(struct atom_context *ctx)
1402 {
1403 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1404 uint16_t data_offset;
1405 int usage_bytes = 0;
1406 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1407
1408 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1409 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1410
1411 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1412 le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1413 le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1414
1415 usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1416 }
1417 ctx->scratch_size_bytes = 0;
1418 if (usage_bytes == 0)
1419 usage_bytes = 20 * 1024;
1420 /* allocate some scratch memory */
1421 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1422 if (!ctx->scratch)
1423 return -ENOMEM;
1424 ctx->scratch_size_bytes = usage_bytes;
1425 return 0;
1426 }