/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
struct r600_cs_track {
	/* configuration we mirror so that we use same code between kms/ums */
	u32			group_size, nbanks, npipes;
	/* state we track (fields referenced by the checkers below) */
	u32			sq_config, nsamples;
	u32			cb_color_base_last[8];
	u32			cb_color_size[8];
	u32			cb_color_info[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u32			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u32			cb_color_size_idx[8];
	u32			cb_target_mask, cb_shader_mask;
	u32			vgt_strmout_en, vgt_strmout_buffer_en;
	u32			db_depth_control, db_depth_info;
	u32			db_depth_size, db_depth_size_idx, db_depth_view;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
};
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
	switch (format) {
	case V_038004_COLOR_8:
	case V_038004_COLOR_4_4:
	case V_038004_COLOR_3_3_2:
		*bpe = 1;
		break;
	case V_038004_COLOR_16:
	case V_038004_COLOR_16_FLOAT:
	case V_038004_COLOR_8_8:
	case V_038004_COLOR_5_6_5:
	case V_038004_COLOR_6_5_5:
	case V_038004_COLOR_1_5_5_5:
	case V_038004_COLOR_4_4_4_4:
	case V_038004_COLOR_5_5_5_1:
		*bpe = 2;
		break;
	case V_038004_FMT_8_8_8:
		*bpe = 3;
		break;
	case V_038004_COLOR_32:
	case V_038004_COLOR_32_FLOAT:
	case V_038004_COLOR_16_16:
	case V_038004_COLOR_16_16_FLOAT:
	case V_038004_COLOR_8_24:
	case V_038004_COLOR_8_24_FLOAT:
	case V_038004_COLOR_24_8:
	case V_038004_COLOR_24_8_FLOAT:
	case V_038004_COLOR_10_11_11:
	case V_038004_COLOR_10_11_11_FLOAT:
	case V_038004_COLOR_11_11_10:
	case V_038004_COLOR_11_11_10_FLOAT:
	case V_038004_COLOR_2_10_10_10:
	case V_038004_COLOR_8_8_8_8:
	case V_038004_COLOR_10_10_10_2:
	case V_038004_FMT_5_9_9_9_SHAREDEXP:
	case V_038004_FMT_32_AS_8:
	case V_038004_FMT_32_AS_8_8:
		*bpe = 4;
		break;
	case V_038004_COLOR_X24_8_32_FLOAT:
	case V_038004_COLOR_32_32:
	case V_038004_COLOR_32_32_FLOAT:
	case V_038004_COLOR_16_16_16_16:
	case V_038004_COLOR_16_16_16_16_FLOAT:
		*bpe = 8;
		break;
	case V_038004_FMT_16_16_16:
	case V_038004_FMT_16_16_16_FLOAT:
		*bpe = 6;
		break;
	case V_038004_FMT_32_32_32:
	case V_038004_FMT_32_32_32_FLOAT:
		*bpe = 12;
		break;
	case V_038004_COLOR_32_32_32_32:
	case V_038004_COLOR_32_32_32_32_FLOAT:
		*bpe = 16;
		break;
	case V_038004_FMT_GB_GR:
	case V_038004_FMT_BG_RG:
	case V_038004_COLOR_INVALID:
	default:
		*bpe = 16;
		return -EINVAL;
	}
	return 0;
}
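/*
 * Usage sketch (illustrative, not from the original file): callers treat a
 * non-zero return as "reject the command stream", and on success *bpe holds
 * the bytes per element, e.g. 2 for V_038004_COLOR_5_6_5 and 4 for
 * V_038004_COLOR_8_8_8_8:
 *
 *	u32 bpe = 0;
 *	if (r600_bpe_from_format(&bpe, fmt))
 *		return -EINVAL;
 */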
struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 bpe;
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
						u32 *pitch_align,
						u32 *height_align,
						u32 *depth_align,
						u64 *base_align)
{
	u32 tile_width = 8, tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->bpe * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width,
				   (u32)(((values->group_size / tile_height) /
					  (values->bpe * values->nsamples)) *
					 values->nbanks)) * tile_width;
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
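/*
 * Worked example of the math above (illustrative numbers): with nbanks = 4,
 * npipes = 2, bpe = 4, nsamples = 1 and group_size = 256, the 8x8 tiles give
 * tile_bytes = 8 * 8 * 4 = 256 and macro_tile_bytes = 4 * 2 * 256 = 2048;
 * ARRAY_1D_TILED_THIN1 then yields pitch_align = max(8, 256 / (8 * 4)) = 8
 * pixels, height_align = 8 and base_align = 256 bytes.
 */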
static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
}
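/*
 * Note: the defaults above are deliberately worst-case. Unprogrammed state
 * is either an obvious sentinel (0xFFFFFFFF offsets and masks) or the most
 * demanding setting ("biggest format", htile enabled), so a command stream
 * that never programs a register still has to pass the strictest checks.
 */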
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 bpe = 0, slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib->ptr;
	u32 array_mode;

	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
		return -EINVAL;
	}
	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
			 i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = track->nsamples;
	array_check.bpe = bpe;
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
			 __func__, __LINE__, pitch);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
			 __func__, __LINE__, height);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
		return -EINVAL;
	}

	/* check offset */
	tmp = height * pitch * bpe;
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			tmp = (height - 7) * pitch * bpe;
			if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
				dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i,
					 track->cb_color_bo_offset[i], tmp,
					 radeon_bo_size(track->cb_color_bo[i]));
				return -EINVAL;
			}
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	return 0;
}
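/*
 * Worked example (illustrative): for a 1024x768 buffer with bpe = 4, the
 * DDX programs PITCH_TILE_MAX = 1024/8 - 1 = 127 and SLICE_TILE_MAX =
 * 1024*768/64 - 1 = 12287; the code above then recovers pitch =
 * (127 + 1) * 8 = 1024 pixels and height = 12288 * 64 / 1024 = 768 lines,
 * and requires the BO to hold at least 1024 * 768 * 4 bytes past the
 * programmed offset.
 */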
static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;
	volatile u32 *ib = p->ib->ptr;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;
	/* we don't support out buffer yet */
	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}
	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	tmp = track->cb_target_mask;
	for (i = 0; i < 8; i++) {
		if ((tmp >> (i * 4)) & 0xF) {
			/* at least one component is enabled */
			if (track->cb_color_bo[i] == NULL) {
				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
					 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				return -EINVAL;
			}
			/* perform rewrite of CB_COLOR[0-7]_SIZE */
			r = r600_cs_track_validate_cb(p, i);
			if (r)
				return r;
		}
	}
	/* Check depth buffer */
	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	    G_028800_Z_ENABLE(track->db_depth_control)) {
		u32 nviews, bpe, ntiles, size, slice_tile_max;
		u32 height, height_align, pitch, pitch_align, depth_align;
		u64 base_offset, base_align;
		struct array_mode_checker array_check;
		u32 array_mode;

		if (track->db_bo == NULL) {
			dev_warn(p->dev, "z/stencil with no depth buffer\n");
			return -EINVAL;
		}
		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
			return -EINVAL;
		}
		switch (G_028010_FORMAT(track->db_depth_info)) {
		case V_028010_DEPTH_16:
			bpe = 2;
			break;
		case V_028010_DEPTH_X8_24:
		case V_028010_DEPTH_8_24:
		case V_028010_DEPTH_X8_24_FLOAT:
		case V_028010_DEPTH_8_24_FLOAT:
		case V_028010_DEPTH_32_FLOAT:
			bpe = 4;
			break;
		case V_028010_DEPTH_X24_8_32_FLOAT:
			bpe = 8;
			break;
		default:
			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			if (!track->db_depth_size_idx) {
				dev_warn(p->dev, "z/stencil buffer size not set\n");
				return -EINVAL;
			}
			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
			tmp = (tmp / bpe) >> 6;
			if (!tmp) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
					 track->db_depth_size, bpe, track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
		} else {
			size = radeon_bo_size(track->db_bo);
			/* pitch in pixels */
			pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
			slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			slice_tile_max *= 64;
			height = slice_tile_max / pitch;

			base_offset = track->db_bo_mc + track->db_offset;
			array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
			array_check.array_mode = array_mode;
			array_check.group_size = track->group_size;
			array_check.nbanks = track->nbanks;
			array_check.npipes = track->npipes;
			array_check.nsamples = track->nsamples;
			array_check.bpe = bpe;
			if (r600_get_array_mode_alignment(&array_check,
							  &pitch_align, &height_align, &depth_align, &base_align)) {
				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					 G_028010_ARRAY_MODE(track->db_depth_info),
					 track->db_depth_info);
				return -EINVAL;
			}
			switch (array_mode) {
			case V_028010_ARRAY_1D_TILED_THIN1:
				/* don't break userspace */
				height &= ~0x7;
				break;
			case V_028010_ARRAY_2D_TILED_THIN1:
				break;
			default:
				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					 G_028010_ARRAY_MODE(track->db_depth_info),
					 track->db_depth_info);
				return -EINVAL;
			}

			if (!IS_ALIGNED(pitch, pitch_align)) {
				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
					 __func__, __LINE__, pitch);
				return -EINVAL;
			}
			if (!IS_ALIGNED(height, height_align)) {
				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
					 __func__, __LINE__, height);
				return -EINVAL;
			}
			if (!IS_ALIGNED(base_offset, base_align)) {
				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
				return -EINVAL;
			}

			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
			tmp = ntiles * bpe * 64 * nviews;
			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
		}
	}
	return 0;
}
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 */
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			struct radeon_cs_packet *pkt,
			unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
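/*
 * Decode illustration (assuming the usual CP header layout behind these
 * macros: type in bits [31:30], count in bits [29:16], PACKET3 opcode in
 * bits [15:8]): header 0xC0012D00 parses as type 3, count 1, opcode 0x2D,
 * i.e. a packet occupying count + 2 = 3 dwords.
 */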
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3, then look the
 * relocation entry up in the relocation chunk and return it.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resolved relocation
 *
 * Check that the next packet is a relocation packet3, then compute the GPU
 * offset directly from the relocation chunk data (non-MM path).
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
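/*
 * Layout assumed above: each legacy relocation entry is 4 dwords (hence the
 * idx / 4 lookup in the mm variant), with the low 32 bits of the GPU offset
 * in dword 0 and the high 32 bits in dword 3, which is why the nomm variant
 * reads kdata[idx + 0] and kdata[idx + 3].
 */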
/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:	parser structure holding parsing context.
 *
 * Check whether the next packet is a relocation packet3 (a PACKET3 NOP).
 */
static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nops out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}

	return 0;
}
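/*
 * Layout the fixups above rely on: the PACKET0 VLINE_START_END write sits at
 * h_idx (header) and h_idx + 1 (value), and the WAIT_REG_MEM packet occupies
 * the seven dwords h_idx + 2 .. h_idx + 8; that is exactly the range nopped
 * out when the CRTC is disabled, and h_idx + 4 is the status-register dword
 * rewritten when the wait is retargeted to crtc 1.
 */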
static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i > last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case R_028010_DB_DEPTH_INFO:
		if (r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else
			track->db_depth_info = radeon_get_ib_value(p, idx);
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->nsamples = 1 << tmp;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		break;
		/* These registers were added late, there is userspace
		 * which does provide relocation for those but set
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set address to point to last
		 * CB_COLOR0_BASE, note that if userspace doesn't set
		 * CB_COLOR0_BASE before this register we will report
		 * error. Old userspace always set CB_COLOR0_BASE
		 * before any of this.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_frag_bo[tmp] = reloc->robj;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_tile_bo[tmp] = reloc->robj;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		break;
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
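/*
 * Bitmap math used above: registers are dword addresses, so reg >> 2 is the
 * register index; each u32 of r600_reg_safe_bm covers 32 registers (bit
 * select (reg >> 2) & 31, word index reg >> 7). A clear bit means the write
 * is safe as-is (return 0 early); a set bit routes the register through the
 * special handling in the switch.
 */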
static inline unsigned minify(unsigned size, unsigned levels)
{
	size = size >> levels;
	if (size < 1)
		size = 1;
	return size;
}
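/* e.g. minify(256, 3) = 32, while minify(4, 5) clamps to 1 */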
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
			      unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
			      unsigned pitch_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level, face;
	unsigned width, height, depth, rowstride, size;

	w0 = minify(w0, 0);
	h0 = minify(h0, 0);
	d0 = minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = minify(w0, i);
		height = minify(h0, i);
		depth = minify(d0, i);
		for (face = 0; face < nfaces; face++) {
			rowstride = ALIGN((width * bpe), pitch_align);
			size = height * rowstride * depth;
			offset += size;
			offset = (offset + 0x1f) & ~0x1f;
		}
	}
	*l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
	*mipmap_size = offset;
	if (!nlevels)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}
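/*
 * Illustration (made-up values): a 256x256x1 texture with bpe = 4 and
 * pitch_align = 1024 bytes gives *l0_size = ALIGN(256 * 4, 1024) * 256 * 1
 * = 262144 bytes. The loop accumulates every requested mip level (and each
 * cube face when nfaces = 6) into offset, rounding each image up to a
 * 32-byte boundary, and the base level is subtracted when blevel is 0 so
 * *mipmap_size covers only the levels stored in the mipmap BO.
 */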
/**
 * r600_check_texture_resource() - check if the texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u64 base_offset,
					      u64 mip_offset,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
	u32 word0, word1, l0_size, mipmap_size;
	u32 height_align, pitch, pitch_align, depth_align;
	u64 base_align;
	struct array_mode_checker array_check;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (tiling_flags & RADEON_TILING_MACRO)
		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
	else if (tiling_flags & RADEON_TILING_MICRO)
		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	word1 = radeon_get_ib_value(p, idx + 1);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	nfaces = 1;
	switch (G_038000_DIM(word0)) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_MSAA:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
		return -EINVAL;
	}

	/* pitch in texels */
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.bpe = bpe;
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
			 __func__, __LINE__, pitch);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
			 __func__, __LINE__, base_offset);
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
			 __func__, __LINE__, mip_offset);
		return -EINVAL;
	}

	word0 = radeon_get_ib_value(p, idx + 4);
	word1 = radeon_get_ib_value(p, idx + 5);
	blevel = G_038010_BASE_LEVEL(word0);
	nlevels = G_038014_LAST_LEVEL(word1);
	r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
			  (pitch_align * bpe),
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	word0 = radeon_get_ib_value(p, idx + 2) << 8;
	if ((l0_size + word0) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
			 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	word0 = radeon_get_ib_value(p, idx + 3) << 8;
	if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}
static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
						texture, mipmap,
						base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
						mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
						reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
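/*
 * Bounds-check illustration for the SET_*_REG cases above (made-up values):
 * a SET_CONTEXT_REG packet with idx_value = 0x200 and count = 2 spans
 * start_reg = (0x200 << 2) + PACKET3_SET_CONTEXT_REG_OFFSET up to
 * end_reg = start_reg + 4 * 2 - 4, i.e. two consecutive dword registers,
 * and each register in that window is vetted by r600_cs_check_reg().
 */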
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
 */
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	parser.track = track;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
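/*
 * Design note: the legacy (UMS) entry point has no struct radeon_device, so
 * it points parser.ib at a stack "fake_ib" whose ptr is the caller-supplied
 * buffer; the checker's ib[...] rewrites therefore land directly in the
 * caller's copy, and parser.rdev == NULL is what makes the trackers skip
 * the advanced kms-only checks.
 */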
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}