Merge tag 'batman-adv-fix-for-davem' of git://git.open-mesh.org/linux-merge
[deliverable/linux.git] / drivers / gpu / drm / radeon / r300_cmdbuf.c
CommitLineData
414ed537
DA
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
14adc892
CK
32 *
33 * ------------------------ This file is DEPRECATED! -------------------------
414ed537
DA
34 */
35
760285e7
DH
36#include <drm/drmP.h>
37#include <drm/drm_buffer.h>
38#include <drm/radeon_drm.h>
414ed537
DA
39#include "radeon_drv.h"
40#include "r300_reg.h"
41
958a6f8c
DM
42#include <asm/unaligned.h>
43
414ed537
DA
#define R300_SIMULTANEOUS_CLIPRECTS	4

/* R300_RE_CLIPRECT_CNTL values, indexed by (number of cliprects - 1). */
static const int r300_cliprect_cntl[4] = {
	0xAAAA, 0xEEEE, 0xFEFE, 0xFFFE
};
54
414ed537
DA
55/**
56 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
57 * buffer, starting with index n.
58 */
d985c108
DA
59static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
60 drm_radeon_kcmd_buffer_t *cmdbuf, int n)
414ed537 61{
c60ce623 62 struct drm_clip_rect box;
414ed537
DA
63 int nr;
64 int i;
65 RING_LOCALS;
66
67 nr = cmdbuf->nbox - n;
68 if (nr > R300_SIMULTANEOUS_CLIPRECTS)
69 nr = R300_SIMULTANEOUS_CLIPRECTS;
70
71 DRM_DEBUG("%i cliprects\n", nr);
72
73 if (nr) {
b5e89ed5
DA
74 BEGIN_RING(6 + nr * 2);
75 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
414ed537 76
b5e89ed5 77 for (i = 0; i < nr; ++i) {
fefaedcf 78 if (DRM_COPY_FROM_USER
b5e89ed5 79 (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
414ed537 80 DRM_ERROR("copy cliprect faulted\n");
20caafa6 81 return -EFAULT;
414ed537
DA
82 }
83
649ffc06
NH
84 box.x2--; /* Hardware expects inclusive bottom-right corner */
85 box.y2--;
86
3d5e2c13
DA
87 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
88 box.x1 = (box.x1) &
89 R300_CLIPRECT_MASK;
90 box.y1 = (box.y1) &
91 R300_CLIPRECT_MASK;
92 box.x2 = (box.x2) &
93 R300_CLIPRECT_MASK;
94 box.y2 = (box.y2) &
95 R300_CLIPRECT_MASK;
96 } else {
97 box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
98 R300_CLIPRECT_MASK;
99 box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
100 R300_CLIPRECT_MASK;
101 box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
102 R300_CLIPRECT_MASK;
103 box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
104 R300_CLIPRECT_MASK;
3d5e2c13 105 }
649ffc06 106
414ed537 107 OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
b5e89ed5 108 (box.y1 << R300_CLIPRECT_Y_SHIFT));
414ed537 109 OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
b5e89ed5 110 (box.y2 << R300_CLIPRECT_Y_SHIFT));
3d5e2c13 111
414ed537
DA
112 }
113
b5e89ed5 114 OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
414ed537
DA
115
116 /* TODO/SECURITY: Force scissors to a safe value, otherwise the
b5e89ed5
DA
117 * client might be able to trample over memory.
118 * The impact should be very limited, but I'd rather be safe than
119 * sorry.
120 */
121 OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
122 OUT_RING(0);
123 OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
414ed537 124 ADVANCE_RING();
b5e89ed5 125 } else {
414ed537
DA
126 /* Why we allow zero cliprect rendering:
127 * There are some commands in a command buffer that must be submitted
128 * even when there are no cliprects, e.g. DMA buffer discard
129 * or state setting (though state setting could be avoided by
130 * simulating a loss of context).
131 *
132 * Now since the cmdbuf interface is so chaotic right now (and is
133 * bound to remain that way for a bit until things settle down),
134 * it is basically impossible to filter out the commands that are
135 * necessary and those that aren't.
136 *
137 * So I choose the safe way and don't do any filtering at all;
138 * instead, I simply set up the engine so that all rendering
139 * can't produce any fragments.
140 */
141 BEGIN_RING(2);
b5e89ed5 142 OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
414ed537 143 ADVANCE_RING();
b5e89ed5 144 }
414ed537 145
54f961a6
JG
146 /* flus cache and wait idle clean after cliprect change */
147 BEGIN_RING(2);
148 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
149 OUT_RING(R300_RB3D_DC_FLUSH);
150 ADVANCE_RING();
151 BEGIN_RING(2);
152 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
153 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
154 ADVANCE_RING();
155 /* set flush flag */
156 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
157
414ed537
DA
158 return 0;
159}
160
b3a83639 161static u8 r300_reg_flags[0x10000 >> 2];
414ed537 162
3d5e2c13 163void r300_init_reg_flags(struct drm_device *dev)
414ed537
DA
164{
165 int i;
3d5e2c13
DA
166 drm_radeon_private_t *dev_priv = dev->dev_private;
167
b5e89ed5
DA
168 memset(r300_reg_flags, 0, 0x10000 >> 2);
169#define ADD_RANGE_MARK(reg, count,mark) \
414ed537
DA
170 for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
171 r300_reg_flags[i]|=(mark);
b5e89ed5
DA
172
173#define MARK_SAFE 1
174#define MARK_CHECK_OFFSET 2
175
176#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
414ed537
DA
177
178 /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
179 ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
c6c656b4 180 ADD_RANGE(R300_VAP_CNTL, 1);
414ed537
DA
181 ADD_RANGE(R300_SE_VTE_CNTL, 2);
182 ADD_RANGE(0x2134, 2);
c6c656b4 183 ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
414ed537
DA
184 ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
185 ADD_RANGE(0x21DC, 1);
c6c656b4
OM
186 ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
187 ADD_RANGE(R300_VAP_CLIP_X_0, 4);
54f961a6 188 ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
c6c656b4 189 ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
414ed537
DA
190 ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
191 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
192 ADD_RANGE(R300_GB_ENABLE, 1);
193 ADD_RANGE(R300_GB_MSPOS0, 5);
54f961a6 194 ADD_RANGE(R300_TX_INVALTAGS, 1);
414ed537
DA
195 ADD_RANGE(R300_TX_ENABLE, 1);
196 ADD_RANGE(0x4200, 4);
197 ADD_RANGE(0x4214, 1);
198 ADD_RANGE(R300_RE_POINTSIZE, 1);
199 ADD_RANGE(0x4230, 3);
200 ADD_RANGE(R300_RE_LINE_CNT, 1);
c6c656b4 201 ADD_RANGE(R300_RE_UNK4238, 1);
414ed537 202 ADD_RANGE(0x4260, 3);
c6c656b4
OM
203 ADD_RANGE(R300_RE_SHADE, 4);
204 ADD_RANGE(R300_RE_POLYGON_MODE, 5);
205 ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
414ed537 206 ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
c6c656b4 207 ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
414ed537
DA
208 ADD_RANGE(R300_RE_CULL_CNTL, 1);
209 ADD_RANGE(0x42C0, 2);
210 ADD_RANGE(R300_RS_CNTL_0, 2);
c0beb2a7 211
af7ae351
MC
212 ADD_RANGE(R300_SU_REG_DEST, 1);
213 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
214 ADD_RANGE(RV530_FG_ZBREG_DEST, 1);
215
21efa2ba 216 ADD_RANGE(R300_SC_HYPERZ, 2);
414ed537 217 ADD_RANGE(0x43E8, 1);
c0beb2a7 218
414ed537 219 ADD_RANGE(0x46A4, 5);
c0beb2a7 220
c6c656b4
OM
221 ADD_RANGE(R300_RE_FOG_STATE, 1);
222 ADD_RANGE(R300_FOG_COLOR_R, 3);
414ed537
DA
223 ADD_RANGE(R300_PP_ALPHA_TEST, 2);
224 ADD_RANGE(0x4BD8, 1);
225 ADD_RANGE(R300_PFS_PARAM_0_X, 64);
226 ADD_RANGE(0x4E00, 1);
227 ADD_RANGE(R300_RB3D_CBLEND, 2);
228 ADD_RANGE(R300_RB3D_COLORMASK, 1);
c6c656b4 229 ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
b5e89ed5 230 ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
414ed537
DA
231 ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
232 ADD_RANGE(0x4E50, 9);
233 ADD_RANGE(0x4E88, 1);
234 ADD_RANGE(0x4EA0, 2);
21efa2ba
DA
235 ADD_RANGE(R300_ZB_CNTL, 3);
236 ADD_RANGE(R300_ZB_FORMAT, 4);
237 ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
238 ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
239 ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
240 ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
af7ae351 241 ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */
414ed537
DA
242
243 ADD_RANGE(R300_TX_FILTER_0, 16);
45f17100 244 ADD_RANGE(R300_TX_FILTER1_0, 16);
414ed537
DA
245 ADD_RANGE(R300_TX_SIZE_0, 16);
246 ADD_RANGE(R300_TX_FORMAT_0, 16);
d985c108 247 ADD_RANGE(R300_TX_PITCH_0, 16);
b5e89ed5 248 /* Texture offset is dangerous and needs more checking */
414ed537 249 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
45f17100 250 ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
414ed537
DA
251 ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
252
253 /* Sporadic registers used as primitives are emitted */
21efa2ba 254 ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
414ed537
DA
255 ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
256 ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
257 ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
258
3d5e2c13 259 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
c0beb2a7
DA
260 ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
261 ADD_RANGE(R500_US_CONFIG, 2);
262 ADD_RANGE(R500_US_CODE_ADDR, 3);
263 ADD_RANGE(R500_US_FC_CTRL, 1);
264 ADD_RANGE(R500_RS_IP_0, 16);
265 ADD_RANGE(R500_RS_INST_0, 16);
266 ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
267 ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
21efa2ba 268 ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
c0beb2a7
DA
269 } else {
270 ADD_RANGE(R300_PFS_CNTL_0, 3);
271 ADD_RANGE(R300_PFS_NODE_0, 4);
272 ADD_RANGE(R300_PFS_TEXI_0, 64);
273 ADD_RANGE(R300_PFS_INSTR0_0, 64);
274 ADD_RANGE(R300_PFS_INSTR1_0, 64);
275 ADD_RANGE(R300_PFS_INSTR2_0, 64);
276 ADD_RANGE(R300_PFS_INSTR3_0, 64);
277 ADD_RANGE(R300_RS_INTERP_0, 8);
278 ADD_RANGE(R300_RS_ROUTE_0, 8);
279
3d5e2c13 280 }
414ed537
DA
281}
282
b5e89ed5 283static __inline__ int r300_check_range(unsigned reg, int count)
414ed537
DA
284{
285 int i;
b5e89ed5
DA
286 if (reg & ~0xffff)
287 return -1;
288 for (i = (reg >> 2); i < (reg >> 2) + count; i++)
289 if (r300_reg_flags[i] != MARK_SAFE)
290 return 1;
414ed537
DA
291 return 0;
292}
293
b5e89ed5
DA
294static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
295 dev_priv,
b3a83639 296 drm_radeon_kcmd_buffer_t
b5e89ed5
DA
297 * cmdbuf,
298 drm_r300_cmd_header_t
299 header)
414ed537
DA
300{
301 int reg;
302 int sz;
303 int i;
b4fe9454 304 u32 *value;
414ed537
DA
305 RING_LOCALS;
306
307 sz = header.packet0.count;
308 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
b5e89ed5
DA
309
310 if ((sz > 64) || (sz < 0)) {
b4fe9454
PN
311 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
312 reg, sz);
20caafa6 313 return -EINVAL;
b5e89ed5 314 }
b4fe9454 315
b5e89ed5 316 for (i = 0; i < sz; i++) {
b5e89ed5 317 switch (r300_reg_flags[(reg >> 2) + i]) {
414ed537
DA
318 case MARK_SAFE:
319 break;
320 case MARK_CHECK_OFFSET:
b4fe9454
PN
321 value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
322 if (!radeon_check_offset(dev_priv, *value)) {
323 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
324 reg, sz);
20caafa6 325 return -EINVAL;
b5e89ed5 326 }
414ed537
DA
327 break;
328 default:
b5e89ed5 329 DRM_ERROR("Register %04x failed check as flag=%02x\n",
b4fe9454 330 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
20caafa6 331 return -EINVAL;
414ed537 332 }
b5e89ed5
DA
333 }
334
335 BEGIN_RING(1 + sz);
336 OUT_RING(CP_PACKET0(reg, sz - 1));
b4fe9454 337 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
414ed537
DA
338 ADVANCE_RING();
339
414ed537
DA
340 return 0;
341}
342
343/**
344 * Emits a packet0 setting arbitrary registers.
345 * Called by r300_do_cp_cmdbuf.
346 *
347 * Note that checks are performed on contents and addresses of the registers
348 */
d985c108
DA
349static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
350 drm_radeon_kcmd_buffer_t *cmdbuf,
b5e89ed5 351 drm_r300_cmd_header_t header)
414ed537
DA
352{
353 int reg;
354 int sz;
355 RING_LOCALS;
356
357 sz = header.packet0.count;
358 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
359
360 if (!sz)
361 return 0;
362
b4fe9454 363 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
20caafa6 364 return -EINVAL;
b5e89ed5
DA
365
366 if (reg + sz * 4 >= 0x10000) {
367 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
368 sz);
20caafa6 369 return -EINVAL;
b5e89ed5 370 }
414ed537 371
b5e89ed5 372 if (r300_check_range(reg, sz)) {
414ed537 373 /* go and check everything */
b5e89ed5
DA
374 return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
375 header);
376 }
414ed537
DA
377 /* the rest of the data is safe to emit, whatever the values the user passed */
378
b5e89ed5
DA
379 BEGIN_RING(1 + sz);
380 OUT_RING(CP_PACKET0(reg, sz - 1));
b4fe9454 381 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
414ed537
DA
382 ADVANCE_RING();
383
414ed537
DA
384 return 0;
385}
386
414ed537
DA
387/**
388 * Uploads user-supplied vertex program instructions or parameters onto
389 * the graphics card.
390 * Called by r300_do_cp_cmdbuf.
391 */
d985c108
DA
392static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
393 drm_radeon_kcmd_buffer_t *cmdbuf,
414ed537
DA
394 drm_r300_cmd_header_t header)
395{
396 int sz;
397 int addr;
398 RING_LOCALS;
399
400 sz = header.vpu.count;
401 addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
402
403 if (!sz)
404 return 0;
b4fe9454 405 if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
20caafa6 406 return -EINVAL;
414ed537 407
54f961a6
JG
408 /* VAP is very sensitive so we purge cache before we program it
409 * and we also flush its state before & after */
410 BEGIN_RING(6);
411 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
412 OUT_RING(R300_RB3D_DC_FLUSH);
413 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
414 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
415 OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
416 OUT_RING(0);
417 ADVANCE_RING();
418 /* set flush flag */
419 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
420
421 BEGIN_RING(3 + sz * 4);
b5e89ed5
DA
422 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
423 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
b4fe9454 424 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
54f961a6 425 ADVANCE_RING();
414ed537 426
54f961a6
JG
427 BEGIN_RING(2);
428 OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
429 OUT_RING(0);
414ed537
DA
430 ADVANCE_RING();
431
414ed537
DA
432 return 0;
433}
434
414ed537
DA
435/**
436 * Emit a clear packet from userspace.
437 * Called by r300_emit_packet3.
438 */
d985c108
DA
439static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
440 drm_radeon_kcmd_buffer_t *cmdbuf)
414ed537
DA
441{
442 RING_LOCALS;
443
b4fe9454 444 if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
20caafa6 445 return -EINVAL;
414ed537
DA
446
447 BEGIN_RING(10);
b5e89ed5
DA
448 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
449 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
450 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
b4fe9454 451 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
414ed537
DA
452 ADVANCE_RING();
453
54f961a6
JG
454 BEGIN_RING(4);
455 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
456 OUT_RING(R300_RB3D_DC_FLUSH);
457 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
458 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
459 ADVANCE_RING();
460 /* set flush flag */
461 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
462
414ed537
DA
463 return 0;
464}
465
d985c108
DA
466static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
467 drm_radeon_kcmd_buffer_t *cmdbuf,
b5e89ed5 468 u32 header)
414ed537 469{
b5e89ed5
DA
470 int count, i, k;
471#define MAX_ARRAY_PACKET 64
b4fe9454 472 u32 *data;
414ed537
DA
473 u32 narrays;
474 RING_LOCALS;
475
b4fe9454 476 count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
b5e89ed5
DA
477
478 if ((count + 1) > MAX_ARRAY_PACKET) {
479 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
480 count);
20caafa6 481 return -EINVAL;
b5e89ed5 482 }
414ed537 483 /* carefully check packet contents */
b5e89ed5 484
b4fe9454
PN
485 /* We have already read the header so advance the buffer. */
486 drm_buffer_advance(cmdbuf->buffer, 4);
487
488 narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
b5e89ed5
DA
489 k = 0;
490 i = 1;
491 while ((k < narrays) && (i < (count + 1))) {
492 i++; /* skip attribute field */
b4fe9454
PN
493 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
494 if (!radeon_check_offset(dev_priv, *data)) {
b5e89ed5
DA
495 DRM_ERROR
496 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
497 k, i);
20caafa6 498 return -EINVAL;
b5e89ed5 499 }
414ed537
DA
500 k++;
501 i++;
b5e89ed5
DA
502 if (k == narrays)
503 break;
414ed537 504 /* have one more to process, they come in pairs */
b4fe9454
PN
505 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
506 if (!radeon_check_offset(dev_priv, *data)) {
b5e89ed5
DA
507 DRM_ERROR
508 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
509 k, i);
20caafa6 510 return -EINVAL;
414ed537 511 }
b5e89ed5
DA
512 k++;
513 i++;
514 }
414ed537 515 /* do the counts match what we expect ? */
b5e89ed5
DA
516 if ((k != narrays) || (i != (count + 1))) {
517 DRM_ERROR
518 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
519 k, i, narrays, count + 1);
20caafa6 520 return -EINVAL;
b5e89ed5 521 }
414ed537
DA
522
523 /* all clear, output packet */
524
b5e89ed5 525 BEGIN_RING(count + 2);
414ed537 526 OUT_RING(header);
b4fe9454 527 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
414ed537
DA
528 ADVANCE_RING();
529
414ed537
DA
530 return 0;
531}
d5ea702f 532
4e5e2e25
DA
533static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
534 drm_radeon_kcmd_buffer_t *cmdbuf)
535{
b4fe9454 536 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
4e5e2e25
DA
537 int count, ret;
538 RING_LOCALS;
539
4e5e2e25 540
b4fe9454 541 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
4e5e2e25 542
b4fe9454
PN
543 if (*cmd & 0x8000) {
544 u32 offset;
545 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
546 if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
4e5e2e25 547 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
b4fe9454
PN
548
549 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
550 offset = *cmd2 << 10;
1d6bb8e5 551 ret = !radeon_check_offset(dev_priv, offset);
73d72cff 552 if (ret) {
4e5e2e25 553 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
20caafa6 554 return -EINVAL;
4e5e2e25
DA
555 }
556 }
557
b4fe9454
PN
558 if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
559 (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
560 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
561 offset = *cmd3 << 10;
1d6bb8e5 562 ret = !radeon_check_offset(dev_priv, offset);
73d72cff 563 if (ret) {
4e5e2e25 564 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
20caafa6 565 return -EINVAL;
4e5e2e25 566 }
bc5f4523 567
4e5e2e25
DA
568 }
569 }
570
571 BEGIN_RING(count+2);
b4fe9454 572 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
4e5e2e25
DA
573 ADVANCE_RING();
574
4e5e2e25
DA
575 return 0;
576}
414ed537 577
e2898c5f
NH
578static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
579 drm_radeon_kcmd_buffer_t *cmdbuf)
a1aa2897 580{
b4fe9454
PN
581 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
582 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
e2898c5f
NH
583 int count;
584 int expected_count;
a1aa2897
RS
585 RING_LOCALS;
586
b4fe9454
PN
587 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
588
589 expected_count = *cmd1 >> 16;
590 if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
e2898c5f 591 expected_count = (expected_count+1)/2;
a1aa2897 592
e2898c5f
NH
593 if (count && count != expected_count) {
594 DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
595 count, expected_count);
20caafa6 596 return -EINVAL;
a1aa2897
RS
597 }
598
599 BEGIN_RING(count+2);
b4fe9454 600 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
a1aa2897
RS
601 ADVANCE_RING();
602
e2898c5f 603 if (!count) {
b4fe9454
PN
604 drm_r300_cmd_header_t stack_header, *header;
605 u32 *cmd1, *cmd2, *cmd3;
e2898c5f 606
b4fe9454
PN
607 if (drm_buffer_unprocessed(cmdbuf->buffer)
608 < 4*4 + sizeof(stack_header)) {
e2898c5f
NH
609 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
610 return -EINVAL;
611 }
612
b4fe9454
PN
613 header = drm_buffer_read_object(cmdbuf->buffer,
614 sizeof(stack_header), &stack_header);
e2898c5f 615
b4fe9454
PN
616 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
617 cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
618 cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
619 cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
e2898c5f 620
b4fe9454
PN
621 if (header->header.cmd_type != R300_CMD_PACKET3 ||
622 header->packet3.packet != R300_CMD_PACKET3_RAW ||
623 *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
e2898c5f
NH
624 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
625 return -EINVAL;
626 }
627
b4fe9454
PN
628 if ((*cmd1 & 0x8000ffff) != 0x80000810) {
629 DRM_ERROR("Invalid indx_buffer reg address %08X\n",
630 *cmd1);
e2898c5f
NH
631 return -EINVAL;
632 }
b4fe9454
PN
633 if (!radeon_check_offset(dev_priv, *cmd2)) {
634 DRM_ERROR("Invalid indx_buffer offset is %08X\n",
635 *cmd2);
e2898c5f
NH
636 return -EINVAL;
637 }
b4fe9454 638 if (*cmd3 != expected_count) {
e2898c5f 639 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
b4fe9454 640 *cmd3, expected_count);
e2898c5f
NH
641 return -EINVAL;
642 }
643
644 BEGIN_RING(4);
b4fe9454 645 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
e2898c5f 646 ADVANCE_RING();
e2898c5f
NH
647 }
648
a1aa2897
RS
649 return 0;
650}
651
d985c108
DA
652static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
653 drm_radeon_kcmd_buffer_t *cmdbuf)
414ed537 654{
b4fe9454 655 u32 *header;
414ed537
DA
656 int count;
657 RING_LOCALS;
658
b4fe9454 659 if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
20caafa6 660 return -EINVAL;
414ed537 661
b5e89ed5 662 /* Fixme !! This simply emits a packet without much checking.
414ed537
DA
663 We need to be smarter. */
664
665 /* obtain first word - actual packet3 header */
b4fe9454 666 header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
414ed537
DA
667
668 /* Is it packet 3 ? */
b4fe9454
PN
669 if ((*header >> 30) != 0x3) {
670 DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
20caafa6 671 return -EINVAL;
b5e89ed5 672 }
414ed537 673
b4fe9454 674 count = (*header >> 16) & 0x3fff;
414ed537
DA
675
676 /* Check again now that we know how much data to expect */
b4fe9454 677 if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
b5e89ed5
DA
678 DRM_ERROR
679 ("Expected packet3 of length %d but have only %d bytes left\n",
b4fe9454 680 (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
20caafa6 681 return -EINVAL;
b5e89ed5 682 }
414ed537
DA
683
684 /* Is it a packet type we know about ? */
b4fe9454 685 switch (*header & 0xff00) {
b5e89ed5 686 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
b4fe9454 687 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
414ed537 688
4e5e2e25
DA
689 case RADEON_CNTL_BITBLT_MULTI:
690 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
691
54f961a6 692 case RADEON_CP_INDX_BUFFER:
e2898c5f
NH
693 DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
694 return -EINVAL;
54f961a6
JG
695 case RADEON_CP_3D_DRAW_IMMD_2:
696 /* triggers drawing using in-packet vertex data */
697 case RADEON_CP_3D_DRAW_VBUF_2:
698 /* triggers drawing of vertex buffers setup elsewhere */
e2898c5f
NH
699 dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
700 RADEON_PURGE_EMITED);
701 break;
54f961a6
JG
702 case RADEON_CP_3D_DRAW_INDX_2:
703 /* triggers drawing using indices to vertex buffer */
704 /* whenever we send vertex we clear flush & purge */
705 dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
706 RADEON_PURGE_EMITED);
e2898c5f 707 return r300_emit_draw_indx_2(dev_priv, cmdbuf);
414ed537
DA
708 case RADEON_WAIT_FOR_IDLE:
709 case RADEON_CP_NOP:
710 /* these packets are safe */
711 break;
712 default:
b4fe9454 713 DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
20caafa6 714 return -EINVAL;
b5e89ed5 715 }
414ed537 716
b5e89ed5 717 BEGIN_RING(count + 2);
b4fe9454 718 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
414ed537
DA
719 ADVANCE_RING();
720
414ed537
DA
721 return 0;
722}
723
414ed537
DA
724/**
725 * Emit a rendering packet3 from userspace.
726 * Called by r300_do_cp_cmdbuf.
727 */
d985c108
DA
728static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
729 drm_radeon_kcmd_buffer_t *cmdbuf,
414ed537
DA
730 drm_r300_cmd_header_t header)
731{
732 int n;
733 int ret;
b4fe9454 734 int orig_iter = cmdbuf->buffer->iterator;
414ed537
DA
735
736 /* This is a do-while-loop so that we run the interior at least once,
737 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
738 */
739 n = 0;
740 do {
741 if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
742 ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
743 if (ret)
744 return ret;
745
b4fe9454 746 cmdbuf->buffer->iterator = orig_iter;
b5e89ed5 747 }
414ed537 748
b5e89ed5 749 switch (header.packet3.packet) {
414ed537
DA
750 case R300_CMD_PACKET3_CLEAR:
751 DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
752 ret = r300_emit_clear(dev_priv, cmdbuf);
753 if (ret) {
754 DRM_ERROR("r300_emit_clear failed\n");
755 return ret;
b5e89ed5 756 }
414ed537
DA
757 break;
758
759 case R300_CMD_PACKET3_RAW:
760 DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
761 ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
762 if (ret) {
763 DRM_ERROR("r300_emit_raw_packet3 failed\n");
764 return ret;
b5e89ed5 765 }
414ed537
DA
766 break;
767
768 default:
b4fe9454 769 DRM_ERROR("bad packet3 type %i at byte %d\n",
b5e89ed5 770 header.packet3.packet,
55a5cb5d 771 cmdbuf->buffer->iterator - (int)sizeof(header));
20caafa6 772 return -EINVAL;
b5e89ed5 773 }
414ed537
DA
774
775 n += R300_SIMULTANEOUS_CLIPRECTS;
b5e89ed5 776 } while (n < cmdbuf->nbox);
414ed537
DA
777
778 return 0;
779}
780
781/* Some of the R300 chips seem to be extremely touchy about the two registers
782 * that are configured in r300_pacify.
783 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
784 * sends a command buffer that contains only state setting commands and a
785 * vertex program/parameter upload sequence, this will eventually lead to a
786 * lockup, unless the sequence is bracketed by calls to r300_pacify.
787 * So we should take great care to *always* call r300_pacify before
788 * *anything* 3D related, and again afterwards. This is what the
789 * call bracket in r300_do_cp_cmdbuf is for.
790 */
791
792/**
793 * Emit the sequence to pacify R300.
794 */
ce580fab 795static void r300_pacify(drm_radeon_private_t *dev_priv)
414ed537 796{
54f961a6 797 uint32_t cache_z, cache_3d, cache_2d;
414ed537 798 RING_LOCALS;
e2898c5f 799
54f961a6
JG
800 cache_z = R300_ZC_FLUSH;
801 cache_2d = R300_RB2D_DC_FLUSH;
802 cache_3d = R300_RB3D_DC_FLUSH;
803 if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
804 /* we can purge, primitive where draw since last purge */
805 cache_z |= R300_ZC_FREE;
806 cache_2d |= R300_RB2D_DC_FREE;
807 cache_3d |= R300_RB3D_DC_FREE;
808 }
414ed537 809
54f961a6
JG
810 /* flush & purge zbuffer */
811 BEGIN_RING(2);
21efa2ba 812 OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
54f961a6
JG
813 OUT_RING(cache_z);
814 ADVANCE_RING();
815 /* flush & purge 3d */
816 BEGIN_RING(2);
817 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
818 OUT_RING(cache_3d);
819 ADVANCE_RING();
820 /* flush & purge texture */
821 BEGIN_RING(2);
822 OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
823 OUT_RING(0);
824 ADVANCE_RING();
825 /* FIXME: is this one really needed ? */
826 BEGIN_RING(2);
827 OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
828 OUT_RING(0);
829 ADVANCE_RING();
830 BEGIN_RING(2);
831 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
832 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
833 ADVANCE_RING();
834 /* flush & purge 2d through E2 as RB2D will trigger lockup */
835 BEGIN_RING(4);
836 OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
837 OUT_RING(cache_2d);
838 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
839 OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
840 RADEON_WAIT_HOST_IDLECLEAN);
414ed537 841 ADVANCE_RING();
54f961a6
JG
842 /* set flush & purge flags */
843 dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
414ed537
DA
844}
845
414ed537
DA
846/**
847 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
848 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
849 * be careful about how this function is called.
850 */
7c1c2871 851static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
414ed537 852{
414ed537 853 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
7c1c2871 854 struct drm_radeon_master_private *master_priv = master->driver_priv;
414ed537 855
7c1c2871 856 buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
414ed537
DA
857 buf->pending = 1;
858 buf->used = 0;
859}
860
0c76be35
DA
861static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
862 drm_r300_cmd_header_t header)
863{
864 u32 wait_until;
865 RING_LOCALS;
866
867 if (!header.wait.flags)
868 return;
869
870 wait_until = 0;
871
872 switch(header.wait.flags) {
873 case R300_WAIT_2D:
874 wait_until = RADEON_WAIT_2D_IDLE;
875 break;
876 case R300_WAIT_3D:
877 wait_until = RADEON_WAIT_3D_IDLE;
878 break;
879 case R300_NEW_WAIT_2D_3D:
880 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
881 break;
882 case R300_NEW_WAIT_2D_2D_CLEAN:
883 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
884 break;
885 case R300_NEW_WAIT_3D_3D_CLEAN:
886 wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
887 break;
888 case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
889 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
890 wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
891 break;
892 default:
893 return;
894 }
895
896 BEGIN_RING(2);
897 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
898 OUT_RING(wait_until);
899 ADVANCE_RING();
900}
901
/*
 * Handle an R300_CMD_SCRATCH command: bump the age counter for one of the
 * five software scratch registers, write that age (and a decremented
 * "pending" count) back into a user-space ref/age array, and finally emit
 * the new age to the hardware scratch register on the ring.
 *
 * Command-buffer layout consumed here: a u64 user-space pointer to the
 * ref/age array, followed by n_bufs u32 buffer indices.
 *
 * Returns 0 on success, -EINVAL on a short buffer, bad register index,
 * failed user copy, or a buffer whose pending count is already zero.
 */
static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
{
	u32 *ref_age_base;
	u32 i, *buf_idx, h_pending;
	u64 *ptr_addr;
	u64 stack_ptr_addr;
	RING_LOCALS;

	/* Ensure the pointer and all n_bufs indices are actually present
	 * before touching anything. */
	if (drm_buffer_unprocessed(cmdbuf->buffer) <
	    (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
		return -EINVAL;
	}

	/* Only 5 software scratch slots exist. */
	if (header.scratch.reg >= 5) {
		return -EINVAL;
	}

	dev_priv->scratch_ages[header.scratch.reg]++;

	/* Pull the u64 user pointer out of the command stream; it may be
	 * unaligned, hence get_unaligned(). */
	ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
			sizeof(stack_ptr_addr), &stack_ptr_addr);
	ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);

	for (i=0; i < header.scratch.n_bufs; i++) {
		/* Peek at the next index without consuming it; the in-place
		 * doubling converts a buffer index into a u32 offset into
		 * the user array (8 bytes == 2 u32s per buffer). */
		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
		*buf_idx *= 2; /* 8 bytes per buf */

		/* Publish the new age for this buffer... */
		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
				&dev_priv->scratch_ages[header.scratch.reg],
				sizeof(u32)))
			return -EINVAL;

		/* ...then read, validate and decrement its pending count. */
		if (DRM_COPY_FROM_USER(&h_pending,
				ref_age_base + *buf_idx + 1,
				sizeof(u32)))
			return -EINVAL;

		if (h_pending == 0)
			return -EINVAL;

		h_pending--;

		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
					&h_pending,
					sizeof(u32)))
			return -EINVAL;

		/* Now consume the index we peeked at above. */
		drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
	}

	/* Finally let the CP write the new age into the hardware scratch
	 * register so userspace can poll completion. */
	BEGIN_RING(2);
	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
	ADVANCE_RING();

	return 0;
}
961
c0beb2a7
DA
962/**
963 * Uploads user-supplied vertex program instructions or parameters onto
964 * the graphics card.
965 * Called by r300_do_cp_cmdbuf.
966 */
967static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
968 drm_radeon_kcmd_buffer_t *cmdbuf,
969 drm_r300_cmd_header_t header)
970{
971 int sz;
972 int addr;
973 int type;
01136acf 974 int isclamp;
c0beb2a7
DA
975 int stride;
976 RING_LOCALS;
977
978 sz = header.r500fp.count;
979 /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
980 addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
981
982 type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
01136acf 983 isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
c0beb2a7
DA
984
985 addr |= (type << 16);
01136acf 986 addr |= (isclamp << 17);
c0beb2a7
DA
987
988 stride = type ? 4 : 6;
989
990 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
991 if (!sz)
992 return 0;
b4fe9454 993 if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
c0beb2a7
DA
994 return -EINVAL;
995
996 BEGIN_RING(3 + sz * stride);
997 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
998 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
b4fe9454 999 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
c0beb2a7
DA
1000
1001 ADVANCE_RING();
1002
c0beb2a7
DA
1003 return 0;
1004}
1005
1006
/**
 * Parses and validates a user-supplied command buffer and emits appropriate
 * commands on the DMA ring buffer.
 * Called by the ioctl handler function radeon_cp_cmdbuf.
 *
 * Returns 0 on success or a negative errno.  On every exit path (success
 * or error) the engine is re-pacified and the ring committed.
 */
int r300_do_cp_cmdbuf(struct drm_device *dev,
		      struct drm_file *file_priv,
		      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	/* Set once any DMA buffer is discarded; triggers a single dispatch
	 * age emission at the end so userspace can detect buffer reuse. */
	int emit_dispatch_age = 0;
	int ret = 0;

	DRM_DEBUG("\n");

	/* pacify */
	r300_pacify(dev_priv);

	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
		if (ret)
			goto cleanup;
	}

	/* Consume one command header per iteration until less than a full
	 * header remains in the user buffer; trailing bytes are ignored. */
	while (drm_buffer_unprocessed(cmdbuf->buffer)
			>= sizeof(drm_r300_cmd_header_t)) {
		int idx;
		drm_r300_cmd_header_t *header, stack_header;

		/* Copy the header out (it may be unaligned in the stream). */
		header = drm_buffer_read_object(cmdbuf->buffer,
				sizeof(stack_header), &stack_header);

		switch (header->header.cmd_type) {
		case R300_CMD_PACKET0:
			DRM_DEBUG("R300_CMD_PACKET0\n");
			ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_packet0 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_VPU:
			DRM_DEBUG("R300_CMD_VPU\n");
			ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_vpu failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_PACKET3:
			DRM_DEBUG("R300_CMD_PACKET3\n");
			ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_packet3 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_END3D:
			DRM_DEBUG("R300_CMD_END3D\n");
			/* TODO:
			   Ideally userspace driver should not need to issue this call,
			   i.e. the drm driver should issue it automatically and prevent
			   lockups.

			   In practice, we do not understand why this call is needed and what
			   it does (except for some vague guesses that it has to do with cache
			   coherence) and so the user space driver does it.

			   Once we are sure which uses prevent lockups the code could be moved
			   into the kernel and the userspace driver will not
			   need to use this command.

			   Note that issuing this command does not hurt anything
			   except, possibly, performance */
			r300_pacify(dev_priv);
			break;

		case R300_CMD_CP_DELAY:
			/* simple enough, we can do it here */
			DRM_DEBUG("R300_CMD_CP_DELAY\n");
			{
				int i;
				RING_LOCALS;

				/* Stall the CP with NOP (PACKET2) dwords. */
				BEGIN_RING(header->delay.count);
				for (i = 0; i < header->delay.count; i++)
					OUT_RING(RADEON_CP_PACKET2);
				ADVANCE_RING();
			}
			break;

		case R300_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header->dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				ret = -EINVAL;
				goto cleanup;
			}

			/* The buffer must belong to this client and must not
			 * already be in flight. */
			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				ret = -EINVAL;
				goto cleanup;
			}

			emit_dispatch_age = 1;
			r300_discard_buffer(dev, file_priv->master, buf);
			break;

		case R300_CMD_WAIT:
			DRM_DEBUG("R300_CMD_WAIT\n");
			r300_cmd_wait(dev_priv, *header);
			break;

		case R300_CMD_SCRATCH:
			DRM_DEBUG("R300_CMD_SCRATCH\n");
			ret = r300_scratch(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_scratch failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_R500FP:
			/* R500FP uploads are only valid on RV515 and newer. */
			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
				DRM_ERROR("Calling r500 command on r300 card\n");
				ret = -EINVAL;
				goto cleanup;
			}
			DRM_DEBUG("R300_CMD_R500FP\n");
			ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_r500fp failed\n");
				goto cleanup;
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %i at byte %d\n",
				  header->header.cmd_type,
				  cmdbuf->buffer->iterator - (int)sizeof(*header));
			ret = -EINVAL;
			goto cleanup;
		}
	}

	DRM_DEBUG("END\n");

      cleanup:
	r300_pacify(dev_priv);

	/* We emit the vertex buffer age here, outside the pacifier "brackets"
	 * for two reasons:
	 * (1) This may coalesce multiple age emissions into a single one and
	 * (2) more importantly, some chips lock up hard when scratch registers
	 * are written inside the pacifier bracket.
	 */
	if (emit_dispatch_age) {
		RING_LOCALS;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);
		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
		ADVANCE_RING();
	}

	COMMIT_RING();

	return ret;
}
This page took 1.020661 seconds and 5 git commands to generate.