/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"
#define CIK_SDMA_UCODE_SIZE	1050
#define CIK_SDMA_UCODE_VERSION	64
u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
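/*
 * Note: every sDMA packet emitted below starts with a header dword built
 * by the SDMA_PACKET() macro from cikd.h which, as used throughout this
 * file, packs the opcode, sub-opcode, and packet-specific extra bits into
 * a single dword.
 */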
/**
 * cik_sdma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET;
		else
			reg = SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

	return (rptr & 0x3fffc) >> 2;
}
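/*
 * Note: the rptr/wptr registers hold byte offsets with bits 1:0 always
 * zero (dword alignment), so masking with 0x3fffc and shifting right by
 * 2 converts the hardware byte offset into the dword index the common
 * ring code expects.
 */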
/**
 * cik_sdma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
uint32_t cik_sdma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}
/**
 * cik_sdma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
void cik_sdma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET;
	else
		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
}
/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
			      struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 5;
		while ((next_rptr & 7) != 4)
			next_rptr++;
		next_rptr += 4;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
		radeon_ring_write(ring, 1); /* number of DWs to follow */
		radeon_ring_write(ring, next_rptr);
	}

	/* IB packet must end on a 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, ib->length_dw);
}
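/*
 * Note: the (wptr & 7) != 4 padding above works because the
 * INDIRECT_BUFFER packet is 4 dwords long; starting it at dword 4 of an
 * 8 dword group makes it end exactly on the 8 DW boundary the engine
 * requires.
 */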
/**
 * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev,
					 int ridx)
{
	struct radeon_ring *ring = &rdev->ring[ridx];
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ridx == R600_RING_TYPE_DMA_INDEX)
		ref_and_mask = SDMA0;
	else
		ref_and_mask = SDMA1;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
	radeon_ring_write(ring, ref_and_mask); /* reference */
	radeon_ring_write(ring, ref_and_mask); /* mask */
	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
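/*
 * Note: the POLL_REG_MEM packet above is used in write-then-poll mode
 * (EXTRA_OP(1)): it appears to write this engine's bit to
 * GPU_HDP_FLUSH_REQ and then poll GPU_HDP_FLUSH_DONE until the masked
 * value equals the reference (EXTRA_FUNC(3) selects an equality compare),
 * i.e. until the HDP flush has completed.
 */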
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}
/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	radeon_ring_write(ring, addr & 0xfffffff8);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

	return true;
}
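/*
 * Note: extra_bits selects the semaphore operation: with
 * SDMA_SEMAPHORE_EXTRA_S set the packet signals (increments) the
 * semaphore, with it clear the packet waits, so emit_wait == true emits
 * a wait packet and emit_wait == false emits a signal packet.
 */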
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
	u32 rb_cntl, reg_offset;
	int i;

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
		rb_cntl &= ~SDMA_RB_ENABLE;
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
	}
}
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
	/* XXX todo */
}
/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
	u32 me_cntl, reg_offset;
	int i;

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;
		else
			me_cntl |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = SDMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = SDMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

		/* enable DMA RB */
		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

		ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
		/* enable DMA IBs */
		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
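/*
 * Note: the ring size is programmed as log2(size in dwords), which is
 * why order_base_2() is taken over ring_size / 4 above; the << 1 shift
 * presumably places the size field just above the RB enable bit in
 * SDMA0_GFX_RB_CNTL.
 */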
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
	/* XXX todo */
	return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->sdma_fw)
		return -EINVAL;

	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);

	/* halt the MEs */
	cik_sdma_enable(rdev, false);

	/* sdma0 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	/* sdma1 */
	fw_data = (const __be32 *)rdev->sdma_fw->data;
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
	WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
	return 0;
}
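/*
 * Note: both engines are loaded from the same rdev->sdma_fw image;
 * fw_data is reset to the start of the blob before the SDMA1 loop.
 * Writing 0 to SDMA0_UCODE_ADDR resets the (presumably auto-incrementing)
 * ucode write pointer before each load and again once loading is done.
 */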
/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
	int r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);
	RREG32(SRBM_SOFT_RESET);

	r = cik_sdma_load_microcode(rdev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(rdev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(rdev);
	if (r)
		return r;

	return 0;
}
/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
	/* stop the gfx rings and rlc compute queues */
	cik_sdma_gfx_stop(rdev);
	cik_sdma_rlc_stop(rdev);
	cik_sdma_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
	/* XXX - compute dma queue tear down */
}
/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
		 uint64_t src_offset, uint64_t dst_offset,
		 unsigned num_gpu_pages,
		 struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
		radeon_ring_write(ring, cur_size_in_bytes);
		radeon_ring_write(ring, 0); /* src/dst endian swap */
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
		radeon_ring_write(ring, dst_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
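/*
 * Note: each linear copy packet above is 7 dwords and moves at most
 * 0x1fffff bytes, so the ring is locked for num_loops * 7 dwords plus
 * 14 dwords of headroom for the semaphore sync and fence packets.
 */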
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a
 * value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp = 0;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}
/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cik_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
/**
 * cik_sdma_vm_set_page - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_page(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if (flags & R600_PTE_SYSTEM) {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			ib->ptr[ib->length_dw++] = ndw;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				value = radeon_vm_map_gart(rdev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
				addr += incr;
				value |= flags;
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
		while (count) {
			ndw = count;
			if (ndw > 0x7FFFF)
				ndw = 0x7FFFF;

			if (flags & R600_PTE_VALID)
				value = addr;
			else
				value = 0;
			/* for physically contiguous pages (vram) */
			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			ib->ptr[ib->length_dw++] = flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = ndw; /* number of entries */
			pe += ndw * 8;
			addr += ndw * incr;
			count -= ndw;
		}
	}
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
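/*
 * Note: two packet types are used above. System pages go through GART
 * lookups one entry at a time via a linear WRITE packet, while
 * contiguous vram ranges use GENERATE_PTE_PDE so the engine fabricates
 * the entries itself from a base value, mask, and per-entry increment.
 */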
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm->id < 8) {
		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
	} else {
		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* update SH_MEM_* regs */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(vm->id));

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_BASES >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));

	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, ridx);

	/* flush TLB */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 1 << vm->id);
}
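/*
 * Note: VM ids 0-7 have their page table base in the VM_CONTEXT0
 * register block and ids 8-15 in the VM_CONTEXT8 block, which is why
 * cik_dma_vm_flush() picks the register by vm->id. The final SRBM write
 * to VM_INVALIDATE_REQUEST flushes the TLB for just this vm's id bit.
 */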