/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE     1050
#define CIK_SDMA_UCODE_VERSION  64
u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
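
/*
 * For orientation: every sDMA packet starts with a 32-bit header.  The
 * SDMA_PACKET() helper used throughout this file (defined in cikd.h)
 * encodes it roughly as follows; this is a reader's sketch of the macro,
 * not a new definition:
 *
 *   SDMA_PACKET(op, sub_op, e) = ((e & 0xFFFF) << 16) |
 *                                ((sub_op & 0xFF) << 8) |
 *                                 (op & 0xFF)
 *
 * i.e. opcode in bits 7:0, sub-opcode in bits 15:8, and packet-specific
 * extra bits in the high half-word.
 */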
/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
                              struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 5;
                while ((next_rptr & 7) != 4)
                        next_rptr++;
                next_rptr += 4;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
                radeon_ring_write(ring, 1); /* number of DWs to follow */
                radeon_ring_write(ring, next_rptr);
        }

        /* IB packet must end on a 8 DW boundary */
        while ((ring->wptr & 7) != 4)
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
        radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
        radeon_ring_write(ring, ib->length_dw);
}
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
                              struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (fence->ring == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

        /* write the fence */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
        radeon_ring_write(ring, addr & 0xffffffff);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
        /* flush HDP */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
        radeon_ring_write(ring, ref_and_mask); /* MASK */
        radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
}
/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
        radeon_ring_write(ring, addr & 0xfffffff8);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
}
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
        u32 rb_cntl, reg_offset;
        int i;

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
                rb_cntl &= ~SDMA_RB_ENABLE;
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
        }
}
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
        /* XXX todo */
}
/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
        u32 me_cntl, reg_offset;
        int i;

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
                if (enable)
                        me_cntl &= ~SDMA_HALT;
                else
                        me_cntl |= SDMA_HALT;
                WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
        }
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;

        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        reg_offset = SDMA0_REGISTER_OFFSET;
                        wb_offset = R600_WB_DMA_RPTR_OFFSET;
                } else {
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                        reg_offset = SDMA1_REGISTER_OFFSET;
                        wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
                }

                WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
                WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

                /* set the wb address whether it's enabled or not */
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                if (rdev->wb.enabled)
                        rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

                WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
                WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

                ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

                /* enable DMA RB */
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

                ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
                ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
                /* enable DMA IBs */
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

                ring->ready = true;

                r = radeon_ring_test(rdev, ring->idx, ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }
        }

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
        /* XXX todo */
        return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->sdma_fw)
                return -EINVAL;

        /* stop the gfx rings and rlc compute queues */
        cik_sdma_gfx_stop(rdev);
        cik_sdma_rlc_stop(rdev);

        /* halt the MEs */
        cik_sdma_enable(rdev, false);

        /* sdma0 */
        fw_data = (const __be32 *)rdev->sdma_fw->data;
        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
        WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

        /* sdma1 */
        fw_data = (const __be32 *)rdev->sdma_fw->data;
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
        WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        return 0;
}
/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
        int r;

        /* Reset dma */
        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
        RREG32(SRBM_SOFT_RESET);
        udelay(50);
        WREG32(SRBM_SOFT_RESET, 0);
        RREG32(SRBM_SOFT_RESET);

        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;

        /* unhalt the MEs */
        cik_sdma_enable(rdev, true);

        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(rdev);
        if (r)
                return r;
        r = cik_sdma_rlc_resume(rdev);
        if (r)
                return r;

        return 0;
}
/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
        /* stop the gfx rings and rlc compute queues */
        cik_sdma_gfx_stop(rdev);
        cik_sdma_rlc_stop(rdev);
        /* halt the MEs */
        cik_sdma_enable(rdev, false);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
        radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
        /* XXX - compute dma queue tear down */
}
/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
                 struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        if (radeon_fence_need_sync(*fence, ring->idx)) {
                radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
                                            ring->idx);
                radeon_fence_note_sync(*fence, ring->idx);
        } else {
                radeon_semaphore_free(rdev, &sem, NULL);
        }

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0x1fffff)
                        cur_size_in_bytes = 0x1fffff;
                size_in_bytes -= cur_size_in_bytes;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, cur_size_in_bytes);
                radeon_ring_write(ring, 0); /* src/dst endian swap */
                radeon_ring_write(ring, src_offset & 0xffffffff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        unsigned i;
        int r;
        void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);

        r = radeon_ring_lock(rdev, ring, 4);
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
        }
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
        radeon_ring_write(ring, 1); /* number of DWs to follow */
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring);

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}
/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
        struct radeon_ib ib;
        unsigned i;
        int r;
        void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
        }

        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
        ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                return r;
        }
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
        radeon_ib_free(rdev, &ib);
        return r;
}
/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = cik_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activities */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}
/**
 * cik_sdma_vm_set_page - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_page(struct radeon_device *rdev,
                          struct radeon_ib *ib,
                          uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags)
{
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
        uint64_t value;
        unsigned ndw;

        trace_radeon_vm_set_page(pe, addr, count, incr, r600_flags);

        if (flags & RADEON_VM_PAGE_SYSTEM) {
                while (count) {
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        /* for non-physically contiguous pages (system) */
                        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                        ib->ptr[ib->length_dw++] = pe;
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                        ib->ptr[ib->length_dw++] = ndw;
                        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                                if (flags & RADEON_VM_PAGE_SYSTEM) {
                                        value = radeon_vm_map_gart(rdev, addr);
                                        value &= 0xFFFFFFFFFFFFF000ULL;
                                } else if (flags & RADEON_VM_PAGE_VALID) {
                                        value = addr;
                                } else {
                                        value = 0;
                                }
                                addr += incr;
                                value |= r600_flags;
                                ib->ptr[ib->length_dw++] = value;
                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        }
                }
        } else {
                while (count) {
                        ndw = count;
                        if (ndw > 0x7FFFF)
                                ndw = 0x7FFFF;

                        if (flags & RADEON_VM_PAGE_VALID)
                                value = addr;
                        else
                                value = 0;
                        /* for physically contiguous pages (vram) */
                        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
                        ib->ptr[ib->length_dw++] = pe; /* dst addr */
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = value; /* value */
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        ib->ptr[ib->length_dw++] = incr; /* increment size */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = ndw; /* number of entries */
                        pe += ndw * 8;
                        addr += ndw * incr;
                        count -= ndw;
                }
        }
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
        struct radeon_ring *ring = &rdev->ring[ridx];
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (vm == NULL)
                return;

        if (ridx == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        if (vm->id < 8) {
                radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
        } else {
                radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
        }
        radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

        /* update SH_MEM_* regs */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(vm->id));

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
        radeon_ring_write(ring, 1);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(0));

        /* flush HDP */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
        radeon_ring_write(ring, ref_and_mask); /* MASK */
        radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */

        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 1 << vm->id);
}