drm/radeon/kms: consolidate GART code, fix segfault after GPU lockup V2
drivers/gpu/drm/radeon/radeon_gart.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"

/*
 * Common GART table functions.
 */
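/*
 * Allocate the GART page table in coherent system RAM. On the RS400/RS480
 * and RS690/RS740 IGPs the GPU fetches the table directly from system
 * memory, so the CPU mapping is switched to uncached to keep table updates
 * immediately visible to the GPU.
 */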
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
}

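/*
 * Free a system RAM GART page table, restoring the write-back CPU mapping
 * on the IGP chips before returning the buffer to the DMA allocator.
 */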
57void radeon_gart_table_ram_free(struct radeon_device *rdev)
58{
c9a1be96 59 if (rdev->gart.ptr == NULL) {
771fe6b9
JG
60 return;
61 }
62#ifdef CONFIG_X86
63 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
64 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
c9a1be96 65 set_memory_wb((unsigned long)rdev->gart.ptr,
771fe6b9
JG
66 rdev->gart.table_size >> PAGE_SHIFT);
67 }
68#endif
69 pci_free_consistent(rdev->pdev, rdev->gart.table_size,
c9a1be96 70 (void *)rdev->gart.ptr,
771fe6b9 71 rdev->gart.table_addr);
c9a1be96 72 rdev->gart.ptr = NULL;
771fe6b9
JG
73 rdev->gart.table_addr = 0;
74}
75
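/*
 * Create the VRAM buffer object backing the GART page table. The object
 * is created only once (note the NULL check), so later pins, e.g. after a
 * GPU lockup, reuse the same object.
 */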
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

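/*
 * Pin the page table object into VRAM and kmap it so the CPU can write
 * entries. If the kmap fails the object is unpinned again and the error
 * is returned.
 */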
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}

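/*
 * Kunmap and unpin the page table object, invalidating gart.ptr. The
 * buffer object itself is kept so it can simply be pinned again later.
 */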
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

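/*
 * Unpin the page table object if necessary and drop the last reference
 * on it.
 */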
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_gart_table_vram_unpin(rdev);
	radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
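/*
 * Unbind pages from the GART: unmap their DMA addresses (unless TTM
 * allocated them), repoint the affected entries at the dummy page, and
 * flush the GPU TLB. Entries are only rewritten while the table is
 * CPU-mapped (gart.ptr != NULL).
 */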
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			if (!rdev->gart.ttm_alloced[p])
				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

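/*
 * Bind a list of pages into the GART: DMA-map each CPU page, record the
 * mapping, and write one GART entry per RADEON_GPU_PAGE_SIZE chunk while
 * the table is CPU-mapped. A DMA mapping failure rolls back the whole
 * range via radeon_gart_unbind().
 */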
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		/* we reverted the patch using dma_addr in TTM for now but this
		 * code stops building on alpha so just comment it out for now */
		if (0) { /* dma_addr[i] != DMA_ERROR_CODE */
			rdev->gart.ttm_alloced[p] = true;
			rdev->gart.pages_addr[p] = dma_addr[i];
		} else {
			/* we need to support large memory configurations */
			/* assume that unbind has already been called on the range */
			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
								0, PAGE_SIZE,
								PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
				/* FIXME: failed to map page (return -ENOMEM?) */
				radeon_gart_unbind(rdev, offset, pages);
				return -ENOMEM;
			}
		}
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}

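/*
 * Rewrite the entire GART table from the saved pages_addr[] array. Used
 * when the GART is re-enabled, e.g. on resume or after a GPU reset, so
 * the table can be rebuilt without relying on its old contents.
 */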
void radeon_gart_restore(struct radeon_device *rdev)
{
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

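/*
 * Allocate the driver-side GART bookkeeping: size the table from
 * mc.gtt_size, allocate the pages, pages_addr and ttm_alloced arrays,
 * and default every slot to the dummy page so unbound entries never
 * point at stale memory.
 */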
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
				   GFP_KERNEL);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
					 rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.ttm_alloced == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}

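/*
 * Tear down the GART bookkeeping: unbind anything still bound, free the
 * tracking arrays and release the dummy page.
 */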
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	kfree(rdev->gart.pages);
	kfree(rdev->gart.pages_addr);
	kfree(rdev->gart.ttm_alloced);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;
	rdev->gart.ttm_alloced = NULL;

	radeon_dummy_page_fini(rdev);
}