2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
29 * Keith Whitwell <keith@tungstengraphics.com>
35 #include "drm_sarea.h"
36 #include "nouveau_drv.h"
39 * NV10-NV40 tiling helpers
/*
 * nv10_mem_set_region_tiling() - (re)program hardware tile region <i>.
 *
 * NOTE(review): this extraction is fragmented; several original statements
 * between the visible ones are missing from this view (e.g. the tile-struct
 * field assignments and the body of the cache_flush branch).  Comments below
 * describe only what the visible code demonstrates.
 */
43 nv10_mem_set_region_tiling(struct drm_device
*dev
, int i
, uint32_t addr
,
44 uint32_t size
, uint32_t pitch
)
46 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
47 struct nouveau_fifo_engine
*pfifo
= &dev_priv
->engine
.fifo
;
48 struct nouveau_fb_engine
*pfb
= &dev_priv
->engine
.fb
;
49 struct nouveau_pgraph_engine
*pgraph
= &dev_priv
->engine
.graph
;
50 struct nouveau_tile_reg
*tile
= &dev_priv
->tile
.reg
[i
];
/* Drop any fence previously guarding this tile region. */
55 nouveau_fence_unref((void **)&tile
->fence
);
/* NOTE(review): the body of this branch is not visible in this extraction. */
57 if (!pfifo
->cache_flush(dev
))
/* Quiesce PFIFO (reassign off, flush the cache, stop cache pulling)
 * before touching the tiling state. */
60 pfifo
->reassign(dev
, false);
61 pfifo
->cache_flush(dev
);
62 pfifo
->cache_pull(dev
, false);
64 nouveau_wait_for_idle(dev
);
/* Apply the new tiling configuration to both PGRAPH and PFB. */
66 pgraph
->set_region_tiling(dev
, i
, addr
, size
, pitch
);
67 pfb
->set_region_tiling(dev
, i
, addr
, size
, pitch
);
/* Resume normal PFIFO operation. */
69 pfifo
->cache_pull(dev
, true);
70 pfifo
->reassign(dev
, true);
/*
 * nv10_mem_set_tiling() - allocate/configure a tile region covering
 * [addr, addr + size) with the given pitch.
 *
 * Scans the pfb->num_tiles hardware tile regions under tile.lock, killing
 * any region that intersects the requested range and claiming a free one
 * when pitch != 0.
 *
 * NOTE(review): several original statements (in-use/pending checks, the
 * assignment of 'found', and the return) are missing from this extraction;
 * only the visible control flow is documented here.
 */
73 struct nouveau_tile_reg
*
74 nv10_mem_set_tiling(struct drm_device
*dev
, uint32_t addr
, uint32_t size
,
77 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
78 struct nouveau_fb_engine
*pfb
= &dev_priv
->engine
.fb
;
79 struct nouveau_tile_reg
*tile
= dev_priv
->tile
.reg
, *found
= NULL
;
/* Serialise against other tile-region updates. */
82 spin_lock(&dev_priv
->tile
.lock
);
84 for (i
= 0; i
< pfb
->num_tiles
; i
++) {
86 /* Tile region in use. */
90 !nouveau_fence_signalled(tile
[i
].fence
, NULL
))
91 /* Pending tile region. */
/* Overlap test: ranges intersect iff max(start) < min(end). */
94 if (max(tile
[i
].addr
, addr
) <
95 min(tile
[i
].addr
+ tile
[i
].size
, addr
+ size
))
96 /* Kill an intersecting tile region. */
97 nv10_mem_set_region_tiling(dev
, i
, 0, 0, 0);
99 if (pitch
&& !found
) {
100 /* Free tile region: claim it for the requested range. */
101 nv10_mem_set_region_tiling(dev
, i
, addr
, size
, pitch
);
106 spin_unlock(&dev_priv
->tile
.lock
);
/*
 * nv10_mem_expire_tiling() - defer teardown of a tile region until the
 * given fence signals.
 *
 * NOTE(review): the statement storing the referenced fence into the tile
 * (and any locking) is missing from this extraction; only the visible
 * fence reference is documented.
 */
112 nv10_mem_expire_tiling(struct drm_device
*dev
, struct nouveau_tile_reg
*tile
,
113 struct nouveau_fence
*fence
)
116 /* Mark it as pending: hold a reference on the fence. */
118 nouveau_fence_ref(fence
);
/*
 * nv50_mem_vm_bind_linear() - map a linear physical range into the NV50
 * virtual address space page tables.
 *
 * Converts virt/size into page-table-entry units (64KiB pages, two 32-bit
 * words per PTE), folds 'flags' into the high word of the physical address,
 * writes the PTE pairs through nv_wo32(), then flushes the relevant TLBs by
 * writing 0x100c80 and polling bit 0 until it clears.
 *
 * NOTE(review): loop scaffolding (pte/end computation, the write loop
 * bounds, and the error-branch closers) is missing from this extraction.
 */
128 nv50_mem_vm_bind_linear(struct drm_device
*dev
, uint64_t virt
, uint32_t size
,
129 uint32_t flags
, uint64_t phys
)
131 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
132 struct nouveau_gpuobj
*pgt
;
/* Rebase virt against the VRAM VM base; >>16 selects the 64KiB page,
 * <<1 accounts for two 32-bit words per PTE. */
136 virt
= ((virt
- dev_priv
->vm_vram_base
) >> 16) << 1;
137 size
= (size
>> 16) << 1;
/* Storage-type flags live in the upper 32 bits of the PTE value. */
139 phys
|= ((uint64_t)flags
<< 32);
/* On IGPs, VRAM is stolen system memory at vram_sys_base. */
141 if (dev_priv
->vram_sys_base
) {
142 phys
+= dev_priv
->vram_sys_base
;
146 dev_priv
->engine
.instmem
.prepare_access(dev
, true);
148 unsigned offset_h
= upper_32_bits(phys
);
149 unsigned offset_l
= lower_32_bits(phys
);
/* Find the largest power-of-two contiguous block (up to 256 PTEs)
 * that is both within the remaining size and aligned at virt. */
152 for (i
= 7; i
>= 0; i
--) {
153 block
= 1 << (i
+ 1);
154 if (size
>= block
&& !(virt
& (block
- 1)))
/* Encode the contiguous-block size into PTE bits 7+. */
157 offset_l
|= (i
<< 7);
163 pgt
= dev_priv
->vm_vram_pt
[virt
>> 14];
169 block
-= (end
- pte
);
/* Each PTE is written as a low word / high word pair. */
173 nv_wo32(dev
, pgt
, pte
++, offset_l
);
174 nv_wo32(dev
, pgt
, pte
++, offset_h
);
178 dev_priv
->engine
.instmem
.finish_access(dev
);
/* TLB flush sequence: write each engine's flush trigger to 0x100c80
 * and poll bit 0 until the hardware clears it, logging on timeout. */
180 nv_wr32(dev
, 0x100c80, 0x00050001);
181 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
182 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
183 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
187 nv_wr32(dev
, 0x100c80, 0x00000001);
188 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
189 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
190 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
194 nv_wr32(dev
, 0x100c80, 0x00040001);
195 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
196 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
197 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
201 nv_wr32(dev
, 0x100c80, 0x00060001);
202 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
203 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
204 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
/*
 * nv50_mem_vm_unbind() - tear down an NV50 VM mapping created by
 * nv50_mem_vm_bind_linear(): zero the PTEs covering [virt, virt + size)
 * and flush the TLBs via 0x100c80.
 *
 * NOTE(review): the outer loop scaffolding (end computation, loop bounds,
 * error-branch closers) is missing from this extraction.
 */
212 nv50_mem_vm_unbind(struct drm_device
*dev
, uint64_t virt
, uint32_t size
)
214 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
215 struct nouveau_gpuobj
*pgt
;
216 unsigned pages
, pte
, end
;
/* Rebase against the VRAM VM base; two 32-bit words per 64KiB page. */
218 virt
-= dev_priv
->vm_vram_base
;
219 pages
= (size
>> 16) << 1;
221 dev_priv
->engine
.instmem
.prepare_access(dev
, true);
/* Select the page table object and the starting PTE word within it. */
223 pgt
= dev_priv
->vm_vram_pt
[virt
>> 29];
224 pte
= (virt
& 0x1ffe0000ULL
) >> 15;
229 pages
-= (end
- pte
);
230 virt
+= (end
- pte
) << 15;
/* Clear each PTE word. */
233 nv_wo32(dev
, pgt
, pte
++, 0);
235 dev_priv
->engine
.instmem
.finish_access(dev
);
/* Same TLB flush sequence as the bind path: trigger and poll bit 0. */
237 nv_wr32(dev
, 0x100c80, 0x00050001);
238 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
239 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
240 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
244 nv_wr32(dev
, 0x100c80, 0x00000001);
245 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
246 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
247 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
251 nv_wr32(dev
, 0x100c80, 0x00040001);
252 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
253 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
254 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
258 nv_wr32(dev
, 0x100c80, 0x00060001);
259 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
260 NV_ERROR(dev
, "timeout: (0x100c80 & 1) == 0 (2)\n");
261 NV_ERROR(dev
, "0x100c80 = 0x%08x\n", nv_rd32(dev
, 0x100c80));
/*
 * nouveau_mem_close() - release memory-manager resources at driver
 * teardown: unpin/drop the reserved VGA memory BO, shut down the TTM BO
 * device and global state, release AGP memory/acquisition (KMS only),
 * and remove the framebuffer MTRR.
 */
269 nouveau_mem_close(struct drm_device
*dev
)
271 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
/* Drop the VGA-memory reservation made at init time. */
273 nouveau_bo_unpin(dev_priv
->vga_ram
);
274 nouveau_bo_ref(NULL
, &dev_priv
->vga_ram
);
276 ttm_bo_device_release(&dev_priv
->ttm
.bdev
);
278 nouveau_ttm_global_release(dev_priv
);
280 if (drm_core_has_AGP(dev
) && dev
->agp
&&
281 drm_core_check_feature(dev
, DRIVER_MODESET
)) {
282 struct drm_agp_mem
*entry
, *tempe
;
284 /* Remove AGP resources, but leave dev->agp
285 intact until drv_cleanup is called. */
286 list_for_each_entry_safe(entry
, tempe
, &dev
->agp
->memory
, head
) {
288 drm_unbind_agp(entry
->memory
);
289 drm_free_agp(entry
->memory
, entry
->pages
);
292 INIT_LIST_HEAD(&dev
->agp
->memory
);
294 if (dev
->agp
->acquired
)
295 drm_agp_release(dev
);
297 dev
->agp
->acquired
= 0;
298 dev
->agp
->enabled
= 0;
/* Undo the write-combining MTRR added over the FB BAR (BAR 1). */
301 if (dev_priv
->fb_mtrr
) {
302 drm_mtrr_del(dev_priv
->fb_mtrr
,
303 pci_resource_start(dev
->pdev
, 1),
304 pci_resource_len(dev
->pdev
, 1), DRM_MTRR_WC
);
305 dev_priv
->fb_mtrr
= 0;
/*
 * nouveau_mem_detect_nv04() - decode the VRAM size (in bytes) from the
 * NV03_BOOT_0 strap register on NV04-class hardware.
 *
 * NOTE(review): the fall-through/default return after the switch is not
 * visible in this extraction.
 */
310 nouveau_mem_detect_nv04(struct drm_device
*dev
)
312 uint32_t boot0
= nv_rd32(dev
, NV03_BOOT_0
);
/* Alternate encoding: bit 8 set means size = (nibble*2 + 2) MiB. */
314 if (boot0
& 0x00000100)
315 return (((boot0
>> 12) & 0xf) * 2 + 2) * 1024 * 1024;
317 switch (boot0
& NV03_BOOT_0_RAM_AMOUNT
) {
318 case NV04_BOOT_0_RAM_AMOUNT_32MB
:
319 return 32 * 1024 * 1024;
320 case NV04_BOOT_0_RAM_AMOUNT_16MB
:
321 return 16 * 1024 * 1024;
322 case NV04_BOOT_0_RAM_AMOUNT_8MB
:
323 return 8 * 1024 * 1024;
324 case NV04_BOOT_0_RAM_AMOUNT_4MB
:
325 return 4 * 1024 * 1024;
/*
 * nouveau_mem_detect_nforce() - read the VRAM size (in bytes) for
 * nForce/nForce2 IGPs from the host bridge's PCI config space, since the
 * GPU shares system memory on these chipsets.
 *
 * NOTE(review): the no-bridge early return and the declaration of 'mem'
 * are not visible in this extraction.
 */
332 nouveau_mem_detect_nforce(struct drm_device
*dev
)
334 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
335 struct pci_dev
*bridge
;
/* The memory controller lives at bus 0, devfn(0, 1). */
338 bridge
= pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
340 NV_ERROR(dev
, "no bridge device\n");
344 if (dev_priv
->flags
& NV_NFORCE
) {
345 pci_read_config_dword(bridge
, 0x7C, &mem
);
346 return (uint64_t)(((mem
>> 6) & 31) + 1)*1024*1024;
348 if (dev_priv
->flags
& NV_NFORCE2
) {
349 pci_read_config_dword(bridge
, 0x84, &mem
);
350 return (uint64_t)(((mem
>> 4) & 127) + 1)*1024*1024;
/* Reached only if neither nForce flag is set — should not happen. */
353 NV_ERROR(dev
, "impossible!\n");
357 /* returns the amount of FB ram in bytes */
/*
 * nouveau_mem_detect() - populate dev_priv->vram_size (and, on some IGPs,
 * vram_sys_base) according to card type:
 *   - NV04:            strap register decode (nouveau_mem_detect_nv04)
 *   - nForce/nForce2:  host-bridge config space (nouveau_mem_detect_nforce)
 *   - otherwise:       NV04_FIFO_DATA RAM-amount field; chipsets 0xaa/0xac
 *                      additionally read the stolen-memory base at 0x100e10.
 *
 * NOTE(review): the branch keywords joining these cases and the final
 * return are not visible in this extraction.
 */
359 nouveau_mem_detect(struct drm_device
*dev
)
361 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
363 if (dev_priv
->card_type
== NV_04
) {
364 dev_priv
->vram_size
= nouveau_mem_detect_nv04(dev
);
366 if (dev_priv
->flags
& (NV_NFORCE
| NV_NFORCE2
)) {
367 dev_priv
->vram_size
= nouveau_mem_detect_nforce(dev
);
369 dev_priv
->vram_size
= nv_rd32(dev
, NV04_FIFO_DATA
);
370 dev_priv
->vram_size
&= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK
;
/* IGP chipsets expose the stolen system memory base at 0x100e10. */
371 if (dev_priv
->chipset
== 0xaa || dev_priv
->chipset
== 0xac)
372 dev_priv
->vram_sys_base
= nv_rd32(dev
, 0x100e10);
373 dev_priv
->vram_sys_base
<<= 12;
376 NV_INFO(dev
, "Detected %dMiB VRAM\n", (int)(dev_priv
->vram_size
>> 20));
377 if (dev_priv
->vram_sys_base
) {
378 NV_INFO(dev
, "Stolen system memory at: 0x%010llx\n",
379 dev_priv
->vram_sys_base
);
382 if (dev_priv
->vram_size
)
/*
 * nouveau_mem_reset_agp() - reset the card's AGP interface by saving the
 * PCI config shadows, clearing busmaster/SBA/AGP enable bits, power
 * cycling PGRAPH if it was enabled, then restoring the saved values.
 */
388 static void nouveau_mem_reset_agp(struct drm_device
*dev
)
390 uint32_t saved_pci_nv_1
, saved_pci_nv_19
, pmc_enable
;
392 saved_pci_nv_1
= nv_rd32(dev
, NV04_PBUS_PCI_NV_1
);
393 saved_pci_nv_19
= nv_rd32(dev
, NV04_PBUS_PCI_NV_19
);
395 /* clear busmaster bit */
396 nv_wr32(dev
, NV04_PBUS_PCI_NV_1
, saved_pci_nv_1
& ~0x4);
397 /* clear SBA and AGP bits */
398 nv_wr32(dev
, NV04_PBUS_PCI_NV_19
, saved_pci_nv_19
& 0xfffff0ff);
400 /* power cycle pgraph, if enabled */
401 pmc_enable
= nv_rd32(dev
, NV03_PMC_ENABLE
);
402 if (pmc_enable
& NV_PMC_ENABLE_PGRAPH
) {
403 nv_wr32(dev
, NV03_PMC_ENABLE
,
404 pmc_enable
& ~NV_PMC_ENABLE_PGRAPH
);
405 nv_wr32(dev
, NV03_PMC_ENABLE
, nv_rd32(dev
, NV03_PMC_ENABLE
) |
406 NV_PMC_ENABLE_PGRAPH
);
409 /* and restore (gives effect of resetting AGP) */
410 nv_wr32(dev
, NV04_PBUS_PCI_NV_19
, saved_pci_nv_19
);
411 nv_wr32(dev
, NV04_PBUS_PCI_NV_1
, saved_pci_nv_1
);
/*
 * nouveau_mem_init_agp() - bring up AGP: reset the interface, acquire the
 * AGP device if needed, query its info, enable it with the mode it
 * reports, and record the aperture in dev_priv->gart_info.
 *
 * NOTE(review): error-path returns after the NV_ERROR calls are not
 * visible in this extraction.
 */
416 nouveau_mem_init_agp(struct drm_device
*dev
)
419 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
420 struct drm_agp_info info
;
421 struct drm_agp_mode mode
;
427 nouveau_mem_reset_agp(dev
);
429 if (!dev
->agp
->acquired
) {
430 ret
= drm_agp_acquire(dev
);
432 NV_ERROR(dev
, "Unable to acquire AGP: %d\n", ret
);
437 ret
= drm_agp_info(dev
, &info
);
439 NV_ERROR(dev
, "Unable to get AGP info: %d\n", ret
);
443 /* see agp.h for the AGPSTAT_* modes available */
/* Enable AGP at whatever mode the bridge advertises. */
444 mode
.mode
= info
.mode
;
445 ret
= drm_agp_enable(dev
, mode
);
447 NV_ERROR(dev
, "Unable to enable AGP: %d\n", ret
);
/* Publish the aperture so the GART setup can use it. */
451 dev_priv
->gart_info
.type
= NOUVEAU_GART_AGP
;
452 dev_priv
->gart_info
.aper_base
= info
.aperture_base
;
453 dev_priv
->gart_info
.aper_size
= info
.aperture_size
;
459 nouveau_mem_init(struct drm_device
*dev
)
461 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
462 struct ttm_bo_device
*bdev
= &dev_priv
->ttm
.bdev
;
463 int ret
, dma_bits
= 32;
465 dev_priv
->fb_phys
= pci_resource_start(dev
->pdev
, 1);
466 dev_priv
->gart_info
.type
= NOUVEAU_GART_NONE
;
468 if (dev_priv
->card_type
>= NV_50
&&
469 pci_dma_supported(dev
->pdev
, DMA_BIT_MASK(40)))
472 ret
= pci_set_dma_mask(dev
->pdev
, DMA_BIT_MASK(dma_bits
));
474 NV_ERROR(dev
, "Error setting DMA mask: %d\n", ret
);
478 ret
= nouveau_ttm_global_init(dev_priv
);
482 ret
= ttm_bo_device_init(&dev_priv
->ttm
.bdev
,
483 dev_priv
->ttm
.bo_global_ref
.ref
.object
,
484 &nouveau_bo_driver
, DRM_FILE_PAGE_OFFSET
,
485 dma_bits
<= 32 ? true : false);
487 NV_ERROR(dev
, "Error initialising bo driver: %d\n", ret
);
491 INIT_LIST_HEAD(&dev_priv
->ttm
.bo_list
);
492 spin_lock_init(&dev_priv
->ttm
.bo_list_lock
);
493 spin_lock_init(&dev_priv
->tile
.lock
);
495 dev_priv
->fb_available_size
= dev_priv
->vram_size
;
496 dev_priv
->fb_mappable_pages
= dev_priv
->fb_available_size
;
497 if (dev_priv
->fb_mappable_pages
> pci_resource_len(dev
->pdev
, 1))
498 dev_priv
->fb_mappable_pages
=
499 pci_resource_len(dev
->pdev
, 1);
500 dev_priv
->fb_mappable_pages
>>= PAGE_SHIFT
;
502 /* remove reserved space at end of vram from available amount */
503 dev_priv
->fb_available_size
-= dev_priv
->ramin_rsvd_vram
;
504 dev_priv
->fb_aper_free
= dev_priv
->fb_available_size
;
507 ret
= ttm_bo_init_mm(bdev
, TTM_PL_VRAM
,
508 dev_priv
->fb_available_size
>> PAGE_SHIFT
);
510 NV_ERROR(dev
, "Failed VRAM mm init: %d\n", ret
);
514 ret
= nouveau_bo_new(dev
, NULL
, 256*1024, 0, TTM_PL_FLAG_VRAM
,
515 0, 0, true, true, &dev_priv
->vga_ram
);
517 ret
= nouveau_bo_pin(dev_priv
->vga_ram
, TTM_PL_FLAG_VRAM
);
519 NV_WARN(dev
, "failed to reserve VGA memory\n");
520 nouveau_bo_ref(NULL
, &dev_priv
->vga_ram
);
524 #if !defined(__powerpc__) && !defined(__ia64__)
525 if (drm_device_is_agp(dev
) && dev
->agp
) {
526 ret
= nouveau_mem_init_agp(dev
);
528 NV_ERROR(dev
, "Error initialising AGP: %d\n", ret
);
532 if (dev_priv
->gart_info
.type
== NOUVEAU_GART_NONE
) {
533 ret
= nouveau_sgdma_init(dev
);
535 NV_ERROR(dev
, "Error initialising PCI(E): %d\n", ret
);
540 NV_INFO(dev
, "%d MiB GART (aperture)\n",
541 (int)(dev_priv
->gart_info
.aper_size
>> 20));
542 dev_priv
->gart_info
.aper_free
= dev_priv
->gart_info
.aper_size
;
544 ret
= ttm_bo_init_mm(bdev
, TTM_PL_TT
,
545 dev_priv
->gart_info
.aper_size
>> PAGE_SHIFT
);
547 NV_ERROR(dev
, "Failed TT mm init: %d\n", ret
);
551 dev_priv
->fb_mtrr
= drm_mtrr_add(pci_resource_start(dev
->pdev
, 1),
552 pci_resource_len(dev
->pdev
, 1),