/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
/*
 * NV10-NV40 tiling helpers
 */
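/* Reprogram a single tile region: drop the fence guarding it, rebuild the
 * region in PFB, then update every engine that mirrors tile state.  PFIFO
 * is paused under context_switch_lock so no channel can run while the
 * registers are in flux. */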
static void
nv10_mem_update_tile_region(struct drm_device *dev,
                            struct nouveau_tile_reg *tile, uint32_t addr,
                            uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        int i = tile - dev_priv->tile.reg, j;
        unsigned long save;

        nouveau_fence_unref(&tile->fence);

        if (tile->pitch)
                pfb->free_tile_region(dev, i);

        if (pitch)
                pfb->init_tile_region(dev, i, addr, size, pitch, flags);

        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
        pfifo->reassign(dev, false);
        pfifo->cache_pull(dev, false);

        nouveau_wait_for_idle(dev);

        pfb->set_tile_region(dev, i);
        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
                        dev_priv->eng[j]->set_tile_region(dev, i);
        }

        pfifo->cache_pull(dev, true);
        pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
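/* Try to claim tile region i.  Returns NULL while the region is in use or
 * a fence from its previous user has not signalled yet. */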
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        spin_lock(&dev_priv->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_signalled(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&dev_priv->tile.lock);
        return tile;
}
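/* Release a tile region.  If a fence is supplied, the region stays
 * unavailable until that fence signals, deferring reuse past pending GPU
 * work. */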
void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
                         struct nouveau_fence *fence)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (tile) {
                spin_lock(&dev_priv->tile.lock);
                if (fence) {
                        /* Mark it as pending. */
                        tile->fence = fence;
                        nouveau_fence_ref(fence);
                }

                tile->used = false;
                spin_unlock(&dev_priv->tile.lock);
        }
}
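/* Find and program a free tile region, recycling idle ones on the way.
 * The expected calling pattern (hypothetical sketch) is roughly:
 *
 *      tile = nv10_mem_set_tiling(dev, addr, size, pitch, flags);
 *      ... render to the tiled surface ...
 *      nv10_mem_put_tile_region(dev, tile, fence);
 */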
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
                    uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_tile_reg *tile, *found = NULL;
        int i;

        for (i = 0; i < pfb->num_tiles; i++) {
                tile = nv10_mem_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && tile->pitch) {
                        /* Kill an unused tile region. */
                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_mem_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_mem_update_tile_region(dev, found, addr, size,
                                            pitch, flags);
        return found;
}
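/* Tear down what nouveau_mem_vram_init() set up: the TTM bo device, the
 * global TTM state, and the write-combining MTRR covering BAR1. */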
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        ttm_bo_device_release(&dev_priv->ttm.bdev);

        nouveau_ttm_global_release(dev_priv);

        if (dev_priv->fb_mtrr >= 0) {
                drm_mtrr_del(dev_priv->fb_mtrr,
                             pci_resource_start(dev->pdev, 1),
                             pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
                dev_priv->fb_mtrr = -1;
        }
}
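/* Undo nouveau_mem_gart_init(): take down the SGDMA backend and, when AGP
 * was used, unbind and free all AGP memory before releasing the bridge. */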
void
nouveau_mem_gart_fini(struct drm_device *dev)
{
        nouveau_sgdma_takedown(dev);

        if (drm_core_has_AGP(dev) && dev->agp) {
                struct drm_agp_mem *entry, *tempe;

                /* Remove AGP resources, but leave dev->agp
                   intact until drv_cleanup is called. */
                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        kfree(entry);
                }
                INIT_LIST_HEAD(&dev->agp->memory);

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
}
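/* Minimal sanity check on GEM tile flags: buffers that request no tiling
 * layout are always acceptable; everything else is rejected here. */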
bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
        if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
                return true;

        return false;
}
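/* Filter the AGP mode advertised by the bridge: mask fast writes on nv18
 * (known to lock up) and clamp the rate to the nouveau_agpmode override. */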
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * FW seems to be broken on nv18, it makes the card lock up
         * randomly.
         */
        if (dev_priv->chipset == 0x18)
                mode &= ~PCI_AGP_COMMAND_FW;

        /*
         * AGP mode set in the command line.
         */
        if (nouveau_agpmode > 0) {
                bool agpv3 = mode & 0x8;
                int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

                mode = (mode & ~0x7) | (rate & 0x7);
        }

        return mode;
}
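/* Reset the card's AGP controller.  Fast writes go first so we cannot
 * lock ourselves out, then bus mastering and AGP are disabled and PGRAPH
 * is power cycled before the saved PCI config is restored. */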
static int
nouveau_mem_reset_agp(struct drm_device *dev)
{
        uint32_t saved_pci_nv_1, pmc_enable;
        int ret;

        /* First of all, disable fast writes, otherwise if it's
         * already enabled in the AGP bridge and we disable the card's
         * AGP controller we might be locking ourselves out of it. */
        if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
             dev->agp->mode) & PCI_AGP_COMMAND_FW) {
                struct drm_agp_info info;
                struct drm_agp_mode mode;

                ret = drm_agp_info(dev, &info);
                if (ret)
                        return ret;

                mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
                ret = drm_agp_enable(dev, mode);
                if (ret)
                        return ret;
        }

        saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

        /* clear busmaster bit */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
        /* disable AGP */
        nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

        /* power cycle pgraph, if enabled */
        pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
        if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
                nv_wr32(dev, NV03_PMC_ENABLE,
                        pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
                nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
                                NV_PMC_ENABLE_PGRAPH);
        }

        /* and restore (gives effect of resetting AGP) */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);

        return 0;
}
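/* Acquire the AGP bridge, reset the card side and enable AGP at the mode
 * get_agp_mode() selects, recording the aperture for the GART setup. */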
int
nouveau_mem_init_agp(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_agp_info info;
        struct drm_agp_mode mode;
        int ret;

        if (!dev->agp->acquired) {
                ret = drm_agp_acquire(dev);
                if (ret) {
                        NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
                        return ret;
                }
        }

        nouveau_mem_reset_agp(dev);

        ret = drm_agp_info(dev, &info);
        if (ret) {
                NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
                return ret;
        }

        /* see agp.h for the AGPSTAT_* modes available */
        mode.mode = get_agp_mode(dev, info.mode);
        ret = drm_agp_enable(dev, mode);
        if (ret) {
                NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.type      = NOUVEAU_GART_AGP;
        dev_priv->gart_info.aper_base = info.aperture_base;
        dev_priv->gart_info.aper_size = info.aperture_size;
        return 0;
}
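/* Human-readable names for each VRAM type; the list is terminated by the
 * NV_MEM_TYPE_UNKNOWN sentinel. */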
static const struct vram_types {
        int value;
        const char *name;
} vram_type_map[] = {
        { NV_MEM_TYPE_STOLEN , "stolen system memory" },
        { NV_MEM_TYPE_SGRAM  , "SGRAM" },
        { NV_MEM_TYPE_SDRAM  , "SDRAM" },
        { NV_MEM_TYPE_DDR1   , "DDR1" },
        { NV_MEM_TYPE_DDR2   , "DDR2" },
        { NV_MEM_TYPE_DDR3   , "DDR3" },
        { NV_MEM_TYPE_GDDR2  , "GDDR2" },
        { NV_MEM_TYPE_GDDR3  , "GDDR3" },
        { NV_MEM_TYPE_GDDR4  , "GDDR4" },
        { NV_MEM_TYPE_GDDR5  , "GDDR5" },
        { NV_MEM_TYPE_UNKNOWN, "unknown type" }
};
int
nouveau_mem_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        const struct vram_types *vram_type;
        int ret, dma_bits;

        dma_bits = 32;
        if (dev_priv->card_type >= NV_50) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
                        dma_bits = 40;
        } else
        if (0 && pci_is_pcie(dev->pdev) &&
            dev_priv->chipset  > 0x40 &&
            dev_priv->chipset != 0x45) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
                        dma_bits = 39;
        }

        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret)
                return ret;
        ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret) {
                /* Reset to default value. */
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
        }

        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
                                 dev_priv->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
                                 dma_bits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
                return ret;
        }

        vram_type = vram_type_map;
        while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
                if (nouveau_vram_type) {
                        if (!strcasecmp(nouveau_vram_type, vram_type->name)) {
                                dev_priv->vram_type = vram_type->value;
                                break;
                        }
                } else {
                        if (vram_type->value == dev_priv->vram_type)
                                break;
                }
                vram_type++;
        }

        NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
                (int)(dev_priv->vram_size >> 20), vram_type->name);
        if (dev_priv->vram_sys_base) {
                NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
                        dev_priv->vram_sys_base);
        }

        dev_priv->fb_available_size = dev_priv->vram_size;
        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
        if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
                dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_aper_free = dev_priv->fb_available_size;

        /* mappable vram */
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             dev_priv->fb_available_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, &dev_priv->vga_ram);
                if (ret == 0)
                        ret = nouveau_bo_pin(dev_priv->vga_ram,
                                             TTM_PL_FLAG_VRAM);

                if (ret) {
                        NV_WARN(dev, "failed to reserve VGA memory\n");
                        nouveau_bo_ref(NULL, &dev_priv->vga_ram);
                }
        }

        dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
                                         pci_resource_len(dev->pdev, 1),
                                         DRM_MTRR_WC);
        return 0;
}
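/* Pick a GART backend: AGP where the device and nouveau_agpmode permit,
 * otherwise SGDMA, then hand the resulting aperture to TTM. */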
int
nouveau_mem_gart_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret;

        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
        if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
                ret = nouveau_mem_init_agp(dev);
                if (ret)
                        NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
        }
#endif

        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
                ret = nouveau_sgdma_init(dev);
                if (ret) {
                        NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
                        return ret;
                }
        }

        NV_INFO(dev, "%d MiB GART (aperture)\n",
                (int)(dev_priv->gart_info.aper_size >> 20));
        dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

        ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
                             dev_priv->gart_info.aper_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
                return ret;
        }

        return 0;
}
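/* Pack one vbios timing entry into the three NV40 timing registers
 * (0x220 block).  tCWL comes from the boot snapshot, the rest from the
 * table entry. */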
static void
nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
                      struct nouveau_pm_tbl_entry *e,
                      struct nouveau_pm_memtiming *t,
                      struct nouveau_pm_memtiming *boot)
{
        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        /* XXX: I don't trust the -1's and +1's... they must come
         *      from somewhere! */
        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    1 << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
                    (e->tCL + 2 - (t->tCWL - 1));

        t->reg[2] = 0x20200000 |
                    ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2]);
}
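/* NV50 equivalent: nine timing registers in the 0x220 block, with BIT P
 * table versions 1 and 2 using noticeably different encodings for
 * regs 1, 3 and 5-8. */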
static void
nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P,
                      struct nouveau_pm_tbl_header *hdr,
                      struct nouveau_pm_tbl_entry *e,
                      struct nouveau_pm_memtiming *t,
                      struct nouveau_pm_memtiming *boot)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

        switch (min(hdr->entry_len, (u8) 22)) {
        case 22:
                unk21 = e->tUNK_21;
        case 21:
                unk20 = e->tUNK_20;
        case 20:
                if (e->tCWL > 0)
                        t->tCWL = e->tCWL;
        case 18:
                unk18 = e->tUNK_18;
                break;
        }

        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    max(unk18, (u8) 1) << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

        t->reg[2] = ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

        t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

        t->reg[8] = boot->reg[8] & 0xffffff00;

        if (P->version == 1) {
                t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

                t->reg[3] = (0x14 + e->tCL) << 24 |
                            0x16 << 16 |
                            (e->tCL - 1) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= boot->reg[4] & 0xffff0000;

                t->reg[6] = (0x33 - t->tCWL) << 16 |
                            t->tCWL << 8 |
                            (0x2e + e->tCL - t->tCWL);

                t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

                /* XXX: P.version == 1 only has DDR2 and GDDR3? */
                if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
                        t->reg[5] |= (e->tCL + 3) << 8;
                        t->reg[6] |= (t->tCWL - 2) << 8;
                        t->reg[8] |= (e->tCL - 4);
                } else {
                        t->reg[5] |= (e->tCL + 2) << 8;
                        t->reg[6] |= t->tCWL << 8;
                        t->reg[8] |= (e->tCL - 2);
                }
        } else {
                t->reg[1] |= (5 + e->tCL - (t->tCWL));

                /* XXX: 0xb? 0x30? */
                t->reg[3] = (0x30 + e->tCL) << 24 |
                            (boot->reg[3] & 0x00ff0000) |
                            (0xb + e->tCL) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= (unk20 << 24 | unk21 << 16);

                t->reg[5] |= (t->tCWL + 6) << 8;

                t->reg[6] = (0x5a + e->tCL) << 16 |
                            (6 - e->tCL + t->tCWL) << 8 |
                            (0x50 + e->tCL - t->tCWL);

                tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
                t->reg[7] = (tmp7_3 << 24) |
                            ((tmp7_3 - 6 + e->tCL) << 16) |
                            0x202;
        }

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
                 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
        NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
}
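/* Fermi variant: five timing registers in the 0x290 block, mostly masked
 * into the values the card booted with rather than rebuilt from scratch. */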
static void
nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
                      struct nouveau_pm_tbl_entry *e,
                      struct nouveau_pm_memtiming *t,
                      struct nouveau_pm_memtiming *boot)
{
        if (e->tCWL > 0)
                t->tCWL = e->tCWL;

        t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
                     e->tRFC << 8 | e->tRC);

        t->reg[1] = (boot->reg[1] & 0xff000000) |
                    (e->tRCDWR & 0x0f) << 20 |
                    (e->tRCDRD & 0x0f) << 14 |
                    (t->tCWL << 7) |
                    (e->tCL & 0x0f);

        t->reg[2] = (boot->reg[2] & 0xff0000ff) |
                    e->tWR << 16 | e->tWTR << 8;

        t->reg[3] = (e->tUNK_20 & 0xf) << 9 |
                    (e->tUNK_21 & 0xf) << 5 |
                    (e->tUNK_13 & 0x1f);

        t->reg[4] = (boot->reg[4] & 0xfff00fff) |
                    (e->tRRD & 0x1f) << 15;

        NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
}
/*
 * MR generation methods
 */
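/* DDR2: build MR0/MR1 from a timing entry, range-checking tCL/tWR against
 * what the MR field widths can encode and falling back to the boot ODT
 * when the entry is too short to carry one. */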
static void
nouveau_mem_ddr2_mr(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
                    struct nouveau_pm_tbl_entry *e,
                    struct nouveau_pm_memtiming *t,
                    struct nouveau_pm_memtiming *boot)
{
        t->drive_strength = 0;
        if (hdr->entry_len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return;
        }

        if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x100f) |
                   (e->tCL) << 4 |
                   (e->tWR - 1) << 9;
        t->mr[1] = (boot->mr[1] & 0x101fbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5;

        NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
}
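/* DDR3 encodes write recovery non-linearly in MR0; this LUT maps tWR in
 * clocks to the MR0 WR field. */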
uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
        0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
static void
nouveau_mem_ddr3_mr(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
                    struct nouveau_pm_tbl_entry *e,
                    struct nouveau_pm_memtiming *t,
                    struct nouveau_pm_memtiming *boot)
{
        t->drive_strength = 0;
        if (hdr->entry_len < 15) {
                t->odt = boot->odt;
        } else {
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return;
        }

        if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return;
        }

        if (e->tCWL < 5) {
                NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
                return;
        }

        t->mr[0] = (boot->mr[0] & 0x180b) |
                   /* CAS */
                   ((e->tCL - 4) & 0x7) << 4 |
                   ((e->tCL - 4) & 0x8) >> 1 |
                   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
        t->mr[1] = (boot->mr[1] & 0x101dbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5 |
                   (t->odt & 0x4) << 7;
        t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
}
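/* GDDR3 CAS and WR fields are likewise non-contiguous; tCL and tWR are
 * mapped through these LUTs before being spliced into MR0/MR1. */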
uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
        0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
        0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
static void
nouveau_mem_gddr3_mr(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
                     struct nouveau_pm_tbl_entry *e,
                     struct nouveau_pm_memtiming *t,
                     struct nouveau_pm_memtiming *boot)
{
        if (hdr->entry_len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return;
        }

        if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0xe0b) |
                   /* CAS */
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
        t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
                   (t->odt << 2) |
                   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
}
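/* GDDR5 appears to take CL and WR as plain offsets in MR0 (tCL - 5 and,
 * assuming the usual encoding, tWR - 4), with drive strength and ODT in
 * MR1. */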
static void
nouveau_mem_gddr5_mr(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
                     struct nouveau_pm_tbl_entry *e,
                     struct nouveau_pm_memtiming *t,
                     struct nouveau_pm_memtiming *boot)
{
        if (hdr->entry_len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x03;
        }

        if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return;
        }

        if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x007) |
                   ((e->tCL - 5) << 3) |
                   ((e->tWR - 4) << 8);
        t->mr[1] = (boot->mr[1] & 0x1007f0) |
                   t->drive_strength |
                   (t->odt << 2);

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
}
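/* Snapshot the timings the card is running right now: read the timing
 * register block and mode registers back, and decode ODT/drive strength
 * per memory type.  This becomes the "boot" reference for generation. */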
void
nouveau_mem_copy_current_timings(struct drm_device *dev,
                                 struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        u32 timing_base, timing_regs, mr_base;
        int i;

        if (dev_priv->card_type >= 0xC0) {
                timing_base = 0x10f290;
                mr_base = 0x10f300;
        } else {
                timing_base = 0x100220;
                mr_base = 0x1002c0;
        }

        t->id = -1;

        switch (dev_priv->card_type) {
        case NV_50:
                timing_regs = 9;
                break;
        case NV_C0:
                timing_regs = 5;
                break;
        case NV_30:
        case NV_40:
                timing_regs = 3;
                break;
        default:
                return;
        }

        for (i = 0; i < timing_regs; i++)
                t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

        t->tCWL = 0;
        if (dev_priv->card_type < NV_C0) {
                t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
        }

        t->mr[0] = nv_rd32(dev, mr_base);
        t->mr[1] = nv_rd32(dev, mr_base + 0x04);
        t->mr[2] = nv_rd32(dev, mr_base + 0x20);
        t->mr[3] = nv_rd32(dev, mr_base + 0x24);

        t->odt = 0;
        t->drive_strength = 0;

        switch (dev_priv->vram_type) {
        case NV_MEM_TYPE_DDR3:
                t->odt |= (t->mr[1] & 0x200) >> 7;
                /* fall through */
        case NV_MEM_TYPE_DDR2:
                t->odt |= (t->mr[1] & 0x04) >> 2 |
                          (t->mr[1] & 0x40) >> 5;
                break;
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_GDDR5:
                t->drive_strength = t->mr[1] & 0x03;
                t->odt = (t->mr[1] & 0x0c) >> 2;
                break;
        default:
                break;
        }
}
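/* Compare a generated timing set against another (normally the boot
 * snapshot).  Only a full match of the registers relevant to this card
 * plus the memory-type-specific MRs counts as equal. */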
static bool
nouveau_mem_compare_timings(struct drm_device *dev,
                            struct nouveau_pm_memtiming *t1,
                            struct nouveau_pm_memtiming *t2)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        switch (dev_priv->card_type) {
        case NV_50:
                if (t1->reg[8] != t2->reg[8] ||
                    t1->reg[7] != t2->reg[7] ||
                    t1->reg[6] != t2->reg[6] ||
                    t1->reg[5] != t2->reg[5])
                        return false;
                /* fall through */
        case NV_C0:
                if (t1->reg[4] != t2->reg[4] ||
                    t1->reg[3] != t2->reg[3])
                        return false;
                /* fall through */
        case NV_40:
                if (t1->reg[2] != t2->reg[2] ||
                    t1->reg[1] != t2->reg[1] ||
                    t1->reg[0] != t2->reg[0])
                        return false;
                break;
        default:
                return false;
        }

        /* RSpliet: may generate many false negatives */
        switch (dev_priv->vram_type) {
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_GDDR5:
                if (t1->mr[0] != t2->mr[0] ||
                    t1->mr[1] != t2->mr[1])
                        return false;
                break;
        case NV_MEM_TYPE_DDR3:
                if (t1->mr[2] != t2->mr[2])
                        return false;
                /* fall through */
        case NV_MEM_TYPE_DDR2:
                if (t1->mr[0] != t2->mr[0])
                        return false;
                break;
        default:
                break;
        }

        return true;
}
/**
 * Processes the Memory Timing BIOS table, stores generated
 * timings in the driver struct
 *
 * @pre init scripts were run, memtiming regs are initialized
 */
void
nouveau_mem_timing_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
        struct nvbios *bios = &dev_priv->vbios;
        struct bit_entry P;
        struct nouveau_pm_tbl_header *hdr = NULL;
        bool valid_generation = false;
        u8 *entry;
        int i;

        memtimings->nr_timing = 0;
        memtimings->nr_timing_valid = 0;
        memtimings->supported = 0;

        if (dev_priv->card_type < NV_40) {
                NV_ERROR(dev, "Timing entry format unknown for card_type %x. "
                              "please contact nouveau developers",
                         dev_priv->card_type);
                return;
        }

        /* Copy the current timings */
        nouveau_mem_copy_current_timings(dev, &memtimings->boot);

        if (bios->type == NVBIOS_BIT) {
                if (bit_table(dev, 'P', &P))
                        return;

                if (P.version == 1)
                        hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev,
                                                                P.data[4]);
                else if (P.version == 2)
                        hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev,
                                                                P.data[8]);
                else
                        NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
        } else {
                NV_DEBUG(dev, "BMP version too old for memory\n");
                return;
        }

        if (!hdr) {
                NV_DEBUG(dev, "memory timing table pointer invalid\n");
                return;
        }

        if (hdr->version != 0x10) {
                NV_WARN(dev, "memory timing table 0x%02x unknown\n",
                        hdr->version);
                return;
        }

        /* validate record length */
        if (hdr->entry_len < 15) {
                NV_ERROR(dev, "mem timing table length unknown: %d\n",
                         hdr->entry_len);
                return;
        }

        /* parse vbios entries into common format */
        memtimings->timing = kcalloc(hdr->entry_cnt,
                                     sizeof(*memtimings->timing), GFP_KERNEL);
        if (!memtimings->timing)
                return;

        entry = (u8 *) hdr + hdr->header_len;
        for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
                struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
                struct nouveau_pm_tbl_entry *entry_struct =
                                        (struct nouveau_pm_tbl_entry *) entry;

                timing->id = i;
                memtimings->nr_timing_valid++;

                timing->tCWL = memtimings->boot.tCWL;

                /* generate the timings */
                if (dev_priv->card_type == NV_40) {
                        nv40_mem_timing_entry(dev, hdr, entry_struct,
                                              &pm->memtimings.timing[i],
                                              &memtimings->boot);
                } else if (dev_priv->card_type == NV_50) {
                        nv50_mem_timing_entry(dev, &P, hdr, entry_struct,
                                              &pm->memtimings.timing[i],
                                              &memtimings->boot);
                } else if (dev_priv->card_type == NV_C0) {
                        nvc0_mem_timing_entry(dev, hdr, entry_struct,
                                              &pm->memtimings.timing[i],
                                              &memtimings->boot);
                }

                /* generate the MR/EMR/... */
                switch (dev_priv->vram_type) {
                case NV_MEM_TYPE_GDDR3:
                        nouveau_mem_gddr3_mr(dev, hdr, entry_struct, timing,
                                             &memtimings->boot);
                        break;
                case NV_MEM_TYPE_GDDR5:
                        nouveau_mem_gddr5_mr(dev, hdr, entry_struct, timing,
                                             &memtimings->boot);
                        break;
                case NV_MEM_TYPE_DDR2:
                        nouveau_mem_ddr2_mr(dev, hdr, entry_struct, timing,
                                            &memtimings->boot);
                        break;
                case NV_MEM_TYPE_DDR3:
                        nouveau_mem_ddr3_mr(dev, hdr, entry_struct, timing,
                                            &memtimings->boot);
                        break;
                default:
                        valid_generation = false;
                        break;
                }

                /* some kind of validation */
                if (nouveau_mem_compare_timings(dev, timing,
                                                &memtimings->boot)) {
                        NV_DEBUG(dev, "Copy boot timings from entry %d\n",
                                 timing->id);
                        memtimings->boot = *timing;
                        valid_generation = true;
                }
        }

        memtimings->nr_timing = hdr->entry_cnt;
        memtimings->supported = (P.version == 1) && valid_generation;

        /* warn if none of the timing entries could re-generate the
         * current timings */
        if (memtimings->nr_timing_valid > 0 && !valid_generation) {
                NV_WARN(dev,
                        "Memory timings management may not be working."
                        " please report to nouveau devs\n");
        }
}
void
nouveau_mem_timing_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;

        kfree(mem->timing);
        mem->timing = NULL;
}
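/* Look up the VRAM type for the current ramcfg strap in the BIT 'M'
 * table; anything unparseable is reported as unknown. */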
static int
nouveau_mem_vbios_type(struct drm_device *dev)
{
        struct bit_entry M;
        u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;

        if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
                u8 *table = ROMPTR(dev, M.data[3]);
                if (table && table[0] == 0x10 && ramcfg < table[3]) {
                        u8 *entry = table + table[1] + (ramcfg * table[2]);
                        switch (entry[0] & 0x0f) {
                        case 0: return NV_MEM_TYPE_DDR2;
                        case 1: return NV_MEM_TYPE_DDR3;
                        case 2: return NV_MEM_TYPE_GDDR3;
                        case 3: return NV_MEM_TYPE_GDDR5;
                        default:
                                break;
                        }
                }
        }
        return NV_MEM_TYPE_UNKNOWN;
}
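/*
 * TTM memory type managers
 */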
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        /* nothing to do, the underlying VRAM allocator already exists */
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}
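/* Drop the kernel and (if present) user VM mappings of a node before its
 * backing memory is handed back to the allocator. */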
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        struct drm_device *dev = dev_priv->dev;

        nouveau_mem_node_cleanup(mem->mm_node);
        vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
                        mem->page_alignment << PAGE_SHIFT, size_nc,
                        (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}
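/* Dump allocator state: one line per nouveau_mm node (type, start, end),
 * then totals.  Offsets are stored in 4KiB units, hence the << 12. */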
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_mm *mm = man->priv;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&mm->mutex);

        printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s  block: 0x%08x\n",
               prefix, mm->block_size << 12);
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};
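/* The GART manager needs no real allocator: placement happens at bind
 * time in the address translation layer, so _new() only validates the
 * size and allocates bookkeeping. */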
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_mem *node;

        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
                     dev_priv->gart_info.aper_size))
                return -ENOMEM;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        node->page_shift = 12;

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};