drm/nv50: import new vm code
[deliverable/linux.git] / drivers / gpu / drm / nouveau / nouveau_mem.c
CommitLineData
6ee73861
BS
1/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33#include "drmP.h"
34#include "drm.h"
35#include "drm_sarea.h"
6ee73861 36
cbab95db
FJ
37#include "nouveau_drv.h"
38#include "nouveau_pm.h"
573a2a37 39#include "nouveau_mm.h"
a11c3198 40#include "nouveau_vm.h"
a845fff8 41
a0af9add
FJ
42/*
43 * NV10-NV40 tiling helpers
44 */
45
/*
 * Reprogram tile region 'tile' with the given addr/size/pitch/flags on
 * NV10-NV40 class hardware.  The PFIFO puller and channel switching are
 * disabled (under context_switch_lock) around the PFB/PGRAPH register
 * updates so no channel can observe a half-programmed tile region.
 */
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int i = tile - dev_priv->tile.reg; /* region index via pointer arithmetic */
	unsigned long save;

	/* drop any fence that was guarding the previous configuration */
	nouveau_fence_unref(&tile->fence);

	/* a non-zero pitch means the region is currently programmed */
	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	/* pitch == 0 requests teardown only, no new region */
	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	/* quiesce the hardware before touching the tile registers */
	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	pfifo->reassign(dev, false);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	pgraph->set_tile_region(dev, i);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
79
a5cf68b0
FJ
80static struct nouveau_tile_reg *
81nv10_mem_get_tile_region(struct drm_device *dev, int i)
a0af9add
FJ
82{
83 struct drm_nouveau_private *dev_priv = dev->dev_private;
a5cf68b0 84 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
a0af9add 85
a5cf68b0 86 spin_lock(&dev_priv->tile.lock);
a0af9add 87
a5cf68b0
FJ
88 if (!tile->used &&
89 (!tile->fence || nouveau_fence_signalled(tile->fence)))
90 tile->used = true;
91 else
92 tile = NULL;
a0af9add 93
a5cf68b0
FJ
94 spin_unlock(&dev_priv->tile.lock);
95 return tile;
96}
a0af9add 97
a5cf68b0
FJ
98void
99nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
100 struct nouveau_fence *fence)
101{
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
a0af9add 103
a5cf68b0
FJ
104 if (tile) {
105 spin_lock(&dev_priv->tile.lock);
106 if (fence) {
107 /* Mark it as pending. */
108 tile->fence = fence;
109 nouveau_fence_ref(fence);
a0af9add 110 }
a0af9add 111
a5cf68b0
FJ
112 tile->used = false;
113 spin_unlock(&dev_priv->tile.lock);
114 }
a0af9add
FJ
115}
116
a5cf68b0
FJ
117struct nouveau_tile_reg *
118nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
119 uint32_t pitch, uint32_t flags)
a0af9add 120{
a5cf68b0
FJ
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
123 struct nouveau_tile_reg *tile, *found = NULL;
124 int i;
125
126 for (i = 0; i < pfb->num_tiles; i++) {
127 tile = nv10_mem_get_tile_region(dev, i);
128
129 if (pitch && !found) {
130 found = tile;
131 continue;
132
133 } else if (tile && tile->pitch) {
134 /* Kill an unused tile region. */
135 nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
136 }
137
138 nv10_mem_put_tile_region(dev, tile, NULL);
a0af9add
FJ
139 }
140
a5cf68b0
FJ
141 if (found)
142 nv10_mem_update_tile_region(dev, found, addr, size,
143 pitch, flags);
144 return found;
a0af9add
FJ
145}
146
6ee73861
BS
147/*
148 * NV50 VM helpers
149 */
/*
 * Map a linear range of VRAM into the NV50 GPU virtual address space.
 * 'virt' and 'size' are in bytes; 'phys' is the VRAM address (adjusted
 * for stolen system memory when vram_sys_base is set).  PTEs are
 * written in the largest power-of-two block the current alignment and
 * remaining size allow; the block order is encoded into the low PTE
 * word.  Always returns 0.
 */
int
nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
			uint32_t flags, uint64_t phys)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pgt;
	unsigned block;
	int i;

	/* convert to 64KiB-page units, times two 32-bit words per PTE */
	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
	size = (size >> 16) << 1;

	phys |= ((uint64_t)flags << 32);
	phys |= 1;	/* valid bit */
	if (dev_priv->vram_sys_base) {
		phys += dev_priv->vram_sys_base;
		phys |= 0x30;	/* NOTE(review): presumably system-memory target bits — confirm */
	}

	while (size) {
		unsigned offset_h = upper_32_bits(phys);
		unsigned offset_l = lower_32_bits(phys);
		unsigned pte, end;

		/* largest block (up to 1<<8 PTE words) that fits and is
		 * aligned at the current virtual position */
		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 1);
			if (size >= block && !(virt & (block - 1)))
				break;
		}
		offset_l |= (i << 7);	/* encode block order in the PTE */

		phys += block << 15;
		size -= block;

		/* a block may straddle page tables (16384 words each) */
		while (block) {
			pgt = dev_priv->vm_vram_pt[virt >> 14];
			pte = virt & 0x3ffe;

			end = pte + block;
			if (end > 16384)
				end = 16384;
			block -= (end - pte);
			virt += (end - pte);

			/* each PTE is two words: low then high */
			while (pte < end) {
				nv_wo32(pgt, (pte * 4) + 0, offset_l);
				nv_wo32(pgt, (pte * 4) + 4, offset_h);
				pte += 2;
			}
		}
	}

	/* make the new PTEs visible: flush instmem writes and the TLBs
	 * of every engine that walks this VM */
	dev_priv->engine.instmem.flush(dev);
	dev_priv->engine.fifo.tlb_flush(dev);
	dev_priv->engine.graph.tlb_flush(dev);
	nv50_vm_flush_engine(dev, 6);
	return 0;
}
208
/*
 * Unmap a virtual range previously mapped with
 * nv50_mem_vm_bind_linear().  'virt' and 'size' are in bytes; every
 * PTE word covering the range is cleared, then the TLBs are flushed.
 */
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pgt;
	unsigned pages, pte, end;

	virt -= dev_priv->vm_vram_base;
	/* two 32-bit PTE words per 64KiB page */
	pages = (size >> 16) << 1;

	while (pages) {
		/* note: indexing here is in byte units, unlike the
		 * bind path which pre-scales 'virt' */
		pgt = dev_priv->vm_vram_pt[virt >> 29];
		pte = (virt & 0x1ffe0000ULL) >> 15;

		end = pte + pages;
		if (end > 16384)
			end = 16384;
		pages -= (end - pte);
		virt += (end - pte) << 15;

		/* clear one PTE word per iteration */
		while (pte < end) {
			nv_wo32(pgt, (pte * 4), 0);
			pte++;
		}
	}

	dev_priv->engine.instmem.flush(dev);
	dev_priv->engine.fifo.tlb_flush(dev);
	dev_priv->engine.graph.tlb_flush(dev);
	nv50_vm_flush_engine(dev, 6);
}
240
241/*
242 * Cleanup everything
243 */
b833ac26 244void
fbd2895e 245nouveau_mem_vram_fini(struct drm_device *dev)
6ee73861
BS
246{
247 struct drm_nouveau_private *dev_priv = dev->dev_private;
248
ac8fb975
BS
249 nouveau_bo_unpin(dev_priv->vga_ram);
250 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
251
6ee73861
BS
252 ttm_bo_device_release(&dev_priv->ttm.bdev);
253
254 nouveau_ttm_global_release(dev_priv);
255
fbd2895e
BS
256 if (dev_priv->fb_mtrr >= 0) {
257 drm_mtrr_del(dev_priv->fb_mtrr,
258 pci_resource_start(dev->pdev, 1),
259 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
260 dev_priv->fb_mtrr = -1;
261 }
262}
263
264void
265nouveau_mem_gart_fini(struct drm_device *dev)
266{
267 nouveau_sgdma_takedown(dev);
268
cd0b072f 269 if (drm_core_has_AGP(dev) && dev->agp) {
6ee73861
BS
270 struct drm_agp_mem *entry, *tempe;
271
272 /* Remove AGP resources, but leave dev->agp
273 intact until drv_cleanup is called. */
274 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
275 if (entry->bound)
276 drm_unbind_agp(entry->memory);
277 drm_free_agp(entry->memory, entry->pages);
278 kfree(entry);
279 }
280 INIT_LIST_HEAD(&dev->agp->memory);
281
282 if (dev->agp->acquired)
283 drm_agp_release(dev);
284
285 dev->agp->acquired = 0;
286 dev->agp->enabled = 0;
287 }
6ee73861
BS
288}
289
6ee73861 290static uint32_t
a76fb4e8
BS
291nouveau_mem_detect_nv04(struct drm_device *dev)
292{
3c7066bc 293 uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
a76fb4e8
BS
294
295 if (boot0 & 0x00000100)
296 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
297
3c7066bc
FJ
298 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
299 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
a76fb4e8 300 return 32 * 1024 * 1024;
3c7066bc 301 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
a76fb4e8 302 return 16 * 1024 * 1024;
3c7066bc 303 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
a76fb4e8 304 return 8 * 1024 * 1024;
3c7066bc 305 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
a76fb4e8
BS
306 return 4 * 1024 * 1024;
307 }
308
309 return 0;
310}
311
312static uint32_t
313nouveau_mem_detect_nforce(struct drm_device *dev)
6ee73861
BS
314{
315 struct drm_nouveau_private *dev_priv = dev->dev_private;
316 struct pci_dev *bridge;
317 uint32_t mem;
318
319 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
320 if (!bridge) {
321 NV_ERROR(dev, "no bridge device\n");
322 return 0;
323 }
324
a76fb4e8 325 if (dev_priv->flags & NV_NFORCE) {
6ee73861
BS
326 pci_read_config_dword(bridge, 0x7C, &mem);
327 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
328 } else
a76fb4e8 329 if (dev_priv->flags & NV_NFORCE2) {
6ee73861
BS
330 pci_read_config_dword(bridge, 0x84, &mem);
331 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
332 }
333
334 NV_ERROR(dev, "impossible!\n");
335 return 0;
336}
337
/*
 * Determine the amount of VRAM for this card (and, on NV50-class
 * hardware, initialise the VRAM backend).  Returns 0 on success or
 * -ENOMEM when no usable VRAM was found.
 */
static int
nouveau_mem_detect(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->card_type == NV_04) {
		dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
	} else
	if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
		dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
	} else
	if (dev_priv->card_type < NV_50) {
		/* NV10-NV4x: size straps live in PFB_FIFO_DATA */
		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
	} else
	if (dev_priv->card_type < NV_C0) {
		if (nv50_vram_init(dev))
			return -ENOMEM;
	} else {
		/* NVC0: 0x10f20c looks like per-partition MiB and
		 * 0x121c74 a partition count — TODO confirm */
		dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
	}

	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	if (dev_priv->vram_size)
		return 0;
	return -ENOMEM;
}
371
71d06186
FJ
#if __OS_HAS_AGP
/*
 * Filter the AGP mode word before handing it to the AGP core: drop
 * fast writes on NV18 and apply any transfer rate forced through the
 * 'nouveau_agpmode' module parameter.
 */
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up
	 * randomly.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool agpv3 = mode & 0x8;	/* bit 3: AGP 3.0 signalling */
		/* AGPv3 rates are expressed in units of 4x */
		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

		/* replace the low three (rate) bits only */
		mode = (mode & ~0x7) | (rate & 0x7);
	}

	return mode;
}
#endif
398
e04d8e82
FJ
/*
 * Reset the card's AGP controller: disable fast writes, clear the
 * busmaster bit, disable AGP, power-cycle PGRAPH and restore the bus
 * state.  A no-op (returning 0) when built without AGP support.
 */
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return ret;

		/* re-enable AGP with FW masked out of the mode */
		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return ret;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

	return 0;
}
446
6ee73861
BS
/*
 * Acquire and enable the AGP aperture and record it in gart_info.
 * Compiles to a stub returning 0 on platforms without AGP support.
 * Returns 0 on success or a negative errno from the AGP core.
 */
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	/* put the controller into a known state before enabling */
	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
#endif
	return 0;
}
486
/*
 * Initialise the VRAM side of memory management: pick a DMA mask,
 * bring up the TTM bo device, reserve PRAMIN space, detect VRAM,
 * create the TTM VRAM region, reserve a 256KiB VGA-memory bo and map
 * the aperture write-combined.  Returns 0 on success, negative errno
 * on failure.
 */
int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits;

	/* NV50+ parts can address 40 bits if the platform allows it */
	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;
	else
		dma_bits = 32;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;

	/* BAR 1 is the VRAM aperture */
	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	/* reserve space at end of VRAM for PRAMIN */
	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
		dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
	else
	if (dev_priv->card_type >= NV_40)
		dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
	else
		dev_priv->ramin_rsvd_vram = (512 * 1024);

	/* initialise gpu-specific vram backend */
	ret = nouveau_mem_detect(dev);
	if (ret)
		return ret;

	dev_priv->fb_available_size = dev_priv->vram_size;
	/* only as much VRAM as the BAR exposes is CPU-mappable */
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* pin a 256KiB bo to keep VGA memory out of the allocator's
	 * hands; failure is non-fatal (vga_ram stays NULL) */
	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &dev_priv->vga_ram);
	if (ret == 0)
		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_WARN(dev, "failed to reserve VGA memory\n");
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}
565
/*
 * Initialise the GART: prefer AGP where the device supports it and the
 * 'nouveau_agpmode' parameter allows it, otherwise fall back to the
 * PCI(E) sgdma backend; then create the TTM TT region over the
 * resulting aperture.  Returns 0 on success, negative errno otherwise.
 */
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	/* AGP unavailable or failed: fall back to sgdma */
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}
604
7760fcb0
RS
/*
 * Parse the VBIOS memory timing table (located via the BIT 'P'
 * performance table) into pm->memtimings.  On any parse failure the
 * function returns quietly and memtimings->supported stays false.
 * Many register fields are reverse-engineered; the tUNK_* names and
 * XXX comments mark values whose meaning is not yet confirmed.
 */
void
nouveau_mem_timing_init(struct drm_device *dev)
{
	/* cards < NVC0 only */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
	struct nvbios *bios = &dev_priv->vbios;
	struct bit_entry P;
	u8 tUNK_0, tUNK_1, tUNK_2;
	u8 tRP;		/* Byte 3 */
	u8 tRAS;	/* Byte 5 */
	u8 tRFC;	/* Byte 7 */
	u8 tRC;		/* Byte 9 */
	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; /* tUNK_14 is read but not yet used */
	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
	u8 *mem = NULL, *entry;
	int i, recordlen, entries;

	/* locate the timing table pointer inside the BIT 'P' table;
	 * its offset moved between P-table versions */
	if (bios->type == NVBIOS_BIT) {
		if (bit_table(dev, 'P', &P))
			return;

		if (P.version == 1)
			mem = ROMPTR(bios, P.data[4]);
		else
		if (P.version == 2)
			mem = ROMPTR(bios, P.data[8]);
		else {
			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
		}
	} else {
		NV_DEBUG(dev, "BMP version too old for memory\n");
		return;
	}

	if (!mem) {
		NV_DEBUG(dev, "memory timing table pointer invalid\n");
		return;
	}

	/* only table version 0x10 is understood */
	if (mem[0] != 0x10) {
		NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
		return;
	}

	/* validate record length */
	entries   = mem[2];
	recordlen = mem[3];
	if (recordlen < 15) {
		NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
		return;
	}

	/* parse vbios entries into common format */
	memtimings->timing =
		kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
	if (!memtimings->timing)
		return;

	/* mem[1] is the offset of the first record from the header */
	entry = mem + mem[1];
	for (i = 0; i < entries; i++, entry += recordlen) {
		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
		/* entry[0] == 0 marks an unused record */
		if (entry[0] == 0)
			continue;

		/* defaults for bytes that short records don't carry */
		tUNK_18 = 1;
		tUNK_19 = 1;
		tUNK_20 = 0;
		tUNK_21 = 0;
		/* read only as many trailing bytes as the record holds;
		 * deliberate fallthrough from longest to shortest */
		switch (min(recordlen, 22)) {
		case 22:
			tUNK_21 = entry[21];
			/* fallthrough */
		case 21:
			tUNK_20 = entry[20];
			/* fallthrough */
		case 20:
			tUNK_19 = entry[19];
			/* fallthrough */
		case 19:
			tUNK_18 = entry[18];
			/* fallthrough */
		default:
			tUNK_0  = entry[0];
			tUNK_1  = entry[1];
			tUNK_2  = entry[2];
			tRP     = entry[3];
			tRAS    = entry[5];
			tRFC    = entry[7];
			tRC     = entry[9];
			tUNK_10 = entry[10];
			tUNK_11 = entry[11];
			tUNK_12 = entry[12];
			tUNK_13 = entry[13];
			tUNK_14 = entry[14];
			break;
		}

		timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);

		/* XXX: I don't trust the -1's and +1's... they must come
		 * from somewhere! */
		timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
				      tUNK_18 << 16 |
				      (tUNK_1 + tUNK_19 + 1) << 8 |
				      (tUNK_2 - 1));

		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
		if(recordlen > 19) {
			timing->reg_100228 += (tUNK_19 - 1) << 24;
		}/* I cannot back-up this else-statement right now
		else {
			timing->reg_100228 += tUNK_12 << 24;
		}*/

		/* XXX: reg_10022c */
		timing->reg_10022c = tUNK_2 - 1;

		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
				      tUNK_13 << 8 | tUNK_13);

		/* XXX: +6? */
		timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
		timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;

		/* XXX; reg_100238, reg_10023c
		 * reg: 0x00??????
		 * reg_10023c:
		 *	0 for pre-NV50 cards
		 *	0x????0202 for NV50+ cards (empirical evidence) */
		if(dev_priv->card_type >= NV_50) {
			timing->reg_10023c = 0x202;
		}

		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
			 timing->reg_100220, timing->reg_100224,
			 timing->reg_100228, timing->reg_10022c);
		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
			 timing->reg_100230, timing->reg_100234,
			 timing->reg_100238, timing->reg_10023c);
	}

	memtimings->nr_timing = entries;
	memtimings->supported = true;
}
747
748void
749nouveau_mem_timing_fini(struct drm_device *dev)
750{
751 struct drm_nouveau_private *dev_priv = dev->dev_private;
752 struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
753
754 kfree(mem->timing);
755}
573a2a37
BS
756
757static int
758nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
759{
760 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
761 struct nouveau_mm *mm;
762 u32 b_size;
763 int ret;
764
765 p_size = (p_size << PAGE_SHIFT) >> 12;
766 b_size = dev_priv->vram_rblock_size >> 12;
767
768 ret = nouveau_mm_init(&mm, 0, p_size, b_size);
769 if (ret)
770 return ret;
771
772 man->priv = mm;
773 return 0;
774}
775
776static int
777nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
778{
779 struct nouveau_mm *mm = man->priv;
780 int ret;
781
782 ret = nouveau_mm_fini(&mm);
783 if (ret)
784 return ret;
785
786 man->priv = NULL;
787 return 0;
788}
789
790static void
791nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
792 struct ttm_mem_reg *mem)
793{
794 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
795 struct drm_device *dev = dev_priv->dev;
796
797 nv50_vram_del(dev, (struct nouveau_vram **)&mem->mm_node);
798}
799
800static int
801nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
802 struct ttm_buffer_object *bo,
803 struct ttm_placement *placement,
804 struct ttm_mem_reg *mem)
805{
806 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
807 struct drm_device *dev = dev_priv->dev;
808 struct nouveau_bo *nvbo = nouveau_bo(bo);
809 struct nouveau_vram *vram;
810 int ret;
811
812 ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, 65536, 0,
813 (nvbo->tile_flags >> 8) & 0x7f, &vram);
814 if (ret)
815 return ret;
816
817 mem->mm_node = vram;
818 mem->start = vram->offset >> PAGE_SHIFT;
819 return 0;
820}
821
822void
823nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
824{
825 struct ttm_bo_global *glob = man->bdev->glob;
826 struct nouveau_mm *mm = man->priv;
827 struct nouveau_mm_node *r;
828 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
829 int i;
830
831 mutex_lock(&mm->mutex);
832 list_for_each_entry(r, &mm->nodes, nl_entry) {
833 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
834 prefix, r->free ? "free" : "used", r->type,
835 ((u64)r->offset << 12),
836 (((u64)r->offset + r->length) << 12));
837 total += r->length;
838 ttotal[r->type] += r->length;
839 if (r->free)
840 tfree[r->type] += r->length;
841 else
842 tused[r->type] += r->length;
843 }
844 mutex_unlock(&mm->mutex);
845
846 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
847 for (i = 0; i < 3; i++) {
848 printk(KERN_DEBUG "%s type %d: 0x%010llx, "
849 "used 0x%010llx, free 0x%010llx\n", prefix,
850 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
851 }
852}
853
/* TTM memory-type manager hooks for VRAM.  Positional order follows
 * struct ttm_mem_type_manager_func — presumably init, takedown,
 * get_node, put_node, debug; confirm against ttm_bo_driver.h. */
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};
This page took 0.17325 seconds and 5 git commands to generate.