Commit | Line | Data |
---|---|---|
6ee73861 BS |
1 | /* |
2 | * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. | |
3 | * Copyright 2005 Stephane Marchesin | |
4 | * | |
5 | * The Weather Channel (TM) funded Tungsten Graphics to develop the | |
6 | * initial release of the Radeon 8500 driver under the XFree86 license. | |
7 | * This notice must be preserved. | |
8 | * | |
9 | * Permission is hereby granted, free of charge, to any person obtaining a | |
10 | * copy of this software and associated documentation files (the "Software"), | |
11 | * to deal in the Software without restriction, including without limitation | |
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
13 | * and/or sell copies of the Software, and to permit persons to whom the | |
14 | * Software is furnished to do so, subject to the following conditions: | |
15 | * | |
16 | * The above copyright notice and this permission notice (including the next | |
17 | * paragraph) shall be included in all copies or substantial portions of the | |
18 | * Software. | |
19 | * | |
20 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
21 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
23 | * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
24 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
25 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
26 | * DEALINGS IN THE SOFTWARE. | |
27 | * | |
28 | * Authors: | |
29 | * Keith Whitwell <keith@tungstengraphics.com> | |
30 | */ | |
31 | ||
32 | ||
33 | #include "drmP.h" | |
34 | #include "drm.h" | |
35 | #include "drm_sarea.h" | |
6ee73861 | 36 | |
cbab95db FJ |
37 | #include "nouveau_drv.h" |
38 | #include "nouveau_pm.h" | |
573a2a37 | 39 | #include "nouveau_mm.h" |
a11c3198 | 40 | #include "nouveau_vm.h" |
a845fff8 | 41 | |
/*
 * NV10-NV40 tiling helpers
 */

/* (Re)program tile region 'tile' in PFB and mirror it to every engine
 * that shadows tiling state, with the FIFO paused so no channel can
 * observe a half-updated region.  pitch == 0 leaves the region freed. */
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i = tile - dev_priv->tile.reg, j;	/* region index via pointer arithmetic */
	unsigned long save;

	/* Any fence guarding the old contents of this region is now moot. */
	nouveau_fence_unref(&tile->fence);

	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	/* Halt FIFO processing and drain the GPU before touching the
	 * tiling registers, so in-flight work cannot use stale state. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	pfifo->reassign(dev, false);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	/* Propagate to every engine that keeps its own copy of the
	 * tile region configuration. */
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
81 | ||
a5cf68b0 FJ |
82 | static struct nouveau_tile_reg * |
83 | nv10_mem_get_tile_region(struct drm_device *dev, int i) | |
a0af9add FJ |
84 | { |
85 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
a5cf68b0 | 86 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
a0af9add | 87 | |
a5cf68b0 | 88 | spin_lock(&dev_priv->tile.lock); |
a0af9add | 89 | |
a5cf68b0 FJ |
90 | if (!tile->used && |
91 | (!tile->fence || nouveau_fence_signalled(tile->fence))) | |
92 | tile->used = true; | |
93 | else | |
94 | tile = NULL; | |
a0af9add | 95 | |
a5cf68b0 FJ |
96 | spin_unlock(&dev_priv->tile.lock); |
97 | return tile; | |
98 | } | |
a0af9add | 99 | |
a5cf68b0 FJ |
100 | void |
101 | nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, | |
102 | struct nouveau_fence *fence) | |
103 | { | |
104 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
a0af9add | 105 | |
a5cf68b0 FJ |
106 | if (tile) { |
107 | spin_lock(&dev_priv->tile.lock); | |
108 | if (fence) { | |
109 | /* Mark it as pending. */ | |
110 | tile->fence = fence; | |
111 | nouveau_fence_ref(fence); | |
a0af9add | 112 | } |
a0af9add | 113 | |
a5cf68b0 FJ |
114 | tile->used = false; |
115 | spin_unlock(&dev_priv->tile.lock); | |
116 | } | |
a0af9add FJ |
117 | } |
118 | ||
/* Allocate a tile region and program it to cover [addr, addr+size) with
 * the given pitch/flags.  Returns the claimed region, or NULL if none
 * is currently free.
 *
 * Scans every region: the first free one becomes the candidate; any
 * other free-but-still-programmed region is opportunistically torn
 * down, and regions we only probed are released again. */
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->num_tiles; i++) {
		/* May return NULL if region i is busy. */
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			/* Keep the first region we managed to claim. */
			found = tile;
			continue;

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}
148 | ||
/*
 * Cleanup everything
 */

/* Tear down the TTM state and MTRR mapping created by
 * nouveau_mem_vram_init(). */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	/* Drop the write-combining MTRR covering the VRAM BAR (BAR1),
	 * if one was successfully added at init time. */
	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}
168 | ||
/* Tear down GART resources: SGDMA first, then any AGP memory still
 * bound, releasing the AGP bridge but leaving dev->agp itself alive. */
void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
}
194 | ||
60d2a88a BS |
195 | bool |
196 | nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) | |
197 | { | |
198 | if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) | |
199 | return true; | |
200 | ||
201 | return false; | |
202 | } | |
203 | ||
#if __OS_HAS_AGP
/* Compute the AGP command word to program: start from the mode the
 * bridge reports and apply chipset quirks plus any user override. */
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long result = mode;

	/* Fast Writes seem broken on nv18: the card locks up randomly,
	 * so strip the FW capability bit there. */
	if (dev_priv->chipset == 0x18)
		result &= ~PCI_AGP_COMMAND_FW;

	/* Honour an AGP rate requested on the command line. */
	if (nouveau_agpmode > 0) {
		int rate = nouveau_agpmode;

		if (result & 0x8)	/* AGPv3: rate field counts 4x units */
			rate /= 4;
		result = (result & ~0x7) | (rate & 0x7);
	}

	return result;
}
#endif
230 | ||
/* Reset the card's AGP controller: neutralise fast writes if either
 * side has them enabled, disable AGP, power-cycle PGRAPH, and restore
 * the bus-master bit.  Compiles to a no-op returning 0 without AGP. */
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return ret;

		/* Re-enable the bridge with FW masked out. */
		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return ret;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
				pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
				NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

	return 0;
}
278 | ||
/* Acquire and enable the AGP bridge, then record the resulting GART
 * aperture in dev_priv->gart_info.  Returns 0 (success or no-AGP
 * build), or a negative errno from the DRM AGP helpers. */
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	/* Start from a known controller state (best effort; return
	 * value intentionally ignored). */
	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
#endif
	return 0;
}
318 | ||
/* Maps NV_MEM_TYPE_* ids to the human-readable names used for logging
 * and for matching the "nouveau_vram_type" module option string.
 * NV_MEM_TYPE_UNKNOWN terminates the table. */
static const struct vram_types {
	int value;
	const char *name;
} vram_type_map[] = {
	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
	{ NV_MEM_TYPE_DDR1   , "DDR1" },
	{ NV_MEM_TYPE_DDR2   , "DDR2" },
	{ NV_MEM_TYPE_DDR3   , "DDR3" },
	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
};
335 | ||
6ee73861 | 336 | int |
fbd2895e | 337 | nouveau_mem_vram_init(struct drm_device *dev) |
6ee73861 BS |
338 | { |
339 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
340 | struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; | |
7ad2d31c | 341 | const struct vram_types *vram_type; |
fbd2895e | 342 | int ret, dma_bits; |
6ee73861 | 343 | |
e0435120 BS |
344 | dma_bits = 32; |
345 | if (dev_priv->card_type >= NV_50) { | |
346 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) | |
347 | dma_bits = 40; | |
348 | } else | |
58b6542b | 349 | if (0 && pci_is_pcie(dev->pdev) && |
01d15332 | 350 | dev_priv->chipset > 0x40 && |
e0435120 BS |
351 | dev_priv->chipset != 0x45) { |
352 | if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) | |
353 | dma_bits = 39; | |
354 | } | |
6ee73861 BS |
355 | |
356 | ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); | |
fbd2895e | 357 | if (ret) |
6ee73861 | 358 | return ret; |
3230cfc3 KRW |
359 | ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); |
360 | if (ret) { | |
361 | /* Reset to default value. */ | |
362 | pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32)); | |
363 | } | |
364 | ||
fbd2895e | 365 | |
6ee73861 BS |
366 | ret = nouveau_ttm_global_init(dev_priv); |
367 | if (ret) | |
368 | return ret; | |
369 | ||
370 | ret = ttm_bo_device_init(&dev_priv->ttm.bdev, | |
371 | dev_priv->ttm.bo_global_ref.ref.object, | |
372 | &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET, | |
373 | dma_bits <= 32 ? true : false); | |
374 | if (ret) { | |
375 | NV_ERROR(dev, "Error initialising bo driver: %d\n", ret); | |
376 | return ret; | |
377 | } | |
378 | ||
7ad2d31c BS |
379 | vram_type = vram_type_map; |
380 | while (vram_type->value != NV_MEM_TYPE_UNKNOWN) { | |
381 | if (nouveau_vram_type) { | |
382 | if (!strcasecmp(nouveau_vram_type, vram_type->name)) | |
383 | break; | |
384 | dev_priv->vram_type = vram_type->value; | |
385 | } else { | |
386 | if (vram_type->value == dev_priv->vram_type) | |
387 | break; | |
388 | } | |
389 | vram_type++; | |
390 | } | |
391 | ||
392 | NV_INFO(dev, "Detected %dMiB VRAM (%s)\n", | |
393 | (int)(dev_priv->vram_size >> 20), vram_type->name); | |
60d2a88a BS |
394 | if (dev_priv->vram_sys_base) { |
395 | NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", | |
396 | dev_priv->vram_sys_base); | |
397 | } | |
398 | ||
573a2a37 BS |
399 | dev_priv->fb_available_size = dev_priv->vram_size; |
400 | dev_priv->fb_mappable_pages = dev_priv->fb_available_size; | |
401 | if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) | |
402 | dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1); | |
403 | dev_priv->fb_mappable_pages >>= PAGE_SHIFT; | |
404 | ||
6ee73861 BS |
405 | dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; |
406 | dev_priv->fb_aper_free = dev_priv->fb_available_size; | |
407 | ||
408 | /* mappable vram */ | |
409 | ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, | |
410 | dev_priv->fb_available_size >> PAGE_SHIFT); | |
411 | if (ret) { | |
412 | NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret); | |
413 | return ret; | |
414 | } | |
415 | ||
d550c41e | 416 | if (dev_priv->card_type < NV_50) { |
7375c95b | 417 | ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM, |
d550c41e BS |
418 | 0, 0, &dev_priv->vga_ram); |
419 | if (ret == 0) | |
420 | ret = nouveau_bo_pin(dev_priv->vga_ram, | |
421 | TTM_PL_FLAG_VRAM); | |
422 | ||
423 | if (ret) { | |
424 | NV_WARN(dev, "failed to reserve VGA memory\n"); | |
425 | nouveau_bo_ref(NULL, &dev_priv->vga_ram); | |
426 | } | |
ac8fb975 BS |
427 | } |
428 | ||
fbd2895e BS |
429 | dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), |
430 | pci_resource_len(dev->pdev, 1), | |
431 | DRM_MTRR_WC); | |
432 | return 0; | |
433 | } | |
434 | ||
/* Initialise the GART: try AGP where available and requested, fall
 * back to SGDMA (PCI/PCIe), then register the TT domain with TTM.
 * Returns 0 on success or a negative errno. */
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		/* AGP failure is non-fatal: we fall through to SGDMA. */
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}
473 | ||
9a782488 RS |
474 | /* XXX: For now a dummy. More samples required, possibly even a card |
475 | * Called from nouveau_perf.c */ | |
476 | void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr, | |
477 | struct nouveau_pm_tbl_entry *e, uint8_t magic_number, | |
478 | struct nouveau_pm_memtiming *timing) { | |
479 | ||
480 | NV_DEBUG(dev,"Timing entry format unknown, please contact nouveau developers"); | |
481 | } | |
482 | ||
/* Build NV40-family memory timing register values (reg_0..reg_2) from a
 * vbios timing table entry.  magic_number is read from 0x100228 by the
 * caller; its exact meaning is unknown. */
void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
		struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
		struct nouveau_pm_memtiming *timing) {

	timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);

	/* XXX: I don't trust the -1's and +1's... they must come
	 * from somewhere! */
	timing->reg_1 = (e->tWR + 2 + magic_number) << 24 |
				1 << 16 |
				(e->tUNK_1 + 2 + magic_number) << 8 |
				(e->tCL + 2 - magic_number);
	timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
	timing->reg_2 |= 0x20200000;

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id,
		 timing->reg_0, timing->reg_1,timing->reg_2);
}
501 | ||
/* Build NV50-family memory timing register values (reg_0..reg_7) from a
 * vbios timing table entry.  P selects the table layout by version;
 * magic_number comes from register 0x100228 (meaning unknown). */
void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr,
		struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) {
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Defaults for fields that shorter table entries don't carry. */
	uint8_t unk18 = 1,
		unk19 = 1,
		unk20 = 0,
		unk21 = 0;

	/* Pick up as many optional bytes as the entry length provides;
	 * the cases deliberately fall through to the shorter layouts. */
	switch (min(hdr->entry_len, (u8) 22)) {
	case 22:
		unk21 = e->tUNK_21;
		/* fallthrough */
	case 21:
		unk20 = e->tUNK_20;
		/* fallthrough */
	case 20:
		unk19 = e->tUNK_19;
		/* fallthrough */
	case 19:
		unk18 = e->tUNK_18;
		break;
	}

	timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);

	/* XXX: I don't trust the -1's and +1's... they must come
	 * from somewhere! */
	timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 |
				max(unk18, (u8) 1) << 16 |
				(e->tUNK_1 + unk19 + 1 + magic_number) << 8;
	if (dev_priv->chipset == 0xa8) {
		timing->reg_1 |= (e->tCL - 1);
	} else {
		timing->reg_1 |= (e->tCL + 2 - magic_number);
	}
	timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);

	timing->reg_5 = (e->tRAS << 24 | e->tRC);
	timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16;

	if (P->version == 1) {
		timing->reg_2 |= magic_number << 24;
		timing->reg_3 = (0x14 + e->tCL) << 24 |
					0x16 << 16 |
					(e->tCL - 1) << 8 |
					(e->tCL - 1);
		timing->reg_4 = (nv_rd32(dev,0x10022c) & 0xffff0000) | e->tUNK_13 << 8  | e->tUNK_13;
		timing->reg_5 |= (e->tCL + 2) << 8;
		timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16;
	} else {
		timing->reg_2 |= (unk19 - 1) << 24;
		/* XXX: reg_10022c for recentish cards pretty much unknown*/
		timing->reg_3 = e->tCL - 1;
		timing->reg_4 = (unk20 << 24 | unk21 << 16 |
					e->tUNK_13 << 8 | e->tUNK_13);
		/* XXX: +6? */
		timing->reg_5 |= (unk19 + 6) << 8;

		/* XXX: reg_10023c currently unknown
		 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
		timing->reg_7 = 0x202;
	}

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id,
		 timing->reg_0, timing->reg_1,
		 timing->reg_2, timing->reg_3);
	NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
		 timing->reg_4, timing->reg_5,
		 timing->reg_6, timing->reg_7);
	NV_DEBUG(dev, " 240: %08x\n", timing->reg_8);
}
571 | ||
/* Build NVC0-family memory timing register values (reg_0..reg_4) from a
 * vbios timing table entry, preserving the hardware's current values in
 * the masked-out bits of 0x10f294/0x10f298/0x10f2a0.
 * NOTE(review): reg_5..reg_7 are printed but never written here —
 * presumably zeroed by the caller's kcalloc(); confirm. */
void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
		struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) {
	timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP);
	timing->reg_1 = (nv_rd32(dev,0x10f294) & 0xff000000) | (e->tUNK_11&0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f);
	timing->reg_2 = (nv_rd32(dev,0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8;
	timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13;
	timing->reg_4 = (nv_rd32(dev,0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15;
	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id,
		 timing->reg_0, timing->reg_1,
		 timing->reg_2, timing->reg_3);
	NV_DEBUG(dev, " 2a0: %08x %08x %08x %08x\n",
		 timing->reg_4, timing->reg_5,
		 timing->reg_6, timing->reg_7);
}
586 | ||
/**
 * Processes the Memory Timing BIOS table, stores generated
 * register values
 * @pre init scripts were run, memtiming regs are initialized
 */
void
nouveau_mem_timing_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
	struct nvbios *bios = &dev_priv->vbios;
	struct bit_entry P;
	struct nouveau_pm_tbl_header *hdr = NULL;
	uint8_t magic_number;
	u8 *entry;
	int i;

	/* Locate the timing table via the BIT 'P' (performance) table;
	 * its offset within P.data depends on the P table version. */
	if (bios->type == NVBIOS_BIT) {
		if (bit_table(dev, 'P', &P))
			return;

		if (P.version == 1)
			hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
		else
		if (P.version == 2)
			hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
		else {
			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
		}
	} else {
		NV_DEBUG(dev, "BMP version too old for memory\n");
		return;
	}

	if (!hdr) {
		NV_DEBUG(dev, "memory timing table pointer invalid\n");
		return;
	}

	if (hdr->version != 0x10) {
		NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version);
		return;
	}

	/* validate record length */
	if (hdr->entry_len < 15) {
		NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len);
		return;
	}

	/* parse vbios entries into common format */
	memtimings->timing =
		kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL);
	if (!memtimings->timing)
		return;

	/* Get "some number" from the timing reg for NV_40 and NV_50
	 * Used in calculations later... source unknown */
	magic_number = 0;
	if (P.version == 1) {
		magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
	}

	/* Entries follow the header back to back; a zero first byte
	 * marks an unused slot. */
	entry = (u8*) hdr + hdr->header_len;
	for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
		if (entry[0] == 0)
			continue;

		timing->id = i;
		timing->WR = entry[0];
		timing->CL = entry[2];

		/* Dispatch to the per-generation register builder. */
		if(dev_priv->card_type <= NV_40) {
			nv40_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
		} else if(dev_priv->card_type == NV_50){
			nv50_mem_timing_entry(dev,&P,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
		} else if(dev_priv->card_type == NV_C0) {
			nvc0_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,&pm->memtimings.timing[i]);
		}
	}

	memtimings->nr_timing = hdr->entry_cnt;
	/* Reclocking with these timings is only supported on version-1
	 * performance tables. */
	memtimings->supported = P.version == 1;
}
673 | ||
674 | void | |
675 | nouveau_mem_timing_fini(struct drm_device *dev) | |
676 | { | |
677 | struct drm_nouveau_private *dev_priv = dev->dev_private; | |
678 | struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings; | |
679 | ||
9a782488 RS |
680 | if(mem->timing) { |
681 | kfree(mem->timing); | |
682 | mem->timing = NULL; | |
683 | } | |
7760fcb0 | 684 | } |
573a2a37 | 685 | |
c70c41e8 BS |
686 | int |
687 | nouveau_mem_vbios_type(struct drm_device *dev) | |
688 | { | |
689 | struct bit_entry M; | |
690 | u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2; | |
691 | if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) { | |
692 | u8 *table = ROMPTR(dev, M.data[3]); | |
693 | if (table && table[0] == 0x10 && ramcfg < table[3]) { | |
694 | u8 *entry = table + table[1] + (ramcfg * table[2]); | |
695 | switch (entry[0] & 0x0f) { | |
696 | case 0: return NV_MEM_TYPE_DDR2; | |
697 | case 1: return NV_MEM_TYPE_DDR3; | |
698 | case 2: return NV_MEM_TYPE_GDDR3; | |
699 | case 3: return NV_MEM_TYPE_GDDR5; | |
700 | default: | |
701 | break; | |
702 | } | |
703 | ||
704 | } | |
705 | } | |
706 | return NV_MEM_TYPE_UNKNOWN; | |
707 | } | |
708 | ||
/* TTM VRAM manager hook: the VRAM allocator is owned by the vram
 * engine, so there is nothing to set up here. */
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}
715 | ||
/* TTM VRAM manager hook: nothing was allocated in init, so nothing to
 * tear down. */
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}
722 | ||
d2f96666 BS |
723 | static inline void |
724 | nouveau_mem_node_cleanup(struct nouveau_mem *node) | |
725 | { | |
726 | if (node->vma[0].node) { | |
727 | nouveau_vm_unmap(&node->vma[0]); | |
728 | nouveau_vm_put(&node->vma[0]); | |
729 | } | |
730 | ||
731 | if (node->vma[1].node) { | |
732 | nouveau_vm_unmap(&node->vma[1]); | |
733 | nouveau_vm_put(&node->vma[1]); | |
734 | } | |
735 | } | |
736 | ||
/* TTM hook: release a VRAM allocation — drop its VM mappings, then
 * hand the node back to the vram engine allocator. */
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
748 | ||
/* TTM hook: allocate VRAM for a buffer object via the vram engine.
 * Returns 0 with mem->mm_node == NULL when out of space (TTM treats
 * that as "try another placement"), negative errno on real errors. */
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	/* Non-contiguous allocations may be split down to one large page. */
	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
			(nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}
780 | ||
/* TTM hook: dump every node of the VRAM mm (offsets are in units of
 * 4KiB blocks, hence the << 12), plus total/free/block-size summary. */
void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		/* type 0 nodes are free space */
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}
805 | ||
/* TTM memory-type manager vtable for the VRAM domain (positional
 * order: init, takedown, get_node, put_node, debug). */
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};
26c0c9e3 BS |
813 | |
/* TTM GART manager hook: no per-manager state to create. */
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}
819 | ||
/* TTM GART manager hook: no per-manager state to destroy. */
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}
825 | ||
/* TTM hook: free a GART allocation — drop VM mappings, then free the
 * node that nouveau_gart_manager_new() kzalloc'd. */
static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
834 | ||
/* TTM hook: "allocate" GART space for a buffer object.  The aperture
 * is virtual, so only a tracking node is created here; mem->start is
 * always 0 and actual placement happens at bind time. */
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	/* Reject requests that can never fit in the aperture. */
	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;	/* GART always uses small (4KiB) pages */

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}
857 | ||
/* TTM hook: nothing useful to dump for the virtual GART aperture. */
void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
862 | ||
/* TTM memory-type manager vtable for the GART (TT) domain (positional
 * order: init, takedown, get_node, put_node, debug). */
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};