/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include "psb_drv.h"


/*
 * GTT resource allocator - manage page mappings in GTT space
 */

/**
 * psb_gtt_mask_pte - generate GTT pte entry
 * @pfn: page number to encode
 * @type: type of memory in the GTT
 *
 * Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
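
/*
 * Illustrative sketch only, not part of the driver: for a page at
 * pfn 0x1000 that should be mapped cached and read-only, the helper
 * above would, assuming the usual 4K pages (PAGE_SHIFT of 12), yield
 *
 *	psb_gtt_mask_pte(0x1000, PSB_MMU_CACHED_MEMORY | PSB_MMU_RO_MEMORY)
 *		== (0x1000 << 12) | PSB_PTE_VALID | PSB_PTE_CACHED | PSB_PTE_RO
 *
 * i.e. the page address in the upper bits of the entry with the type
 * flags folded into the low bits.
 */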

/**
 * psb_gtt_entry - find the GTT entries for a gtt_range
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Given a gtt_range object return the GTT offset of the page table
 * entries for this gtt_range
 */
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 * psb_gtt_insert - put an object into the GTT
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Take our preallocated GTT range and insert the GEM object into
 * the GTT. This is protected via the gtt mutex which the caller
 * must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
	u32 *gtt_slot, pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	/* Make sure changes are visible to the GPU */
	set_pages_array_uc(pages, r->npage);

	/* Write our page entries into the GTT itself */
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}
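
/*
 * Worked example (illustrative, not part of the driver): for a four
 * page object whose roll is 1, the two loops above fill consecutive
 * GTT slots in the order page[1], page[2], page[3], page[0], so the
 * first slot of the mapping shows page 1 of the buffer. A roll of 0
 * simply writes the pages in their natural order.
 */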

/**
 * psb_gtt_remove - remove an object from the GTT
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Remove a preallocated GTT range from the GTT. Overwrite all the
 * page table entries with the dummy page. This is protected via the gtt
 * mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 * psb_gtt_roll - set scrolling position
 * @dev: our DRM device
 * @r: the gtt mapping we are using
 * @roll: roll offset
 *
 * Roll an existing pinned mapping by moving the pages through the GTT.
 * This allows us to implement hardware scrolling on the consoles without
 * a 2D engine
 */
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 *gtt_slot, pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no worry we will write the mapping at
	   the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	ioread32(gtt_slot - 1);
}
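
/*
 * Usage sketch (illustrative, with made up names): a framebuffer
 * console panning down by some number of text lines on a pinned
 * scanout range "fb" could do something along the lines of
 *
 *	new_roll = (lines * line_pitch / PAGE_SIZE) % fb->npage;
 *	psb_gtt_roll(dev, fb, new_roll);
 *
 * which rewrites the already pinned mapping so scanout starts at the
 * new page without any 2D engine copy. This assumes the framebuffer
 * pitch works out to a whole number of pages per scroll step.
 */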

/**
 * psb_gtt_attach_pages - attach and pin GEM pages
 * @gt: the gtt range
 *
 * Pin and build an in kernel list of the pages that back our GEM object.
 * While we hold this the pages cannot be swapped out. This is protected
 * via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct inode *inode;
	struct address_space *mapping;
	int i;
	struct page *p;
	int pages = gt->gem.size / PAGE_SIZE;

	WARN_ON(gt->pages);

	/* This is the shared memory object that backs the GEM resource */
	inode = gt->gem.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
	if (gt->pages == NULL)
		return -ENOMEM;
	gt->npage = pages;

	for (i = 0; i < pages; i++) {
		/* FIXME: needs updating as per mail from Hugh Dickins */
		p = read_cache_page_gfp(mapping, i,
					__GFP_COLD | GFP_KERNEL);
		if (IS_ERR(p))
			goto err;
		gt->pages[i] = p;
	}
	return 0;

err:
	while (i--)
		page_cache_release(gt->pages[i]);
	kfree(gt->pages);
	gt->pages = NULL;
	return PTR_ERR(p);
}

/**
 * psb_gtt_detach_pages - detach and unpin GEM pages
 * @gt: the gtt range
 *
 * Undo the effect of psb_gtt_attach_pages. At this point the pages
 * must have been removed from the GTT as they could now be paged out
 * and move bus address. This is protected via the gtt mutex which the
 * caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	int i;
	for (i = 0; i < gt->npage; i++) {
		/* FIXME: do we need to force dirty */
		set_page_dirty(gt->pages[i]);
		page_cache_release(gt->pages[i]);
	}
	kfree(gt->pages);
	gt->pages = NULL;
}

/**
 * psb_gtt_pin - pin pages into the GTT
 * @gt: range to pin
 *
 * Pin a set of pages into the GTT. The pins are refcounted so that
 * multiple pins need multiple unpins to undo.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

/**
 * psb_gtt_unpin - Drop a GTT pin requirement
 * @gt: range to unpin
 *
 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 * will be removed from the GTT which will also drop the page references
 * and allow the VM to clean up or page out the backing store.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}
	mutex_unlock(&dev_priv->gtt_mutex);
}
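
/*
 * Usage sketch (illustrative, hypothetical caller): code that needs a
 * GEM backed object resident while it programs the hardware brackets
 * the work with a pin/unpin pair, e.g.
 *
 *	ret = psb_gtt_pin(gt);
 *	if (ret == 0) {
 *		... program hardware using gt->offset ...
 *		psb_gtt_unpin(gt);
 *	}
 *
 * Because the pins are refcounted, nested users simply pin again and
 * the pages only leave the GTT when the final unpin drops in_gart back
 * to zero.
 */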

/*
 * GTT resource allocator - allocate and manage GTT address space
 */

/**
 * psb_gtt_alloc_range - allocate GTT address space
 * @dev: Our DRM device
 * @len: length (bytes) of address space required
 * @name: resource name
 * @backed: resource should be backed by stolen pages
 *
 * Ask the kernel core to find us a suitable range of addresses
 * to use for a GTT mapping.
 *
 * Returns a gtt_range structure describing the object, or NULL on
 * error. On successful return the resource is both allocated and marked
 * as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
						const char *name, int backed)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, PAGE_SIZE, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}
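
/*
 * Usage sketch (illustrative, with made up names and sizes): the
 * framebuffer setup path might carve a scanout buffer out of stolen
 * memory with
 *
 *	struct gtt_range *backing;
 *	backing = psb_gtt_alloc_range(dev, size, "fb", 1);
 *	if (backing == NULL)
 *		return -ENOMEM;
 *
 * while ordinary GEM objects pass backed = 0 so they are placed in the
 * region above the stolen pages and only get real pages when
 * psb_gtt_pin() attaches their shmem backing.
 */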

/**
 * psb_gtt_free_range - release GTT address space
 * @dev: our DRM device
 * @gt: a mapping created with psb_gtt_alloc_range
 *
 * Release a resource that was allocated with psb_gtt_alloc_range. If the
 * object has been pinned by mmap users we clean this up here currently.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);	/* the stolen area, not the gtt_map unmapped above */
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	uint32_t vram_pages;
	uint32_t dvmt_mode = 0;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	mutex_init(&dev_priv->gtt_mutex);

	psb_gtt_alloc(dev);
	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 * The video mmu has a hw bug when accessing 0xD0000000.
	 * Make gatt start at 0xE0000000. This doesn't actually
	 * matter for us but may do if the video acceleration ever
	 * gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* Some CDV firmware doesn't report this currently. In which case the
	   system has 64 gtt pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV SDV systems. Fudge it in this case.
		   We really don't care what imaginary space is being allocated
		   at this point */
		dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	printk(KERN_INFO "Stolen memory information\n");
	printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
	printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
		vram_stolen_size/1024);
	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
	printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);

	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 * Map the GTT and the stolen memory area
	 */
	dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
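
/*
 * Illustrative summary (not part of the driver) of the mapping that
 * psb_gtt_init() leaves behind; the entry counts depend on the
 * hardware and firmware reported sizes:
 *
 *	gtt_map[0 .. num_pages - 1]		-> the stolen memory pages
 *	gtt_map[num_pages .. gtt_pages - 1]	-> the scratch page
 *
 * so no initialized GTT slot points at random memory. GEM objects
 * later overwrite scratch entries through psb_gtt_insert() when they
 * are pinned, and psb_gtt_remove() puts the scratch page back.
 */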