drm/i915: Re-enable GGTT earlier during resume on pre-gen6 platforms
[deliverable/linux.git] drivers/gpu/drm/i915/i915_gem_gtt.h
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

/* gen6-hsw use PTE bits 11:4 to carry physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
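/*
 * Illustrative example (not part of the original header): for a physical
 * address with bit 32 set, e.g. 0x100000000, ((addr >> 28) & 0xff0) == 0x10,
 * i.e. address bit 32 lands in PTE bit 4 and bits 39:32 map onto PTE bits 11:4.
 */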
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)

#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
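/*
 * For illustration only, assuming 4 KiB pages: a 4-byte gen6 PTE gives
 * GEN6_PTES == 4096 / 4 == 1024 entries per page table, while an 8-byte gen8
 * PTE gives GEN8_PTES == 512. Each gen6 PDE decodes 1 << GEN6_PDE_SHIFT ==
 * 4 MiB of address space, so the 512 PDEs of a page directory cover 2 GiB.
 */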
#define GEN6_PDE_VALID (1 << 0)

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
#define BYT_PTE_WRITEABLE (1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
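/*
 * Worked example (illustrative): HSW_CACHEABILITY_CONTROL(0x8) keeps the low
 * three bits clear and shifts the fourth bit up by 8, so only PTE bit 11 is
 * set (0x800); HSW_CACHEABILITY_CONTROL(0x7) sets PTE bits 3:1 (0x00e) and
 * leaves bit 11 clear.
 */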

/* A GEN8 legacy-style address is defined as a 3-level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3-level page table is that the
 * PDPEs are programmed via register.
 *
 * A GEN8 48b legacy-style address is defined as a 4-level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
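/*
 * Illustrative decomposition (added example, not from the original header):
 * for the 48b address 0x123456789, gen8_pml4e_index() == 0,
 * gen8_pdpe_index() == 4, gen8_pde_index() == 0x11a, gen8_pte_index() == 0x56,
 * and the page offset is 0x789.
 */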
#define GEN8_PML4ES_PER_PML4 512
#define GEN8_PML4E_SHIFT 39
#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT 30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b
 * page tables */
#define GEN8_PDPE_MASK 0x1ff
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)

#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))

enum i915_ggtt_view_type {
        I915_GGTT_VIEW_NORMAL = 0,
        I915_GGTT_VIEW_ROTATED,
        I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
        unsigned int uv_offset;
        uint32_t pixel_format;
        unsigned int uv_start_page;
        struct {
                /* tiles */
                unsigned int width, height;
        } plane[2];
};

struct i915_ggtt_view {
        enum i915_ggtt_view_type type;

        union {
                struct {
                        u64 offset;
                        unsigned int size;
                } partial;
                struct intel_rotation_info rotated;
        } params;

        struct sg_table *pages;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
        void __iomem *iomap;

        /** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
        unsigned int bound : 4;
        bool is_ggtt : 1;

        /**
         * Support different GGTT views into the same object.
         * This means there can be multiple VMA mappings per object and per VM.
         * i915_ggtt_view_type is used to distinguish between those entries.
         * The default of zero (I915_GGTT_VIEW_NORMAL) is also the one assumed
         * in GEM functions which take no ggtt view parameter.
         */
        struct i915_ggtt_view ggtt_view;

        /** This object's place on the active/inactive lists */
        struct list_head vm_link;

        struct list_head obj_link; /* Link in the object's VMA list */

        /** This vma's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;

        /**
         * Used for performing relocations during execbuffer insertion.
         */
        struct hlist_node exec_node;
        unsigned long exec_handle;
        struct drm_i915_gem_exec_object2 *exec_entry;

        /**
         * How many users have pinned this object in GTT space. The following
         * users can each hold at most one reference: pwrite/pread, execbuffer
         * (objects are not allowed multiple times for the same batchbuffer),
         * and the framebuffer code. When switching/pageflipping, the
         * framebuffer code has at most two buffers pinned per crtc.
         *
         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
         * bits with absolutely no headroom. So use 4 bits.
         */
        unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

struct i915_page_dma {
        struct page *page;
        union {
                dma_addr_t daddr;

                /* For gen6/gen7 only. This is the offset in the GGTT
                 * where the page directory entries for PPGTT begin
                 */
                uint32_t ggtt_offset;
        };
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
        struct i915_page_dma base;
};

struct i915_page_table {
        struct i915_page_dma base;

        unsigned long *used_ptes;
};

struct i915_page_directory {
        struct i915_page_dma base;

        unsigned long *used_pdes;
        struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
        struct i915_page_dma base;

        unsigned long *used_pdpes;
        struct i915_page_directory **page_directory;
};

struct i915_pml4 {
        struct i915_page_dma base;

        DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
        struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
        struct list_head global_link;
        u64 start;              /* Start offset always 0 for dri2 */
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */

        bool is_ggtt;

        struct i915_page_scratch *scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
        struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

        /**
         * List of objects currently involved in rendering.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * LRU list of objects which are not in the ringbuffer and
         * are ready to unbind, but are still in the GTT.
         *
         * last_read_req is NULL while an object is in this list.
         *
         * A reference is not held on the buffer while on this list,
         * as merely being GTT-bound shouldn't prevent its being
         * freed, and we'll pull it off the list in the free path.
         */
        struct list_head inactive_list;

        /* FIXME: Need a more generic return type */
        gen6_pte_t (*pte_encode)(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid, u32 flags); /* Create a valid PTE */
        /* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
        int (*allocate_va_range)(struct i915_address_space *vm,
                                 uint64_t start,
                                 uint64_t length);
        void (*clear_range)(struct i915_address_space *vm,
                            uint64_t start,
                            uint64_t length,
                            bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               uint64_t start,
                               enum i915_cache_level cache_level, u32 flags);
        void (*cleanup)(struct i915_address_space *vm);
        /** Unmap an object from an address space. This usually consists of
         * setting the valid PTE entries to a reserved scratch page. */
        void (*unbind_vma)(struct i915_vma *vma);
        /* Map an object into an address space with the given cache flags. */
        int (*bind_vma)(struct i915_vma *vma,
                        enum i915_cache_level cache_level,
                        u32 flags);
};
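/*
 * Illustrative call pattern (a sketch, not a quote from the driver): mapping
 * a VMA goes through the per-address-space hooks above, e.g.
 * vma->vm->bind_vma(vma, cache_level, GLOBAL_BIND), and unmapping it again
 * uses vma->vm->unbind_vma(vma), which normally points the PTEs back at the
 * scratch page.
 */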

#define i915_is_ggtt(V) ((V)->is_ggtt)

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
        struct i915_address_space base;

        size_t stolen_size;             /* Total size of stolen memory */
        size_t stolen_usable_size;      /* Total size minus BIOS reserved */
        size_t stolen_reserved_base;
        size_t stolen_reserved_size;
        size_t size;                    /* Total size of Global GTT */
        u64 mappable_end;               /* End offset that we can CPU map */
        struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */

        /** "Graphics Stolen Memory" holds the global PTEs */
        void __iomem *gsm;

        bool do_idle_maps;

        int mtrr;

        int (*probe)(struct i915_ggtt *ggtt);
};

struct i915_hw_ppgtt {
        struct i915_address_space base;
        struct kref ref;
        struct drm_mm_node node;
        unsigned long pd_dirty_rings;
        union {
                struct i915_pml4 pml4;                  /* GEN8+ & 48b PPGTT */
                struct i915_page_directory_pointer pdp; /* GEN8+ */
                struct i915_page_directory pd;          /* GEN6-7 */
        };

        struct drm_i915_file_private *file_priv;

        gen6_pte_t __iomem *pd_addr;

        int (*enable)(struct i915_hw_ppgtt *ppgtt);
        int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
                         struct drm_i915_gem_request *req);
        void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/* Iterates over every pde between start and start + length. If start and
 * start + length are not perfectly divisible, the macro rounds down and up
 * as needed. The macro modifies pde, start, and length. Dev is only used to
 * differentiate shift values. Temp is a scratch variable. On gen6/7,
 * start = 0 and length = 2G effectively iterates over every PDE in the
 * system.
 *
 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 */
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
        for (iter = gen6_pde_index(start); \
             length > 0 && iter < I915_PDES ? \
                        (pt = (pd)->page_table[iter]), 1 : 0; \
             iter++, \
             temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
             temp = min_t(unsigned, temp, length), \
             start += temp, length -= temp)
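/*
 * Usage sketch (illustrative, not taken from the driver): walking the page
 * tables backing a 64 KiB range of a gen6 ppgtt could look like
 *
 *      uint32_t pde, temp;
 *      struct i915_page_table *pt;
 *      uint64_t start = vma->node.start, length = 64 << 10;
 *
 *      gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde)
 *              write_pde(ppgtt, pde, pt);
 *
 * where write_pde() is a placeholder for whatever per-PDE work the caller
 * does; note the macro consumes start and length as it advances.
 */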

#define gen6_for_all_pdes(pt, ppgtt, iter) \
        for (iter = 0; \
             pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
             iter++)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
        const uint32_t mask = NUM_PTE(pde_shift) - 1;

        return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
                                      uint32_t pde_shift)
{
        const uint64_t mask = ~((1ULL << pde_shift) - 1);
        uint64_t end;

        WARN_ON(length == 0);
        WARN_ON(offset_in_page(addr|length));

        end = addr + length;

        if ((addr & mask) != (end & mask))
                return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

        return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
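/*
 * Worked example (illustrative): with GEN6_PDE_SHIFT == 22 a PDE covers 4 MiB.
 * For addr == 4 MiB - 8 KiB and length == 16 KiB the range crosses a page
 * table boundary, so i915_pte_count() returns only the 2 remaining PTEs up to
 * that boundary rather than 4; callers are expected to advance and ask again.
 */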

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
        return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
        return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
        return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
        return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde between start and
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter) \
        for (iter = gen8_pde_index(start); \
             length > 0 && iter < I915_PDES && \
                (pt = (pd)->page_table[iter], true); \
             ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
                temp = min(temp - start, length); \
                start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
        for (iter = gen8_pdpe_index(start); \
             length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
                (pd = (pdp)->page_directory[iter], true); \
             ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
                temp = min(temp - start, length); \
                start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
        for (iter = gen8_pml4e_index(start); \
             length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
                (pdp = (pml4)->pdps[iter], true); \
             ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
                temp = min(temp - start, length); \
                start += temp, length -= temp; }), ++iter)
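/*
 * Note (added for clarity): gen8_for_each_pdpe() expands
 * I915_PDPES_PER_PDP(dev), so it relies on a variable named dev being in
 * scope at the call site; the other two iterators use compile-time bounds.
 */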

static inline uint32_t gen8_pte_index(uint64_t address)
{
        return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
        return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
        return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
        return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
        return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
        return test_bit(n, ppgtt->pdp.used_pdpes) ?
                px_dma(ppgtt->pdp.page_directory[n]) :
                px_dma(ppgtt->base.scratch_pd);
}

int i915_ggtt_init_hw(struct drm_device *dev);
int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);

int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
                                        struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
        if (ppgtt)
                kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
        if (ppgtt)
                kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
                     const struct i915_ggtt_view *b)
{
        if (WARN_ON(!a || !b))
                return false;

        if (a->type != b->type)
                return false;
        if (a->type != I915_GGTT_VIEW_NORMAL)
                return !memcmp(&a->params, &b->params, sizeof(a->params));
        return true;
}

size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
                    const struct i915_ggtt_view *view);

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping; the
 * caller must call i915_vma_unpin_iomap() to relinquish the pinning after
 * the iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
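/*
 * Usage sketch (illustrative only): with struct_mutex held and the VMA pinned
 * in the mappable GGTT,
 *
 *      void __iomem *ptr = i915_vma_pin_iomap(vma);
 *      if (IS_ERR(ptr))
 *              return PTR_ERR(ptr);
 *      writel(value, ptr + offset);
 *      i915_vma_unpin_iomap(vma);
 *
 * where value and offset are caller-supplied placeholders.
 */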

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        lockdep_assert_held(&vma->vm->dev->struct_mutex);
        GEM_BUG_ON(vma->pin_count == 0);
        GEM_BUG_ON(vma->iomap == NULL);
        vma->pin_count--;
}

#endif