/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
 */
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
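
/*
 * Worked example (illustrative only, not part of the ABI): with a 40b
 * physical address, GEN6_GTT_ADDR_ENCODE() leaves bits 31:12 in place and
 * folds address bits 39:32 down into PTE bits 11:4. For the made-up address
 * 0x12_3456_7000:
 *
 *	(0x1234567000ULL >> 28) & 0xff0 == 0x120	(addr bits 39:32 == 0x12)
 *	GEN6_PTE_ADDR_ENCODE(0x1234567000ULL) | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID
 *	puts 0x34567125 in the 32b PTE (gen6_pte_t truncates the high bits).
 */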
#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))
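
/*
 * Sketch of the arithmetic these helpers encode, assuming the usual 4KiB
 * PAGE_SIZE: I915_PTES(sizeof(gen6_pte_t)) == 4096 / 4 == 1024 entries per
 * gen6 page table, while I915_PTES(sizeof(gen8_pte_t)) == 4096 / 8 == 512.
 * NUM_PTE(GEN6_PDE_SHIFT) == 1 << (22 - 12) == 1024, i.e. the number of 4KiB
 * pages covered by a single gen6 PDE.
 */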
#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)
/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
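
/*
 * Worked example (illustrative): HSW_CACHEABILITY_CONTROL(0xb) splits the
 * 4-bit value 0b1011 so that the low three bits 0b011 land in PTE bits 3:1
 * (0x6) and the fourth bit lands in PTE bit 11 (0x800); hence
 * HSW_WB_ELLC_LLC_AGE0 == 0x806.
 */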
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
 * tables. */
#define GEN8_PDPE_MASK			0x1ff
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
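
/*
 * Worked example (illustrative): the made-up 48b GPU virtual address
 * 0x80_8060_4005 decodes under the layout above as
 *	PML4E = (addr >> GEN8_PML4E_SHIFT) & (GEN8_PML4ES_PER_PML4 - 1) == 1
 *	PDPE  = (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK		== 2
 *	PDE   = (addr >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK		== 3
 *	PTE   = (addr >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK		== 4
 * with the remaining low 12 bits (0x005) as the byte offset into the page.
 */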
#define I915_PDPES_PER_PDP(dev)	(USES_FULL_48BIT_PPGTT(dev) ?\
				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */
#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
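
/*
 * Illustrative sketch of how the PPAT defines compose (not a statement of
 * what the driver actually programs): GEN8_PPAT() shifts one 8-bit entry into
 * slot i of the 64-bit PPAT register, so
 *	GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3))
 * places (0x3 | 0x4 | 0x30) == 0x37 in bits 7:0, while
 *	GEN8_PPAT(2, GEN8_PPAT_WT)
 * places 0x2 in bits 23:16.
 */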
enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};
struct intel_rotation_info {
	uint32_t pixel_format;
	uint64_t fb_modifier;
	unsigned int width_pages, height_pages;
};
struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			unsigned long offset;
			unsigned int size;
		} partial;
	} params;

	struct sg_table *pages;

	struct intel_rotation_info rotation_info;
};
extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
#define LOCAL_BIND	(1<<1)
	unsigned int bound : 4;
	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default one of zero (I915_GGTT_VIEW_NORMAL) is also the one
	 * assumed in GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;
	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};
struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)
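
/*
 * Usage sketch (illustrative): every paging structure below embeds a
 * struct i915_page_dma named "base", so the px_*() helpers apply to any of
 * them. Given some struct i915_page_table *pt:
 *
 *	px_dma(pt)	expands to pt->base.daddr
 *	px_page(pt)	expands to pt->base.page
 */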
struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};
struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};
struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};
struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
struct i915_address_space {
	struct drm_device *dev;
	struct list_head global_link;
	u64 start;		/* Start offset always 0 for dri2 */
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;
	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 u64 *mappable_end);
};
struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;			/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	struct drm_i915_file_private *file_priv;

	gen6_pte_t __iomem *pd_addr;
	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
/* For each pde, iterates over every pde between start and start + length.
 * If start, and start+length are not perfectly divisible, the macro will round
 * down, and up as needed. The macro modifies pde, start, and length. Dev is
 * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0,
 * and length = 2G effectively iterates over every PDE in the system.
 *
 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 */
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen6_pde_index(start); \
	     pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
	     temp = min_t(unsigned, temp, length), \
	     start += temp, length -= temp)
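
/* Usage sketch (illustrative; "ppgtt", "start" and "length" are hypothetical
 * locals at the call site):
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde, temp;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
 *		// "pt" is the page table backing this PDE;
 *		// gen6_pte_count(start, length) says how many of its PTEs
 *		// fall inside the remaining range.
 *	}
 */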
#define gen6_for_all_pdes(pt, ppgtt, iter) \
	for (iter = 0; \
	     pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
	     iter++)
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1 << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
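
/*
 * Worked example (illustrative, gen6 numbers): with GEN6_PDE_SHIFT == 22 a
 * page table spans 4MiB. For addr == 0x3ff000 and length == 0x2000 the range
 * crosses that 4MiB boundary, so only the PTEs up to the boundary are
 * counted: NUM_PTE(22) - i915_pte_index(0x3ff000, 22) == 1024 - 1023 == 1.
 * A range that stays inside one table simply yields its size in pages.
 */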
static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}
static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}
static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}
static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
/* Equivalent to the gen6 version. For each pde, iterates over every pde
 * between start and start + length. On gen8+ it simply iterates
 * over every page directory entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen8_pde_index(start); \
	     pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
	for (iter = gen8_pdpe_index(start); \
	     pd = (pdp)->page_directory[iter], \
	     length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
	     temp = min(temp, length), \
	     start += temp, length -= temp)
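
/* Usage sketch (illustrative; locals are hypothetical): the PDPE walk mirrors
 * the PDE walk one level up:
 *
 *	struct i915_page_directory *pd;
 *	uint64_t temp;
 *	uint32_t pdpe;
 *
 *	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
 *		// per-PDPE work; note the condition uses I915_PDPES_PER_PDP(dev),
 *		// so a "dev" pointer must be in scope at the call site.
 *	}
 */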
static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}
static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}
static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}
static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	WARN_ON(1); /* For 64B */
	return 0;
}
static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
					struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}

static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
		     const struct i915_ggtt_view *b)
{
	if (WARN_ON(!a || !b))
		return false;

	if (a->type != b->type)
		return false;
	if (a->type == I915_GGTT_VIEW_PARTIAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));

	return true;
}
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view);

#endif