drm/i915: Release vma when the handle is closed
drivers/gpu/drm/i915/i915_gem_gtt.h
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>

#include "i915_gem_request.h"

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)

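/*
 * Worked example (editor's illustration, not part of the original header):
 * for a 40-bit physical page address such as 0x1234567000, bits 39:32 (0x12
 * here) are shifted down by 28 so they land in PTE bits 11:4, i.e.
 * GEN6_GTT_ADDR_ENCODE(0x1234567000ULL) ORs 0x120 into the low bits that are
 * otherwise unused for a page-aligned address.
 */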
#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
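/*
 * Worked numbers (editor's illustration, assuming 4KiB pages): with 4-byte
 * gen6 PTEs, I915_PTES(sizeof(gen6_pte_t)) = 4096 / 4 = 1024 entries per page
 * table, and GEN6_PDE_SHIFT = 22 means each PDE maps 4MiB. With 8-byte gen8
 * PTEs a page table holds 4096 / 8 = 512 entries, matching I915_PDES.
 */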
#define GEN6_PDE_VALID (1 << 0)

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
#define BYT_PTE_WRITEABLE (1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)

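/*
 * Worked example (editor's illustration, not part of the original header):
 * HSW_CACHEABILITY_CONTROL(0xb) places the low three bits (0x3) into PTE
 * bits 3:1, giving 0x6, and the fourth bit into PTE bit 11, giving 0x800,
 * so HSW_WB_ELLC_LLC_AGE0 encodes to 0x806.
 */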
/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
#define GEN8_PML4ES_PER_PML4 512
#define GEN8_PML4E_SHIFT 39
#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT 30
/* NB: GEN8_PDPE_MASK is wider than needed on 32b platforms (which only have
 * 4 PDPEs), but the extra bits have no effect on 32b page tables. */
#define GEN8_PDPE_MASK 0x1ff
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ? \
				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)

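/*
 * Worked example (editor's illustration, not part of the original header):
 * under the 32b legacy layout the GPU address 0x12345678 decodes to
 * PDPE = 0x12345678 >> 30 = 0, PDE = (0x12345678 >> 21) & 0x1ff = 0x91,
 * PTE = (0x12345678 >> 12) & 0x1ff = 0x145, with a 0x678 byte offset within
 * the page.
 */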
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))

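/*
 * Illustrative sketch (editor's assumption, mirroring how such PPAT values
 * are typically composed rather than the driver's exact setup): GEN8_PPAT()
 * shifts each entry into its own byte of a 64-bit value, so a table can be
 * built by OR-ing per-index terms, e.g.
 *
 *	uint64_t pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3)) |
 *		       GEN8_PPAT(1, GEN8_PPAT_UC);
 */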
enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	unsigned int uv_offset;
	uint32_t pixel_format;
	unsigned int uv_start_page;
	struct {
		/* tiles */
		unsigned int width, height;
	} plane[2];
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotated;
	} params;

	struct sg_table *pages;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

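/*
 * Illustrative sketch (editor's assumption, not from the original header):
 * a caller describing a partial GGTT view of an object might fill the view
 * like this, with offset/size in whatever units the partial-view code
 * expects (pages, in this era of the driver, as far as the editor can tell):
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.params.partial = { .offset = 0, .size = 1 },
 *	};
 *
 * The normal view needs no parameters and is simply i915_ggtt_view_normal.
 */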
enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	void __iomem *iomap;

	unsigned int active;
	struct i915_gem_active last_read[I915_NUM_ENGINES];

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
	unsigned int bound : 4;
	bool is_ggtt : 1;
	bool closed : 1;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also what is assumed
	 * in GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
	return vma->active;
}

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return i915_vma_get_active(vma);
}

static inline void i915_vma_set_active(struct i915_vma *vma,
				       unsigned int engine)
{
	vma->active |= BIT(engine);
}

static inline void i915_vma_clear_active(struct i915_vma *vma,
					 unsigned int engine)
{
	vma->active &= ~BIT(engine);
}

static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
					      unsigned int engine)
{
	return vma->active & BIT(engine);
}

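/*
 * Illustrative usage sketch (editor's assumption, not a driver function):
 * the 'active' field is a per-engine bitmask, so retiring work on one engine
 * clears just that engine's bit and the vma only becomes idle once every
 * engine's bit has been cleared.
 */
static inline void example_vma_retire_engine(struct i915_vma *vma,
					     unsigned int engine)
{
	i915_vma_clear_active(vma, engine);
	if (!i915_vma_is_active(vma)) {
		/* vma is now idle on all engines; e.g. move it to the
		 * inactive list (not shown here). */
	}
}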
struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

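/*
 * Editor's note (illustration, not original text): the px_*() helpers work on
 * any of the page-table structures below because each embeds a struct
 * i915_page_dma named 'base' as its first member, e.g. px_dma(pd) expands to
 * (&(pd)->base)->daddr for a struct i915_page_directory *pd.
 */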
struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

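/*
 * Editor's illustration (not original text) of how these structures nest for
 * a 48b gen8 PPGTT, top to bottom:
 *
 *	i915_pml4
 *	  -> pdps[512]               (i915_page_directory_pointer)
 *	       -> page_directory[]   (i915_page_directory, 512 entries)
 *	            -> page_table[512] (i915_page_table)
 *	                 -> 512 PTEs, each mapping one 4KiB page
 *
 * The 32b legacy layout skips the PML4 level and uses a 4-entry PDP instead.
 */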
struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 start;	/* Start offset always 0 for dri2 */
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    uint64_t offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

#define i915_is_ggtt(V) (!(V)->file)

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
	size_t stolen_reserved_base;
	size_t stolen_reserved_size;
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;			/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible by the PDE size, the
 * macro will round down and up as needed. Start=0 and length=2G effectively
 * iterates over every PDE in the system. The macro modifies ALL its
 * parameters except 'pd', so each of the other parameters should preferably
 * be a simple variable, or at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
		(pt = (pd)->page_table[iter], true); \
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen6_for_all_pdes(pt, pd, iter) \
	for (iter = 0; \
	     iter < I915_PDES && \
		(pt = (pd)->page_table[iter], true); \
	     ++iter)

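/*
 * Illustrative usage sketch (editor's assumption, not a driver function):
 * because gen6_for_each_pde() consumes 'start' and 'length' as it walks,
 * callers should pass local copies of any range they still need afterwards.
 */
static inline void example_gen6_walk_range(struct i915_page_directory *pd,
					   uint64_t start, uint64_t length)
{
	struct i915_page_table *pt;
	uint32_t pde;

	gen6_for_each_pde(pt, pd, start, length, pde) {
		/* 'pt' is the page table backing this slice of the range;
		 * a real caller would allocate or tear it down here. */
	}
}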
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

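/*
 * Worked example (editor's illustration, not part of the original header):
 * with 4KiB pages and pde_shift = GEN6_PDE_SHIFT (22), a request for
 * addr = 0x1000, length = 0x3000 stays inside one page table, so
 * i915_pte_count() returns i915_pte_index(0x4000) - i915_pte_index(0x1000)
 * = 4 - 1 = 3 PTEs; a range that crosses a 4MiB boundary is instead clamped
 * to the PTEs remaining in the first page table.
 */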
static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde from start until
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen8_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
		(pt = (pd)->page_table[iter], true); \
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
	for (iter = gen8_pdpe_index(start); \
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
		(pd = (pdp)->page_directory[iter], true); \
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
	for (iter = gen8_pml4e_index(start); \
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
		(pdp = (pml4)->pdps[iter], true); \
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
					struct drm_i915_file_private *fpriv);

static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}

static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);

int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);

static inline bool
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
		     const struct i915_ggtt_view *b)
{
	if (WARN_ON(!a || !b))
		return false;

	if (a->type != b->type)
		return false;
	if (a->type != I915_GGTT_VIEW_NORMAL)
		return !memcmp(&a->params, &b->params, sizeof(a->params));

	return true;
}

size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view);

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping; the
 * caller must call i915_vma_unpin_iomap to relinquish the pinning once the
 * iomapping is no longer required.
 *
 * Callers must hold the struct_mutex.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x))

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
 *
 * Callers must hold the struct_mutex. This function is only valid to be
 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
 */
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->dev->struct_mutex);
	GEM_BUG_ON(vma->pin_count == 0);
	GEM_BUG_ON(vma->iomap == NULL);
	vma->pin_count--;
}

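/*
 * Illustrative usage sketch (editor's assumption, not a driver function):
 * a typical pin_iomap/unpin_iomap pairing for poking a dword through the
 * CPU-visible aperture mapping of a GGTT-pinned vma, under struct_mutex.
 */
static inline int example_vma_write_dword(struct i915_vma *vma,
					  unsigned long offset, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(value, ptr + offset);
	i915_vma_unpin_iomap(vma);

	return 0;
}
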
#endif