drm/i915: Do not use {HAS_*, IS_*, INTEL_INFO}(dev_priv->dev)
drivers/gpu/drm/i915/i915_gem_gtt.c
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *    1212
 *    3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, are operating on,
 * or with the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */
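
/*
 * Illustrative sketch only (not part of the driver): following the three
 * steps above, a hypothetical new view would add an enumerator such as
 * I915_GGTT_VIEW_EXAMPLE to enum i915_ggtt_view_type, extend struct
 * i915_ggtt_view with whatever metadata that view needs, and teach
 * i915_get_vma_pages() to build the matching scatter-gather table for it.
 */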

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
	.type = I915_GGTT_VIEW_ROTATED,
};

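/* Map the i915.enable_ppgtt module parameter onto the PPGTT mode the
 * hardware and current configuration can actually support:
 * 0 = disabled, 1 = aliasing, 2 = full 32b, 3 = full 48b.
 */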
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;

	if (intel_vgpu_active(dev))
		has_full_ppgtt = false; /* emulation is too hard */

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

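/* The *_pte_encode() helpers below pack a DMA address plus caching
 * attributes into the platform specific PTE layout: PPAT indices for gen8+
 * here, and the gen6/7 variants (snb/ivb/byt/hsw/iris) further down.
 */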
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid)
{
	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

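/* Allocate a single page for GTT structures and DMA-map it so the GPU can
 * reference it; i915_page_dma couples the struct page with its DMA address.
 */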
static int __setup_page_dma(struct drm_device *dev,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *device = &dev->pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(device,
				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(device, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	return __setup_page_dma(dev, p, GFP_KERNEL);
}

static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))

#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))

static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
			  const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev, vaddr);
}

static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
			     const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev, p, v);
}

static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
{
	struct i915_page_scratch *sp;
	int ret;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (sp == NULL)
		return ERR_PTR(-ENOMEM);

	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
	if (ret) {
		kfree(sp);
		return ERR_PTR(ret);
	}

	set_pages_uc(px_page(sp), 1);

	return sp;
}

static void free_scratch_page(struct drm_device *dev,
			      struct i915_page_scratch *sp)
{
	set_pages_wb(px_page(sp), 1);

	cleanup_px(dev, sp);
	kfree(sp);
}

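/* Allocate one page table plus the bitmap that tracks which of its PTEs are
 * in use; the backing page is DMA-mapped via setup_px().
 */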
static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
		GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
	cleanup_px(dev, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC, true);

	fill_px(vm->dev, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(px_dma(vm->scratch_page) == 0);

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	fill32_px(vm->dev, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
	struct i915_page_directory *pd;
	int ret = -ENOMEM;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
		goto fail_bitmap;

	ret = setup_px(dev, pd);
	if (ret)
		goto fail_page_m;

	return pd;

fail_page_m:
	kfree(pd->used_pdes);
fail_bitmap:
	kfree(pd);

	return ERR_PTR(ret);
}

static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->dev, pd, scratch_pde);
}

static int __pdp_init(struct drm_device *dev,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct drm_device *dev,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
	if (USES_FULL_48BIT_PPGTT(dev)) {
		cleanup_px(dev, pdp);
		kfree(pdp);
	}
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->dev, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->dev, pml4, scratch_pml4e);
}

static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  struct i915_page_directory *pd,
			  int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
				  struct i915_pml4 *pml4,
				  struct i915_page_directory_pointer *pdp,
				  int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	BUG_ON(entry >= 4);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
	intel_ring_emit(engine, upper_32_bits(addr));
	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
	intel_ring_emit(engine, lower_32_bits(addr));
	intel_ring_advance(engine);

	return 0;
}

static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

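/* Point every PTE in the given range at the scratch page. The walk descends
 * pdp -> pd -> pt and bails out early if a level was never allocated.
 */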
static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
				       struct i915_page_directory_pointer *pdp,
				       uint64_t start,
				       uint64_t length,
				       gen8_pte_t scratch_pte)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	if (WARN_ON(!pdp))
		return;

	while (num_entries) {
		struct i915_page_directory *pd;
		struct i915_page_table *pt;

		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;

		pd = pdp->page_directory[pdpe];

		if (WARN_ON(!pd->page_table[pde]))
			break;

		pt = pd->page_table[pde];

		if (WARN_ON(!px_page(pt)))
			break;

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES)
			last_pte = GEN8_PTES;

		pt_vaddr = kmap_px(pt);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		kunmap_px(ppgtt, pt);

		pte = 0;
		if (++pde == I915_PDES) {
			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
				break;
			pde = 0;
		}
	}
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, use_scratch);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
					   scratch_pte);
	} else {
		uint64_t pml4e;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
						   scratch_pte);
		}
	}
}

static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
			      struct sg_page_iter *sg_iter,
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);

	pt_vaddr = NULL;

	while (__sg_page_iter_next(sg_iter)) {
		if (pt_vaddr == NULL) {
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
			struct i915_page_table *pt = pd->page_table[pde];
			pt_vaddr = kmap_px(pt);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
					break;
				pde = 0;
			}
			pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sg_page_iter sg_iter;

	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
		uint64_t pml4e;
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
}

static void gen8_free_page_tables(struct drm_device *dev,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		free_pt(dev, pd->page_table[i]);
		pd->page_table[i] = NULL;
	}
}

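/* Set up the shared scratch hierarchy (page, PT, PD and, with 48b PPGTT,
 * PDP) that unused entries of every level point at.
 */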
static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	vm->scratch_pd = alloc_pd(dev);
	if (IS_ERR(vm->scratch_pd)) {
		free_pt(dev, vm->scratch_pt);
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pd);
	}

	if (USES_FULL_48BIT_PPGTT(dev)) {
		vm->scratch_pdp = alloc_pdp(dev);
		if (IS_ERR(vm->scratch_pdp)) {
			free_pd(dev, vm->scratch_pd);
			free_pt(dev, vm->scratch_pt);
			free_scratch_page(dev, vm->scratch_page);
			return PTR_ERR(vm->scratch_pdp);
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (USES_FULL_48BIT_PPGTT(dev))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;
}

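/* When running under GVT-g, tell the host about PPGTT creation or teardown
 * by writing the page-directory addresses and a g2v message to the vGPU
 * interface registers.
 */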
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (USES_FULL_48BIT_PPGTT(dev)) {
		u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	if (USES_FULL_48BIT_PPGTT(dev))
		free_pdp(dev, vm->scratch_pdp);
	free_pd(dev, vm->scratch_pd);
	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
				    struct i915_page_directory_pointer *pdp)
{
	int i;

	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
		if (WARN_ON(!pdp->page_directory[i]))
			continue;

		gen8_free_page_tables(dev, pdp->page_directory[i]);
		free_pd(dev, pdp->page_directory[i]);
	}

	free_pdp(dev, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(vm->dev))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);

	gen8_free_scratch(vm);
}

/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm: Master vm structure.
 * @pd: Page directory for this address range.
 * @start: Starting virtual address to begin allocations.
 * @length: Size of the allocations.
 * @new_pts: Bitmap set by function with new allocations. Likely used by the
 *	caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
 * the page directory boundary (instead of the page directory pointer). That
 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
 * possible, and likely that the caller will need to use multiple calls of this
 * function to achieve the appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_table *pt;
	uint32_t pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		/* Don't reallocate page tables */
		if (test_bit(pde, pd->used_pdes)) {
			/* Scratch is never allocated this way */
			WARN_ON(pt == vm->scratch_pt);
			continue;
		}

		pt = alloc_pt(dev);
		if (IS_ERR(pt))
			goto unwind_out;

		gen8_initialize_pt(vm, pt);
		pd->page_table[pde] = pt;
		__set_bit(pde, new_pts);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pde, new_pts, I915_PDES)
		free_pt(dev, pd->page_table[pde]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm: Master vm structure.
 * @pdp: Page directory pointer for this address range.
 * @start: Starting virtual address to begin allocations.
 * @length: Size of the allocations.
 * @new_pds: Bitmap set by function with new allocations. Likely used by the
 *	caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);

	WARN_ON(!bitmap_empty(new_pds, pdpes));

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (test_bit(pdpe, pdp->used_pdpes))
			continue;

		pd = alloc_pd(dev);
		if (IS_ERR(pd))
			goto unwind_out;

		gen8_initialize_pd(vm, pd);
		pdp->page_directory[pdpe] = pd;
		__set_bit(pdpe, new_pds);
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pdpe, new_pds, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm: Master vm structure.
 * @pml4: Page map level 4 for this address range.
 * @start: Starting virtual address to begin allocations.
 * @length: Size of the allocations.
 * @new_pdps: Bitmap set by function with new allocations. Likely used by the
 *	caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev);
			if (IS_ERR(pdp))
				goto unwind_out;

			gen8_initialize_pdp(vm, pdp);
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev, pml4->pdps[pml4e]);

	return -ENOMEM;
}

static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
					  unsigned long **new_pts,
					  uint32_t pdpes)
{
	unsigned long *pds;
	unsigned long *pts;

	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
	if (!pds)
		return -ENOMEM;

	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
	free_gen8_temp_bitmaps(pds, pts);
	return -ENOMEM;
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}

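/* Allocate all page directories and page tables needed to back
 * [start, start + length) underneath one PDP, then write the PDEs and mark
 * the TLBs dirty. Any partial allocation is unwound on failure.
 */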
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* Wrap is never okay since we can only represent 48b, and we don't
	 * actually use the other side of the canonical address space.
	 */
	if (WARN_ON(start + length < start))
		return -ENODEV;

	if (WARN_ON(start + length > vm->total))
		return -ENODEV;

	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Do the allocations first so we can easily bail out */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	if (ret) {
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
		if (ret)
			goto err_out;
	}

	start = orig_start;
	length = orig_length;

	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		gen8_pde_t *const page_directory = kmap_px(pd);
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
			__set_bit(pde, pd->used_pdes);

			/* Map the PDE to the page table */
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
		}

		kunmap_px(ppgtt, page_directory);
		__set_bit(pdpe, pdp->used_pdpes);
		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
	}

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return 0;

err_out:
	while (pdpe--) {
		unsigned long temp;

		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
	}

	for_each_set_bit(pdpe, new_page_dirs, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->dev))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

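/* Dump helper (used via ppgtt->debug_dump): walk one PDP and print every PTE
 * group that differs from the scratch PTE, together with its virtual address.
 */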
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			uint32_t pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, true);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, " PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* We allocate temp bitmap for page tables for no gain
	 * but as this is for init only, lets keep the things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;

	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->base.total = 1ULL << 48;
		ppgtt->switch_mm = gen8_48b_mm_switch;
	} else {
		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
		ppgtt->switch_mm = gen8_legacy_mm_switch;
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);

		if (intel_vgpu_active(ppgtt->base.dev)) {
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
	}

	if (intel_vgpu_active(ppgtt->base.dev))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

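/* Debug dump helper for gen6/7: check each PDE against the value we expect
 * and print any PTE group that differs from the scratch PTE.
 */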
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	uint32_t pte, pde, temp;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_px(ppgtt, pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory *pd,
			   const int pde, struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;

	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
	pd_entry |= GEN6_PDE_VALID;

	writel(pd_entry, ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd,
				  uint32_t start, uint32_t length)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_page_table *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, pd, start, length, temp, pde)
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also require for WC mapped PTEs */
	readl(ggtt->gsm);
}

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);

	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}

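/* The *_mm_switch() callbacks below load this ppgtt's page directory into
 * the engine's PP_DIR registers, either by emitting LRI commands into the
 * request's ring (hsw/gen7, with TLB flushes) or by direct MMIO writes
 * (vgpu/gen6).
 */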
90252e5c 1643static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1644 struct drm_i915_gem_request *req)
90252e5c 1645{
4a570db5 1646 struct intel_engine_cs *engine = req->engine;
90252e5c
BW
1647 int ret;
1648
90252e5c 1649 /* NB: TLBs must be flushed and invalidated before a switch */
e2f80391 1650 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
90252e5c
BW
1651 if (ret)
1652 return ret;
1653
5fb9de1a 1654 ret = intel_ring_begin(req, 6);
90252e5c
BW
1655 if (ret)
1656 return ret;
1657
e2f80391
TU
1658 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
1659 intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
1660 intel_ring_emit(engine, PP_DIR_DCLV_2G);
1661 intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
1662 intel_ring_emit(engine, get_pd_offset(ppgtt));
1663 intel_ring_emit(engine, MI_NOOP);
1664 intel_ring_advance(engine);
90252e5c
BW
1665
1666 return 0;
1667}
1668
71ba2d64 1669static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1670 struct drm_i915_gem_request *req)
71ba2d64 1671{
4a570db5 1672 struct intel_engine_cs *engine = req->engine;
71ba2d64
YZ
1673 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1674
e2f80391
TU
1675 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1676 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
71ba2d64
YZ
1677 return 0;
1678}
1679
48a10389 1680static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1681 struct drm_i915_gem_request *req)
48a10389 1682{
4a570db5 1683 struct intel_engine_cs *engine = req->engine;
48a10389
BW
1684 int ret;
1685
48a10389 1686 /* NB: TLBs must be flushed and invalidated before a switch */
e2f80391 1687 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
48a10389
BW
1688 if (ret)
1689 return ret;
1690
5fb9de1a 1691 ret = intel_ring_begin(req, 6);
48a10389
BW
1692 if (ret)
1693 return ret;
1694
e2f80391
TU
1695 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
1696 intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
1697 intel_ring_emit(engine, PP_DIR_DCLV_2G);
1698 intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
1699 intel_ring_emit(engine, get_pd_offset(ppgtt));
1700 intel_ring_emit(engine, MI_NOOP);
1701 intel_ring_advance(engine);
48a10389 1702
90252e5c 1703 /* XXX: RCS is the only one to auto invalidate the TLBs? */
e2f80391
TU
1704 if (engine->id != RCS) {
1705 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
90252e5c
BW
1706 if (ret)
1707 return ret;
1708 }
1709
48a10389
BW
1710 return 0;
1711}
1712
eeb9488e 1713static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1714 struct drm_i915_gem_request *req)
eeb9488e 1715{
4a570db5 1716 struct intel_engine_cs *engine = req->engine;
eeb9488e
BW
1717 struct drm_device *dev = ppgtt->base.dev;
1718 struct drm_i915_private *dev_priv = dev->dev_private;
1719
48a10389 1720
e2f80391
TU
1721 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1722 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e 1723
e2f80391 1724 POSTING_READ(RING_PP_DIR_DCLV(engine));
eeb9488e
BW
1725
1726 return 0;
1727}
1728
82460d97 1729static void gen8_ppgtt_enable(struct drm_device *dev)
eeb9488e 1730{
eeb9488e 1731 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 1732 struct intel_engine_cs *engine;
3e302542 1733
b4ac5afc 1734 for_each_engine(engine, dev_priv) {
2dba3239 1735 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
e2f80391 1736 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1737 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1738 }
eeb9488e 1739}
6197349b 1740
static void gen7_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	uint32_t ecochk, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

1d2a314c 1784/* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 1785static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
782f1495
BW
1786 uint64_t start,
1787 uint64_t length,
828c7908 1788 bool use_scratch)
1d2a314c 1789{
e5716f55 1790 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 1791 gen6_pte_t *pt_vaddr, scratch_pte;
782f1495
BW
1792 unsigned first_entry = start >> PAGE_SHIFT;
1793 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3
MT
1794 unsigned act_pt = first_entry / GEN6_PTES;
1795 unsigned first_pte = first_entry % GEN6_PTES;
7bddb01f 1796 unsigned last_pte, i;
1d2a314c 1797
c114f76a
MK
1798 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1799 I915_CACHE_LLC, true, 0);
1d2a314c 1800
7bddb01f
DV
1801 while (num_entries) {
1802 last_pte = first_pte + num_entries;
07749ef3
MT
1803 if (last_pte > GEN6_PTES)
1804 last_pte = GEN6_PTES;
7bddb01f 1805
d1c54acd 1806 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1d2a314c 1807
7bddb01f
DV
1808 for (i = first_pte; i < last_pte; i++)
1809 pt_vaddr[i] = scratch_pte;
1d2a314c 1810
d1c54acd 1811 kunmap_px(ppgtt, pt_vaddr);
1d2a314c 1812
7bddb01f
DV
1813 num_entries -= last_pte - first_pte;
1814 first_pte = 0;
a15326a5 1815 act_pt++;
7bddb01f 1816 }
1d2a314c
DV
1817}
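/*
 * Illustrative sketch (not part of the original file): how the loop above
 * turns a GTT offset into page-table/PTE indices.  GEN6_PTES is
 * PAGE_SIZE / sizeof(gen6_pte_t) entries per table, so a hypothetical
 * helper would simply split the page index:
 *
 *	unsigned entry = offset >> PAGE_SHIFT;
 *	unsigned pt    = entry / GEN6_PTES;	(index into pd.page_table[])
 *	unsigned pte   = entry % GEN6_PTES;	(slot within that table)
 */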
1818
853ba5d2 1819static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
def886c3 1820 struct sg_table *pages,
782f1495 1821 uint64_t start,
24f3a8cf 1822 enum i915_cache_level cache_level, u32 flags)
def886c3 1823{
e5716f55 1824 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 1825 gen6_pte_t *pt_vaddr;
782f1495 1826 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3
MT
1827 unsigned act_pt = first_entry / GEN6_PTES;
1828 unsigned act_pte = first_entry % GEN6_PTES;
6e995e23
ID
1829 struct sg_page_iter sg_iter;
1830
cc79714f 1831 pt_vaddr = NULL;
6e995e23 1832 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
cc79714f 1833 if (pt_vaddr == NULL)
d1c54acd 1834 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
6e995e23 1835
cc79714f
CW
1836 pt_vaddr[act_pte] =
1837 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
24f3a8cf
AG
1838 cache_level, true, flags);
1839
07749ef3 1840 if (++act_pte == GEN6_PTES) {
d1c54acd 1841 kunmap_px(ppgtt, pt_vaddr);
cc79714f 1842 pt_vaddr = NULL;
a15326a5 1843 act_pt++;
6e995e23 1844 act_pte = 0;
def886c3 1845 }
def886c3 1846 }
cc79714f 1847 if (pt_vaddr)
d1c54acd 1848 kunmap_px(ppgtt, pt_vaddr);
def886c3
DV
1849}
1850
678d96fb 1851static int gen6_alloc_va_range(struct i915_address_space *vm,
a05d80ee 1852 uint64_t start_in, uint64_t length_in)
678d96fb 1853{
4933d519
MT
1854 DECLARE_BITMAP(new_page_tables, I915_PDES);
1855 struct drm_device *dev = vm->dev;
72e96d64
JL
1856 struct drm_i915_private *dev_priv = to_i915(dev);
1857 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e5716f55 1858 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1859 struct i915_page_table *pt;
a05d80ee 1860 uint32_t start, length, start_save, length_save;
678d96fb 1861 uint32_t pde, temp;
4933d519
MT
1862 int ret;
1863
a05d80ee
MK
1864 if (WARN_ON(start_in + length_in > ppgtt->base.total))
1865 return -ENODEV;
1866
1867 start = start_save = start_in;
1868 length = length_save = length_in;
4933d519
MT
1869
1870 bitmap_zero(new_page_tables, I915_PDES);
1871
1872 /* The allocation is done in two stages so that we can bail out with
1873 * minimal amount of pain. The first stage finds new page tables that
 1874 * need allocation. The second stage marks the used PTEs within the page
1875 * tables.
1876 */
1877 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
79ab9370 1878 if (pt != vm->scratch_pt) {
4933d519
MT
1879 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1880 continue;
1881 }
1882
1883 /* We've already allocated a page table */
1884 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1885
8a1ebd74 1886 pt = alloc_pt(dev);
4933d519
MT
1887 if (IS_ERR(pt)) {
1888 ret = PTR_ERR(pt);
1889 goto unwind_out;
1890 }
1891
1892 gen6_initialize_pt(vm, pt);
1893
1894 ppgtt->pd.page_table[pde] = pt;
966082c9 1895 __set_bit(pde, new_page_tables);
72744cb1 1896 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
4933d519
MT
1897 }
1898
1899 start = start_save;
1900 length = length_save;
678d96fb
BW
1901
1902 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1903 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1904
1905 bitmap_zero(tmp_bitmap, GEN6_PTES);
1906 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1907 gen6_pte_count(start, length));
1908
966082c9 1909 if (__test_and_clear_bit(pde, new_page_tables))
4933d519
MT
1910 gen6_write_pde(&ppgtt->pd, pde, pt);
1911
72744cb1
MT
1912 trace_i915_page_table_entry_map(vm, pde, pt,
1913 gen6_pte_index(start),
1914 gen6_pte_count(start, length),
1915 GEN6_PTES);
4933d519 1916 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
678d96fb
BW
1917 GEN6_PTES);
1918 }
1919
4933d519
MT
1920 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1921
1922 /* Make sure write is complete before other code can use this page
 1923 * table. Also required for WC-mapped PTEs */
72e96d64 1924 readl(ggtt->gsm);
4933d519 1925
563222a7 1926 mark_tlbs_dirty(ppgtt);
678d96fb 1927 return 0;
4933d519
MT
1928
1929unwind_out:
1930 for_each_set_bit(pde, new_page_tables, I915_PDES) {
ec565b3c 1931 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
4933d519 1932
79ab9370 1933 ppgtt->pd.page_table[pde] = vm->scratch_pt;
a08e111a 1934 free_pt(vm->dev, pt);
4933d519
MT
1935 }
1936
1937 mark_tlbs_dirty(ppgtt);
1938 return ret;
678d96fb
BW
1939}
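/*
 * Illustrative sketch (not part of the original file): typical use of the
 * two-stage allocator above from a hypothetical bind path.  On failure,
 * every page table allocated in the first stage has already been unwound,
 * so the caller has nothing to clean up:
 *
 *	if (vm->allocate_va_range) {
 *		ret = vm->allocate_va_range(vm, vma->node.start,
 *					    vma->node.size);
 *		if (ret)
 *			return ret;
 *	}
 */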
1940
8776f02b
MK
1941static int gen6_init_scratch(struct i915_address_space *vm)
1942{
1943 struct drm_device *dev = vm->dev;
1944
1945 vm->scratch_page = alloc_scratch_page(dev);
1946 if (IS_ERR(vm->scratch_page))
1947 return PTR_ERR(vm->scratch_page);
1948
1949 vm->scratch_pt = alloc_pt(dev);
1950 if (IS_ERR(vm->scratch_pt)) {
1951 free_scratch_page(dev, vm->scratch_page);
1952 return PTR_ERR(vm->scratch_pt);
1953 }
1954
1955 gen6_initialize_pt(vm, vm->scratch_pt);
1956
1957 return 0;
1958}
1959
1960static void gen6_free_scratch(struct i915_address_space *vm)
1961{
1962 struct drm_device *dev = vm->dev;
1963
1964 free_pt(dev, vm->scratch_pt);
1965 free_scratch_page(dev, vm->scratch_page);
1966}
1967
061dd493 1968static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1969{
e5716f55 1970 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
09942c65
MT
1971 struct i915_page_table *pt;
1972 uint32_t pde;
4933d519 1973
061dd493
DV
1974 drm_mm_remove_node(&ppgtt->node);
1975
09942c65 1976 gen6_for_all_pdes(pt, ppgtt, pde) {
79ab9370 1977 if (pt != vm->scratch_pt)
a08e111a 1978 free_pt(ppgtt->base.dev, pt);
4933d519 1979 }
06fda602 1980
8776f02b 1981 gen6_free_scratch(vm);
3440d265
DV
1982}
1983
b146520f 1984static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1985{
8776f02b 1986 struct i915_address_space *vm = &ppgtt->base;
853ba5d2 1987 struct drm_device *dev = ppgtt->base.dev;
72e96d64
JL
1988 struct drm_i915_private *dev_priv = to_i915(dev);
1989 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e3cc1995 1990 bool retried = false;
b146520f 1991 int ret;
1d2a314c 1992
c8d4c0d6
BW
 1993 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1994 * allocator works in address space sizes, so it's multiplied by page
1995 * size. We allocate at the top of the GTT to avoid fragmentation.
1996 */
72e96d64 1997 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 1998
8776f02b
MK
1999 ret = gen6_init_scratch(vm);
2000 if (ret)
2001 return ret;
4933d519 2002
e3cc1995 2003alloc:
72e96d64 2004 ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
c8d4c0d6
BW
2005 &ppgtt->node, GEN6_PD_SIZE,
2006 GEN6_PD_ALIGN, 0,
72e96d64 2007 0, ggtt->base.total,
3e8b5ae9 2008 DRM_MM_TOPDOWN);
e3cc1995 2009 if (ret == -ENOSPC && !retried) {
72e96d64 2010 ret = i915_gem_evict_something(dev, &ggtt->base,
e3cc1995 2011 GEN6_PD_SIZE, GEN6_PD_ALIGN,
d23db88c 2012 I915_CACHE_NONE,
72e96d64 2013 0, ggtt->base.total,
d23db88c 2014 0);
e3cc1995 2015 if (ret)
678d96fb 2016 goto err_out;
e3cc1995
BW
2017
2018 retried = true;
2019 goto alloc;
2020 }
c8d4c0d6 2021
c8c26622 2022 if (ret)
678d96fb
BW
2023 goto err_out;
2024
c8c26622 2025
72e96d64 2026 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 2027 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 2028
c8c26622 2029 return 0;
678d96fb
BW
2030
2031err_out:
8776f02b 2032 gen6_free_scratch(vm);
678d96fb 2033 return ret;
b146520f
BW
2034}
2035
b146520f
BW
2036static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2037{
2f2cf682 2038 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 2039}
06dc68d6 2040
4933d519
MT
2041static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2042 uint64_t start, uint64_t length)
2043{
ec565b3c 2044 struct i915_page_table *unused;
4933d519 2045 uint32_t pde, temp;
1d2a314c 2046
4933d519 2047 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
79ab9370 2048 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
2049}
2050
5c5f6457 2051static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f
BW
2052{
2053 struct drm_device *dev = ppgtt->base.dev;
72e96d64
JL
2054 struct drm_i915_private *dev_priv = to_i915(dev);
2055 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
2056 int ret;
2057
72e96d64 2058 ppgtt->base.pte_encode = ggtt->base.pte_encode;
b146520f 2059 if (IS_GEN6(dev)) {
b146520f
BW
2060 ppgtt->switch_mm = gen6_mm_switch;
2061 } else if (IS_HASWELL(dev)) {
b146520f
BW
2062 ppgtt->switch_mm = hsw_mm_switch;
2063 } else if (IS_GEN7(dev)) {
b146520f
BW
2064 ppgtt->switch_mm = gen7_mm_switch;
2065 } else
2066 BUG();
2067
71ba2d64
YZ
2068 if (intel_vgpu_active(dev))
2069 ppgtt->switch_mm = vgpu_mm_switch;
2070
b146520f
BW
2071 ret = gen6_ppgtt_alloc(ppgtt);
2072 if (ret)
2073 return ret;
2074
5c5f6457 2075 ppgtt->base.allocate_va_range = gen6_alloc_va_range;
b146520f
BW
2076 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2077 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
777dc5bb
DV
2078 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2079 ppgtt->base.bind_vma = ppgtt_bind_vma;
b146520f 2080 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
b146520f 2081 ppgtt->base.start = 0;
09942c65 2082 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
87d60b63 2083 ppgtt->debug_dump = gen6_dump_ppgtt;
1d2a314c 2084
44159ddb 2085 ppgtt->pd.base.ggtt_offset =
07749ef3 2086 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1d2a314c 2087
72e96d64 2088 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
44159ddb 2089 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
678d96fb 2090
5c5f6457 2091 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
1d2a314c 2092
678d96fb
BW
2093 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2094
440fd528 2095 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
2096 ppgtt->node.size >> 20,
2097 ppgtt->node.start / PAGE_SIZE);
3440d265 2098
fa76da34 2099 DRM_DEBUG("Adding PPGTT at offset %x\n",
44159ddb 2100 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 2101
b146520f 2102 return 0;
3440d265
DV
2103}
2104
5c5f6457 2105static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
3440d265 2106{
853ba5d2 2107 ppgtt->base.dev = dev;
3440d265 2108
3ed124b2 2109 if (INTEL_INFO(dev)->gen < 8)
5c5f6457 2110 return gen6_ppgtt_init(ppgtt);
3ed124b2 2111 else
d7b2633d 2112 return gen8_ppgtt_init(ppgtt);
fa76da34 2113}
c114f76a 2114
a2cad9df
MW
2115static void i915_address_space_init(struct i915_address_space *vm,
2116 struct drm_i915_private *dev_priv)
2117{
2118 drm_mm_init(&vm->mm, vm->start, vm->total);
2119 vm->dev = dev_priv->dev;
2120 INIT_LIST_HEAD(&vm->active_list);
2121 INIT_LIST_HEAD(&vm->inactive_list);
2122 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2123}
2124
d5165ebd
TG
2125static void gtt_write_workarounds(struct drm_device *dev)
2126{
2127 struct drm_i915_private *dev_priv = dev->dev_private;
2128
 2129 /* This function is for GTT-related workarounds. It is called on driver
 2130 * load and after a GPU reset, so workarounds can be placed here even if
 2131 * they get overwritten by a GPU reset.
2132 */
2133 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
2134 if (IS_BROADWELL(dev))
2135 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2136 else if (IS_CHERRYVIEW(dev))
2137 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2138 else if (IS_SKYLAKE(dev))
2139 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2140 else if (IS_BROXTON(dev))
2141 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2142}
2143
fa76da34
DV
2144int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2145{
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2147 int ret = 0;
3ed124b2 2148
5c5f6457 2149 ret = __hw_ppgtt_init(dev, ppgtt);
fa76da34 2150 if (ret == 0) {
c7c48dfd 2151 kref_init(&ppgtt->ref);
a2cad9df 2152 i915_address_space_init(&ppgtt->base, dev_priv);
93bd8649 2153 }
1d2a314c
DV
2154
2155 return ret;
2156}
2157
82460d97
DV
2158int i915_ppgtt_init_hw(struct drm_device *dev)
2159{
d5165ebd
TG
2160 gtt_write_workarounds(dev);
2161
671b5013
TD
2162 /* In the case of execlists, PPGTT is enabled by the context descriptor
2163 * and the PDPs are contained within the context itself. We don't
2164 * need to do anything here. */
2165 if (i915.enable_execlists)
2166 return 0;
2167
82460d97
DV
2168 if (!USES_PPGTT(dev))
2169 return 0;
2170
2171 if (IS_GEN6(dev))
2172 gen6_ppgtt_enable(dev);
2173 else if (IS_GEN7(dev))
2174 gen7_ppgtt_enable(dev);
2175 else if (INTEL_INFO(dev)->gen >= 8)
2176 gen8_ppgtt_enable(dev);
2177 else
5f77eeb0 2178 MISSING_CASE(INTEL_INFO(dev)->gen);
82460d97 2179
4ad2fd88
JH
2180 return 0;
2181}
1d2a314c 2182
b3dd6b96 2183int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
4ad2fd88 2184{
39dabecd 2185 struct drm_i915_private *dev_priv = req->i915;
4ad2fd88
JH
2186 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2187
2188 if (i915.enable_execlists)
2189 return 0;
2190
2191 if (!ppgtt)
2192 return 0;
2193
e85b26dc 2194 return ppgtt->switch_mm(ppgtt, req);
1d2a314c 2195}
4ad2fd88 2196
4d884705
DV
2197struct i915_hw_ppgtt *
2198i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2199{
2200 struct i915_hw_ppgtt *ppgtt;
2201 int ret;
2202
2203 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2204 if (!ppgtt)
2205 return ERR_PTR(-ENOMEM);
2206
2207 ret = i915_ppgtt_init(dev, ppgtt);
2208 if (ret) {
2209 kfree(ppgtt);
2210 return ERR_PTR(ret);
2211 }
2212
2213 ppgtt->file_priv = fpriv;
2214
198c974d
DCS
2215 trace_i915_ppgtt_create(&ppgtt->base);
2216
4d884705
DV
2217 return ppgtt;
2218}
2219
ee960be7
DV
2220void i915_ppgtt_release(struct kref *kref)
2221{
2222 struct i915_hw_ppgtt *ppgtt =
2223 container_of(kref, struct i915_hw_ppgtt, ref);
2224
198c974d
DCS
2225 trace_i915_ppgtt_release(&ppgtt->base);
2226
ee960be7
DV
2227 /* vmas should already be unbound */
2228 WARN_ON(!list_empty(&ppgtt->base.active_list));
2229 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2230
19dd120c
DV
2231 list_del(&ppgtt->base.global_link);
2232 drm_mm_takedown(&ppgtt->base.mm);
2233
ee960be7
DV
2234 ppgtt->base.cleanup(&ppgtt->base);
2235 kfree(ppgtt);
2236}
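/*
 * Illustrative sketch (not part of the original file): lifetime pairing for
 * i915_ppgtt_create()/i915_ppgtt_release() above.  Users take a reference
 * on the ppgtt and drop it with kref_put(), which invokes the release
 * function once the last reference is gone:
 *
 *	struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 *	...
 *	kref_put(&ppgtt->ref, i915_ppgtt_release);
 */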
1d2a314c 2237
a81cc00c
BW
2238extern int intel_iommu_gfx_mapped;
 2239/* Certain Gen5 chipsets require idling the GPU before
2240 * unmapping anything from the GTT when VT-d is enabled.
2241 */
2c642b07 2242static bool needs_idle_maps(struct drm_device *dev)
a81cc00c
BW
2243{
2244#ifdef CONFIG_INTEL_IOMMU
2245 /* Query intel_iommu to see if we need the workaround. Presumably that
2246 * was loaded first.
2247 */
2248 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
2249 return true;
2250#endif
2251 return false;
2252}
2253
5c042287
BW
2254static bool do_idling(struct drm_i915_private *dev_priv)
2255{
72e96d64 2256 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287
BW
2257 bool ret = dev_priv->mm.interruptible;
2258
72e96d64 2259 if (unlikely(ggtt->do_idle_maps)) {
5c042287 2260 dev_priv->mm.interruptible = false;
b2da9fe5 2261 if (i915_gpu_idle(dev_priv->dev)) {
5c042287
BW
2262 DRM_ERROR("Couldn't idle GPU\n");
2263 /* Wait a bit, in hopes it avoids the hang */
2264 udelay(10);
2265 }
2266 }
2267
2268 return ret;
2269}
2270
2271static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2272{
72e96d64
JL
2273 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2274
2275 if (unlikely(ggtt->do_idle_maps))
5c042287
BW
2276 dev_priv->mm.interruptible = interruptible;
2277}
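/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * do_idling()/undo_idling() around a GTT unmap on the Ironlake + VT-d
 * configurations flagged by needs_idle_maps(); see
 * i915_gem_gtt_finish_object() further down for the in-tree user.
 *
 *	bool interruptible = do_idling(dev_priv);
 *	... unmap or clear GTT entries while the GPU is known to be idle ...
 *	undo_idling(dev_priv, interruptible);
 */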
2278
828c7908
BW
2279void i915_check_and_clear_faults(struct drm_device *dev)
2280{
2281 struct drm_i915_private *dev_priv = dev->dev_private;
e2f80391 2282 struct intel_engine_cs *engine;
828c7908
BW
2283
2284 if (INTEL_INFO(dev)->gen < 6)
2285 return;
2286
b4ac5afc 2287 for_each_engine(engine, dev_priv) {
828c7908 2288 u32 fault_reg;
e2f80391 2289 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2290 if (fault_reg & RING_FAULT_VALID) {
2291 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2292 "\tAddr: 0x%08lx\n"
828c7908
BW
2293 "\tAddress space: %s\n"
2294 "\tSource ID: %d\n"
2295 "\tType: %d\n",
2296 fault_reg & PAGE_MASK,
2297 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2298 RING_FAULT_SRCID(fault_reg),
2299 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2300 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2301 fault_reg & ~RING_FAULT_VALID);
2302 }
2303 }
4a570db5 2304 POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
828c7908
BW
2305}
2306
91e56499
CW
2307static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2308{
2d1fe073 2309 if (INTEL_INFO(dev_priv)->gen < 6) {
91e56499
CW
2310 intel_gtt_chipset_flush();
2311 } else {
2312 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2313 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2314 }
2315}
2316
828c7908
BW
2317void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2318{
72e96d64
JL
2319 struct drm_i915_private *dev_priv = to_i915(dev);
2320 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2321
2322 /* Don't bother messing with faults pre GEN6 as we have little
2323 * documentation supporting that it's a good idea.
2324 */
2325 if (INTEL_INFO(dev)->gen < 6)
2326 return;
2327
2328 i915_check_and_clear_faults(dev);
2329
72e96d64
JL
2330 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
2331 true);
91e56499
CW
2332
2333 i915_ggtt_flush(dev_priv);
828c7908
BW
2334}
2335
74163907 2336int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
7c2e6fdf 2337{
9da3da66
CW
2338 if (!dma_map_sg(&obj->base.dev->pdev->dev,
2339 obj->pages->sgl, obj->pages->nents,
2340 PCI_DMA_BIDIRECTIONAL))
2341 return -ENOSPC;
2342
2343 return 0;
7c2e6fdf
DV
2344}
2345
2c642b07 2346static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61
BW
2347{
2348#ifdef writeq
2349 writeq(pte, addr);
2350#else
2351 iowrite32((u32)pte, addr);
2352 iowrite32(pte >> 32, addr + 4);
2353#endif
2354}
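/*
 * Illustrative sketch (not part of the original file): gen8 PTEs are 64 bits
 * wide, so on 32-bit builds without writeq() the helper above falls back to
 * two 32-bit writes, low dword first.  Updating a single GGTT entry looks
 * roughly like:
 *
 *	gen8_pte_t pte = gen8_pte_encode(addr, I915_CACHE_LLC, true);
 *	gen8_set_pte(&gtt_entries[index], pte);
 */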
2355
2356static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2357 struct sg_table *st,
782f1495 2358 uint64_t start,
24f3a8cf 2359 enum i915_cache_level level, u32 unused)
94ec8f61 2360{
72e96d64
JL
2361 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2362 struct i915_ggtt *ggtt = &dev_priv->ggtt;
782f1495 2363 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3 2364 gen8_pte_t __iomem *gtt_entries =
72e96d64 2365 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
94ec8f61
BW
2366 int i = 0;
2367 struct sg_page_iter sg_iter;
57007df7 2368 dma_addr_t addr = 0; /* shut up gcc */
be69459a
ID
2369 int rpm_atomic_seq;
2370
2371 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
94ec8f61
BW
2372
2373 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2374 addr = sg_dma_address(sg_iter.sg) +
2375 (sg_iter.sg_pgoffset << PAGE_SHIFT);
2376 gen8_set_pte(&gtt_entries[i],
2377 gen8_pte_encode(addr, level, true));
2378 i++;
2379 }
2380
2381 /*
2382 * XXX: This serves as a posting read to make sure that the PTE has
2383 * actually been updated. There is some concern that even though
 2384 * registers and PTEs are within the same BAR, they may still be subject
 2385 * to different NUMA access patterns. Therefore, even with the way we assume
2386 * hardware should work, we must keep this posting read for paranoia.
2387 */
2388 if (i != 0)
2389 WARN_ON(readq(&gtt_entries[i-1])
2390 != gen8_pte_encode(addr, level, true));
2391
94ec8f61
BW
2392 /* This next bit makes the above posting read even more important. We
2393 * want to flush the TLBs only after we're certain all the PTE updates
2394 * have finished.
2395 */
2396 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2397 POSTING_READ(GFX_FLSH_CNTL_GEN6);
be69459a
ID
2398
2399 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
94ec8f61
BW
2400}
2401
c140330b
CW
2402struct insert_entries {
2403 struct i915_address_space *vm;
2404 struct sg_table *st;
2405 uint64_t start;
2406 enum i915_cache_level level;
2407 u32 flags;
2408};
2409
2410static int gen8_ggtt_insert_entries__cb(void *_arg)
2411{
2412 struct insert_entries *arg = _arg;
2413 gen8_ggtt_insert_entries(arg->vm, arg->st,
2414 arg->start, arg->level, arg->flags);
2415 return 0;
2416}
2417
2418static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2419 struct sg_table *st,
2420 uint64_t start,
2421 enum i915_cache_level level,
2422 u32 flags)
2423{
2424 struct insert_entries arg = { vm, st, start, level, flags };
2425 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2426}
2427
e76e9aeb
BW
2428/*
2429 * Binds an object into the global gtt with the specified cache level. The object
2430 * will be accessible to the GPU via commands whose operands reference offsets
2431 * within the global GTT as well as accessible by the GPU through the GMADR
2432 * mapped BAR (dev_priv->mm.gtt->gtt).
2433 */
853ba5d2 2434static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
7faf1ab2 2435 struct sg_table *st,
782f1495 2436 uint64_t start,
24f3a8cf 2437 enum i915_cache_level level, u32 flags)
e76e9aeb 2438{
72e96d64
JL
2439 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2440 struct i915_ggtt *ggtt = &dev_priv->ggtt;
782f1495 2441 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3 2442 gen6_pte_t __iomem *gtt_entries =
72e96d64 2443 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
6e995e23
ID
2444 int i = 0;
2445 struct sg_page_iter sg_iter;
57007df7 2446 dma_addr_t addr = 0;
be69459a
ID
2447 int rpm_atomic_seq;
2448
2449 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
e76e9aeb 2450
6e995e23 2451 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2db76d7c 2452 addr = sg_page_iter_dma_address(&sg_iter);
24f3a8cf 2453 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
6e995e23 2454 i++;
e76e9aeb
BW
2455 }
2456
e76e9aeb
BW
2457 /* XXX: This serves as a posting read to make sure that the PTE has
2458 * actually been updated. There is some concern that even though
 2459 * registers and PTEs are within the same BAR, they may still be subject
 2460 * to different NUMA access patterns. Therefore, even with the way we assume
2461 * hardware should work, we must keep this posting read for paranoia.
2462 */
57007df7
PM
2463 if (i != 0) {
2464 unsigned long gtt = readl(&gtt_entries[i-1]);
2465 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2466 }
0f9b91c7
BW
2467
2468 /* This next bit makes the above posting read even more important. We
2469 * want to flush the TLBs only after we're certain all the PTE updates
2470 * have finished.
2471 */
2472 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2473 POSTING_READ(GFX_FLSH_CNTL_GEN6);
be69459a
ID
2474
2475 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
e76e9aeb
BW
2476}
2477
94ec8f61 2478static void gen8_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2479 uint64_t start,
2480 uint64_t length,
94ec8f61
BW
2481 bool use_scratch)
2482{
72e96d64
JL
2483 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2484 struct i915_ggtt *ggtt = &dev_priv->ggtt;
782f1495
BW
2485 unsigned first_entry = start >> PAGE_SHIFT;
2486 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2487 gen8_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2488 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2489 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61 2490 int i;
be69459a
ID
2491 int rpm_atomic_seq;
2492
2493 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
94ec8f61
BW
2494
2495 if (WARN(num_entries > max_entries,
2496 "First entry = %d; Num entries = %d (max=%d)\n",
2497 first_entry, num_entries, max_entries))
2498 num_entries = max_entries;
2499
c114f76a 2500 scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
94ec8f61
BW
2501 I915_CACHE_LLC,
2502 use_scratch);
2503 for (i = 0; i < num_entries; i++)
2504 gen8_set_pte(&gtt_base[i], scratch_pte);
2505 readl(gtt_base);
be69459a
ID
2506
2507 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
94ec8f61
BW
2508}
2509
853ba5d2 2510static void gen6_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2511 uint64_t start,
2512 uint64_t length,
828c7908 2513 bool use_scratch)
7faf1ab2 2514{
72e96d64
JL
2515 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2516 struct i915_ggtt *ggtt = &dev_priv->ggtt;
782f1495
BW
2517 unsigned first_entry = start >> PAGE_SHIFT;
2518 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2519 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2520 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2521 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2 2522 int i;
be69459a
ID
2523 int rpm_atomic_seq;
2524
2525 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
7faf1ab2
DV
2526
2527 if (WARN(num_entries > max_entries,
2528 "First entry = %d; Num entries = %d (max=%d)\n",
2529 first_entry, num_entries, max_entries))
2530 num_entries = max_entries;
2531
c114f76a
MK
2532 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
2533 I915_CACHE_LLC, use_scratch, 0);
828c7908 2534
7faf1ab2
DV
2535 for (i = 0; i < num_entries; i++)
2536 iowrite32(scratch_pte, &gtt_base[i]);
2537 readl(gtt_base);
be69459a
ID
2538
2539 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
7faf1ab2
DV
2540}
2541
d369d2d9
DV
2542static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2543 struct sg_table *pages,
2544 uint64_t start,
2545 enum i915_cache_level cache_level, u32 unused)
7faf1ab2 2546{
be69459a 2547 struct drm_i915_private *dev_priv = vm->dev->dev_private;
7faf1ab2
DV
2548 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2549 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
be69459a
ID
2550 int rpm_atomic_seq;
2551
2552 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
7faf1ab2 2553
d369d2d9 2554 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
0875546c 2555
be69459a
ID
2556 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2557
7faf1ab2
DV
2558}
2559
853ba5d2 2560static void i915_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2561 uint64_t start,
2562 uint64_t length,
828c7908 2563 bool unused)
7faf1ab2 2564{
be69459a 2565 struct drm_i915_private *dev_priv = vm->dev->dev_private;
782f1495
BW
2566 unsigned first_entry = start >> PAGE_SHIFT;
2567 unsigned num_entries = length >> PAGE_SHIFT;
be69459a
ID
2568 int rpm_atomic_seq;
2569
2570 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2571
7faf1ab2 2572 intel_gtt_clear_range(first_entry, num_entries);
be69459a
ID
2573
2574 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
7faf1ab2
DV
2575}
2576
70b9f6f8
DV
2577static int ggtt_bind_vma(struct i915_vma *vma,
2578 enum i915_cache_level cache_level,
2579 u32 flags)
0a878716
DV
2580{
2581 struct drm_i915_gem_object *obj = vma->obj;
2582 u32 pte_flags = 0;
2583 int ret;
2584
2585 ret = i915_get_ggtt_vma_pages(vma);
2586 if (ret)
2587 return ret;
2588
2589 /* Currently applicable only to VLV */
2590 if (obj->gt_ro)
2591 pte_flags |= PTE_READ_ONLY;
2592
2593 vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
2594 vma->node.start,
2595 cache_level, pte_flags);
2596
2597 /*
2598 * Without aliasing PPGTT there's no difference between
2599 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2600 * upgrade to both bound if we bind either to avoid double-binding.
2601 */
2602 vma->bound |= GLOBAL_BIND | LOCAL_BIND;
2603
2604 return 0;
2605}
2606
2607static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2608 enum i915_cache_level cache_level,
2609 u32 flags)
d5bd1449 2610{
321d178e 2611 u32 pte_flags;
70b9f6f8
DV
2612 int ret;
2613
2614 ret = i915_get_ggtt_vma_pages(vma);
2615 if (ret)
2616 return ret;
7faf1ab2 2617
24f3a8cf 2618 /* Currently applicable only to VLV */
321d178e
CW
2619 pte_flags = 0;
2620 if (vma->obj->gt_ro)
f329f5f6 2621 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2622
ec7adb6e 2623
0a878716 2624 if (flags & GLOBAL_BIND) {
321d178e
CW
2625 vma->vm->insert_entries(vma->vm,
2626 vma->ggtt_view.pages,
0875546c
DV
2627 vma->node.start,
2628 cache_level, pte_flags);
6f65e29a 2629 }
d5bd1449 2630
0a878716 2631 if (flags & LOCAL_BIND) {
321d178e
CW
2632 struct i915_hw_ppgtt *appgtt =
2633 to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
2634 appgtt->base.insert_entries(&appgtt->base,
2635 vma->ggtt_view.pages,
782f1495 2636 vma->node.start,
f329f5f6 2637 cache_level, pte_flags);
6f65e29a 2638 }
70b9f6f8
DV
2639
2640 return 0;
d5bd1449
CW
2641}
2642
6f65e29a 2643static void ggtt_unbind_vma(struct i915_vma *vma)
74163907 2644{
6f65e29a 2645 struct drm_device *dev = vma->vm->dev;
7faf1ab2 2646 struct drm_i915_private *dev_priv = dev->dev_private;
6f65e29a 2647 struct drm_i915_gem_object *obj = vma->obj;
06615ee5
JL
2648 const uint64_t size = min_t(uint64_t,
2649 obj->base.size,
2650 vma->node.size);
6f65e29a 2651
aff43766 2652 if (vma->bound & GLOBAL_BIND) {
782f1495
BW
2653 vma->vm->clear_range(vma->vm,
2654 vma->node.start,
06615ee5 2655 size,
6f65e29a 2656 true);
6f65e29a 2657 }
74898d7e 2658
0875546c 2659 if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
6f65e29a 2660 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
06615ee5 2661
6f65e29a 2662 appgtt->base.clear_range(&appgtt->base,
782f1495 2663 vma->node.start,
06615ee5 2664 size,
6f65e29a 2665 true);
6f65e29a 2666 }
74163907
DV
2667}
2668
2669void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
7c2e6fdf 2670{
5c042287
BW
2671 struct drm_device *dev = obj->base.dev;
2672 struct drm_i915_private *dev_priv = dev->dev_private;
2673 bool interruptible;
2674
2675 interruptible = do_idling(dev_priv);
2676
5ec5b516
ID
2677 dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
2678 PCI_DMA_BIDIRECTIONAL);
5c042287
BW
2679
2680 undo_idling(dev_priv, interruptible);
7c2e6fdf 2681}
644ec02b 2682
42d6ab48
CW
2683static void i915_gtt_color_adjust(struct drm_mm_node *node,
2684 unsigned long color,
440fd528
TR
2685 u64 *start,
2686 u64 *end)
42d6ab48
CW
2687{
2688 if (node->color != color)
2689 *start += 4096;
2690
2691 if (!list_empty(&node->node_list)) {
2692 node = list_entry(node->node_list.next,
2693 struct drm_mm_node,
2694 node_list);
2695 if (node->allocated && node->color != color)
2696 *end -= 4096;
2697 }
2698}
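/*
 * Illustrative example (not part of the original file): effect of the colour
 * adjust callback above on a hole between two nodes of different cache
 * colours.  A new allocation whose colour differs from the preceding node
 * loses one page at the start of the hole, and one more at the end if the
 * following node's colour also differs:
 *
 *	hole before:  [ node A ][ <------ 16K ------> ][ node B ]
 *	hole after :  [ node A ] 4K [ usable 8K ] 4K [ node B ]
 *
 * which keeps a guard page between objects with conflicting cacheability.
 */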
fbe5d36e 2699
f548c0e9 2700static int i915_gem_setup_global_gtt(struct drm_device *dev,
088e0df4
MT
2701 u64 start,
2702 u64 mappable_end,
2703 u64 end)
644ec02b 2704{
e78891ca
BW
 2705 /* Let GEM manage all of the aperture.
2706 *
2707 * However, leave one page at the end still bound to the scratch page.
2708 * There are a number of places where the hardware apparently prefetches
2709 * past the end of the object, and we've seen multiple hangs with the
2710 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2711 * aperture. One page should be enough to keep any prefetching inside
2712 * of the aperture.
2713 */
72e96d64
JL
2714 struct drm_i915_private *dev_priv = to_i915(dev);
2715 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452
CW
2716 struct drm_mm_node *entry;
2717 struct drm_i915_gem_object *obj;
2718 unsigned long hole_start, hole_end;
fa76da34 2719 int ret;
644ec02b 2720
35451cb6
BW
2721 BUG_ON(mappable_end > end);
2722
72e96d64 2723 ggtt->base.start = start;
5dda8fa3 2724
a2cad9df
MW
2725 /* Subtract the guard page before address space initialization to
2726 * shrink the range used by drm_mm */
72e96d64
JL
2727 ggtt->base.total = end - start - PAGE_SIZE;
2728 i915_address_space_init(&ggtt->base, dev_priv);
2729 ggtt->base.total += PAGE_SIZE;
5dda8fa3
YZ
2730
2731 if (intel_vgpu_active(dev)) {
2732 ret = intel_vgt_balloon(dev);
2733 if (ret)
2734 return ret;
2735 }
2736
42d6ab48 2737 if (!HAS_LLC(dev))
72e96d64 2738 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
644ec02b 2739
ed2f3452 2740 /* Mark any preallocated objects as occupied */
35c20a60 2741 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
72e96d64 2742 struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
fa76da34 2743
088e0df4 2744 DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
c6cfb325
BW
2745 i915_gem_obj_ggtt_offset(obj), obj->base.size);
2746
2747 WARN_ON(i915_gem_obj_ggtt_bound(obj));
72e96d64 2748 ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
6c5566a8
DV
2749 if (ret) {
2750 DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
2751 return ret;
2752 }
aff43766 2753 vma->bound |= GLOBAL_BIND;
d0710abb 2754 __i915_vma_set_map_and_fenceable(vma);
72e96d64 2755 list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
ed2f3452
CW
2756 }
2757
ed2f3452 2758 /* Clear any non-preallocated blocks */
72e96d64 2759 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2760 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2761 hole_start, hole_end);
72e96d64 2762 ggtt->base.clear_range(&ggtt->base, hole_start,
782f1495 2763 hole_end - hole_start, true);
ed2f3452
CW
2764 }
2765
2766 /* And finally clear the reserved guard page */
72e96d64 2767 ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
6c5566a8 2768
fa76da34
DV
2769 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
2770 struct i915_hw_ppgtt *ppgtt;
2771
2772 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2773 if (!ppgtt)
2774 return -ENOMEM;
2775
5c5f6457
DV
2776 ret = __hw_ppgtt_init(dev, ppgtt);
2777 if (ret) {
2778 ppgtt->base.cleanup(&ppgtt->base);
2779 kfree(ppgtt);
2780 return ret;
2781 }
2782
2783 if (ppgtt->base.allocate_va_range)
2784 ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
2785 ppgtt->base.total);
4933d519 2786 if (ret) {
061dd493 2787 ppgtt->base.cleanup(&ppgtt->base);
4933d519 2788 kfree(ppgtt);
fa76da34 2789 return ret;
4933d519 2790 }
fa76da34 2791
5c5f6457
DV
2792 ppgtt->base.clear_range(&ppgtt->base,
2793 ppgtt->base.start,
2794 ppgtt->base.total,
2795 true);
2796
fa76da34 2797 dev_priv->mm.aliasing_ppgtt = ppgtt;
72e96d64
JL
2798 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2799 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
fa76da34
DV
2800 }
2801
6c5566a8 2802 return 0;
e76e9aeb
BW
2803}
2804
d85489d3
JL
2805/**
2806 * i915_gem_init_ggtt - Initialize GEM for Global GTT
2807 * @dev: DRM device
2808 */
2809void i915_gem_init_ggtt(struct drm_device *dev)
d7e5008f 2810{
72e96d64
JL
2811 struct drm_i915_private *dev_priv = to_i915(dev);
2812 struct i915_ggtt *ggtt = &dev_priv->ggtt;
d7e5008f 2813
72e96d64 2814 i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
e76e9aeb
BW
2815}
2816
d85489d3
JL
2817/**
2818 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2819 * @dev: DRM device
2820 */
2821void i915_ggtt_cleanup_hw(struct drm_device *dev)
90d0a0e8 2822{
72e96d64
JL
2823 struct drm_i915_private *dev_priv = to_i915(dev);
2824 struct i915_ggtt *ggtt = &dev_priv->ggtt;
90d0a0e8 2825
70e32544
DV
2826 if (dev_priv->mm.aliasing_ppgtt) {
2827 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2828
2829 ppgtt->base.cleanup(&ppgtt->base);
2830 }
2831
a4eba47b
ID
2832 i915_gem_cleanup_stolen(dev);
2833
72e96d64 2834 if (drm_mm_initialized(&ggtt->base.mm)) {
5dda8fa3
YZ
2835 if (intel_vgpu_active(dev))
2836 intel_vgt_deballoon();
2837
72e96d64
JL
2838 drm_mm_takedown(&ggtt->base.mm);
2839 list_del(&ggtt->base.global_link);
90d0a0e8
DV
2840 }
2841
72e96d64 2842 ggtt->base.cleanup(&ggtt->base);
90d0a0e8 2843}
70e32544 2844
2c642b07 2845static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2846{
2847 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2848 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2849 return snb_gmch_ctl << 20;
2850}
2851
2c642b07 2852static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2853{
2854 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2855 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2856 if (bdw_gmch_ctl)
2857 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2858
2859#ifdef CONFIG_X86_32
2860 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2861 if (bdw_gmch_ctl > 4)
2862 bdw_gmch_ctl = 4;
2863#endif
2864
9459d252
BW
2865 return bdw_gmch_ctl << 20;
2866}
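/*
 * Illustrative worked example (not part of the original file): the BDW GGMS
 * field is a power-of-two exponent, e.g.
 *
 *	bdw_gmch_ctl = 3  ->  (1 << 3) << 20 = 8MB of GGTT PTEs,
 *
 * and at 8 bytes per gen8 PTE that maps (8MB / 8) * 4KB = 4GB of address
 * space (cf. the total computation in gen8_gmch_probe() below).
 */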
2867
2c642b07 2868static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2869{
2870 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2871 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2872
2873 if (gmch_ctrl)
2874 return 1 << (20 + gmch_ctrl);
2875
2876 return 0;
2877}
2878
2c642b07 2879static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2880{
2881 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2882 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2883 return snb_gmch_ctl << 25; /* 32 MB units */
2884}
2885
2c642b07 2886static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2887{
2888 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2889 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2890 return bdw_gmch_ctl << 25; /* 32 MB units */
2891}
2892
d7f25f23
DL
2893static size_t chv_get_stolen_size(u16 gmch_ctrl)
2894{
2895 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2896 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2897
2898 /*
2899 * 0x0 to 0x10: 32MB increments starting at 0MB
2900 * 0x11 to 0x16: 4MB increments starting at 8MB
 2901 * 0x17 to 0x1d: 4MB increments starting at 36MB
2902 */
2903 if (gmch_ctrl < 0x11)
2904 return gmch_ctrl << 25;
2905 else if (gmch_ctrl < 0x17)
2906 return (gmch_ctrl - 0x11 + 2) << 22;
2907 else
2908 return (gmch_ctrl - 0x17 + 9) << 22;
2909}
2910
66375014
DL
2911static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2912{
2913 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2914 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2915
2916 if (gen9_gmch_ctl < 0xf0)
2917 return gen9_gmch_ctl << 25; /* 32 MB units */
2918 else
 2919 /* 4MB increments starting at 4MB for values >= 0xf0 */
2920 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2921}
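/*
 * Illustrative worked example (not part of the original file): decoding the
 * gen9 stolen-memory field for two hypothetical GMCH control values.
 *
 *	gen9_gmch_ctl = 0x02  ->  0x02 << 25              = 64MB (32MB units)
 *	gen9_gmch_ctl = 0xf1  ->  (0xf1 - 0xf0 + 1) << 22 =  8MB (4MB units)
 */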
2922
63340133
BW
2923static int ggtt_probe_common(struct drm_device *dev,
2924 size_t gtt_size)
2925{
72e96d64
JL
2926 struct drm_i915_private *dev_priv = to_i915(dev);
2927 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4ad2af1e 2928 struct i915_page_scratch *scratch_page;
72e96d64 2929 phys_addr_t ggtt_phys_addr;
63340133
BW
2930
2931 /* For Modern GENs the PTEs and register space are split in the BAR */
72e96d64
JL
2932 ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
2933 (pci_resource_len(dev->pdev, 0) / 2);
63340133 2934
2a073f89
ID
2935 /*
2936 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2937 * dropped. For WC mappings in general we have 64 byte burst writes
2938 * when the WC buffer is flushed, so we can't use it, but have to
2939 * resort to an uncached mapping. The WC issue is easily caught by the
2940 * readback check when writing GTT PTE entries.
2941 */
2942 if (IS_BROXTON(dev))
72e96d64 2943 ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
2a073f89 2944 else
72e96d64
JL
2945 ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
2946 if (!ggtt->gsm) {
63340133
BW
2947 DRM_ERROR("Failed to map the gtt page table\n");
2948 return -ENOMEM;
2949 }
2950
4ad2af1e
MK
2951 scratch_page = alloc_scratch_page(dev);
2952 if (IS_ERR(scratch_page)) {
63340133
BW
2953 DRM_ERROR("Scratch setup failed\n");
2954 /* iounmap will also get called at remove, but meh */
72e96d64 2955 iounmap(ggtt->gsm);
4ad2af1e 2956 return PTR_ERR(scratch_page);
63340133
BW
2957 }
2958
72e96d64 2959 ggtt->base.scratch_page = scratch_page;
4ad2af1e
MK
2960
2961 return 0;
63340133
BW
2962}
2963
fbe5d36e
BW
2964/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2965 * bits. When using advanced contexts each context stores its own PAT, but
2966 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 2967static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 2968{
fbe5d36e
BW
2969 uint64_t pat;
2970
2971 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2972 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2973 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2974 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2975 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2976 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2977 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2978 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2979
2d1fe073 2980 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
2981 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2982 * so RTL will always use the value corresponding to
2983 * pat_sel = 000".
2984 * So let's disable cache for GGTT to avoid screen corruptions.
2985 * MOCS still can be used though.
2986 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2987 * before this patch, i.e. the same uncached + snooping access
2988 * like on gen6/7 seems to be in effect.
2989 * - So this just fixes blitter/render access. Again it looks
2990 * like it's not just uncached access, but uncached + snooping.
2991 * So we can still hold onto all our assumptions wrt cpu
2992 * clflushing on LLC machines.
2993 */
2994 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2995
fbe5d36e
BW
2996 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2997 * write would work. */
7e435ad2
VS
2998 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2999 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
3000}
3001
ee0ce478
VS
3002static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
3003{
3004 uint64_t pat;
3005
3006 /*
3007 * Map WB on BDW to snooped on CHV.
3008 *
3009 * Only the snoop bit has meaning for CHV, the rest is
3010 * ignored.
3011 *
cf3d262e
VS
3012 * The hardware will never snoop for certain types of accesses:
3013 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3014 * - PPGTT page tables
3015 * - some other special cycles
3016 *
3017 * As with BDW, we also need to consider the following for GT accesses:
3018 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3019 * so RTL will always use the value corresponding to
3020 * pat_sel = 000".
3021 * Which means we must set the snoop bit in PAT entry 0
3022 * in order to keep the global status page working.
ee0ce478
VS
3023 */
3024 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3025 GEN8_PPAT(1, 0) |
3026 GEN8_PPAT(2, 0) |
3027 GEN8_PPAT(3, 0) |
3028 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3029 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3030 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3031 GEN8_PPAT(7, CHV_PPAT_SNOOP);
3032
7e435ad2
VS
3033 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3034 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
3035}
3036
d507d735 3037static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 3038{
d507d735 3039 struct drm_device *dev = ggtt->base.dev;
72e96d64 3040 struct drm_i915_private *dev_priv = to_i915(dev);
63340133
BW
3041 u16 snb_gmch_ctl;
3042 int ret;
3043
3044 /* TODO: We're not aware of mappable constraints on gen8 yet */
d507d735
JL
3045 ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
3046 ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
63340133
BW
3047
3048 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
3049 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
3050
3051 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3052
66375014 3053 if (INTEL_INFO(dev)->gen >= 9) {
d507d735
JL
3054 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
3055 ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
66375014 3056 } else if (IS_CHERRYVIEW(dev)) {
d507d735
JL
3057 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
3058 ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3059 } else {
d507d735
JL
3060 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
3061 ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3062 }
63340133 3063
d507d735 3064 ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 3065
5a4e33a3 3066 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
ee0ce478
VS
3067 chv_setup_private_ppat(dev_priv);
3068 else
3069 bdw_setup_private_ppat(dev_priv);
fbe5d36e 3070
d507d735 3071 ret = ggtt_probe_common(dev, ggtt->size);
63340133 3072
d507d735 3073 ggtt->base.clear_range = gen8_ggtt_clear_range;
c140330b 3074 if (IS_CHERRYVIEW(dev_priv))
d507d735
JL
3075 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3076 else
3077 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3078 ggtt->base.bind_vma = ggtt_bind_vma;
3079 ggtt->base.unbind_vma = ggtt_unbind_vma;
3080
63340133
BW
3081 return ret;
3082}
3083
d507d735 3084static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 3085{
d507d735 3086 struct drm_device *dev = ggtt->base.dev;
e76e9aeb 3087 u16 snb_gmch_ctl;
e76e9aeb
BW
3088 int ret;
3089
d507d735
JL
3090 ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
3091 ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
41907ddc 3092
baa09f5f
BW
3093 /* 64/512MB is the current min/max we actually know of, but this is just
3094 * a coarse sanity check.
e76e9aeb 3095 */
d507d735
JL
3096 if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
3097 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 3098 return -ENXIO;
e76e9aeb
BW
3099 }
3100
e76e9aeb
BW
3101 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
3102 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
e76e9aeb 3103 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 3104
d507d735
JL
3105 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
3106 ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
3107 ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 3108
d507d735 3109 ret = ggtt_probe_common(dev, ggtt->size);
e76e9aeb 3110
d507d735
JL
3111 ggtt->base.clear_range = gen6_ggtt_clear_range;
3112 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3113 ggtt->base.bind_vma = ggtt_bind_vma;
3114 ggtt->base.unbind_vma = ggtt_unbind_vma;
7faf1ab2 3115
e76e9aeb
BW
3116 return ret;
3117}
3118
853ba5d2 3119static void gen6_gmch_remove(struct i915_address_space *vm)
e76e9aeb 3120{
62106b4f 3121 struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
853ba5d2 3122
62106b4f 3123 iounmap(ggtt->gsm);
4ad2af1e 3124 free_scratch_page(vm->dev, vm->scratch_page);
644ec02b 3125}
baa09f5f 3126
d507d735 3127static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 3128{
d507d735 3129 struct drm_device *dev = ggtt->base.dev;
72e96d64 3130 struct drm_i915_private *dev_priv = to_i915(dev);
baa09f5f
BW
3131 int ret;
3132
baa09f5f
BW
3133 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
3134 if (!ret) {
3135 DRM_ERROR("failed to set up gmch\n");
3136 return -EIO;
3137 }
3138
d507d735
JL
3139 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
3140 &ggtt->mappable_base, &ggtt->mappable_end);
baa09f5f 3141
d507d735
JL
3142 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
3143 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3144 ggtt->base.clear_range = i915_ggtt_clear_range;
3145 ggtt->base.bind_vma = ggtt_bind_vma;
3146 ggtt->base.unbind_vma = ggtt_unbind_vma;
baa09f5f 3147
d507d735 3148 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
3149 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3150
baa09f5f
BW
3151 return 0;
3152}
3153
853ba5d2 3154static void i915_gmch_remove(struct i915_address_space *vm)
baa09f5f
BW
3155{
3156 intel_gmch_remove();
3157}
3158
d85489d3
JL
3159/**
3160 * i915_ggtt_init_hw - Initialize GGTT hardware
3161 * @dev: DRM device
3162 */
3163int i915_ggtt_init_hw(struct drm_device *dev)
baa09f5f 3164{
72e96d64 3165 struct drm_i915_private *dev_priv = to_i915(dev);
62106b4f 3166 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
3167 int ret;
3168
baa09f5f 3169 if (INTEL_INFO(dev)->gen <= 5) {
62106b4f
JL
3170 ggtt->probe = i915_gmch_probe;
3171 ggtt->base.cleanup = i915_gmch_remove;
63340133 3172 } else if (INTEL_INFO(dev)->gen < 8) {
62106b4f
JL
3173 ggtt->probe = gen6_gmch_probe;
3174 ggtt->base.cleanup = gen6_gmch_remove;
4d15c145 3175 if (IS_HASWELL(dev) && dev_priv->ellc_size)
62106b4f 3176 ggtt->base.pte_encode = iris_pte_encode;
4d15c145 3177 else if (IS_HASWELL(dev))
62106b4f 3178 ggtt->base.pte_encode = hsw_pte_encode;
b2f21b4d 3179 else if (IS_VALLEYVIEW(dev))
62106b4f 3180 ggtt->base.pte_encode = byt_pte_encode;
350ec881 3181 else if (INTEL_INFO(dev)->gen >= 7)
62106b4f 3182 ggtt->base.pte_encode = ivb_pte_encode;
b2f21b4d 3183 else
62106b4f 3184 ggtt->base.pte_encode = snb_pte_encode;
63340133 3185 } else {
62106b4f
JL
3186 ggtt->probe = gen8_gmch_probe;
3187 ggtt->base.cleanup = gen6_gmch_remove;
baa09f5f
BW
3188 }
3189
62106b4f
JL
3190 ggtt->base.dev = dev;
3191 ggtt->base.is_ggtt = true;
c114f76a 3192
d507d735 3193 ret = ggtt->probe(ggtt);
a54c0c27 3194 if (ret)
baa09f5f 3195 return ret;
baa09f5f 3196
c890e2d5
CW
3197 if ((ggtt->base.total - 1) >> 32) {
3198 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3199 "of address space! Found %lldM!\n",
3200 ggtt->base.total >> 20);
3201 ggtt->base.total = 1ULL << 32;
3202 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3203 }
3204
a4eba47b
ID
3205 /*
3206 * Initialise stolen early so that we may reserve preallocated
3207 * objects for the BIOS to KMS transition.
3208 */
3209 ret = i915_gem_init_stolen(dev);
3210 if (ret)
3211 goto out_gtt_cleanup;
3212
baa09f5f 3213 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3214 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3215 ggtt->base.total >> 20);
3216 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
3217 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
5db6c735
DV
3218#ifdef CONFIG_INTEL_IOMMU
3219 if (intel_iommu_gfx_mapped)
3220 DRM_INFO("VT-d active for gfx access\n");
3221#endif
cfa7c862
DV
3222 /*
3223 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3224 * user's requested state against the hardware/driver capabilities. We
3225 * do this now so that we can print out any log messages once rather
3226 * than every time we check intel_enable_ppgtt().
3227 */
3228 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3229 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
baa09f5f
BW
3230
3231 return 0;
a4eba47b
ID
3232
3233out_gtt_cleanup:
72e96d64 3234 ggtt->base.cleanup(&ggtt->base);
a4eba47b
ID
3235
3236 return ret;
baa09f5f 3237}
6f65e29a 3238
fa42331b
DV
3239void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3240{
72e96d64
JL
3241 struct drm_i915_private *dev_priv = to_i915(dev);
3242 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fa42331b 3243 struct drm_i915_gem_object *obj;
2c3d9984
TU
3244 struct i915_vma *vma;
3245 bool flush;
fa42331b
DV
3246
3247 i915_check_and_clear_faults(dev);
3248
3249 /* First fill our portion of the GTT with scratch pages */
72e96d64
JL
3250 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
3251 true);
fa42331b 3252
2c3d9984 3253 /* Cache flush objects bound into GGTT and rebind them. */
fa42331b 3254 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2c3d9984 3255 flush = false;
1c7f4bca 3256 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3257 if (vma->vm != &ggtt->base)
2c3d9984 3258 continue;
fa42331b 3259
2c3d9984
TU
3260 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3261 PIN_UPDATE));
fa42331b 3262
2c3d9984
TU
3263 flush = true;
3264 }
3265
3266 if (flush)
3267 i915_gem_clflush_object(obj, obj->pin_display);
3268 }
fa42331b
DV
3269
3270 if (INTEL_INFO(dev)->gen >= 8) {
3271 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3272 chv_setup_private_ppat(dev_priv);
3273 else
3274 bdw_setup_private_ppat(dev_priv);
3275
3276 return;
3277 }
3278
3279 if (USES_PPGTT(dev)) {
72e96d64
JL
3280 struct i915_address_space *vm;
3281
fa42331b
DV
3282 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3283 /* TODO: Perhaps it shouldn't be gen6 specific */
3284
e5716f55 3285 struct i915_hw_ppgtt *ppgtt;
fa42331b 3286
e5716f55 3287 if (vm->is_ggtt)
fa42331b 3288 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3289 else
3290 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b
DV
3291
3292 gen6_write_page_range(dev_priv, &ppgtt->pd,
3293 0, ppgtt->base.total);
3294 }
3295 }
3296
3297 i915_ggtt_flush(dev_priv);
3298}
3299
ec7adb6e
JL
3300static struct i915_vma *
3301__i915_gem_vma_create(struct drm_i915_gem_object *obj,
3302 struct i915_address_space *vm,
3303 const struct i915_ggtt_view *ggtt_view)
6f65e29a 3304{
dabde5c7 3305 struct i915_vma *vma;
6f65e29a 3306
ec7adb6e
JL
3307 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3308 return ERR_PTR(-EINVAL);
e20d2ab7
CW
3309
3310 vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
dabde5c7
DC
3311 if (vma == NULL)
3312 return ERR_PTR(-ENOMEM);
ec7adb6e 3313
1c7f4bca
CW
3314 INIT_LIST_HEAD(&vma->vm_link);
3315 INIT_LIST_HEAD(&vma->obj_link);
6f65e29a
BW
3316 INIT_LIST_HEAD(&vma->exec_list);
3317 vma->vm = vm;
3318 vma->obj = obj;
596c5923 3319 vma->is_ggtt = i915_is_ggtt(vm);
6f65e29a 3320
777dc5bb 3321 if (i915_is_ggtt(vm))
ec7adb6e 3322 vma->ggtt_view = *ggtt_view;
596c5923
CW
3323 else
3324 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
6f65e29a 3325
1c7f4bca 3326 list_add_tail(&vma->obj_link, &obj->vma_list);
6f65e29a
BW
3327
3328 return vma;
3329}
3330
3331struct i915_vma *
ec7adb6e
JL
3332i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3333 struct i915_address_space *vm)
3334{
3335 struct i915_vma *vma;
3336
3337 vma = i915_gem_obj_to_vma(obj, vm);
3338 if (!vma)
3339 vma = __i915_gem_vma_create(obj, vm,
3340 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
3341
3342 return vma;
3343}
3344
3345struct i915_vma *
3346i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
fe14d5f4 3347 const struct i915_ggtt_view *view)
6f65e29a 3348{
72e96d64
JL
3349 struct drm_device *dev = obj->base.dev;
3350 struct drm_i915_private *dev_priv = to_i915(dev);
3351 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ade7daa1 3352 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
ec7adb6e 3353
6f65e29a 3354 if (!vma)
72e96d64 3355 vma = __i915_gem_vma_create(obj, &ggtt->base, view);
6f65e29a
BW
3356
3357 return vma;
ec7adb6e 3358
6f65e29a 3359}

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}
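
/*
 * Illustrative sketch only (standalone userspace C, not driver code):
 * walk page indices exactly as rotate_pages() does, for a width=2,
 * height=2, stride=2 grid. Source pages laid out row-major as
 *
 *	0 1
 *	2 3
 *
 * come out in the order 2 0 3 1, i.e. the grid rotated by 90 degrees:
 *
 *	2 0
 *	3 1
 *
 * read row by row. rotate_order() is a hypothetical helper for this
 * demo only.
 */
#include <stdio.h>

static void rotate_order(unsigned int width, unsigned int height,
			 unsigned int stride)
{
	unsigned int column, row, src_idx;

	for (column = 0; column < width; column++) {
		/* Start at the bottom of the column, walk upwards. */
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			printf("%u ", src_idx);	/* index into source pages */
			src_idx -= stride;
		}
	}
	printf("\n");
}

int main(void)
{
	rotate_order(2, 2, 2);	/* prints: 2 0 3 1 */
	return 0;
}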

static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
	unsigned int size_pages_uv;
	struct sg_page_iter sg_iter;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	unsigned int uv_start_page;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
				       sizeof(dma_addr_t));
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Account for UV plane with NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12)
		size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
	else
		size_pages_uv = 0;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
		i++;
	}

	st->nents = 0;
	sg = st->sgl;

	/* Rotate the pages. */
	sg = rotate_pages(page_addr_list, 0,
			  rot_info->plane[0].width, rot_info->plane[0].height,
			  rot_info->plane[0].width,
			  st, sg);

	/* Append the UV plane if NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
		uv_start_page = size_pages;

		/* Check for tile-row un-alignment. */
		if (offset_in_page(rot_info->uv_offset))
			uv_start_page--;

		rot_info->uv_start_page = uv_start_page;

		sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
				  rot_info->plane[1].width, rot_info->plane[1].height,
				  rot_info->plane[1].width,
				  st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
		      obj->base.size, rot_info->plane[0].width,
		      rot_info->plane[0].height, size_pages + size_pages_uv,
		      size_pages);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
		      obj->base.size, ret, rot_info->plane[0].width,
		      rot_info->plane[0].height, size_pages + size_pages_uv,
		      size_pages);
	return ERR_PTR(ret);
}
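
/*
 * Worked example for the NV12 accounting above (numbers assumed purely
 * for illustration): a Y plane of 4x4 page tiles and a UV plane of 2x2
 * need 16 + 4 = 20 sg entries. The UV walk normally starts at page 16,
 * but when uv_offset is not page aligned the UV data shares a tile row
 * with the tail of the Y plane, so uv_start_page is pulled back by one.
 * A standalone userspace sketch of that adjustment (uv_start() is a
 * hypothetical helper, 4 KiB pages assumed):
 */
#include <stdio.h>

static unsigned int uv_start(unsigned int y_pages, unsigned int uv_offset)
{
	unsigned int start = y_pages;

	if (uv_offset & 0xfff)	/* mimics offset_in_page() for 4 KiB pages */
		start--;

	return start;
}

int main(void)
{
	printf("aligned:   %u\n", uv_start(16, 16 * 4096));		/* 16 */
	printf("unaligned: %u\n", uv_start(16, 16 * 4096 - 2048));	/* 15 */
	return 0;
}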

static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter obj_sg_iter;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	sg = st->sgl;
	st->nents = 0;
	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
			 view->params.partial.offset) {
		if (st->nents >= view->params.partial.size)
			break;

		sg_set_page(sg, NULL, PAGE_SIZE, 0);
		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
		sg_dma_len(sg) = PAGE_SIZE;

		sg = sg_next(sg);
		st->nents++;
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
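
/*
 * Illustrative standalone sketch (userspace C, not driver code) of
 * what a partial view covers: size pages starting at page offset,
 * clamped to the object, mirroring the loop in intel_partial_pages()
 * above. partial_view_pages() is a hypothetical helper for this demo.
 */
#include <stdio.h>

static void partial_view_pages(unsigned int offset, unsigned int size,
			       unsigned int obj_pages)
{
	unsigned int page, nents = 0;

	for (page = offset; page < obj_pages && nents < size; page++) {
		printf("map object page %u\n", page);	/* one sg entry */
		nents++;
	}
}

int main(void)
{
	partial_view_pages(3, 2, 16);	/* maps pages 3 and 4 only */
	return 0;
}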

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->ggtt_view.pages =
			intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->ggtt_view.pages)) {
		ret = PTR_ERR(vma->ggtt_view.pages);
		vma->ggtt_view.pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	int ret;
	u32 bind_flags;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= LOCAL_BIND;

	if (flags & PIN_UPDATE)
		bind_flags |= vma->bound;
	else
		bind_flags &= ~vma->bound;

	if (bind_flags == 0)
		return 0;

	if (vma->bound == 0 && vma->vm->allocate_va_range) {
		/* XXX: i915_vma_pin() will fix this +- hack */
		vma->pin_count++;
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		vma->pin_count--;
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->bound |= bind_flags;

	return 0;
}
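
/*
 * Standalone sketch of the bind_flags bookkeeping above (bit values
 * assumed purely for this demo, not the driver's real definitions):
 * PIN_GLOBAL requests a GGTT binding, PIN_USER a PPGTT binding.
 * Without PIN_UPDATE, bits already set in vma->bound are masked out so
 * an already-bound VMA is left alone; with PIN_UPDATE the existing
 * bindings are rewritten instead.
 */
#include <stdio.h>

#define SK_GLOBAL_BIND	(1u << 0)	/* assumed values, sketch only */
#define SK_LOCAL_BIND	(1u << 1)
#define SK_PIN_GLOBAL	(1u << 0)
#define SK_PIN_USER	(1u << 1)
#define SK_PIN_UPDATE	(1u << 2)

static unsigned int sketch_bind_flags(unsigned int flags, unsigned int bound)
{
	unsigned int bind_flags = 0;

	if (flags & SK_PIN_GLOBAL)
		bind_flags |= SK_GLOBAL_BIND;
	if (flags & SK_PIN_USER)
		bind_flags |= SK_LOCAL_BIND;

	if (flags & SK_PIN_UPDATE)
		bind_flags |= bound;	/* rewrite what is already bound */
	else
		bind_flags &= ~bound;	/* skip what is already bound */

	return bind_flags;
}

int main(void)
{
	/* Already globally bound, plain PIN_GLOBAL: nothing to do (0). */
	printf("%u\n", sketch_bind_flags(SK_PIN_GLOBAL, SK_GLOBAL_BIND));

	/* Same request with PIN_UPDATE: GLOBAL_BIND is rewritten (1). */
	printf("%u\n", sketch_bind_flags(SK_PIN_GLOBAL | SK_PIN_UPDATE,
					 SK_GLOBAL_BIND));
	return 0;
}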

/**
 * i915_ggtt_view_size - Get the size of a GGTT view.
 * @obj: Object the view is of.
 * @view: The view in question.
 *
 * Return: The size of the GGTT view in bytes.
 */
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view)
{
	if (view->type == I915_GGTT_VIEW_NORMAL) {
		return obj->base.size;
	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
		return view->params.partial.size << PAGE_SHIFT;
	} else {
		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
		return obj->base.size;
	}
}
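
/*
 * Quick worked example for the size rules above (illustration only,
 * 4 KiB pages assumed, so PAGE_SHIFT == 12): a partial view of 5 pages
 * is 5 << 12 = 20480 bytes, while the normal view and any
 * unimplemented view type fall back to the full object size.
 */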