/* arch/x86/mm/pgtable.c */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
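
/*
 * Note: a successful pgtable_page_ctor() (split-ptlock init plus
 * page-table accounting) must always be balanced by a matching
 * pgtable_page_dtor(), as done in ___pte_free_tlb() below.
 */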

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
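
/*
 * Example (kernel command line, CONFIG_HIGHPTE kernels only):
 *
 *	userpte=nohigh
 *
 * keeps user page-table pages permanently mapped in lowmem; any other
 * argument is rejected with -EINVAL.
 */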

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
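
/*
 * The two helpers above borrow page->index, which is otherwise unused for
 * page-table pages, to remember which mm owns a pgd page; this lets code
 * that walks pgd_list (e.g. kernel-mapping synchronization) find the
 * owning mm again.
 */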

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}
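
/*
 * For reference: clone_pgd_range() is essentially just a memcpy() of pgd
 * entries, roughly
 *
 *	memcpy(dst, src, count * sizeof(pgd_t));
 *
 * so the kernel half of a new pgd starts out as a copy of swapper_pg_dir.
 */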

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * A list of all pgds is needed for non-PAE so that pageattr.c can
 * invalidate entries in both cached and uncached pgds; it is not needed
 * for PAE since the kernel pmd is shared. If PAE did not share the pmd,
 * a similar tactic would be needed. This is essentially codepath-based
 * locking against pageattr.c; it is the unique case in which a valid
 * change of kernel pagetables can't be lazily synchronized by vmalloc
 * faults. vmalloc faults work because attached pagetables are never
 * freed.
 * -- nyc
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}
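
/*
 * Note: preallocate_pmds() keeps going after a failed allocation so that
 * every slot of pmds[] ends up initialized (failed slots are NULL);
 * free_pmds() can then walk the whole array unconditionally and free only
 * what was actually allocated.
 */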

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt assumes that the pgd table occupies a whole page, and the
 * 64-bit kernel makes the same assumption.
 *
 * But a kernel with PAE paging that is not running as a Xen domain only
 * needs 32 bytes for its pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd: 32 bytes suffice. Create a 32-byte slab for pgd table
	 * allocations at boot time.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If SHARED_KERNEL_PMD is off, the PAE kernel is running as a
	 * Xen domain and we allocate one whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise we are not running as a Xen domain and can allocate
	 * the pgd from the 32-byte slab to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else
static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}
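
/*
 * Note: pgd_free() unwinds pgd_alloc() in reverse: preallocated pmds are
 * released, the pgd is dropped from pgd_list, the paravirt hook runs, and
 * finally the pgd page (or slab object) itself is freed.
 */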

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif
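
/*
 * Note that, unlike ptep_clear_flush_young() above, the huge-pmd variant
 * does flush the TLB range when the accessed bit was actually cleared.
 */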

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}
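
/*
 * Example (hypothetical hypervisor port, very early in boot, before any
 * fixmap entry has been set):
 *
 *	reserve_top_address(16UL << 20);
 *
 * which moves the fixmap down and leaves a ~16MB hole (rounded to a PMD
 * boundary) at the very top of the 32-bit address space.
 */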

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr;

	/*
	 * Do not use a huge page when the range is covered by a non-WB
	 * type of MTRR.
	 */
	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
	if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr;

	/*
	 * Do not use a huge page when the range is covered by a non-WB
	 * type of MTRR.
	 */
	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
	if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
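
/*
 * Note on the 0xFF checks above: at this point mtrr_type_lookup() returns
 * 0xFF when no usable MTRR information is available (e.g. MTRRs disabled),
 * which is why it is accepted alongside MTRR_TYPE_WRBACK.
 */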

int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */