/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   vaddr;          /* virtual start address */
        pgprot_t        mask_set;       /* protection bits to set */
        pgprot_t        mask_clr;       /* protection bits to clear */
        int             numpages;       /* remaining pages to process */
        int             flushtlb;       /* set when a mapping was changed */
        unsigned long   pfn;            /* pfn of the page being changed */
};
static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}
/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        /* Fence before the flushes, per the ordering note above: */
        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        /* Fence again so later accesses are ordered against the flushes: */
        mb();
}
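/*
 * Illustrative usage sketch (not part of the original file, names are
 * hypothetical): a caller that modified data through one mapping and needs
 * the cache lines written back before another agent reads the memory can
 * flush just the touched range instead of the whole cache.
 */
static inline void example_flush_one_page(void *buf)
{
        clflush_cache_range(buf, PAGE_SIZE);
}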
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86_model >= 4)
                wbinvd();
}
static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}
static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone later on.
         * On 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}
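/*
 * Illustrative sketch (hypothetical helper, not in the original file): even
 * if a caller asks for _PAGE_NX on an address inside the kernel text,
 * static_protections() filters the bit back out, so the text mapping stays
 * executable.
 */
static pgprot_t example_request_nx(unsigned long address, unsigned long pfn)
{
        pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_NX);

        /* For an address within [_text, _etext) the NX bit is cleared again. */
        return static_protections(prot, address, pfn);
}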
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
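/*
 * Illustrative usage sketch (hypothetical helper, not in the original file):
 * query the current mapping of a kernel virtual address and report whether
 * it is backed by a large (2M/1G) page.
 */
static bool example_addr_uses_large_page(unsigned long addr)
{
        unsigned int level;
        pte_t *pte = lookup_address(addr, &level);

        if (!pte || !(pte_val(*pte) & _PAGE_PRESENT))
                return false;

        return level == PG_LEVEL_2M || level == PG_LEVEL_1G;
}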
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* Update the kernel reference page table (init_mm) first: */
        set_pte_atomic(kpte, pte);
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
}
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been updated
         * above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check whether we can
         * change the large page in one go. We request a split when
         * the address is not aligned or the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flushtlb = 1;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}
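/*
 * Worked example of the preservation check above (illustrative numbers, not
 * from the original file): for a 2M page (psize = 0x200000) and a request
 * starting at address = 0xffff810000180000 for 1024 pages,
 *
 *   nextpage_addr = (address + psize) & pmask = 0xffff810000200000
 *   numpages      = (nextpage_addr - address) >> PAGE_SHIFT
 *                 = 0x80000 >> 12 = 128
 *
 * so cpa->numpages is clamped to 128: only the part of the request that fits
 * into this large page is handled in this iteration. Because the start is
 * not aligned to the 2M boundary, the mapping has to be split if the
 * protections actually change.
 */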
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;
static void cpa_fill_pool(struct page **ret)
{
        gfp_t gfp = GFP_KERNEL;
        unsigned long flags;
        struct page *p;

        /*
         * Avoid recursion (on debug-pagealloc) and also signal
         * our priority to get to these pagetables:
         */
        if (current->flags & PF_MEMALLOC)
                return;
        current->flags |= PF_MEMALLOC;

        /*
         * Allocate atomically from atomic contexts:
         */
        if (in_atomic() || irqs_disabled() || debug_pagealloc)
                gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

        while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
                /*
                 * If the call site needs a page right now, provide it:
                 */
                if (ret && !*ret) {
                        *ret = p;
                        pool_used++;
                        continue;
                }

                spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
                spin_unlock_irqrestore(&pgd_lock, flags);
        }

        current->flags &= ~PF_MEMALLOC;
}
#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16
void __init cpa_init(void)
{
        struct sysinfo si;
        unsigned long gb;

        si_meminfo(&si);
        /*
         * Calculate the number of pool pages:
         *
         * Convert totalram (nr of pages) to MiB and round to the next
         * GiB. Shift MiB to GiB and multiply the result by
         * POOL_PAGES_PER_GB:
         */
        if (debug_pagealloc) {
                gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
                pool_size = POOL_PAGES_PER_GB * gb;
        } else {
                pool_size = 1;
        }
        pool_pages = 0;
        pool_low = pool_size;

        cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
}
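/*
 * Worked example of the pool sizing above (illustrative numbers): with 4k
 * pages, SHIFT_MB = 20 - 12 = 8, so a machine with si.totalram = 1572864
 * pages (6 GiB) gives
 *
 *   totalram >> SHIFT_MB                  = 6144 MiB
 *   (6144 + ROUND_MB_GB) >> SHIFT_MB_GB   = 7167 >> 10 = 6
 *   pool_size = POOL_PAGES_PER_GB * 6     = 96 pages
 *
 * i.e. roughly 16 preallocated page-table pages per GiB of RAM.
 */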
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                base = NULL;
                cpa_fill_pool(&base);
                if (!base)
                        return -ENOMEM;

                spin_lock_irqsave(&pgd_lock, flags);
        } else {
                base = list_first_entry(&page_pool, struct page, lru);
                list_del(&base->lru);
                pool_pages--;

                if (pool_pages < pool_low)
                        pool_low = pool_pages;
        }

        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));

        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        /*
         * Install the new, split up pagetable. Important details here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual).
         *
         * Mark the entry present. The current mapping might be
         * set to not present, which we preserved above.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        pgprot_val(ref_prot) |= _PAGE_PRESENT;
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base) {
                list_add(&base->lru, &page_pool);
                pool_pages++;
        } else {
                pool_used++;
        }
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}
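/*
 * Worked example for the split above (illustrative): splitting a 2M page
 * fills all PTRS_PER_PTE entries of the new page table with 4k pages, so
 * pfninc stays 1. Splitting a 1G page on 64bit instead produces 2M entries,
 * so pfninc is PMD_PAGE_SIZE >> PAGE_SHIFT = 0x200000 >> 12 = 512 and each
 * new entry keeps _PAGE_PSE set.
 */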
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address = cpa->vaddr;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return primary ? -EINVAL : 0;

        old_pte = *kpte;
        if (!pte_val(old_pte)) {
                if (!primary)
                        return 0;
                printk(KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
                       cpa->vaddr);
                WARN_ON_ONCE(1);
                return -EINVAL;
        }

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));

                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flushtlb = 1;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flushtlb have been updated in
         * try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err)
                goto repeat;

        return err;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        int ret = 0;

        if (cpa->pfn > max_pfn_mapped)
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (!within(cpa->vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }

        if (ret)
                return ret;

        /*
         * No need to redo, when the primary call touched the high
         * mapping already:
         */
        if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;

        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
                return 0;

        alias_cpa = *cpa;
        alias_cpa.vaddr =
                (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        /*
         * The high mapping range is imprecise, so ignore the return value.
         */
        __change_page_attr_set_clr(&alias_cpa, 0);

        return ret;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;

                ret = __change_page_attr(cpa, checkalias);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}
static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check if we are requested to change a feature that is not
         * supported:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (addr & ~PAGE_MASK) {
                addr &= PAGE_MASK;
                /*
                 * People should not be passing in unaligned addresses:
                 */
                WARN_ON_ONCE(1);
        }

        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flushtlb = 0;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!cpa.flushtlb)
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, or
         * in the error case, we fall back to cpa_flush_all (which
         * uses wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages, cache);
        else
                cpa_flush_all(cache);

out:
        cpa_fill_pool(NULL);

        return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}
static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK));
}
EXPORT_SYMBOL(set_memory_wb);
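/*
 * Illustrative usage sketch (hypothetical driver code, not part of the
 * original file): mark a buffer uncached while a device owns it, then
 * restore write-back caching afterwards.
 */
static int example_make_buffer_uncached(void *buf, int numpages)
{
        return set_memory_uc((unsigned long)buf, numpages);
}

static int example_make_buffer_cached(void *buf, int numpages)
{
        return set_memory_wb((unsigned long)buf, numpages);
}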
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);
int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
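/*
 * Illustrative usage sketch (hypothetical helper, not part of the original
 * file): write-protect a region for a while and restore it afterwards, for
 * example to catch stray writes while debugging.
 */
static int example_write_protect(unsigned long addr, int numpages, bool protect)
{
        return protect ? set_memory_ro(addr, numpages)
                       : set_memory_rw(addr, numpages);
}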
int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}
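/*
 * Illustrative usage sketch (hypothetical, not part of the original file):
 * the struct page based wrappers above are convenient when the caller holds
 * pages from alloc_pages() rather than a virtual address range.
 */
static int example_make_page_nx(struct page *page)
{
        return set_pages_nx(page, 1);
}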
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0)};

        return __change_page_attr_set_clr(&cpa, 1);
}
static int __set_pages_np(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

        return __change_page_attr_set_clr(&cpa, 1);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;

        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages are kept enabled at boot time, and are
         * split up quickly with DEBUG_PAGEALLOC. If a splitup
         * fails here (due to temporary memory shortage) no damage
         * is done because we just keep the largepage intact up
         * to the next attempt when it will likely be split up:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock, so flush only the current CPU:
         */
        __flush_tlb_all();

        /*
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
        cpa_fill_pool(NULL);
}
#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
        seq_puts(m, "DEBUG_PAGEALLOC\n");
        seq_printf(m, "pool_size   : %lu\n", pool_size);
        seq_printf(m, "pool_pages  : %lu\n", pool_pages);
        seq_printf(m, "pool_low    : %lu\n", pool_low);
        seq_printf(m, "pool_used   : %lu\n", pool_used);
        seq_printf(m, "pool_failed : %lu\n", pool_failed);

        return 0;
}
static int dpa_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
        .open		= dpa_open,
        .read		= seq_read,
        .llseek		= seq_lseek,
        .release	= single_release,
};

int __init debug_pagealloc_proc_init(void)
{
        struct dentry *de;

        de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
                                 &dpa_fops);
        if (!de)
                return -ENOMEM;

        return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif