/*
 * NOTE(review): this file was recovered from a gitweb blame view of
 * include/asm-s390/page.h.  The commit message ("Add __GFP_MOVABLE for
 * callers to flag allocations from high memory that may be migrated")
 * and the blame hash/author annotations are viewer artifacts, not part
 * of the header itself.
 */
1/*
2 * include/asm-s390/page.h
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com)
7 */
8
9#ifndef _S390_PAGE_H
10#define _S390_PAGE_H
11
1da177e4
LT
12#include <asm/types.h>
13
14/* PAGE_SHIFT determines the page size */
15#define PAGE_SHIFT 12
16#define PAGE_SIZE (1UL << PAGE_SHIFT)
17#define PAGE_MASK (~(PAGE_SIZE-1))
0b642ede
PO
18#define PAGE_DEFAULT_ACC 0
19#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
1da177e4
LT
20
21#ifdef __KERNEL__
274f5946 22#include <asm/setup.h>
1da177e4
LT
23#ifndef __ASSEMBLY__
24
1da177e4
LT
/*
 * Zero one 4 KB page using the MVCL (move long) instruction.
 *
 * MVCL with a zero-length source pads the destination with the pad
 * byte (0 here, from bits of r1).  The instruction implicitly uses
 * even/odd register pairs, so the operands are pinned to specific
 * hardware registers: r2 = destination address, r3 = destination
 * length (4096), r1 = source length 0 / pad byte 0.
 */
static inline void clear_page(void *page)
{
	register unsigned long reg1 asm ("1") = 0;
	register void *reg2 asm ("2") = page;
	register unsigned long reg3 asm ("3") = 4096;
	asm volatile(
		"	mvcl 2,0"
		/* r2/r3 are updated by MVCL, hence "+d"; memory is written */
		: "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc");
}
34
/*
 * Copy one 4 KB page from 'from' to 'to'.
 *
 * If the machine supports MVPG (move page), use it: a single
 * instruction that moves a whole page; r0 = 0 selects default
 * key/condition handling.  Otherwise fall back to sixteen 256-byte
 * MVC (move character) instructions covering the full 4096 bytes
 * (MVC's length field maxes out at 256 bytes per instruction).
 */
static inline void copy_page(void *to, void *from)
{
	if (MACHINE_HAS_MVPG) {
		register unsigned long reg0 asm ("0") = 0;
		asm volatile(
			"	mvpg %0,%1"
			: : "a" (to), "a" (from), "d" (reg0)
			: "memory", "cc");
	} else
		asm volatile(
			"	mvc 0(256,%0),0(%1)\n"
			"	mvc 256(256,%0),256(%1)\n"
			"	mvc 512(256,%0),512(%1)\n"
			"	mvc 768(256,%0),768(%1)\n"
			"	mvc 1024(256,%0),1024(%1)\n"
			"	mvc 1280(256,%0),1280(%1)\n"
			"	mvc 1536(256,%0),1536(%1)\n"
			"	mvc 1792(256,%0),1792(%1)\n"
			"	mvc 2048(256,%0),2048(%1)\n"
			"	mvc 2304(256,%0),2304(%1)\n"
			"	mvc 2560(256,%0),2560(%1)\n"
			"	mvc 2816(256,%0),2816(%1)\n"
			"	mvc 3072(256,%0),3072(%1)\n"
			"	mvc 3328(256,%0),3328(%1)\n"
			"	mvc 3584(256,%0),3584(%1)\n"
			"	mvc 3840(256,%0),3840(%1)\n"
			: : "a" (to), "a" (from) : "memory");
}
63
1da177e4
LT
/*
 * User-page variants: s390 has no cache aliasing/coloring concerns,
 * so these reduce to the plain page operations; vaddr and the page
 * struct are ignored.
 */
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/*
 * Allocate a zeroed user highpage for the given vma/address.
 * 'movableflags' lets callers pass __GFP_MOVABLE so the page may be
 * placed in a migratable zone.
 */
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
70
1da177e4
LT
/*
 * Page-table entry types wrapped in single-member structs so the
 * compiler type-checks pte/pmd/pgd values instead of treating them
 * as interchangeable unsigned longs.
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;

#define pte_val(x)      ((x).pte)
#define pgprot_val(x)   ((x).pgprot)

#ifndef __s390x__

/* 31-bit: one-word pmd; pgd is four words (segment-table origin span) */
typedef struct { unsigned long pmd; } pmd_t;
typedef struct {
        unsigned long pgd0;
        unsigned long pgd1;
        unsigned long pgd2;
        unsigned long pgd3;
        } pgd_t;

#define pmd_val(x)      ((x).pmd)
#define pgd_val(x)      ((x).pgd0)

#else /* __s390x__ */

/* 64-bit: two-word pmd; pgd is a single word */
typedef struct {
        unsigned long pmd0;
        unsigned long pmd1;
        } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pmd_val(x)      ((x).pmd0)
#define pmd_val1(x)     ((x).pmd1)
#define pgd_val(x)      ((x).pgd)

#endif /* __s390x__ */

/* Constructors: wrap a raw value in the corresponding typed struct */
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

/* default storage key used for all pages */
extern unsigned int default_storage_key;
115
/*
 * Set the hardware storage key of the page containing 'addr' via the
 * SSKE (set storage key extended) instruction.  'skey' carries the
 * access-control bits plus fetch/reference/change flags.
 */
static inline void
page_set_storage_key(unsigned long addr, unsigned int skey)
{
	asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}
121
/*
 * Read the hardware storage key of the page containing 'addr' via the
 * ISKE (insert storage key extended) instruction.  The "0" (0)
 * constraint pre-zeroes the output register so the upper bits of the
 * returned key are well defined.
 */
static inline unsigned int
page_get_storage_key(unsigned long addr)
{
	unsigned int skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
	return skey;
}
130
f4eb07c1
HC
/* Highest page frame number present; defined in arch setup code */
extern unsigned long max_pfn;

/*
 * Return non-zero if 'pfn' refers to a page frame that actually
 * exists.  After the cheap max_pfn bound check, probe the frame with
 * LRA (load real address): IPM copies the resulting condition code
 * into bits 31-28 of %1 and SRL shifts it down, so ccode == 0 means
 * the address translated successfully, i.e. the frame is present.
 */
static inline int pfn_valid(unsigned long pfn)
{
	unsigned long dummy;
	int ccode;

	if (pfn >= max_pfn)
		return 0;

	asm volatile(
		"	lra %0,0(%2)\n"
		"	ipm %1\n"
		"	srl %1,28\n"
		: "=d" (dummy), "=d" (ccode)
		: "a" (pfn << PAGE_SHIFT)
		: "cc");
	return !ccode;
}
150
1da177e4
LT
151#endif /* !__ASSEMBLY__ */
152
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

/*
 * Kernel virtual == physical on s390 (no linear-mapping offset), so
 * __pa/__va are plain casts and PAGE_OFFSET is zero.
 */
#define __PAGE_OFFSET           0x0UL
#define PAGE_OFFSET             0x0UL
#define __pa(x)                 (unsigned long)(x)
#define __va(x)                 (void *)(unsigned long)(x)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/* Default protections for VM_DATA mappings: fully accessible */
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
166
aed63043 167#include <asm-generic/memory_model.h>
fd4fd5aa
SR
168#include <asm-generic/page.h>
169
274f5946
DW
170#endif /* __KERNEL__ */
171
1da177e4 172#endif /* _S390_PAGE_H */
/* NOTE(review): gitweb page-generation footer removed; not part of the header. */