/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* see the MPOL_* modes in uapi/linux/mempolicy.h */
	unsigned short flags;	/* see the set_mempolicy() MPOL_F_* flags there */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
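
/*
 * Illustrative sketch only, not part of the original header: how the mode
 * field selects the meaning of the v union above for an interleave policy.
 * "pol" is a hypothetical, already-allocated mempolicy; in the real kernel
 * policies are constructed by mpol_new() in mm/mempolicy.c.
 *
 *	pol->mode = MPOL_INTERLEAVE;	// selects the v.nodes union arm
 *	pol->flags = 0;
 *	nodes_clear(pol->v.nodes);
 *	node_set(0, pol->v.nodes);	// interleave across nodes 0 and 1
 *	node_set(1, pol->v.nodes);
 */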

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
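
/*
 * Illustrative sketch only, not part of the original header: the intended
 * get/put pairing.  A hypothetical caller pins a policy for the duration of
 * its use; the final mpol_put() drops the refcount to zero and frees the
 * object via __mpol_put().
 *
 *	struct mempolicy *pol = vma_policy(vma);
 *
 *	mpol_get(pol);			// take our own reference
 *	// ... allocate pages according to pol ...
 *	mpol_put(pol);			// drop it; the last put frees pol
 */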

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	struct mutex mutex;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
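
/*
 * Illustrative sketch only, not part of the original header: because the
 * shared-policy tree is indexed in pages rather than bytes, a hypothetical
 * caller converts a byte offset to a page index before the lookup:
 *
 *	pgoff_t idx = offset >> PAGE_SHIFT;	// bytes -> pages
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *
 *	// shared policies carry MPOL_F_SHARED, so drop the reference
 *	// the lookup took once we are done:
 *	mpol_cond_put(pol);
 */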

struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);
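
/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * caller migrating a task's pages from node 0 to node 1, using the
 * MPOL_MF_MOVE flag from uapi/linux/mempolicy.h:
 *
 *	nodemask_t from, to;
 *
 *	nodes_clear(from);
 *	node_set(0, from);		// source node
 *	nodes_clear(to);
 *	node_set(1, to);		// destination node
 *	err = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */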

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}

#endif /* CONFIG_NUMA */
#endif