#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <asm/atomic.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif

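#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * MAX_ORDER bounds the buddy allocator, which manages blocks of
 * 2^0 .. 2^(MAX_ORDER-1) contiguous pages.  With the default value of 11
 * the largest single allocation is 1024 pages, i.e. 4MB with 4KB pages.
 */
static inline unsigned long example_max_buddy_bytes(unsigned long page_size)
{
	return (1UL << (MAX_ORDER - 1)) * page_size;	/* 1024 * 4096 = 4MB */
}
#endif
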
struct free_area {
	struct list_head	free_list;
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_maxaligned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int low;		/* low watermark, refill needed */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};
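
#if 0
/*
 * Illustrative sketch only (hypothetical helpers, not part of this header):
 * how the count/low/high/batch fields are meant to interact.  When the
 * per-cpu list runs low it is refilled in 'batch'-sized chunks from the
 * buddy lists; once it grows past 'high' a batch of pages is drained back.
 */
static inline int example_pcp_needs_refill(struct per_cpu_pages *pcp)
{
	return pcp->count <= pcp->low;	/* take 'batch' pages from the buddy lists */
}

static inline int example_pcp_needs_drain(struct per_cpu_pages *pcp)
{
	return pcp->count >= pcp->high;	/* return 'batch' pages to the buddy lists */
}
#endif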

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
#ifdef CONFIG_NUMA
	unsigned long numa_hit;		/* allocated in intended node */
	unsigned long numa_miss;	/* allocated in non intended node */
	unsigned long numa_foreign;	/* was intended here, hit elsewhere */
	unsigned long interleave_hit;	/* interleaver preferred this zone */
	unsigned long local_node;	/* allocation from local node */
	unsigned long other_node;	/* allocation from other node */
#endif
} ____cacheline_aligned_in_smp;

#define ZONE_DMA		0
#define ZONE_NORMAL		1
#define ZONE_HIGHMEM		2

#define MAX_NR_ZONES		3	/* Sync this with ZONES_SHIFT */
#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */


/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  GFP_ZONEMASK defines which bits within
 * the gfp_mask should be considered as zone modifiers.  Each valid
 * combination of the zone modifier bits has a corresponding list
 * of zones (in node_zonelists).  Thus for two zone modifiers there
 * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
 * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
 * combinations of zone modifiers in "zone modifier space".
 */
#define GFP_ZONEMASK	0x03
/*
 * As an optimisation any zone modifier bits which are only valid when
 * no other zone modifier bits are set (loners) should be placed in
 * the highest order bits of this field.  This allows us to reduce the
 * extent of the zonelists thus saving space.  For example in the case
 * of three zone modifier bits, we could require up to eight zonelists.
 * If the leftmost zone modifier is a "loner" then the highest valid
 * zonelist would be four, allowing us to allocate only five zonelists.
 * Use the first form when the leftmost bit is not a "loner", otherwise
 * use the second.
 */
/* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
#define GFP_ZONETYPES	((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */

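#if 0
/*
 * Illustrative arithmetic only (not part of this header): with
 * GFP_ZONEMASK = 0x03 there are two zone modifier bits, so the non-loner
 * form would give 0x03 + 1 = 4 zonelists per node, while the loner form
 * above gives (0x03 + 1) / 2 + 1 = 3.  For the three-bit example in the
 * comment (mask 0x07) the two forms give 8 and 5 respectively.
 */
enum example_gfp_zonetypes_arith {
	EXAMPLE_NON_LONER_ZONETYPES	= GFP_ZONEMASK + 1,		/* 4 */
	EXAMPLE_LONER_ZONETYPES		= (GFP_ZONEMASK + 1) / 2 + 1,	/* 3 */
};
#endif
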
/*
 * On machines where it is needed (eg PCs) we divide physical memory
 * into multiple physical zones.  On a PC we have 3 zones:
 *
 * ZONE_DMA	  < 16 MB	ISA DMA capable memory
 * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
 * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
 */

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		free_pages;
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones even though
	 * there is plenty of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

	struct per_cpu_pageset	pageset[NR_CPUS];

	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
	struct free_area	free_area[MAX_ORDER];


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		nr_active;
	unsigned long		nr_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	int			all_unreclaimable; /* All pages pinned */

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to free_pages == pages_high.
	 *
	 * Access to both these fields is quite racy even on uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int temp_priority;
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_size	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible.  The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time.  So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd.  The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_size;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	struct page		*zone_mem_map;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	char			*name;
} ____cacheline_maxaligned_in_smp;
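
#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * how free_pages relates to the per-order buddy lists.  Summing
 * nr_free * 2^order over every free_area yields the pages currently held
 * in the buddy lists, which is essentially what zone->free_pages tracks.
 */
static inline unsigned long example_zone_buddy_pages(struct zone *zone)
{
	unsigned long pages = 0;
	int order;

	for (order = 0; order < MAX_ORDER; order++)
		pages += zone->free_area[order].nr_free << order;
	return pages;
}
#endif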


/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

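#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * at priority 12 a queue of one million pages yields roughly
 * 1000000 >> 12 = 244 pages per pass; as reclaim gets more desperate the
 * priority value drops towards 0 and ever larger fractions of the queue
 * are scanned, up to the whole queue.
 */
static inline unsigned long example_pages_to_scan(unsigned long queue_length,
						  int priority)
{
	return queue_length >> priority;
}
#endif
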
/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * Right now a zonelist takes up less than a cacheline. We never
 * modify it apart from boot-up, and only a few indices are used,
 * so despite the zonelist table being relatively big, the cache
 * footprint of this construct is very small.
 */
struct zonelist {
	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
};

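#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * a fallback walk over a zonelist.  The zones[] array is NULL delimited,
 * so callers simply try each zone in order until one can satisfy the
 * request; the free-page predicate used here is made up for the example.
 */
static inline struct zone *example_first_suitable_zone(struct zonelist *zonelist,
						       unsigned long min_free)
{
	struct zone **z;

	for (z = zonelist->zones; *z != NULL; z++)
		if ((*z)->free_pages >= min_free)
			return *z;
	return NULL;
}
#endif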

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[GFP_ZONETYPES];
	int nr_zones;
	struct page *node_mem_map;
	struct bootmem_data *bdata;
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	struct pglist_data *pgdat_next;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)

extern struct pglist_data *pgdat_list;

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int alloc_type, int can_try_harder, int gfp_high);

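#if 0
/*
 * Illustrative sketch only (not part of this header): one plausible way a
 * caller could consult the watermarks declared above before waking kswapd.
 * The zero alloc_type and flag arguments are made up for the example.
 */
static inline void example_wakeup_if_low(struct zone *z, int order)
{
	if (!zone_watermark_ok(z, order, z->pages_low, 0, 0, 0))
		wakeup_kswapd(z, order);
}
#endif
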
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

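#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * zone_idx() is plain pointer arithmetic within the node's node_zones[]
 * array, so the highmem zone of any node yields ZONE_HIGHMEM (2).
 */
static inline int example_zone_idx_is_highmem(struct pglist_data *pgdat)
{
	return zone_idx(&pgdat->node_zones[ZONE_HIGHMEM]) == ZONE_HIGHMEM; /* 1 */
}
#endif
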
/**
 * for_each_pgdat - helper macro to iterate over all nodes
 * @pgdat - pointer to a pg_data_t variable
 *
 * Meant to help with common loops of the form
 * pgdat = pgdat_list;
 * while(pgdat) {
 *	...
 *	pgdat = pgdat->pgdat_next;
 * }
 */
#define for_each_pgdat(pgdat) \
	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)

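#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * a typical use of for_each_pgdat(), summing the physical pages reported
 * by every node on the pgdat_list.
 */
static inline unsigned long example_total_present_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long pages = 0;

	for_each_pgdat(pgdat)
		pages += pgdat->node_present_pages;
	return pages;
}
#endif
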
/*
 * next_zone - helper magic for for_each_zone()
 * Thanks to William Lee Irwin III for this piece of ingenuity.
 */
static inline struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else if (pgdat->pgdat_next) {
		pgdat = pgdat->pgdat_next;
		zone = pgdat->node_zones;
	} else
		zone = NULL;

	return zone;
}

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.  This basically means for_each_zone() is an
 * easier to read version of this piece of code:
 *
 * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
 *	for (i = 0; i < MAX_NR_ZONES; ++i) {
 *		struct zone * z = pgdat->node_zones + i;
 *		...
 *	}
 */
#define for_each_zone(zone) \
	for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))

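#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * for_each_zone() walks every zone of every node via next_zone(), so a
 * global free-page count needs no hand-written nesting over nodes.
 */
static inline unsigned long example_global_free_pages(void)
{
	struct zone *zone;
	unsigned long free = 0;

	for_each_zone(zone)
		free += zone->free_pages;
	return free;
}
#endif
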
static inline int is_highmem_idx(int idx)
{
	return (idx == ZONE_HIGHMEM);
}

static inline int is_normal_idx(int idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);

#include <linux/topology.h>
/* Returns the number of the current Node. */
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))

#ifndef CONFIG_DISCONTIGMEM

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1
#define pfn_to_nid(pfn)		(0)

#else /* CONFIG_DISCONTIGMEM */

#include <asm/mmzone.h>

#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
/*
 * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
 * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
 */
#define MAX_NODES_SHIFT		6
#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define MAX_NODES_SHIFT		10
#endif

#endif /* !CONFIG_DISCONTIGMEM */

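#if 0
/*
 * Illustrative arithmetic only (not part of this header): on a 32 bit
 * page->flags field, 8 bits are set aside for node and zone information.
 * ZONES_SHIFT (2 bits) encodes up to 4 zones, leaving 8 - 2 = 6 bits for
 * the node number, i.e. at most 64 nodes, which is why MAX_NODES_SHIFT
 * is 6 in that configuration.
 */
enum example_flags_bit_budget {
	EXAMPLE_NODE_ZONE_BITS	= 8,
	EXAMPLE_NODE_BITS	= EXAMPLE_NODE_ZONE_BITS - ZONES_SHIFT,	/* 6 */
	EXAMPLE_MAX_NODES	= 1 << EXAMPLE_NODE_BITS,		/* 64 */
};
#endif
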
#if NODES_SHIFT > MAX_NODES_SHIFT
#error NODES_SHIFT > MAX_NODES_SHIFT
#endif

/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
#define MAX_ZONES_SHIFT		2

#if ZONES_SHIFT > MAX_ZONES_SHIFT
#error ZONES_SHIFT > MAX_ZONES_SHIFT
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */