Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* internal.h: mm/ internal definitions |
2 | * | |
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
0f8053a5 NP |
11 | #ifndef __MM_INTERNAL_H |
12 | #define __MM_INTERNAL_H | |
13 | ||
29f175d1 | 14 | #include <linux/fs.h> |
0f8053a5 | 15 | #include <linux/mm.h> |
e9b61f19 | 16 | #include <linux/pagemap.h> |
1da177e4 | 17 | |
dd56b046 MG |
18 | /* |
19 | * The set of flags that only affect watermark checking and reclaim | |
20 | * behaviour. This is used by the MM to obey the caller constraints | |
21 | * about IO, FS and watermark checking while ignoring placement | |
22 | * hints such as HIGHMEM usage. | |
23 | */ | |
24 | #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ | |
25 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | |
26 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) | |
27 | ||
28 | /* The GFP flags allowed during early boot */ | |
29 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) | |
30 | ||
31 | /* Control allocation cpuset and node placement constraints */ | |
32 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) | |
33 | ||
34 | /* Do not use these with a slab allocator */ | |
35 | #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) | |
36 | ||
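To make the intended composition concrete, here is a minimal sketch of how a slab-style caller might combine these masks; the helper name `sanitize_slab_gfp()` is hypothetical, and the real logic lives in the slab allocators and mm/page_alloc.c:

```c
/*
 * Hypothetical helper, for illustration only: honour the caller's
 * reclaim/IO/FS constraints while rejecting flags a slab allocator
 * must never see.
 */
static inline gfp_t sanitize_slab_gfp(gfp_t cache_gfp, gfp_t caller_gfp)
{
	/* reclaim behaviour comes from the caller alone */
	gfp_t gfp = (cache_gfp & ~GFP_RECLAIM_MASK) |
		    (caller_gfp & GFP_RECLAIM_MASK);

	BUG_ON(gfp & GFP_SLAB_BUG_MASK);	/* e.g. __GFP_HIGHMEM */
	return gfp;
}
```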
42b77728 JB |
37 | void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, |
38 | unsigned long floor, unsigned long ceiling); | |
39 | ||
7835e98b | 40 | static inline void set_page_count(struct page *page, int v) |
77a8a788 | 41 | { |
7835e98b NP |
42 | atomic_set(&page->_count, v); |
43 | } | |
44 | ||
29f175d1 FF |
45 | extern int __do_page_cache_readahead(struct address_space *mapping, |
46 | struct file *filp, pgoff_t offset, unsigned long nr_to_read, | |
47 | unsigned long lookahead_size); | |
48 | ||
49 | /* | |
50 | * Submit IO for the read-ahead request in file_ra_state. | |
51 | */ | |
52 | static inline unsigned long ra_submit(struct file_ra_state *ra, | |
53 | struct address_space *mapping, struct file *filp) | |
54 | { | |
55 | return __do_page_cache_readahead(mapping, filp, | |
56 | ra->start, ra->size, ra->async_size); | |
57 | } | |
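A usage sketch for ra_submit(); the window sizes below are arbitrary illustrations, not the adaptive heuristics of mm/readahead.c:

```c
/* Illustrative only: program a fixed readahead window and submit it. */
static unsigned long demo_ra_submit(struct file_ra_state *ra,
				    struct address_space *mapping,
				    struct file *filp, pgoff_t offset)
{
	ra->start = offset;	/* first page of the window */
	ra->size = 32;		/* read 32 pages */
	ra->async_size = 8;	/* async marker 8 pages before the end */
	return ra_submit(ra, mapping, filp);
}
```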
58 | ||
7835e98b NP |
59 | /* |
60 | * Turn a non-refcounted page (->_count == 0) into refcounted with | |
61 | * a count of one. | |
62 | */ | |
63 | static inline void set_page_refcounted(struct page *page) | |
64 | { | |
309381fe SL |
65 | VM_BUG_ON_PAGE(PageTail(page), page); |
66 | VM_BUG_ON_PAGE(atomic_read(&page->_count), page); | |
77a8a788 | 67 | set_page_count(page, 1); |
77a8a788 NP |
68 | } |
69 | ||
03f6462a HD |
70 | extern unsigned long highest_memmap_pfn; |
71 | ||
894bc310 LS |
72 | /* |
73 | * in mm/vmscan.c: | |
74 | */ | |
62695a84 | 75 | extern int isolate_lru_page(struct page *page); |
894bc310 | 76 | extern void putback_lru_page(struct page *page); |
6e543d57 | 77 | extern bool zone_reclaimable(struct zone *zone); |
62695a84 | 78 | |
6219049a BL |
79 | /* |
80 | * in mm/rmap.c: | |
81 | */ | |
82 | extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); | |
83 | ||
894bc310 LS |
84 | /* |
85 | * in mm/page_alloc.c | |
86 | */ | |
3c605096 | 87 | |
1a6d53a1 VB |
88 | /* |
89 | * Structure for holding the mostly immutable allocation parameters passed | |
90 | * between functions involved in allocations, including the alloc_pages* | |
91 | * family of functions. | |
92 | * | |
93 | * nodemask, migratetype and high_zoneidx are initialized only once in | |
94 | * __alloc_pages_nodemask() and then never change. | |
95 | * | |
96 | * zonelist, preferred_zone and classzone_idx are set first in | |
97 | * __alloc_pages_nodemask() for the fast path, and might be later changed | |
98 | * in __alloc_pages_slowpath(). All other functions pass the whole structure | |
99 | * by a const pointer. | |
100 | */ | |
101 | struct alloc_context { | |
102 | struct zonelist *zonelist; | |
103 | nodemask_t *nodemask; | |
104 | struct zone *preferred_zone; | |
105 | int classzone_idx; | |
106 | int migratetype; | |
107 | enum zone_type high_zoneidx; | |
c9ab0c4f | 108 | bool spread_dirty_pages; |
1a6d53a1 VB |
109 | }; |
110 | ||
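A hedged sketch of the one-time initialization described above; gfp_mask, zonelist and nodemask are assumed to be in scope as in the real caller, __alloc_pages_nodemask():

```c
/* Sketch: the fields fixed once for the lifetime of the allocation. */
struct alloc_context ac = {
	.high_zoneidx	= gfp_zone(gfp_mask),
	.zonelist	= zonelist,	/* fast path; may change in slowpath */
	.nodemask	= nodemask,
	.migratetype	= gfpflags_to_migratetype(gfp_mask),
	.spread_dirty_pages = (gfp_mask & __GFP_WRITE) != 0,
};
```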
3c605096 JK |
111 | /* |
112 | * Locate the struct page for both the matching buddy in our | |
113 | * pair (buddy1) and the combined order O+1 page they form (page). | |
114 | * | |
115 | * 1) Any buddy B1 will have an order O twin B2 which satisfies | |
116 | * the following equation: | |
117 | * B2 = B1 ^ (1 << O) | |
118 | * For example, if the starting buddy (buddy1) is #8, its order | |
119 | * 1 buddy is #10: | |
120 | * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 | |
121 | * | |
122 | * 2) Any buddy B will have an order O+1 parent P which | |
123 | * satisfies the following equation: | |
124 | * P = B & ~(1 << O) | |
125 | * | |
126 | * Assumption: *_mem_map is contiguous at least up to MAX_ORDER | |
127 | */ | |
128 | static inline unsigned long | |
129 | __find_buddy_index(unsigned long page_idx, unsigned int order) | |
130 | { | |
131 | return page_idx ^ (1 << order); | |
132 | } | |
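Both identities can be checked standalone in userspace; a self-contained verification of the worked example above:

```c
#include <assert.h>

int main(void)
{
	unsigned long b1 = 8, order = 1;
	unsigned long b2 = b1 ^ (1UL << order);		/* twin: 8 ^ 2 == 10 */
	unsigned long parent = b1 & ~(1UL << order);	/* order O+1 parent */

	assert(b2 == 10);
	assert(parent == 8 && parent == (b2 & ~(1UL << order)));
	return 0;
}
```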
133 | ||
134 | extern int __isolate_free_page(struct page *page, unsigned int order); | |
d70ddd7a MG |
135 | extern void __free_pages_bootmem(struct page *page, unsigned long pfn, |
136 | unsigned int order); | |
d00181b9 | 137 | extern void prep_compound_page(struct page *page, unsigned int order); |
8d22ba1b WF |
138 | #ifdef CONFIG_MEMORY_FAILURE |
139 | extern bool is_free_buddy_page(struct page *page); | |
140 | #endif | |
42aa83cb | 141 | extern int user_min_free_kbytes; |
20a0307c | 142 | |
ff9543fd MN |
143 | #if defined CONFIG_COMPACTION || defined CONFIG_CMA |
144 | ||
145 | /* | |
146 | * in mm/compaction.c | |
147 | */ | |
148 | /* | |
149 | * compact_control is used to track pages being migrated and the free pages | |
150 | * they are being migrated to during memory compaction. The free_pfn starts | |
151 | * at the end of a zone and migrate_pfn begins at the start. Movable pages | |
152 | * are moved to the end of a zone during a compaction run and the run | |
153 | * completes when free_pfn <= migrate_pfn | |
154 | */ | |
155 | struct compact_control { | |
156 | struct list_head freepages; /* List of free pages to migrate to */ | |
157 | struct list_head migratepages; /* List of pages being migrated */ | |
158 | unsigned long nr_freepages; /* Number of isolated free pages */ | |
159 | unsigned long nr_migratepages; /* Number of pages to migrate */ | |
160 | unsigned long free_pfn; /* isolate_freepages search base */ | |
161 | unsigned long migrate_pfn; /* isolate_migratepages search base */ | |
1a16718c | 162 | unsigned long last_migrated_pfn;/* Not yet flushed page being freed */ |
e0b9daeb | 163 | enum migrate_mode mode; /* Async or sync migration mode */ |
bb13ffeb | 164 | bool ignore_skip_hint; /* Scan blocks even if marked skip */ |
ff9543fd | 165 | int order; /* order a direct compactor needs */ |
6d7ce559 | 166 | const gfp_t gfp_mask; /* gfp mask of a direct compactor */ |
ebff3980 VB |
167 | const int alloc_flags; /* alloc flags of a direct compactor */ |
168 | const int classzone_idx; /* zone index of a direct compactor */ | |
ff9543fd | 169 | struct zone *zone; |
1f9efdef VB |
170 | int contended; /* Signal need_resched() or lock
171 | * contention detected during | |
be976572 VB |
172 | * compaction |
173 | */ | |
ff9543fd MN |
174 | }; |
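The termination rule from the comment, written out as a predicate; the helper name is illustrative, and the authoritative test is compact_finished() in mm/compaction.c:

```c
/* Illustrative: the two scanners have met and the run is complete. */
static inline bool cc_scanners_met(const struct compact_control *cc)
{
	return cc->free_pfn <= cc->migrate_pfn;
}
```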
175 | ||
176 | unsigned long | |
bb13ffeb MG |
177 | isolate_freepages_range(struct compact_control *cc, |
178 | unsigned long start_pfn, unsigned long end_pfn); | |
ff9543fd | 179 | unsigned long |
edc2ca61 VB |
180 | isolate_migratepages_range(struct compact_control *cc, |
181 | unsigned long low_pfn, unsigned long end_pfn); | |
2149cdae JK |
182 | int find_suitable_fallback(struct free_area *area, unsigned int order, |
183 | int migratetype, bool only_stealable, bool *can_steal); | |
ff9543fd MN |
184 | |
185 | #endif | |
0f8053a5 | 186 | |
48f13bf3 | 187 | /* |
6c14466c MG |
188 | * This function returns the order of a free page in the buddy system. In |
189 | * general, page_zone(page)->lock must be held by the caller to prevent the | |
190 | * page from being allocated in parallel and returning garbage as the order. | |
191 | * If a caller does not hold page_zone(page)->lock, it must guarantee that the | |
99c0fd5e VB |
192 | * page cannot be allocated or merged in parallel. Alternatively, it must |
193 | * handle invalid values gracefully, and use page_order_unsafe() below. | |
48f13bf3 | 194 | */ |
d00181b9 | 195 | static inline unsigned int page_order(struct page *page) |
48f13bf3 | 196 | { |
572438f9 | 197 | /* PageBuddy() must be checked by the caller */ |
48f13bf3 MG |
198 | return page_private(page); |
199 | } | |
b5a0e011 | 200 | |
99c0fd5e VB |
201 | /* |
202 | * Like page_order(), but for callers who cannot afford to hold the zone lock. | |
203 | * PageBuddy() should be checked first by the caller to minimize race window, | |
204 | * and invalid values must be handled gracefully. | |
205 | * | |
4db0c3c2 | 206 | * READ_ONCE is used so that if the caller assigns the result into a local |
99c0fd5e VB |
207 | * variable and e.g. tests it for valid range before using, the compiler cannot |
208 | * decide to remove the variable and re-read page_private(page) multiple | |
209 | * times, potentially observing different values in the tests and the actual | |
210 | * use of the result. | |
211 | */ | |
4db0c3c2 | 212 | #define page_order_unsafe(page) READ_ONCE(page_private(page)) |
99c0fd5e | 213 | |
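A usage sketch that follows the rules above: check PageBuddy() first, then range-check the snapshot before trusting it. The helper name is illustrative, patterned on the lockless probe in compaction:

```c
/* Illustrative: take one racy snapshot of the order, then validate it. */
static inline unsigned long page_order_checked(struct page *page)
{
	if (PageBuddy(page)) {
		unsigned long order = page_order_unsafe(page);

		if (order < MAX_ORDER)
			return order;	/* plausible, though still racy */
	}
	return 0;	/* 0 means "unknown" here, not "order-0" */
}
```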
4bbd4c77 KS |
214 | static inline bool is_cow_mapping(vm_flags_t flags) |
215 | { | |
216 | return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; | |
217 | } | |
218 | ||
30bdbb78 KK |
219 | /* |
220 | * These three helpers classify VMAs for virtual memory accounting. | |
221 | */ | |
222 | ||
223 | /* | |
224 | * Executable code area - executable, not writable, not stack | |
225 | */ | |
d977d56c KK |
226 | static inline bool is_exec_mapping(vm_flags_t flags) |
227 | { | |
30bdbb78 | 228 | return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; |
d977d56c KK |
229 | } |
230 | ||
30bdbb78 KK |
231 | /* |
232 | * Stack area - automatically grows in one direction | |
233 | * | |
234 | * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: | |
235 | * do_mmap() forbids all other combinations. | |
236 | */ | |
d977d56c KK |
237 | static inline bool is_stack_mapping(vm_flags_t flags) |
238 | { | |
30bdbb78 | 239 | return (flags & VM_STACK) == VM_STACK; |
d977d56c KK |
240 | } |
241 | ||
30bdbb78 KK |
242 | /* |
243 | * Data area - private, writable, not stack | |
244 | */ | |
d977d56c KK |
245 | static inline bool is_data_mapping(vm_flags_t flags) |
246 | { | |
30bdbb78 | 247 | return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; |
d977d56c KK |
248 | } |
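A sketch of how these predicates drive the per-mm counters, mirroring the intent of vm_stat_account(); treat the function below as illustrative rather than the kernel's exact code:

```c
/* Illustrative: bump the counter matching the mapping's class. */
static void demo_vm_stat_account(struct mm_struct *mm, vm_flags_t flags,
				 long npages)
{
	mm->total_vm += npages;
	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}
```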
249 | ||
6038def0 NK |
250 | /* mm/util.c */ |
251 | void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, | |
252 | struct vm_area_struct *prev, struct rb_node *rb_parent); | |
253 | ||
af8e3354 | 254 | #ifdef CONFIG_MMU |
fc05f566 | 255 | extern long populate_vma_page_range(struct vm_area_struct *vma, |
cea10a19 | 256 | unsigned long start, unsigned long end, int *nonblocking); |
af8e3354 HD |
257 | extern void munlock_vma_pages_range(struct vm_area_struct *vma, |
258 | unsigned long start, unsigned long end); | |
259 | static inline void munlock_vma_pages_all(struct vm_area_struct *vma) | |
260 | { | |
261 | munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); | |
262 | } | |
263 | ||
b291f000 | 264 | /* |
73848b46 | 265 | * must be called with vma's mmap_sem held for read or write, and page locked. |
b291f000 NP |
266 | */ |
267 | extern void mlock_vma_page(struct page *page); | |
ff6a6da6 | 268 | extern unsigned int munlock_vma_page(struct page *page); |
b291f000 NP |
269 | |
270 | /* | |
271 | * Clear the page's PageMlocked(). This can be useful in a situation where | |
272 | * we want to unconditionally remove a page from the pagecache -- e.g., | |
273 | * on truncation or freeing. | |
274 | * | |
275 | * It is legal to call this function for any page, mlocked or not. | |
276 | * If called for a page that is still mapped by mlocked vmas, all we do | |
277 | * is revert to lazy LRU behaviour -- semantics are not broken. | |
278 | */ | |
e6c509f8 | 279 | extern void clear_page_mlock(struct page *page); |
b291f000 NP |
280 | |
281 | /* | |
51afb12b HD |
282 | * mlock_migrate_page - called only from migrate_misplaced_transhuge_page() |
283 | * (because that does not go through the full procedure of migration ptes): | |
284 | * to migrate the Mlocked page flag; update statistics. | |
b291f000 NP |
285 | */ |
286 | static inline void mlock_migrate_page(struct page *newpage, struct page *page) | |
287 | { | |
5344b7e6 | 288 | if (TestClearPageMlocked(page)) { |
b32967ff | 289 | int nr_pages = hpage_nr_pages(page); |
5344b7e6 | 290 | |
51afb12b | 291 | /* Holding pmd lock, no change in irq context: __mod is safe */ |
b32967ff | 292 | __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); |
b291f000 | 293 | SetPageMlocked(newpage); |
b32967ff | 294 | __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages); |
5344b7e6 | 295 | } |
b291f000 NP |
296 | } |
297 | ||
b32967ff MG |
298 | extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); |
299 | ||
e9b61f19 KS |
300 | /* |
301 | * At what user virtual address is page expected in @vma? | |
302 | */ | |
303 | static inline unsigned long | |
304 | __vma_address(struct page *page, struct vm_area_struct *vma) | |
305 | { | |
306 | pgoff_t pgoff = page_to_pgoff(page); | |
307 | return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); | |
308 | } | |
309 | ||
310 | static inline unsigned long | |
311 | vma_address(struct page *page, struct vm_area_struct *vma) | |
312 | { | |
313 | unsigned long address = __vma_address(page, vma); | |
314 | ||
315 | /* page should be within @vma mapping range */ | |
316 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); | |
317 | ||
318 | return address; | |
319 | } | |
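The arithmetic in __vma_address() with concrete numbers; a standalone userspace check assuming 4K pages (PAGE_SHIFT of 12):

```c
#include <assert.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;	/* vma start */
	unsigned long vm_pgoff = 16;	/* vma begins at file page 16 */
	unsigned long pgoff = 20;	/* page of interest: file page 20 */

	unsigned long addr = vm_start +
			     ((pgoff - vm_pgoff) << DEMO_PAGE_SHIFT);
	assert(addr == vm_start + 4 * 4096);	/* 4 pages into the vma */
	return 0;
}
```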
320 | ||
af8e3354 | 321 | #else /* !CONFIG_MMU */ |
b291f000 NP |
322 | static inline void clear_page_mlock(struct page *page) { } |
323 | static inline void mlock_vma_page(struct page *page) { } | |
324 | static inline void mlock_migrate_page(struct page *new, struct page *old) { } | |
325 | ||
af8e3354 | 326 | #endif /* !CONFIG_MMU */ |
894bc310 | 327 | |
69d177c2 AW |
328 | /* |
329 | * Return the mem_map entry representing the 'offset' subpage within | |
330 | * the maximally aligned gigantic page 'base'. Handle any discontiguity | |
331 | * in the mem_map at MAX_ORDER_NR_PAGES boundaries. | |
332 | */ | |
333 | static inline struct page *mem_map_offset(struct page *base, int offset) | |
334 | { | |
335 | if (unlikely(offset >= MAX_ORDER_NR_PAGES)) | |
bc7f84c0 | 336 | return nth_page(base, offset); |
69d177c2 AW |
337 | return base + offset; |
338 | } | |
339 | ||
340 | /* | |
25985edc | 341 | * Iterator over all subpages within the maximally aligned gigantic |
69d177c2 AW |
342 | * page 'base'. Handle any discontiguity in the mem_map. |
343 | */ | |
344 | static inline struct page *mem_map_next(struct page *iter, | |
345 | struct page *base, int offset) | |
346 | { | |
347 | if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) { | |
348 | unsigned long pfn = page_to_pfn(base) + offset; | |
349 | if (!pfn_valid(pfn)) | |
350 | return NULL; | |
351 | return pfn_to_page(pfn); | |
352 | } | |
353 | return iter + 1; | |
354 | } | |
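The iteration idiom these helpers support, as used by the hugetlb clear/copy loops; nr_pages stands in for pages_per_huge_page() and the body is illustrative:

```c
/* Illustrative: visit every subpage of a gigantic page safely. */
static void demo_walk_gigantic(struct page *base, int nr_pages)
{
	struct page *p = base;
	int i;

	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
		cond_resched();
		/* operate on subpage 'p' here */
	}
}
```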
355 | ||
b5a0e011 AH |
356 | /* |
357 | * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node, | |
358 | * so all functions starting at paging_init should be marked __init | |
359 | * in those cases. SPARSEMEM, however, allows for memory hotplug, | |
360 | * and alloc_bootmem_node is not used. | |
361 | */ | |
362 | #ifdef CONFIG_SPARSEMEM | |
363 | #define __paginginit __meminit | |
364 | #else | |
365 | #define __paginginit __init | |
366 | #endif | |
367 | ||
6b74ab97 MG |
368 | /* Memory initialisation debug and verification */ |
369 | enum mminit_level { | |
370 | MMINIT_WARNING, | |
371 | MMINIT_VERIFY, | |
372 | MMINIT_TRACE | |
373 | }; | |
374 | ||
375 | #ifdef CONFIG_DEBUG_MEMORY_INIT | |
376 | ||
377 | extern int mminit_loglevel; | |
378 | ||
379 | #define mminit_dprintk(level, prefix, fmt, arg...) \ | |
380 | do { \ | |
381 | if (level < mminit_loglevel) { \ | |
fc5199d1 RV |
382 | if (level <= MMINIT_WARNING) \ |
383 | printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \ | |
384 | else \ | |
385 | printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ | |
6b74ab97 MG |
386 | } \ |
387 | } while (0) | |
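A usage sketch for the macro; the prefix string and the nid variable are assumed for illustration:

```c
/* Illustrative: emitted only when mminit_loglevel exceeds MMINIT_VERIFY. */
static void demo_mminit_log(int nid)
{
	mminit_dprintk(MMINIT_VERIFY, "zonelist",
		       "zonelist for node %d verified\n", nid);
}
```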
388 | ||
708614e6 | 389 | extern void mminit_verify_pageflags_layout(void); |
68ad8df4 | 390 | extern void mminit_verify_zonelist(void); |
6b74ab97 MG |
391 | #else |
392 | ||
393 | static inline void mminit_dprintk(enum mminit_level level, | |
394 | const char *prefix, const char *fmt, ...) | |
395 | { | |
396 | } | |
397 | ||
708614e6 MG |
398 | static inline void mminit_verify_pageflags_layout(void) |
399 | { | |
400 | } | |
401 | ||
68ad8df4 MG |
402 | static inline void mminit_verify_zonelist(void) |
403 | { | |
404 | } | |
6b74ab97 | 405 | #endif /* CONFIG_DEBUG_MEMORY_INIT */ |
2dbb51c4 MG |
406 | |
407 | /* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */ | |
408 | #if defined(CONFIG_SPARSEMEM) | |
409 | extern void mminit_validate_memmodel_limits(unsigned long *start_pfn, | |
410 | unsigned long *end_pfn); | |
411 | #else | |
412 | static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, | |
413 | unsigned long *end_pfn) | |
414 | { | |
415 | } | |
416 | #endif /* CONFIG_SPARSEMEM */ | |
417 | ||
fa5e084e MG |
418 | #define ZONE_RECLAIM_NOSCAN -2 |
419 | #define ZONE_RECLAIM_FULL -1 | |
420 | #define ZONE_RECLAIM_SOME 0 | |
421 | #define ZONE_RECLAIM_SUCCESS 1 | |
7c116f2b | 422 | |
31d3d348 WF |
423 | extern int hwpoison_filter(struct page *p); |
424 | ||
7c116f2b WF |
425 | extern u32 hwpoison_filter_dev_major; |
426 | extern u32 hwpoison_filter_dev_minor; | |
478c5ffc WF |
427 | extern u64 hwpoison_filter_flags_mask; |
428 | extern u64 hwpoison_filter_flags_value; | |
4fd466eb | 429 | extern u64 hwpoison_filter_memcg; |
1bfe5feb | 430 | extern u32 hwpoison_filter_enable; |
eb36c587 AV |
431 | |
432 | extern unsigned long vm_mmap_pgoff(struct file *, unsigned long, | |
433 | unsigned long, unsigned long, | |
434 | unsigned long, unsigned long); | |
ca57df79 XQ |
435 | |
436 | extern void set_pageblock_order(void); | |
02c6de8d MK |
437 | unsigned long reclaim_clean_pages_from_list(struct zone *zone, |
438 | struct list_head *page_list); | |
d95ea5d1 BZ |
439 | /* The ALLOC_WMARK bits are used as an index to zone->watermark */ |
440 | #define ALLOC_WMARK_MIN WMARK_MIN | |
441 | #define ALLOC_WMARK_LOW WMARK_LOW | |
442 | #define ALLOC_WMARK_HIGH WMARK_HIGH | |
443 | #define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ | |
444 | ||
445 | /* Mask to get the watermark bits */ | |
446 | #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) | |
447 | ||
448 | #define ALLOC_HARDER 0x10 /* try to alloc harder */ | |
449 | #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ | |
450 | #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ | |
451 | #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ | |
3a025760 | 452 | #define ALLOC_FAIR 0x100 /* fair zone allocation */ |
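How the low bits are meant to be used: a sketch of the watermark lookup performed by get_page_from_freelist(); the helper name is illustrative:

```c
/* Illustrative: select the zone watermark indexed by the low bits. */
static inline unsigned long demo_alloc_wmark(struct zone *zone,
					     int alloc_flags)
{
	return zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
}
```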
d95ea5d1 | 453 | |
72b252ae MG |
454 | enum ttu_flags; |
455 | struct tlbflush_unmap_batch; | |
456 | ||
457 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH | |
458 | void try_to_unmap_flush(void); | |
d950c947 | 459 | void try_to_unmap_flush_dirty(void); |
72b252ae MG |
460 | #else |
461 | static inline void try_to_unmap_flush(void) | |
462 | { | |
463 | } | |
d950c947 MG |
464 | static inline void try_to_unmap_flush_dirty(void) |
465 | { | |
466 | } | |
72b252ae MG |
467 | |
468 | #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ | |
db971418 | 469 | #endif /* __MM_INTERNAL_H */ |