include/linux/memblock.h
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/memblock.h>

#define INIT_MEMBLOCK_REGIONS	128
#define MEMBLOCK_ERROR		0

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	struct memblock_region *regions;
};

struct memblock {
	phys_addr_t current_limit;
	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;
extern int memblock_debug;
extern int memblock_can_resize;

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

extern void __init memblock_init(void);
extern void __init memblock_analyze(void);
extern long memblock_add(phys_addr_t base, phys_addr_t size);
extern long memblock_remove(phys_addr_t base, phys_addr_t size);
extern long __init memblock_free(phys_addr_t base, phys_addr_t size);
extern long __init memblock_reserve(phys_addr_t base, phys_addr_t size);
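
/*
 * Example (illustrative sketch): the typical boot-time sequence an
 * architecture might follow with the registration API above.  The
 * base/size values are hypothetical placeholders, not requirements
 * of the API.
 */
static inline void memblock_example_early_setup(void)
{
	memblock_init();				/* reset the region arrays */
	memblock_add(0x00000000, 0x40000000);		/* register 1GB of RAM (hypothetical) */
	memblock_reserve(0x00100000, 0x00400000);	/* keep the kernel image untouched (hypothetical) */
	memblock_analyze();				/* recompute memblock.memory_size */
}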

/* The NUMA-aware allocators are only available if
 * CONFIG_ARCH_POPULATES_NODE_MAP is set.
 */
extern phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
					     int nid);
extern phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
						 int nid);

extern phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align);

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

extern phys_addr_t __init memblock_alloc_base(phys_addr_t size,
					      phys_addr_t align,
					      phys_addr_t max_addr);
extern phys_addr_t __init __memblock_alloc_base(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t max_addr);
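
/*
 * Example (illustrative sketch): allocating a buffer that must live
 * below a fixed physical boundary (256MB here, a purely hypothetical
 * limit).  __memblock_alloc_base() returns MEMBLOCK_ERROR when the
 * constrained request cannot be satisfied, so the caller can fall
 * back to an ordinary allocation within the current limit.
 */
static inline phys_addr_t memblock_example_alloc_low(phys_addr_t size)
{
	phys_addr_t addr;

	addr = __memblock_alloc_base(size, PAGE_SIZE, 0x10000000);
	if (addr == MEMBLOCK_ERROR)
		addr = memblock_alloc(size, PAGE_SIZE);
	return addr;
}
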
extern phys_addr_t __init memblock_phys_mem_size(void);
extern phys_addr_t memblock_end_of_DRAM(void);
extern void __init memblock_enforce_memory_limit(phys_addr_t memory_limit);
extern int memblock_is_memory(phys_addr_t addr);
extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
extern int __init memblock_is_reserved(phys_addr_t addr);
extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
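
/*
 * Example (illustrative sketch): validating a firmware-provided physical
 * range before using it.  The range is only considered usable if it lies
 * entirely within registered memory and does not overlap anything that
 * has already been reserved.
 */
static inline int memblock_example_range_usable(phys_addr_t base, phys_addr_t size)
{
	return memblock_is_region_memory(base, size) &&
	       !memblock_is_region_reserved(base, size);
}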

extern void memblock_dump_all(void);

/* Provided by the architecture */
extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
extern void memblock_set_current_limit(phys_addr_t limit);
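
/*
 * Example (illustrative sketch): an architecture whose early page tables
 * only map the first 512MB of RAM (a hypothetical figure) would cap
 * allocations accordingly until the full linear mapping is in place.
 */
static inline void memblock_example_cap_early_allocations(void)
{
	memblock_set_current_limit(0x20000000);	/* 512MB, hypothetical */
}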

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be.  These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_base_pfn - Return the lowest pfn intersecting with the region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg)
{
	return reg->base >> PAGE_SHIFT;
}

/**
 * memblock_region_last_pfn - Return the highest pfn intersecting with the region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg)
{
	return (reg->base + reg->size - 1) >> PAGE_SHIFT;
}

/**
 * memblock_region_end_pfn - Return the pfn of the first page following the region
 *                           but not intersecting it
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
{
	return memblock_region_last_pfn(reg) + 1;
}

/**
 * memblock_region_pages - Return the number of pages covering a region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_pages(const struct memblock_region *reg)
{
	return memblock_region_end_pfn(reg) - memblock_region_base_pfn(reg);
}
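
/*
 * Example (illustrative sketch): reporting one region's page frame range,
 * e.g. before handing it to an early bitmap or page-table initializer.
 * Note that memblock_region_end_pfn() is exclusive while
 * memblock_region_last_pfn() is inclusive.
 */
static inline void memblock_example_report_region(const struct memblock_region *reg)
{
	pr_info("memblock: pfn [%lx, %lx), %lu pages\n",
		memblock_region_base_pfn(reg),
		memblock_region_end_pfn(reg),
		memblock_region_pages(reg));
}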

#define for_each_memblock(memblock_type, region)				\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)
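
/*
 * Example (illustrative sketch): walking every reserved region to total
 * the memory that will never reach the page allocator.
 */
static inline phys_addr_t memblock_example_reserved_size(void)
{
	struct memblock_region *reg;
	phys_addr_t total = 0;

	for_each_memblock(reserved, reg)
		total += reg->size;
	return total;
}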

#ifdef ARCH_DISCARD_MEMBLOCK
#define __init_memblock __init
#define __initdata_memblock __initdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */