include/asm-ppc64/mmzone.h
/*
 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
 *
 * PowerPC64 port:
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 */
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <linux/config.h>
#include <asm/smp.h>

/* generic non-linear memory support:
 *
 * 1) we will not split memory into more chunks than will fit into the
 *    flags field of the struct page
 */


#ifdef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data *node_data[];
/*
 * Return a pointer to the node data for node n.
 */
#define NODE_DATA(nid)		(node_data[nid])

/*
 * Following are specific to this numa platform.
 */

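/*
 * numa_cpu_lookup_table[cpu] gives the node a cpu belongs to,
 * numa_cpumask_lookup_table[nid] is the mask of cpus on a node, and
 * nr_cpus_in_node[nid] is how many cpus that node has.
 * numa_memory_lookup_table maps 16MB physical regions to node ids.
 */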
extern int numa_cpu_lookup_table[];
extern char *numa_memory_lookup_table;
extern cpumask_t numa_cpumask_lookup_table[];
extern int nr_cpus_in_node[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
#endif

/* 16MB regions */
#define MEMORY_INCREMENT_SHIFT	24
#define MEMORY_INCREMENT	(1UL << MEMORY_INCREMENT_SHIFT)
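/*
 * Example: physical address 0x05000000 lies in the sixth 16MB region,
 * so it indexes numa_memory_lookup_table[0x05000000 >> 24], i.e. slot 5.
 */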

/* NUMA debugging, will not work on a DLPAR machine */
#undef DEBUG_NUMA

static inline int pa_to_nid(unsigned long pa)
{
	int nid;

#ifdef CONFIG_MEMORY_HOTPLUG
	/* kludge: hot-added sections default to node 0 */
	if (pa >= (max_pfn << PAGE_SHIFT))
		return 0;
#endif
	nid = numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];

#ifdef DEBUG_NUMA
	/* the physical address passed in is not in the map for the system */
	if (nid == -1) {
		printk("bad address: %lx\n", pa);
		BUG();
	}
#endif

	return nid;
}
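/*
 * Example: if firmware assigned the 16MB region at 0x05000000 to node 1,
 * numa_memory_lookup_table[5] holds 1 and pa_to_nid(0x05000000) returns 1.
 * With CONFIG_MEMORY_HOTPLUG, physical addresses beyond the boot-time
 * max_pfn fall back to node 0, per the kludge above.
 */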

#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)
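/*
 * Example: if NODE_DATA(1)->node_start_pfn is 0x10000, then
 * node_localnr(0x10040, 1) is 0x40, the pfn's offset into node 1's
 * node_mem_map.
 */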

/*
 * Following are macros that each numa implementation must define.
 */

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)

#ifdef CONFIG_DISCONTIGMEM

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))

#define pfn_to_nid(pfn)		pa_to_nid((unsigned long)(pfn) << PAGE_SHIFT)

/* Written this way to avoid evaluating arguments twice */
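/*
 * The statement expression below copies pfn into __tmp once, so a pfn
 * argument with side effects is evaluated a single time even though the
 * node lookup and the node-local offset both need its value.
 */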
#define discontigmem_pfn_to_page(pfn) \
({ \
	unsigned long __tmp = pfn; \
	(NODE_DATA(pfn_to_nid(__tmp))->node_mem_map + \
	 node_localnr(__tmp, pfn_to_nid(__tmp))); \
})

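/*
 * The reverse mapping: a page's index within its zone's mem_map plus the
 * zone's starting pfn gives back the global pfn.
 */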
#define discontigmem_page_to_pfn(p) \
({ \
	struct page *__tmp = p; \
	(((__tmp) - page_zone(__tmp)->zone_mem_map) + \
	 page_zone(__tmp)->zone_start_pfn); \
})

/* XXX fix for discontiguous physical memory */
#define discontigmem_pfn_valid(pfn)	((pfn) < num_physpages)

#endif /* CONFIG_DISCONTIGMEM */

#endif /* CONFIG_NEED_MULTIPLE_NODES */

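/*
 * early_pfn_to_nid() lets generic early boot code ask which node a pfn
 * belongs to before the node mem_maps are set up; here it reuses the same
 * physical-address lookup as pa_to_nid() above.
 */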
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
#define early_pfn_to_nid(pfn)	pa_to_nid(((unsigned long)pfn) << PAGE_SHIFT)
#endif

#endif /* _ASM_MMZONE_H_ */