[PATCH] ppc64: prep for NUMA sparsemem rework 2
[deliverable/linux.git] / include / asm-ppc64 / mmzone.h
1 /*
2 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
3 *
4 * PowerPC64 port:
5 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
6 */
7 #ifndef _ASM_MMZONE_H_
8 #define _ASM_MMZONE_H_
9
10 #include <linux/config.h>
11 #include <asm/smp.h>
12
13 /* generic non-linear memory support:
14 *
15 * 1) we will not split memory into more chunks than will fit into the
16 * flags field of the struct page
17 */
18
19
20 #ifdef CONFIG_NEED_MULTIPLE_NODES
21
22 extern struct pglist_data *node_data[];
23 /*
24 * Return a pointer to the node data for node n.
25 */
26 #define NODE_DATA(nid) (node_data[nid])
27
28 /*
29 * Following are specific to this numa platform.
30 */
31
32 extern int numa_cpu_lookup_table[];
33 extern char *numa_memory_lookup_table;
34 extern cpumask_t numa_cpumask_lookup_table[];
35 #ifdef CONFIG_MEMORY_HOTPLUG
36 extern unsigned long max_pfn;
37 #endif
38
39 /* 16MB regions */
40 #define MEMORY_INCREMENT_SHIFT 24
41 #define MEMORY_INCREMENT (1UL << MEMORY_INCREMENT_SHIFT)
42
43 /* NUMA debugging, will not work on a DLPAR machine */
44 #undef DEBUG_NUMA
45
46 static inline int pa_to_nid(unsigned long pa)
47 {
48 int nid;
49
50 #ifdef CONFIG_MEMORY_HOTPLUG
51 /* kludge hot added sections default to node 0 */
52 if (pa >= (max_pfn << PAGE_SHIFT))
53 return 0;
54 #endif
55 nid = numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
56
57 #ifdef DEBUG_NUMA
58 /* the physical address passed in is not in the map for the system */
59 if (nid == -1) {
60 printk("bad address: %lx\n", pa);
61 BUG();
62 }
63 #endif
64
65 return nid;
66 }
67
68 /*
69  * Following are macros that each numa implementation must define.
70 */
71
72 #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
73 #define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
74
75 #ifdef CONFIG_DISCONTIGMEM
76
77 #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
78
79 #define pfn_to_nid(pfn) pa_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
80
81 /* Written this way to avoid evaluating arguments twice */
/*
 * Convert a pfn to its struct page.
 *
 * The statement expression evaluates the macro argument exactly once
 * (now also parenthesized for macro safety) and resolves the owning
 * node a single time, where the original performed the pfn_to_nid()
 * table lookup twice.
 */
#define discontigmem_pfn_to_page(pfn)					\
({									\
	unsigned long __tmp = (pfn);					\
	int __nid = pfn_to_nid(__tmp);					\
	(NODE_DATA(__nid)->node_mem_map +				\
	 node_localnr(__tmp, __nid));					\
})
88
/*
 * Convert a struct page to its pfn: offset of the page within its
 * zone's mem_map, rebased onto the zone's starting pfn.
 *
 * The macro argument is evaluated exactly once and is parenthesized
 * (the original assigned the bare argument, unsafe for compound
 * expressions passed as `p`).
 */
#define discontigmem_page_to_pfn(p)					\
({									\
	struct page *__tmp = (p);					\
	(((__tmp) - page_zone(__tmp)->zone_mem_map) +			\
	 page_zone(__tmp)->zone_start_pfn);				\
})
95
96 /* XXX fix for discontiguous physical memory */
97 #define discontigmem_pfn_valid(pfn) ((pfn) < num_physpages)
98
99 #endif /* CONFIG_DISCONTIGMEM */
100
101 #endif /* CONFIG_NEED_MULTIPLE_NODES */
102
103 #ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
104 #define early_pfn_to_nid(pfn) pa_to_nid(((unsigned long)pfn) << PAGE_SHIFT)
105 #endif
106
107 #endif /* _ASM_MMZONE_H_ */
This page took 0.063231 seconds and 6 git commands to generate.