SLUB: Move page->offset to kmem_cache_cpu->offset
[deliverable/linux.git] / include/linux/mm_types.h
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/prio_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <asm/page.h>
#include <asm/mmu.h>

struct address_space;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
typedef atomic_long_t mm_counter_t;
#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
typedef unsigned long mm_counter_t;
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

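/*
 * Illustrative sketch (not part of the original header): accessors in
 * the style of the get_mm_counter()/add_mm_counter() family that lives
 * elsewhere in the tree show why mm_counter_t can flip between
 * atomic_long_t and a plain unsigned long transparently. The names
 * below are hypothetical stand-ins, not real kernel macros.
 */
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
#define example_counter_read(c)		((unsigned long)atomic_long_read(c))
#define example_counter_add(c, v)	atomic_long_add((v), (c))
#else  /* updates are serialized by page_table_lock instead */
#define example_counter_read(c)		(*(c))
#define example_counter_add(c, v)	(*(c) += (v))
#endif
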
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		unsigned int inuse;	/* SLUB: Nr of objects */
	};
	union {
		struct {
			unsigned long private;	/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
			struct address_space *mapping;	/* If low bit clear, points to
							 * inode address_space, or NULL.
							 * If page mapped as anonymous
							 * memory, low bit is set, and
							 * it points to anon_vma object:
							 * see PAGE_MAPPING_ANON below.
							 */
		};
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};

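/*
 * Illustrative sketch (not part of the original header): the 'mapping'
 * comment above describes the anon/file encoding that linux/mm.h
 * implements via PAGE_MAPPING_ANON, the low bit of the pointer. A
 * minimal reconstruction of the PageAnon() test under that assumption;
 * the helper name is hypothetical:
 */
static inline int example_page_is_anon(struct page *page)
{
	/* low bit set: 'mapping' points to an anon_vma, not an
	 * inode's address_space */
	return ((unsigned long)page->mapping & 1) != 0;
}
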
/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct *vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, listed below. */

	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_node;	/* Serialized by anon_vma->lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */
	unsigned long vm_truncate_count;/* truncate_count or restart_addr */

#ifndef CONFIG_MMU
	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};

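/*
 * Illustrative sketch (not part of the original header): because
 * vm_next keeps the per-mm VMA list sorted by address, a lookup needs
 * only a linear scan. This is a simplified cousin of find_vma() in
 * mm/mmap.c, minus the rbtree and mmap_cache fast paths; the helper
 * name is hypothetical, and 'vma' is the head of a list such as
 * mm->mmap (caller assumed to hold mmap_sem for reading).
 */
static inline struct vm_area_struct *
example_find_vma_linear(struct vm_area_struct *vma, unsigned long addr)
{
	for (; vma; vma = vma->vm_next)
		if (addr < vma->vm_end)	/* first VMA ending above addr... */
			return vma;	/* ...which may still start above it */
	return NULL;
}
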
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct *mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;	/* base of mmap area */
	unsigned long task_size;	/* size of task vm space */
	unsigned long cached_hole_size;	/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;	/* first hole of size cached_hole_size or larger */
	pgd_t *pgd;
	atomic_t mm_users;	/* How many users with user space? */
	atomic_t mm_count;	/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;		/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;	/* Protects page tables and some counters */

	struct list_head mmlist;	/* List of maybe swapped mm's. These are globally strung
					 * together off init_mm.mmlist, and are protected
					 * by mmlist_lock
					 */

	/* Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	mm_counter_t _file_rss;
	mm_counter_t _anon_rss;

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	unsigned long flags;	/* Must use atomic bitops to access the bits */

	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t ioctx_list_lock;
	struct kioctx *ioctx_list;
};

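/*
 * Illustrative sketch (not part of the original header): mm_users and
 * mm_count form a two-level reference count. All user-space users are
 * covered by a single mm_count reference, while lazy-TLB kernel users
 * take mm_count directly. A hypothetical condensation of the mmput()
 * logic from kernel/fork.c; the helper name is made up:
 */
static inline void example_put_user_reference(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		/* last user-space reference: the real code tears down
		 * VMAs and page tables (exit_mmap()) at this point */
		if (atomic_dec_and_test(&mm->mm_count)) {
			/* free_mm(mm) in the real implementation */
		}
	}
}
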
#endif /* _LINUX_MM_TYPES_H */