Commit | Line | Data |
---|---|---|
2e892f43 CL |
1 | #ifndef _LINUX_SLAB_DEF_H |
2 | #define _LINUX_SLAB_DEF_H | |
3 | ||
4 | /* | |
5 | * Definitions unique to the original Linux SLAB allocator. | |
8eae985f PE |
6 | */ |
7 | ||
/*
 * Per-cache descriptor for the SLAB allocator.  One instance describes one
 * object cache.  The numbered section comments group the fields by how they
 * are used at runtime (hot alloc/free path vs. creation/teardown vs. debug).
 */
struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
	/* batch of objects transferred at once when refilling/draining —
	 * NOTE(review): exact transfer semantics live in mm/slab.c; confirm */
	unsigned int batchcount;
	/* upper bound on cached free objects (tunable) */
	unsigned int limit;
	/* NOTE(review): presumably sizes a per-node shared array — confirm */
	unsigned int shared;

	/* total per-object footprint; per the debug comment below this
	 * includes any allocator-internal fields/padding */
	unsigned int size;
	/* precomputed reciprocal of size — presumably to replace division
	 * in object-index math; TODO confirm against reciprocal_divide() */
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	/* cache used to allocate off-slab freelists, and the size of one
	 * freelist — NOTE(review): on/off-slab choice is made elsewhere */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;		/* cache name (e.g. for /proc reporting) */
	struct list_head list;		/* linkage on the global cache list */
	int refcount;			/* users of this cache descriptor */
	int object_size;		/* original object size, without the
					 * internal overhead counted in .size */
	int align;			/* object alignment in bytes */

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	/* allocation counters, maintained only with CONFIG_DEBUG_SLAB */
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;	/* peak of num_active */
	unsigned long grown;		/* times the cache grew (new slabs) */
	unsigned long reaped;		/* times slabs were reclaimed */
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;	/* cross-node allocations */
	unsigned long node_frees;	/* cross-node frees */
	unsigned long node_overflow;
	/* per-cpu array hit/miss counters; atomic since the per-cpu fast
	 * path updates them without the cache mutex */
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	/* memory-cgroup accounting state for this cache */
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;	/* per-node slab lists/state */
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
90 | ||
2e892f43 | 91 | #endif /* _LINUX_SLAB_DEF_H */ |