Merge tag 'for-3.7-rc1' of git://gitorious.org/linux-pwm/linux-pwm
[deliverable/linux.git] / drivers / staging / ramster / tmem.h
CommitLineData
faca2ef7
DM
1/*
2 * tmem.h
3 *
4 * Transcendent memory
5 *
6 * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
7 */
8
9#ifndef _TMEM_H_
10#define _TMEM_H_
11
12#include <linux/types.h>
13#include <linux/highmem.h>
14#include <linux/hash.h>
15#include <linux/atomic.h>
16
17/*
18 * These are defined by the Xen<->Linux ABI so should remain consistent
19 */
#define TMEM_POOL_PERSIST 1		/* pool contents are persistent, not ephemeral */
#define TMEM_POOL_SHARED 2		/* pool may be shared (presumably between clients) */
#define TMEM_POOL_PRECOMPRESSED 4	/* NOTE(review): presumably pages arrive compressed -- confirm */
#define TMEM_POOL_PAGESIZE_SHIFT 4	/* page size field occupies flag bits 4-7 */
#define TMEM_POOL_PAGESIZE_MASK 0xf
#define TMEM_POOL_RESERVED_BITS 0x00ffff00
26
27/*
28 * sentinels have proven very useful for debugging but can be removed
29 * or disabled before final merge.
30 */
/*
 * Fix: the _x macro argument was expanded unparenthesized (_x->sentinel),
 * which misparses when the caller passes a non-trivial pointer expression
 * (e.g. a conditional).  Every use of _x is now wrapped in parentheses.
 * Behavior is unchanged while SENTINELS remains #undef'd.
 */
#undef SENTINELS
#ifdef SENTINELS
#define DECL_SENTINEL uint32_t sentinel;
#define SET_SENTINEL(_x, _y) ((_x)->sentinel = _y##_SENTINEL)
#define INVERT_SENTINEL(_x, _y) ((_x)->sentinel = ~_y##_SENTINEL)
#define ASSERT_SENTINEL(_x, _y) WARN_ON((_x)->sentinel != _y##_SENTINEL)
#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON((_x)->sentinel != ~_y##_SENTINEL)
#else
#define DECL_SENTINEL
#define SET_SENTINEL(_x, _y) do { } while (0)
#define INVERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_SENTINEL(_x, _y) do { } while (0)
#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
#endif
45
/* Debug assertion that the caller holds spinlock _l (no-op unless lockdep is on) */
#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
47
48/*
49 * A pool is the highest-level data structure managed by tmem and
50 * usually corresponds to a large independent set of pages such as
51 * a filesystem. Each pool has an id, and certain attributes and counters.
52 * It also contains a set of hash buckets, each of which contains an rbtree
53 * of objects and a lock to manage concurrency within the pool.
54 */
55
/* 2^8 = 256 hash buckets per pool */
#define TMEM_HASH_BUCKET_BITS 8
#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)

/*
 * One pool hash bucket: the rbtree of objects that hash here, plus the
 * per-bucket lock that manages concurrency within the pool (see the
 * pool description above).
 */
struct tmem_hashbucket {
	struct rb_root obj_rb_root;	/* objects hashed into this bucket */
	spinlock_t lock;		/* guards obj_rb_root */
};
63
struct tmem_pool {
	void *client;		/* "up" for some clients, avoids table lookup */
	struct list_head pool_list;	/* list linkage; presumably the client's pool list -- confirm */
	uint32_t pool_id;
	bool persistent;	/* see is_persistent()/is_ephemeral() below */
	bool shared;
	atomic_t obj_count;	/* number of objects currently in the pool */
	atomic_t refcount;
	struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
	DECL_SENTINEL
};
75
/*
 * Pool attribute predicates.
 * Fix: the _p argument was expanded unparenthesized (_p->persistent), so a
 * call such as is_persistent(&pool) misparsed as &(pool->persistent).  The
 * argument is now parenthesized; existing call sites are unaffected.
 */
#define is_persistent(_p) ((_p)->persistent)
#define is_ephemeral(_p) (!((_p)->persistent))
78
79/*
80 * An object id ("oid") is large: 192-bits (to ensure, for example, files
81 * in a modern filesystem can be uniquely identified).
82 */
83
struct tmem_oid {
	uint64_t oid[3];	/* oid[2] is most significant (see tmem_oid_compare) */
};
87
88static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
89{
90 oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
91}
92
93static inline bool tmem_oid_valid(struct tmem_oid *oidp)
94{
95 return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
96 oidp->oid[2] != -1UL;
97}
98
99static inline int tmem_oid_compare(struct tmem_oid *left,
100 struct tmem_oid *right)
101{
102 int ret;
103
104 if (left->oid[2] == right->oid[2]) {
105 if (left->oid[1] == right->oid[1]) {
106 if (left->oid[0] == right->oid[0])
107 ret = 0;
108 else if (left->oid[0] < right->oid[0])
109 ret = -1;
110 else
111 return 1;
112 } else if (left->oid[1] < right->oid[1])
113 ret = -1;
114 else
115 ret = 1;
116 } else if (left->oid[2] < right->oid[2])
117 ret = -1;
118 else
119 ret = 1;
120 return ret;
121}
122
/*
 * Map an oid to one of the TMEM_HASH_BUCKETS pool hash buckets by
 * folding the three 64-bit words together with XOR and hashing.
 * NOTE(review): hash_long() takes an unsigned long, so on 32-bit
 * builds the upper 32 bits of the XOR are truncated before hashing --
 * presumably acceptable for bucket distribution, but confirm.
 */
static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
{
	return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
			 TMEM_HASH_BUCKET_BITS);
}
128
129#ifdef CONFIG_RAMSTER
/*
 * A cross-node ("x") handle: everything needed to name one tmem page --
 * client, pool, object id and page index -- plus checksum/size fields
 * for the page data (set to all-ones as "unknown" by tmem_xhandle_fill).
 */
struct tmem_xhandle {
	uint8_t client_id;
	uint8_t xh_data_cksum;	/* checksum of the page data */
	uint16_t xh_data_size;	/* size of the page data */
	uint16_t pool_id;
	struct tmem_oid oid;
	uint32_t index;		/* page index within the object */
	void *extra;		/* private to the pampd implementation */
};
139
140static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
141 struct tmem_pool *pool,
142 struct tmem_oid *oidp,
143 uint32_t index)
144{
145 struct tmem_xhandle xh;
146 xh.client_id = client_id;
147 xh.xh_data_cksum = (uint8_t)-1;
148 xh.xh_data_size = (uint16_t)-1;
149 xh.pool_id = pool->pool_id;
150 xh.oid = *oidp;
151 xh.index = index;
152 return xh;
153}
154#endif
155
156
157/*
158 * A tmem_obj contains an identifier (oid), pointers to the parent
159 * pool and the rb_tree to which it belongs, counters, and an ordered
160 * set of pampds, structured in a radix-tree-like tree. The intermediate
161 * nodes of the tree are called tmem_objnodes.
162 */
163
struct tmem_objnode;

struct tmem_obj {
	struct tmem_oid oid;		/* identifies this object within its pool */
	struct tmem_pool *pool;		/* parent pool */
	struct rb_node rb_tree_node;	/* linkage in a hash bucket's rbtree */
	struct tmem_objnode *objnode_tree_root;	/* root of the radix-like pampd tree */
	unsigned int objnode_tree_height;	/* current depth of the pampd tree */
	unsigned long objnode_count;	/* number of objnodes in the tree */
	long pampd_count;		/* number of pampds (pages) stored */
#ifdef CONFIG_RAMSTER
	/*
	 * for current design of ramster, all pages belonging to
	 * an object reside on the same remotenode and extra is
	 * used to record the number of the remotenode so a
	 * flush-object operation can specify it
	 */
	void *extra; /* for private use by pampd implementation */
#endif
	DECL_SENTINEL
};
185
/* Each objnode fans out 2^6 = 64 ways */
#define OBJNODE_TREE_MAP_SHIFT 6
#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
/* deepest possible path from root to leaf, with slack */
#define OBJNODE_TREE_MAX_PATH \
	(OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)

/*
 * Intermediate node of an object's radix-tree-like pampd tree (see the
 * tmem_obj description above).
 */
struct tmem_objnode {
	struct tmem_obj *obj;	/* object whose tree this node belongs to */
	DECL_SENTINEL
	void *slots[OBJNODE_TREE_MAP_SIZE];	/* child objnodes or pampds */
	unsigned int slots_in_use;	/* count of non-NULL slots */
};
199
/*
 * Compact identifier for one tmem page: object id plus page index,
 * qualified by pool and client.
 */
struct tmem_handle {
	struct tmem_oid oid; /* 24 bytes */
	uint32_t index;
	uint16_t pool_id;
	uint16_t client_id;
};
206
207
/*
 * pampd abstract datatype methods provided by the PAM implementation.
 * The pampd itself is always passed as an opaque void *; only the PAM
 * implementation interprets it.
 */
struct tmem_pamops {
	/* complete a previously-begun pampd creation; NOTE(review):
	 * the bool is presumably an ephemeral/persistent or
	 * raw-data flag -- confirm against the implementation */
	void (*create_finish)(void *, bool);
	/* copy a pampd's data into the caller's buffer/size */
	int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
			struct tmem_oid *, uint32_t);
	/* like get_data, but the pampd is also freed */
	int (*get_data_and_free)(char *, size_t *, bool, void *,
				 struct tmem_pool *, struct tmem_oid *,
				 uint32_t);
	/* release a pampd */
	void (*free)(void *, struct tmem_pool *,
		     struct tmem_oid *, uint32_t, bool);
#ifdef CONFIG_RAMSTER
	void (*new_obj)(struct tmem_obj *);
	void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
	/* prepare to bring a remote pampd local; NOTE(review): exact
	 * contract (return value, bool out-param) lives in the PAM
	 * implementation -- confirm there */
	void *(*repatriate_preload)(void *, struct tmem_pool *,
				    struct tmem_oid *, uint32_t, bool *);
	int (*repatriate)(void *, void *, struct tmem_pool *,
			  struct tmem_oid *, uint32_t, bool, void *);
	/* true if this pampd's data lives on a remote node */
	bool (*is_remote)(void *);
	int (*replace_in_obj)(void *, struct tmem_obj *);
#endif
};
extern void tmem_register_pamops(struct tmem_pamops *m);
230
/*
 * Memory allocation methods provided by the host implementation; tmem
 * core allocates/frees its objects and objnodes only through these.
 */
struct tmem_hostops {
	struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
	void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
	struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
	void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
};
extern void tmem_register_hostops(struct tmem_hostops *m);
239
/* core tmem accessor functions */

/* store a page at (pool, oid, index) */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
		    bool, void *);
/* retrieve the page at (pool, oid, index) into the supplied buffer */
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
		    char *, size_t *, bool, int);
/* remove one page */
extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
			   uint32_t index);
/* remove an object and everything it holds */
extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
extern int tmem_destroy_pool(struct tmem_pool *);
/* initialize a pool; NOTE(review): the uint32_t presumably carries the
 * TMEM_POOL_* flags defined above -- confirm in tmem.c */
extern void tmem_new_pool(struct tmem_pool *, uint32_t);
#ifdef CONFIG_RAMSTER
/* swap in a replacement pampd at (pool, oid, index) */
extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
			void *);
extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
				     uint32_t index, struct tmem_obj **,
				     void **);
extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
				 void *, void *, bool);
#endif
#endif /* _TMEM_H_ */
This page took 0.055647 seconds and 5 git commands to generate.