include/linux/rhashtable.h
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper by Josh Triplett, Paul E. McKenney
 * and Jonathan Walpole:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                         Hash                        |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS           4
#define RHT_HASH_BITS           27
#define RHT_BASE_SHIFT          RHT_HASH_BITS

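/*
 * Illustrative sketch (not part of the original header): assuming a full
 * 32-bit hash value "hash" and a table configured with
 * nulls_base = (1U << RHT_BASE_SHIFT), the nulls marker stored in place
 * of a next pointer is built roughly as
 *
 *      unsigned long marker = ((nulls_base + hash) << 1) | 1UL;
 *
 * Bit 0 is always set, so the value can never alias a real, word-aligned
 * &struct rhash_head; the hash occupies the next RHT_HASH_BITS bits and
 * the base the top RHT_BASE_BITS bits. The actual encoding is performed
 * by NULLS_MARKER() via rht_marker() below.
 */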
struct rhash_head {
        struct rhash_head __rcu         *next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @buckets: size * hash buckets
 */
struct bucket_table {
        size_t                          size;
        unsigned int                    locks_mask;
        spinlock_t                      *locks;
        struct rhash_head __rcu         *buckets[];
};

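/*
 * Illustrative sketch (an assumption, not part of the original header):
 * given a full hash value "hash" and a bucket table "tbl" whose size is a
 * power of two, the bucket and its protecting spinlock would be selected
 * roughly as
 *
 *      struct rhash_head __rcu **bucket = &tbl->buckets[hash & (tbl->size - 1)];
 *      spinlock_t *lock = &tbl->locks[hash & tbl->locks_mask];
 *
 * Several buckets may share one lock, since locks_mask is typically
 * smaller than size - 1.
 */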
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @hash_rnd: Seed to use while hashing
 * @max_shift: Maximum number of shifts while expanding
 * @min_shift: Minimum number of shifts while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 * @grow_decision: If defined, may return true if table should expand
 * @shrink_decision: If defined, may return true if table should shrink
 *
 * Note: when implementing the grow and shrink decision functions, min/max
 * shift must be enforced; otherwise, the resizing watermarks they set may
 * be useless.
 */
struct rhashtable_params {
        size_t                  nelem_hint;
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
        u32                     hash_rnd;
        size_t                  max_shift;
        size_t                  min_shift;
        u32                     nulls_base;
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        bool                    (*grow_decision)(const struct rhashtable *ht,
                                                 size_t new_size);
        bool                    (*shrink_decision)(const struct rhashtable *ht,
                                                   size_t new_size);
};

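/*
 * Example parameter setup (illustrative sketch; "struct test_obj", its
 * fields and the use of jhash from <linux/jhash.h> are assumptions, not
 * part of this header):
 *
 *      struct test_obj {
 *              u32                     key;
 *              struct rhash_head       node;
 *      };
 *
 *      struct rhashtable_params params = {
 *              .nelem_hint      = 1024,
 *              .head_offset     = offsetof(struct test_obj, node),
 *              .key_offset      = offsetof(struct test_obj, key),
 *              .key_len         = sizeof(u32),
 *              .nulls_base      = (1U << RHT_BASE_SHIFT),
 *              .hashfn          = jhash,
 *              .grow_decision   = rht_grow_above_75,
 *              .shrink_decision = rht_shrink_below_30,
 *      };
 */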
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @future_tbl: Table under construction during expansion/shrinking
 * @nelems: Number of elements in table
 * @shift: Current size (1 << shift)
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @being_destroyed: True if table is set up for destruction
 */
struct rhashtable {
        struct bucket_table __rcu       *tbl;
        struct bucket_table __rcu       *future_tbl;
        atomic_t                        nelems;
        atomic_t                        shift;
        struct rhashtable_params        p;
        struct work_struct              run_work;
        struct mutex                    mutex;
        bool                            being_destroyed;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
        return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
        ((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr) >> 1;
}

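/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * chain end initialized with INIT_RHT_NULLS_HEAD() round-trips through the
 * helpers above:
 *
 *      struct rhash_head *head;
 *
 *      INIT_RHT_NULLS_HEAD(head, ht, hash);
 *      rht_is_a_nulls(head);           // true
 *      rht_get_nulls_value(head);      // ht->p.nulls_base + hash
 */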
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
                                             u32 hash)
{
        return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);

void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
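/*
 * Example usage (illustrative sketch; "my_table", "params" and "obj" are
 * hypothetical and follow the struct test_obj sketch above):
 *
 *      struct rhashtable my_table;
 *      int err;
 *
 *      err = rhashtable_init(&my_table, &params);
 *      if (err)
 *              return err;
 *
 *      rhashtable_insert(&my_table, &obj->node);
 *      ...
 *      rhashtable_remove(&my_table, &obj->node);
 */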

bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);

int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

void *rhashtable_lookup(struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg);

bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
                                      struct rhash_head *obj,
                                      bool (*compare)(void *, void *),
                                      void *arg);
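/*
 * Example lookup with a custom compare callback (illustrative sketch;
 * "struct test_obj", "my_cmp" and "my_table" are hypothetical). The
 * compare callback receives the candidate object and the caller-supplied
 * argument; the returned pointer is only valid inside the RCU read-side
 * critical section:
 *
 *      static bool my_cmp(void *ptr, void *arg)
 *      {
 *              struct test_obj *obj = ptr;
 *              u32 *key = arg;
 *
 *              return obj->key == *key;
 *      }
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup_compare(&my_table, &key, my_cmp, &key);
 *      rcu_read_unlock();
 */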

void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
        rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
        rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
        rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
        rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
                                    tbl, hash, member)
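/*
 * Example bucket walk (illustrative sketch; "struct test_obj" and its
 * "node" member are hypothetical, and the lock or mutex protecting "tbl"
 * is assumed to be held by the caller):
 *
 *      struct test_obj *obj;
 *      struct rhash_head *pos;
 *
 *      rht_for_each_entry(obj, pos, tbl, hash, node)
 *              pr_info("key %u\n", obj->key);
 */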

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
        for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
             next = !rht_is_a_nulls(pos) ? \
                    rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = next, \
             next = !rht_is_a_nulls(pos) ? \
                    rht_dereference_bucket(pos->next, tbl, hash) : NULL)
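/*
 * Example teardown-style walk (illustrative sketch, hypothetical names as
 * above): freeing every object in one bucket while walking it is only
 * safe with the _safe variant, because the cursor may be unlinked or
 * freed in the loop body:
 *
 *      struct test_obj *obj;
 *      struct rhash_head *pos, *next;
 *
 *      rht_for_each_entry_safe(obj, pos, next, tbl, hash, node)
 *              kfree(obj);
 */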

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
        for (({barrier(); }), \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
        rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
        for (({barrier(); }), \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash], \
                                        tbl, hash, member)
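/*
 * Example RCU walk (illustrative sketch; hypothetical names as above).
 * The object found is only guaranteed valid inside the RCU read-side
 * critical section:
 *
 *      struct test_obj *obj;
 *      struct rhash_head *pos;
 *
 *      rcu_read_lock();
 *      rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
 *              if (obj->key == key)
 *                      break;
 *      }
 *      rcu_read_unlock();
 */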

#endif /* _LINUX_RHASHTABLE_H */