#include <stdlib.h>
#include <errno.h>

#include <arch_atomic.h>
#include <urcu-defer.h>
16 struct rcu_ht_node
*next
;
22 struct rcu_ht_node
*tbl
[HASH_SIZE
];
24 void (*free_fct
)(void *data
); /* fct to free data */
27 struct rcu_ht
*ht_new(ht_hash_fct hash_fct
, void (*free_fct
)(void *data
))
31 ht
= calloc(1, sizeof(struct rcu_ht
));
32 ht
->hash_fct
= hash_fct
;
33 ht
->free_fct
= free_fct
;
37 /* delete all elements */
38 void ht_delete_all(struct rcu_ht
*ht
)
41 struct rcu_ht_node
**prev
, *node
;
43 for (i
= 0; i
< HASH_SIZE
; i
++) {
47 node
= rcu_dereference(*prev
);
49 * Cut the head, therefore whole bucket will be unused
50 * after GP. (except for concurrent additions)
52 rcu_assign_pointer(prev
, NULL
);
59 * FIXME: calling call_rcu within a read-side C.S. is a
60 * bug, because it may call synchronize_rcu() if the
61 * callback queue is full.
64 call_rcu(ht
->free_fct
, node
->data
);
66 node
= rcu_dereference(*prev
);
73 * Should only be called when no more concurrent readers nor writers can
74 * possibly access the table.
76 void ht_destroy(struct rcu_ht
*ht
)
82 void *ht_lookup(struct rcu_ht
*ht
, void *key
)
85 struct rcu_ht_node
*node
;
88 hash
= ht
->hash_fct(key
);
91 node
= rcu_dereference(ht
->tbl
[hash
]);
97 if (node
->key
== key
) {
101 node
= rcu_dereference(node
->next
);
109 * Will re-try until either:
110 * - The key is already there (-EEXIST)
111 * - We successfully add the key at the head of a table bucket.
113 int ht_add(struct rcu_ht
*ht
, void *key
, void *data
)
115 struct rcu_ht_node
*node
, *old_head
, *new_head
;
119 new_head
= calloc(1, sizeof(struct rcu_ht_node
));
121 new_head
->data
= data
;
122 hash
= ht
->hash_fct(key
);
124 /* here comes the fun and tricky part.
125 * Add at the beginning with a cmpxchg.
126 * Hold a read lock between the moment the first element is read
127 * and the nodes traversal (to find duplicates). This ensures
128 * the head pointer has not been reclaimed when cmpxchg is done.
129 * Always adding at the head ensures that we would have to
130 * re-try if a new item has been added concurrently. So we ensure that
131 * we never add duplicates. */
135 old_head
= node
= rcu_dereference(ht
->tbl
[hash
]);
140 if (node
->key
== key
) {
144 node
= rcu_dereference(node
->next
);
146 if (rcu_cmpxchg_pointer(&ht
->tbl
[hash
], old_head
, new_head
) != old_head
)
153 /* restart loop, release and re-take the read lock to be kind to GP */
160 * Restart until we successfully remove the entry, or no entry is left
161 * ((void *)(unsigned long)-ENOENT).
163 void *ht_steal(struct rcu_ht
*ht
, void *key
)
165 struct rcu_ht_node
**prev
, *node
;
169 hash
= ht
->hash_fct(key
);
174 prev
= &ht
->tbl
[hash
];
175 node
= rcu_dereference(*prev
);
178 data
= (void *)(unsigned long)-ENOENT
;
181 if (node
->key
== key
) {
185 node
= rcu_dereference(*prev
);
187 /* Found it ! pointer to object is in "prev" */
188 if (rcu_cmpxchg_pointer(prev
, node
, node
->next
) != node
)
194 call_rcu(free
, node
);
198 /* restart loop, release and re-take the read lock to be kind to GP */
204 int ht_delete(struct rcu_ht
*ht
, void *key
)
208 data
= ht_steal(ht
, key
);
210 if (ht
->free_fct
&& data
)
211 call_rcu(ht
->free_fct
, data
);
218 unsigned long stupid_hash(void *key
)
220 return (unsigned long)key
% HASH_SIZE
;
/* (extraction residue removed: cgit page-generation footer, not source code) */