kernel/bpf/arraymap.c

/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>

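/* Array maps are fixed-size arrays indexed by a u32 key. All elements
 * are pre-allocated and zero-initialized when the map is created, so a
 * lookup is plain pointer arithmetic and elements can never be deleted,
 * only overwritten in place.
 */
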
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array *array;
	u32 elem_size, array_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	elem_size = round_up(attr->value_size, 8);

	/* check that round_up() did not wrap elem_size to zero and that
	 * the total allocation size still fits in a u32
	 */
	if (elem_size == 0 ||
	    attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)
		return ERR_PTR(-ENOMEM);

	array_size = sizeof(*array) + attr->max_entries * elem_size;

	/* allocate all map elements and zero-initialize them */
	array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
	if (!array) {
		array = vzalloc(array_size);
		if (!array)
			return ERR_PTR(-ENOMEM);
	}

	/* copy mandatory map attributes */
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;

	array->elem_size = elem_size;

	return &array->map;
}

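/* A lookup computes the element's address inside the single pre-allocated
 * value area and hands the caller a direct pointer, so the element can be
 * read and written in place without copying.
 */
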
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return NULL;

	return array->value + array->elem_size * index;
}

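/* Iteration contract: an out-of-range key restarts iteration at index 0,
 * the last valid index returns -ENOENT, anything else yields index + 1.
 * Userspace walks the whole array by feeding each returned key back into
 * BPF_MAP_GET_NEXT_KEY.
 */
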
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

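/* Update flags: BPF_ANY (0) creates or overwrites, BPF_NOEXIST (1) only
 * creates, BPF_EXIST (2) only overwrites. Every array slot exists from
 * creation onwards, so BPF_NOEXIST can never succeed here.
 */
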
/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (map_flags > BPF_EXIST)
		/* unknown flags */
		return -EINVAL;

	if (index >= array->map.max_entries)
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (map_flags == BPF_NOEXIST)
		/* all elements already exist */
		return -EEXIST;

	memcpy(array->value + array->elem_size * index, value, array->elem_size);
	return 0;
}

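/* Array elements are part of one pre-allocated block and cannot be
 * removed individually, so delete always fails.
 */
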
/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	kvfree(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list array_type __read_mostly = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	return 0;
}
late_initcall(register_array_map);

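/* fd arrays store pointers to kernel objects that userspace refers to by
 * file descriptor. The u32 value written on update is an fd; it is
 * translated into an object pointer by map_fd_get_ptr() and released
 * again through map_fd_put_ptr().
 */
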
static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);
	kvfree(array);
}

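/* The stored values are kernel pointers which must not leak to userspace
 * or to programs, so direct lookup is not supported.
 */
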
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

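/* xchg() swaps the new object pointer into the slot atomically, so a
 * program dereferencing the slot under rcu_read_lock() sees either the
 * old or the new pointer, never a torn value. The old object is only
 * put after the swap.
 */
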
/* only called from syscall */
static int fd_array_map_update_elem(struct bpf_map *map, void *key,
				    void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

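/* prog arrays back the bpf_tail_call() helper: a running program jumps
 * into the program stored at the given index. Only programs of a
 * compatible type may be stored, which bpf_prog_array_compatible()
 * enforces below.
 */
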
static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}
	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	struct bpf_prog *prog = ptr;

	bpf_prog_put_rcu(prog);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_update_elem = fd_array_map_update_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __read_mostly = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);

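/* perf event arrays hold struct perf_event pointers; programs read the
 * selected hardware counter through the bpf_perf_event_read() helper.
 * All entries are dropped when the map is freed, since no program can
 * still be running at that point.
 */
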
static void perf_event_array_map_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
{
	struct perf_event *event;
	const struct perf_event_attr *attr;

	event = perf_event_get(fd);
	if (IS_ERR(event))
		return event;

	attr = perf_event_attrs(event);
	if (IS_ERR(attr)) {
		/* drop the reference taken by perf_event_get() */
		perf_event_release_kernel(event);
		return (void *)attr;
	}

	if (attr->type != PERF_TYPE_RAW &&
	    attr->type != PERF_TYPE_HARDWARE) {
		perf_event_release_kernel(event);
		return ERR_PTR(-EINVAL);
	}
	return event;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	struct perf_event *event = ptr;

	perf_event_release_kernel(event);
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = perf_event_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_update_elem = fd_array_map_update_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
};

static struct bpf_map_type_list perf_event_array_type __read_mostly = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);