/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Kernel module implementing an IP set type: the bitmap:ip type */

#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/netlink.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <net/tcp.h>

#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_bitmap.h>
#define IP_SET_BITMAP_TIMEOUT
#include <linux/netfilter/ipset/ip_set_timeout.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
MODULE_DESCRIPTION("bitmap:ip type of IP sets");
MODULE_ALIAS("ip_set_bitmap:ip");

/* Type structure */
struct bitmap_ip {
	void *members;		/* the set members */
	u32 first_ip;		/* host byte order, included in range */
	u32 last_ip;		/* host byte order, included in range */
	u32 elements;		/* number of max elements in the set */
	u32 hosts;		/* number of hosts in a subnet */
	size_t memsize;		/* members size */
	u8 netmask;		/* subnet netmask */
	u32 timeout;		/* timeout parameter */
	struct timer_list gc;	/* garbage collection */
};

/* Base variant */

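/* Map an IPv4 address (host byte order) to its slot index in the set:
 * mask the address down to the configured netmask and count subnets
 * from first_ip.  With netmask == 32 this is simply ip - first_ip.
 */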
static inline u32
ip_to_id(const struct bitmap_ip *m, u32 ip)
{
	return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
}

static int
bitmap_ip_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
	const struct bitmap_ip *map = set->data;
	u16 id = *(u16 *)value;

	return !!test_bit(id, map->members);
}

static int
bitmap_ip_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
	struct bitmap_ip *map = set->data;
	u16 id = *(u16 *)value;

	if (test_and_set_bit(id, map->members))
		return -IPSET_ERR_EXIST;

	return 0;
}

static int
bitmap_ip_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
	struct bitmap_ip *map = set->data;
	u16 id = *(u16 *)value;

	if (!test_and_clear_bit(id, map->members))
		return -IPSET_ERR_EXIST;

	return 0;
}

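/* Dump the members of the set to userspace.  The dump is resumable:
 * cb->args[2] remembers the next element id, so a listing that does not
 * fit into a single netlink message continues where the previous one
 * stopped.
 */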
static int
bitmap_ip_list(const struct ip_set *set,
	       struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct bitmap_ip *map = set->data;
	struct nlattr *atd, *nested;
	u32 id, first = cb->args[2];

	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;
	for (; cb->args[2] < map->elements; cb->args[2]++) {
		id = cb->args[2];
		if (!test_bit(id, map->members))
			continue;
		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested) {
			if (id == first) {
				nla_nest_cancel(skb, atd);
				return -EMSGSIZE;
			} else
				goto nla_put_failure;
		}
		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
				    htonl(map->first_ip + id * map->hosts)))
			goto nla_put_failure;
		ipset_nest_end(skb, nested);
	}
	ipset_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[2] = 0;
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	ipset_nest_end(skb, atd);
	if (unlikely(id == first)) {
		cb->args[2] = 0;
		return -EMSGSIZE;
	}
	return 0;
}

/* Timeout variant */

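/* In the timeout variant the members area is not a bitmap but an array of
 * one unsigned long per element, holding the expiration time in jiffies
 * (IPSET_ELEM_UNSET when the slot is empty), so membership is decided by
 * ip_set_timeout_test() instead of test_bit().
 */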
static int
bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
	const struct bitmap_ip *map = set->data;
	const unsigned long *members = map->members;
	u16 id = *(u16 *)value;

	return ip_set_timeout_test(members[id]);
}

static int
bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
	struct bitmap_ip *map = set->data;
	unsigned long *members = map->members;
	u16 id = *(u16 *)value;

	if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
		return -IPSET_ERR_EXIST;

	members[id] = ip_set_timeout_set(timeout);

	return 0;
}

static int
bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
{
	struct bitmap_ip *map = set->data;
	unsigned long *members = map->members;
	u16 id = *(u16 *)value;
	int ret = -IPSET_ERR_EXIST;

	if (ip_set_timeout_test(members[id]))
		ret = 0;

	members[id] = IPSET_ELEM_UNSET;
	return ret;
}

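/* Timeout-aware counterpart of bitmap_ip_list(): skips expired entries and
 * also emits the remaining lifetime of each element.
 */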
static int
bitmap_ip_tlist(const struct ip_set *set,
		struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct bitmap_ip *map = set->data;
	struct nlattr *adt, *nested;
	u32 id, first = cb->args[2];
	const unsigned long *members = map->members;

	adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
	if (!adt)
		return -EMSGSIZE;
	for (; cb->args[2] < map->elements; cb->args[2]++) {
		id = cb->args[2];
		if (!ip_set_timeout_test(members[id]))
			continue;
		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested) {
			if (id == first) {
				nla_nest_cancel(skb, adt);
				return -EMSGSIZE;
			} else
				goto nla_put_failure;
		}
		if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
				    htonl(map->first_ip + id * map->hosts)) ||
		    nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
				  htonl(ip_set_timeout_get(members[id]))))
			goto nla_put_failure;
		ipset_nest_end(skb, nested);
	}
	ipset_nest_end(skb, adt);

	/* Set listing finished */
	cb->args[2] = 0;

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	ipset_nest_end(skb, adt);
	if (unlikely(id == first)) {
		cb->args[2] = 0;
		return -EMSGSIZE;
	}
	return 0;
}

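/* Kernel-side add/del/test, invoked from the packet path: extract the IPv4
 * address from the skb, check that it falls into the configured range and
 * hand the computed slot id to the variant's adt function.
 */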
static int
bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
	       const struct xt_action_param *par,
	       enum ipset_adt adt, const struct ip_set_adt_opt *opt)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 ip;

	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	ip = ip_to_id(map, ip);

	return adtfn(set, &ip, opt_timeout(opt, map), opt->cmdflags);
}

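/* Userspace add/del/test via netlink: accepts a single IP, an explicit
 * IP..IP_TO range or a CIDR prefix, validates it against the set's range
 * and walks the addresses in steps of map->hosts.
 */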
static int
bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct bitmap_ip *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	u32 timeout = map->timeout;
	u32 ip, ip_to, id;
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
	if (ret)
		return ret;

	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(map->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	if (adt == IPSET_TEST) {
		id = ip_to_id(map, ip);
		return adtfn(set, &id, timeout, flags);
	}

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		if (ip > ip_to) {
			swap(ip, ip_to);
			if (ip < map->first_ip)
				return -IPSET_ERR_BITMAP_RANGE;
		}
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(ip, ip_to, cidr);
	} else
		ip_to = ip;

	if (ip_to > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	for (; !before(ip_to, ip); ip += map->hosts) {
		id = ip_to_id(map, ip);
		ret = adtfn(set, &id, timeout, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}

static void
bitmap_ip_destroy(struct ip_set *set)
{
	struct bitmap_ip *map = set->data;

	if (with_timeout(map->timeout))
		del_timer_sync(&map->gc);

	ip_set_free(map->members);
	kfree(map);

	set->data = NULL;
}

static void
bitmap_ip_flush(struct ip_set *set)
{
	struct bitmap_ip *map = set->data;

	memset(map->members, 0, map->memsize);
}

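/* Fill in the header data of the set for listing: range, netmask, reference
 * count, memory footprint and the default timeout, if any.
 */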
static int
bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
{
	const struct bitmap_ip *map = set->data;
	struct nlattr *nested;

	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
	    nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
	    (map->netmask != 32 &&
	     nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
			  htonl(sizeof(*map) + map->memsize)) ||
	    (with_timeout(map->timeout) &&
	     nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
		goto nla_put_failure;
	ipset_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}

static bool
bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct bitmap_ip *x = a->data;
	const struct bitmap_ip *y = b->data;

	return x->first_ip == y->first_ip &&
	       x->last_ip == y->last_ip &&
	       x->netmask == y->netmask &&
	       x->timeout == y->timeout;
}

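/* Method table of the plain (no timeout) variant */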
static const struct ip_set_type_variant bitmap_ip = {
	.kadt	= bitmap_ip_kadt,
	.uadt	= bitmap_ip_uadt,
	.adt	= {
		[IPSET_ADD] = bitmap_ip_add,
		[IPSET_DEL] = bitmap_ip_del,
		[IPSET_TEST] = bitmap_ip_test,
	},
	.destroy = bitmap_ip_destroy,
	.flush	= bitmap_ip_flush,
	.head	= bitmap_ip_head,
	.list	= bitmap_ip_list,
	.same_set = bitmap_ip_same_set,
};

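/* Method table of the timeout-capable variant */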
static const struct ip_set_type_variant bitmap_tip = {
	.kadt	= bitmap_ip_kadt,
	.uadt	= bitmap_ip_uadt,
	.adt	= {
		[IPSET_ADD] = bitmap_ip_tadd,
		[IPSET_DEL] = bitmap_ip_tdel,
		[IPSET_TEST] = bitmap_ip_ttest,
	},
	.destroy = bitmap_ip_destroy,
	.flush	= bitmap_ip_flush,
	.head	= bitmap_ip_head,
	.list	= bitmap_ip_tlist,
	.same_set = bitmap_ip_same_set,
};

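/* Garbage collector timer: sweep the timeout array, release expired slots
 * and re-arm the timer with the next GC period.
 */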
static void
bitmap_ip_gc(unsigned long ul_set)
{
	struct ip_set *set = (struct ip_set *) ul_set;
	struct bitmap_ip *map = set->data;
	unsigned long *table = map->members;
	u32 id;

	/* We run parallel with other readers (test element)
	 * but adding/deleting new entries is locked out */
	read_lock_bh(&set->lock);
	for (id = 0; id < map->elements; id++)
		if (ip_set_timeout_expired(table[id]))
			table[id] = IPSET_ELEM_UNSET;
	read_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
	add_timer(&map->gc);
}

static void
bitmap_ip_gc_init(struct ip_set *set)
{
	struct bitmap_ip *map = set->data;

	init_timer(&map->gc);
	map->gc.data = (unsigned long) set;
	map->gc.function = bitmap_ip_gc;
	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
	add_timer(&map->gc);
}

/* Create bitmap:ip type of sets */

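/* Allocate the members area and fill in the common fields of a new map;
 * returns false if the allocation fails.
 */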
static bool
init_map_ip(struct ip_set *set, struct bitmap_ip *map,
	    u32 first_ip, u32 last_ip,
	    u32 elements, u32 hosts, u8 netmask)
{
	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
	map->first_ip = first_ip;
	map->last_ip = last_ip;
	map->elements = elements;
	map->hosts = hosts;
	map->netmask = netmask;
	map->timeout = IPSET_NO_TIMEOUT;

	set->data = map;
	set->family = NFPROTO_IPV4;

	return true;
}

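/* Create a new bitmap:ip set: derive the covered range from IP/IP_TO or
 * CIDR, optionally group addresses by NETMASK, enforce the range size
 * limit and pick the plain or the timeout variant depending on whether a
 * default timeout was given.
 */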
static int
bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
	struct bitmap_ip *map;
	u32 first_ip, last_ip, hosts, elements;
	u8 netmask = 32;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
		if (ret)
			return ret;
		if (first_ip > last_ip) {
			u32 tmp = first_ip;

			first_ip = last_ip;
			last_ip = tmp;
		}
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr >= 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(first_ip, last_ip, cidr);
	} else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_NETMASK]) {
		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);

		if (netmask > 32)
			return -IPSET_ERR_INVALID_NETMASK;

		first_ip &= ip_set_hostmask(netmask);
		last_ip |= ~ip_set_hostmask(netmask);
	}

	if (netmask == 32) {
		hosts = 1;
		elements = last_ip - first_ip + 1;
	} else {
		u8 mask_bits;
		u32 mask;

		mask = range_to_mask(first_ip, last_ip, &mask_bits);

		if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
		    netmask <= mask_bits)
			return -IPSET_ERR_BITMAP_RANGE;

		pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
		hosts = 2 << (32 - netmask - 1);
		elements = 2 << (netmask - mask_bits - 1);
	}
	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
		return -IPSET_ERR_BITMAP_RANGE_SIZE;

	pr_debug("hosts %u, elements %u\n", hosts, elements);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		map->memsize = elements * sizeof(unsigned long);

		if (!init_map_ip(set, map, first_ip, last_ip,
				 elements, hosts, netmask)) {
			kfree(map);
			return -ENOMEM;
		}

		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		set->variant = &bitmap_tip;

		bitmap_ip_gc_init(set);
	} else {
		map->memsize = bitmap_bytes(0, elements - 1);

		if (!init_map_ip(set, map, first_ip, last_ip,
				 elements, hosts, netmask)) {
			kfree(map);
			return -ENOMEM;
		}

		set->variant = &bitmap_ip;
	}
	return 0;
}

static struct ip_set_type bitmap_ip_type __read_mostly = {
	.name		= "bitmap:ip",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP,
	.dimension	= IPSET_DIM_ONE,
	.family		= NFPROTO_IPV4,
	.revision_min	= 0,
	.revision_max	= 0,
	.create		= bitmap_ip_create,
	.create_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_NETMASK]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
	},
	.me		= THIS_MODULE,
};

static int __init
bitmap_ip_init(void)
{
	return ip_set_type_register(&bitmap_ip_type);
}

static void __exit
bitmap_ip_fini(void)
{
	ip_set_type_unregister(&bitmap_ip_type);
}

module_init(bitmap_ip_init);
module_exit(bitmap_ip_fini);