[NET_SCHED]: act_api: use PTR_ERR in tcf_action_init/tcf_action_get
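tcf_action_init(), tcf_action_init_1() and tcf_action_get_1() report failures through the returned pointer itself, encoded with ERR_PTR(), so callers check IS_ERR() and recover the exact errno with PTR_ERR() instead of relying on a bare NULL return. A minimal sketch of the caller-side pattern, as used by tca_action_gd() and tcf_action_add() in the listing below (error handling abbreviated):

	struct tc_action *act;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (IS_ERR(act))
		return PTR_ERR(act);	/* e.g. -EINVAL, -ENOENT or -EAGAIN */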
1 /*
2 * net/sched/act_api.c Packet action API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Author: Jamal Hadi Salim
10 *
11 *
12 */
13
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/err.h>
22 #include <net/net_namespace.h>
23 #include <net/sock.h>
24 #include <net/sch_generic.h>
25 #include <net/act_api.h>
26 #include <net/netlink.h>
27
28 void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
29 {
30 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
31 struct tcf_common **p1p;
32
33 for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
34 if (*p1p == p) {
35 write_lock_bh(hinfo->lock);
36 *p1p = p->tcfc_next;
37 write_unlock_bh(hinfo->lock);
38 gen_kill_estimator(&p->tcfc_bstats,
39 &p->tcfc_rate_est);
40 kfree(p);
41 return;
42 }
43 }
44 BUG_TRAP(0);
45 }
46 EXPORT_SYMBOL(tcf_hash_destroy);
47
48 int tcf_hash_release(struct tcf_common *p, int bind,
49 struct tcf_hashinfo *hinfo)
50 {
51 int ret = 0;
52
53 if (p) {
54 if (bind)
55 p->tcfc_bindcnt--;
56
57 p->tcfc_refcnt--;
58 if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
59 tcf_hash_destroy(p, hinfo);
60 ret = 1;
61 }
62 }
63 return ret;
64 }
65 EXPORT_SYMBOL(tcf_hash_release);
66
67 static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
68 struct tc_action *a, struct tcf_hashinfo *hinfo)
69 {
70 struct tcf_common *p;
71 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
72 struct nlattr *r;
73
74 read_lock_bh(hinfo->lock);
75
76 s_i = cb->args[0];
77
78 for (i = 0; i < (hinfo->hmask + 1); i++) {
79 p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
80
81 for (; p; p = p->tcfc_next) {
82 index++;
83 if (index < s_i)
84 continue;
85 a->priv = p;
86 a->order = n_i;
87 r = (struct nlattr *)skb_tail_pointer(skb);
88 NLA_PUT(skb, a->order, 0, NULL);
89 err = tcf_action_dump_1(skb, a, 0, 0);
90 if (err < 0) {
91 index--;
92 nlmsg_trim(skb, r);
93 goto done;
94 }
95 r->nla_len = skb_tail_pointer(skb) - (u8 *)r;
96 n_i++;
97 if (n_i >= TCA_ACT_MAX_PRIO)
98 goto done;
99 }
100 }
101 done:
102 read_unlock_bh(hinfo->lock);
103 if (n_i)
104 cb->args[0] += n_i;
105 return n_i;
106
107 nla_put_failure:
108 nlmsg_trim(skb, r);
109 goto done;
110 }
111
112 static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
113 struct tcf_hashinfo *hinfo)
114 {
115 struct tcf_common *p, *s_p;
116 struct nlattr *r;
117 int i = 0, n_i = 0;
118
119 r = (struct nlattr *)skb_tail_pointer(skb);
120 NLA_PUT(skb, a->order, 0, NULL);
121 NLA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
122 for (i = 0; i < (hinfo->hmask + 1); i++) {
123 p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
124
125 while (p != NULL) {
126 s_p = p->tcfc_next;
127 if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
128 module_put(a->ops->owner);
129 n_i++;
130 p = s_p;
131 }
132 }
133 NLA_PUT(skb, TCA_FCNT, 4, &n_i);
134 r->nla_len = skb_tail_pointer(skb) - (u8 *)r;
135
136 return n_i;
137 nla_put_failure:
138 nlmsg_trim(skb, r);
139 return -EINVAL;
140 }
141
142 int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
143 int type, struct tc_action *a)
144 {
145 struct tcf_hashinfo *hinfo = a->ops->hinfo;
146
147 if (type == RTM_DELACTION) {
148 return tcf_del_walker(skb, a, hinfo);
149 } else if (type == RTM_GETACTION) {
150 return tcf_dump_walker(skb, cb, a, hinfo);
151 } else {
152 printk("tcf_generic_walker: unknown action %d\n", type);
153 return -EINVAL;
154 }
155 }
156 EXPORT_SYMBOL(tcf_generic_walker);
157
158 struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
159 {
160 struct tcf_common *p;
161
162 read_lock_bh(hinfo->lock);
163 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
164 p = p->tcfc_next) {
165 if (p->tcfc_index == index)
166 break;
167 }
168 read_unlock_bh(hinfo->lock);
169
170 return p;
171 }
172 EXPORT_SYMBOL(tcf_hash_lookup);
173
174 u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
175 {
176 u32 val = *idx_gen;
177
178 do {
179 if (++val == 0)
180 val = 1;
181 } while (tcf_hash_lookup(val, hinfo));
182
183 return (*idx_gen = val);
184 }
185 EXPORT_SYMBOL(tcf_hash_new_index);
186
187 int tcf_hash_search(struct tc_action *a, u32 index)
188 {
189 struct tcf_hashinfo *hinfo = a->ops->hinfo;
190 struct tcf_common *p = tcf_hash_lookup(index, hinfo);
191
192 if (p) {
193 a->priv = p;
194 return 1;
195 }
196 return 0;
197 }
198 EXPORT_SYMBOL(tcf_hash_search);
199
200 struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
201 struct tcf_hashinfo *hinfo)
202 {
203 struct tcf_common *p = NULL;
204 if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
205 if (bind) {
206 p->tcfc_bindcnt++;
207 p->tcfc_refcnt++;
208 }
209 a->priv = p;
210 }
211 return p;
212 }
213 EXPORT_SYMBOL(tcf_hash_check);
214
215 struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo)
216 {
217 struct tcf_common *p = kzalloc(size, GFP_KERNEL);
218
219 if (unlikely(!p))
220 return p;
221 p->tcfc_refcnt = 1;
222 if (bind)
223 p->tcfc_bindcnt = 1;
224
225 spin_lock_init(&p->tcfc_lock);
226 p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
227 p->tcfc_tm.install = jiffies;
228 p->tcfc_tm.lastuse = jiffies;
229 if (est)
230 gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
231 &p->tcfc_lock, est);
232 a->priv = (void *) p;
233 return p;
234 }
235 EXPORT_SYMBOL(tcf_hash_create);
236
237 void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
238 {
239 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
240
241 write_lock_bh(hinfo->lock);
242 p->tcfc_next = hinfo->htab[h];
243 hinfo->htab[h] = p;
244 write_unlock_bh(hinfo->lock);
245 }
246 EXPORT_SYMBOL(tcf_hash_insert);
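/*
 * Typical usage of the hash helpers above from an action's ->init() callback
 * (a sketch only; "act_foo", struct tcf_foo, foo_hash_info and foo_idx_gen
 * are hypothetical names standing in for a real action module):
 *
 *	struct tcf_common *pc;
 *	int ret = 0;
 *
 *	pc = tcf_hash_check(index, a, bind, &foo_hash_info);
 *	if (!pc) {
 *		pc = tcf_hash_create(index, est, a, sizeof(struct tcf_foo),
 *				     bind, &foo_idx_gen, &foo_hash_info);
 *		if (unlikely(!pc))
 *			return -ENOMEM;
 *		ret = ACT_P_CREATED;
 *	} else if (!ovr) {
 *		tcf_hash_release(pc, bind, &foo_hash_info);
 *		return -EEXIST;
 *	}
 *
 *	... set up the action's private parameters under pc->tcfc_lock ...
 *
 *	if (ret == ACT_P_CREATED)
 *		tcf_hash_insert(pc, &foo_hash_info);
 *	return ret;
 */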
247
248 static struct tc_action_ops *act_base = NULL;
249 static DEFINE_RWLOCK(act_mod_lock);
250
251 int tcf_register_action(struct tc_action_ops *act)
252 {
253 struct tc_action_ops *a, **ap;
254
255 write_lock(&act_mod_lock);
256 for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
257 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
258 write_unlock(&act_mod_lock);
259 return -EEXIST;
260 }
261 }
262 act->next = NULL;
263 *ap = act;
264 write_unlock(&act_mod_lock);
265 return 0;
266 }
267 EXPORT_SYMBOL(tcf_register_action);
268
269 int tcf_unregister_action(struct tc_action_ops *act)
270 {
271 struct tc_action_ops *a, **ap;
272 int err = -ENOENT;
273
274 write_lock(&act_mod_lock);
275 for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
276 if (a == act)
277 break;
278 if (a) {
279 *ap = a->next;
280 a->next = NULL;
281 err = 0;
282 }
283 write_unlock(&act_mod_lock);
284 return err;
285 }
286 EXPORT_SYMBOL(tcf_unregister_action);
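/*
 * Typical registration pattern (a sketch only; "act_foo", its tcf_foo_*
 * callbacks and TCA_ACT_FOO are hypothetical): an action module fills in a
 * struct tc_action_ops and registers it at module load, unregistering on
 * exit. Generic helpers from this file can be used for lookup and walking:
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind		= "foo",
 *		.hinfo		= &foo_hash_info,
 *		.type		= TCA_ACT_FOO,
 *		.owner		= THIS_MODULE,
 *		.act		= tcf_foo_act,
 *		.dump		= tcf_foo_dump,
 *		.cleanup	= tcf_foo_cleanup,
 *		.lookup		= tcf_hash_search,
 *		.init		= tcf_foo_init,
 *		.walk		= tcf_generic_walker,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops);
 *	}
 *
 *	static void __exit foo_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_foo_ops);
 *	}
 */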
287
288 /* lookup by name */
289 static struct tc_action_ops *tc_lookup_action_n(char *kind)
290 {
291 struct tc_action_ops *a = NULL;
292
293 if (kind) {
294 read_lock(&act_mod_lock);
295 for (a = act_base; a; a = a->next) {
296 if (strcmp(kind, a->kind) == 0) {
297 if (!try_module_get(a->owner)) {
298 read_unlock(&act_mod_lock);
299 return NULL;
300 }
301 break;
302 }
303 }
304 read_unlock(&act_mod_lock);
305 }
306 return a;
307 }
308
309 /* lookup by nlattr */
310 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
311 {
312 struct tc_action_ops *a = NULL;
313
314 if (kind) {
315 read_lock(&act_mod_lock);
316 for (a = act_base; a; a = a->next) {
317 if (nla_strcmp(kind, a->kind) == 0) {
318 if (!try_module_get(a->owner)) {
319 read_unlock(&act_mod_lock);
320 return NULL;
321 }
322 break;
323 }
324 }
325 read_unlock(&act_mod_lock);
326 }
327 return a;
328 }
329
330 #if 0
331 /* lookup by id */
332 static struct tc_action_ops *tc_lookup_action_id(u32 type)
333 {
334 struct tc_action_ops *a = NULL;
335
336 if (type) {
337 read_lock(&act_mod_lock);
338 for (a = act_base; a; a = a->next) {
339 if (a->type == type) {
340 if (!try_module_get(a->owner)) {
341 read_unlock(&act_mod_lock);
342 return NULL;
343 }
344 break;
345 }
346 }
347 read_unlock(&act_mod_lock);
348 }
349 return a;
350 }
351 #endif
352
353 int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
354 struct tcf_result *res)
355 {
356 struct tc_action *a;
357 int ret = -1;
358
359 if (skb->tc_verd & TC_NCLS) {
360 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
361 ret = TC_ACT_OK;
362 goto exec_done;
363 }
364 while ((a = act) != NULL) {
365 repeat:
366 if (a->ops && a->ops->act) {
367 ret = a->ops->act(skb, a, res);
368 if (TC_MUNGED & skb->tc_verd) {
369 /* copied already, allow trampling */
370 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
371 skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
372 }
373 if (ret == TC_ACT_REPEAT)
374 goto repeat; /* we need a ttl - JHS */
375 if (ret != TC_ACT_PIPE)
376 goto exec_done;
377 }
378 act = a->next;
379 }
380 exec_done:
381 return ret;
382 }
383 EXPORT_SYMBOL(tcf_action_exec);
384
385 void tcf_action_destroy(struct tc_action *act, int bind)
386 {
387 struct tc_action *a;
388
389 for (a = act; a; a = act) {
390 if (a->ops && a->ops->cleanup) {
391 if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
392 module_put(a->ops->owner);
393 act = act->next;
394 kfree(a);
395 } else { /*FIXME: Remove later - catch insertion bugs*/
396 printk("tcf_action_destroy: BUG? destroying NULL ops\n");
397 act = act->next;
398 kfree(a);
399 }
400 }
401 }
402
403 int
404 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
405 {
406 int err = -EINVAL;
407
408 if (a->ops == NULL || a->ops->dump == NULL)
409 return err;
410 return a->ops->dump(skb, a, bind, ref);
411 }
412
413 int
414 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
415 {
416 int err = -EINVAL;
417 unsigned char *b = skb_tail_pointer(skb);
418 struct nlattr *r;
419
420 if (a->ops == NULL || a->ops->dump == NULL)
421 return err;
422
423 NLA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
424 if (tcf_action_copy_stats(skb, a, 0))
425 goto nla_put_failure;
426 r = (struct nlattr *)skb_tail_pointer(skb);
427 NLA_PUT(skb, TCA_OPTIONS, 0, NULL);
428 if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
429 r->nla_len = skb_tail_pointer(skb) - (u8 *)r;
430 return err;
431 }
432
433 nla_put_failure:
434 nlmsg_trim(skb, b);
435 return -1;
436 }
437 EXPORT_SYMBOL(tcf_action_dump_1);
438
439 int
440 tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
441 {
442 struct tc_action *a;
443 int err = -EINVAL;
444 unsigned char *b = skb_tail_pointer(skb);
445 struct nlattr *r;
446
447 while ((a = act) != NULL) {
448 r = (struct nlattr *)skb_tail_pointer(skb);
449 act = a->next;
450 NLA_PUT(skb, a->order, 0, NULL);
451 err = tcf_action_dump_1(skb, a, bind, ref);
452 if (err < 0)
453 goto errout;
454 r->nla_len = skb_tail_pointer(skb) - (u8 *)r;
455 }
456
457 return 0;
458
459 nla_put_failure:
460 err = -EINVAL;
461 errout:
462 nlmsg_trim(skb, b);
463 return err;
464 }
465
466 struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
467 char *name, int ovr, int bind)
468 {
469 struct tc_action *a;
470 struct tc_action_ops *a_o;
471 char act_name[IFNAMSIZ];
472 struct nlattr *tb[TCA_ACT_MAX+1];
473 struct nlattr *kind;
474 int err;
475
476 err = -EINVAL;
477
478 if (name == NULL) {
479 if (nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL) < 0)
480 goto err_out;
481 kind = tb[TCA_ACT_KIND];
482 if (kind == NULL)
483 goto err_out;
484 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
485 goto err_out;
486 } else {
487 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
488 goto err_out;
489 }
490
491 a_o = tc_lookup_action_n(act_name);
492 if (a_o == NULL) {
493 #ifdef CONFIG_KMOD
494 rtnl_unlock();
495 request_module("act_%s", act_name);
496 rtnl_lock();
497
498 a_o = tc_lookup_action_n(act_name);
499
500 /* We dropped the RTNL semaphore in order to
501 * perform the module load. So, even if we
502 * succeeded in loading the module we have to
503 * tell the caller to replay the request. We
504 * indicate this using -EAGAIN.
505 */
506 if (a_o != NULL) {
507 err = -EAGAIN;
508 goto err_mod;
509 }
510 #endif
511 err = -ENOENT;
512 goto err_out;
513 }
514
515 err = -ENOMEM;
516 a = kzalloc(sizeof(*a), GFP_KERNEL);
517 if (a == NULL)
518 goto err_mod;
519
520 /* backward compatibility for policer */
521 if (name == NULL)
522 err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
523 else
524 err = a_o->init(nla, est, a, ovr, bind);
525 if (err < 0)
526 goto err_free;
527
528 /* The module count goes up only when a brand new policy is created;
529 * if it already exists and is only bound to in a_o->init(), then
530 * ACT_P_CREATED is not returned (zero is).
531 */
532 if (err != ACT_P_CREATED)
533 module_put(a_o->owner);
534 a->ops = a_o;
535
536 return a;
537
538 err_free:
539 kfree(a);
540 err_mod:
541 module_put(a_o->owner);
542 err_out:
543 return ERR_PTR(err);
544 }
545
546 struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
547 char *name, int ovr, int bind)
548 {
549 struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
550 struct tc_action *head = NULL, *act, *act_prev = NULL;
551 int i;
552
553 if (nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL) < 0)
554 return ERR_PTR(-EINVAL);
555
556 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
557 act = tcf_action_init_1(tb[i], est, name, ovr, bind);
558 if (IS_ERR(act))
559 goto err;
560 act->order = i;
561
562 if (head == NULL)
563 head = act;
564 else
565 act_prev->next = act;
566 act_prev = act;
567 }
568 return head;
569
570 err:
571 if (head != NULL)
572 tcf_action_destroy(head, bind);
573 return act;
574 }
575
576 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
577 int compat_mode)
578 {
579 int err = 0;
580 struct gnet_dump d;
581 struct tcf_act_hdr *h = a->priv;
582
583 if (h == NULL)
584 goto errout;
585
586 /* compat_mode being true specifies a call that is supposed
587 * to add additional backward compatibility statistic TLVs.
588 */
589 if (compat_mode) {
590 if (a->type == TCA_OLD_COMPAT)
591 err = gnet_stats_start_copy_compat(skb, 0,
592 TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
593 else
594 return 0;
595 } else
596 err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
597 &h->tcf_lock, &d);
598
599 if (err < 0)
600 goto errout;
601
602 if (a->ops != NULL && a->ops->get_stats != NULL)
603 if (a->ops->get_stats(skb, a) < 0)
604 goto errout;
605
606 if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
607 gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
608 gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
609 goto errout;
610
611 if (gnet_stats_finish_copy(&d) < 0)
612 goto errout;
613
614 return 0;
615
616 errout:
617 return -1;
618 }
619
620 static int
621 tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
622 u16 flags, int event, int bind, int ref)
623 {
624 struct tcamsg *t;
625 struct nlmsghdr *nlh;
626 unsigned char *b = skb_tail_pointer(skb);
627 struct nlattr *x;
628
629 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
630
631 t = NLMSG_DATA(nlh);
632 t->tca_family = AF_UNSPEC;
633 t->tca__pad1 = 0;
634 t->tca__pad2 = 0;
635
636 x = (struct nlattr *)skb_tail_pointer(skb);
637 NLA_PUT(skb, TCA_ACT_TAB, 0, NULL);
638
639 if (tcf_action_dump(skb, a, bind, ref) < 0)
640 goto nla_put_failure;
641
642 x->nla_len = skb_tail_pointer(skb) - (u8 *)x;
643
644 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
645 return skb->len;
646
647 nla_put_failure:
648 nlmsg_failure:
649 nlmsg_trim(skb, b);
650 return -1;
651 }
652
653 static int
654 act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
655 {
656 struct sk_buff *skb;
657
658 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
659 if (!skb)
660 return -ENOBUFS;
661 if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
662 kfree_skb(skb);
663 return -EINVAL;
664 }
665
666 return rtnl_unicast(skb, &init_net, pid);
667 }
668
669 static struct tc_action *
670 tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
671 {
672 struct nlattr *tb[TCA_ACT_MAX+1];
673 struct tc_action *a;
674 int index;
675 int err;
676
677 err = -EINVAL;
678 if (nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL) < 0)
679 goto err_out;
680
681 if (tb[TCA_ACT_INDEX] == NULL ||
682 nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
683 goto err_out;
684 index = *(int *)nla_data(tb[TCA_ACT_INDEX]);
685
686 err = -ENOMEM;
687 a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
688 if (a == NULL)
689 goto err_out;
690
691 err = -EINVAL;
692 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
693 if (a->ops == NULL)
694 goto err_free;
695 if (a->ops->lookup == NULL)
696 goto err_mod;
697 err = -ENOENT;
698 if (a->ops->lookup(a, index) == 0)
699 goto err_mod;
700
701 module_put(a->ops->owner);
702 return a;
703
704 err_mod:
705 module_put(a->ops->owner);
706 err_free:
707 kfree(a);
708 err_out:
709 return ERR_PTR(err);
710 }
711
712 static void cleanup_a(struct tc_action *act)
713 {
714 struct tc_action *a;
715
716 for (a = act; a; a = act) {
717 act = a->next;
718 kfree(a);
719 }
720 }
721
722 static struct tc_action *create_a(int i)
723 {
724 struct tc_action *act;
725
726 act = kzalloc(sizeof(*act), GFP_KERNEL);
727 if (act == NULL) {
728 printk("create_a: failed to alloc!\n");
729 return NULL;
730 }
731 act->order = i;
732 return act;
733 }
734
735 static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
736 {
737 struct sk_buff *skb;
738 unsigned char *b;
739 struct nlmsghdr *nlh;
740 struct tcamsg *t;
741 struct netlink_callback dcb;
742 struct nlattr *x;
743 struct nlattr *tb[TCA_ACT_MAX+1];
744 struct nlattr *kind;
745 struct tc_action *a = create_a(0);
746 int err = -EINVAL;
747
748 if (a == NULL) {
749 printk("tca_action_flush: couldnt create tc_action\n");
750 return err;
751 }
752
753 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
754 if (!skb) {
755 printk("tca_action_flush: failed skb alloc\n");
756 kfree(a);
757 return -ENOBUFS;
758 }
759
760 b = skb_tail_pointer(skb);
761
762 if (nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL) < 0)
763 goto err_out;
764
765 kind = tb[TCA_ACT_KIND];
766 a->ops = tc_lookup_action(kind);
767 if (a->ops == NULL)
768 goto err_out;
769
770 nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
771 t = NLMSG_DATA(nlh);
772 t->tca_family = AF_UNSPEC;
773 t->tca__pad1 = 0;
774 t->tca__pad2 = 0;
775
776 x = (struct nlattr *)skb_tail_pointer(skb);
777 NLA_PUT(skb, TCA_ACT_TAB, 0, NULL);
778
779 err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
780 if (err < 0)
781 goto nla_put_failure;
782
783 x->nla_len = skb_tail_pointer(skb) - (u8 *)x;
784
785 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
786 nlh->nlmsg_flags |= NLM_F_ROOT;
787 module_put(a->ops->owner);
788 kfree(a);
789 err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
790 if (err > 0)
791 return 0;
792
793 return err;
794
795 nla_put_failure:
796 nlmsg_failure:
797 module_put(a->ops->owner);
798 err_out:
799 kfree_skb(skb);
800 kfree(a);
801 return err;
802 }
803
804 static int
805 tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
806 {
807 int i, ret = 0;
808 struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
809 struct tc_action *head = NULL, *act, *act_prev = NULL;
810
811 if (nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL) < 0)
812 return -EINVAL;
813
814 if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
815 if (tb[0] != NULL && tb[1] == NULL)
816 return tca_action_flush(tb[0], n, pid);
817 }
818
819 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
820 act = tcf_action_get_1(tb[i], n, pid);
821 if (IS_ERR(act)) {
822 ret = PTR_ERR(act);
823 goto err;
824 }
825 act->order = i;
826
827 if (head == NULL)
828 head = act;
829 else
830 act_prev->next = act;
831 act_prev = act;
832 }
833
834 if (event == RTM_GETACTION)
835 ret = act_get_notify(pid, n, head, event);
836 else { /* delete */
837 struct sk_buff *skb;
838
839 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
840 if (!skb) {
841 ret = -ENOBUFS;
842 goto err;
843 }
844
845 if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
846 0, 1) <= 0) {
847 kfree_skb(skb);
848 ret = -EINVAL;
849 goto err;
850 }
851
852 /* now do the delete */
853 tcf_action_destroy(head, 0);
854 ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
855 n->nlmsg_flags&NLM_F_ECHO);
856 if (ret > 0)
857 return 0;
858 return ret;
859 }
860 err:
861 cleanup_a(head);
862 return ret;
863 }
864
865 static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
866 u16 flags)
867 {
868 struct tcamsg *t;
869 struct nlmsghdr *nlh;
870 struct sk_buff *skb;
871 struct nlattr *x;
872 unsigned char *b;
873 int err = 0;
874
875 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
876 if (!skb)
877 return -ENOBUFS;
878
879 b = skb_tail_pointer(skb);
880
881 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
882 t = NLMSG_DATA(nlh);
883 t->tca_family = AF_UNSPEC;
884 t->tca__pad1 = 0;
885 t->tca__pad2 = 0;
886
887 x = (struct nlattr *)skb_tail_pointer(skb);
888 NLA_PUT(skb, TCA_ACT_TAB, 0, NULL);
889
890 if (tcf_action_dump(skb, a, 0, 0) < 0)
891 goto nla_put_failure;
892
893 x->nla_len = skb_tail_pointer(skb) - (u8 *)x;
894
895 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
896 NETLINK_CB(skb).dst_group = RTNLGRP_TC;
897
898 err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
899 if (err > 0)
900 err = 0;
901 return err;
902
903 nla_put_failure:
904 nlmsg_failure:
905 kfree_skb(skb);
906 return -1;
907 }
908
909
910 static int
911 tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
912 {
913 int ret = 0;
914 struct tc_action *act;
915 struct tc_action *a;
916 u32 seq = n->nlmsg_seq;
917
918 act = tcf_action_init(nla, NULL, NULL, ovr, 0);
919 if (act == NULL)
920 goto done;
921 if (IS_ERR(act)) {
922 ret = PTR_ERR(act);
923 goto done;
924 }
925
926 /* dump then free all the actions after update; inserted policy
927 * stays intact
928 */
929 ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
930 for (a = act; a; a = act) {
931 act = a->next;
932 kfree(a);
933 }
934 done:
935 return ret;
936 }
937
938 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
939 {
940 struct net *net = skb->sk->sk_net;
941 struct nlattr *tca[TCA_ACT_MAX + 1];
942 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
943 int ret = 0, ovr = 0;
944
945 if (net != &init_net)
946 return -EINVAL;
947
948 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
949 if (ret < 0)
950 return ret;
951
952 if (tca[TCA_ACT_TAB] == NULL) {
953 printk("tc_ctl_action: received NO action attribs\n");
954 return -EINVAL;
955 }
956
957 /* n->nlmsg_flags&NLM_F_CREATE
958 * */
959 switch (n->nlmsg_type) {
960 case RTM_NEWACTION:
961 /* we are going to assume all other flags
962 * imply create only if it doesn't exist.
963 * Note that CREATE | EXCL implies that,
964 * but since we want to avoid ambiguity (e.g. when flags
965 * is zero) we just set this.
966 */
967 if (n->nlmsg_flags&NLM_F_REPLACE)
968 ovr = 1;
969 replay:
970 ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
971 if (ret == -EAGAIN)
972 goto replay;
973 break;
974 case RTM_DELACTION:
975 ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
976 break;
977 case RTM_GETACTION:
978 ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
979 break;
980 default:
981 BUG();
982 }
983
984 return ret;
985 }
986
987 static struct nlattr *
988 find_dump_kind(struct nlmsghdr *n)
989 {
990 struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
991 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
992 struct nlattr *nla[TCAA_MAX + 1];
993 struct nlattr *kind;
994
995 if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
996 return NULL;
997 tb1 = nla[TCA_ACT_TAB];
998 if (tb1 == NULL)
999 return NULL;
1000
1001 if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
1002 NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
1003 return NULL;
1004
1005 if (tb[1] == NULL)
1006 return NULL;
1007 if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
1008 nla_len(tb[1]), NULL) < 0)
1009 return NULL;
1010 kind = tb2[TCA_ACT_KIND];
1011
1012 return kind;
1013 }
1014
1015 static int
1016 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1017 {
1018 struct net *net = skb->sk->sk_net;
1019 struct nlmsghdr *nlh;
1020 unsigned char *b = skb_tail_pointer(skb);
1021 struct nlattr *x;
1022 struct tc_action_ops *a_o;
1023 struct tc_action a;
1024 int ret = 0;
1025 struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
1026 struct nlattr *kind = find_dump_kind(cb->nlh);
1027
1028 if (net != &init_net)
1029 return 0;
1030
1031 if (kind == NULL) {
1032 printk("tc_dump_action: action bad kind\n");
1033 return 0;
1034 }
1035
1036 a_o = tc_lookup_action(kind);
1037 if (a_o == NULL) {
1038 return 0;
1039 }
1040
1041 memset(&a, 0, sizeof(struct tc_action));
1042 a.ops = a_o;
1043
1044 if (a_o->walk == NULL) {
1045 printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
1046 goto nla_put_failure;
1047 }
1048
1049 nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
1050 cb->nlh->nlmsg_type, sizeof(*t));
1051 t = NLMSG_DATA(nlh);
1052 t->tca_family = AF_UNSPEC;
1053 t->tca__pad1 = 0;
1054 t->tca__pad2 = 0;
1055
1056 x = (struct nlattr *)skb_tail_pointer(skb);
1057 NLA_PUT(skb, TCA_ACT_TAB, 0, NULL);
1058
1059 ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
1060 if (ret < 0)
1061 goto nla_put_failure;
1062
1063 if (ret > 0) {
1064 x->nla_len = skb_tail_pointer(skb) - (u8 *)x;
1065 ret = skb->len;
1066 } else
1067 nlmsg_trim(skb, x);
1068
1069 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1070 if (NETLINK_CB(cb->skb).pid && ret)
1071 nlh->nlmsg_flags |= NLM_F_MULTI;
1072 module_put(a_o->owner);
1073 return skb->len;
1074
1075 nla_put_failure:
1076 nlmsg_failure:
1077 module_put(a_o->owner);
1078 nlmsg_trim(skb, b);
1079 return skb->len;
1080 }
1081
1082 static int __init tc_action_init(void)
1083 {
1084 rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
1085 rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
1086 rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);
1087
1088 return 0;
1089 }
1090
1091 subsys_initcall(tc_action_init);