net sched: indentation and other OCD stylistic fixes
1 /*
2 * net/sched/act_api.c Packet action API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Author: Jamal Hadi Salim
10 *
11 *
12 */
13
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/kmod.h>
22 #include <linux/err.h>
23 #include <linux/module.h>
24 #include <net/net_namespace.h>
25 #include <net/sock.h>
26 #include <net/sch_generic.h>
27 #include <net/act_api.h>
28 #include <net/netlink.h>
29
30 static void free_tcf(struct rcu_head *head)
31 {
32 struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
33
34 free_percpu(p->cpu_bstats);
35 free_percpu(p->cpu_qstats);
36 kfree(p);
37 }
38
39 static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *a)
40 {
41 struct tcf_common *p = a->priv;
42
43 spin_lock_bh(&hinfo->lock);
44 hlist_del(&p->tcfc_head);
45 spin_unlock_bh(&hinfo->lock);
46 gen_kill_estimator(&p->tcfc_bstats,
47 &p->tcfc_rate_est);
48 /*
49 * gen_estimator's est_timer() might still access p->tcfc_lock
50 * or bstats; wait an RCU grace period before freeing p.
51 */
52 call_rcu(&p->tcfc_rcu, free_tcf);
53 }
54
55 int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
56 {
57 struct tcf_common *p = a->priv;
58 int ret = 0;
59
60 if (p) {
61 if (bind)
62 p->tcfc_bindcnt--;
63 else if (strict && p->tcfc_bindcnt > 0)
64 return -EPERM;
65
66 p->tcfc_refcnt--;
67 if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
68 if (a->ops->cleanup)
69 a->ops->cleanup(a, bind);
70 tcf_hash_destroy(a->hinfo, a);
71 ret = ACT_P_DELETED;
72 }
73 }
74
75 return ret;
76 }
77 EXPORT_SYMBOL(__tcf_hash_release);
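/*
 * Most callers do not want the strict behaviour above.  A sketch of the
 * kind of non-strict wrapper action modules use (illustrative; the
 * in-tree helper of this shape lives in the act_api header):
 *
 *	static inline int tcf_hash_release(struct tc_action *a, bool bind)
 *	{
 *		return __tcf_hash_release(a, bind, false);
 *	}
 *
 * With strict == false a still-bound action is simply unreferenced
 * instead of being rejected with -EPERM; tcf_del_walker() below passes
 * strict == true precisely so that bound actions cannot be flushed.
 */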
78
79 static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
80 struct netlink_callback *cb, struct tc_action *a)
81 {
82 struct hlist_head *head;
83 struct tcf_common *p;
84 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
85 struct nlattr *nest;
86
87 spin_lock_bh(&hinfo->lock);
88
89 s_i = cb->args[0];
90
91 for (i = 0; i < (hinfo->hmask + 1); i++) {
92 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
93
94 hlist_for_each_entry_rcu(p, head, tcfc_head) {
95 index++;
96 if (index < s_i)
97 continue;
98 a->priv = p;
99 a->order = n_i;
100
101 nest = nla_nest_start(skb, a->order);
102 if (nest == NULL)
103 goto nla_put_failure;
104 err = tcf_action_dump_1(skb, a, 0, 0);
105 if (err < 0) {
106 index--;
107 nlmsg_trim(skb, nest);
108 goto done;
109 }
110 nla_nest_end(skb, nest);
111 n_i++;
112 if (n_i >= TCA_ACT_MAX_PRIO)
113 goto done;
114 }
115 }
116 done:
117 spin_unlock_bh(&hinfo->lock);
118 if (n_i)
119 cb->args[0] += n_i;
120 return n_i;
121
122 nla_put_failure:
123 nla_nest_cancel(skb, nest);
124 goto done;
125 }
126
127 static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
128 struct tc_action *a)
129 {
130 struct hlist_head *head;
131 struct hlist_node *n;
132 struct tcf_common *p;
133 struct nlattr *nest;
134 int i = 0, n_i = 0;
135 int ret = -EINVAL;
136
137 nest = nla_nest_start(skb, a->order);
138 if (nest == NULL)
139 goto nla_put_failure;
140 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
141 goto nla_put_failure;
142 for (i = 0; i < (hinfo->hmask + 1); i++) {
143 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
144 hlist_for_each_entry_safe(p, n, head, tcfc_head) {
145 a->priv = p;
146 ret = __tcf_hash_release(a, false, true);
147 if (ret == ACT_P_DELETED) {
148 module_put(a->ops->owner);
149 n_i++;
150 } else if (ret < 0)
151 goto nla_put_failure;
152 }
153 }
154 if (nla_put_u32(skb, TCA_FCNT, n_i))
155 goto nla_put_failure;
156 nla_nest_end(skb, nest);
157
158 return n_i;
159 nla_put_failure:
160 nla_nest_cancel(skb, nest);
161 return ret;
162 }
163
164 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
165 struct netlink_callback *cb, int type,
166 struct tc_action *a)
167 {
168 struct tcf_hashinfo *hinfo = tn->hinfo;
169
170 a->hinfo = hinfo;
171
172 if (type == RTM_DELACTION) {
173 return tcf_del_walker(hinfo, skb, a);
174 } else if (type == RTM_GETACTION) {
175 return tcf_dump_walker(hinfo, skb, cb, a);
176 } else {
177 WARN(1, "tcf_generic_walker: unknown action %d\n", type);
178 return -EINVAL;
179 }
180 }
181 EXPORT_SYMBOL(tcf_generic_walker);
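/*
 * Per-action modules normally wire their .walk callback straight into
 * tcf_generic_walker().  Sketch for a hypothetical "foo" action (the
 * foo_net_id / net_generic() lookup is assumed to be set up by the
 * module's pernet registration):
 *
 *	static int tcf_foo_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  struct tc_action *a)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, a);
 *	}
 */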
182
183 static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
184 {
185 struct tcf_common *p = NULL;
186 struct hlist_head *head;
187
188 spin_lock_bh(&hinfo->lock);
189 head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
190 hlist_for_each_entry_rcu(p, head, tcfc_head)
191 if (p->tcfc_index == index)
192 break;
193 spin_unlock_bh(&hinfo->lock);
194
195 return p;
196 }
197
198 u32 tcf_hash_new_index(struct tc_action_net *tn)
199 {
200 struct tcf_hashinfo *hinfo = tn->hinfo;
201 u32 val = hinfo->index;
202
203 do {
204 if (++val == 0)
205 val = 1;
206 } while (tcf_hash_lookup(val, hinfo));
207
208 hinfo->index = val;
209 return val;
210 }
211 EXPORT_SYMBOL(tcf_hash_new_index);
212
213 int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index)
214 {
215 struct tcf_hashinfo *hinfo = tn->hinfo;
216 struct tcf_common *p = tcf_hash_lookup(index, hinfo);
217
218 if (p) {
219 a->priv = p;
220 a->hinfo = hinfo;
221 return 1;
222 }
223 return 0;
224 }
225 EXPORT_SYMBOL(tcf_hash_search);
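/*
 * Like .walk, an action's .lookup callback is usually a thin wrapper
 * around the helper above (same hypothetical "foo" module):
 *
 *	static int tcf_foo_search(struct net *net, struct tc_action *a,
 *				  u32 index)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_hash_search(tn, a, index);
 *	}
 */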
226
227 int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
228 int bind)
229 {
230 struct tcf_hashinfo *hinfo = tn->hinfo;
231 struct tcf_common *p = NULL;
232 if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
233 if (bind)
234 p->tcfc_bindcnt++;
235 p->tcfc_refcnt++;
236 a->priv = p;
237 a->hinfo = hinfo;
238 return 1;
239 }
240 return 0;
241 }
242 EXPORT_SYMBOL(tcf_hash_check);
243
244 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
245 {
246 struct tcf_common *pc = a->priv;
247 if (est)
248 gen_kill_estimator(&pc->tcfc_bstats,
249 &pc->tcfc_rate_est);
250 call_rcu(&pc->tcfc_rcu, free_tcf);
251 }
252 EXPORT_SYMBOL(tcf_hash_cleanup);
253
254 int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
255 struct tc_action *a, int size, int bind, bool cpustats)
256 {
257 struct tcf_common *p = kzalloc(size, GFP_KERNEL);
258 struct tcf_hashinfo *hinfo = tn->hinfo;
259 int err = -ENOMEM;
260
261 if (unlikely(!p))
262 return -ENOMEM;
263 p->tcfc_refcnt = 1;
264 if (bind)
265 p->tcfc_bindcnt = 1;
266
267 if (cpustats) {
268 p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
269 if (!p->cpu_bstats) {
270 err1:
271 kfree(p);
272 return err;
273 }
274 p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
275 if (!p->cpu_qstats) {
276 err2:
277 free_percpu(p->cpu_bstats);
278 goto err1;
279 }
280 }
281 spin_lock_init(&p->tcfc_lock);
282 INIT_HLIST_NODE(&p->tcfc_head);
283 p->tcfc_index = index ? index : tcf_hash_new_index(tn);
284 p->tcfc_tm.install = jiffies;
285 p->tcfc_tm.lastuse = jiffies;
286 p->tcfc_tm.firstuse = 0;
287 if (est) {
288 err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
289 &p->tcfc_rate_est,
290 &p->tcfc_lock, est);
291 if (err) {
292 free_percpu(p->cpu_qstats);
293 goto err2;
294 }
295 }
296
297 a->priv = (void *) p;
298 a->hinfo = hinfo;
299 return 0;
300 }
301 EXPORT_SYMBOL(tcf_hash_create);
302
303 void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a)
304 {
305 struct tcf_common *p = a->priv;
306 struct tcf_hashinfo *hinfo = tn->hinfo;
307 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
308
309 spin_lock_bh(&hinfo->lock);
310 hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
311 spin_unlock_bh(&hinfo->lock);
312 }
313 EXPORT_SYMBOL(tcf_hash_insert);
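/*
 * The check/create/insert helpers above are normally combined in an
 * action's .init callback roughly as follows (sketch only; "foo",
 * struct tcf_foo and parm are hypothetical stand-ins for a concrete
 * action's types and parsed parameters):
 *
 *	if (!tcf_hash_check(tn, parm->index, a, bind)) {
 *		ret = tcf_hash_create(tn, parm->index, est, a,
 *				      sizeof(struct tcf_foo), bind, false);
 *		if (ret)
 *			return ret;
 *		ret = ACT_P_CREATED;
 *	} else {
 *		if (bind)
 *			return 0;
 *		__tcf_hash_release(a, bind, false);
 *		if (!ovr)
 *			return -EEXIST;
 *	}
 *
 *	... set up the action's private parameters under tcfc_lock ...
 *
 *	if (ret == ACT_P_CREATED)
 *		tcf_hash_insert(tn, a);
 *	return ret;
 */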
314
315 void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
316 struct tcf_hashinfo *hinfo)
317 {
318 struct tc_action a = {
319 .ops = ops,
320 .hinfo = hinfo,
321 };
322 int i;
323
324 for (i = 0; i < hinfo->hmask + 1; i++) {
325 struct tcf_common *p;
326 struct hlist_node *n;
327
328 hlist_for_each_entry_safe(p, n, &hinfo->htab[i], tcfc_head) {
329 int ret;
330
331 a.priv = p;
332 ret = __tcf_hash_release(&a, false, true);
333 if (ret == ACT_P_DELETED)
334 module_put(ops->owner);
335 else if (ret < 0)
336 return;
337 }
338 }
339 kfree(hinfo->htab);
340 }
341 EXPORT_SYMBOL(tcf_hashinfo_destroy);
342
343 static LIST_HEAD(act_base);
344 static DEFINE_RWLOCK(act_mod_lock);
345
346 int tcf_register_action(struct tc_action_ops *act,
347 struct pernet_operations *ops)
348 {
349 struct tc_action_ops *a;
350 int ret;
351
352 if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
353 return -EINVAL;
354
355 write_lock(&act_mod_lock);
356 list_for_each_entry(a, &act_base, head) {
357 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
358 write_unlock(&act_mod_lock);
359 return -EEXIST;
360 }
361 }
362 list_add_tail(&act->head, &act_base);
363 write_unlock(&act_mod_lock);
364
365 ret = register_pernet_subsys(ops);
366 if (ret) {
367 tcf_unregister_action(act, ops);
368 return ret;
369 }
370
371 return 0;
372 }
373 EXPORT_SYMBOL(tcf_register_action);
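/*
 * Registration sketch for a hypothetical "foo" action module.  The ops
 * must provide at least .act, .dump, .init, .walk and .lookup (checked
 * above), and because the hash tables are per namespace the ops are
 * registered together with a pernet_operations block; foo_init_net()
 * and foo_exit_net() are assumed helpers that set up and tear down the
 * per-namespace tc_action_net:
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.type	= TCA_ACT_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.walk	= tcf_foo_walker,
 *		.lookup	= tcf_foo_search,
 *	};
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init	= foo_init_net,
 *		.exit	= foo_exit_net,
 *		.id	= &foo_net_id,
 *		.size	= sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		tcf_unregister_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 *	module_init(foo_module_init);
 *	module_exit(foo_module_exit);
 */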
374
375 int tcf_unregister_action(struct tc_action_ops *act,
376 struct pernet_operations *ops)
377 {
378 struct tc_action_ops *a;
379 int err = -ENOENT;
380
381 unregister_pernet_subsys(ops);
382
383 write_lock(&act_mod_lock);
384 list_for_each_entry(a, &act_base, head) {
385 if (a == act) {
386 list_del(&act->head);
387 err = 0;
388 break;
389 }
390 }
391 write_unlock(&act_mod_lock);
392 return err;
393 }
394 EXPORT_SYMBOL(tcf_unregister_action);
395
396 /* lookup by name */
397 static struct tc_action_ops *tc_lookup_action_n(char *kind)
398 {
399 struct tc_action_ops *a, *res = NULL;
400
401 if (kind) {
402 read_lock(&act_mod_lock);
403 list_for_each_entry(a, &act_base, head) {
404 if (strcmp(kind, a->kind) == 0) {
405 if (try_module_get(a->owner))
406 res = a;
407 break;
408 }
409 }
410 read_unlock(&act_mod_lock);
411 }
412 return res;
413 }
414
415 /* lookup by nlattr */
416 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
417 {
418 struct tc_action_ops *a, *res = NULL;
419
420 if (kind) {
421 read_lock(&act_mod_lock);
422 list_for_each_entry(a, &act_base, head) {
423 if (nla_strcmp(kind, a->kind) == 0) {
424 if (try_module_get(a->owner))
425 res = a;
426 break;
427 }
428 }
429 read_unlock(&act_mod_lock);
430 }
431 return res;
432 }
433
434 int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
435 struct tcf_result *res)
436 {
437 const struct tc_action *a;
438 int ret = -1;
439
440 if (skb->tc_verd & TC_NCLS) {
441 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
442 ret = TC_ACT_OK;
443 goto exec_done;
444 }
445 list_for_each_entry(a, actions, list) {
446 repeat:
447 ret = a->ops->act(skb, a, res);
448 if (ret == TC_ACT_REPEAT)
449 goto repeat; /* we need a ttl - JHS */
450 if (ret != TC_ACT_PIPE)
451 goto exec_done;
452 }
453 exec_done:
454 return ret;
455 }
456 EXPORT_SYMBOL(tcf_action_exec);
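/*
 * Return-value semantics of the loop above: TC_ACT_PIPE falls through
 * to the next action in the list, TC_ACT_REPEAT re-runs the same action
 * (with no ttl, as the comment notes), and any other verdict (TC_ACT_OK,
 * TC_ACT_SHOT, TC_ACT_STOLEN, ...) ends the walk and is returned to the
 * classifier that invoked the chain.
 */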
457
458 int tcf_action_destroy(struct list_head *actions, int bind)
459 {
460 struct tc_action *a, *tmp;
461 int ret = 0;
462
463 list_for_each_entry_safe(a, tmp, actions, list) {
464 ret = __tcf_hash_release(a, bind, true);
465 if (ret == ACT_P_DELETED)
466 module_put(a->ops->owner);
467 else if (ret < 0)
468 return ret;
469 list_del(&a->list);
470 kfree(a);
471 }
472 return ret;
473 }
474
475 int
476 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
477 {
478 return a->ops->dump(skb, a, bind, ref);
479 }
480
481 int
482 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
483 {
484 int err = -EINVAL;
485 unsigned char *b = skb_tail_pointer(skb);
486 struct nlattr *nest;
487
488 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
489 goto nla_put_failure;
490 if (tcf_action_copy_stats(skb, a, 0))
491 goto nla_put_failure;
492 nest = nla_nest_start(skb, TCA_OPTIONS);
493 if (nest == NULL)
494 goto nla_put_failure;
495 err = tcf_action_dump_old(skb, a, bind, ref);
496 if (err > 0) {
497 nla_nest_end(skb, nest);
498 return err;
499 }
500
501 nla_put_failure:
502 nlmsg_trim(skb, b);
503 return -1;
504 }
505 EXPORT_SYMBOL(tcf_action_dump_1);
506
507 int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
508 int bind, int ref)
509 {
510 struct tc_action *a;
511 int err = -EINVAL;
512 struct nlattr *nest;
513
514 list_for_each_entry(a, actions, list) {
515 nest = nla_nest_start(skb, a->order);
516 if (nest == NULL)
517 goto nla_put_failure;
518 err = tcf_action_dump_1(skb, a, bind, ref);
519 if (err < 0)
520 goto errout;
521 nla_nest_end(skb, nest);
522 }
523
524 return 0;
525
526 nla_put_failure:
527 err = -EINVAL;
528 errout:
529 nla_nest_cancel(skb, nest);
530 return err;
531 }
532
533 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
534 struct nlattr *est, char *name, int ovr,
535 int bind)
536 {
537 struct tc_action *a;
538 struct tc_action_ops *a_o;
539 char act_name[IFNAMSIZ];
540 struct nlattr *tb[TCA_ACT_MAX + 1];
541 struct nlattr *kind;
542 int err;
543
544 if (name == NULL) {
545 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
546 if (err < 0)
547 goto err_out;
548 err = -EINVAL;
549 kind = tb[TCA_ACT_KIND];
550 if (kind == NULL)
551 goto err_out;
552 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
553 goto err_out;
554 } else {
555 err = -EINVAL;
556 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
557 goto err_out;
558 }
559
560 a_o = tc_lookup_action_n(act_name);
561 if (a_o == NULL) {
562 #ifdef CONFIG_MODULES
563 rtnl_unlock();
564 request_module("act_%s", act_name);
565 rtnl_lock();
566
567 a_o = tc_lookup_action_n(act_name);
568
569 /* We dropped the RTNL semaphore in order to
570 * perform the module load. So, even if we
571 * succeeded in loading the module we have to
572 * tell the caller to replay the request. We
573 * indicate this using -EAGAIN.
574 */
575 if (a_o != NULL) {
576 err = -EAGAIN;
577 goto err_mod;
578 }
579 #endif
580 err = -ENOENT;
581 goto err_out;
582 }
583
584 err = -ENOMEM;
585 a = kzalloc(sizeof(*a), GFP_KERNEL);
586 if (a == NULL)
587 goto err_mod;
588
589 a->ops = a_o;
590 INIT_LIST_HEAD(&a->list);
591 /* backward compatibility for policer */
592 if (name == NULL)
593 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
594 else
595 err = a_o->init(net, nla, est, a, ovr, bind);
596 if (err < 0)
597 goto err_free;
598
599 /* The module count goes up only when a brand new policy is created;
600 * if the action already exists and is merely bound to in a_o->init(),
601 * ACT_P_CREATED is not returned (zero is).
602 */
603 if (err != ACT_P_CREATED)
604 module_put(a_o->owner);
605
606 return a;
607
608 err_free:
609 kfree(a);
610 err_mod:
611 module_put(a_o->owner);
612 err_out:
613 return ERR_PTR(err);
614 }
615
616 int tcf_action_init(struct net *net, struct nlattr *nla,
617 struct nlattr *est, char *name, int ovr,
618 int bind, struct list_head *actions)
619 {
620 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
621 struct tc_action *act;
622 int err;
623 int i;
624
625 err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
626 if (err < 0)
627 return err;
628
629 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
630 act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
631 if (IS_ERR(act)) {
632 err = PTR_ERR(act);
633 goto err;
634 }
635 act->order = i;
636 list_add_tail(&act->list, actions);
637 }
638 return 0;
639
640 err:
641 tcf_action_destroy(actions, bind);
642 return err;
643 }
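/*
 * Callers (typically the classifiers) hand their action attributes to
 * tcf_action_init(): with name == NULL each numbered sub-attribute is
 * parsed as a nested TCA_ACT_KIND/TCA_ACT_OPTIONS block, while a
 * non-NULL name (the policer backward-compatibility path noted in
 * tcf_action_init_1()) passes the attribute block to that action type
 * directly.  On success the created actions are appended to "actions"
 * in priority order.
 */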
644
645 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
646 int compat_mode)
647 {
648 int err = 0;
649 struct gnet_dump d;
650 struct tcf_common *p = a->priv;
651
652 if (p == NULL)
653 goto errout;
654
655 /* compat_mode being true specifies a call that is supposed
656 * to add additional backward compatibility statistic TLVs.
657 */
658 if (compat_mode) {
659 if (a->type == TCA_OLD_COMPAT)
660 err = gnet_stats_start_copy_compat(skb, 0,
661 TCA_STATS,
662 TCA_XSTATS,
663 &p->tcfc_lock, &d,
664 TCA_PAD);
665 else
666 return 0;
667 } else
668 err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
669 &p->tcfc_lock, &d, TCA_ACT_PAD);
670
671 if (err < 0)
672 goto errout;
673
674 if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
675 gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
676 &p->tcfc_rate_est) < 0 ||
677 gnet_stats_copy_queue(&d, p->cpu_qstats,
678 &p->tcfc_qstats,
679 p->tcfc_qstats.qlen) < 0)
680 goto errout;
681
682 if (gnet_stats_finish_copy(&d) < 0)
683 goto errout;
684
685 return 0;
686
687 errout:
688 return -1;
689 }
690
691 static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
692 u32 portid, u32 seq, u16 flags, int event, int bind,
693 int ref)
694 {
695 struct tcamsg *t;
696 struct nlmsghdr *nlh;
697 unsigned char *b = skb_tail_pointer(skb);
698 struct nlattr *nest;
699
700 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
701 if (!nlh)
702 goto out_nlmsg_trim;
703 t = nlmsg_data(nlh);
704 t->tca_family = AF_UNSPEC;
705 t->tca__pad1 = 0;
706 t->tca__pad2 = 0;
707
708 nest = nla_nest_start(skb, TCA_ACT_TAB);
709 if (nest == NULL)
710 goto out_nlmsg_trim;
711
712 if (tcf_action_dump(skb, actions, bind, ref) < 0)
713 goto out_nlmsg_trim;
714
715 nla_nest_end(skb, nest);
716
717 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
718 return skb->len;
719
720 out_nlmsg_trim:
721 nlmsg_trim(skb, b);
722 return -1;
723 }
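/*
 * Resulting wire format: a struct tcamsg header followed by a single
 * TCA_ACT_TAB nest carrying up to TCA_ACT_MAX_PRIO numbered sub-nests
 * (the per-action "order"), each filled by tcf_action_dump_1() with the
 * kind string, the statistics block and an options nest.
 * find_dump_kind() further down parses exactly this nesting on the
 * request side.
 */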
724
725 static int
726 act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
727 struct list_head *actions, int event)
728 {
729 struct sk_buff *skb;
730
731 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
732 if (!skb)
733 return -ENOBUFS;
734 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
735 0, 0) <= 0) {
736 kfree_skb(skb);
737 return -EINVAL;
738 }
739
740 return rtnl_unicast(skb, net, portid);
741 }
742
743 static struct tc_action *create_a(int i)
744 {
745 struct tc_action *act;
746
747 act = kzalloc(sizeof(*act), GFP_KERNEL);
748 if (act == NULL) {
749 pr_debug("create_a: failed to alloc!\n");
750 return NULL;
751 }
752 act->order = i;
753 INIT_LIST_HEAD(&act->list);
754 return act;
755 }
756
757 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
758 struct nlmsghdr *n, u32 portid)
759 {
760 struct nlattr *tb[TCA_ACT_MAX + 1];
761 struct tc_action *a;
762 int index;
763 int err;
764
765 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
766 if (err < 0)
767 goto err_out;
768
769 err = -EINVAL;
770 if (tb[TCA_ACT_INDEX] == NULL ||
771 nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
772 goto err_out;
773 index = nla_get_u32(tb[TCA_ACT_INDEX]);
774
775 err = -ENOMEM;
776 a = create_a(0);
777 if (a == NULL)
778 goto err_out;
779
780 err = -EINVAL;
781 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
782 if (a->ops == NULL) /* could happen in batch of actions */
783 goto err_free;
784 err = -ENOENT;
785 if (a->ops->lookup(net, a, index) == 0)
786 goto err_mod;
787
788 module_put(a->ops->owner);
789 return a;
790
791 err_mod:
792 module_put(a->ops->owner);
793 err_free:
794 kfree(a);
795 err_out:
796 return ERR_PTR(err);
797 }
798
799 static void cleanup_a(struct list_head *actions)
800 {
801 struct tc_action *a, *tmp;
802
803 list_for_each_entry_safe(a, tmp, actions, list) {
804 list_del(&a->list);
805 kfree(a);
806 }
807 }
808
809 static int tca_action_flush(struct net *net, struct nlattr *nla,
810 struct nlmsghdr *n, u32 portid)
811 {
812 struct sk_buff *skb;
813 unsigned char *b;
814 struct nlmsghdr *nlh;
815 struct tcamsg *t;
816 struct netlink_callback dcb;
817 struct nlattr *nest;
818 struct nlattr *tb[TCA_ACT_MAX + 1];
819 struct nlattr *kind;
820 struct tc_action a;
821 int err = -ENOMEM;
822
823 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
824 if (!skb) {
825 pr_debug("tca_action_flush: failed skb alloc\n");
826 return err;
827 }
828
829 b = skb_tail_pointer(skb);
830
831 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
832 if (err < 0)
833 goto err_out;
834
835 err = -EINVAL;
836 kind = tb[TCA_ACT_KIND];
837 memset(&a, 0, sizeof(struct tc_action));
838 INIT_LIST_HEAD(&a.list);
839 a.ops = tc_lookup_action(kind);
840 if (a.ops == NULL) /* someone is trying to flush an unknown action */
841 goto err_out;
842
843 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
844 sizeof(*t), 0);
845 if (!nlh)
846 goto out_module_put;
847 t = nlmsg_data(nlh);
848 t->tca_family = AF_UNSPEC;
849 t->tca__pad1 = 0;
850 t->tca__pad2 = 0;
851
852 nest = nla_nest_start(skb, TCA_ACT_TAB);
853 if (nest == NULL)
854 goto out_module_put;
855
856 err = a.ops->walk(net, skb, &dcb, RTM_DELACTION, &a);
857 if (err < 0)
858 goto out_module_put;
859 if (err == 0)
860 goto noflush_out;
861
862 nla_nest_end(skb, nest);
863
864 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
865 nlh->nlmsg_flags |= NLM_F_ROOT;
866 module_put(a.ops->owner);
867 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
868 n->nlmsg_flags & NLM_F_ECHO);
869 if (err > 0)
870 return 0;
871
872 return err;
873
874 out_module_put:
875 module_put(a.ops->owner);
876 err_out:
877 noflush_out:
878 kfree_skb(skb);
879 return err;
880 }
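/*
 * A flush is an RTM_DELACTION request with NLM_F_ROOT set and a single
 * TCA_ACT_KIND attribute (see tca_action_gd() below); tcf_del_walker()
 * does the actual per-table deletion and reports the number of removed
 * entries via TCA_FCNT.
 */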
881
882 static int
883 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
884 u32 portid)
885 {
886 int ret;
887 struct sk_buff *skb;
888
889 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
890 if (!skb)
891 return -ENOBUFS;
892
893 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
894 0, 1) <= 0) {
895 kfree_skb(skb);
896 return -EINVAL;
897 }
898
899 /* now do the delete */
900 ret = tcf_action_destroy(actions, 0);
901 if (ret < 0) {
902 kfree_skb(skb);
903 return ret;
904 }
905
906 ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
907 n->nlmsg_flags & NLM_F_ECHO);
908 if (ret > 0)
909 return 0;
910 return ret;
911 }
912
913 static int
914 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
915 u32 portid, int event)
916 {
917 int i, ret;
918 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
919 struct tc_action *act;
920 LIST_HEAD(actions);
921
922 ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
923 if (ret < 0)
924 return ret;
925
926 if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
927 if (tb[1] != NULL)
928 return tca_action_flush(net, tb[1], n, portid);
929 else
930 return -EINVAL;
931 }
932
933 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
934 act = tcf_action_get_1(net, tb[i], n, portid);
935 if (IS_ERR(act)) {
936 ret = PTR_ERR(act);
937 goto err;
938 }
939 act->order = i;
940 list_add_tail(&act->list, &actions);
941 }
942
943 if (event == RTM_GETACTION)
944 ret = act_get_notify(net, portid, n, &actions, event);
945 else { /* delete */
946 ret = tcf_del_notify(net, n, &actions, portid);
947 if (ret)
948 goto err;
949 return ret;
950 }
951 err:
952 cleanup_a(&actions);
953 return ret;
954 }
955
956 static int
957 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
958 u32 portid)
959 {
960 struct sk_buff *skb;
961 int err = 0;
962
963 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
964 if (!skb)
965 return -ENOBUFS;
966
967 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
968 RTM_NEWACTION, 0, 0) <= 0) {
969 kfree_skb(skb);
970 return -EINVAL;
971 }
972
973 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
974 n->nlmsg_flags & NLM_F_ECHO);
975 if (err > 0)
976 err = 0;
977 return err;
978 }
979
980 static int
981 tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
982 u32 portid, int ovr)
983 {
984 int ret = 0;
985 LIST_HEAD(actions);
986
987 ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
988 if (ret)
989 goto done;
990
991 /* dump then free all the actions after update; inserted policy
992 * stays intact
993 */
994 ret = tcf_add_notify(net, n, &actions, portid);
995 cleanup_a(&actions);
996 done:
997 return ret;
998 }
999
1000 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
1001 {
1002 struct net *net = sock_net(skb->sk);
1003 struct nlattr *tca[TCA_ACT_MAX + 1];
1004 u32 portid = skb ? NETLINK_CB(skb).portid : 0;
1005 int ret = 0, ovr = 0;
1006
1007 if ((n->nlmsg_type != RTM_GETACTION) &&
1008 !netlink_capable(skb, CAP_NET_ADMIN))
1009 return -EPERM;
1010
1011 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
1012 if (ret < 0)
1013 return ret;
1014
1015 if (tca[TCA_ACT_TAB] == NULL) {
1016 pr_notice("tc_ctl_action: received NO action attribs\n");
1017 return -EINVAL;
1018 }
1019
1020 /* n->nlmsg_flags & NLM_F_CREATE */
1021 switch (n->nlmsg_type) {
1022 case RTM_NEWACTION:
1023 /* we are going to assume all other flags
1024 * imply create only if it doesn't exist
1025 * Note that CREATE | EXCL implies that
1026 * but since we want to avoid ambiguity (e.g. when the flags
1027 * are zero) we just set this explicitly.
1028 */
1029 if (n->nlmsg_flags & NLM_F_REPLACE)
1030 ovr = 1;
1031 replay:
1032 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
1033 if (ret == -EAGAIN)
1034 goto replay;
1035 break;
1036 case RTM_DELACTION:
1037 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1038 portid, RTM_DELACTION);
1039 break;
1040 case RTM_GETACTION:
1041 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1042 portid, RTM_GETACTION);
1043 break;
1044 default:
1045 BUG();
1046 }
1047
1048 return ret;
1049 }
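/*
 * The replay label above exists because tcf_action_add() can return
 * -EAGAIN: tcf_action_init_1() drops the RTNL lock to request_module()
 * a missing action, and once the module has been loaded the whole
 * RTM_NEWACTION request is simply processed again.
 */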
1050
1051 static struct nlattr *
1052 find_dump_kind(const struct nlmsghdr *n)
1053 {
1054 struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
1055 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1056 struct nlattr *nla[TCAA_MAX + 1];
1057 struct nlattr *kind;
1058
1059 if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
1060 return NULL;
1061 tb1 = nla[TCA_ACT_TAB];
1062 if (tb1 == NULL)
1063 return NULL;
1064
1065 if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
1066 NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
1067 return NULL;
1068
1069 if (tb[1] == NULL)
1070 return NULL;
1071 if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
1072 nla_len(tb[1]), NULL) < 0)
1073 return NULL;
1074 kind = tb2[TCA_ACT_KIND];
1075
1076 return kind;
1077 }
1078
1079 static int
1080 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1081 {
1082 struct net *net = sock_net(skb->sk);
1083 struct nlmsghdr *nlh;
1084 unsigned char *b = skb_tail_pointer(skb);
1085 struct nlattr *nest;
1086 struct tc_action_ops *a_o;
1087 struct tc_action a;
1088 int ret = 0;
1089 struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1090 struct nlattr *kind = find_dump_kind(cb->nlh);
1091
1092 if (kind == NULL) {
1093 pr_info("tc_dump_action: action bad kind\n");
1094 return 0;
1095 }
1096
1097 a_o = tc_lookup_action(kind);
1098 if (a_o == NULL)
1099 return 0;
1100
1101 memset(&a, 0, sizeof(struct tc_action));
1102 a.ops = a_o;
1103
1104 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1105 cb->nlh->nlmsg_type, sizeof(*t), 0);
1106 if (!nlh)
1107 goto out_module_put;
1108 t = nlmsg_data(nlh);
1109 t->tca_family = AF_UNSPEC;
1110 t->tca__pad1 = 0;
1111 t->tca__pad2 = 0;
1112
1113 nest = nla_nest_start(skb, TCA_ACT_TAB);
1114 if (nest == NULL)
1115 goto out_module_put;
1116
1117 ret = a_o->walk(net, skb, cb, RTM_GETACTION, &a);
1118 if (ret < 0)
1119 goto out_module_put;
1120
1121 if (ret > 0) {
1122 nla_nest_end(skb, nest);
1123 ret = skb->len;
1124 } else
1125 nla_nest_cancel(skb, nest);
1126
1127 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1128 if (NETLINK_CB(cb->skb).portid && ret)
1129 nlh->nlmsg_flags |= NLM_F_MULTI;
1130 module_put(a_o->owner);
1131 return skb->len;
1132
1133 out_module_put:
1134 module_put(a_o->owner);
1135 nlmsg_trim(skb, b);
1136 return skb->len;
1137 }
1138
1139 static int __init tc_action_init(void)
1140 {
1141 rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
1142 rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
1143 rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
1144 NULL);
1145
1146 return 0;
1147 }
1148
1149 subsys_initcall(tc_action_init);