[NET_SCHED]: Convert packet schedulers from rtnetlink to new netlink API
[deliverable/linux.git] / net/sched/cls_api.c
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */

static struct tcf_proto_ops *tcf_proto_base __read_mostly;

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static struct tcf_proto_ops *tcf_proto_lookup_ops(struct rtattr *kind)
{
	struct tcf_proto_ops *t = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		for (t = tcf_proto_base; t; t = t->next) {
			if (rtattr_strcmp(kind, t->kind) == 0) {
				if (!try_module_get(t->owner))
					t = NULL;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return t;
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t, **tp;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	ops->next = NULL;
	*tp = ops;
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t, **tp;
	int rc = -ENOENT;

	write_lock(&cls_mod_lock);
	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
		if (t == ops)
			break;

	if (!t)
		goto out;
	*tp = t->next;
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
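/*
 * Example (illustrative sketch only, not part of this file): a classifier
 * module registers its tcf_proto_ops at module load and removes it on
 * unload.  The "example" ops and callbacks below are hypothetical
 * placeholders standing in for a real classifier such as cls_u32.
 */
#if 0
static struct tcf_proto_ops cls_example_ops __read_mostly = {
	.kind		= "example",
	.classify	= example_classify,
	.init		= example_init,
	.destroy	= example_destroy,
	.get		= example_get,
	.put		= example_put,
	.change		= example_change,
	.delete		= example_delete,
	.walk		= example_walk,
	.dump		= example_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_example_init(void)
{
	/* Fails with -EEXIST if another classifier already uses this kind. */
	return register_tcf_proto_ops(&cls_example_ops);
}

static void __exit cls_example_exit(void)
{
	unregister_tcf_proto_ops(&cls_example_ops);
}

module_init(cls_example_init);
module_exit(cls_example_exit);
MODULE_LICENSE("GPL");
#endif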

static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			  struct tcf_proto *tp, unsigned long fh, int event);


/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return first;
}

/* Add/change/delete/get a filter node */

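/*
 * tcm_info carries the protocol in its minor bits and the priority in its
 * major bits; tcm_parent names the owning qdisc (and, optionally, class).
 * The handler locates the class's filter chain, creates a new tcf_proto
 * head when the requested priority does not exist yet (loading the
 * classifier module if necessary), and then dispatches to the classifier's
 * ->change()/->delete() callbacks or to tfilter_notify() for GET.
 */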
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct rtattr **tca;
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;
	u32 parent;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto **back, **chain;
	struct tcf_proto *tp;
	struct tcf_proto_ops *tp_ops;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;

	if (net != &init_net)
		return -EINVAL;

replay:
	tca = arg;
	t = NLMSG_DATA(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		/* If no priority is given, the user wants us to allocate one. */
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U, 0U);
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(&init_net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc_sleeping;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	if ((cops = q->ops->cl_ops) == NULL)
		return -EINVAL;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for existence of proto-tcf with this priority */
	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio || (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;
			break;
		}
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND-1] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;


		/* Create new proto tcf */

		err = -ENOBUFS;
		tp = kzalloc(sizeof(*tp), GFP_KERNEL);
		if (tp == NULL)
			goto errout;
		err = -EINVAL;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
		if (tp_ops == NULL) {
#ifdef CONFIG_KMOD
			struct rtattr *kind = tca[TCA_KIND-1];
			char name[IFNAMSIZ];

			if (kind != NULL &&
			    rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
				rtnl_unlock();
				request_module("cls_%s", name);
				rtnl_lock();
				tp_ops = tcf_proto_lookup_ops(kind);
				/* We dropped the RTNL semaphore in order to
				 * perform the module load.  So, even if we
				 * succeeded in loading the module we have to
				 * replay the request.  We indicate this using
				 * -EAGAIN.
				 */
				if (tp_ops != NULL) {
					module_put(tp_ops->owner);
					err = -EAGAIN;
				}
			}
#endif
			kfree(tp);
			goto errout;
		}
		tp->ops = tp_ops;
		tp->protocol = protocol;
		tp->prio = nprio ? : tcf_auto_prio(*back);
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;

		err = tp_ops->init(tp);
		if (err != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		qdisc_lock_tree(dev);
		tp->next = *back;
		*back = tp;
		qdisc_unlock_tree(dev);

	} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
		goto errout;

	fh = tp->ops->get(tp, t->tcm_handle);

	if (fh == 0) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			qdisc_lock_tree(dev);
			*back = tp->next;
			qdisc_unlock_tree(dev);

			tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
			tcf_destroy(tp);
			err = 0;
			goto errout;
		}

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto errout;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto errout;
			break;
		case RTM_DELTFILTER:
			err = tp->ops->delete(tp, fh);
			if (err == 0)
				tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
	if (err == 0)
		tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);

errout:
	if (cl)
		cops->put(q, cl);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

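/*
 * Build one RTM_*TFILTER message: a struct tcmsg header identifying the
 * device, parent and prio/protocol, a TCA_KIND attribute naming the
 * classifier, and (except for deletes) whatever attributes the
 * classifier's ->dump() callback chooses to append.
 */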
static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
			 unsigned long fh, u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = tp->q->dev->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, tp->ops->kind);
	tcm->tcm_handle = fh;
	if (RTM_DELTFILTER != event) {
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
			goto rtattr_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

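/*
 * Notify userspace about a filter change: the message is multicast to the
 * RTNLGRP_TC group and echoed back to the requesting socket when the
 * request carried NLM_F_ECHO.
 */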
static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
			  struct tcf_proto *tp, unsigned long fh, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
			 struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;

	return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
}

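/*
 * Dump all filters attached to one qdisc/class.  cb->args[0] remembers the
 * index of the filter chain (priority) reached so far, and cb->args[1]
 * holds one plus the number of nodes already dumped from the current
 * chain, so an interrupted dump can resume where it left off.
 */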
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = skb->sk->sk_net;
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto *tp, **chain;
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	struct tcf_dump_args arg;

	if (net != &init_net)
		return 0;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return skb->len;
	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc_sleeping;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	if ((cops = q->ops->cl_ops) == NULL)
		goto errout;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->get(q, tcm->tcm_parent);
		if (cl == 0)
			goto errout;
	}
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL)
		goto errout;

	s_t = cb->args[0];

	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
		if (t < s_t)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				break;

			cb->args[1] = 1;
		}
		if (tp->ops->walk == NULL)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			break;
	}

	cb->args[0] = t;

errout:
	if (cl)
		cops->put(q, cl);
out:
	dev_put(dev);
	return skb->len;
}

void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action) {
		tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
		exts->action = NULL;
	}
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
		      struct rtattr *rate_tlv, struct tcf_exts *exts,
		      struct tcf_ext_map *map)
{
	memset(exts, 0, sizeof(*exts));

#ifdef CONFIG_NET_CLS_ACT
	{
		int err;
		struct tc_action *act;

		if (map->police && tb[map->police-1]) {
			act = tcf_action_init_1(tb[map->police-1], rate_tlv,
						"police", TCA_ACT_NOREPLACE,
						TCA_ACT_BIND, &err);
			if (act == NULL)
				return err;

			act->type = TCA_OLD_COMPAT;
			exts->action = act;
		} else if (map->action && tb[map->action-1]) {
			act = tcf_action_init(tb[map->action-1], rate_tlv, NULL,
					      TCA_ACT_NOREPLACE, TCA_ACT_BIND, &err);
			if (act == NULL)
				return err;

			exts->action = act;
		}
	}
#else
	if ((map->action && tb[map->action-1]) ||
	    (map->police && tb[map->police-1]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	if (src->action) {
		struct tc_action *act;
		tcf_tree_lock(tp);
		act = xchg(&dst->action, src->action);
		tcf_tree_unlock(tp);
		if (act)
			tcf_action_destroy(act, TCA_ACT_UNBIND);
	}
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
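/*
 * Example (illustrative sketch only, not part of this file): a classifier's
 * ->change() implementation typically validates the action/police
 * attributes into a temporary tcf_exts and only then swaps them into the
 * live filter, so a failed validation never disturbs existing state.  The
 * example_filter structure and example_ext_map below are hypothetical.
 */
#if 0
static int example_set_parms(struct tcf_proto *tp, struct example_filter *f,
			     struct rtattr **tb, struct rtattr *est)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_validate(tp, tb, est, &e, &example_ext_map);
	if (err < 0)
		return err;

	/* ... parse classifier-specific attributes into f ... */

	tcf_exts_change(tp, &f->exts, &e);
	return 0;
}
#endif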

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
		  struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (map->action && exts->action) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);

		if (exts->action->type != TCA_OLD_COMPAT) {
			RTA_PUT(skb, map->action, 0, NULL);
			if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
				goto rtattr_failure;
			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
		} else if (map->police) {
			RTA_PUT(skb, map->police, 0, NULL);
			if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
				goto rtattr_failure;
			p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
		}
	}
#endif
	return 0;
rtattr_failure: __attribute__ ((unused))
	return -1;
}
EXPORT_SYMBOL(tcf_exts_dump);


int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
			struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->action)
		if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
			goto rtattr_failure;
#endif
	return 0;
rtattr_failure: __attribute__ ((unused))
	return -1;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

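/*
 * Hook the three filter message types into rtnetlink.  These handlers are
 * reached via RTM_{NEW,DEL,GET}TFILTER requests, e.g. iproute2's
 * "tc filter add dev eth0 parent 1: protocol ip prio 10 u32 ..." ends up
 * in tc_ctl_tfilter(), while "tc filter show dev eth0" walks
 * tc_dump_tfilter().
 */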
static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter);

	return 0;
}

subsys_initcall(tc_filter_init);