[NET_SCHED]: mark classifier ops __read_mostly
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */

static struct tcf_proto_ops *tcf_proto_base __read_mostly;

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static struct tcf_proto_ops *tcf_proto_lookup_ops(struct rtattr *kind)
{
        struct tcf_proto_ops *t = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                for (t = tcf_proto_base; t; t = t->next) {
                        if (rtattr_strcmp(kind, t->kind) == 0) {
                                if (!try_module_get(t->owner))
                                        t = NULL;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return t;
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t, **tp;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        ops->next = NULL;
        *tp = ops;
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t, **tp;
        int rc = -ENOENT;

        write_lock(&cls_mod_lock);
        for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
                if (t == ops)
                        break;

        if (!t)
                goto out;
        *tp = t->next;
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
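
/*
 * Usage sketch (illustrative only, not part of this file): a classifier
 * module declares a statically allocated struct tcf_proto_ops and hands
 * it to register_tcf_proto_ops() at load time; unregister_tcf_proto_ops()
 * removes it again on unload.  The "example" kind and the example_*
 * callbacks are hypothetical placeholders; the sketch is kept under
 * #if 0 so it does not affect this file.
 */
#if 0
static struct tcf_proto_ops cls_example_ops __read_mostly = {
        .kind           = "example",
        .classify       = example_classify,     /* per-packet fast path */
        .init           = example_init,
        .destroy        = example_destroy,
        .get            = example_get,
        .put            = example_put,
        .change         = example_change,
        .delete         = example_delete,
        .walk           = example_walk,
        .dump           = example_dump,
        .owner          = THIS_MODULE,
};

static int __init init_example(void)
{
        return register_tcf_proto_ops(&cls_example_ops);
}

static void __exit exit_example(void)
{
        unregister_tcf_proto_ops(&cls_example_ops);
}

module_init(init_example);
module_exit(exit_example);
MODULE_LICENSE("GPL");
#endif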

static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                          struct tcf_proto *tp, unsigned long fh, int event);


/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return first;
}
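
/*
 * Example: priorities are kept here in TC_H_MAJ form (upper 16 bits), so
 * TC_H_MAKE(0xC0000000U, 0U) == 0xC0000000 corresponds to pref 49152.
 * The first auto-prio filter on a chain gets that value; each later
 * auto-prio filter gets the prio of the entry it will precede, minus one,
 * and is therefore linked (and matched) ahead of it.
 */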

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
        struct net *net = skb->sk->sk_net;
        struct rtattr **tca;
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
        u32 nprio;
        u32 parent;
        struct net_device *dev;
        struct Qdisc *q;
        struct tcf_proto **back, **chain;
        struct tcf_proto *tp;
        struct tcf_proto_ops *tp_ops;
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        unsigned long fh;
        int err;

        if (net != &init_net)
                return -EINVAL;

replay:
        tca = arg;
        t = NLMSG_DATA(n);
        protocol = TC_H_MIN(t->tcm_info);
        prio = TC_H_MAJ(t->tcm_info);
        nprio = prio;
        parent = t->tcm_parent;
        cl = 0;

        if (prio == 0) {
                /* If no priority is given, the user wants us to allocate it. */
                if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
                        return -ENOENT;
                prio = TC_H_MAKE(0x80000000U, 0U);
        }

        /* Find head of filter chain. */

        /* Find link */
        dev = __dev_get_by_index(&init_net, t->tcm_ifindex);
        if (dev == NULL)
                return -ENODEV;

        /* Find qdisc */
        if (!parent) {
                q = dev->qdisc_sleeping;
                parent = q->handle;
        } else {
                q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
                if (q == NULL)
                        return -EINVAL;
        }

        /* Is it classful? */
        if ((cops = q->ops->cl_ops) == NULL)
                return -EINVAL;

        /* Do we search for filter, attached to class? */
        if (TC_H_MIN(parent)) {
                cl = cops->get(q, parent);
                if (cl == 0)
                        return -ENOENT;
        }
        /* And the last step: get the head of the filter chain */
        chain = cops->tcf_chain(q, cl);
        err = -EINVAL;
        if (chain == NULL)
                goto errout;

        /* Check the chain for existence of proto-tcf with this priority */
        for (back = chain; (tp = *back) != NULL; back = &tp->next) {
                if (tp->prio >= prio) {
                        if (tp->prio == prio) {
                                if (!nprio || (tp->protocol != protocol && protocol))
                                        goto errout;
                        } else
                                tp = NULL;
                        break;
                }
        }

        if (tp == NULL) {
                /* Proto-tcf does not exist, create new one */

                if (tca[TCA_KIND-1] == NULL || !protocol)
                        goto errout;

                err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE))
                        goto errout;


                /* Create new proto tcf */

                err = -ENOBUFS;
                tp = kzalloc(sizeof(*tp), GFP_KERNEL);
                if (tp == NULL)
                        goto errout;
                err = -EINVAL;
                tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
                if (tp_ops == NULL) {
#ifdef CONFIG_KMOD
                        struct rtattr *kind = tca[TCA_KIND-1];
                        char name[IFNAMSIZ];

                        if (kind != NULL &&
                            rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
                                rtnl_unlock();
                                request_module("cls_%s", name);
                                rtnl_lock();
                                tp_ops = tcf_proto_lookup_ops(kind);
                                /* We dropped the RTNL semaphore in order to
                                 * perform the module load. So, even if we
                                 * succeeded in loading the module we have to
                                 * replay the request. We indicate this using
                                 * -EAGAIN.
                                 */
                                if (tp_ops != NULL) {
                                        module_put(tp_ops->owner);
                                        err = -EAGAIN;
                                }
                        }
#endif
                        kfree(tp);
                        goto errout;
                }
                tp->ops = tp_ops;
                tp->protocol = protocol;
                tp->prio = nprio ? : tcf_auto_prio(*back);
                tp->q = q;
                tp->classify = tp_ops->classify;
                tp->classid = parent;

                err = tp_ops->init(tp);
                if (err != 0) {
                        module_put(tp_ops->owner);
                        kfree(tp);
                        goto errout;
                }

                qdisc_lock_tree(dev);
                tp->next = *back;
                *back = tp;
                qdisc_unlock_tree(dev);

        } else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
                goto errout;

        fh = tp->ops->get(tp, t->tcm_handle);

        if (fh == 0) {
                if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
                        qdisc_lock_tree(dev);
                        *back = tp->next;
                        qdisc_unlock_tree(dev);

                        tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
                        tcf_destroy(tp);
                        err = 0;
                        goto errout;
                }

                err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTFILTER ||
                    !(n->nlmsg_flags & NLM_F_CREATE))
                        goto errout;
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTFILTER:
                        err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL)
                                goto errout;
                        break;
                case RTM_DELTFILTER:
                        err = tp->ops->delete(tp, fh);
                        if (err == 0)
                                tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
                        goto errout;
                case RTM_GETTFILTER:
                        err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
                        goto errout;
                default:
                        err = -EINVAL;
                        goto errout;
                }
        }

        err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
        if (err == 0)
                tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);

errout:
        if (cl)
                cops->put(q, cl);
        if (err == -EAGAIN)
                /* Replay the request. */
                goto replay;
        return err;
}
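
/*
 * For reference, these are the kinds of requests that reach
 * tc_ctl_tfilter(); the device, handles and priorities below are
 * arbitrary examples:
 *
 *      tc filter add dev eth0 parent 1:0 protocol ip prio 10 \
 *              u32 match ip dport 22 0xffff flowid 1:1     (RTM_NEWTFILTER)
 *      tc filter del dev eth0 parent 1:0 prio 10           (RTM_DELTFILTER)
 *
 * The (prio, protocol) pair selects a tcf_proto instance on the class's
 * filter chain; tcm_handle then selects an individual element inside it.
 */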

static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
                         unsigned long fh, u32 pid, u32 seq, u16 flags, int event)
{
        struct tcmsg *tcm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);

        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
        tcm->tcm__pad1 = 0;
        tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = tp->q->dev->ifindex;
        tcm->tcm_parent = tp->classid;
        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
        RTA_PUT(skb, TCA_KIND, IFNAMSIZ, tp->ops->kind);
        tcm->tcm_handle = fh;
        if (RTM_DELTFILTER != event) {
                tcm->tcm_handle = 0;
                if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
                        goto rtattr_failure;
        }
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
                          struct tcf_proto *tp, unsigned long fh, int event)
{
        struct sk_buff *skb;
        u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
                              n->nlmsg_flags & NLM_F_ECHO);
}

struct tcf_dump_args {
        struct tcf_walker w;
        struct sk_buff *skb;
        struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
                         struct tcf_walker *arg)
{
        struct tcf_dump_args *a = (void *)arg;

        return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
}

static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = skb->sk->sk_net;
        int t;
        int s_t;
        struct net_device *dev;
        struct Qdisc *q;
        struct tcf_proto *tp, **chain;
        struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
        unsigned long cl = 0;
        const struct Qdisc_class_ops *cops;
        struct tcf_dump_args arg;

        if (net != &init_net)
                return 0;

        if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
                return skb->len;
        if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
                return skb->len;

        if (!tcm->tcm_parent)
                q = dev->qdisc_sleeping;
        else
                q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        if (!q)
                goto out;
        if ((cops = q->ops->cl_ops) == NULL)
                goto errout;
        if (TC_H_MIN(tcm->tcm_parent)) {
                cl = cops->get(q, tcm->tcm_parent);
                if (cl == 0)
                        goto errout;
        }
        chain = cops->tcf_chain(q, cl);
        if (chain == NULL)
                goto errout;

        s_t = cb->args[0];

        for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
                if (t < s_t)
                        continue;
                if (TC_H_MAJ(tcm->tcm_info) &&
                    TC_H_MAJ(tcm->tcm_info) != tp->prio)
                        continue;
                if (TC_H_MIN(tcm->tcm_info) &&
                    TC_H_MIN(tcm->tcm_info) != tp->protocol)
                        continue;
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
                        if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
                                break;

                        cb->args[1] = 1;
                }
                if (tp->ops->walk == NULL)
                        continue;
                arg.w.fn = tcf_node_dump;
                arg.skb = skb;
                arg.cb = cb;
                arg.w.stop = 0;
                arg.w.skip = cb->args[1] - 1;
                arg.w.count = 0;
                tp->ops->walk(tp, &arg.w);
                cb->args[1] = arg.w.count + 1;
                if (arg.w.stop)
                        break;
        }

        cb->args[0] = t;

errout:
        if (cl)
                cops->put(q, cl);
out:
        dev_put(dev);
        return skb->len;
}
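
/*
 * The dump path above serves "tc filter show dev <dev>" (an RTM_GETTFILTER
 * dump request): cb->args[0] records which tcf_proto on the chain the dump
 * stopped at and cb->args[1] the position within that proto's walk, so the
 * dump can resume where it left off when it spans several netlink skbs.
 */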

void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        if (exts->action) {
                tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
                exts->action = NULL;
        }
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
                      struct rtattr *rate_tlv, struct tcf_exts *exts,
                      struct tcf_ext_map *map)
{
        memset(exts, 0, sizeof(*exts));

#ifdef CONFIG_NET_CLS_ACT
        {
                int err;
                struct tc_action *act;

                if (map->police && tb[map->police-1]) {
                        act = tcf_action_init_1(tb[map->police-1], rate_tlv,
                                                "police", TCA_ACT_NOREPLACE,
                                                TCA_ACT_BIND, &err);
                        if (act == NULL)
                                return err;

                        act->type = TCA_OLD_COMPAT;
                        exts->action = act;
                } else if (map->action && tb[map->action-1]) {
                        act = tcf_action_init(tb[map->action-1], rate_tlv, NULL,
                                              TCA_ACT_NOREPLACE, TCA_ACT_BIND, &err);
                        if (act == NULL)
                                return err;

                        exts->action = act;
                }
        }
#else
        if ((map->action && tb[map->action-1]) ||
            (map->police && tb[map->police-1]))
                return -EOPNOTSUPP;
#endif

        return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
        if (src->action) {
                struct tc_action *act;
                tcf_tree_lock(tp);
                act = xchg(&dst->action, src->action);
                tcf_tree_unlock(tp);
                if (act)
                        tcf_action_destroy(act, TCA_ACT_UNBIND);
        }
#endif
}
EXPORT_SYMBOL(tcf_exts_change);
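
/*
 * Usage sketch (illustrative only, not part of this file): a classifier's
 * ->change() callback typically validates the action/police attributes
 * into a temporary tcf_exts and then swaps it into the filter under the
 * tree lock via tcf_exts_change(), which also releases the old actions.
 * The names example_filter, example_ext_map and example_set_parms are
 * hypothetical; the sketch is kept under #if 0 so it does not affect
 * this file.
 */
#if 0
static int example_set_parms(struct tcf_proto *tp, struct example_filter *f,
                             unsigned long base, struct rtattr **tb,
                             struct rtattr *est)
{
        struct tcf_exts e;
        int err;

        err = tcf_exts_validate(tp, tb, est, &e, &example_ext_map);
        if (err < 0)
                return err;

        /* ... parse classifier-specific attributes into *f here ... */

        tcf_exts_change(tp, &f->exts, &e);      /* old actions are released */
        return 0;
}
#endif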

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
                  struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
        if (map->action && exts->action) {
                /*
                 * again for backward compatible mode - we want
                 * to work with both old and new modes of entering
                 * tc data even if iproute2 was newer - jhs
                 */
                struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);

                if (exts->action->type != TCA_OLD_COMPAT) {
                        RTA_PUT(skb, map->action, 0, NULL);
                        if (tcf_action_dump(skb, exts->action, 0, 0) < 0)
                                goto rtattr_failure;
                        p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
                } else if (map->police) {
                        RTA_PUT(skb, map->police, 0, NULL);
                        if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0)
                                goto rtattr_failure;
                        p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
                }
        }
#endif
        return 0;
rtattr_failure: __attribute__ ((unused))
        return -1;
}
EXPORT_SYMBOL(tcf_exts_dump);


int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
                        struct tcf_ext_map *map)
{
#ifdef CONFIG_NET_CLS_ACT
        if (exts->action)
                if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
                        goto rtattr_failure;
#endif
        return 0;
rtattr_failure: __attribute__ ((unused))
        return -1;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
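
/*
 * Usage sketch (illustrative only, not part of this file): a classifier's
 * ->dump() callback emits its own attributes and then lets the helpers
 * above append the action TLVs and their statistics.  example_filter and
 * example_ext_map are hypothetical, as above; kept under #if 0.
 */
#if 0
static int example_dump(struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *t)
{
        struct example_filter *f = (struct example_filter *)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        rta = (struct rtattr *)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        /* ... classifier-specific attributes would be added here ... */

        if (tcf_exts_dump(skb, &f->exts, &example_ext_map) < 0)
                goto rtattr_failure;

        rta->rta_len = skb_tail_pointer(skb) - b;

        if (tcf_exts_dump_stats(skb, &f->exts, &example_ext_map) < 0)
                goto rtattr_failure;

        return skb->len;

rtattr_failure:
        nlmsg_trim(skb, b);
        return -1;
}
#endif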

static int __init tc_filter_init(void)
{
        rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
                      tc_dump_tfilter);

        return 0;
}

subsys_initcall(tc_filter_init);