/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

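/* Per-CPU flag set while a duplicated packet (see the nf_dup_ipv4/ipv6
 * helpers) is being transmitted, so the duplication code can avoid
 * recursively duplicating its own copies.
 */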
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
        mutex_unlock(&afinfo_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
        mutex_unlock(&afinfo_mutex);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

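/* Resolve the hook list for @reg: the per-netns list for normal protocol
 * families, or the per-device ingress list for NFPROTO_NETDEV.  Returns
 * NULL if the family/hook combination is not supported in this build.
 */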
static struct list_head *nf_find_hook_list(struct net *net,
                                           const struct nf_hook_ops *reg)
{
        struct list_head *hook_list = NULL;

        if (reg->pf != NFPROTO_NETDEV)
                hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
        else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
                if (reg->dev && dev_net(reg->dev) == net)
                        hook_list = &reg->dev->nf_hooks_ingress;
#endif
        }
        return hook_list;
}

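/* Each registered hook is wrapped in an entry carrying a private copy of
 * the ops (the copy is what actually sits on the list) plus a pointer to
 * the caller's original ops, so unregistration can match on the pointer
 * the caller passed in.
 */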
struct nf_hook_entry {
        const struct nf_hook_ops        *orig_ops;
        struct nf_hook_ops              ops;
};

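/* Register one hook for a given network namespace.  The entry is inserted
 * in priority order (lowest priority value first) under nf_hook_mutex and
 * published via RCU.  Returns 0 on success, -ENOMEM if the entry cannot be
 * allocated, or -ENOENT if no hook list exists for this family/hook.
 */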
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
        struct list_head *hook_list;
        struct nf_hook_entry *entry;
        struct nf_hook_ops *elem;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->orig_ops = reg;
        entry->ops = *reg;

        hook_list = nf_find_hook_list(net, reg);
        if (!hook_list) {
                kfree(entry);
                return -ENOENT;
        }

        mutex_lock(&nf_hook_mutex);
        list_for_each_entry(elem, hook_list, list) {
                if (reg->priority < elem->priority)
                        break;
        }
        list_add_rcu(&entry->ops.list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
        if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
                net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
        static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
        return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

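/* Remove a hook previously registered with nf_register_net_hook().  The
 * entry is unlinked under nf_hook_mutex; synchronize_net() then guarantees
 * no packet is still traversing the hook before queued packets are dropped
 * and the entry is freed.
 */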
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
        struct list_head *hook_list;
        struct nf_hook_entry *entry;
        struct nf_hook_ops *elem;

        hook_list = nf_find_hook_list(net, reg);
        if (!hook_list)
                return;

        mutex_lock(&nf_hook_mutex);
        list_for_each_entry(elem, hook_list, list) {
                entry = container_of(elem, struct nf_hook_entry, ops);
                if (entry->orig_ops == reg) {
                        list_del_rcu(&entry->ops.list);
                        break;
                }
        }
        mutex_unlock(&nf_hook_mutex);
        if (&elem->list == hook_list) {
                WARN(1, "nf_unregister_net_hook: hook not found!\n");
                return;
        }
#ifdef CONFIG_NETFILTER_INGRESS
        if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
                net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
        synchronize_net();
        nf_queue_nf_hook_drop(net, &entry->ops);
        kfree(entry);
}
EXPORT_SYMBOL(nf_unregister_net_hook);

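/* Register an array of @n hooks for one namespace.  On failure, hooks
 * registered so far are unwound and the error is returned.
 */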
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_net_hook(net, &reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_net_hooks(net, reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n)
{
        while (n-- > 0)
                nf_unregister_net_hook(net, &reg[n]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

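/* Hooks registered through the non-netns API below are kept on this list
 * (protected by the RTNL) so they can be replayed into every existing and
 * future network namespace.
 */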
static LIST_HEAD(nf_hook_list);

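/* Register a hook in every network namespace.  Namespaces that do not
 * support the requested family/hook (-ENOENT) are silently skipped; any
 * other failure rolls back the namespaces registered so far.
 *
 * A minimal usage sketch from a module (the names here are illustrative
 * only, not part of this file):
 *
 *	static unsigned int my_hook(const struct nf_hook_ops *ops,
 *				    struct sk_buff *skb,
 *				    const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static struct nf_hook_ops my_ops = {
 *		.hook		= my_hook,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_hook(&my_ops);
 *
 * The hook is later removed with nf_unregister_hook(&my_ops).
 */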
int nf_register_hook(struct nf_hook_ops *reg)
{
        struct net *net, *last;
        int ret;

        rtnl_lock();
        for_each_net(net) {
                ret = nf_register_net_hook(net, reg);
                if (ret && ret != -ENOENT)
                        goto rollback;
        }
        list_add_tail(&reg->list, &nf_hook_list);
        rtnl_unlock();

        return 0;
rollback:
        last = net;
        for_each_net(net) {
                if (net == last)
                        break;
                nf_unregister_net_hook(net, reg);
        }
        rtnl_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
        struct net *net;

        rtnl_lock();
        list_del(&reg->list);
        for_each_net(net)
                nf_unregister_net_hook(net, reg);
        rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_hook(&reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_hooks(reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        while (n-- > 0)
                nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

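/* Walk the hooks on @head starting after *@elemp, honouring the priority
 * threshold in @state, and return the first verdict that is not NF_ACCEPT.
 * NF_REPEAT re-runs the same hook; *@elemp is left pointing at the hook
 * that produced the returned verdict, so the caller can resume from it
 * (e.g. after a queued packet is reinjected).
 */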
unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff *skb,
                        struct nf_hook_state *state,
                        struct nf_hook_ops **elemp)
{
        unsigned int verdict;

        /*
         * The caller must not block between calls to this function,
         * otherwise we risk continuing from an element that has since
         * been deleted (the list is only protected by RCU).
         */
        list_for_each_entry_continue_rcu((*elemp), head, list) {
                if (state->thresh > (*elemp)->priority)
                        continue;

                /* Optimization: we don't need to hold a module
                   reference here, since the function can't sleep. --RR */
repeat:
                verdict = (*elemp)->hook(*elemp, skb, state);
                if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
                                        (*elemp)->hook, state->hook);
                                continue;
                        }
#endif
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
        }
        return NF_ACCEPT;
}


/* Returns 1 if okfn() needs to be executed by the caller, a negative errno
 * (-EPERM unless the NF_DROP verdict carried one) if the packet was
 * dropped, and 0 otherwise (the packet was queued, stolen or otherwise
 * consumed and is no longer the caller's concern).
 */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
        struct nf_hook_ops *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

        elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
next_hook:
        verdict = nf_iterate(state->hook_list, skb, state, &elem);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
                ret = NF_DROP_GETERR(verdict);
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
                int err = nf_queue(skb, elem, state,
                                   verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
                        if (err == -ESRCH &&
                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_hook_slow);


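/* Make at least the first @writable_len bytes of @skb safely writable,
 * pulling data into the linear area and unsharing a cloned skb as needed.
 * Returns 1 on success, 0 if @writable_len exceeds the packet length or
 * the reallocation fails.
 */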
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
        if (writable_len > skb->len)
                return 0;

        /* Not exclusive use of packet?  Must copy. */
        if (!skb_cloned(skb)) {
                if (writable_len <= skb_headlen(skb))
                        return 1;
        } else if (skb_clone_writable(skb, writable_len))
                return 1;

        if (writable_len <= skb_headlen(skb))
                writable_len = 0;
        else
                writable_len -= skb_headlen(skb);

        return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without it, the connection may not yet be
   in the hash table, and manufactured ICMP or RST packets would then not
   be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
                __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

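/* Attach the conntrack of @skb (if any) to the locally generated reply
 * @new, via the handler installed by the conntrack module.  A no-op when
 * @skb carries no conntrack or the module is not loaded.
 */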
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
        void (*attach)(struct sk_buff *, const struct sk_buff *);

        if (skb->nfct) {
                rcu_read_lock();
                attach = rcu_dereference(ip_ct_attach);
                if (attach)
                        attach(new, skb);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(nf_ct_attach);

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
        void (*destroy)(struct nf_conntrack *);

        rcu_read_lock();
        destroy = rcu_dereference(nf_ct_destroy);
        BUG_ON(destroy == NULL);
        destroy(nfct);
        rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_hook);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
        .id     = NF_CT_DEFAULT_ZONE_ID,
        .dir    = NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

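/* Replay every hook on the global nf_hook_list into a newly created
 * network namespace; unsupported families (-ENOENT) are skipped, any other
 * error undoes what was registered so far.
 */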
static int nf_register_hook_list(struct net *net)
{
        struct nf_hook_ops *elem;
        int ret;

        rtnl_lock();
        list_for_each_entry(elem, &nf_hook_list, list) {
                ret = nf_register_net_hook(net, elem);
                if (ret && ret != -ENOENT)
                        goto out_undo;
        }
        rtnl_unlock();
        return 0;

out_undo:
        list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
                nf_unregister_net_hook(net, elem);
        rtnl_unlock();
        return ret;
}

static void nf_unregister_hook_list(struct net *net)
{
        struct nf_hook_ops *elem;

        rtnl_lock();
        list_for_each_entry(elem, &nf_hook_list, list)
                nf_unregister_net_hook(net, elem);
        rtnl_unlock();
}

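/* Per-namespace setup: initialise every hook list head, create the
 * /proc/net/netfilter directory and register the globally installed hooks.
 */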
static int __net_init netfilter_net_init(struct net *net)
{
        int i, h, ret;

        for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&net->nf.hooks[i][h]);
        }

#ifdef CONFIG_PROC_FS
        net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
                                                net->proc_net);
        if (!net->nf.proc_netfilter) {
                if (!net_eq(net, &init_net))
                        pr_err("cannot create netfilter proc entry\n");

                return -ENOMEM;
        }
#endif
        ret = nf_register_hook_list(net);
        if (ret)
                remove_proc_entry("netfilter", net->proc_net);

        return ret;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
        nf_unregister_hook_list(net);
        remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
        .init = netfilter_net_init,
        .exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
        int ret;

        ret = register_pernet_subsys(&netfilter_net_ops);
        if (ret < 0)
                goto err;

        ret = netfilter_log_init();
        if (ret < 0)
                goto err_pernet;

        return 0;
err_pernet:
        unregister_pernet_subsys(&netfilter_net_ops);
err:
        return ret;
}