diff --git a/net/core/filter.c b/net/core/filter.c
index 94d26201080d6671080f63865994bb41d7d3d8bc..ca7f832b29802dadb8d5b21e089261cad25eef07 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -530,12 +530,14 @@ do_pass:
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
 
-               /* RET_K, RET_A are remaped into 2 insns. */
+               /* RET_K is remapped into 2 insns. RET_A case doesn't need an
+                * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
+                */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
-                       *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ?
-                                               BPF_K : BPF_X, BPF_REG_0,
-                                               BPF_REG_A, fp->k);
+                       if (BPF_RVAL(fp->code) == BPF_K)
+                               *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
+                                                       0, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;
 
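For illustration only (not part of the patch), a minimal sketch of what the converter now emits, assuming the BPF_REG_A == BPF_REG_0 aliasing that bpf_convert_filter() uses internally; the value 42 is illustrative:

#include <linux/filter.h>

/* cBPF "ret #42" (BPF_RET | BPF_K) still expands to two insns: */
static const struct bpf_insn ret_k[] = {
	BPF_MOV32_IMM(BPF_REG_0, 42),	/* R0 = 42 (illustrative) */
	BPF_EXIT_INSN(),		/* return R0 */
};

/* cBPF "ret a" (BPF_RET | BPF_A) is now just the exit, since the
 * accumulator already lives in R0 under this mapping:
 */
static const struct bpf_insn ret_a[] = {
	BPF_EXIT_INSN(),
};
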
@@ -1147,7 +1149,8 @@ void bpf_prog_destroy(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
+static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
+                           bool locked)
 {
        struct sk_filter *fp, *old_fp;
 
@@ -1163,10 +1166,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
                return -ENOMEM;
        }
 
-       old_fp = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
+       old_fp = rcu_dereference_protected(sk->sk_filter, locked);
        rcu_assign_pointer(sk->sk_filter, fp);
-
        if (old_fp)
                sk_filter_uncharge(sk, old_fp);
 
@@ -1181,7 +1182,7 @@ static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
        if (bpf_prog_size(prog->len) > sysctl_optmem_max)
                return -ENOMEM;
 
-       if (sk_unhashed(sk)) {
+       if (sk_unhashed(sk) && sk->sk_reuseport) {
                err = reuseport_alloc(sk);
                if (err)
                        return err;
@@ -1245,7 +1246,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
  * occurs or there is insufficient memory for the filter a negative
  * errno code is returned. On success the return is zero.
  */
-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
+                      bool locked)
 {
        struct bpf_prog *prog = __get_filter(fprog, sk);
        int err;
@@ -1253,7 +1255,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       err = __sk_attach_prog(prog, sk);
+       err = __sk_attach_prog(prog, sk, locked);
        if (err < 0) {
                __bpf_prog_release(prog);
                return err;
@@ -1261,7 +1263,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(sk_attach_filter);
+EXPORT_SYMBOL_GPL(__sk_attach_filter);
+
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+{
+       return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
+}
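
An illustrative sketch of the calling convention (example_attach is hypothetical, not from this series): callers that hold the socket lock keep working exactly as before through the wrapper, while callers that serialize sk_filter by other means can pass their own assertion as 'locked':

#include <linux/filter.h>
#include <net/sock.h>

static int example_attach(struct sock *sk, struct sock_fprog *fprog)
{
	int err;

	lock_sock(sk);
	/* With the lock held, sock_owned_by_user(sk) is true, which is
	 * what sk_attach_filter() above passes through for us.
	 */
	err = __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
	release_sock(sk);

	return err;
}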
 
 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
@@ -1307,7 +1314,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
-       err = __sk_attach_prog(prog, sk);
+       err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
        if (err < 0) {
                bpf_prog_put(prog);
                return err;
@@ -1333,18 +1340,25 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
        return 0;
 }
 
-#define BPF_LDST_LEN 16U
+struct bpf_scratchpad {
+       union {
+               __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
+               u8     buff[MAX_BPF_STACK];
+       };
+};
+
+static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
+       struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        int offset = (int) r2;
        void *from = (void *) (long) r3;
        unsigned int len = (unsigned int) r4;
-       char buf[BPF_LDST_LEN];
        void *ptr;
 
-       if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM)))
+       if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
                return -EINVAL;
 
        /* bpf verifier guarantees that:
@@ -1355,14 +1369,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
         *
         * so check for invalid 'offset' and too large 'len'
         */
-       if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
+       if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
                return -EFAULT;
-
-       if (unlikely(skb_cloned(skb) &&
-                    !skb_clone_writable(skb, offset + len)))
+       if (unlikely(skb_try_make_writable(skb, offset + len)))
                return -EFAULT;
 
-       ptr = skb_header_pointer(skb, offset, len, buf);
+       ptr = skb_header_pointer(skb, offset, len, sp->buff);
        if (unlikely(!ptr))
                return -EFAULT;
 
@@ -1371,17 +1383,19 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 
        memcpy(ptr, from, len);
 
-       if (ptr == buf)
+       if (ptr == sp->buff)
                /* skb_store_bits cannot return -EFAULT here */
                skb_store_bits(skb, offset, ptr, len);
 
        if (flags & BPF_F_RECOMPUTE_CSUM)
                skb_postpush_rcsum(skb, ptr, len);
+       if (flags & BPF_F_INVALIDATE_HASH)
+               skb_clear_hash(skb);
 
        return 0;
 }
 
-const struct bpf_func_proto bpf_skb_store_bytes_proto = {
+static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
        .func           = bpf_skb_store_bytes,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
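
A hedged usage sketch for the grown write limit and the new flag (tc-style restricted C, helper declarations in the style of samples/bpf of this era; set_ports and the header offsets are illustrative and assume plain IPv4/UDP without VLAN):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <asm/byteorder.h>

static int (*bpf_skb_store_bytes)(void *ctx, int off, const void *from,
				  int len, int flags) =
	(void *) BPF_FUNC_skb_store_bytes;

__attribute__((section("classifier"), used))
int set_ports(struct __sk_buff *skb)
{
	__u16 ports[2] = { __constant_htons(5000), __constant_htons(6000) };
	int off = ETH_HLEN + sizeof(struct iphdr);	/* UDP ports, illustrative */

	/* The flow changed, so let the kernel drop the stale skb->hash
	 * via BPF_F_INVALIDATE_HASH; a real NAT would additionally fix
	 * the L4 checksum (see bpf_l4_csum_replace below). Writes may
	 * now be up to MAX_BPF_STACK bytes, bounced through the per-cpu
	 * scratchpad above instead of the old 16-byte stack buffer.
	 */
	bpf_skb_store_bytes(skb, off, ports, sizeof(ports),
			    BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH);
	return 0;
}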
@@ -1400,7 +1414,7 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        unsigned int len = (unsigned int) r4;
        void *ptr;
 
-       if (unlikely((u32) offset > 0xffff || len > BPF_LDST_LEN))
+       if (unlikely((u32) offset > 0xffff || len > MAX_BPF_STACK))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, len, to);
@@ -1412,7 +1426,7 @@ static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
        return 0;
 }
 
-const struct bpf_func_proto bpf_skb_load_bytes_proto = {
+static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
        .func           = bpf_skb_load_bytes,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
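
A matching read-side sketch (parse_hdrs is illustrative): loads can likewise exceed the old 16-byte cap now, bounded by the program's stack:

#include <linux/bpf.h>

static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
	(void *) BPF_FUNC_skb_load_bytes;

__attribute__((section("classifier"), used))
int parse_hdrs(struct __sk_buff *skb)
{
	__u8 hdrs[64];	/* > 16 bytes, fine as long as it fits the stack */

	if (bpf_skb_load_bytes(skb, 0, hdrs, sizeof(hdrs)) < 0)
		return 0;

	return hdrs[12] == 0x86;	/* e.g. peek at the EtherType, illustrative */
}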
@@ -1432,9 +1446,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-
-       if (unlikely(skb_cloned(skb) &&
-                    !skb_clone_writable(skb, offset + sizeof(sum))))
+       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1442,6 +1454,12 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EFAULT;
 
        switch (flags & BPF_F_HDR_FIELD_MASK) {
+       case 0:
+               if (unlikely(from != 0))
+                       return -EINVAL;
+
+               csum_replace_by_diff(ptr, to);
+               break;
        case 2:
                csum_replace2(ptr, from, to);
                break;
@@ -1459,7 +1477,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
        return 0;
 }
 
-const struct bpf_func_proto bpf_l3_csum_replace_proto = {
+static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
        .func           = bpf_l3_csum_replace,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1474,23 +1492,31 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
+       bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
        int offset = (int) r2;
        __sum16 sum, *ptr;
 
-       if (unlikely(flags & ~(BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
+       if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
+                              BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-
-       if (unlikely(skb_cloned(skb) &&
-                    !skb_clone_writable(skb, offset + sizeof(sum))))
+       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
        if (unlikely(!ptr))
                return -EFAULT;
+       if (is_mmzero && !*ptr)
+               return 0;
 
        switch (flags & BPF_F_HDR_FIELD_MASK) {
+       case 0:
+               if (unlikely(from != 0))
+                       return -EINVAL;
+
+               inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
+               break;
        case 2:
                inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
                break;
@@ -1501,6 +1527,8 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        }
 
+       if (is_mmzero && !*ptr)
+               *ptr = CSUM_MANGLED_0;
        if (ptr == &sum)
                /* skb_store_bits guaranteed to not return -EFAULT here */
                skb_store_bits(skb, offset, ptr, sizeof(sum));
@@ -1508,7 +1536,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
        return 0;
 }
 
-const struct bpf_func_proto bpf_l4_csum_replace_proto = {
+static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
        .func           = bpf_l4_csum_replace,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1519,6 +1547,45 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = {
        .arg5_type      = ARG_ANYTHING,
 };
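
A hedged sketch of the new BPF_F_MARK_MANGLED_0 semantics (fix_udp_csum and the fixed offsets are illustrative): a stored checksum of 0 means "no checksum" on UDP/IPv4 and is skipped on input, and a result that would fold to 0 is stored as CSUM_MANGLED_0 instead:

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <asm/byteorder.h>

static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to,
				  int flags) =
	(void *) BPF_FUNC_l4_csum_replace;

#define UDP_CSUM_OFF (ETH_HLEN + sizeof(struct iphdr) + \
		      offsetof(struct udphdr, check))	/* illustrative */

__attribute__((section("classifier"), used))
int fix_udp_csum(struct __sk_buff *skb)
{
	/* Fold the 5000 -> 6000 port rewrite from the earlier sketch
	 * into the UDP checksum; field width 2 selects
	 * inet_proto_csum_replace2() above.
	 */
	bpf_l4_csum_replace(skb, UDP_CSUM_OFF,
			    __constant_htons(5000), __constant_htons(6000),
			    2 | BPF_F_MARK_MANGLED_0);
	return 0;
}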
 
+static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
+{
+       struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
+       u64 diff_size = from_size + to_size;
+       __be32 *from = (__be32 *) (long) r1;
+       __be32 *to   = (__be32 *) (long) r3;
+       int i, j = 0;
+
+       /* This is quite flexible, some examples:
+        *
+        * from_size == 0, to_size > 0,  seed := csum --> pushing data
+        * from_size > 0,  to_size == 0, seed := csum --> pulling data
+        * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
+        *
+        * Even for diffing, from_size and to_size don't need to be equal.
+        */
+       if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
+                    diff_size > sizeof(sp->diff)))
+               return -EINVAL;
+
+       for (i = 0; i < from_size / sizeof(__be32); i++, j++)
+               sp->diff[j] = ~from[i];
+       for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
+               sp->diff[j] = to[i];
+
+       return csum_partial(sp->diff, diff_size, seed);
+}
+
+static const struct bpf_func_proto bpf_csum_diff_proto = {
+       .func           = bpf_csum_diff,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_STACK,
+       .arg2_type      = ARG_CONST_STACK_SIZE_OR_ZERO,
+       .arg3_type      = ARG_PTR_TO_STACK,
+       .arg4_type      = ARG_CONST_STACK_SIZE_OR_ZERO,
+       .arg5_type      = ARG_ANYTHING,
+};
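
Tying the pieces together in a hedged sketch (rewrite_v6_daddr and the offsets are illustrative, assuming plain IPv6/TCP): bpf_csum_diff() computes a delta over the old and new IPv6 destination, which the new field-width-0 mode of bpf_l4_csum_replace() then folds into the TCP checksum via inet_proto_csum_replace_by_diff():

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>

static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
	(void *) BPF_FUNC_skb_load_bytes;
static int (*bpf_skb_store_bytes)(void *ctx, int off, const void *from,
				  int len, int flags) =
	(void *) BPF_FUNC_skb_store_bytes;
static int (*bpf_csum_diff)(void *from, int from_size, void *to,
			    int to_size, int seed) =
	(void *) BPF_FUNC_csum_diff;
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to,
				  int flags) =
	(void *) BPF_FUNC_l4_csum_replace;

#define IP6_DST_OFF  (ETH_HLEN + offsetof(struct ipv6hdr, daddr))
#define TCP_CSUM_OFF (ETH_HLEN + sizeof(struct ipv6hdr) + \
		      offsetof(struct tcphdr, check))

__attribute__((section("classifier"), used))
int rewrite_v6_daddr(struct __sk_buff *skb)
{
	struct in6_addr old_dst, new_dst = {};
	int csum;

	new_dst.s6_addr[15] = 1;	/* ::1, purely illustrative */

	if (bpf_skb_load_bytes(skb, IP6_DST_OFF, &old_dst,
			       sizeof(old_dst)) < 0)
		return 0;

	/* from_size > 0, to_size > 0, seed 0: the "diffing data" case
	 * from the comment above.
	 */
	csum = bpf_csum_diff(&old_dst, sizeof(old_dst),
			     &new_dst, sizeof(new_dst), 0);

	bpf_skb_store_bytes(skb, IP6_DST_OFF, &new_dst,
			    sizeof(new_dst), 0);

	/* Field width 0: 'from' must be 0 and 'to' carries the diff;
	 * BPF_F_PSEUDO_HDR because the address sits in the pseudo header.
	 */
	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, csum, BPF_F_PSEUDO_HDR);
	return 0;
}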
+
 static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
@@ -1543,11 +1610,10 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
        }
 
        skb2->dev = dev;
-       skb_sender_cpu_clear(skb2);
        return dev_queue_xmit(skb2);
 }
 
-const struct bpf_func_proto bpf_clone_redirect_proto = {
+static const struct bpf_func_proto bpf_clone_redirect_proto = {
        .func           = bpf_clone_redirect,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1596,11 +1662,10 @@ int skb_do_redirect(struct sk_buff *skb)
        }
 
        skb->dev = dev;
-       skb_sender_cpu_clear(skb);
        return dev_queue_xmit(skb);
 }
 
-const struct bpf_func_proto bpf_redirect_proto = {
+static const struct bpf_func_proto bpf_redirect_proto = {
        .func           = bpf_redirect,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1622,14 +1687,7 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
 
 static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
-#ifdef CONFIG_IP_ROUTE_CLASSID
-       const struct dst_entry *dst;
-
-       dst = skb_dst((struct sk_buff *) (unsigned long) r1);
-       if (dst)
-               return dst->tclassid;
-#endif
-       return 0;
+       return dst_tclassid((struct sk_buff *) (unsigned long) r1);
 }
 
 static const struct bpf_func_proto bpf_get_route_realm_proto = {
@@ -1682,6 +1740,13 @@ bool bpf_helper_changes_skb_data(void *func)
                return true;
        if (func == bpf_skb_vlan_pop)
                return true;
+       if (func == bpf_skb_store_bytes)
+               return true;
+       if (func == bpf_l3_csum_replace)
+               return true;
+       if (func == bpf_l4_csum_replace)
+               return true;
+
        return false;
 }
 
@@ -1703,12 +1768,16 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                return -EPROTO;
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                switch (size) {
+               case offsetof(struct bpf_tunnel_key, tunnel_label):
+               case offsetof(struct bpf_tunnel_key, tunnel_ext):
+                       goto set_compat;
                case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
                        /* Fixup deprecated structure layouts here, so we have
                         * a common path later on.
                         */
                        if (ip_tunnel_info_af(info) != AF_INET)
                                return -EINVAL;
+set_compat:
                        to = (struct bpf_tunnel_key *)compat;
                        break;
                default:
@@ -1720,11 +1789,13 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        to->tunnel_tos = info->key.tos;
        to->tunnel_ttl = info->key.ttl;
 
-       if (flags & BPF_F_TUNINFO_IPV6)
+       if (flags & BPF_F_TUNINFO_IPV6) {
                memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
                       sizeof(to->remote_ipv6));
-       else
+               to->tunnel_label = be32_to_cpu(info->key.label);
+       } else {
                to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
+       }
 
        if (unlikely(size != sizeof(struct bpf_tunnel_key)))
                memcpy((void *)(long) r2, to, size);
@@ -1732,7 +1803,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        return 0;
 }
 
-const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
        .func           = bpf_skb_get_tunnel_key,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1742,6 +1813,32 @@ const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
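
A hedged read-side sketch (tunnel_in and the id check are illustrative) for a tc program on a collect-metadata tunnel device, using the extended struct with the new tunnel_label member:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size,
				     int flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;

__attribute__((section("classifier"), used))
int tunnel_in(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_TUNINFO_IPV6) < 0)
		return TC_ACT_SHOT;

	/* key.remote_ipv6[] and the new key.tunnel_label (IPv6 flow
	 * label) are filled in; binaries built against the older,
	 * shorter struct keep working via the compat cases above.
	 */
	return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
}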
 
+static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       u8 *to = (u8 *) (long) r2;
+       const struct ip_tunnel_info *info = skb_tunnel_info(skb);
+
+       if (unlikely(!info ||
+                    !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT)))
+               return -ENOENT;
+       if (unlikely(size < info->options_len))
+               return -ENOMEM;
+
+       ip_tunnel_info_opts_get(to, info);
+
+       return info->options_len;
+}
+
+static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
+       .func           = bpf_skb_get_tunnel_opt,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_CONST_STACK_SIZE,
+};
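
And a sketch for the new option getter (tunnel_opt_in and the buffer size are illustrative); the return value is the option length, with -ENOENT when the frame carried no options and -ENOMEM when the buffer is too small:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *opt, int size) =
	(void *) BPF_FUNC_skb_get_tunnel_opt;

__attribute__((section("classifier"), used))
int tunnel_opt_in(struct __sk_buff *skb)
{
	__u8 opt[4] = {};	/* e.g. a 4-byte Geneve/VXLAN-GBP blob */

	return bpf_skb_get_tunnel_opt(skb, opt, sizeof(opt)) > 0 ?
	       TC_ACT_OK : TC_ACT_SHOT;
}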
+
 static struct metadata_dst __percpu *md_dst;
 
 static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
@@ -1752,10 +1849,13 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        u8 compat[sizeof(struct bpf_tunnel_key)];
        struct ip_tunnel_info *info;
 
-       if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6)))
+       if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
+                              BPF_F_DONT_FRAGMENT)))
                return -EINVAL;
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                switch (size) {
+               case offsetof(struct bpf_tunnel_key, tunnel_label):
+               case offsetof(struct bpf_tunnel_key, tunnel_ext):
                case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
                        /* Fixup deprecated structure layouts here, so we have
                         * a common path later on.
@@ -1768,6 +1868,9 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                        return -EINVAL;
                }
        }
+       if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
+                    from->tunnel_ext))
+               return -EINVAL;
 
        skb_dst_drop(skb);
        dst_hold((struct dst_entry *) md);
@@ -1776,7 +1879,10 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        info = &md->u.tun_info;
        info->mode = IP_TUNNEL_INFO_TX;
 
-       info->key.tun_flags = TUNNEL_KEY;
+       info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
+       if (flags & BPF_F_DONT_FRAGMENT)
+               info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+
        info->key.tun_id = cpu_to_be64(from->tunnel_id);
        info->key.tos = from->tunnel_tos;
        info->key.ttl = from->tunnel_ttl;
@@ -1785,14 +1891,18 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                info->mode |= IP_TUNNEL_INFO_IPV6;
                memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
                       sizeof(from->remote_ipv6));
+               info->key.label = cpu_to_be32(from->tunnel_label) &
+                                 IPV6_FLOWLABEL_MASK;
        } else {
                info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+               if (flags & BPF_F_ZERO_CSUM_TX)
+                       info->key.tun_flags &= ~TUNNEL_CSUM;
        }
 
        return 0;
 }
 
-const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
        .func           = bpf_skb_set_tunnel_key,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
@@ -1802,17 +1912,53 @@ const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
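
Transmit-side sketch for the new flags (tunnel_out and the addresses are illustrative): TUNNEL_CSUM is now requested by default, BPF_F_ZERO_CSUM_TX opts an IPv4 tunnel back out of it, and BPF_F_DONT_FRAGMENT sets DF:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size,
				     int flags) =
	(void *) BPF_FUNC_skb_set_tunnel_key;

__attribute__((section("classifier"), used))
int tunnel_out(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};	/* tunnel_ext must stay 0, see above */

	key.tunnel_id   = 42;
	key.tunnel_ttl  = 64;
	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100, host order, illustrative */

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX |
				   BPF_F_DONT_FRAGMENT) < 0)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}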
 
-static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       u8 *from = (u8 *) (long) r2;
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       const struct metadata_dst *md = this_cpu_ptr(md_dst);
+
+       if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
+               return -EINVAL;
+       if (unlikely(size > IP_TUNNEL_OPTS_MAX))
+               return -ENOMEM;
+
+       ip_tunnel_info_opts_set(info, from, size);
+
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
+       .func           = bpf_skb_set_tunnel_opt,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_CONST_STACK_SIZE,
+};
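
Option setter sketch (tunnel_opt_out and the option payload are illustrative). Note the ordering constraint enforced above: this only succeeds after bpf_skb_set_tunnel_key() has installed the per-cpu metadata dst on the skb, and size must be a multiple of four:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_set_tunnel_opt)(void *ctx, const void *opt, int size) =
	(void *) BPF_FUNC_skb_set_tunnel_opt;

__attribute__((section("classifier"), used))
int tunnel_opt_out(struct __sk_buff *skb)
{
	__u32 gbp = 0x800;	/* e.g. a VXLAN-GBP style option word, illustrative */

	/* Call after bpf_skb_set_tunnel_key(); anything else fails the
	 * info != &md->u.tun_info check with -EINVAL.
	 */
	return bpf_skb_set_tunnel_opt(skb, &gbp, sizeof(gbp)) ?
	       TC_ACT_SHOT : TC_ACT_OK;
}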
+
+static const struct bpf_func_proto *
+bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 {
        if (!md_dst) {
-               /* race is not possible, since it's called from
-                * verifier that is holding verifier mutex
+               /* Race is not possible, since it's called from verifier
+                * that is holding verifier mutex.
                 */
-               md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+               md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
+                                                  GFP_KERNEL);
                if (!md_dst)
                        return NULL;
        }
-       return &bpf_skb_set_tunnel_key_proto;
+
+       switch (which) {
+       case BPF_FUNC_skb_set_tunnel_key:
+               return &bpf_skb_set_tunnel_key_proto;
+       case BPF_FUNC_skb_set_tunnel_opt:
+               return &bpf_skb_set_tunnel_opt_proto;
+       default:
+               return NULL;
+       }
 }
 
 static const struct bpf_func_proto *
@@ -1849,6 +1995,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_skb_store_bytes_proto;
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
+       case BPF_FUNC_csum_diff:
+               return &bpf_csum_diff_proto;
        case BPF_FUNC_l3_csum_replace:
                return &bpf_l3_csum_replace_proto;
        case BPF_FUNC_l4_csum_replace:
@@ -1864,7 +2012,11 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_skb_get_tunnel_key:
                return &bpf_skb_get_tunnel_key_proto;
        case BPF_FUNC_skb_set_tunnel_key:
-               return bpf_get_skb_set_tunnel_key_proto();
+               return bpf_get_skb_set_tunnel_proto(func_id);
+       case BPF_FUNC_skb_get_tunnel_opt:
+               return &bpf_skb_get_tunnel_opt_proto;
+       case BPF_FUNC_skb_set_tunnel_opt:
+               return bpf_get_skb_set_tunnel_proto(func_id);
        case BPF_FUNC_redirect:
                return &bpf_redirect_proto;
        case BPF_FUNC_get_route_realm:
@@ -1913,16 +2065,14 @@ static bool sk_filter_is_valid_access(int off, int size,
 static bool tc_cls_act_is_valid_access(int off, int size,
                                       enum bpf_access_type type)
 {
-       if (off == offsetof(struct __sk_buff, tc_classid))
-               return type == BPF_WRITE ? true : false;
-
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, mark):
                case offsetof(struct __sk_buff, tc_index):
                case offsetof(struct __sk_buff, priority):
                case offsetof(struct __sk_buff, cb[0]) ...
-                       offsetof(struct __sk_buff, cb[4]):
+                    offsetof(struct __sk_buff, cb[4]):
+               case offsetof(struct __sk_buff, tc_classid):
                        break;
                default:
                        return false;
@@ -2039,8 +2189,10 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                ctx_off -= offsetof(struct __sk_buff, tc_classid);
                ctx_off += offsetof(struct sk_buff, cb);
                ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
-               WARN_ON(type != BPF_WRITE);
-               *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+               if (type == BPF_WRITE)
+                       *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+               else
+                       *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
                break;
 
        case offsetof(struct __sk_buff, tc_index):
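
A hedged sketch of what this enables in tc programs (set_class and the classid value are illustrative): tc_classid used to be write-only from BPF; with the LDX path added above it can be read back as well:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

__attribute__((section("classifier"), used))
int set_class(struct __sk_buff *skb)
{
	/* Read-modify-write on tc_classid, now that loads are allowed
	 * in addition to stores:
	 */
	if (skb->tc_classid == 0)
		skb->tc_classid = (1 << 16) | 1;	/* 1:1, illustrative */

	return TC_ACT_OK;
}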
@@ -2103,7 +2255,7 @@ static int __init register_sk_filter_ops(void)
 }
 late_initcall(register_sk_filter_ops);
 
-int sk_detach_filter(struct sock *sk)
+int __sk_detach_filter(struct sock *sk, bool locked)
 {
        int ret = -ENOENT;
        struct sk_filter *filter;
@@ -2111,8 +2263,7 @@ int sk_detach_filter(struct sock *sk)
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
                return -EPERM;
 
-       filter = rcu_dereference_protected(sk->sk_filter,
-                                          sock_owned_by_user(sk));
+       filter = rcu_dereference_protected(sk->sk_filter, locked);
        if (filter) {
                RCU_INIT_POINTER(sk->sk_filter, NULL);
                sk_filter_uncharge(sk, filter);
@@ -2121,7 +2272,12 @@ int sk_detach_filter(struct sock *sk)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(sk_detach_filter);
+EXPORT_SYMBOL_GPL(__sk_detach_filter);
+
+int sk_detach_filter(struct sock *sk)
+{
+       return __sk_detach_filter(sk, sock_owned_by_user(sk));
+}
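
Illustrative caller sketch (example_detach is hypothetical): a path that serializes the filter pointer under RTNL rather than the socket lock, the kind of caller this refactor appears to target, can now assert that instead of sock_owned_by_user():

#include <linux/filter.h>
#include <linux/rtnetlink.h>

static void example_detach(struct sock *sk)
{
	ASSERT_RTNL();
	__sk_detach_filter(sk, lockdep_rtnl_is_held());
}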
 
 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                  unsigned int len)