bpf: wire in data and data_end for cls_act_bpf
author     Alexei Starovoitov <ast@fb.com>
           Fri, 6 May 2016 02:49:12 +0000 (19:49 -0700)
committer  David S. Miller <davem@davemloft.net>
           Fri, 6 May 2016 20:01:54 +0000 (16:01 -0400)
Allow cls_bpf and act_bpf programs to access the skb->data and skb->data_end pointers.
The bpf helpers that change skb->data need to update the data_end pointer as well.
The verifier checks that programs always reload the data and data_end pointers
after calls to such bpf helpers.
We cannot add a 'data_end' pointer to struct qdisc_skb_cb directly, since that
struct is embedded as-is by infiniband ipoib, so a wrapper struct is needed.
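
For illustration only (not part of this commit), a minimal cls_bpf program
using the new fields might look like the sketch below: it bounds-checks data
against data_end before a direct packet read, and reloads both pointers after
a helper that may move skb->data. The SEC()/bpf_htons() macros, header paths
and names follow libbpf conventions and are assumptions, not code from this
patch.

/* illustrative sketch only -- not part of this commit */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int cls_data_end_example(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	/* without this check the verifier rejects the direct read below */
	if (data + sizeof(*eth) > data_end)
		return TC_ACT_SHOT;

	if (eth->h_proto != bpf_htons(ETH_P_8021Q)) {
		/* this helper may reallocate the skb data ... */
		if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42))
			return TC_ACT_SHOT;

		/* ... so data/data_end must be reloaded before reuse */
		data = (void *)(long)skb->data;
		data_end = (void *)(long)skb->data_end;
		if (data + sizeof(*eth) > data_end)
			return TC_ACT_SHOT;
	}

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";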

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/filter.h
net/core/filter.c
net/sched/act_bpf.c
net/sched/cls_bpf.c

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 43aa1f8855c7ff59ab562b722cfd8dc19c4eee7e..ec1411c891056daa6f372f5fabc622c9cdb22a52 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -352,6 +352,22 @@ struct sk_filter {
 
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
+struct bpf_skb_data_end {
+       struct qdisc_skb_cb qdisc_cb;
+       void *data_end;
+};
+
+/* compute the linear packet data range [data, data_end) which
+ * will be accessed by cls_bpf and act_bpf programs
+ */
+static inline void bpf_compute_data_end(struct sk_buff *skb)
+{
+       struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+       BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+       cb->data_end = skb->data + skb_headlen(skb);
+}
+
 static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 {
        /* eBPF programs may read/write skb->cb[] area to transfer meta
diff --git a/net/core/filter.c b/net/core/filter.c
index 218e5de8c402b9fac28083fbf1400590022be514..71c2a1f473adb82a77974cc377f88b5c26f5d42a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1344,6 +1344,21 @@ struct bpf_scratchpad {
 
 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
+static inline int bpf_try_make_writable(struct sk_buff *skb,
+                                       unsigned int write_len)
+{
+       int err;
+
+       if (!skb_cloned(skb))
+               return 0;
+       if (skb_clone_writable(skb, write_len))
+               return 0;
+       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+       if (!err)
+               bpf_compute_data_end(skb);
+       return err;
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
@@ -1366,7 +1381,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
         */
        if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff)))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + len)))
+       if (unlikely(bpf_try_make_writable(skb, offset + len)))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, len, sp->buff);
@@ -1444,7 +1459,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1499,7 +1514,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
                return -EINVAL;
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
-       if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
+       if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1699,12 +1714,15 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
        __be16 vlan_proto = (__force __be16) r2;
+       int ret;
 
        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);
 
-       return skb_vlan_push(skb, vlan_proto, vlan_tci);
+       ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_push_proto = {
@@ -1720,8 +1738,11 @@ EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
 static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       int ret;
 
-       return skb_vlan_pop(skb);
+       ret = skb_vlan_pop(skb);
+       bpf_compute_data_end(skb);
+       return ret;
 }
 
 const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
@@ -2066,8 +2087,12 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 static bool sk_filter_is_valid_access(int off, int size,
                                      enum bpf_access_type type)
 {
-       if (off == offsetof(struct __sk_buff, tc_classid))
+       switch (off) {
+       case offsetof(struct __sk_buff, tc_classid):
+       case offsetof(struct __sk_buff, data):
+       case offsetof(struct __sk_buff, data_end):
                return false;
+       }
 
        if (type == BPF_WRITE) {
                switch (off) {
@@ -2215,6 +2240,20 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                        *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
                break;
 
+       case offsetof(struct __sk_buff, data):
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+                                     dst_reg, src_reg,
+                                     offsetof(struct sk_buff, data));
+               break;
+
+       case offsetof(struct __sk_buff, data_end):
+               ctx_off -= offsetof(struct __sk_buff, data_end);
+               ctx_off += offsetof(struct sk_buff, cb);
+               ctx_off += offsetof(struct bpf_skb_data_end, data_end);
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
+                                     dst_reg, src_reg, ctx_off);
+               break;
+
        case offsetof(struct __sk_buff, tc_index):
 #ifdef CONFIG_NET_SCHED
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 4fd703362563e99eba8a6a60aceb0c9bcccb5a74..c7123e01c2cabc9c58bbe4d3be2c3836d7f5e2e2 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -53,9 +53,11 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
        filter = rcu_dereference(prog->filter);
        if (at_ingress) {
                __skb_push(skb, skb->mac_len);
+               bpf_compute_data_end(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
                __skb_pull(skb, skb->mac_len);
        } else {
+               bpf_compute_data_end(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
        }
        rcu_read_unlock();
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 425fe6a0eda33e47952802956c0c48aea67f80df..7b342c779da7b1f7572f50e6bb45d4b99a1238df 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -96,9 +96,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
+                       bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
+                       bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }
 