#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;		/* when will this queue expire? */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;		/* total length of orig datagram */
	int			meat;		/* bytes of the datagram received so far */
	__u8			last_in;	/* first/last segment arrived? */

#define INET_FRAG_EVICTED	8
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;	/* maximum received fragment size */

	struct netns_frags	*net;
};
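
/* Protocols embed struct inet_frag_queue as the first member of their own
 * queue structure and convert between the two with container_of().  A
 * minimal sketch of the pattern, loosely modeled on IPv4 reassembly; the
 * member and helper names below are illustrative, not a fixed API:
 *
 *	struct ipq {
 *		struct inet_frag_queue q;	(must be the first member)
 *		u32	user;
 *		__be32	saddr;
 *		__be32	daddr;
 *		__be16	id;
 *		u8	protocol;
 *	};
 *
 *	static inline struct ipq *to_ipq(struct inet_frag_queue *q)
 *	{
 *		return container_of(q, struct ipq, q);
 *	}
 */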

#define INETFRAGS_HASHSZ	1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter).  Important to keep this on a separate cacheline.
	 * It's primarily a rebuild-protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;

	struct work_struct	frags_work;
	unsigned int		next_bucket;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 */
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};
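
/* A protocol registers itself by filling in the callbacks above and then
 * calling inet_frags_init().  A sketch, loosely modeled on the IPv4 setup;
 * ip4_hashfn, ip4_frag_init, etc. stand in for the protocol's own helpers:
 *
 *	static struct inet_frags ip4_frags;
 *
 *	void __init ipfrag_init(void)
 *	{
 *		ip4_frags.hashfn = ip4_hashfn;
 *		ip4_frags.match = ip4_frag_match;
 *		ip4_frags.constructor = ip4_frag_init;
 *		ip4_frags.destructor = ip4_frag_free;
 *		ip4_frags.skb_free = NULL;
 *		ip4_frags.qsize = sizeof(struct ipq);
 *		ip4_frags.frag_expire = ip_expire;
 *		ip4_frags.secret_interval = 10 * 60 * HZ;
 *		inet_frags_init(&ip4_frags);
 *	}
 */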

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
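
/* Each network namespace pairs these two calls, typically from its pernet
 * init/exit hooks.  A sketch only; the function names and threshold values
 * are illustrative, not mandated by this API:
 *
 *	static int __net_init ipv4_frags_init_net(struct net *net)
 *	{
 *		net->ipv4.frags.high_thresh = 256 * 1024;
 *		net->ipv4.frags.low_thresh = 192 * 1024;
 *		net->ipv4.frags.timeout = IP_FRAG_TIME;
 *		inet_frags_init_net(&net->ipv4.frags);
 *		return 0;
 *	}
 *
 *	static void __net_exit ipv4_frags_exit_net(struct net *net)
 *	{
 *		inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
 *	}
 */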

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
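
/* Typical lookup path: take the read side of f->lock, hash the protocol
 * key, and call inet_frag_find(), which drops the lock (note the
 * __releases annotation above) and returns an ERR_PTR when the hash chain
 * is over-full.  Sketch, with IPv4-style names for illustration:
 *
 *	read_lock(&ip4_frags.lock);
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	return container_of(q, struct ipq, q);
 */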

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}
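
/* Teardown pattern: a queue that has completed (or expired) is unhashed
 * under its lock with inet_frag_kill(), then the caller's reference is
 * dropped; the last inet_frag_put() frees the queue via
 * inet_frag_destroy().  Sketch:
 *
 *	spin_lock(&qp->q.lock);
 *	inet_frag_kill(&qp->q, &ip4_frags);
 *	spin_unlock(&qp->q.lock);
 *	inet_frag_put(&qp->q, &ip4_frags);
 */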

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}
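
/* Accounting sketch: a protocol charges an skb's truesize when queuing a
 * fragment and releases the same amount when the fragment (or the whole
 * queue) is freed, e.g.:
 *
 *	add_frag_mem_limit(&qp->q, skb->truesize);	(on enqueue)
 *	sub_frag_mem_limit(&qp->q, skb->truesize);	(on free)
 */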

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
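
/* frag_mem_limit() above is the cheap, approximate fast-path read;
 * sum_frag_mem_limit() folds in every per-CPU delta and is intended for
 * slow paths such as reporting memory usage to user space.
 */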

/* RFC 3168 support:
 * We want to check the ECN values of all fragments, to detect invalid
 * combinations.
 * In ipq->ecn, we store the OR of each fragment's ip4_frag_ecn() value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
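
/* Usage sketch: the OR of the per-fragment IPFRAG_ECN_* flags indexes the
 * table, and 0xff marks an invalid combination, causing the datagram to be
 * dropped.  Assuming the protocol keeps the OR'ed flags in its own queue
 * struct, as IPv4 does in ipq->ecn:
 *
 *	u8 ecn = ip_frag_ecn_table[qp->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto out_fail;	(invalid ECN combination, drop)
 */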

#endif /* __NET_FRAG_H__ */