Commit | Line | Data |
---|---|---|
5ab11c98 PE |
1 | #ifndef __NET_FRAG_H__ |
2 | #define __NET_FRAG_H__ | |
3 | ||
6d7b857d JDB |
4 | #include <linux/percpu_counter.h> |
5 | ||
ac18e750 | 6 | struct netns_frags { |
6d7b857d JDB |
7 | /* The percpu_counter "mem" need to be cacheline aligned. |
8 | * mem.count must not share cacheline with other writers | |
cd39a789 | 9 | */ |
6d7b857d JDB |
10 | struct percpu_counter mem ____cacheline_aligned_in_smp; |
11 | ||
b2fd5321 PE |
12 | /* sysctls */ |
13 | int timeout; | |
e31e0bdc PE |
14 | int high_thresh; |
15 | int low_thresh; | |
ac18e750 PE |
16 | }; |
17 | ||
1ab1934e NA |
18 | /** |
19 | * fragment queue flags | |
20 | * | |
21 | * @INET_FRAG_FIRST_IN: first fragment has arrived | |
22 | * @INET_FRAG_LAST_IN: final fragment has arrived | |
23 | * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction | |
24 | * @INET_FRAG_EVICTED: frag queue is being evicted | |
25 | */ | |
26 | enum { | |
27 | INET_FRAG_FIRST_IN = BIT(0), | |
28 | INET_FRAG_LAST_IN = BIT(1), | |
29 | INET_FRAG_COMPLETE = BIT(2), | |
30 | INET_FRAG_EVICTED = BIT(3) | |
31 | }; | |
32 | ||
33 | /** | |
34 | * struct inet_frag_queue - fragment queue | |
35 | * | |
36 | * @lock: spinlock protecting the queue | |
37 | * @timer: queue expiration timer | |
38 | * @list: hash bucket list | |
39 | * @refcnt: reference count of the queue | |
40 | * @fragments: received fragments head | |
41 | * @fragments_tail: received fragments tail | |
42 | * @stamp: timestamp of the last received fragment | |
43 | * @len: total length of the original datagram | |
44 | * @meat: length of received fragments so far | |
45 | * @flags: fragment queue flags | |
46 | * @max_size: (ipv4 only) maximum received fragment size with IP_DF set | |
47 | * @net: namespace that this frag belongs to | |
48 | */ | |
5ab11c98 | 49 | struct inet_frag_queue { |
5ab11c98 | 50 | spinlock_t lock; |
1ab1934e | 51 | struct timer_list timer; |
6e34a8b3 JDB |
52 | struct hlist_node list; |
53 | atomic_t refcnt; | |
1ab1934e | 54 | struct sk_buff *fragments; |
d6bebca9 | 55 | struct sk_buff *fragments_tail; |
5ab11c98 | 56 | ktime_t stamp; |
1ab1934e | 57 | int len; |
5ab11c98 | 58 | int meat; |
1ab1934e | 59 | __u8 flags; |
5f2d04f1 | 60 | u16 max_size; |
6e34a8b3 | 61 | struct netns_frags *net; |
5ab11c98 PE |
62 | }; |
63 | ||
a4c4009f | 64 | #define INETFRAGS_HASHSZ 1024 |
7eb95156 | 65 | |
5a3da1fe HFS |
66 | /* averaged: |
67 | * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / | |
68 | * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or | |
69 | * struct frag_queue)) | |
70 | */ | |
b13d3cbf | 71 | #define INETFRAGS_MAXDEPTH 128 |
5a3da1fe | 72 | |
19952cc4 JDB |
73 | struct inet_frag_bucket { |
74 | struct hlist_head chain; | |
75 | spinlock_t chain_lock; | |
76 | }; | |
77 | ||
7eb95156 | 78 | struct inet_frags { |
19952cc4 | 79 | struct inet_frag_bucket hash[INETFRAGS_HASHSZ]; |
7088ad74 | 80 | |
b13d3cbf FW |
81 | struct work_struct frags_work; |
82 | unsigned int next_bucket; | |
e3a57d18 FW |
83 | unsigned long last_rebuild_jiffies; |
84 | bool rebuild; | |
b13d3cbf | 85 | |
7088ad74 HFS |
86 | /* The first call to hashfn is responsible to initialize |
87 | * rnd. This is best done with net_get_random_once. | |
ab1c724f FW |
88 | * |
89 | * rnd_seqlock is used to let hash insertion detect | |
90 | * when it needs to re-lookup the hash chain to use. | |
7088ad74 | 91 | */ |
5f8e1e8b | 92 | u32 rnd; |
ab1c724f | 93 | seqlock_t rnd_seqlock; |
5f8e1e8b | 94 | int qsize; |
321a3a99 | 95 | |
36c77782 FW |
96 | unsigned int (*hashfn)(const struct inet_frag_queue *); |
97 | bool (*match)(const struct inet_frag_queue *q, | |
98 | const void *arg); | |
c6fda282 | 99 | void (*constructor)(struct inet_frag_queue *q, |
36c77782 | 100 | const void *arg); |
1e4b8287 PE |
101 | void (*destructor)(struct inet_frag_queue *); |
102 | void (*skb_free)(struct sk_buff *); | |
e521db9d | 103 | void (*frag_expire)(unsigned long data); |
d4ad4d22 NA |
104 | struct kmem_cache *frags_cachep; |
105 | const char *frags_cache_name; | |
7eb95156 PE |
106 | }; |
107 | ||
d4ad4d22 | 108 | int inet_frags_init(struct inet_frags *); |
7eb95156 PE |
109 | void inet_frags_fini(struct inet_frags *); |
110 | ||
e5a2bb84 | 111 | void inet_frags_init_net(struct netns_frags *nf); |
81566e83 | 112 | void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); |
e5a2bb84 | 113 | |
277e650d | 114 | void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); |
3fd588eb | 115 | void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f); |
ac18e750 | 116 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, |
ab1c724f FW |
117 | struct inet_frags *f, void *key, unsigned int hash); |
118 | ||
5a3da1fe HFS |
119 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, |
120 | const char *prefix); | |
277e650d | 121 | |
762cc408 PE |
122 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
123 | { | |
124 | if (atomic_dec_and_test(&q->refcnt)) | |
3fd588eb | 125 | inet_frag_destroy(q, f); |
762cc408 PE |
126 | } |
127 | ||
d433673e JDB |
128 | /* Memory Tracking Functions. */ |
129 | ||
6d7b857d JDB |
130 | /* The default percpu_counter batch size is not big enough to scale to |
131 | * fragmentation mem acct sizes. | |
132 | * The mem size of a 64K fragment is approx: | |
133 | * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes | |
134 | */ | |
135 | static unsigned int frag_percpu_counter_batch = 130000; | |
136 | ||
d433673e JDB |
137 | static inline int frag_mem_limit(struct netns_frags *nf) |
138 | { | |
6d7b857d | 139 | return percpu_counter_read(&nf->mem); |
d433673e JDB |
140 | } |
141 | ||
142 | static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) | |
143 | { | |
6d7b857d | 144 | __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); |
d433673e JDB |
145 | } |
146 | ||
147 | static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) | |
148 | { | |
6d7b857d | 149 | __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); |
d433673e JDB |
150 | } |
151 | ||
152 | static inline void init_frag_mem_limit(struct netns_frags *nf) | |
153 | { | |
908c7f19 | 154 | percpu_counter_init(&nf->mem, 0, GFP_KERNEL); |
d433673e JDB |
155 | } |
156 | ||
36c77782 | 157 | static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) |
d433673e | 158 | { |
36c77782 | 159 | unsigned int res; |
4cfb0485 ED |
160 | |
161 | local_bh_disable(); | |
162 | res = percpu_counter_sum_positive(&nf->mem); | |
163 | local_bh_enable(); | |
164 | ||
165 | return res; | |
d433673e JDB |
166 | } |
167 | ||
be991971 HFS |
168 | /* RFC 3168 support : |
169 | * We want to check ECN values of all fragments, do detect invalid combinations. | |
170 | * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value. | |
171 | */ | |
172 | #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */ | |
173 | #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */ | |
174 | #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */ | |
175 | #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */ | |
176 | ||
177 | extern const u8 ip_frag_ecn_table[16]; | |
178 | ||
5ab11c98 | 179 | #endif |