Commit | Line | Data |
---|---|---|
5ab11c98 PE |
1 | #ifndef __NET_FRAG_H__ |
2 | #define __NET_FRAG_H__ | |
3 | ||
6d7b857d JDB |
4 | #include <linux/percpu_counter.h> |
5 | ||
struct netns_frags {
	/* The percpu_counter "mem" need to be cacheline aligned.
	 * mem.count must not share cacheline with other writers
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;	/* lifetime of an incomplete frag queue */
	int			high_thresh;	/* memory limit: stop accepting new frags */
	int			low_thresh;	/* memory limit: eviction target */
	int			max_dist;	/* NOTE(review): presumably max allowed
						 * fragment reordering distance — confirm
						 * against the ipfrag_max_dist sysctl user.
						 */
};
18 | ||
1ab1934e NA |
/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 *
 * Stored OR-ed together in inet_frag_queue->flags.
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
};
31 | ||
/**
 * struct inet_frag_queue - fragment queue
 *
 * @lock: spinlock protecting the queue
 * @timer: queue expiration timer
 * @list: hash bucket list
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags (INET_FRAG_* bits above)
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
 */
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct netns_frags	*net;
	struct hlist_node	list_evictor;
};
64 | ||
a4c4009f | 65 | #define INETFRAGS_HASHSZ 1024 |
7eb95156 | 66 | |
5a3da1fe HFS |
67 | /* averaged: |
68 | * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / | |
69 | * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or | |
70 | * struct frag_queue)) | |
71 | */ | |
b13d3cbf | 72 | #define INETFRAGS_MAXDEPTH 128 |
5a3da1fe | 73 | |
19952cc4 JDB |
/* One hash bucket: a chain of inet_frag_queue entries plus the lock
 * that protects that chain.
 */
struct inet_frag_bucket {
	struct hlist_head	chain;		/* queues hashed into this bucket */
	spinlock_t		chain_lock;	/* protects @chain */
};
78 | ||
/* Per-protocol fragment bookkeeping: the shared hash table plus the
 * protocol-specific callbacks (IPv4, IPv6, ...).
 */
struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;	/* deferred work (eviction/rebuild —
						 * NOTE(review): confirm in inet_fragment.c) */
	unsigned int		next_bucket;	/* next bucket for the worker to scan */
	unsigned long		last_rebuild_jiffies;	/* when the hash was last rekeyed */
	bool			rebuild;	/* hash rebuild requested */

	/* The first call to hashfn is responsible to initialize
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect
	 * when it needs to re-lookup the hash chain to use.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;		/* size of the protocol's queue struct,
						 * used for @frags_cachep allocations */

	/* protocol callbacks */
	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*frag_expire)(unsigned long data);	/* timer callback */
	struct kmem_cache	*frags_cachep;	/* cache backing queue allocations */
	const char		*frags_cache_name;
};
107 | ||
d4ad4d22 | 108 | int inet_frags_init(struct inet_frags *); |
7eb95156 PE |
109 | void inet_frags_fini(struct inet_frags *); |
110 | ||
1d6119ba ED |
111 | static inline int inet_frags_init_net(struct netns_frags *nf) |
112 | { | |
113 | return percpu_counter_init(&nf->mem, 0, GFP_KERNEL); | |
114 | } | |
115 | static inline void inet_frags_uninit_net(struct netns_frags *nf) | |
116 | { | |
117 | percpu_counter_destroy(&nf->mem); | |
118 | } | |
119 | ||
81566e83 | 120 | void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); |
e5a2bb84 | 121 | |
277e650d | 122 | void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); |
3fd588eb | 123 | void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f); |
ac18e750 | 124 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, |
ab1c724f FW |
125 | struct inet_frags *f, void *key, unsigned int hash); |
126 | ||
5a3da1fe HFS |
127 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, |
128 | const char *prefix); | |
277e650d | 129 | |
762cc408 PE |
130 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
131 | { | |
132 | if (atomic_dec_and_test(&q->refcnt)) | |
3fd588eb | 133 | inet_frag_destroy(q, f); |
762cc408 PE |
134 | } |
135 | ||
caaecdd3 NA |
136 | static inline bool inet_frag_evicting(struct inet_frag_queue *q) |
137 | { | |
138 | return !hlist_unhashed(&q->list_evictor); | |
139 | } | |
140 | ||
d433673e JDB |
141 | /* Memory Tracking Functions. */ |
142 | ||
6d7b857d JDB |
/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 *
 * Declared const: a mutable "static" defined in a header would give every
 * translation unit its own private, writable copy, so any write would
 * silently diverge between compilation units.  The value is only ever
 * passed as the batch argument to __percpu_counter_add().
 */
static const unsigned int frag_percpu_counter_batch = 130000;
149 | ||
d433673e JDB |
150 | static inline int frag_mem_limit(struct netns_frags *nf) |
151 | { | |
6d7b857d | 152 | return percpu_counter_read(&nf->mem); |
d433673e JDB |
153 | } |
154 | ||
0e60d245 | 155 | static inline void sub_frag_mem_limit(struct netns_frags *nf, int i) |
d433673e | 156 | { |
0e60d245 | 157 | __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch); |
d433673e JDB |
158 | } |
159 | ||
0e60d245 | 160 | static inline void add_frag_mem_limit(struct netns_frags *nf, int i) |
d433673e | 161 | { |
0e60d245 | 162 | __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch); |
d433673e JDB |
163 | } |
164 | ||
36c77782 | 165 | static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) |
d433673e | 166 | { |
36c77782 | 167 | unsigned int res; |
4cfb0485 ED |
168 | |
169 | local_bh_disable(); | |
170 | res = percpu_counter_sum_positive(&nf->mem); | |
171 | local_bh_enable(); | |
172 | ||
173 | return res; | |
d433673e JDB |
174 | } |
175 | ||
be991971 HFS |
176 | /* RFC 3168 support : |
 * We want to check ECN values of all fragments, to detect invalid combinations.
178 | * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value. | |
179 | */ | |
180 | #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */ | |
181 | #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */ | |
182 | #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */ | |
183 | #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */ | |
184 | ||
185 | extern const u8 ip_frag_ecn_table[16]; | |
186 | ||
5ab11c98 | 187 | #endif |