net: fix percpu memory leaks
include/net/inet_frag.h
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	/* The percpu_counter "mem" needs to be cacheline aligned.
	 * mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter	mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
};

/**
 * struct inet_frag_queue - fragment queue
 *
 * @lock: spinlock protecting the queue
 * @timer: queue expiration timer
 * @list: hash bucket list
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
 */
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments;
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;
	u16			max_size;
	struct netns_frags	*net;
	struct hlist_node	list_evictor;
};
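
/* Illustrative sketch, not part of the original header: protocols
 * typically treat reassembly as complete once both FIRST_IN and
 * LAST_IN have been seen and every byte in between has arrived
 * (IPv4 performs this check in ip_frag_queue() before reassembling).
 * The helper name here is hypothetical.
 */
static inline bool inet_frag_queue_complete_example(const struct inet_frag_queue *q)
{
	return q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	       q->meat == q->len;
}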

#define INETFRAGS_HASHSZ	1024

/* Averaged per-bucket chain depth limit:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded-up (SKB_TRUESIZE(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH	128

struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];

	struct work_struct	frags_work;
	unsigned int		next_bucket;
	unsigned long		last_rebuild_jiffies;
	bool			rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd; this is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect
	 * when it needs to re-lookup the hash chain to use.
	 */
	u32			rnd;
	seqlock_t		rnd_seqlock;
	int			qsize;

	unsigned int		(*hashfn)(const struct inet_frag_queue *);
	bool			(*match)(const struct inet_frag_queue *q,
					 const void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
};
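
/* Illustrative sketch, not part of the original header: a protocol
 * declares its callbacks, fills in an inet_frags instance and
 * registers it once with inet_frags_init(); modelled loosely on the
 * ip4_frags setup in net/ipv4/ip_fragment.c. All my_* names are
 * hypothetical.
 */
unsigned int my_hashfn(const struct inet_frag_queue *q);
bool my_match(const struct inet_frag_queue *q, const void *arg);
void my_constructor(struct inet_frag_queue *q, const void *arg);
void my_frag_expire(unsigned long data);

static struct inet_frags example_frags = {
	.hashfn		  = my_hashfn,
	.match		  = my_match,
	.constructor	  = my_constructor,
	.frag_expire	  = my_frag_expire,
	.qsize		  = sizeof(struct inet_frag_queue),
	.frags_cache_name = "example_frags",
};
/* ... then at boot: if (inet_frags_init(&example_frags)) panic(...); */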

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

static inline int inet_frags_init_net(struct netns_frags *nf)
{
	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}

static inline void inet_frags_uninit_net(struct netns_frags *nf)
{
	percpu_counter_destroy(&nf->mem);
}
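
/* Illustrative sketch, not part of the original header: a protocol's
 * per-netns setup must pair inet_frags_init_net() with
 * inet_frags_uninit_net() on its error paths, otherwise the percpu
 * counter allocated above is leaked; plugging that leak in the
 * callers is what the "net: fix percpu memory leaks" change does.
 * register_sysctls() here is a hypothetical stand-in for the
 * protocol's follow-up setup step.
 */
static inline int example_frags_init_net(struct netns_frags *nf,
					 int (*register_sysctls)(void))
{
	int res = inet_frags_init_net(nf);

	if (res)
		return res;
	res = register_sysctls();
	if (res)
		inet_frags_uninit_net(nf);
	return res;
}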

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}
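
/* Illustrative sketch, not part of the original header: callers get a
 * queue with an elevated refcnt from inet_frag_find(), which may also
 * return NULL or an ERR_PTR when a hash chain grows past
 * INETFRAGS_MAXDEPTH; the reference is dropped with inet_frag_put()
 * when the caller is done. Modelled on IPv4's ip_find(); needs
 * <linux/err.h> for IS_ERR_OR_NULL().
 */
static inline struct inet_frag_queue *
example_frag_lookup(struct netns_frags *nf, struct inet_frags *f,
		    void *key, unsigned int hash)
{
	struct inet_frag_queue *q;

	q = inet_frag_find(nf, f, key, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, "example: ");
		return NULL;
	}
	return q;	/* release with inet_frag_put(q, f) */
}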

static inline bool inet_frag_evicting(struct inet_frag_queue *q)
{
	return !hlist_unhashed(&q->list_evictor);
}

/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation memory accounting sizes. The memory footprint of a
 * 64K fragmented datagram is approximately:
 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
}

static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
	unsigned int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
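
/* Illustrative sketch, not part of the original header: a fragment's
 * truesize is charged when it is queued and released again when it is
 * freed, so that frag_mem_limit() can be checked against the sysctl
 * thresholds in struct netns_frags. Modelled on IPv4's
 * ip_frag_queue(); needs the full struct sk_buff definition from
 * <linux/skbuff.h>.
 */
static inline bool example_charge_fragment(struct netns_frags *nf,
					   struct sk_buff *skb)
{
	if (frag_mem_limit(nf) > nf->high_thresh)
		return false;	/* over limit; caller drops the skb */
	add_frag_mem_limit(nf, skb->truesize);
	return true;		/* sub_frag_mem_limit() when freeing */
}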

/* RFC 3168 support:
 * We want to check the ECN values of all fragments to detect invalid
 * combinations. In ipq->ecn, we store the OR of each fragment's
 * ip4_frag_ecn() value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
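
/* Illustrative sketch, not part of the original header: each fragment
 * ORs one IPFRAG_ECN_* bit into its queue's accumulated mask (see
 * ip4_frag_ecn()), and at reassembly time ip_frag_ecn_table[] folds
 * the mask back into a single ECN codepoint; ip_frag_reasm() drops
 * the datagram when the table yields 0xff, the invalid-combination
 * marker.
 */
static inline int example_fold_ecn(u8 ecn_mask)
{
	u8 ecn = ip_frag_ecn_table[ecn_mask & 0xf];

	return ecn == 0xff ? -1 : ecn;	/* -1: invalid combination */
}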

#endif /* __NET_FRAG_H__ */