/*
 * inet fragment reassembly — shared declarations used by the IPv4, IPv6
 * and netfilter reassembly engines.
 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

/* Per-network-namespace fragment reassembly state. */
struct netns_frags {
	int			nqueues;	/* queues in this netns; updated under lru_lock */
	struct list_head	lru_list;	/* LRU of inet_frag_queue (newest at tail) */
	spinlock_t		lru_lock;	/* protects lru_list and nqueues */

	/* The percpu_counter "mem" need to be cacheline aligned.
	 *  mem.count must not share cacheline with other writers
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;	/* reassembly timeout — presumably jiffies; TODO confirm */
	int			high_thresh;	/* upper mem bound — see inet_frag_evictor() users */
	int			low_thresh;	/* lower mem bound — see inet_frag_evictor() users */
};
21 | ||
/* One in-progress reassembly: the state for a single original datagram
 * being rebuilt from its fragments.
 */
struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;       /* hash chain member */
	atomic_t		refcnt;     /* released via inet_frag_put() */
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;       /* presumably bytes received so far — TODO confirm */
	__u8			last_in;    /* first/last segment arrived? */

/* Flag bits stored in last_in */
#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;   /* NOTE(review): semantics not visible here — confirm at users */

	struct netns_frags	*net;       /* owning namespace state */
};
43 | ||
/* Number of buckets in each per-protocol inet_frags hash table. */
#define INETFRAGS_HASHSZ		64

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
 *	       struct frag_queue))
 */
#define INETFRAGS_MAXDEPTH		128
/* One hash bucket: a chain of inet_frag_queue entries with its own lock. */
struct inet_frag_bucket {
	struct hlist_head	chain;
	spinlock_t		chain_lock;	/* protects chain */
};
/* Per-protocol reassembly descriptor (one instance each for IPv4, IPv6
 * and netfilter), shared by all network namespaces.
 */
struct inet_frags {
	struct inet_frag_bucket	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 * It's primarily a rebuild protection rwlock.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* presumably hash-rebuild period — TODO confirm */
	struct timer_list	secret_timer;
	u32			rnd;			/* presumably a hash perturbation seed — confirm at hashfn users */
	int			qsize;			/* NOTE(review): likely sizeof the protocol's queue struct — confirm */

	/* Protocol-specific hooks operating on inet_frag_queue. */
	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);	/* queue timer callback */
};
78 | ||
/* Register / unregister a protocol's reassembly descriptor. */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Per-namespace setup / teardown of netns_frags state. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
/* Look up (or create) a queue; drops f->lock before returning, per the
 * __releases() annotation below.
 */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
277e650d | 94 | |
762cc408 PE |
95 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
96 | { | |
97 | if (atomic_dec_and_test(&q->refcnt)) | |
98 | inet_frag_destroy(q, f, NULL); | |
99 | } | |
100 | ||
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
/* NOTE(review): "static" in a header gives every including translation
 * unit its own private copy of this variable. Harmless while it is used
 * as a constant, but a single extern definition would be cleaner — TODO
 * confirm intent.
 */
static unsigned int frag_percpu_counter_batch = 130000;
109 | ||
d433673e JDB |
110 | static inline int frag_mem_limit(struct netns_frags *nf) |
111 | { | |
6d7b857d | 112 | return percpu_counter_read(&nf->mem); |
d433673e JDB |
113 | } |
114 | ||
115 | static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) | |
116 | { | |
6d7b857d | 117 | __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); |
d433673e JDB |
118 | } |
119 | ||
120 | static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) | |
121 | { | |
6d7b857d | 122 | __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); |
d433673e JDB |
123 | } |
124 | ||
125 | static inline void init_frag_mem_limit(struct netns_frags *nf) | |
126 | { | |
6d7b857d | 127 | percpu_counter_init(&nf->mem, 0); |
d433673e JDB |
128 | } |
129 | ||
/* Exact (summed across all CPUs) fragment memory usage for @nf.
 *
 * Slower than frag_mem_limit(); BHs are disabled around the sum —
 * presumably because the counter is also updated from softirq context;
 * TODO confirm.
 *
 * NOTE(review): percpu_counter_sum_positive() returns s64; assigning it
 * to an int could truncate a very large total — confirm the value is
 * bounded by high_thresh.
 */
static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}
140 | ||
3ef0eb0d JDB |
141 | static inline void inet_frag_lru_move(struct inet_frag_queue *q) |
142 | { | |
143 | spin_lock(&q->net->lru_lock); | |
144 | list_move_tail(&q->lru_list, &q->net->lru_list); | |
145 | spin_unlock(&q->net->lru_lock); | |
146 | } | |
147 | ||
148 | static inline void inet_frag_lru_del(struct inet_frag_queue *q) | |
149 | { | |
150 | spin_lock(&q->net->lru_lock); | |
151 | list_del(&q->lru_list); | |
1b5ab0de | 152 | q->net->nqueues--; |
3ef0eb0d JDB |
153 | spin_unlock(&q->net->lru_lock); |
154 | } | |
155 | ||
/* Append @q to @nf's LRU list (newest at tail) and bump the queue count.
 *
 * NOTE(review): the lock taken is nf->lru_lock but the counter modified
 * is q->net->nqueues; these are only consistent if q->net == nf here —
 * presumably true for a queue being added to @nf, but confirm at callers.
 */
static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	q->net->nqueues++;
	spin_unlock(&nf->lru_lock);
}
/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

/* Lookup table indexed by the OR of the four flags above (16 entries). */
extern const u8 ip_frag_ecn_table[16];
175 | ||
#endif /* __NET_FRAG_H__ */