Commit | Line | Data |
---|---|---|
5ab11c98 PE |
1 | #ifndef __NET_FRAG_H__ |
2 | #define __NET_FRAG_H__ | |
3 | ||
6d7b857d JDB |
4 | #include <linux/percpu_counter.h> |
5 | ||
/* Per-network-namespace state for IP fragment reassembly:
 * queue accounting, the LRU used for eviction, the percpu memory
 * counter, and the per-netns sysctl knobs.
 */
struct netns_frags {
	int			nqueues;	/* number of frag queues in this netns */
	struct list_head	lru_list;	/* LRU of inet_frag_queue (eviction order) */
	spinlock_t		lru_lock;	/* protects lru_list */

	/* The percpu_counter "mem" need to be cacheline aligned.
	 *  mem.count must not share cacheline with other writers
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;	/* reassembly timeout */
	int			high_thresh;	/* memory limit triggering eviction */
	int			low_thresh;	/* eviction target */
};
21 | ||
/* One in-progress reassembly: the fragments received so far for a
 * single original datagram, plus bookkeeping.  Protocol-specific queue
 * structs (IPv4/IPv6/netfilter) embed this as their first member.
 */
struct inet_frag_queue {
	spinlock_t		lock;		/* protects this queue */
	struct timer_list	timer;		/* when will this queue expire? */
	struct list_head	lru_list;	/* lru list member */
	struct hlist_node	list;		/* hash chain in inet_frags::hash */
	atomic_t		refcnt;		/* dropped via inet_frag_put() */
	struct sk_buff		*fragments;	/* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;		/* total length of orig datagram */
	int			meat;		/* presumably bytes received so far;
						 * complete when meat == len — confirm
						 * against protocol reassembly code */
	__u8			last_in;	/* first/last segment arrived? */

/* Bit flags for last_in: */
#define INET_FRAG_COMPLETE	4	/* queue finished or killed */
#define INET_FRAG_FIRST_IN	2	/* first fragment has arrived */
#define INET_FRAG_LAST_IN	1	/* last fragment has arrived */

	u16			max_size;	/* largest fragment size seen */

	struct netns_frags	*net;		/* owning namespace state */
};
43 | ||
#define INETFRAGS_HASHSZ		64

/* Per-protocol reassembly descriptor (one instance each for IPv4, IPv6
 * and netfilter): the global hash of frag queues plus the callbacks
 * that specialise generic inet_frag_* code for the protocol.
 */
struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;	/* rehash period */
	struct timer_list	secret_timer;		/* drives periodic rehash */
	u32			rnd;			/* hash seed */
	int			qsize;			/* size of protocol's queue struct */

	/* Protocol-supplied callbacks: */
	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);	/* timer handler */
};
65 | ||
/* Register/unregister a protocol's inet_frags descriptor. */
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Set up / tear down per-namespace fragment state. */
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

/* Remove @q from hash and LRU; stops its expiry timer. */
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
/* Free @q and its fragments; called when the last reference drops. */
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
/* Reclaim queues until under the memory threshold (always if @force). */
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
/* Look up (or create) the queue matching @key; caller holds f->lock,
 * which this drops — hence the sparse __releases annotation.
 */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);
762cc408 PE |
80 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
81 | { | |
82 | if (atomic_dec_and_test(&q->refcnt)) | |
83 | inet_frag_destroy(q, f, NULL); | |
84 | } | |
85 | ||
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation mem acct sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 *
 * Declared const: this lives in a header, so every translation unit
 * gets its own copy; a writable copy per TU serves no purpose (nothing
 * modifies it) and const lets the compiler fold it into the callers.
 */
static const unsigned int frag_percpu_counter_batch = 130000;
d433673e JDB |
95 | static inline int frag_mem_limit(struct netns_frags *nf) |
96 | { | |
6d7b857d | 97 | return percpu_counter_read(&nf->mem); |
d433673e JDB |
98 | } |
99 | ||
100 | static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) | |
101 | { | |
6d7b857d | 102 | __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); |
d433673e JDB |
103 | } |
104 | ||
105 | static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) | |
106 | { | |
6d7b857d | 107 | __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); |
d433673e JDB |
108 | } |
109 | ||
110 | static inline void init_frag_mem_limit(struct netns_frags *nf) | |
111 | { | |
6d7b857d | 112 | percpu_counter_init(&nf->mem, 0); |
d433673e JDB |
113 | } |
114 | ||
115 | static inline int sum_frag_mem_limit(struct netns_frags *nf) | |
116 | { | |
6d7b857d | 117 | return percpu_counter_sum_positive(&nf->mem); |
d433673e JDB |
118 | } |
119 | ||
3ef0eb0d JDB |
120 | static inline void inet_frag_lru_move(struct inet_frag_queue *q) |
121 | { | |
122 | spin_lock(&q->net->lru_lock); | |
123 | list_move_tail(&q->lru_list, &q->net->lru_list); | |
124 | spin_unlock(&q->net->lru_lock); | |
125 | } | |
126 | ||
127 | static inline void inet_frag_lru_del(struct inet_frag_queue *q) | |
128 | { | |
129 | spin_lock(&q->net->lru_lock); | |
130 | list_del(&q->lru_list); | |
131 | spin_unlock(&q->net->lru_lock); | |
132 | } | |
133 | ||
134 | static inline void inet_frag_lru_add(struct netns_frags *nf, | |
135 | struct inet_frag_queue *q) | |
136 | { | |
137 | spin_lock(&nf->lru_lock); | |
138 | list_add_tail(&q->lru_list, &nf->lru_list); | |
139 | spin_unlock(&nf->lru_lock); | |
140 | } | |
5ab11c98 | 141 | #endif |