/*
 * ip_vs_est.c: simple rate estimator for IPVS
 *
 * Version:     $Id: ip_vs_est.c,v 1.4 2002/11/30 01:50:35 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>

/*
  This code estimates the rate over a short interval (such as 8
  seconds) for virtual services and real servers.  To measure the rate
  over a long interval, it is easy to implement a user-level daemon
  that periodically reads the statistical counters and computes the
  rate.

  Currently, the measurement is driven by a slow timer handler.
  Hopefully this does not introduce too much load.

  Every 2 seconds we update an average over roughly the last 8 seconds:

    avgrate = avgrate*(1-W) + rate*W

    where W = 2^(-2)

  NOTES.

  * The stored value for average bps is scaled by 2^5, so that the
    maximal rate is ~2.15Gbits/s; average pps and cps are scaled by
    2^10.

  * A lot of code is taken from net/sched/estimator.c
 */
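/*
 * Worked example of the fixed-point update used below (illustrative
 * numbers only).  With W = 1/4, the EWMA step
 *
 *     avgrate = avgrate*(1-W) + rate*W
 *
 * is computed as
 *
 *     avg += (rate - avg) >> 2;
 *
 * For connections: suppose 100 connections arrive in each 2-second
 * interval, so the scaled instantaneous rate is (100 << 9) = 51200,
 * i.e. 50 conns/s scaled by 2^10.  Starting from avg = 0:
 *
 *     avg += (51200 - 0) >> 2;        avg = 12800
 *     avg += (51200 - 12800) >> 2;    avg = 22400
 *     ...
 *
 * converging towards 51200 while the load stays constant.  The
 * reader-visible value is descaled with rounding, (avg + 0x1FF) >> 10,
 * so after two ticks the reported rate is 22 conns/s.
 */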


struct ip_vs_estimator
{
        struct ip_vs_estimator  *next;
        struct ip_vs_stats      *stats;

        u32                     last_conns;
        u32                     last_inpkts;
        u32                     last_outpkts;
        u64                     last_inbytes;
        u64                     last_outbytes;

        u32                     cps;
        u32                     inpps;
        u32                     outpps;
        u32                     inbps;
        u32                     outbps;
};


static struct ip_vs_estimator *est_list = NULL;
static DEFINE_RWLOCK(est_lock);
static struct timer_list est_timer;

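/*
 * estimation_timer - periodic estimator update
 *
 * Runs every 2 seconds.  For each registered estimator it samples the
 * raw counters in the attached ip_vs_stats, feeds the per-interval
 * deltas into the scaled moving averages, and writes the rounded,
 * descaled results back into the stats structure for readers.
 */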
static void estimation_timer(unsigned long arg)
{
        struct ip_vs_estimator *e;
        struct ip_vs_stats *s;
        u32 n_conns;
        u32 n_inpkts, n_outpkts;
        u64 n_inbytes, n_outbytes;
        u32 rate;

        read_lock(&est_lock);
        for (e = est_list; e; e = e->next) {
                s = e->stats;

                spin_lock(&s->lock);
                n_conns = s->conns;
                n_inpkts = s->inpkts;
                n_outpkts = s->outpkts;
                n_inbytes = s->inbytes;
                n_outbytes = s->outbytes;

                /* conns/pkts are scaled by 2^10, but the delta covers
                   2 seconds, hence the shift by 9 */
                rate = (n_conns - e->last_conns)<<9;
                e->last_conns = n_conns;
                e->cps += ((long)rate - (long)e->cps)>>2;
                s->cps = (e->cps+0x1FF)>>10;

                rate = (n_inpkts - e->last_inpkts)<<9;
                e->last_inpkts = n_inpkts;
                e->inpps += ((long)rate - (long)e->inpps)>>2;
                s->inpps = (e->inpps+0x1FF)>>10;

                rate = (n_outpkts - e->last_outpkts)<<9;
                e->last_outpkts = n_outpkts;
                e->outpps += ((long)rate - (long)e->outpps)>>2;
                s->outpps = (e->outpps+0x1FF)>>10;

                /* bytes are scaled by 2^5 (<<4 here, over 2 seconds) */
                rate = (n_inbytes - e->last_inbytes)<<4;
                e->last_inbytes = n_inbytes;
                e->inbps += ((long)rate - (long)e->inbps)>>2;
                s->inbps = (e->inbps+0xF)>>5;

                rate = (n_outbytes - e->last_outbytes)<<4;
                e->last_outbytes = n_outbytes;
                e->outbps += ((long)rate - (long)e->outbps)>>2;
                s->outbps = (e->outbps+0xF)>>5;
                spin_unlock(&s->lock);
        }
        read_unlock(&est_lock);
        mod_timer(&est_timer, jiffies + 2*HZ);
}

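/*
 * ip_vs_new_estimator - attach a rate estimator to a stats block
 *
 * Allocates an estimator, seeds it from the current counters and rates
 * in @stats (scaling the rates up), and links it onto est_list.  The
 * estimation timer is started when the first estimator is added.
 */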
int ip_vs_new_estimator(struct ip_vs_stats *stats)
{
        struct ip_vs_estimator *est;

        est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (est == NULL)
                return -ENOMEM;

        est->stats = stats;
        est->last_conns = stats->conns;
        est->cps = stats->cps<<10;

        est->last_inpkts = stats->inpkts;
        est->inpps = stats->inpps<<10;

        est->last_outpkts = stats->outpkts;
        est->outpps = stats->outpps<<10;

        est->last_inbytes = stats->inbytes;
        est->inbps = stats->inbps<<5;

        est->last_outbytes = stats->outbytes;
        est->outbps = stats->outbps<<5;

        write_lock_bh(&est_lock);
        est->next = est_list;
        if (est->next == NULL) {
                setup_timer(&est_timer, estimation_timer, 0);
                est_timer.expires = jiffies + 2*HZ;
                add_timer(&est_timer);
        }
        est_list = est;
        write_unlock_bh(&est_lock);
        return 0;
}

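/*
 * ip_vs_kill_estimator - detach and free the estimator(s) for a stats block
 *
 * Removes every estimator whose stats pointer matches @stats from
 * est_list and frees it.  When the list becomes empty, the estimation
 * timer is stopped.
 */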
void ip_vs_kill_estimator(struct ip_vs_stats *stats)
{
        struct ip_vs_estimator *est, **pest;
        int killed = 0;

        write_lock_bh(&est_lock);
        pest = &est_list;
        while ((est=*pest) != NULL) {
                if (est->stats != stats) {
                        pest = &est->next;
                        continue;
                }
                *pest = est->next;
                kfree(est);
                killed++;
        }
        if (killed && est_list == NULL)
                del_timer_sync(&est_timer);
        write_unlock_bh(&est_lock);
}

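/*
 * ip_vs_zero_estimator - reset the estimator state for a stats block
 *
 * Clears the remembered counter snapshots and the scaled averages so
 * that the estimate restarts from zero, typically after the stats
 * counters themselves have been zeroed.
 */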
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
        struct ip_vs_estimator *e;

        write_lock_bh(&est_lock);
        for (e = est_list; e; e = e->next) {
                if (e->stats != stats)
                        continue;

                /* set counters to zero */
                e->last_conns = 0;
                e->last_inpkts = 0;
                e->last_outpkts = 0;
                e->last_inbytes = 0;
                e->last_outbytes = 0;
                e->cps = 0;
                e->inpps = 0;
                e->outpps = 0;
                e->inbps = 0;
                e->outbps = 0;
        }
        write_unlock_bh(&est_lock);
}
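
/*
 * Typical usage, sketched for illustration only (the real call sites
 * live in the IPVS control code, e.g. ip_vs_ctl.c; "svc" below is a
 * hypothetical virtual-service object owning an ip_vs_stats):
 *
 *      struct ip_vs_stats *stats = &svc->stats;
 *
 *      ip_vs_new_estimator(stats);     // when the service is created
 *      ...
 *      ip_vs_zero_estimator(stats);    // when its counters are zeroed
 *      ...
 *      ip_vs_kill_estimator(stats);    // when the service is removed
 */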