/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

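/* Timeout handler: the expectation was never matched in time, so take the
 * expect lock, unlink it and drop the timer's reference.
 */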
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}

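/* Hash the destination part of a tuple into a bucket of the global
 * expectation hash table.
 */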
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd))
		init_nf_conntrack_hash_rnd();

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

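/* Find a matching expectation without taking a reference; the caller
 * must hold rcu_read_lock().
 */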
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from the
 * global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (i.e. the packet hasn't
	   left this machine yet), how can the other end know about the
	   expected connection? Hence these are not the droids you are
	   looking for (if the master ct never got confirmed, we'd hold a
	   reference to it and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with another CPU that, for the exp->master ct, is
	 * about to invoke ->destroy() or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells us: if it fails, we know
	 * the ct is being destroyed. If it succeeds, we can be sure the
	 * ct cannot disappear underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo the exp->master refcnt increase if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The part covered by the intersection of the masks must be unequal,
	   otherwise the two expectations clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

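/* Fill in the tuple and mask of an expectation from the given family,
 * addresses, protocol and ports; a NULL saddr or src acts as a wildcard.
 */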
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

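/* Drop one reference; the expectation is freed via RCU once the last
 * reference is gone.
 */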
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

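/* Link a new expectation into its master's list and the global hash table
 * and arm its timeout timer; called with nf_conntrack_expect_lock held.
 */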
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references: one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

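/* Check a new expectation against the existing ones: remove an identical
 * existing expectation so the new one can replace it, reject clashing ones,
 * and enforce the per-helper and global limits; called with
 * nf_conntrack_expect_lock held.
 */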
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

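/* /proc/net/nf_conntrack_expect: seq_file iteration over the expectation
 * hash table under RCU.
 */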
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}

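/* Module init: pick defaults for the expectation hash size and the global
 * expectation limit, and create the expectation slab cache.
 */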
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
						sizeof(struct nf_conntrack_expect),
						0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}