inet_diag: Partly rename inet_ to sock_
net/ipv4/inet_diag.c
/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

static struct sock *sdiagnl;

#define INET_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

static DEFINE_MUTEX(inet_diag_table_mutex);

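/*
 * Look up the diag handler for a request type, first trying to autoload
 * the protocol module through its netlink sock_diag module alias.  The
 * table mutex is taken here and stays held until
 * inet_diag_unlock_handler() runs, including when ERR_PTR(-ENOENT) is
 * returned.
 */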
static const struct inet_diag_handler *inet_diag_lock_handler(int type)
{
	if (!inet_diag_table[type])
		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, type);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[type])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[type];
}

static inline void inet_diag_unlock_handler(
		const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

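/*
 * Fill one inet_diag_msg for a full (non TIME_WAIT) socket: the socket
 * id (ports, addresses, bound device, kernel cookie), the requested
 * optional attributes (MEMINFO, INFO, CONG, TOS/TCLASS), the pending
 * timer state, uid/inode, and the protocol specific info supplied by
 * the registered handler and the congestion control module.
 */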
static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      int ext, u32 pid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	void *info = NULL;
	struct inet_diag_meminfo *minfo = NULL;
	unsigned char *b = skb_tail_pointer(skb);
	const struct inet_diag_handler *handler;

	handler = inet_diag_table[unlh->nlmsg_type];
	BUG_ON(handler == NULL);

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = nlmsg_flags;

	r = NLMSG_DATA(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));

	if (ext & (1 << (INET_DIAG_INFO - 1)))
		info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
				     handler->idiag_info_size);

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
		const size_t len = strlen(icsk->icsk_ca_ops->name);

		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
		       icsk->icsk_ca_ops->name);
	}

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
	}
#endif

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (minfo) {
		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
		minfo->idiag_wmem = sk->sk_wmem_queued;
		minfo->idiag_fmem = sk->sk_forward_alloc;
		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
	}

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

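/*
 * Same as above, but for TIME_WAIT sockets, which carry much less
 * state: idiag_expires reports the time left until the timewait timer
 * fires, and timer type 3 marks the record as a timewait entry.
 */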
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, int ext, u32 pid,
			       u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	const unsigned char *previous_tail = skb_tail_pointer(skb);
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
					 unlh->nlmsg_type, sizeof(*r));

	r = NLMSG_DATA(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	nlh->nlmsg_flags = nlmsg_flags;

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;
	r->id.idiag_if = tw->tw_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
	return skb->len;
nlmsg_failure:
	nlmsg_trim(skb, previous_tail);
	return -EMSGSIZE;
}

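/* Dispatch on socket state: TIME_WAIT pseudo-sockets get the reduced fill. */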
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			int ext, u32 pid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
					   skb, ext, pid, seq, nlmsg_flags,
					   unlh);
	return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
}

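/*
 * Answer a non-dump request for a single socket: look it up by the
 * address/port four-tuple in the handler's hash tables, check the
 * cookie supplied in the request against the socket (unless
 * INET_DIAG_NOCOOKIE was given), fill a reply and unicast it back to
 * the requester.
 */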
static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh)
{
	int err;
	struct sock *sk;
	struct inet_diag_req *req = NLMSG_DATA(nlh);
	struct sk_buff *rep;
	struct inet_hashinfo *hashinfo;
	const struct inet_diag_handler *handler;

	handler = inet_diag_lock_handler(nlh->nlmsg_type);
	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		goto unlock;
	}

	hashinfo = handler->idiag_hashinfo;
	err = -EINVAL;

	if (req->idiag_family == AF_INET) {
		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (req->idiag_family == AF_INET6) {
		sk = inet6_lookup(&init_net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto unlock;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto unlock;

	err = -ESTALE;
	if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
	     req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
	    ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
	     (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
		goto out;

	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
				     sizeof(struct inet_diag_meminfo) +
				     handler->idiag_info_size + 64)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req->idiag_ext,
			   NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(sdiagnl, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
unlock:
	inet_diag_unlock_handler(handler);
	return err;
}

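/*
 * Return 1 if the first @bits bits of the two network-byte-order
 * address arrays are equal, i.e. @a1 falls within the @a2/@bits prefix.
 */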
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

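/*
 * Interpret the filter bytecode supplied by userspace against one
 * socket.  Each instruction starts with an inet_diag_bc_op (roughly
 * { unsigned char code; unsigned char yes; unsigned short no; } as
 * declared in <linux/inet_diag.h>); "yes" and "no" are forward byte
 * offsets taken when the condition holds or fails.  Host conditions
 * carry an inet_diag_hostcond payload right after the op, and IPv4
 * prefixes are also matched against v4-mapped IPv6 addresses.  A socket
 * is accepted when execution runs exactly off the end of the program
 * (len == 0); jumping past the end rejects it.
 */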
static int inet_diag_bc_run(const void *bc, int len,
			    const struct inet_diag_entry *entry)
{
	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			if (entry->family == AF_INET6 &&
			    cond->family == AF_INET) {
				if (addr[0] == 0 && addr[1] == 0 &&
				    addr[2] == htonl(0xffff) &&
				    bitstring_match(addr + 3, cond->addr,
						    cond->prefix_len))
					break;
			}
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

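/*
 * Sanity-check bytecode received from userspace before running it:
 * every opcode must be known, and every "yes"/"no" offset must be at
 * least 4 bytes, a multiple of 4 and, when it targets a spot inside the
 * program, land on an instruction boundary (checked by valid_cc()), so
 * the interpreter above can neither loop forever nor read past the end
 * of the buffer.
 */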
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc += op->yes;
	}
	return 0;
}

static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;

		//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
		case INET_DIAG_BC_JMP:
			if (op->no < 4 || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
			break;
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}
		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

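/*
 * Dump one full socket: if the request carries bytecode, build an
 * inet_diag_entry from the socket's addresses and ports and run the
 * filter first; sockets that do not match are silently skipped.
 */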
static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
		struct inet_diag_entry entry;
		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
							  sizeof(*r),
							  INET_DIAG_REQ_BYTECODE);
		struct inet_sock *inet = inet_sk(sk);

		entry.family = sk->sk_family;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (entry.family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			entry.saddr = np->rcv_saddr.s6_addr32;
			entry.daddr = np->daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &inet->inet_rcv_saddr;
			entry.daddr = &inet->inet_daddr;
		}
		entry.sport = inet->inet_num;
		entry.dport = ntohs(inet->inet_dport);
		entry.userlocks = sk->sk_userlocks;

		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
			return 0;
	}

	return inet_csk_diag_fill(sk, skb, r->idiag_ext,
				  NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

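/* As above, but for TIME_WAIT sockets (which carry no userlocks). */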
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
		struct inet_diag_entry entry;
		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
							  sizeof(*r),
							  INET_DIAG_REQ_BYTECODE);

		entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);
			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
				   NETLINK_CB(cb->skb).pid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

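/*
 * Report one pending connection request (SYN_RECV) hanging off a
 * listening socket: its addresses, retransmit count and the time left
 * on the SYN-ACK retransmit timer.
 */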
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req, u32 pid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	unsigned char *b = skb_tail_pointer(skb);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = NLM_F_MULTI;
	r = NLMSG_DATA(nlh);

	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)req;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->rmt_port;
	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	if (r->idiag_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

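/*
 * Walk the SYN queue of one listening socket under syn_wait_lock and
 * emit a record for each matching request.  cb->args[3] and cb->args[4]
 * remember the hash slot and position within its chain so a partial
 * dump can be resumed on the next netlink callback.
 */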
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb)
{
	struct inet_diag_entry entry;
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	const struct nlattr *bc = NULL;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
				     INET_DIAG_REQ_BYTECODE);
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				entry.saddr =
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
					&ireq->loc_addr;
				entry.daddr =
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
					&ireq->rmt_addr;
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(nla_data(bc),
						      nla_len(bc), &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
						 NETLINK_CB(cb->skb).pid,
						 cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

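/*
 * Netlink dump callback: walk the listening hash (for LISTEN/SYN_RECV
 * states, including each listener's request queue) and then the
 * established hash together with its timewait chain, filtering on the
 * requested states and ports.  Progress is checkpointed in cb->args[]
 * (phase, bucket, position) so the dump can continue across skbs.
 */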
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, num;
	int s_i, s_num;
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	const struct inet_diag_handler *handler;
	struct inet_hashinfo *hashinfo;

	handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
	if (IS_ERR(handler))
		goto unlock;

	hashinfo = handler->idiag_hashinfo;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto unlock;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain) &&
		    hlist_nulls_empty(&head->twchain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		if (r->idiag_states & TCPF_TIME_WAIT) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
				    &head->twchain) {

				if (num < s_num)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
					spin_unlock_bh(lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
unlock:
	inet_diag_unlock_handler(handler);
	return skb->len;
}

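/*
 * Entry point for messages arriving on the NETLINK_SOCK_DIAG socket:
 * validate the header, audit any attached bytecode, then either start a
 * dump or answer an exact one-socket query.  sock_diag_rcv() serialises
 * incoming requests with sock_diag_mutex.
 */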
static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}

		return netlink_dump_start(sdiagnl, skb, nlh,
					  inet_diag_dump, NULL, 0);
	}

	return inet_diag_get_exact(skb, nlh);
}

static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return inet_diag_rcv_msg(skb, nlh);
}

static DEFINE_MUTEX(sock_diag_mutex);

static void sock_diag_rcv(struct sk_buff *skb)
{
	mutex_lock(&sock_diag_mutex);
	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
	mutex_unlock(&sock_diag_mutex);
}

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= INET_DIAG_GETSOCK_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= INET_DIAG_GETSOCK_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

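/*
 * Module init/exit: allocate the per-type handler table and create the
 * NETLINK_SOCK_DIAG kernel socket serviced by sock_diag_rcv().
 */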
static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	sdiagnl = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
					sock_diag_rcv, NULL, THIS_MODULE);
	if (sdiagnl == NULL)
		goto out_free_table;
	err = 0;
out:
	return err;
out_free_table:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	netlink_kernel_release(sdiagnl);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);