staging: lustre: Ignore hops if not explicitly set
drivers/staging/lustre/lnet/lnet/router_proc.c
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * This file is part of Portals
 * http://sourceforge.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/lib-lnet.h"

/*
 * This is really lnet_proc.c. You might need to update sanity test 215
 * if any file format is changed.
 */

#define LNET_LOFFT_BITS         (sizeof(loff_t) * 8)
/*
 * NB: max allowed LNET_CPT_BITS is 8 on a 64-bit system and 2 on a
 * 32-bit system
 */
#define LNET_PROC_CPT_BITS      (LNET_CPT_BITS + 1)
/* change version, 16 bits or 8 bits */
#define LNET_PROC_VER_BITS      max_t(size_t, min_t(size_t, LNET_LOFFT_BITS, 64) / 4, 8)

#define LNET_PROC_HASH_BITS     LNET_PEER_HASH_BITS
/*
 * bits for peer hash offset
 * NB: we don't use the highest bit of *ppos because it's signed
 */
#define LNET_PROC_HOFF_BITS     (LNET_LOFFT_BITS -      \
                                 LNET_PROC_CPT_BITS -   \
                                 LNET_PROC_VER_BITS -   \
                                 LNET_PROC_HASH_BITS - 1)
/* bits for hash index + position */
#define LNET_PROC_HPOS_BITS     (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS)
/* bits for peer hash table + hash version */
#define LNET_PROC_VPOS_BITS     (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS)

#define LNET_PROC_CPT_MASK      ((1ULL << LNET_PROC_CPT_BITS) - 1)
#define LNET_PROC_VER_MASK      ((1ULL << LNET_PROC_VER_BITS) - 1)
#define LNET_PROC_HASH_MASK     ((1ULL << LNET_PROC_HASH_BITS) - 1)
#define LNET_PROC_HOFF_MASK     ((1ULL << LNET_PROC_HOFF_BITS) - 1)

#define LNET_PROC_CPT_GET(pos)  \
        (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK)

#define LNET_PROC_VER_GET(pos)  \
        (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK)

#define LNET_PROC_HASH_GET(pos) \
        (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK)

#define LNET_PROC_HOFF_GET(pos) \
        (int)((pos) & LNET_PROC_HOFF_MASK)

#define LNET_PROC_POS_MAKE(cpt, ver, hash, off)                             \
        (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) |  \
         ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) |  \
         ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \
         ((off) & LNET_PROC_HOFF_MASK))
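
/*
 * Layout of the packed position: LNET_PROC_POS_MAKE() stores the CPT number,
 * the table version, the peer hash bucket and the offset within that bucket
 * in *ppos, from the most significant bits downwards (the top bit is left
 * unused because loff_t is signed).  The LNET_PROC_*_GET() macros above
 * recover each field by shifting with the *_BITS constants and masking with
 * the matching *_MASK values.
 */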

#define LNET_PROC_VERSION(v)    ((unsigned int)((v) & LNET_PROC_VER_MASK))

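/*
 * Adapt the simple "read @len bytes at @pos" handlers below to the
 * ctl_table ->proc_handler calling convention: on success the wrapper
 * advances *ppos and, for reads, trims *lenp to the number of bytes
 * actually produced.
 */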
static int proc_call_handler(void *data, int write, loff_t *ppos,
                             void __user *buffer, size_t *lenp,
                             int (*handler)(void *data, int write,
                                            loff_t pos, void __user *buffer,
                                            int len))
{
        int rc = handler(data, write, *ppos, buffer, *lenp);

        if (rc < 0)
                return rc;

        if (write) {
                *ppos += *lenp;
        } else {
                *lenp = rc;
                *ppos += rc;
        }
        return 0;
}

static int __proc_lnet_stats(void *data, int write,
                             loff_t pos, void __user *buffer, int nob)
{
        int rc;
        lnet_counters_t *ctrs;
        int len;
        char *tmpstr;
        const int tmpsiz = 256; /* 7 %u and 4 %llu */

        if (write) {
                lnet_counters_reset();
                return 0;
        }

        /* read */

        LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
        if (!ctrs)
                return -ENOMEM;

        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (!tmpstr) {
                LIBCFS_FREE(ctrs, sizeof(*ctrs));
                return -ENOMEM;
        }

        lnet_counters_get(ctrs);

        len = snprintf(tmpstr, tmpsiz,
                       "%u %u %u %u %u %u %u %llu %llu %llu %llu",
                       ctrs->msgs_alloc, ctrs->msgs_max,
                       ctrs->errors,
                       ctrs->send_count, ctrs->recv_count,
                       ctrs->route_count, ctrs->drop_count,
                       ctrs->send_length, ctrs->recv_length,
                       ctrs->route_length, ctrs->drop_length);

        if (pos >= min_t(int, len, strlen(tmpstr)))
                rc = 0;
        else
                rc = cfs_trace_copyout_string(buffer, nob,
                                              tmpstr + pos, "\n");

        LIBCFS_FREE(tmpstr, tmpsiz);
        LIBCFS_FREE(ctrs, sizeof(*ctrs));
        return rc;
}

static int proc_lnet_stats(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_call_handler(table->data, write, ppos, buffer, lenp,
                                 __proc_lnet_stats);
}

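/*
 * The route/router/peer handlers below do not hold any LNet lock between
 * read() calls.  Instead they encode a table version and a walk offset into
 * *ppos: the first read emits a header line and records the current version,
 * later reads skip ahead to the saved offset, and -ESTALE is returned if the
 * table version changed in between.
 */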
static int proc_lnet_routes(struct ctl_table *table, int write,
                            void __user *buffer, size_t *lenp, loff_t *ppos)
{
        const int tmpsiz = 256;
        char *tmpstr;
        char *s;
        int rc = 0;
        int len;
        int ver;
        int off;

        CLASSERT(sizeof(loff_t) >= 4);

        off = LNET_PROC_HOFF_GET(*ppos);
        ver = LNET_PROC_VER_GET(*ppos);

        LASSERT(!write);

        if (!*lenp)
                return 0;

        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (!tmpstr)
                return -ENOMEM;

        s = tmpstr; /* points to current position in tmpstr[] */

        if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
                              the_lnet.ln_routing ? "enabled" : "disabled");
                LASSERT(tmpstr + tmpsiz - s > 0);

                s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n",
                              "net", "hops", "priority", "state", "router");
                LASSERT(tmpstr + tmpsiz - s > 0);

                lnet_net_lock(0);
                ver = (unsigned int)the_lnet.ln_remote_nets_version;
                lnet_net_unlock(0);
                *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
        } else {
                struct list_head *n;
                struct list_head *r;
                lnet_route_t *route = NULL;
                lnet_remotenet_t *rnet = NULL;
                int skip = off - 1;
                struct list_head *rn_list;
                int i;

                lnet_net_lock(0);

                if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
                        lnet_net_unlock(0);
                        LIBCFS_FREE(tmpstr, tmpsiz);
                        return -ESTALE;
                }

                for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) {
                        rn_list = &the_lnet.ln_remote_nets_hash[i];

                        n = rn_list->next;

                        while (n != rn_list && !route) {
                                rnet = list_entry(n, lnet_remotenet_t,
                                                  lrn_list);

                                r = rnet->lrn_routes.next;

                                while (r != &rnet->lrn_routes) {
                                        lnet_route_t *re =
                                                list_entry(r, lnet_route_t,
                                                           lr_list);
                                        if (!skip) {
                                                route = re;
                                                break;
                                        }

                                        skip--;
                                        r = r->next;
                                }

                                n = n->next;
                        }
                }

                if (route) {
                        __u32 net = rnet->lrn_net;
                        __u32 hops = route->lr_hops;
                        unsigned int priority = route->lr_priority;
                        lnet_nid_t nid = route->lr_gateway->lp_nid;
                        int alive = lnet_is_route_alive(route);

                        s += snprintf(s, tmpstr + tmpsiz - s,
                                      "%-8s %4u %8u %7s %s\n",
                                      libcfs_net2str(net), hops,
                                      priority,
                                      alive ? "up" : "down",
                                      libcfs_nid2str(nid));
                        LASSERT(tmpstr + tmpsiz - s > 0);
                }

                lnet_net_unlock(0);
        }

        len = s - tmpstr; /* how many bytes were written */

        if (len > *lenp) { /* Linux-supplied buffer is too small */
                rc = -EINVAL;
        } else if (len > 0) { /* wrote something */
                if (copy_to_user(buffer, tmpstr, len)) {
                        rc = -EFAULT;
                } else {
                        off += 1;
                        *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
                }
        }

        LIBCFS_FREE(tmpstr, tmpsiz);

        if (!rc)
                *lenp = len;

        return rc;
}

static int proc_lnet_routers(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int rc = 0;
        char *tmpstr;
        char *s;
        const int tmpsiz = 256;
        int len;
        int ver;
        int off;

        off = LNET_PROC_HOFF_GET(*ppos);
        ver = LNET_PROC_VER_GET(*ppos);

        LASSERT(!write);

        if (!*lenp)
                return 0;

        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (!tmpstr)
                return -ENOMEM;

        s = tmpstr; /* points to current position in tmpstr[] */

        if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s,
                              "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
                              "ref", "rtr_ref", "alive_cnt", "state",
                              "last_ping", "ping_sent", "deadline",
                              "down_ni", "router");
                LASSERT(tmpstr + tmpsiz - s > 0);

                lnet_net_lock(0);
                ver = (unsigned int)the_lnet.ln_routers_version;
                lnet_net_unlock(0);
                *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
        } else {
                struct list_head *r;
                struct lnet_peer *peer = NULL;
                int skip = off - 1;

                lnet_net_lock(0);

                if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
                        lnet_net_unlock(0);

                        LIBCFS_FREE(tmpstr, tmpsiz);
                        return -ESTALE;
                }

                r = the_lnet.ln_routers.next;

                while (r != &the_lnet.ln_routers) {
                        lnet_peer_t *lp = list_entry(r, lnet_peer_t,
                                                     lp_rtr_list);

                        if (!skip) {
                                peer = lp;
                                break;
                        }

                        skip--;
                        r = r->next;
                }

                if (peer) {
                        lnet_nid_t nid = peer->lp_nid;
                        unsigned long now = cfs_time_current();
                        unsigned long deadline = peer->lp_ping_deadline;
                        int nrefs = peer->lp_refcount;
                        int nrtrrefs = peer->lp_rtr_refcount;
                        int alive_cnt = peer->lp_alive_count;
                        int alive = peer->lp_alive;
                        int pingsent = !peer->lp_ping_notsent;
                        int last_ping = cfs_duration_sec(cfs_time_sub(now,
                                                peer->lp_ping_timestamp));
                        int down_ni = 0;
                        lnet_route_t *rtr;

                        if ((peer->lp_ping_feats &
                             LNET_PING_FEAT_NI_STATUS)) {
                                list_for_each_entry(rtr, &peer->lp_routes,
                                                    lr_gwlist) {
                                        /*
                                         * downis on any route should be the
                                         * number of downis on the gateway
                                         */
                                        if (rtr->lr_downis) {
                                                down_ni = rtr->lr_downis;
                                                break;
                                        }
                                }
                        }

                        if (!deadline)
                                s += snprintf(s, tmpstr + tmpsiz - s,
                                              "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
                                              nrefs, nrtrrefs, alive_cnt,
                                              alive ? "up" : "down", last_ping,
                                              pingsent, "NA", down_ni,
                                              libcfs_nid2str(nid));
                        else
                                s += snprintf(s, tmpstr + tmpsiz - s,
                                              "%-4d %7d %9d %6s %12d %9d %8lu %7d %s\n",
                                              nrefs, nrtrrefs, alive_cnt,
                                              alive ? "up" : "down", last_ping,
                                              pingsent,
                                              cfs_duration_sec(cfs_time_sub(deadline, now)),
                                              down_ni, libcfs_nid2str(nid));
                        LASSERT(tmpstr + tmpsiz - s > 0);
                }

                lnet_net_unlock(0);
        }

        len = s - tmpstr; /* how many bytes were written */

        if (len > *lenp) { /* Linux-supplied buffer is too small */
                rc = -EINVAL;
        } else if (len > 0) { /* wrote something */
                if (copy_to_user(buffer, tmpstr, len)) {
                        rc = -EFAULT;
                } else {
                        off += 1;
                        *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
                }
        }

        LIBCFS_FREE(tmpstr, tmpsiz);

        if (!rc)
                *lenp = len;

        return rc;
}

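/*
 * Unlike the routes/routers walks, the peer walk also has to remember which
 * CPT-partitioned peer table and which hash bucket it stopped in, so *ppos
 * carries the cpt and hash fields of LNET_PROC_POS_MAKE() in addition to the
 * version and the offset within the bucket.
 */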
static int proc_lnet_peers(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
{
        const int tmpsiz = 256;
        struct lnet_peer_table *ptable;
        char *tmpstr;
        char *s;
        int cpt = LNET_PROC_CPT_GET(*ppos);
        int ver = LNET_PROC_VER_GET(*ppos);
        int hash = LNET_PROC_HASH_GET(*ppos);
        int hoff = LNET_PROC_HOFF_GET(*ppos);
        int rc = 0;
        int len;

        CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
        LASSERT(!write);

        if (!*lenp)
                return 0;

        if (cpt >= LNET_CPT_NUMBER) {
                *lenp = 0;
                return 0;
        }

        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (!tmpstr)
                return -ENOMEM;

        s = tmpstr; /* points to current position in tmpstr[] */

        if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s,
                              "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
                              "nid", "refs", "state", "last", "max",
                              "rtr", "min", "tx", "min", "queue");
                LASSERT(tmpstr + tmpsiz - s > 0);

                hoff++;
        } else {
                struct lnet_peer *peer;
                struct list_head *p;
                int skip;
again:
                p = NULL;
                peer = NULL;
                skip = hoff - 1;

                lnet_net_lock(cpt);
                ptable = the_lnet.ln_peer_tables[cpt];
                if (hoff == 1)
                        ver = LNET_PROC_VERSION(ptable->pt_version);

                if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
                        lnet_net_unlock(cpt);
                        LIBCFS_FREE(tmpstr, tmpsiz);
                        return -ESTALE;
                }

                while (hash < LNET_PEER_HASH_SIZE) {
                        if (!p)
                                p = ptable->pt_hash[hash].next;

                        while (p != &ptable->pt_hash[hash]) {
                                lnet_peer_t *lp = list_entry(p, lnet_peer_t,
                                                             lp_hashlist);
                                if (!skip) {
                                        peer = lp;

                                        /*
                                         * minor optimization: start from idx+1
                                         * on next iteration if we've just
                                         * drained lp_hashlist
                                         */
                                        if (lp->lp_hashlist.next ==
                                            &ptable->pt_hash[hash]) {
                                                hoff = 1;
                                                hash++;
                                        } else {
                                                hoff++;
                                        }

                                        break;
                                }

                                skip--;
                                p = lp->lp_hashlist.next;
                        }

                        if (peer)
                                break;

                        p = NULL;
                        hoff = 1;
                        hash++;
                }

                if (peer) {
                        lnet_nid_t nid = peer->lp_nid;
                        int nrefs = peer->lp_refcount;
                        int lastalive = -1;
                        char *aliveness = "NA";
                        int maxcr = peer->lp_ni->ni_peertxcredits;
                        int txcr = peer->lp_txcredits;
                        int mintxcr = peer->lp_mintxcredits;
                        int rtrcr = peer->lp_rtrcredits;
                        int minrtrcr = peer->lp_minrtrcredits;
                        int txqnob = peer->lp_txqnob;

                        if (lnet_isrouter(peer) ||
                            lnet_peer_aliveness_enabled(peer))
                                aliveness = peer->lp_alive ? "up" : "down";

                        if (lnet_peer_aliveness_enabled(peer)) {
                                unsigned long now = cfs_time_current();
                                long delta;

                                delta = cfs_time_sub(now, peer->lp_last_alive);
                                lastalive = cfs_duration_sec(delta);

                                /*
                                 * No need to mess up the peer's contents with
                                 * arbitrarily long integers - it suffices to
                                 * know that lastalive is more than 10000s old
                                 */
                                if (lastalive >= 10000)
                                        lastalive = 9999;
                        }

                        lnet_net_unlock(cpt);

                        s += snprintf(s, tmpstr + tmpsiz - s,
                                      "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n",
                                      libcfs_nid2str(nid), nrefs, aliveness,
                                      lastalive, maxcr, rtrcr, minrtrcr, txcr,
                                      mintxcr, txqnob);
                        LASSERT(tmpstr + tmpsiz - s > 0);

                } else { /* peer is NULL */
                        lnet_net_unlock(cpt);
                }

                if (hash == LNET_PEER_HASH_SIZE) {
                        cpt++;
                        hash = 0;
                        hoff = 1;
                        if (!peer && cpt < LNET_CPT_NUMBER)
                                goto again;
                }
        }

        len = s - tmpstr; /* how many bytes were written */

        if (len > *lenp) { /* Linux-supplied buffer is too small */
                rc = -EINVAL;
        } else if (len > 0) { /* wrote something */
                if (copy_to_user(buffer, tmpstr, len))
                        rc = -EFAULT;
                else
                        *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
        }

        LIBCFS_FREE(tmpstr, tmpsiz);

        if (!rc)
                *lenp = len;

        return rc;
}

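/*
 * Router buffer pool statistics: for each of the LNET_NRBPOOLS pool sizes,
 * one line is printed per CPT with the buffer page count, the number of
 * buffers, and the current and minimum credits.  Only the header is printed
 * when this node is not routing (no ln_rtrpools).
 */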
static int __proc_lnet_buffers(void *data, int write,
                               loff_t pos, void __user *buffer, int nob)
{
        char *s;
        char *tmpstr;
        int tmpsiz;
        int idx;
        int len;
        int rc;
        int i;

        LASSERT(!write);

        /* (4 %d) * 4 * LNET_CPT_NUMBER */
        tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (!tmpstr)
                return -ENOMEM;

        s = tmpstr; /* points to current position in tmpstr[] */

        s += snprintf(s, tmpstr + tmpsiz - s,
                      "%5s %5s %7s %7s\n",
                      "pages", "count", "credits", "min");
        LASSERT(tmpstr + tmpsiz - s > 0);

        if (!the_lnet.ln_rtrpools)
                goto out; /* I'm not a router */

        for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
                lnet_rtrbufpool_t *rbp;

                lnet_net_lock(LNET_LOCK_EX);
                cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
                        s += snprintf(s, tmpstr + tmpsiz - s,
                                      "%5d %5d %7d %7d\n",
                                      rbp[idx].rbp_npages,
                                      rbp[idx].rbp_nbuffers,
                                      rbp[idx].rbp_credits,
                                      rbp[idx].rbp_mincredits);
                        LASSERT(tmpstr + tmpsiz - s > 0);
                }
                lnet_net_unlock(LNET_LOCK_EX);
        }

out:
        len = s - tmpstr;

        if (pos >= min_t(int, len, strlen(tmpstr)))
                rc = 0;
        else
                rc = cfs_trace_copyout_string(buffer, nob,
                                              tmpstr + pos, NULL);

        LIBCFS_FREE(tmpstr, tmpsiz);
        return rc;
}

static int proc_lnet_buffers(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_call_handler(table->data, write, ppos, buffer, lenp,
                                 __proc_lnet_buffers);
}

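/*
 * One line is printed per network interface per TX queue: TX queues are
 * allocated per CPT, so an NI bound to several CPTs shows up once for each
 * of them, with that partition's credit counters.
 */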
static int proc_lnet_nis(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int tmpsiz = 128 * LNET_CPT_NUMBER;
        int rc = 0;
        char *tmpstr;
        char *s;
        int len;

        LASSERT(!write);

        if (!*lenp)
                return 0;

        LIBCFS_ALLOC(tmpstr, tmpsiz);
        if (!tmpstr)
                return -ENOMEM;

        s = tmpstr; /* points to current position in tmpstr[] */

        if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s,
                              "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
                              "nid", "status", "alive", "refs", "peer",
                              "rtr", "max", "tx", "min");
                LASSERT(tmpstr + tmpsiz - s > 0);
        } else {
                struct list_head *n;
                lnet_ni_t *ni = NULL;
                int skip = *ppos - 1;

                lnet_net_lock(0);

                n = the_lnet.ln_nis.next;

                while (n != &the_lnet.ln_nis) {
                        lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);

                        if (!skip) {
                                ni = a_ni;
                                break;
                        }

                        skip--;
                        n = n->next;
                }

                if (ni) {
                        struct lnet_tx_queue *tq;
                        char *stat;
                        time64_t now = ktime_get_real_seconds();
                        int last_alive = -1;
                        int i;
                        int j;

                        if (the_lnet.ln_routing)
                                last_alive = now - ni->ni_last_alive;

                        /* @lo forever alive */
                        if (ni->ni_lnd->lnd_type == LOLND)
                                last_alive = 0;

                        lnet_ni_lock(ni);
                        LASSERT(ni->ni_status);
                        stat = (ni->ni_status->ns_status ==
                                LNET_NI_STATUS_UP) ? "up" : "down";
                        lnet_ni_unlock(ni);

                        /*
                         * we actually output credits information for
                         * TX queue of each partition
                         */
                        cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
                                for (j = 0; ni->ni_cpts &&
                                     j < ni->ni_ncpts; j++) {
                                        if (i == ni->ni_cpts[j])
                                                break;
                                }

                                if (j == ni->ni_ncpts)
                                        continue;

                                if (i)
                                        lnet_net_lock(i);

                                s += snprintf(s, tmpstr + tmpsiz - s,
                                              "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
                                              libcfs_nid2str(ni->ni_nid), stat,
                                              last_alive, *ni->ni_refs[i],
                                              ni->ni_peertxcredits,
                                              ni->ni_peerrtrcredits,
                                              tq->tq_credits_max,
                                              tq->tq_credits,
                                              tq->tq_credits_min);
                                if (i)
                                        lnet_net_unlock(i);
                        }
                        LASSERT(tmpstr + tmpsiz - s > 0);
                }

                lnet_net_unlock(0);
        }

        len = s - tmpstr; /* how many bytes were written */

        if (len > *lenp) { /* Linux-supplied buffer is too small */
                rc = -EINVAL;
        } else if (len > 0) { /* wrote something */
                if (copy_to_user(buffer, tmpstr, len))
                        rc = -EFAULT;
                else
                        *ppos += 1;
        }

        LIBCFS_FREE(tmpstr, tmpsiz);

        if (!rc)
                *lenp = len;

        return rc;
}

struct lnet_portal_rotors {
        int pr_value;
        const char *pr_name;
        const char *pr_desc;
};

static struct lnet_portal_rotors portal_rotors[] = {
        {
                .pr_value = LNET_PTL_ROTOR_OFF,
                .pr_name  = "OFF",
                .pr_desc  = "Turn off message rotor for wildcard portals"
        },
        {
                .pr_value = LNET_PTL_ROTOR_ON,
                .pr_name  = "ON",
                .pr_desc  = "round-robin dispatch all PUT messages for wildcard portals"
        },
        {
                .pr_value = LNET_PTL_ROTOR_RR_RT,
                .pr_name  = "RR_RT",
                .pr_desc  = "round-robin dispatch routed PUT message for wildcard portals"
        },
        {
                .pr_value = LNET_PTL_ROTOR_HASH_RT,
                .pr_name  = "HASH_RT",
                .pr_desc  = "dispatch routed PUT message by hashing source NID for wildcard portals"
        },
        {
                .pr_value = -1,
                .pr_name  = NULL,
                .pr_desc  = NULL
        },
};

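/*
 * Reading portal_rotor prints the currently selected rotor policy together
 * with its description; writing accepts one of the pr_name strings above
 * (matched case-insensitively) and switches portal_rotor to the corresponding
 * value, or returns -EINVAL for anything else.
 */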
static int __proc_lnet_portal_rotor(void *data, int write,
                                    loff_t pos, void __user *buffer, int nob)
{
        const int buf_len = 128;
        char *buf;
        char *tmp;
        int rc;
        int i;

        LIBCFS_ALLOC(buf, buf_len);
        if (!buf)
                return -ENOMEM;

        if (!write) {
                lnet_res_lock(0);

                for (i = 0; portal_rotors[i].pr_value >= 0; i++) {
                        if (portal_rotors[i].pr_value == portal_rotor)
                                break;
                }

                LASSERT(portal_rotors[i].pr_value == portal_rotor);
                lnet_res_unlock(0);

                rc = snprintf(buf, buf_len,
                              "{\n\tportals: all\n"
                              "\trotor: %s\n\tdescription: %s\n}",
                              portal_rotors[i].pr_name,
                              portal_rotors[i].pr_desc);

                if (pos >= min_t(int, rc, buf_len)) {
                        rc = 0;
                } else {
                        rc = cfs_trace_copyout_string(buffer, nob,
                                                      buf + pos, "\n");
                }
                goto out;
        }

        rc = cfs_trace_copyin_string(buf, buf_len, buffer, nob);
        if (rc < 0)
                goto out;

        tmp = cfs_trimwhite(buf);

        rc = -EINVAL;
        lnet_res_lock(0);
        for (i = 0; portal_rotors[i].pr_name; i++) {
                if (!strncasecmp(portal_rotors[i].pr_name, tmp,
                                 strlen(portal_rotors[i].pr_name))) {
                        portal_rotor = portal_rotors[i].pr_value;
                        rc = 0;
                        break;
                }
        }
        lnet_res_unlock(0);
out:
        LIBCFS_FREE(buf, buf_len);
        return rc;
}

static int proc_lnet_portal_rotor(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos)
{
        return proc_call_handler(table->data, write, ppos, buffer, lenp,
                                 __proc_lnet_portal_rotor);
}

static struct ctl_table lnet_table[] = {
        /*
         * NB No .strategy entries have been provided since sysctl(8) prefers
         * to go via /proc for portability.
         */
        {
                .procname     = "stats",
                .mode         = 0644,
                .proc_handler = &proc_lnet_stats,
        },
        {
                .procname     = "routes",
                .mode         = 0444,
                .proc_handler = &proc_lnet_routes,
        },
        {
                .procname     = "routers",
                .mode         = 0444,
                .proc_handler = &proc_lnet_routers,
        },
        {
                .procname     = "peers",
                .mode         = 0444,
                .proc_handler = &proc_lnet_peers,
        },
        {
                .procname     = "buffers",
                .mode         = 0444,
                .proc_handler = &proc_lnet_buffers,
        },
        {
                .procname     = "nis",
                .mode         = 0444,
                .proc_handler = &proc_lnet_nis,
        },
        {
                .procname     = "portal_rotor",
                .mode         = 0644,
                .proc_handler = &proc_lnet_portal_rotor,
        },
        {
        }
};

void lnet_router_debugfs_init(void)
{
        lustre_insert_debugfs(lnet_table, NULL);
}

void lnet_router_debugfs_fini(void)
{
}