staging/lustre/ptlrpc: Fix Multiple Assignments
drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre/lustre_idl.h"
#include "../include/lustre_net.h"
#include "../include/obd_class.h"
#include "ptlrpc_internal.h"

static struct ll_rpc_opcode {
        __u32 opcode;
        const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
        { OST_REPLY, "ost_reply" },
        { OST_GETATTR, "ost_getattr" },
        { OST_SETATTR, "ost_setattr" },
        { OST_READ, "ost_read" },
        { OST_WRITE, "ost_write" },
        { OST_CREATE, "ost_create" },
        { OST_DESTROY, "ost_destroy" },
        { OST_GET_INFO, "ost_get_info" },
        { OST_CONNECT, "ost_connect" },
        { OST_DISCONNECT, "ost_disconnect" },
        { OST_PUNCH, "ost_punch" },
        { OST_OPEN, "ost_open" },
        { OST_CLOSE, "ost_close" },
        { OST_STATFS, "ost_statfs" },
        { 14, NULL }, /* formerly OST_SAN_READ */
        { 15, NULL }, /* formerly OST_SAN_WRITE */
        { OST_SYNC, "ost_sync" },
        { OST_SET_INFO, "ost_set_info" },
        { OST_QUOTACHECK, "ost_quotacheck" },
        { OST_QUOTACTL, "ost_quotactl" },
        { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
        { MDS_GETATTR, "mds_getattr" },
        { MDS_GETATTR_NAME, "mds_getattr_lock" },
        { MDS_CLOSE, "mds_close" },
        { MDS_REINT, "mds_reint" },
        { MDS_READPAGE, "mds_readpage" },
        { MDS_CONNECT, "mds_connect" },
        { MDS_DISCONNECT, "mds_disconnect" },
        { MDS_GETSTATUS, "mds_getstatus" },
        { MDS_STATFS, "mds_statfs" },
        { MDS_PIN, "mds_pin" },
        { MDS_UNPIN, "mds_unpin" },
        { MDS_SYNC, "mds_sync" },
        { MDS_DONE_WRITING, "mds_done_writing" },
        { MDS_SET_INFO, "mds_set_info" },
        { MDS_QUOTACHECK, "mds_quotacheck" },
        { MDS_QUOTACTL, "mds_quotactl" },
        { MDS_GETXATTR, "mds_getxattr" },
        { MDS_SETXATTR, "mds_setxattr" },
        { MDS_WRITEPAGE, "mds_writepage" },
        { MDS_IS_SUBDIR, "mds_is_subdir" },
        { MDS_GET_INFO, "mds_get_info" },
        { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
        { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
        { MDS_HSM_ACTION, "mds_hsm_action" },
        { MDS_HSM_PROGRESS, "mds_hsm_progress" },
        { MDS_HSM_REQUEST, "mds_hsm_request" },
        { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
        { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
        { MDS_SWAP_LAYOUTS, "mds_swap_layouts" },
        { LDLM_ENQUEUE, "ldlm_enqueue" },
        { LDLM_CONVERT, "ldlm_convert" },
        { LDLM_CANCEL, "ldlm_cancel" },
        { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
        { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
        { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
        { LDLM_SET_INFO, "ldlm_set_info" },
        { MGS_CONNECT, "mgs_connect" },
        { MGS_DISCONNECT, "mgs_disconnect" },
        { MGS_EXCEPTION, "mgs_exception" },
        { MGS_TARGET_REG, "mgs_target_reg" },
        { MGS_TARGET_DEL, "mgs_target_del" },
        { MGS_SET_INFO, "mgs_set_info" },
        { MGS_CONFIG_READ, "mgs_config_read" },
        { OBD_PING, "obd_ping" },
        { OBD_LOG_CANCEL, "llog_cancel" },
        { OBD_QC_CALLBACK, "obd_quota_callback" },
        { OBD_IDX_READ, "dt_index_read" },
        { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" },
        { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
        { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
        { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
        { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" },
        { LLOG_ORIGIN_CONNECT, "llog_origin_connect" },
        { LLOG_CATINFO, "llog_catinfo" },
        { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
        { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" },
        { QUOTA_DQACQ, "quota_acquire" },
        { QUOTA_DQREL, "quota_release" },
        { SEQ_QUERY, "seq_query" },
        { SEC_CTX_INIT, "sec_ctx_init" },
        { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
        { SEC_CTX_FINI, "sec_ctx_fini" },
        { FLD_QUERY, "fld_query" },
        { FLD_READ, "fld_read" },
};

static struct ll_eopcode {
        __u32 opcode;
        const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
        { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
        { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
        { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" },
        { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" },
        { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" },
        { MDS_REINT_SETATTR, "mds_reint_setattr" },
        { MDS_REINT_CREATE, "mds_reint_create" },
        { MDS_REINT_LINK, "mds_reint_link" },
        { MDS_REINT_UNLINK, "mds_reint_unlink" },
        { MDS_REINT_RENAME, "mds_reint_rename" },
        { MDS_REINT_OPEN, "mds_reint_open" },
        { MDS_REINT_SETXATTR, "mds_reint_setxattr" },
        { BRW_READ_BYTES, "read_bytes" },
        { BRW_WRITE_BYTES, "write_bytes" },
};

const char *ll_opcode2str(__u32 opcode)
{
        /* When one of the assertions below fails, chances are that:
         * 1) A new opcode was added in include/lustre/lustre_idl.h,
         *    but is missing from the table above.
         * or 2) The opcode space was renumbered or rearranged,
         *    and the opcode_offset() function in
         *    ptlrpc_internal.h needs to be modified.
         */
        __u32 offset = opcode_offset(opcode);

        LASSERTF(offset < LUSTRE_MAX_OPCODES,
                 "offset %u >= LUSTRE_MAX_OPCODES %u\n",
                 offset, LUSTRE_MAX_OPCODES);
        LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
                 "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
                 offset, ll_rpc_opcode_table[offset].opcode, opcode);
        return ll_rpc_opcode_table[offset].opname;
}

static const char *ll_eopcode2str(__u32 opcode)
{
        LASSERT(ll_eopcode_table[opcode].opcode == opcode);
        return ll_eopcode_table[opcode].opname;
}

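/*
 * Shared helper for the service and obd registration paths below: it
 * allocates an lprocfs_stats block with one counter per "extra" opcode
 * plus one per Lustre RPC opcode, optionally creates a debugfs
 * subdirectory @dir under @root, and exposes the counters through a file
 * called @name (both callers pass "stats").  Failures are silent: stats
 * registration is best-effort and *debugfs_root_ret/*stats_ret are simply
 * left untouched.
 */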
static void
ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
                         char *name,
                         struct dentry **debugfs_root_ret,
                         struct lprocfs_stats **stats_ret)
{
        struct dentry *svc_debugfs_entry;
        struct lprocfs_stats *svc_stats;
        int i, rc;
        unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
                                          LPROCFS_CNTR_STDDEV;

        LASSERT(!*debugfs_root_ret);
        LASSERT(!*stats_ret);

        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,
                                        0);
        if (!svc_stats)
                return;

        if (dir) {
                svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL);
                if (IS_ERR(svc_debugfs_entry)) {
                        lprocfs_free_stats(&svc_stats);
                        return;
                }
        } else {
                svc_debugfs_entry = root;
        }

        lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
                             svc_counter_config, "req_waittime", "usec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
                             svc_counter_config, "req_qdepth", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
                             svc_counter_config, "req_active", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
                             svc_counter_config, "req_timeout", "sec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
                             svc_counter_config, "reqbuf_avail", "bufs");
        for (i = 0; i < EXTRA_LAST_OPC; i++) {
                char *units;

                switch (i) {
                case BRW_WRITE_BYTES:
                case BRW_READ_BYTES:
                        units = "bytes";
                        break;
                default:
                        units = "reqs";
                        break;
                }
                lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
                                     svc_counter_config,
                                     ll_eopcode2str(i), units);
        }
        for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
                __u32 opcode = ll_rpc_opcode_table[i].opcode;

                lprocfs_counter_init(svc_stats,
                                     EXTRA_MAX_OPCODES + i, svc_counter_config,
                                     ll_opcode2str(opcode), "usec");
        }

        rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats);
        if (rc < 0) {
                if (dir)
                        ldebugfs_remove(&svc_debugfs_entry);
                lprocfs_free_stats(&svc_stats);
        } else {
                if (dir)
                        *debugfs_root_ret = svc_debugfs_entry;
                *stats_ret = svc_stats;
        }
}

static int
ptlrpc_lprocfs_req_history_len_seq_show(struct seq_file *m, void *v)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        int total = 0;
        int i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svcpt->scp_hist_nrqbds;

        seq_printf(m, "%d\n", total);
        return 0;
}

LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_req_history_len);

static int
ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        int total = 0;
        int i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svc->srv_hist_nrqbds_cpt_max;

        seq_printf(m, "%d\n", total);
        return 0;
}

static ssize_t
ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
                                         const char __user *buffer,
                                         size_t count, loff_t *off)
{
        struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
        int bufpages;
        int val;
        int rc;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        /* This sanity check is more of an insanity check; we can still
         * hose a kernel by allowing the request history to grow too
         * far.
         */
        bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (val > totalram_pages / (2 * bufpages))
                return -ERANGE;

        spin_lock(&svc->srv_lock);

        if (val == 0)
                svc->srv_hist_nrqbds_cpt_max = 0;
        else
                svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));

        spin_unlock(&svc->srv_lock);

        return count;
}

LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max);

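/*
 * threads_min/threads_started/threads_max sysfs attributes: reads report
 * thread counts aggregated over all CPT partitions, while writes are
 * divided by srv_ncpts to obtain the per-partition setting.  Stores fail
 * with -ERANGE if the per-partition value would drop below
 * PTLRPC_NTHRS_INIT or cross the opposite (min/max) limit.
 */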
static ssize_t threads_min_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);

        return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_init * svc->srv_ncpts);
}

static ssize_t threads_min_store(struct kobject *kobj, struct attribute *attr,
                                 const char *buffer, size_t count)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        unsigned long val;
        int rc = kstrtoul(buffer, 10, &val);

        if (rc < 0)
                return rc;

        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
                spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }

        svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;

        spin_unlock(&svc->srv_lock);

        return count;
}
LUSTRE_RW_ATTR(threads_min);

static ssize_t threads_started_show(struct kobject *kobj,
                                    struct attribute *attr,
                                    char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        struct ptlrpc_service_part *svcpt;
        int total = 0;
        int i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svcpt->scp_nthrs_running;

        return sprintf(buf, "%d\n", total);
}
LUSTRE_RO_ATTR(threads_started);

static ssize_t threads_max_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);

        return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
}

static ssize_t threads_max_store(struct kobject *kobj, struct attribute *attr,
                                 const char *buffer, size_t count)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        unsigned long val;
        int rc = kstrtoul(buffer, 10, &val);

        if (rc < 0)
                return rc;

        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
                spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }

        svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;

        spin_unlock(&svc->srv_lock);

        return count;
}
LUSTRE_RW_ATTR(threads_max);

/**
 * \addtogroup nrs
 * @{
 */

/**
 * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
 *
 * \param[in] state The policy state
 */
static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
{
        switch (state) {
        default:
                LBUG();
        case NRS_POL_STATE_INVALID:
                return "invalid";
        case NRS_POL_STATE_STOPPED:
                return "stopped";
        case NRS_POL_STATE_STOPPING:
                return "stopping";
        case NRS_POL_STATE_STARTING:
                return "starting";
        case NRS_POL_STATE_STARTED:
                return "started";
        }
}

/**
 * Obtains status information for \a policy.
 *
 * Information is copied in \a info.
 *
 * \param[in] policy The policy
 * \param[out] info Holds returned status information
 */
static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
                                       struct ptlrpc_nrs_pol_info *info)
{
        assert_spin_locked(&policy->pol_nrs->nrs_lock);

        memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);

        info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
        info->pi_state = policy->pol_state;
        /**
         * XXX: These are accessed without holding
         * ptlrpc_service_part::scp_req_lock.
         */
        info->pi_req_queued = policy->pol_req_queued;
        info->pi_req_started = policy->pol_req_started;
}

/**
 * Reads and prints policy status information for all policies of a PTLRPC
 * service.
 */
static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        struct ptlrpc_nrs *nrs;
        struct ptlrpc_nrs_policy *policy;
        struct ptlrpc_nrs_pol_info *infos;
        struct ptlrpc_nrs_pol_info tmp;
        unsigned num_pols;
        unsigned pol_idx = 0;
        bool hp = false;
        int i;
        int rc = 0;

        /**
         * Serialize NRS core lprocfs operations with policy registration/
         * unregistration.
         */
        mutex_lock(&nrs_core.nrs_mutex);

        /**
         * Use the first service partition's regular NRS head in order to obtain
         * the number of policies registered with NRS heads of this service. All
         * service partitions will have the same number of policies.
         */
        nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);

        spin_lock(&nrs->nrs_lock);
        num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
        spin_unlock(&nrs->nrs_lock);

        infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS);
        if (!infos) {
                rc = -ENOMEM;
                goto unlock;
        }
again:

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                nrs = nrs_svcpt2nrs(svcpt, hp);
                spin_lock(&nrs->nrs_lock);

                pol_idx = 0;

                list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) {
                        LASSERT(pol_idx < num_pols);

                        nrs_policy_get_info_locked(policy, &tmp);
                        /**
                         * Copy values when handling the first service
                         * partition.
                         */
                        if (i == 0) {
                                memcpy(infos[pol_idx].pi_name, tmp.pi_name,
                                       NRS_POL_NAME_MAX);
                                memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
                                       sizeof(tmp.pi_state));
                                infos[pol_idx].pi_fallback = tmp.pi_fallback;
                                /**
                                 * For the rest of the service partitions
                                 * sanity-check the values we get.
                                 */
                        } else {
                                LASSERT(strncmp(infos[pol_idx].pi_name,
                                                tmp.pi_name,
                                                NRS_POL_NAME_MAX) == 0);
                                /**
                                 * Not asserting ptlrpc_nrs_pol_info::pi_state,
                                 * because it may be different between
                                 * instances of the same policy in different
                                 * service partitions.
                                 */
                                LASSERT(infos[pol_idx].pi_fallback ==
                                        tmp.pi_fallback);
                        }

                        infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
                        infos[pol_idx].pi_req_started += tmp.pi_req_started;

                        pol_idx++;
                }
                spin_unlock(&nrs->nrs_lock);
        }

        /**
         * Policy status information output is in YAML format.
         * For example:
         *
         *      regular_requests:
         *        - name: fifo
         *          state: started
         *          fallback: yes
         *          queued: 0
         *          active: 0
         *
         *        - name: crrn
         *          state: started
         *          fallback: no
         *          queued: 2015
         *          active: 384
         *
         *      high_priority_requests:
         *        - name: fifo
         *          state: started
         *          fallback: yes
         *          queued: 0
         *          active: 2
         *
         *        - name: crrn
         *          state: stopped
         *          fallback: no
         *          queued: 0
         *          active: 0
         */
        seq_printf(m, "%s\n",
                   !hp ? "\nregular_requests:" : "high_priority_requests:");

        for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
                seq_printf(m, "  - name: %s\n"
                              "    state: %s\n"
                              "    fallback: %s\n"
                              "    queued: %-20d\n"
                              "    active: %-20d\n\n",
                           infos[pol_idx].pi_name,
                           nrs_state2str(infos[pol_idx].pi_state),
                           infos[pol_idx].pi_fallback ? "yes" : "no",
                           (int)infos[pol_idx].pi_req_queued,
                           (int)infos[pol_idx].pi_req_started);
        }

        if (!hp && nrs_svc_has_hp(svc)) {
                memset(infos, 0, num_pols * sizeof(*infos));

                /**
                 * Redo the processing for the service's HP NRS heads' policies.
                 */
                hp = true;
                goto again;
        }

        kfree(infos);
unlock:
        mutex_unlock(&nrs_core.nrs_mutex);

        return rc;
}

/**
 * The longest valid command string is the maximum policy name size, plus the
 * length of the " reg" substring
 */
#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1)

/**
 * Starts and stops a given policy on a PTLRPC service.
 *
 * Commands consist of the policy name, followed by an optional [reg|hp] token;
 * if the optional token is omitted, the operation is performed on both the
 * regular and high-priority (if the service has one) NRS head.
 */
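/*
 * Illustrative usage, not part of the original file: given the parsing
 * below, writing "fifo" to a service's nrs_policies debugfs file starts
 * the fifo policy on both NRS heads, while "fifo hp" restricts the
 * operation to the high-priority head.  Only PTLRPC_NRS_CTL_START is ever
 * issued from this handler.
 */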
static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
                                            const char __user *buffer,
                                            size_t count, loff_t *off)
{
        struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
        enum ptlrpc_nrs_queue_type queue = PTLRPC_NRS_QUEUE_BOTH;
        char *cmd;
        char *cmd_copy = NULL;
        char *token;
        int rc = 0;

        if (count >= LPROCFS_NRS_WR_MAX_CMD)
                return -EINVAL;

        cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS);
        if (!cmd)
                return -ENOMEM;
        /**
         * strsep() modifies its argument, so keep a copy
         */
        cmd_copy = cmd;

        if (copy_from_user(cmd, buffer, count)) {
                rc = -EFAULT;
                goto out;
        }

        cmd[count] = '\0';

        token = strsep(&cmd, " ");

        if (strlen(token) > NRS_POL_NAME_MAX - 1) {
                rc = -EINVAL;
                goto out;
        }

        /**
         * No [reg|hp] token has been specified
         */
        if (!cmd)
                goto default_queue;

        /**
         * The second token is either NULL, or an optional [reg|hp] string
         */
        if (strcmp(cmd, "reg") == 0) {
                queue = PTLRPC_NRS_QUEUE_REG;
        } else if (strcmp(cmd, "hp") == 0) {
                queue = PTLRPC_NRS_QUEUE_HP;
        } else {
                rc = -EINVAL;
                goto out;
        }

default_queue:

        if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
                rc = -ENODEV;
                goto out;
        } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
                queue = PTLRPC_NRS_QUEUE_REG;
        }

        /**
         * Serialize NRS core lprocfs operations with policy registration/
         * unregistration.
         */
        mutex_lock(&nrs_core.nrs_mutex);

        rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
                                       false, NULL);

        mutex_unlock(&nrs_core.nrs_mutex);
out:
        kfree(cmd_copy);

        return rc < 0 ? rc : count;
}

LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs);

/** @} nrs */

struct ptlrpc_srh_iterator {
        int srhi_idx;
        __u64 srhi_seq;
        struct ptlrpc_request *srhi_req;
};

static int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
                                    struct ptlrpc_srh_iterator *srhi,
                                    __u64 seq)
{
        struct list_head *e;
        struct ptlrpc_request *req;

        if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
            srhi->srhi_seq <= seq) {
                /* If srhi_req was set previously, hasn't been culled and
                 * we're searching for a seq on or after it (i.e. more
                 * recent), search from it onwards.
                 * Since the service history is LRU (i.e. culled reqs will
                 * be near the head), we shouldn't have to do long
                 * re-scans
                 */
                LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
                         "%s:%d: seek seq %llu, request seq %llu\n",
                         svcpt->scp_service->srv_name, svcpt->scp_cpt,
                         srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
                LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
                         "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n",
                         svcpt->scp_service->srv_name, svcpt->scp_cpt,
                         seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
                e = &srhi->srhi_req->rq_history_list;
        } else {
                /* search from start */
                e = svcpt->scp_hist_reqs.next;
        }

        while (e != &svcpt->scp_hist_reqs) {
                req = list_entry(e, struct ptlrpc_request, rq_history_list);

                if (req->rq_history_seq >= seq) {
                        srhi->srhi_seq = req->rq_history_seq;
                        srhi->srhi_req = req;
                        return 0;
                }
                e = e->next;
        }

        return -ENOENT;
}

/*
 * The ptlrpc history sequence is used as the "position" of the seq_file.
 * In some cases seq_read() increments the position to move on to the next
 * element; however, the low bits of the history sequence are reserved for
 * the CPT id (see the comments before ptlrpc_req_add_history), so a bare
 * increment could change the CPT id and never finish reading the requests
 * of one CPT.  To make this work we shift the CPT id into the high bits
 * and the timestamp into the low bits, so seq_read() only ever increments
 * the timestamp, which correctly indicates the next position.
 */
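/*
 * Worked example (added for clarity, not in the original): with
 * srv_cpt_bits == 2 the top two bits of the 64-bit seq_file position hold
 * the CPT id and the remaining bits hold the per-CPT sequence, so
 * PTLRPC_REQ_CPT2POS(svc, 1) places CPT 1 at bit 62 (1ULL << 62), and
 * incrementing the position walks requests within that CPT instead of
 * jumping to another CPT.
 */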

/* convert seq_file pos to cpt */
#define PTLRPC_REQ_POS2CPT(svc, pos)                    \
        ((svc)->srv_cpt_bits == 0 ? 0 :                 \
         (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))

/* make up seq_file pos from cpt */
#define PTLRPC_REQ_CPT2POS(svc, cpt)                    \
        ((svc)->srv_cpt_bits == 0 ? 0 :                 \
         (cpt) << (64 - (svc)->srv_cpt_bits))

/* convert sequence to position */
#define PTLRPC_REQ_SEQ2POS(svc, seq)                    \
        ((svc)->srv_cpt_bits == 0 ? (seq) :             \
         ((seq) >> (svc)->srv_cpt_bits) |               \
         ((seq) << (64 - (svc)->srv_cpt_bits)))

/* convert position to sequence */
#define PTLRPC_REQ_POS2SEQ(svc, pos)                    \
        ((svc)->srv_cpt_bits == 0 ? (pos) :             \
         ((__u64)(pos) << (svc)->srv_cpt_bits) |        \
         ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))

static void *
ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
{
        struct ptlrpc_service *svc = s->private;
        struct ptlrpc_service_part *svcpt;
        struct ptlrpc_srh_iterator *srhi;
        unsigned int cpt;
        int rc;
        int i;

        if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
                CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n",
                      (int)sizeof(loff_t));
                return NULL;
        }

        srhi = kzalloc(sizeof(*srhi), GFP_NOFS);
        if (!srhi)
                return NULL;

        srhi->srhi_seq = 0;
        srhi->srhi_req = NULL;

        cpt = PTLRPC_REQ_POS2CPT(svc, *pos);

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (i < cpt) /* skip */
                        continue;
                if (i > cpt) /* make up the lowest position for this CPT */
                        *pos = PTLRPC_REQ_CPT2POS(svc, i);

                spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
                                PTLRPC_REQ_POS2SEQ(svc, *pos));
                spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
                        srhi->srhi_idx = i;
                        return srhi;
                }
        }

        kfree(srhi);
        return NULL;
}

static void
ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
{
        struct ptlrpc_srh_iterator *srhi = iter;

        kfree(srhi);
}

static void *
ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
                                    void *iter, loff_t *pos)
{
        struct ptlrpc_service *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        struct ptlrpc_service_part *svcpt;
        __u64 seq;
        int rc;
        int i;

        for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
                svcpt = svc->srv_parts[i];

                if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
                        srhi->srhi_req = NULL;
                        seq = 0;
                        srhi->srhi_seq = 0;
                } else { /* the next sequence */
                        seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
                }

                spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
                spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
                        srhi->srhi_idx = i;
                        return srhi;
                }
        }

        kfree(srhi);
        return NULL;
}

static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
{
        struct ptlrpc_service *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        struct ptlrpc_service_part *svcpt;
        struct ptlrpc_request *req;
        int rc;

        LASSERT(srhi->srhi_idx < svc->srv_ncpts);

        svcpt = svc->srv_parts[srhi->srhi_idx];

        spin_lock(&svcpt->scp_lock);

        rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);

        if (rc == 0) {
                char nidstr[LNET_NIDSTR_SIZE];

                req = srhi->srhi_req;

                libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr));
                /* Print common req fields.
                 * CAVEAT EMPTOR: we're racing with the service handler
                 * here. The request could contain any old crap, so you
                 * must be just as careful as the service's request
                 * parser. Currently I only print stuff here I know is OK
                 * to look at coz it was set up in request_in_callback()!!!
                 */
                seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
                           req->rq_history_seq, nidstr,
                           libcfs_id2str(req->rq_peer), req->rq_xid,
                           req->rq_reqlen, ptlrpc_rqphase2str(req),
                           (s64)req->rq_arrival_time.tv_sec,
                           (long)(req->rq_sent - req->rq_arrival_time.tv_sec),
                           (long)(req->rq_sent - req->rq_deadline));
                if (!svc->srv_ops.so_req_printer)
                        seq_putc(s, '\n');
                else
                        svc->srv_ops.so_req_printer(s, srhi->srhi_req);
        }

        spin_unlock(&svcpt->scp_lock);
        return rc;
}

static int
ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
{
        static struct seq_operations sops = {
                .start = ptlrpc_lprocfs_svc_req_history_start,
                .stop = ptlrpc_lprocfs_svc_req_history_stop,
                .next = ptlrpc_lprocfs_svc_req_history_next,
                .show = ptlrpc_lprocfs_svc_req_history_show,
        };
        struct seq_file *seqf;
        int rc;

        rc = seq_open(file, &sops);
        if (rc)
                return rc;

        seqf = file->private_data;
        seqf->private = inode->i_private;
        return 0;
}

/* See also lprocfs_rd_timeouts */
static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        struct dhms ts;
        time64_t worstt;
        unsigned int cur;
        unsigned int worst;
        int i;

        if (AT_OFF) {
                seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
                           obd_timeout);
                return 0;
        }

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                cur = at_get(&svcpt->scp_at_estimate);
                worst = svcpt->scp_at_estimate.at_worst_ever;
                worstt = svcpt->scp_at_estimate.at_worst_time;
                s2dhms(&ts, ktime_get_real_seconds() - worstt);

                seq_printf(m, "%10s : cur %3u worst %3u (at %lld, "
                           DHMS_FMT " ago) ", "service",
                           cur, worst, (s64)worstt, DHMS_VARS(&ts));

                lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
        }

        return 0;
}

LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_timeouts);

static ssize_t high_priority_ratio_show(struct kobject *kobj,
                                        struct attribute *attr,
                                        char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        return sprintf(buf, "%d\n", svc->srv_hpreq_ratio);
}

static ssize_t high_priority_ratio_store(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buffer,
                                         size_t count)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        int rc;
        int val;

        rc = kstrtoint(buffer, 10, &val);
        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_hpreq_ratio = val;
        spin_unlock(&svc->srv_lock);

        return count;
}
LUSTRE_RW_ATTR(high_priority_ratio);

static struct attribute *ptlrpc_svc_attrs[] = {
        &lustre_attr_threads_min.attr,
        &lustre_attr_threads_started.attr,
        &lustre_attr_threads_max.attr,
        &lustre_attr_high_priority_ratio.attr,
        NULL,
};

static void ptlrpc_sysfs_svc_release(struct kobject *kobj)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);

        complete(&svc->srv_kobj_unregister);
}

static struct kobj_type ptlrpc_svc_ktype = {
        .default_attrs = ptlrpc_svc_attrs,
        .sysfs_ops = &lustre_sysfs_ops,
        .release = ptlrpc_sysfs_svc_release,
};

void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc)
{
        /* Let's see if we had a chance at initialization first */
        if (svc->srv_kobj.kset) {
                kobject_put(&svc->srv_kobj);
                wait_for_completion(&svc->srv_kobj_unregister);
        }
}

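/*
 * Create the per-service sysfs directory under the kset supplied by the
 * caller and attach the attribute group and release handler defined
 * above.  ptlrpc_sysfs_unregister_service() drops the kobject and waits
 * for the matching release before returning.
 */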
int ptlrpc_sysfs_register_service(struct kset *parent,
                                  struct ptlrpc_service *svc)
{
        int rc;

        svc->srv_kobj.kset = parent;
        init_completion(&svc->srv_kobj_unregister);
        rc = kobject_init_and_add(&svc->srv_kobj, &ptlrpc_svc_ktype, NULL,
                                  "%s", svc->srv_name);

        return rc;
}

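/*
 * Populate the per-service debugfs directory: a "stats" file with the RPC
 * opcode counters (via ptlrpc_ldebugfs_register()), the
 * req_buffer_history_len, req_buffer_history_max, timeouts and
 * nrs_policies files, plus a seq_file based req_history dump.
 */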
void ptlrpc_ldebugfs_register_service(struct dentry *entry,
                                      struct ptlrpc_service *svc)
{
        struct lprocfs_vars lproc_vars[] = {
                {.name = "req_buffer_history_len",
                 .fops = &ptlrpc_lprocfs_req_history_len_fops,
                 .data = svc},
                {.name = "req_buffer_history_max",
                 .fops = &ptlrpc_lprocfs_req_history_max_fops,
                 .data = svc},
                {.name = "timeouts",
                 .fops = &ptlrpc_lprocfs_timeouts_fops,
                 .data = svc},
                {.name = "nrs_policies",
                 .fops = &ptlrpc_lprocfs_nrs_fops,
                 .data = svc},
                {NULL}
        };
        static const struct file_operations req_history_fops = {
                .owner = THIS_MODULE,
                .open = ptlrpc_lprocfs_svc_req_history_open,
                .read = seq_read,
                .llseek = seq_lseek,
                .release = lprocfs_seq_release,
        };

        int rc;

        ptlrpc_ldebugfs_register(entry, svc->srv_name,
                                 "stats", &svc->srv_debugfs_entry,
                                 &svc->srv_stats);

        if (IS_ERR_OR_NULL(svc->srv_debugfs_entry))
                return;

        ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL);

        rc = ldebugfs_seq_create(svc->srv_debugfs_entry, "req_history",
                                 0400, &req_history_fops, svc);
        if (rc)
                CWARN("Error adding the req_history file\n");
}

void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
{
        ptlrpc_ldebugfs_register(obddev->obd_debugfs_entry, NULL, "stats",
                                 &obddev->obd_svc_debugfs_entry,
                                 &obddev->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);

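/*
 * Record a sent RPC in the import's obd_svc_stats; the per-opcode
 * counters were initialised with "usec" units above.  LDLM_ENQUEUE and
 * MDS_REINT are skipped here, presumably because they are accounted
 * through the finer-grained extra counters (ldlm_*_enqueue, mds_reint_*)
 * instead.
 */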
void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
{
        struct lprocfs_stats *svc_stats;
        __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
        int opc = opcode_offset(op);

        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (!svc_stats || opc <= 0)
                return;
        LASSERT(opc < LUSTRE_MAX_OPCODES);
        if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
                lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
}

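/*
 * Account bulk I/O volume: OST_READ and OST_WRITE requests are charged to
 * the read_bytes/write_bytes extra counters; any other opcode trips the
 * LASSERTF below.
 */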
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
{
        struct lprocfs_stats *svc_stats;
        int idx;

        if (!req->rq_import)
                return;
        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (!svc_stats)
                return;
        idx = lustre_msg_get_opc(req->rq_reqmsg);
        switch (idx) {
        case OST_READ:
                idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
                break;
        case OST_WRITE:
                idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
                break;
        default:
                LASSERTF(0, "unsupported opcode %u\n", idx);
                break;
        }

        lprocfs_counter_add(svc_stats, idx, bytes);
}

EXPORT_SYMBOL(ptlrpc_lprocfs_brw);

void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
{
        if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry))
                ldebugfs_remove(&svc->srv_debugfs_entry);

        if (svc->srv_stats)
                lprocfs_free_stats(&svc->srv_stats);
}

void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
{
        if (!IS_ERR_OR_NULL(obd->obd_svc_debugfs_entry))
                ldebugfs_remove(&obd->obd_svc_debugfs_entry);

        if (obd->obd_svc_stats)
                lprocfs_free_stats(&obd->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);

#undef BUFLEN

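/*
 * Handler for the client "ping" write: any write sends a synchronous ping
 * (built by ptlrpc_prep_ping()) to the device's import and returns the
 * write size on success.
 */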
int lprocfs_wr_ping(struct file *file, const char __user *buffer,
                    size_t count, loff_t *off)
{
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        struct ptlrpc_request *req;
        int rc;

        rc = lprocfs_climp_check(obd);
        if (rc)
                return rc;

        req = ptlrpc_prep_ping(obd->u.cli.cl_import);
        up_read(&obd->u.cli.cl_sem);
        if (!req)
                return -ENOMEM;

        req->rq_send_state = LUSTRE_IMP_FULL;

        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        if (rc >= 0)
                return count;
        return rc;
}
EXPORT_SYMBOL(lprocfs_wr_ping);

/* Write the connection UUID to this file to attempt to connect to that node.
 * The connection UUID is a node's primary NID. For example,
 * "echo connection=192.168.0.1@tcp0::instance > .../import".
 */
int lprocfs_wr_import(struct file *file, const char __user *buffer,
                      size_t count, loff_t *off)
{
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        struct obd_import *imp = obd->u.cli.cl_import;
        char *kbuf = NULL;
        char *uuid;
        char *ptr;
        int do_reconn = 1;
        const char prefix[] = "connection=";
        const int prefix_len = sizeof(prefix) - 1;

        if (count > PAGE_SIZE - 1 || count <= prefix_len)
                return -EINVAL;

        kbuf = kzalloc(count + 1, GFP_NOFS);
        if (!kbuf)
                return -ENOMEM;

        if (copy_from_user(kbuf, buffer, count)) {
                count = -EFAULT;
                goto out;
        }

        kbuf[count] = 0;

        /* only support connection=uuid::instance now */
        if (strncmp(prefix, kbuf, prefix_len) != 0) {
                count = -EINVAL;
                goto out;
        }

        uuid = kbuf + prefix_len;
        ptr = strstr(uuid, "::");
        if (ptr) {
                __u32 inst;
                char *endptr;

                *ptr = 0;
                do_reconn = 0;
                ptr += strlen("::");
                inst = simple_strtoul(ptr, &endptr, 10);
                if (*endptr) {
                        CERROR("config: wrong instance # %s\n", ptr);
                } else if (inst != imp->imp_connect_data.ocd_instance) {
                        CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n",
                               imp->imp_obd->obd_name,
                               imp->imp_connect_data.ocd_instance, inst);
                        do_reconn = 1;
                } else {
                        CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n",
                               imp->imp_obd->obd_name, inst);
                }
        }

        if (do_reconn)
                ptlrpc_recover_import(imp, uuid, 1);

out:
        kfree(kbuf);
        return count;
}
EXPORT_SYMBOL(lprocfs_wr_import);

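/*
 * The pinger_recov handlers expose imp_no_pinger_recover with inverted
 * polarity: reading prints 1 while pinger-initiated recovery is enabled;
 * writing 1 enables it (clears the flag) and writing 0 disables it, under
 * imp_lock.
 */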
int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
{
        struct obd_device *obd = m->private;
        struct obd_import *imp = obd->u.cli.cl_import;
        int rc;

        rc = lprocfs_climp_check(obd);
        if (rc)
                return rc;

        seq_printf(m, "%d\n", !imp->imp_no_pinger_recover);
        up_read(&obd->u.cli.cl_sem);

        return 0;
}
EXPORT_SYMBOL(lprocfs_rd_pinger_recov);

int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
                            size_t count, loff_t *off)
{
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        struct client_obd *cli = &obd->u.cli;
        struct obd_import *imp = cli->cl_import;
        int rc, val;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val != 0 && val != 1)
                return -ERANGE;

        rc = lprocfs_climp_check(obd);
        if (rc)
                return rc;

        spin_lock(&imp->imp_lock);
        imp->imp_no_pinger_recover = !val;
        spin_unlock(&imp->imp_lock);
        up_read(&obd->u.cli.cl_sem);

        return count;
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);