/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre/lustre_idl.h"
#include "../include/lustre_net.h"
#include "../include/obd_class.h"
#include "ptlrpc_internal.h"

static struct ll_rpc_opcode {
        __u32 opcode;
        const char *opname;
} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
        { OST_REPLY, "ost_reply" },
        { OST_GETATTR, "ost_getattr" },
        { OST_SETATTR, "ost_setattr" },
        { OST_READ, "ost_read" },
        { OST_WRITE, "ost_write" },
        { OST_CREATE, "ost_create" },
        { OST_DESTROY, "ost_destroy" },
        { OST_GET_INFO, "ost_get_info" },
        { OST_CONNECT, "ost_connect" },
        { OST_DISCONNECT, "ost_disconnect" },
        { OST_PUNCH, "ost_punch" },
        { OST_OPEN, "ost_open" },
        { OST_CLOSE, "ost_close" },
        { OST_STATFS, "ost_statfs" },
        { 14, NULL },   /* formerly OST_SAN_READ */
        { 15, NULL },   /* formerly OST_SAN_WRITE */
        { OST_SYNC, "ost_sync" },
        { OST_SET_INFO, "ost_set_info" },
        { OST_QUOTACHECK, "ost_quotacheck" },
        { OST_QUOTACTL, "ost_quotactl" },
        { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
        { MDS_GETATTR, "mds_getattr" },
        { MDS_GETATTR_NAME, "mds_getattr_lock" },
        { MDS_CLOSE, "mds_close" },
        { MDS_REINT, "mds_reint" },
        { MDS_READPAGE, "mds_readpage" },
        { MDS_CONNECT, "mds_connect" },
        { MDS_DISCONNECT, "mds_disconnect" },
        { MDS_GETSTATUS, "mds_getstatus" },
        { MDS_STATFS, "mds_statfs" },
        { MDS_PIN, "mds_pin" },
        { MDS_UNPIN, "mds_unpin" },
        { MDS_SYNC, "mds_sync" },
        { MDS_DONE_WRITING, "mds_done_writing" },
        { MDS_SET_INFO, "mds_set_info" },
        { MDS_QUOTACHECK, "mds_quotacheck" },
        { MDS_QUOTACTL, "mds_quotactl" },
        { MDS_GETXATTR, "mds_getxattr" },
        { MDS_SETXATTR, "mds_setxattr" },
        { MDS_WRITEPAGE, "mds_writepage" },
        { MDS_IS_SUBDIR, "mds_is_subdir" },
        { MDS_GET_INFO, "mds_get_info" },
        { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
        { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
        { MDS_HSM_ACTION, "mds_hsm_action" },
        { MDS_HSM_PROGRESS, "mds_hsm_progress" },
        { MDS_HSM_REQUEST, "mds_hsm_request" },
        { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
        { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
        { MDS_SWAP_LAYOUTS, "mds_swap_layouts" },
        { LDLM_ENQUEUE, "ldlm_enqueue" },
        { LDLM_CONVERT, "ldlm_convert" },
        { LDLM_CANCEL, "ldlm_cancel" },
        { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
        { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
        { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
        { LDLM_SET_INFO, "ldlm_set_info" },
        { MGS_CONNECT, "mgs_connect" },
        { MGS_DISCONNECT, "mgs_disconnect" },
        { MGS_EXCEPTION, "mgs_exception" },
        { MGS_TARGET_REG, "mgs_target_reg" },
        { MGS_TARGET_DEL, "mgs_target_del" },
        { MGS_SET_INFO, "mgs_set_info" },
        { MGS_CONFIG_READ, "mgs_config_read" },
        { OBD_PING, "obd_ping" },
        { OBD_LOG_CANCEL, "llog_cancel" },
        { OBD_QC_CALLBACK, "obd_quota_callback" },
        { OBD_IDX_READ, "dt_index_read" },
        { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_open" },
        { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
        { LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
        { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
        { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" },
        { LLOG_ORIGIN_CONNECT, "llog_origin_connect" },
        { LLOG_CATINFO, "llog_catinfo" },
        { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
        { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" },
        { QUOTA_DQACQ, "quota_acquire" },
        { QUOTA_DQREL, "quota_release" },
        { SEQ_QUERY, "seq_query" },
        { SEC_CTX_INIT, "sec_ctx_init" },
        { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
        { SEC_CTX_FINI, "sec_ctx_fini" },
        { FLD_QUERY, "fld_query" },
};

static struct ll_eopcode {
        __u32 opcode;
        const char *opname;
} ll_eopcode_table[EXTRA_LAST_OPC] = {
        { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
        { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
        { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" },
        { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" },
        { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" },
        { MDS_REINT_SETATTR, "mds_reint_setattr" },
        { MDS_REINT_CREATE, "mds_reint_create" },
        { MDS_REINT_LINK, "mds_reint_link" },
        { MDS_REINT_UNLINK, "mds_reint_unlink" },
        { MDS_REINT_RENAME, "mds_reint_rename" },
        { MDS_REINT_OPEN, "mds_reint_open" },
        { MDS_REINT_SETXATTR, "mds_reint_setxattr" },
        { BRW_READ_BYTES, "read_bytes" },
        { BRW_WRITE_BYTES, "write_bytes" },
};

const char *ll_opcode2str(__u32 opcode)
{
        /* When one of the assertions below fails, chances are that:
         *     1) A new opcode was added in include/lustre/lustre_idl.h,
         *        but is missing from the table above.
         * or  2) The opcode space was renumbered or rearranged,
         *        and the opcode_offset() function in
         *        ptlrpc_internal.h needs to be modified.
         */
        __u32 offset = opcode_offset(opcode);

        LASSERTF(offset < LUSTRE_MAX_OPCODES,
                 "offset %u >= LUSTRE_MAX_OPCODES %u\n",
                 offset, LUSTRE_MAX_OPCODES);
        LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
                 "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
                 offset, ll_rpc_opcode_table[offset].opcode, opcode);
        return ll_rpc_opcode_table[offset].opname;
}

static const char *ll_eopcode2str(__u32 opcode)
{
        LASSERT(ll_eopcode_table[opcode].opcode == opcode);
        return ll_eopcode_table[opcode].opname;
}

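/*
 * Allocate a counter set covering the extra and regular RPC opcodes for a
 * ptlrpc service and register it as a stats file (with the given name) under
 * debugfs; when dir is non-NULL a subdirectory of root is created for it,
 * otherwise root is used directly.
 */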
static void
ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
                         char *name,
                         struct dentry **debugfs_root_ret,
                         struct lprocfs_stats **stats_ret)
{
        struct dentry *svc_debugfs_entry;
        struct lprocfs_stats *svc_stats;
        int i, rc;
        unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
                                          LPROCFS_CNTR_STDDEV;

        LASSERT(!*debugfs_root_ret);
        LASSERT(!*stats_ret);

        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES,
                                        0);
        if (!svc_stats)
                return;

        if (dir) {
                svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL);
                if (IS_ERR(svc_debugfs_entry)) {
                        lprocfs_free_stats(&svc_stats);
                        return;
                }
        } else {
                svc_debugfs_entry = root;
        }

        lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
                             svc_counter_config, "req_waittime", "usec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
                             svc_counter_config, "req_qdepth", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
                             svc_counter_config, "req_active", "reqs");
        lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
                             svc_counter_config, "req_timeout", "sec");
        lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
                             svc_counter_config, "reqbuf_avail", "bufs");
        for (i = 0; i < EXTRA_LAST_OPC; i++) {
                char *units;

                switch (i) {
                case BRW_WRITE_BYTES:
                case BRW_READ_BYTES:
                        units = "bytes";
                        break;
                default:
                        units = "reqs";
                        break;
                }
                lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
                                     svc_counter_config,
                                     ll_eopcode2str(i), units);
        }
        for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
                __u32 opcode = ll_rpc_opcode_table[i].opcode;

                lprocfs_counter_init(svc_stats,
                                     EXTRA_MAX_OPCODES + i, svc_counter_config,
                                     ll_opcode2str(opcode), "usec");
        }

        rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats);
        if (rc < 0) {
                if (dir)
                        ldebugfs_remove(&svc_debugfs_entry);
                lprocfs_free_stats(&svc_stats);
        } else {
                if (dir)
                        *debugfs_root_ret = svc_debugfs_entry;
                *stats_ret = svc_stats;
        }
}

static int
ptlrpc_lprocfs_req_history_len_seq_show(struct seq_file *m, void *v)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        int total = 0;
        int i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svcpt->scp_hist_nrqbds;

        seq_printf(m, "%d\n", total);
        return 0;
}

LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_req_history_len);

static int
ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        int total = 0;
        int i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svc->srv_hist_nrqbds_cpt_max;

        seq_printf(m, "%d\n", total);
        return 0;
}

static ssize_t
ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
                                         const char __user *buffer,
                                         size_t count, loff_t *off)
{
        struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
        int bufpages;
        int val;
        int rc;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        /* This sanity check is more of an insanity check; we can still
         * hose a kernel by allowing the request history to grow too
         * far.
         */
        bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (val > totalram_pages / (2 * bufpages))
                return -ERANGE;

        spin_lock(&svc->srv_lock);

        if (val == 0)
                svc->srv_hist_nrqbds_cpt_max = 0;
        else
                svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));

        spin_unlock(&svc->srv_lock);

        return count;
}

LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max);

static ssize_t threads_min_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);

        return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_init * svc->srv_ncpts);
}

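/*
 * The value written is a total across all CPT partitions; it is divided
 * evenly between them, must give at least PTLRPC_NTHRS_INIT threads per
 * partition, and may not exceed the current threads_max total.
 */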
static ssize_t threads_min_store(struct kobject *kobj, struct attribute *attr,
                                 const char *buffer, size_t count)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        unsigned long val;
        int rc = kstrtoul(buffer, 10, &val);

        if (rc < 0)
                return rc;

        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
                spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }

        svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;

        spin_unlock(&svc->srv_lock);

        return count;
}
LUSTRE_RW_ATTR(threads_min);

static ssize_t threads_started_show(struct kobject *kobj,
                                    struct attribute *attr,
                                    char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        struct ptlrpc_service_part *svcpt;
        int total = 0;
        int i;

        ptlrpc_service_for_each_part(svcpt, i, svc)
                total += svcpt->scp_nthrs_running;

        return sprintf(buf, "%d\n", total);
}
LUSTRE_RO_ATTR(threads_started);

static ssize_t threads_max_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);

        return sprintf(buf, "%d\n", svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
}

static ssize_t threads_max_store(struct kobject *kobj, struct attribute *attr,
                                 const char *buffer, size_t count)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        unsigned long val;
        int rc = kstrtoul(buffer, 10, &val);

        if (rc < 0)
                return rc;

        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
                spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }

        svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;

        spin_unlock(&svc->srv_lock);

        return count;
}
LUSTRE_RW_ATTR(threads_max);

/**
 * \addtogroup nrs
 * @{
 */

/**
 * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
 *
 * \param[in] state The policy state
 */
static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
{
        switch (state) {
        default:
                LBUG();
        case NRS_POL_STATE_INVALID:
                return "invalid";
        case NRS_POL_STATE_STOPPED:
                return "stopped";
        case NRS_POL_STATE_STOPPING:
                return "stopping";
        case NRS_POL_STATE_STARTING:
                return "starting";
        case NRS_POL_STATE_STARTED:
                return "started";
        }
}

/**
 * Obtains status information for \a policy.
 *
 * Information is copied in \a info.
 *
 * \param[in] policy The policy
 * \param[out] info Holds returned status information
 */
static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
                                       struct ptlrpc_nrs_pol_info *info)
{
        assert_spin_locked(&policy->pol_nrs->nrs_lock);

        memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);

        info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
        info->pi_state = policy->pol_state;
        /**
         * XXX: These are accessed without holding
         * ptlrpc_service_part::scp_req_lock.
         */
        info->pi_req_queued = policy->pol_req_queued;
        info->pi_req_started = policy->pol_req_started;
}

/**
 * Reads and prints policy status information for all policies of a PTLRPC
 * service.
 */
static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        struct ptlrpc_nrs *nrs;
        struct ptlrpc_nrs_policy *policy;
        struct ptlrpc_nrs_pol_info *infos;
        struct ptlrpc_nrs_pol_info tmp;
        unsigned num_pols;
        unsigned pol_idx = 0;
        bool hp = false;
        int i;
        int rc = 0;

        /**
         * Serialize NRS core lprocfs operations with policy registration/
         * unregistration.
         */
        mutex_lock(&nrs_core.nrs_mutex);

        /**
         * Use the first service partition's regular NRS head in order to obtain
         * the number of policies registered with NRS heads of this service. All
         * service partitions will have the same number of policies.
         */
        nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);

        spin_lock(&nrs->nrs_lock);
        num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
        spin_unlock(&nrs->nrs_lock);

        infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS);
        if (!infos) {
                rc = -ENOMEM;
                goto unlock;
        }
again:

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                nrs = nrs_svcpt2nrs(svcpt, hp);
                spin_lock(&nrs->nrs_lock);

                pol_idx = 0;

                list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) {
                        LASSERT(pol_idx < num_pols);

                        nrs_policy_get_info_locked(policy, &tmp);
                        /**
                         * Copy values when handling the first service
                         * partition.
                         */
                        if (i == 0) {
                                memcpy(infos[pol_idx].pi_name, tmp.pi_name,
                                       NRS_POL_NAME_MAX);
                                memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
                                       sizeof(tmp.pi_state));
                                infos[pol_idx].pi_fallback = tmp.pi_fallback;
                        /**
                         * For the rest of the service partitions
                         * sanity-check the values we get.
                         */
                        } else {
                                LASSERT(strncmp(infos[pol_idx].pi_name,
                                                tmp.pi_name,
                                                NRS_POL_NAME_MAX) == 0);
                                /**
                                 * Not asserting ptlrpc_nrs_pol_info::pi_state,
                                 * because it may be different between
                                 * instances of the same policy in different
                                 * service partitions.
                                 */
                                LASSERT(infos[pol_idx].pi_fallback ==
                                        tmp.pi_fallback);
                        }

                        infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
                        infos[pol_idx].pi_req_started += tmp.pi_req_started;

                        pol_idx++;
                }
                spin_unlock(&nrs->nrs_lock);
        }

        /**
         * Policy status information output is in YAML format.
         * For example:
         *
         *      regular_requests:
         *        - name: fifo
         *          state: started
         *          fallback: yes
         *          queued: 0
         *          active: 0
         *
         *        - name: crrn
         *          state: started
         *          fallback: no
         *          queued: 2015
         *          active: 384
         *
         *      high_priority_requests:
         *        - name: fifo
         *          state: started
         *          fallback: yes
         *          queued: 0
         *          active: 2
         *
         *        - name: crrn
         *          state: stopped
         *          fallback: no
         *          queued: 0
         *          active: 0
         */
        seq_printf(m, "%s\n",
                   !hp ? "\nregular_requests:" : "high_priority_requests:");

        for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
                seq_printf(m, "  - name: %s\n"
                              "    state: %s\n"
                              "    fallback: %s\n"
                              "    queued: %-20d\n"
                              "    active: %-20d\n\n",
                           infos[pol_idx].pi_name,
                           nrs_state2str(infos[pol_idx].pi_state),
                           infos[pol_idx].pi_fallback ? "yes" : "no",
                           (int)infos[pol_idx].pi_req_queued,
                           (int)infos[pol_idx].pi_req_started);
        }

        if (!hp && nrs_svc_has_hp(svc)) {
                memset(infos, 0, num_pols * sizeof(*infos));

                /**
                 * Redo the processing for the service's HP NRS heads' policies.
                 */
                hp = true;
                goto again;
        }

        kfree(infos);
unlock:
        mutex_unlock(&nrs_core.nrs_mutex);

        return rc;
}

/**
 * The longest valid command string is the maximum policy name size, plus the
 * length of the " reg" substring
 */
#define LPROCFS_NRS_WR_MAX_CMD  (NRS_POL_NAME_MAX + sizeof(" reg") - 1)

/**
 * Starts and stops a given policy on a PTLRPC service.
 *
 * Commands consist of the policy name, followed by an optional [reg|hp] token;
 * if the optional token is omitted, the operation is performed on both the
 * regular and high-priority (if the service has one) NRS head.
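 *
 * For example (illustrative; the nrs_policies file lives in the service's
 * debugfs directory):
 *
 *   echo "fifo"    > nrs_policies    # start fifo on both NRS heads
 *   echo "crrn hp" > nrs_policies    # start crrn on the high-priority head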
 */
static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
                                            const char __user *buffer,
                                            size_t count, loff_t *off)
{
        struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
        enum ptlrpc_nrs_queue_type queue = PTLRPC_NRS_QUEUE_BOTH;
        char *cmd;
        char *cmd_copy = NULL;
        char *token;
        int rc = 0;

        if (count >= LPROCFS_NRS_WR_MAX_CMD)
                return -EINVAL;

        cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS);
        if (!cmd)
                return -ENOMEM;
        /**
         * strsep() modifies its argument, so keep a copy
         */
        cmd_copy = cmd;

        if (copy_from_user(cmd, buffer, count)) {
                rc = -EFAULT;
                goto out;
        }

        cmd[count] = '\0';

        token = strsep(&cmd, " ");

        if (strlen(token) > NRS_POL_NAME_MAX - 1) {
                rc = -EINVAL;
                goto out;
        }

        /**
         * No [reg|hp] token has been specified
         */
        if (!cmd)
                goto default_queue;

        /**
         * The second token is either NULL, or an optional [reg|hp] string
         */
        if (strcmp(cmd, "reg") == 0)
                queue = PTLRPC_NRS_QUEUE_REG;
        else if (strcmp(cmd, "hp") == 0)
                queue = PTLRPC_NRS_QUEUE_HP;
        else {
                rc = -EINVAL;
                goto out;
        }

default_queue:

        if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
                rc = -ENODEV;
                goto out;
        } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
                queue = PTLRPC_NRS_QUEUE_REG;

        /**
         * Serialize NRS core lprocfs operations with policy registration/
         * unregistration.
         */
        mutex_lock(&nrs_core.nrs_mutex);

        rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
                                       false, NULL);

        mutex_unlock(&nrs_core.nrs_mutex);
out:
        kfree(cmd_copy);

        return rc < 0 ? rc : count;
}

LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs);

/** @} nrs */

struct ptlrpc_srh_iterator {
        int srhi_idx;
        __u64 srhi_seq;
        struct ptlrpc_request *srhi_req;
};

static int
ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
                                    struct ptlrpc_srh_iterator *srhi,
                                    __u64 seq)
{
        struct list_head *e;
        struct ptlrpc_request *req;

        if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
            srhi->srhi_seq <= seq) {
                /* If srhi_req was set previously, hasn't been culled and
                 * we're searching for a seq on or after it (i.e. more
                 * recent), search from it onwards.
                 * Since the service history is LRU (i.e. culled reqs will
                 * be near the head), we shouldn't have to do long
                 * re-scans
                 */
                LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
                         "%s:%d: seek seq %llu, request seq %llu\n",
                         svcpt->scp_service->srv_name, svcpt->scp_cpt,
                         srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
                LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
                         "%s:%d: seek offset %llu, request seq %llu, last culled %llu\n",
                         svcpt->scp_service->srv_name, svcpt->scp_cpt,
                         seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
                e = &srhi->srhi_req->rq_history_list;
        } else {
                /* search from start */
                e = svcpt->scp_hist_reqs.next;
        }

        while (e != &svcpt->scp_hist_reqs) {
                req = list_entry(e, struct ptlrpc_request, rq_history_list);

                if (req->rq_history_seq >= seq) {
                        srhi->srhi_seq = req->rq_history_seq;
                        srhi->srhi_req = req;
                        return 0;
                }
                e = e->next;
        }

        return -ENOENT;
}

/*
 * ptlrpc history sequence is used as "position" of seq_file, in some cases,
 * seq_read() will increase "position" to indicate reading the next
 * element, however, low bits of history sequence are reserved for CPT id
 * (check the details from comments before ptlrpc_req_add_history), which
 * means seq_read() might change CPT id of history sequence and never
 * finish reading of requests on a CPT. To make it work, we have to shift
 * CPT id to high bits and timestamp to low bits, so seq_read() will only
 * increase timestamp which can correctly indicate the next position.
 */

/* convert seq_file pos to cpt */
#define PTLRPC_REQ_POS2CPT(svc, pos)                    \
        ((svc)->srv_cpt_bits == 0 ? 0 :                 \
         (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))

/* make up seq_file pos from cpt */
#define PTLRPC_REQ_CPT2POS(svc, cpt)                    \
        ((svc)->srv_cpt_bits == 0 ? 0 :                 \
         (cpt) << (64 - (svc)->srv_cpt_bits))

/* convert sequence to position */
#define PTLRPC_REQ_SEQ2POS(svc, seq)                    \
        ((svc)->srv_cpt_bits == 0 ? (seq) :             \
         ((seq) >> (svc)->srv_cpt_bits) |               \
         ((seq) << (64 - (svc)->srv_cpt_bits)))

/* convert position to sequence */
#define PTLRPC_REQ_POS2SEQ(svc, pos)                    \
        ((svc)->srv_cpt_bits == 0 ? (pos) :             \
         ((__u64)(pos) << (svc)->srv_cpt_bits) |        \
         ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))
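
/*
 * Example (following the macros above): with srv_cpt_bits == 2, a history
 * sequence on CPT 1 with per-CPT counter 5 is seq = (5 << 2) | 1;
 * PTLRPC_REQ_SEQ2POS rotates the CPT id into the top bits, giving
 * pos = (1ULL << 62) | 5, so seq_read() incrementing pos only advances the
 * per-CPT counter and never strays into another CPT's id bits.
 */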

static void *
ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
{
        struct ptlrpc_service *svc = s->private;
        struct ptlrpc_service_part *svcpt;
        struct ptlrpc_srh_iterator *srhi;
        unsigned int cpt;
        int rc;
        int i;

        if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
                CWARN("Failed to read request history because size of loff_t %d can't match size of u64\n",
                      (int)sizeof(loff_t));
                return NULL;
        }

        srhi = kzalloc(sizeof(*srhi), GFP_NOFS);
        if (!srhi)
                return NULL;

        srhi->srhi_seq = 0;
        srhi->srhi_req = NULL;

        cpt = PTLRPC_REQ_POS2CPT(svc, *pos);

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (i < cpt) /* skip */
                        continue;
                if (i > cpt) /* make up the lowest position for this CPT */
                        *pos = PTLRPC_REQ_CPT2POS(svc, i);

                spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
                                PTLRPC_REQ_POS2SEQ(svc, *pos));
                spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
                        srhi->srhi_idx = i;
                        return srhi;
                }
        }

        kfree(srhi);
        return NULL;
}

static void
ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
{
        struct ptlrpc_srh_iterator *srhi = iter;

        kfree(srhi);
}

static void *
ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
                                    void *iter, loff_t *pos)
{
        struct ptlrpc_service *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        struct ptlrpc_service_part *svcpt;
        __u64 seq;
        int rc;
        int i;

        for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
                svcpt = svc->srv_parts[i];

                if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
                        srhi->srhi_req = NULL;
                        seq = srhi->srhi_seq = 0;
                } else { /* the next sequence */
                        seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
                }

                spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
                spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
                        srhi->srhi_idx = i;
                        return srhi;
                }
        }

        kfree(srhi);
        return NULL;
}

static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
{
        struct ptlrpc_service *svc = s->private;
        struct ptlrpc_srh_iterator *srhi = iter;
        struct ptlrpc_service_part *svcpt;
        struct ptlrpc_request *req;
        int rc;

        LASSERT(srhi->srhi_idx < svc->srv_ncpts);

        svcpt = svc->srv_parts[srhi->srhi_idx];

        spin_lock(&svcpt->scp_lock);

        rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);

        if (rc == 0) {
                char nidstr[LNET_NIDSTR_SIZE];

                req = srhi->srhi_req;

                libcfs_nid2str_r(req->rq_self, nidstr, sizeof(nidstr));
                /* Print common req fields.
                 * CAVEAT EMPTOR: we're racing with the service handler
                 * here. The request could contain any old crap, so you
                 * must be just as careful as the service's request
                 * parser. Currently I only print stuff here I know is OK
                 * to look at coz it was set up in request_in_callback()!!!
                 */
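                /* Output fields: history seq, local NID, peer, xid, request
                 * length, RPC phase, arrival time, seconds from arrival to
                 * send and (+/-) seconds relative to the deadline.
                 */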
                seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
                           req->rq_history_seq, nidstr,
                           libcfs_id2str(req->rq_peer), req->rq_xid,
                           req->rq_reqlen, ptlrpc_rqphase2str(req),
                           (s64)req->rq_arrival_time.tv_sec,
                           (long)(req->rq_sent - req->rq_arrival_time.tv_sec),
                           (long)(req->rq_sent - req->rq_deadline));
                if (!svc->srv_ops.so_req_printer)
                        seq_putc(s, '\n');
                else
                        svc->srv_ops.so_req_printer(s, srhi->srhi_req);
        }

        spin_unlock(&svcpt->scp_lock);
        return rc;
}

static int
ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
{
        static struct seq_operations sops = {
                .start = ptlrpc_lprocfs_svc_req_history_start,
                .stop  = ptlrpc_lprocfs_svc_req_history_stop,
                .next  = ptlrpc_lprocfs_svc_req_history_next,
                .show  = ptlrpc_lprocfs_svc_req_history_show,
        };
        struct seq_file *seqf;
        int rc;

        rc = seq_open(file, &sops);
        if (rc)
                return rc;

        seqf = file->private_data;
        seqf->private = inode->i_private;
        return 0;
}

/* See also lprocfs_rd_timeouts */
static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
{
        struct ptlrpc_service *svc = m->private;
        struct ptlrpc_service_part *svcpt;
        struct dhms ts;
        time64_t worstt;
        unsigned int cur;
        unsigned int worst;
        int i;

        if (AT_OFF) {
                seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
                           obd_timeout);
                return 0;
        }

        ptlrpc_service_for_each_part(svcpt, i, svc) {
                cur = at_get(&svcpt->scp_at_estimate);
                worst = svcpt->scp_at_estimate.at_worst_ever;
                worstt = svcpt->scp_at_estimate.at_worst_time;
                s2dhms(&ts, ktime_get_real_seconds() - worstt);

                seq_printf(m, "%10s : cur %3u worst %3u (at %lld, "
                           DHMS_FMT " ago) ", "service",
                           cur, worst, (s64)worstt, DHMS_VARS(&ts));

                lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
        }

        return 0;
}

LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_timeouts);

static ssize_t high_priority_ratio_show(struct kobject *kobj,
                                        struct attribute *attr,
                                        char *buf)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        return sprintf(buf, "%d\n", svc->srv_hpreq_ratio);
}

static ssize_t high_priority_ratio_store(struct kobject *kobj,
                                         struct attribute *attr,
                                         const char *buffer,
                                         size_t count)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);
        int rc;
        int val;

        rc = kstrtoint(buffer, 10, &val);
        if (rc < 0)
                return rc;

        if (val < 0)
                return -ERANGE;

        spin_lock(&svc->srv_lock);
        svc->srv_hpreq_ratio = val;
        spin_unlock(&svc->srv_lock);

        return count;
}
LUSTRE_RW_ATTR(high_priority_ratio);

static struct attribute *ptlrpc_svc_attrs[] = {
        &lustre_attr_threads_min.attr,
        &lustre_attr_threads_started.attr,
        &lustre_attr_threads_max.attr,
        &lustre_attr_high_priority_ratio.attr,
        NULL,
};

static void ptlrpc_sysfs_svc_release(struct kobject *kobj)
{
        struct ptlrpc_service *svc = container_of(kobj, struct ptlrpc_service,
                                                  srv_kobj);

        complete(&svc->srv_kobj_unregister);
}

static struct kobj_type ptlrpc_svc_ktype = {
        .default_attrs  = ptlrpc_svc_attrs,
        .sysfs_ops      = &lustre_sysfs_ops,
        .release        = ptlrpc_sysfs_svc_release,
};

void ptlrpc_sysfs_unregister_service(struct ptlrpc_service *svc)
{
        /* Let's see if we had a chance at initialization first */
        if (svc->srv_kobj.kset) {
                kobject_put(&svc->srv_kobj);
                wait_for_completion(&svc->srv_kobj_unregister);
        }
}

int ptlrpc_sysfs_register_service(struct kset *parent,
                                  struct ptlrpc_service *svc)
{
        int rc;

        svc->srv_kobj.kset = parent;
        init_completion(&svc->srv_kobj_unregister);
        rc = kobject_init_and_add(&svc->srv_kobj, &ptlrpc_svc_ktype, NULL,
                                  "%s", svc->srv_name);

        return rc;
}

void ptlrpc_ldebugfs_register_service(struct dentry *entry,
                                      struct ptlrpc_service *svc)
{
        struct lprocfs_vars lproc_vars[] = {
                {.name = "req_buffer_history_len",
                 .fops = &ptlrpc_lprocfs_req_history_len_fops,
                 .data = svc},
                {.name = "req_buffer_history_max",
                 .fops = &ptlrpc_lprocfs_req_history_max_fops,
                 .data = svc},
                {.name = "timeouts",
                 .fops = &ptlrpc_lprocfs_timeouts_fops,
                 .data = svc},
                {.name = "nrs_policies",
                 .fops = &ptlrpc_lprocfs_nrs_fops,
                 .data = svc},
                {NULL}
        };
        static const struct file_operations req_history_fops = {
                .owner = THIS_MODULE,
                .open = ptlrpc_lprocfs_svc_req_history_open,
                .read = seq_read,
                .llseek = seq_lseek,
                .release = lprocfs_seq_release,
        };

        int rc;

        ptlrpc_ldebugfs_register(entry, svc->srv_name,
                                 "stats", &svc->srv_debugfs_entry,
                                 &svc->srv_stats);

        if (IS_ERR_OR_NULL(svc->srv_debugfs_entry))
                return;

        ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL);

        rc = ldebugfs_seq_create(svc->srv_debugfs_entry, "req_history",
                                 0400, &req_history_fops, svc);
        if (rc)
                CWARN("Error adding the req_history file\n");
}

void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
{
        ptlrpc_ldebugfs_register(obddev->obd_debugfs_entry, NULL, "stats",
                                 &obddev->obd_svc_debugfs_entry,
                                 &obddev->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);

void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
{
        struct lprocfs_stats *svc_stats;
        __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
        int opc = opcode_offset(op);

        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (!svc_stats || opc <= 0)
                return;
        LASSERT(opc < LUSTRE_MAX_OPCODES);
        if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
                lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
}

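/*
 * Account a bulk transfer of \a bytes against the client's read_bytes or
 * write_bytes counter, chosen from the request's OST_READ/OST_WRITE opcode.
 */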
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
{
        struct lprocfs_stats *svc_stats;
        int idx;

        if (!req->rq_import)
                return;
        svc_stats = req->rq_import->imp_obd->obd_svc_stats;
        if (!svc_stats)
                return;
        idx = lustre_msg_get_opc(req->rq_reqmsg);
        switch (idx) {
        case OST_READ:
                idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
                break;
        case OST_WRITE:
                idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
                break;
        default:
                LASSERTF(0, "unsupported opcode %u\n", idx);
                break;
        }

        lprocfs_counter_add(svc_stats, idx, bytes);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_brw);

void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
{
        if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry))
                ldebugfs_remove(&svc->srv_debugfs_entry);

        if (svc->srv_stats)
                lprocfs_free_stats(&svc->srv_stats);
}

void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
{
        if (!IS_ERR_OR_NULL(obd->obd_svc_debugfs_entry))
                ldebugfs_remove(&obd->obd_svc_debugfs_entry);

        if (obd->obd_svc_stats)
                lprocfs_free_stats(&obd->obd_svc_stats);
}
EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);

#undef BUFLEN

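/*
 * Handler typically wired to a client obd's "ping" file: writing anything
 * sends a synchronous OBD_PING to the import's target and waits for the
 * reply (or an error) before returning.
 */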
int lprocfs_wr_ping(struct file *file, const char __user *buffer,
                    size_t count, loff_t *off)
{
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        struct ptlrpc_request *req;
        int rc;

        rc = lprocfs_climp_check(obd);
        if (rc)
                return rc;

        req = ptlrpc_prep_ping(obd->u.cli.cl_import);
        up_read(&obd->u.cli.cl_sem);
        if (!req)
                return -ENOMEM;

        req->rq_send_state = LUSTRE_IMP_FULL;

        rc = ptlrpc_queue_wait(req);

        ptlrpc_req_finished(req);
        if (rc >= 0)
                return count;
        return rc;
}
EXPORT_SYMBOL(lprocfs_wr_ping);

/* Write the connection UUID to this file to attempt to connect to that node.
 * The connection UUID is a node's primary NID. For example,
 * "echo connection=192.168.0.1@tcp0::instance > .../import".
 */
int lprocfs_wr_import(struct file *file, const char __user *buffer,
                      size_t count, loff_t *off)
{
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        struct obd_import *imp = obd->u.cli.cl_import;
        char *kbuf = NULL;
        char *uuid;
        char *ptr;
        int do_reconn = 1;
        const char prefix[] = "connection=";
        const int prefix_len = sizeof(prefix) - 1;

        if (count > PAGE_SIZE - 1 || count <= prefix_len)
                return -EINVAL;

        kbuf = kzalloc(count + 1, GFP_NOFS);
        if (!kbuf)
                return -ENOMEM;

        if (copy_from_user(kbuf, buffer, count)) {
                count = -EFAULT;
                goto out;
        }

        kbuf[count] = 0;

        /* only support connection=uuid::instance now */
        if (strncmp(prefix, kbuf, prefix_len) != 0) {
                count = -EINVAL;
                goto out;
        }

        uuid = kbuf + prefix_len;
        ptr = strstr(uuid, "::");
        if (ptr) {
                __u32 inst;
                char *endptr;

                *ptr = 0;
                do_reconn = 0;
                ptr += strlen("::");
                inst = simple_strtoul(ptr, &endptr, 10);
                if (*endptr) {
                        CERROR("config: wrong instance # %s\n", ptr);
                } else if (inst != imp->imp_connect_data.ocd_instance) {
                        CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted target(%u/%u), reconnecting...\n",
                               imp->imp_obd->obd_name,
                               imp->imp_connect_data.ocd_instance, inst);
                        do_reconn = 1;
                } else {
                        CDEBUG(D_INFO, "IR: %s has already been connecting to new target(%u)\n",
                               imp->imp_obd->obd_name, inst);
                }
        }

        if (do_reconn)
                ptlrpc_recover_import(imp, uuid, 1);

out:
        kfree(kbuf);
        return count;
}
EXPORT_SYMBOL(lprocfs_wr_import);

int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
{
        struct obd_device *obd = m->private;
        struct obd_import *imp = obd->u.cli.cl_import;
        int rc;

        rc = lprocfs_climp_check(obd);
        if (rc)
                return rc;

        seq_printf(m, "%d\n", !imp->imp_no_pinger_recover);
        up_read(&obd->u.cli.cl_sem);

        return 0;
}
EXPORT_SYMBOL(lprocfs_rd_pinger_recov);

int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
                            size_t count, loff_t *off)
{
        struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
        struct client_obd *cli = &obd->u.cli;
        struct obd_import *imp = cli->cl_import;
        int rc, val;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc < 0)
                return rc;

        if (val != 0 && val != 1)
                return -ERANGE;

        rc = lprocfs_climp_check(obd);
        if (rc)
                return rc;

        spin_lock(&imp->imp_lock);
        imp->imp_no_pinger_recover = !val;
        spin_unlock(&imp->imp_lock);
        up_read(&obd->u.cli.cl_sem);

        return count;
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);