1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * lustre/mgc/mgc_request.c
37 *
38 * Author: Nathan Rutman <nathan@clusterfs.com>
39 */
40
41 #define DEBUG_SUBSYSTEM S_MGC
42 #define D_MGC D_CONFIG /*|D_WARNING*/
43
44 #include <linux/module.h>
45 #include "../include/obd_class.h"
46 #include "../include/lustre_dlm.h"
47 #include "../include/lprocfs_status.h"
48 #include "../include/lustre_log.h"
49 #include "../include/lustre_disk.h"
50
51 #include "mgc_internal.h"
52
53 static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
54 int type)
55 {
56 __u64 resname = 0;
57
58 if (len > sizeof(resname)) {
59 CERROR("name too long: %s\n", name);
60 return -EINVAL;
61 }
62 if (len <= 0) {
63 CERROR("missing name: %s\n", name);
64 return -EINVAL;
65 }
66 memcpy(&resname, name, len);
67
68 /* Always use the same endianness for the resid */
69 memset(res_id, 0, sizeof(*res_id));
70 res_id->name[0] = cpu_to_le64(resname);
71 /* XXX: unfortunately, sptlprc and config llog share one lock */
72 switch (type) {
73 case CONFIG_T_CONFIG:
74 case CONFIG_T_SPTLRPC:
75 resname = 0;
76 break;
77 case CONFIG_T_RECOVER:
78 case CONFIG_T_PARAMS:
79 resname = type;
80 break;
81 default:
82 LBUG();
83 }
84 res_id->name[1] = cpu_to_le64(resname);
85 CDEBUG(D_MGC, "log %s to resid %#llx/%#llx (%.8s)\n", name,
86 res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
87 return 0;
88 }
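/*
 * Illustrative note (editor's annotation, not part of the original source):
 * for the name "lustre" with type CONFIG_T_CONFIG, the bytes of "lustre" are
 * packed little-endian into res_id->name[0] and res_id->name[1] stays 0; for
 * CONFIG_T_RECOVER or CONFIG_T_PARAMS, res_id->name[1] carries the type, so
 * the config and recovery logs of one filesystem map to distinct resources.
 */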
89
90 int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
91 {
92 	/* fsname is at most 8 chars long and may contain a "-",
93 	 * e.g. "lustre", "SUN-000" */
94 return mgc_name2resid(fsname, strlen(fsname), res_id, type);
95 }
96 EXPORT_SYMBOL(mgc_fsname2resid);
97
98 static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
99 {
100 char *name_end;
101 int len;
102
103 	/* A logname consists of "fsname-nodetype",
104 	 * e.g. "lustre-MDT0001", "SUN-000-client";
105 	 * the llog "params" is an exception. */
106 name_end = strrchr(logname, '-');
107 if (!name_end)
108 len = strlen(logname);
109 else
110 len = name_end - logname;
111 return mgc_name2resid(logname, len, res_id, type);
112 }
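/*
 * Example (editor's annotation): for logname "lustre-MDT0001" the trailing
 * "-MDT0001" is stripped and the resid is derived from "lustre"; for the
 * "params" llog there is no '-', so the whole name is used.
 */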
113
114 /********************** config llog list **********************/
115 static LIST_HEAD(config_llog_list);
116 static DEFINE_SPINLOCK(config_list_lock);
117
118 /* Take a reference to a config log */
119 static int config_log_get(struct config_llog_data *cld)
120 {
121 atomic_inc(&cld->cld_refcount);
122 CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
123 atomic_read(&cld->cld_refcount));
124 return 0;
125 }
126
127 /* Drop a reference to a config log. When no longer referenced,
128 we can free the config log data */
129 static void config_log_put(struct config_llog_data *cld)
130 {
131 CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
132 atomic_read(&cld->cld_refcount));
133 LASSERT(atomic_read(&cld->cld_refcount) > 0);
134
135 /* spinlock to make sure no item with 0 refcount in the list */
136 if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
137 list_del(&cld->cld_list_chain);
138 spin_unlock(&config_list_lock);
139
140 CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
141
142 if (cld->cld_recover)
143 config_log_put(cld->cld_recover);
144 if (cld->cld_sptlrpc)
145 config_log_put(cld->cld_sptlrpc);
146 if (cld->cld_params)
147 config_log_put(cld->cld_params);
148 if (cld_is_sptlrpc(cld))
149 sptlrpc_conf_log_stop(cld->cld_logname);
150
151 class_export_put(cld->cld_mgcexp);
152 OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
153 }
154 }
155
156 /* Find a config log by name */
157 static
158 struct config_llog_data *config_log_find(char *logname,
159 struct config_llog_instance *cfg)
160 {
161 struct config_llog_data *cld;
162 struct config_llog_data *found = NULL;
163 void *instance;
164
165 LASSERT(logname != NULL);
166
167 instance = cfg ? cfg->cfg_instance : NULL;
168 spin_lock(&config_list_lock);
169 list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
170 /* check if instance equals */
171 if (instance != cld->cld_cfg.cfg_instance)
172 continue;
173
174 /* instance may be NULL, should check name */
175 if (strcmp(logname, cld->cld_logname) == 0) {
176 found = cld;
177 break;
178 }
179 }
180 if (found) {
181 atomic_inc(&found->cld_refcount);
182 LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
183 }
184 spin_unlock(&config_list_lock);
185 return found;
186 }
187
188 static
189 struct config_llog_data *do_config_log_add(struct obd_device *obd,
190 char *logname,
191 int type,
192 struct config_llog_instance *cfg,
193 struct super_block *sb)
194 {
195 struct config_llog_data *cld;
196 int rc;
197
198 CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
199 cfg ? cfg->cfg_instance : NULL);
200
201 OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
202 if (!cld)
203 return ERR_PTR(-ENOMEM);
204
205 strcpy(cld->cld_logname, logname);
206 if (cfg)
207 cld->cld_cfg = *cfg;
208 else
209 cld->cld_cfg.cfg_callback = class_config_llog_handler;
210 mutex_init(&cld->cld_lock);
211 cld->cld_cfg.cfg_last_idx = 0;
212 cld->cld_cfg.cfg_flags = 0;
213 cld->cld_cfg.cfg_sb = sb;
214 cld->cld_type = type;
215 atomic_set(&cld->cld_refcount, 1);
216
217 /* Keep the mgc around until we are done */
218 cld->cld_mgcexp = class_export_get(obd->obd_self_export);
219
220 if (cld_is_sptlrpc(cld)) {
221 sptlrpc_conf_log_start(logname);
222 cld->cld_cfg.cfg_obdname = obd->obd_name;
223 }
224
225 rc = mgc_logname2resid(logname, &cld->cld_resid, type);
226
227 spin_lock(&config_list_lock);
228 list_add(&cld->cld_list_chain, &config_llog_list);
229 spin_unlock(&config_list_lock);
230
231 if (rc) {
232 config_log_put(cld);
233 return ERR_PTR(rc);
234 }
235
236 if (cld_is_sptlrpc(cld)) {
237 rc = mgc_process_log(obd, cld);
238 if (rc && rc != -ENOENT)
239 CERROR("failed processing sptlrpc log: %d\n", rc);
240 }
241
242 return cld;
243 }
244
245 static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
246 char *fsname,
247 struct config_llog_instance *cfg,
248 struct super_block *sb)
249 {
250 struct config_llog_instance lcfg = *cfg;
251 struct lustre_sb_info *lsi = s2lsi(sb);
252 struct config_llog_data *cld;
253 char logname[32];
254
255 if (IS_OST(lsi))
256 return NULL;
257
258 /* for osp-on-ost, see lustre_start_osp() */
259 if (IS_MDT(lsi) && lcfg.cfg_instance)
260 return NULL;
261
262 	/* We have to use different llogs for clients and MDTs for CMD,
263 	 * where only the clients are notified if one of the CMD servers restarts. */
264 LASSERT(strlen(fsname) < sizeof(logname) / 2);
265 strcpy(logname, fsname);
266 if (IS_SERVER(lsi)) { /* mdt */
267 LASSERT(lcfg.cfg_instance == NULL);
268 lcfg.cfg_instance = sb;
269 strcat(logname, "-mdtir");
270 } else {
271 LASSERT(lcfg.cfg_instance != NULL);
272 strcat(logname, "-cliir");
273 }
274
275 cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
276 return cld;
277 }
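/*
 * Example (editor's annotation): for fsname "lustre" the recovery log added
 * here is "lustre-mdtir" on an MDT and "lustre-cliir" on a client; OSTs (and
 * MDTs with a cfg_instance set) get no recovery log at all.
 */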
278
279 static struct config_llog_data *config_params_log_add(struct obd_device *obd,
280 struct config_llog_instance *cfg, struct super_block *sb)
281 {
282 struct config_llog_instance lcfg = *cfg;
283 struct config_llog_data *cld;
284
285 lcfg.cfg_instance = sb;
286
287 cld = do_config_log_add(obd, PARAMS_FILENAME, CONFIG_T_PARAMS,
288 &lcfg, sb);
289
290 return cld;
291 }
292
293 /** Add this log to the list of active logs watched by an MGC.
294 * Active means we're watching for updates.
295 * We have one active log per "mount" - client instance or servername.
296 * Each instance may be at a different point in the log.
297 */
298 static int config_log_add(struct obd_device *obd, char *logname,
299 struct config_llog_instance *cfg,
300 struct super_block *sb)
301 {
302 struct lustre_sb_info *lsi = s2lsi(sb);
303 struct config_llog_data *cld;
304 struct config_llog_data *sptlrpc_cld;
305 struct config_llog_data *params_cld;
306 char seclogname[32];
307 char *ptr;
308 int rc;
309
310 CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
311
312 /*
313 	 * For each regular log, the corresponding sptlrpc log name is
314 	 * <fsname>-sptlrpc; multiple regular logs may share one sptlrpc log.
315 */
316 ptr = strrchr(logname, '-');
317 if (ptr == NULL || ptr - logname > 8) {
318 CERROR("logname %s is too long\n", logname);
319 return -EINVAL;
320 }
321
322 memcpy(seclogname, logname, ptr - logname);
323 strcpy(seclogname + (ptr - logname), "-sptlrpc");
324
325 sptlrpc_cld = config_log_find(seclogname, NULL);
326 if (sptlrpc_cld == NULL) {
327 sptlrpc_cld = do_config_log_add(obd, seclogname,
328 CONFIG_T_SPTLRPC, NULL, NULL);
329 if (IS_ERR(sptlrpc_cld)) {
330 CERROR("can't create sptlrpc log: %s\n", seclogname);
331 rc = PTR_ERR(sptlrpc_cld);
332 goto out_err;
333 }
334 }
335 params_cld = config_params_log_add(obd, cfg, sb);
336 if (IS_ERR(params_cld)) {
337 rc = PTR_ERR(params_cld);
338 CERROR("%s: can't create params log: rc = %d\n",
339 obd->obd_name, rc);
340 goto out_err1;
341 }
342
343 cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
344 if (IS_ERR(cld)) {
345 CERROR("can't create log: %s\n", logname);
346 rc = PTR_ERR(cld);
347 goto out_err2;
348 }
349
350 cld->cld_sptlrpc = sptlrpc_cld;
351 cld->cld_params = params_cld;
352
353 LASSERT(lsi->lsi_lmd);
354 if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
355 struct config_llog_data *recover_cld;
356 *strrchr(seclogname, '-') = 0;
357 recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
358 if (IS_ERR(recover_cld)) {
359 rc = PTR_ERR(recover_cld);
360 goto out_err3;
361 }
362 cld->cld_recover = recover_cld;
363 }
364
365 return 0;
366
367 out_err3:
368 config_log_put(cld);
369
370 out_err2:
371 config_log_put(params_cld);
372
373 out_err1:
374 config_log_put(sptlrpc_cld);
375
376 out_err:
377 return rc;
378 }
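/*
 * Example (editor's annotation): for logname "lustre-client" this function
 * sets up four watched logs: "lustre-sptlrpc" (shared security config),
 * the PARAMS_FILENAME log, "lustre-client" itself, and, unless LMD_FLG_NOIR
 * is set, the imperative-recovery log "lustre-cliir".
 */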
379
380 DEFINE_MUTEX(llog_process_lock);
381
382 /** Stop watching for updates on this log.
383 */
384 static int config_log_end(char *logname, struct config_llog_instance *cfg)
385 {
386 struct config_llog_data *cld;
387 struct config_llog_data *cld_sptlrpc = NULL;
388 struct config_llog_data *cld_params = NULL;
389 struct config_llog_data *cld_recover = NULL;
390 int rc = 0;
391
392 cld = config_log_find(logname, cfg);
393 if (cld == NULL)
394 return -ENOENT;
395
396 mutex_lock(&cld->cld_lock);
397 /*
398 	 * if cld_stopping is set, it means we did not start the log and thus
399 	 * do not own the start ref. This can happen after a previous umount:
400 	 * the cld is still hanging there waiting for the lock cancel, and we
401 	 * remount again but fail partway through and call log_end without
402 	 * ever calling start_log.
403 */
404 if (unlikely(cld->cld_stopping)) {
405 mutex_unlock(&cld->cld_lock);
406 /* drop the ref from the find */
407 config_log_put(cld);
408 return rc;
409 }
410
411 cld->cld_stopping = 1;
412
413 cld_recover = cld->cld_recover;
414 cld->cld_recover = NULL;
415 mutex_unlock(&cld->cld_lock);
416
417 if (cld_recover) {
418 mutex_lock(&cld_recover->cld_lock);
419 cld_recover->cld_stopping = 1;
420 mutex_unlock(&cld_recover->cld_lock);
421 config_log_put(cld_recover);
422 }
423
424 spin_lock(&config_list_lock);
425 cld_sptlrpc = cld->cld_sptlrpc;
426 cld->cld_sptlrpc = NULL;
427 cld_params = cld->cld_params;
428 cld->cld_params = NULL;
429 spin_unlock(&config_list_lock);
430
431 if (cld_sptlrpc)
432 config_log_put(cld_sptlrpc);
433
434 if (cld_params) {
435 mutex_lock(&cld_params->cld_lock);
436 cld_params->cld_stopping = 1;
437 mutex_unlock(&cld_params->cld_lock);
438 config_log_put(cld_params);
439 }
440
441 /* drop the ref from the find */
442 config_log_put(cld);
443 /* drop the start ref */
444 config_log_put(cld);
445
446 CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
447 rc);
448 return rc;
449 }
450
451 #if defined(CONFIG_PROC_FS)
452 int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
453 {
454 struct obd_device *obd = data;
455 struct obd_import *imp;
456 struct obd_connect_data *ocd;
457 struct config_llog_data *cld;
458
459 LPROCFS_CLIMP_CHECK(obd);
460 imp = obd->u.cli.cl_import;
461 ocd = &imp->imp_connect_data;
462
463 seq_printf(m, "imperative_recovery: %s\n",
464 OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
465 seq_printf(m, "client_state:\n");
466
467 spin_lock(&config_list_lock);
468 list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
469 if (cld->cld_recover == NULL)
470 continue;
471 seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
472 cld->cld_logname,
473 cld->cld_recover->cld_cfg.cfg_last_idx);
474 }
475 spin_unlock(&config_list_lock);
476
477 LPROCFS_CLIMP_EXIT(obd);
478 return 0;
479 }
480 #endif
481
482 /* reenqueue any lost locks */
483 #define RQ_RUNNING 0x1
484 #define RQ_NOW 0x2
485 #define RQ_LATER 0x4
486 #define RQ_STOP 0x8
487 #define RQ_PRECLEANUP 0x10
488 static int rq_state;
489 static wait_queue_head_t rq_waitq;
490 static DECLARE_COMPLETION(rq_exit);
491 static DECLARE_COMPLETION(rq_start);
492
493 static void do_requeue(struct config_llog_data *cld)
494 {
495 LASSERT(atomic_read(&cld->cld_refcount) > 0);
496
497 /* Do not run mgc_process_log on a disconnected export or an
498 export which is being disconnected. Take the client
499 semaphore to make the check non-racy. */
500 down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
501 if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
502 CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
503 mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
504 } else {
505 CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
506 cld->cld_logname);
507 }
508 up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
509 }
510
511 /* This timeout represents how many seconds the MGC should wait before
512  * re-enqueueing the config and recovery locks to the MGS. We randomize it
513  * so as not to flood the MGS.
514 */
515 #define MGC_TIMEOUT_MIN_SECONDS 5
516 #define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
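/*
 * Worked example (editor's annotation): with HZ jiffies per second the wait
 * below becomes to = 5 * HZ + rand * HZ / 100, where rand is masked to
 * 0..0x1ff centiseconds (0..5.11 s), i.e. each MGC requeues after roughly
 * 5 to 10 seconds instead of all clients hitting the MGS at once.
 */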
517
518 static int mgc_requeue_thread(void *data)
519 {
520 bool first = true;
521
522 CDEBUG(D_MGC, "Starting requeue thread\n");
523
524 /* Keep trying failed locks periodically */
525 spin_lock(&config_list_lock);
526 rq_state |= RQ_RUNNING;
527 while (1) {
528 struct l_wait_info lwi;
529 struct config_llog_data *cld, *cld_prev;
530 int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
531 int stopped = !!(rq_state & RQ_STOP);
532 int to;
533
534 /* Any new or requeued lostlocks will change the state */
535 rq_state &= ~(RQ_NOW | RQ_LATER);
536 spin_unlock(&config_list_lock);
537
538 if (first) {
539 first = false;
540 complete(&rq_start);
541 }
542
543 		/* Always wait a few seconds to allow the server that
544 		   caused the lock revocation to finish its setup, plus some
545 		   random delay so everyone doesn't try to reconnect at once. */
546 to = MGC_TIMEOUT_MIN_SECONDS * HZ;
547 to += rand * HZ / 100; /* rand is centi-seconds */
548 lwi = LWI_TIMEOUT(to, NULL, NULL);
549 l_wait_event(rq_waitq, rq_state & (RQ_STOP | RQ_PRECLEANUP),
550 &lwi);
551
552 /*
553 		 * Iterate over the list and, for each cld, process
554 		 * its dependent sptlrpc cld first (if any) and then the cld itself.
555 		 *
556 		 * It is guaranteed that any item in the list has a
557 		 * reference count > 0; and if cld_lostlock is set, at
558 		 * least one reference was taken by the previous enqueue.
559 */
560 cld_prev = NULL;
561
562 spin_lock(&config_list_lock);
563 rq_state &= ~RQ_PRECLEANUP;
564 list_for_each_entry(cld, &config_llog_list,
565 cld_list_chain) {
566 if (!cld->cld_lostlock)
567 continue;
568
569 spin_unlock(&config_list_lock);
570
571 LASSERT(atomic_read(&cld->cld_refcount) > 0);
572
573 /* Whether we enqueued again or not in mgc_process_log,
574 * we're done with the ref from the old enqueue */
575 if (cld_prev)
576 config_log_put(cld_prev);
577 cld_prev = cld;
578
579 cld->cld_lostlock = 0;
580 if (likely(!stopped))
581 do_requeue(cld);
582
583 spin_lock(&config_list_lock);
584 }
585 spin_unlock(&config_list_lock);
586 if (cld_prev)
587 config_log_put(cld_prev);
588
589 		/* Break after scanning the list so that we can drop the
590 		 * refcount of clds that lost their lock. */
591 if (unlikely(stopped)) {
592 spin_lock(&config_list_lock);
593 break;
594 }
595
596 /* Wait a bit to see if anyone else needs a requeue */
597 lwi = (struct l_wait_info) { 0 };
598 l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
599 &lwi);
600 spin_lock(&config_list_lock);
601 }
602 	/* the spinlock and the while loop guarantee RQ_NOW and RQ_LATER are not set */
603 rq_state &= ~RQ_RUNNING;
604 spin_unlock(&config_list_lock);
605
606 complete(&rq_exit);
607
608 CDEBUG(D_MGC, "Ending requeue thread\n");
609 return 0;
610 }
611
612 /* Add a cld to the list to requeue. Start the requeue thread if needed.
613 We are responsible for dropping the config log reference from here on out. */
614 static void mgc_requeue_add(struct config_llog_data *cld)
615 {
616 CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
617 cld->cld_logname, atomic_read(&cld->cld_refcount),
618 cld->cld_stopping, rq_state);
619 LASSERT(atomic_read(&cld->cld_refcount) > 0);
620
621 mutex_lock(&cld->cld_lock);
622 if (cld->cld_stopping || cld->cld_lostlock) {
623 mutex_unlock(&cld->cld_lock);
624 return;
625 }
626 /* this refcount will be released in mgc_requeue_thread. */
627 config_log_get(cld);
628 cld->cld_lostlock = 1;
629 mutex_unlock(&cld->cld_lock);
630
631 /* Hold lock for rq_state */
632 spin_lock(&config_list_lock);
633 if (rq_state & RQ_STOP) {
634 spin_unlock(&config_list_lock);
635 cld->cld_lostlock = 0;
636 config_log_put(cld);
637 } else {
638 rq_state |= RQ_NOW;
639 spin_unlock(&config_list_lock);
640 wake_up(&rq_waitq);
641 }
642 }
643
644 static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd)
645 {
646 struct llog_ctxt *ctxt;
647 int rc;
648
649 	/* Set up only the remote ctxt; the local disk context is switched
650 	 * per filesystem during mgc_fs_setup(). */
651 rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
652 &llog_client_ops);
653 if (rc)
654 return rc;
655
656 ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
657 LASSERT(ctxt);
658
659 llog_initiator_connect(ctxt);
660 llog_ctxt_put(ctxt);
661
662 return 0;
663 }
664
665 static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
666 {
667 struct llog_ctxt *ctxt;
668
669 ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
670 if (ctxt)
671 llog_cleanup(env, ctxt);
672
673 return 0;
674 }
675
676 static atomic_t mgc_count = ATOMIC_INIT(0);
677 static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
678 {
679 int rc = 0;
680 int temp;
681
682 switch (stage) {
683 case OBD_CLEANUP_EARLY:
684 break;
685 case OBD_CLEANUP_EXPORTS:
686 if (atomic_dec_and_test(&mgc_count)) {
687 LASSERT(rq_state & RQ_RUNNING);
688 /* stop requeue thread */
689 temp = RQ_STOP;
690 } else {
691 /* wakeup requeue thread to clean our cld */
692 temp = RQ_NOW | RQ_PRECLEANUP;
693 }
694 spin_lock(&config_list_lock);
695 rq_state |= temp;
696 spin_unlock(&config_list_lock);
697 wake_up(&rq_waitq);
698 if (temp & RQ_STOP)
699 wait_for_completion(&rq_exit);
700 obd_cleanup_client_import(obd);
701 rc = mgc_llog_fini(NULL, obd);
702 if (rc != 0)
703 CERROR("failed to cleanup llogging subsystems\n");
704 break;
705 }
706 return rc;
707 }
708
709 static int mgc_cleanup(struct obd_device *obd)
710 {
711 /* COMPAT_146 - old config logs may have added profiles we don't
712 know about */
713 if (obd->obd_type->typ_refcnt <= 1)
714 /* Only for the last mgc */
715 class_del_profiles();
716
717 lprocfs_obd_cleanup(obd);
718 ptlrpcd_decref();
719
720 return client_obd_cleanup(obd);
721 }
722
723 static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
724 {
725 struct lprocfs_static_vars lvars;
726 int rc;
727
728 ptlrpcd_addref();
729
730 rc = client_obd_setup(obd, lcfg);
731 if (rc)
732 goto err_decref;
733
734 rc = mgc_llog_init(NULL, obd);
735 if (rc) {
736 CERROR("failed to setup llogging subsystems\n");
737 goto err_cleanup;
738 }
739
740 lprocfs_mgc_init_vars(&lvars);
741 lprocfs_obd_setup(obd, lvars.obd_vars);
742 sptlrpc_lprocfs_cliobd_attach(obd);
743
744 if (atomic_inc_return(&mgc_count) == 1) {
745 rq_state = 0;
746 init_waitqueue_head(&rq_waitq);
747
748 /* start requeue thread */
749 rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
750 "ll_cfg_requeue"));
751 if (IS_ERR_VALUE(rc)) {
752 CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n",
753 obd->obd_name, rc);
754 goto err_cleanup;
755 }
756 /* rc is the task_struct pointer of mgc_requeue_thread. */
757 rc = 0;
758 wait_for_completion(&rq_start);
759 }
760
761 return rc;
762
763 err_cleanup:
764 client_obd_cleanup(obd);
765 err_decref:
766 ptlrpcd_decref();
767 return rc;
768 }
769
770 /* based on ll_mdc_blocking_ast */
771 static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
772 void *data, int flag)
773 {
774 struct lustre_handle lockh;
775 struct config_llog_data *cld = (struct config_llog_data *)data;
776 int rc = 0;
777
778 switch (flag) {
779 case LDLM_CB_BLOCKING:
780 /* mgs wants the lock, give it up... */
781 LDLM_DEBUG(lock, "MGC blocking CB");
782 ldlm_lock2handle(lock, &lockh);
783 rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
784 break;
785 case LDLM_CB_CANCELING:
786 /* We've given up the lock, prepare ourselves to update. */
787 LDLM_DEBUG(lock, "MGC cancel CB");
788
789 CDEBUG(D_MGC, "Lock res "DLDLMRES" (%.8s)\n",
790 PLDLMRES(lock->l_resource),
791 (char *)&lock->l_resource->lr_name.name[0]);
792
793 if (!cld) {
794 CDEBUG(D_INFO, "missing data, won't requeue\n");
795 break;
796 }
797
798 /* held at mgc_process_log(). */
799 LASSERT(atomic_read(&cld->cld_refcount) > 0);
800 /* Are we done with this log? */
801 if (cld->cld_stopping) {
802 CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
803 cld->cld_logname);
804 config_log_put(cld);
805 break;
806 }
807 /* Make sure not to re-enqueue when the mgc is stopping
808 (we get called from client_disconnect_export) */
809 if (!lock->l_conn_export ||
810 !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
811 CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
812 cld->cld_logname);
813 config_log_put(cld);
814 break;
815 }
816
817 /* Re-enqueue now */
818 mgc_requeue_add(cld);
819 config_log_put(cld);
820 break;
821 default:
822 LBUG();
823 }
824
825 return rc;
826 }
827
828 /* Not sure where this should go... */
829 /* This is the timeout value for the MGS_CONNECT request plus a ping interval,
830  * so that we have a chance to try the secondary MGS, if any. */
831 #define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
832 + PING_INTERVAL)
833 #define MGC_TARGET_REG_LIMIT 10
834 #define MGC_SEND_PARAM_LIMIT 10
835
836 /* Send a parameter to the MGS */
837 static int mgc_set_mgs_param(struct obd_export *exp,
838 struct mgs_send_param *msp)
839 {
840 struct ptlrpc_request *req;
841 struct mgs_send_param *req_msp, *rep_msp;
842 int rc;
843
844 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
845 &RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION,
846 MGS_SET_INFO);
847 if (!req)
848 return -ENOMEM;
849
850 req_msp = req_capsule_client_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
851 if (!req_msp) {
852 ptlrpc_req_finished(req);
853 return -ENOMEM;
854 }
855
856 memcpy(req_msp, msp, sizeof(*req_msp));
857 ptlrpc_request_set_replen(req);
858
859 /* Limit how long we will wait for the enqueue to complete */
860 req->rq_delay_limit = MGC_SEND_PARAM_LIMIT;
861 rc = ptlrpc_queue_wait(req);
862 if (!rc) {
863 rep_msp = req_capsule_server_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
864 memcpy(msp, rep_msp, sizeof(*rep_msp));
865 }
866
867 ptlrpc_req_finished(req);
868
869 return rc;
870 }
871
872 /* Take a config lock so we can get cancel notifications */
873 static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
874 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
875 __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
876 void *data, __u32 lvb_len, void *lvb_swabber,
877 struct lustre_handle *lockh)
878 {
879 struct config_llog_data *cld = (struct config_llog_data *)data;
880 struct ldlm_enqueue_info einfo = {
881 .ei_type = type,
882 .ei_mode = mode,
883 .ei_cb_bl = mgc_blocking_ast,
884 .ei_cb_cp = ldlm_completion_ast,
885 };
886 struct ptlrpc_request *req;
887 int short_limit = cld_is_sptlrpc(cld);
888 int rc;
889
890 CDEBUG(D_MGC, "Enqueue for %s (res %#llx)\n", cld->cld_logname,
891 cld->cld_resid.name[0]);
892
893 /* We need a callback for every lockholder, so don't try to
894 ldlm_lock_match (see rev 1.1.2.11.2.47) */
895 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
896 &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
897 LDLM_ENQUEUE);
898 if (req == NULL)
899 return -ENOMEM;
900
901 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
902 ptlrpc_request_set_replen(req);
903
904 /* check if this is server or client */
905 if (cld->cld_cfg.cfg_sb) {
906 struct lustre_sb_info *lsi = s2lsi(cld->cld_cfg.cfg_sb);
907 if (lsi && IS_SERVER(lsi))
908 short_limit = 1;
909 }
910 /* Limit how long we will wait for the enqueue to complete */
911 req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
912 rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
913 NULL, 0, LVB_T_NONE, lockh, 0);
914 /* A failed enqueue should still call the mgc_blocking_ast,
915 where it will be requeued if needed ("grant failed"). */
916 ptlrpc_req_finished(req);
917 return rc;
918 }
919
920 static void mgc_notify_active(struct obd_device *unused)
921 {
922 /* wakeup mgc_requeue_thread to requeue mgc lock */
923 spin_lock(&config_list_lock);
924 rq_state |= RQ_NOW;
925 spin_unlock(&config_list_lock);
926 wake_up(&rq_waitq);
927
928 /* TODO: Help the MGS rebuild nidtbl. -jay */
929 }
930
931 /* Send target_reg message to MGS */
932 static int mgc_target_register(struct obd_export *exp,
933 struct mgs_target_info *mti)
934 {
935 struct ptlrpc_request *req;
936 struct mgs_target_info *req_mti, *rep_mti;
937 int rc;
938
939 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
940 &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
941 MGS_TARGET_REG);
942 if (req == NULL)
943 return -ENOMEM;
944
945 req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
946 if (!req_mti) {
947 ptlrpc_req_finished(req);
948 return -ENOMEM;
949 }
950
951 memcpy(req_mti, mti, sizeof(*req_mti));
952 ptlrpc_request_set_replen(req);
953 CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
954 /* Limit how long we will wait for the enqueue to complete */
955 req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
956
957 rc = ptlrpc_queue_wait(req);
958 if (!rc) {
959 rep_mti = req_capsule_server_get(&req->rq_pill,
960 &RMF_MGS_TARGET_INFO);
961 memcpy(mti, rep_mti, sizeof(*rep_mti));
962 CDEBUG(D_MGC, "register %s got index = %d\n",
963 mti->mti_svname, mti->mti_stripe_index);
964 }
965 ptlrpc_req_finished(req);
966
967 return rc;
968 }
969
970 static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
971 u32 keylen, void *key, u32 vallen,
972 void *val, struct ptlrpc_request_set *set)
973 {
974 int rc = -EINVAL;
975
976 /* Turn off initial_recov after we try all backup servers once */
977 if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
978 struct obd_import *imp = class_exp2cliimp(exp);
979 int value;
980 if (vallen != sizeof(int))
981 return -EINVAL;
982 value = *(int *)val;
983 CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
984 imp->imp_obd->obd_name, value,
985 imp->imp_deactive, imp->imp_invalid,
986 imp->imp_replayable, imp->imp_obd->obd_replayable,
987 ptlrpc_import_state_name(imp->imp_state));
988 /* Resurrect if we previously died */
989 if ((imp->imp_state != LUSTRE_IMP_FULL &&
990 imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
991 ptlrpc_reconnect_import(imp);
992 return 0;
993 }
994 if (KEY_IS(KEY_SET_INFO)) {
995 struct mgs_send_param *msp;
996
997 msp = (struct mgs_send_param *)val;
998 rc = mgc_set_mgs_param(exp, msp);
999 return rc;
1000 }
1001 if (KEY_IS(KEY_MGSSEC)) {
1002 struct client_obd *cli = &exp->exp_obd->u.cli;
1003 struct sptlrpc_flavor flvr;
1004
1005 /*
1006 		 * An empty string means use the current flavor; if it has not
1007 		 * been set yet, set it to null.
1008 		 *
1009 		 * If the flavor has been set previously, the requested flavor
1010 		 * must match the existing one.
1011 */
1012 if (vallen == 0) {
1013 if (cli->cl_flvr_mgc.sf_rpc != SPTLRPC_FLVR_INVALID)
1014 return 0;
1015 val = "null";
1016 vallen = 4;
1017 }
1018
1019 rc = sptlrpc_parse_flavor(val, &flvr);
1020 if (rc) {
1021 CERROR("invalid sptlrpc flavor %s to MGS\n",
1022 (char *) val);
1023 return rc;
1024 }
1025
1026 /*
1027 		 * the caller already holds a mutex
1028 */
1029 if (cli->cl_flvr_mgc.sf_rpc == SPTLRPC_FLVR_INVALID) {
1030 cli->cl_flvr_mgc = flvr;
1031 } else if (memcmp(&cli->cl_flvr_mgc, &flvr,
1032 sizeof(flvr)) != 0) {
1033 char str[20];
1034
1035 sptlrpc_flavor2name(&cli->cl_flvr_mgc,
1036 str, sizeof(str));
1037 LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
1038 (char *) val, str);
1039 rc = -EPERM;
1040 }
1041 return rc;
1042 }
1043
1044 return rc;
1045 }
1046
1047 static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
1048 __u32 keylen, void *key, __u32 *vallen, void *val,
1049 struct lov_stripe_md *unused)
1050 {
1051 int rc = -EINVAL;
1052
1053 if (KEY_IS(KEY_CONN_DATA)) {
1054 struct obd_import *imp = class_exp2cliimp(exp);
1055 struct obd_connect_data *data = val;
1056
1057 if (*vallen == sizeof(*data)) {
1058 *data = imp->imp_connect_data;
1059 rc = 0;
1060 }
1061 }
1062
1063 return rc;
1064 }
1065
1066 static int mgc_import_event(struct obd_device *obd,
1067 struct obd_import *imp,
1068 enum obd_import_event event)
1069 {
1070 LASSERT(imp->imp_obd == obd);
1071 CDEBUG(D_MGC, "import event %#x\n", event);
1072
1073 switch (event) {
1074 case IMP_EVENT_DISCON:
1075 /* MGC imports should not wait for recovery */
1076 if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
1077 ptlrpc_pinger_ir_down();
1078 break;
1079 case IMP_EVENT_INACTIVE:
1080 break;
1081 case IMP_EVENT_INVALIDATE: {
1082 struct ldlm_namespace *ns = obd->obd_namespace;
1083 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
1084 break;
1085 }
1086 case IMP_EVENT_ACTIVE:
1087 CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name);
1088 /* Clearing obd_no_recov allows us to continue pinging */
1089 obd->obd_no_recov = 0;
1090 mgc_notify_active(obd);
1091 if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
1092 ptlrpc_pinger_ir_up();
1093 break;
1094 case IMP_EVENT_OCD:
1095 break;
1096 case IMP_EVENT_DEACTIVATE:
1097 case IMP_EVENT_ACTIVATE:
1098 break;
1099 default:
1100 CERROR("Unknown import event %#x\n", event);
1101 LBUG();
1102 }
1103 return 0;
1104 }
1105
1106 enum {
1107 CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
1108 CONFIG_READ_NRPAGES = 4
1109 };
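/*
 * Sizing note (editor's annotation, assuming 4 KB pages, PAGE_CACHE_SHIFT = 12):
 * CONFIG_READ_NRPAGES_INIT = 1 << 8 = 256 pages (1 MB) for the first read of a
 * recovery log, while later incremental reads use only 4 pages (16 KB).
 */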
1110
1111 static int mgc_apply_recover_logs(struct obd_device *mgc,
1112 struct config_llog_data *cld,
1113 __u64 max_version,
1114 void *data, int datalen, bool mne_swab)
1115 {
1116 struct config_llog_instance *cfg = &cld->cld_cfg;
1117 struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
1118 struct mgs_nidtbl_entry *entry;
1119 struct lustre_cfg *lcfg;
1120 struct lustre_cfg_bufs bufs;
1121 u64 prev_version = 0;
1122 char *inst;
1123 char *buf;
1124 int bufsz;
1125 int pos;
1126 int rc = 0;
1127 int off = 0;
1128
1129 LASSERT(cfg->cfg_instance != NULL);
1130 LASSERT(cfg->cfg_sb == cfg->cfg_instance);
1131
1132 OBD_ALLOC(inst, PAGE_CACHE_SIZE);
1133 if (inst == NULL)
1134 return -ENOMEM;
1135
1136 if (!IS_SERVER(lsi)) {
1137 pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
1138 if (pos >= PAGE_CACHE_SIZE) {
1139 OBD_FREE(inst, PAGE_CACHE_SIZE);
1140 return -E2BIG;
1141 }
1142 } else {
1143 LASSERT(IS_MDT(lsi));
1144 rc = server_name2svname(lsi->lsi_svname, inst, NULL,
1145 PAGE_CACHE_SIZE);
1146 if (rc) {
1147 OBD_FREE(inst, PAGE_CACHE_SIZE);
1148 return -EINVAL;
1149 }
1150 pos = strlen(inst);
1151 }
1152
1153 ++pos;
1154 buf = inst + pos;
1155 bufsz = PAGE_CACHE_SIZE - pos;
1156
1157 while (datalen > 0) {
1158 int entry_len = sizeof(*entry);
1159 int is_ost;
1160 struct obd_device *obd;
1161 char *obdname;
1162 char *cname;
1163 char *params;
1164 char *uuid;
1165
1166 rc = -EINVAL;
1167 if (datalen < sizeof(*entry))
1168 break;
1169
1170 entry = (typeof(entry))(data + off);
1171
1172 /* sanity check */
1173 if (entry->mne_nid_type != 0) /* only support type 0 for ipv4 */
1174 break;
1175 if (entry->mne_nid_count == 0) /* at least one nid entry */
1176 break;
1177 if (entry->mne_nid_size != sizeof(lnet_nid_t))
1178 break;
1179
1180 entry_len += entry->mne_nid_count * entry->mne_nid_size;
1181 if (datalen < entry_len) /* must have entry_len at least */
1182 break;
1183
1184 /* Keep this swab for normal mixed endian handling. LU-1644 */
1185 if (mne_swab)
1186 lustre_swab_mgs_nidtbl_entry(entry);
1187 if (entry->mne_length > PAGE_CACHE_SIZE) {
1188 CERROR("MNE too large (%u)\n", entry->mne_length);
1189 break;
1190 }
1191
1192 if (entry->mne_length < entry_len)
1193 break;
1194
1195 off += entry->mne_length;
1196 datalen -= entry->mne_length;
1197 if (datalen < 0)
1198 break;
1199
1200 if (entry->mne_version > max_version) {
1201 CERROR("entry index(%lld) is over max_index(%lld)\n",
1202 entry->mne_version, max_version);
1203 break;
1204 }
1205
1206 if (prev_version >= entry->mne_version) {
1207 CERROR("index unsorted, prev %lld, now %lld\n",
1208 prev_version, entry->mne_version);
1209 break;
1210 }
1211 prev_version = entry->mne_version;
1212
1213 /*
1214 * Write a string with format "nid::instance" to
1215 * lustre/<osc|mdc>/<target>-<osc|mdc>-<instance>/import.
1216 */
1217
1218 is_ost = entry->mne_type == LDD_F_SV_TYPE_OST;
1219 memset(buf, 0, bufsz);
1220 obdname = buf;
1221 pos = 0;
1222
1223 /* lustre-OST0001-osc-<instance #> */
1224 strcpy(obdname, cld->cld_logname);
1225 cname = strrchr(obdname, '-');
1226 if (cname == NULL) {
1227 CERROR("mgc %s: invalid logname %s\n",
1228 mgc->obd_name, obdname);
1229 break;
1230 }
1231
1232 pos = cname - obdname;
1233 obdname[pos] = 0;
1234 pos += sprintf(obdname + pos, "-%s%04x",
1235 is_ost ? "OST" : "MDT", entry->mne_index);
1236
1237 		cname = is_ost ? "osc" : "mdc";
1238 pos += sprintf(obdname + pos, "-%s-%s", cname, inst);
1239 lustre_cfg_bufs_reset(&bufs, obdname);
1240
1241 /* find the obd by obdname */
1242 obd = class_name2obd(obdname);
1243 if (obd == NULL) {
1244 CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
1245 mgc->obd_name, obdname);
1246 rc = 0;
1247 /* this is a safe race, when the ost is starting up...*/
1248 continue;
1249 }
1250
1251 /* osc.import = "connection=<Conn UUID>::<target instance>" */
1252 ++pos;
1253 params = buf + pos;
1254 pos += sprintf(params, "%s.import=%s", cname, "connection=");
1255 uuid = buf + pos;
1256
1257 down_read(&obd->u.cli.cl_sem);
1258 if (obd->u.cli.cl_import == NULL) {
1259 /* client does not connect to the OST yet */
1260 up_read(&obd->u.cli.cl_sem);
1261 rc = 0;
1262 continue;
1263 }
1264
1265 /* TODO: iterate all nids to find one */
1266 /* find uuid by nid */
1267 rc = client_import_find_conn(obd->u.cli.cl_import,
1268 entry->u.nids[0],
1269 (struct obd_uuid *)uuid);
1270 up_read(&obd->u.cli.cl_sem);
1271 if (rc < 0) {
1272 CERROR("mgc: cannot find uuid by nid %s\n",
1273 libcfs_nid2str(entry->u.nids[0]));
1274 break;
1275 }
1276
1277 CDEBUG(D_INFO, "Find uuid %s by nid %s\n",
1278 uuid, libcfs_nid2str(entry->u.nids[0]));
1279
1280 pos += strlen(uuid);
1281 pos += sprintf(buf + pos, "::%u", entry->mne_instance);
1282 LASSERT(pos < bufsz);
1283
1284 lustre_cfg_bufs_set_string(&bufs, 1, params);
1285
1286 rc = -ENOMEM;
1287 lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
1288 if (lcfg == NULL) {
1289 CERROR("mgc: cannot allocate memory\n");
1290 break;
1291 }
1292
1293 CDEBUG(D_INFO, "ir apply logs %lld/%lld for %s -> %s\n",
1294 prev_version, max_version, obdname, params);
1295
1296 rc = class_process_config(lcfg);
1297 lustre_cfg_free(lcfg);
1298 if (rc)
1299 CDEBUG(D_INFO, "process config for %s error %d\n",
1300 obdname, rc);
1301
1302 		/* continue, even if one entry had an error */
1303 }
1304
1305 OBD_FREE(inst, PAGE_CACHE_SIZE);
1306 return rc;
1307 }
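/*
 * Example (editor's annotation): while applying the recovery log
 * "lustre-cliir", an OST entry with mne_index 1 produces the obd name
 * "lustre-OST0001-osc-<instance>" (<instance> being the client's
 * cfg_instance pointer printed above) and a config record such as
 * "osc.import=connection=<conn UUID>::<target instance>".
 */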
1308
1309 /**
1310  * This function is called when this client is notified of a target restart
1311  * by the MGS. A CONFIG_READ RPC is sent to fetch the recovery logs.
1312 */
1313 static int mgc_process_recover_log(struct obd_device *obd,
1314 struct config_llog_data *cld)
1315 {
1316 struct ptlrpc_request *req = NULL;
1317 struct config_llog_instance *cfg = &cld->cld_cfg;
1318 struct mgs_config_body *body;
1319 struct mgs_config_res *res;
1320 struct ptlrpc_bulk_desc *desc;
1321 struct page **pages;
1322 int nrpages;
1323 bool eof = true;
1324 bool mne_swab = false;
1325 int i;
1326 int ealen;
1327 int rc;
1328
1329 	/* Allocate a buffer for the bulk transfer.
1330 	 * If this is the first time logs are read from this MGS,
1331 	 * CONFIG_READ_NRPAGES_INIT is used, since all logs are read at
1332 	 * once; otherwise only the increment of the logs is read, which should be
1333 	 * small, so CONFIG_READ_NRPAGES is used.
1334 */
1335 nrpages = CONFIG_READ_NRPAGES;
1336 if (cfg->cfg_last_idx == 0) /* the first time */
1337 nrpages = CONFIG_READ_NRPAGES_INIT;
1338
1339 OBD_ALLOC(pages, sizeof(*pages) * nrpages);
1340 if (pages == NULL) {
1341 rc = -ENOMEM;
1342 goto out;
1343 }
1344
1345 for (i = 0; i < nrpages; i++) {
1346 pages[i] = alloc_page(GFP_IOFS);
1347 if (pages[i] == NULL) {
1348 rc = -ENOMEM;
1349 goto out;
1350 }
1351 }
1352
1353 again:
1354 LASSERT(cld_is_recover(cld));
1355 LASSERT(mutex_is_locked(&cld->cld_lock));
1356 req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
1357 &RQF_MGS_CONFIG_READ);
1358 if (req == NULL) {
1359 rc = -ENOMEM;
1360 goto out;
1361 }
1362
1363 rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ);
1364 if (rc)
1365 goto out;
1366
1367 /* pack request */
1368 body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
1369 LASSERT(body != NULL);
1370 LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
1371 if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
1372 >= sizeof(body->mcb_name)) {
1373 rc = -E2BIG;
1374 goto out;
1375 }
1376 body->mcb_offset = cfg->cfg_last_idx + 1;
1377 body->mcb_type = cld->cld_type;
1378 body->mcb_bits = PAGE_CACHE_SHIFT;
1379 body->mcb_units = nrpages;
1380
1381 /* allocate bulk transfer descriptor */
1382 desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
1383 MGS_BULK_PORTAL);
1384 if (desc == NULL) {
1385 rc = -ENOMEM;
1386 goto out;
1387 }
1388
1389 for (i = 0; i < nrpages; i++)
1390 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
1391
1392 ptlrpc_request_set_replen(req);
1393 rc = ptlrpc_queue_wait(req);
1394 if (rc)
1395 goto out;
1396
1397 res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
1398 if (res->mcr_size < res->mcr_offset) {
1399 rc = -EINVAL;
1400 goto out;
1401 }
1402
1403 	/* always update the index, even if there were errors
1404 	 * handling the recovery logs */
1405 cfg->cfg_last_idx = res->mcr_offset;
1406 eof = res->mcr_offset == res->mcr_size;
1407
1408 CDEBUG(D_INFO, "Latest version %lld, more %d.\n",
1409 res->mcr_offset, eof == false);
1410
1411 ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0);
1412 if (ealen < 0) {
1413 rc = ealen;
1414 goto out;
1415 }
1416
1417 if (ealen > nrpages << PAGE_CACHE_SHIFT) {
1418 rc = -EINVAL;
1419 goto out;
1420 }
1421
1422 if (ealen == 0) { /* no logs transferred */
1423 if (!eof)
1424 rc = -EINVAL;
1425 goto out;
1426 }
1427
1428 mne_swab = !!ptlrpc_rep_need_swab(req);
1429 #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
1430 /* This import flag means the server did an extra swab of IR MNE
1431 * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
1432 if (unlikely(req->rq_import->imp_need_mne_swab))
1433 mne_swab = !mne_swab;
1434 #else
1435 #warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
1436 #endif
1437
1438 for (i = 0; i < nrpages && ealen > 0; i++) {
1439 int rc2;
1440 void *ptr;
1441
1442 ptr = kmap(pages[i]);
1443 rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
1444 min_t(int, ealen, PAGE_CACHE_SIZE),
1445 mne_swab);
1446 kunmap(pages[i]);
1447 if (rc2 < 0) {
1448 CWARN("Process recover log %s error %d\n",
1449 cld->cld_logname, rc2);
1450 break;
1451 }
1452
1453 ealen -= PAGE_CACHE_SIZE;
1454 }
1455
1456 out:
1457 if (req)
1458 ptlrpc_req_finished(req);
1459
1460 if (rc == 0 && !eof)
1461 goto again;
1462
1463 if (pages) {
1464 for (i = 0; i < nrpages; i++) {
1465 if (pages[i] == NULL)
1466 break;
1467 __free_page(pages[i]);
1468 }
1469 OBD_FREE(pages, sizeof(*pages) * nrpages);
1470 }
1471 return rc;
1472 }
1473
1474 /* local_only means it cannot get remote llogs */
1475 static int mgc_process_cfg_log(struct obd_device *mgc,
1476 struct config_llog_data *cld, int local_only)
1477 {
1478 struct llog_ctxt *ctxt;
1479 struct lustre_sb_info *lsi = NULL;
1480 int rc = 0;
1481 bool sptlrpc_started = false;
1482 struct lu_env *env;
1483
1484 LASSERT(cld);
1485 LASSERT(mutex_is_locked(&cld->cld_lock));
1486
1487 /*
1488 	 * The local copy of the sptlrpc log is controlled elsewhere; don't try to
1489 	 * read it here.
1490 */
1491 if (cld_is_sptlrpc(cld) && local_only)
1492 return 0;
1493
1494 if (cld->cld_cfg.cfg_sb)
1495 lsi = s2lsi(cld->cld_cfg.cfg_sb);
1496
1497 OBD_ALLOC_PTR(env);
1498 if (env == NULL)
1499 return -ENOMEM;
1500
1501 rc = lu_env_init(env, LCT_MG_THREAD);
1502 if (rc)
1503 goto out_free;
1504
1505 ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
1506 LASSERT(ctxt);
1507
1508 if (local_only) /* no local log at client side */ {
1509 rc = -EIO;
1510 goto out_pop;
1511 }
1512
1513 if (cld_is_sptlrpc(cld)) {
1514 sptlrpc_conf_log_update_begin(cld->cld_logname);
1515 sptlrpc_started = true;
1516 }
1517
1518 /* logname and instance info should be the same, so use our
1519 * copy of the instance for the update. The cfg_last_idx will
1520 * be updated here. */
1521 rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
1522 &cld->cld_cfg);
1523
1524 out_pop:
1525 __llog_ctxt_put(env, ctxt);
1526
1527 /*
1528 	 * Update settings on existing OBDs. This is done inside
1529 	 * llog_process_lock so that no device is attaching/detaching
1530 	 * in parallel.
1531 	 * The logname must be <fsname>-sptlrpc.
1532 */
1533 if (sptlrpc_started) {
1534 LASSERT(cld_is_sptlrpc(cld));
1535 sptlrpc_conf_log_update_end(cld->cld_logname);
1536 class_notify_sptlrpc_conf(cld->cld_logname,
1537 strlen(cld->cld_logname) -
1538 strlen("-sptlrpc"));
1539 }
1540
1541 lu_env_fini(env);
1542 out_free:
1543 OBD_FREE_PTR(env);
1544 return rc;
1545 }
1546
1547 /** Get a config log from the MGS and process it.
1548 * This func is called for both clients and servers.
1549 * Copy the log locally before parsing it if appropriate (non-MGS server)
1550 */
1551 int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
1552 {
1553 struct lustre_handle lockh = { 0 };
1554 __u64 flags = LDLM_FL_NO_LRU;
1555 int rc = 0, rcl;
1556
1557 LASSERT(cld);
1558
1559 /* I don't want multiple processes running process_log at once --
1560 sounds like badness. It actually might be fine, as long as
1561 we're not trying to update from the same log
1562 simultaneously (in which case we should use a per-log sem.) */
1563 mutex_lock(&cld->cld_lock);
1564 if (cld->cld_stopping) {
1565 mutex_unlock(&cld->cld_lock);
1566 return 0;
1567 }
1568
1569 OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
1570
1571 CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
1572 cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
1573
1574 /* Get the cfg lock on the llog */
1575 rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
1576 LCK_CR, &flags, NULL, NULL, NULL,
1577 cld, 0, NULL, &lockh);
1578 if (rcl == 0) {
1579 /* Get the cld, it will be released in mgc_blocking_ast. */
1580 config_log_get(cld);
1581 rc = ldlm_lock_set_data(&lockh, (void *)cld);
1582 LASSERT(rc == 0);
1583 } else {
1584 CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
1585
1586 /* mark cld_lostlock so that it will requeue
1587 * after MGC becomes available. */
1588 cld->cld_lostlock = 1;
1589 /* Get extra reference, it will be put in requeue thread */
1590 config_log_get(cld);
1591 }
1592
1593
1594 if (cld_is_recover(cld)) {
1595 rc = 0; /* this is not a fatal error for recover log */
1596 if (rcl == 0)
1597 rc = mgc_process_recover_log(mgc, cld);
1598 } else {
1599 rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
1600 }
1601
1602 CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
1603 mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
1604
1605 mutex_unlock(&cld->cld_lock);
1606
1607 /* Now drop the lock so MGS can revoke it */
1608 if (!rcl)
1609 ldlm_lock_decref(&lockh, LCK_CR);
1610
1611 return rc;
1612 }
1613
1614
1615 /** Called from lustre_process_log.
1616 * LCFG_LOG_START gets the config log from the MGS, processes it to start
1617  * any services, and adds it to the list of logs to watch (follow).
1618 */
1619 static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
1620 {
1621 struct lustre_cfg *lcfg = buf;
1622 struct config_llog_instance *cfg = NULL;
1623 char *logname;
1624 int rc = 0;
1625
1626 switch (lcfg->lcfg_command) {
1627 case LCFG_LOV_ADD_OBD: {
1628 /* Overloading this cfg command: register a new target */
1629 struct mgs_target_info *mti;
1630
1631 if (LUSTRE_CFG_BUFLEN(lcfg, 1) !=
1632 sizeof(struct mgs_target_info)) {
1633 rc = -EINVAL;
1634 goto out;
1635 }
1636
1637 mti = (struct mgs_target_info *)lustre_cfg_buf(lcfg, 1);
1638 CDEBUG(D_MGC, "add_target %s %#x\n",
1639 mti->mti_svname, mti->mti_flags);
1640 rc = mgc_target_register(obd->u.cli.cl_mgc_mgsexp, mti);
1641 break;
1642 }
1643 case LCFG_LOV_DEL_OBD:
1644 /* Unregister has no meaning at the moment. */
1645 CERROR("lov_del_obd unimplemented\n");
1646 rc = -ENOSYS;
1647 break;
1648 case LCFG_SPTLRPC_CONF: {
1649 rc = sptlrpc_process_config(lcfg);
1650 break;
1651 }
1652 case LCFG_LOG_START: {
1653 struct config_llog_data *cld;
1654 struct super_block *sb;
1655
1656 logname = lustre_cfg_string(lcfg, 1);
1657 cfg = (struct config_llog_instance *)lustre_cfg_buf(lcfg, 2);
1658 sb = *(struct super_block **)lustre_cfg_buf(lcfg, 3);
1659
1660 CDEBUG(D_MGC, "parse_log %s from %d\n", logname,
1661 cfg->cfg_last_idx);
1662
1663 /* We're only called through here on the initial mount */
1664 rc = config_log_add(obd, logname, cfg, sb);
1665 if (rc)
1666 break;
1667 cld = config_log_find(logname, cfg);
1668 if (cld == NULL) {
1669 rc = -ENOENT;
1670 break;
1671 }
1672
1673 /* COMPAT_146 */
1674 /* FIXME only set this for old logs! Right now this forces
1675 us to always skip the "inside markers" check */
1676 cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
1677
1678 rc = mgc_process_log(obd, cld);
1679 if (rc == 0 && cld->cld_recover != NULL) {
1680 if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
1681 imp_connect_data, IMP_RECOV)) {
1682 rc = mgc_process_log(obd, cld->cld_recover);
1683 } else {
1684 struct config_llog_data *cir = cld->cld_recover;
1685 cld->cld_recover = NULL;
1686 config_log_put(cir);
1687 }
1688 if (rc)
1689 CERROR("Cannot process recover llog %d\n", rc);
1690 }
1691
1692 if (rc == 0 && cld->cld_params != NULL) {
1693 rc = mgc_process_log(obd, cld->cld_params);
1694 if (rc == -ENOENT) {
1695 CDEBUG(D_MGC,
1696 "There is no params config file yet\n");
1697 rc = 0;
1698 }
1699 /* params log is optional */
1700 if (rc)
1701 CERROR(
1702 "%s: can't process params llog: rc = %d\n",
1703 obd->obd_name, rc);
1704 }
1705 config_log_put(cld);
1706
1707 break;
1708 }
1709 case LCFG_LOG_END: {
1710 logname = lustre_cfg_string(lcfg, 1);
1711
1712 if (lcfg->lcfg_bufcount >= 2)
1713 cfg = (struct config_llog_instance *)lustre_cfg_buf(
1714 lcfg, 2);
1715 rc = config_log_end(logname, cfg);
1716 break;
1717 }
1718 default: {
1719 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1720 rc = -EINVAL;
1721 goto out;
1722
1723 }
1724 }
1725 out:
1726 return rc;
1727 }
1728
1729 struct obd_ops mgc_obd_ops = {
1730 .o_owner = THIS_MODULE,
1731 .o_setup = mgc_setup,
1732 .o_precleanup = mgc_precleanup,
1733 .o_cleanup = mgc_cleanup,
1734 .o_add_conn = client_import_add_conn,
1735 .o_del_conn = client_import_del_conn,
1736 .o_connect = client_connect_import,
1737 .o_disconnect = client_disconnect_export,
1738 /* .o_enqueue = mgc_enqueue, */
1739 /* .o_iocontrol = mgc_iocontrol, */
1740 .o_set_info_async = mgc_set_info_async,
1741 .o_get_info = mgc_get_info,
1742 .o_import_event = mgc_import_event,
1743 .o_process_config = mgc_process_config,
1744 };
1745
1746 static int __init mgc_init(void)
1747 {
1748 return class_register_type(&mgc_obd_ops, NULL, NULL,
1749 LUSTRE_MGC_NAME, NULL);
1750 }
1751
1752 static void /*__exit*/ mgc_exit(void)
1753 {
1754 class_unregister_type(LUSTRE_MGC_NAME);
1755 }
1756
1757 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
1758 MODULE_DESCRIPTION("Lustre Management Client");
1759 MODULE_LICENSE("GPL");
1760
1761 module_init(mgc_init);
1762 module_exit(mgc_exit);