]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/staging/lustre/lustre/ptlrpc/sec.c
efi/arm: Fix boot crash with CONFIG_CPUMASK_OFFSTACK=y
[mirror_ubuntu-artful-kernel.git] / drivers / staging / lustre / lustre / ptlrpc / sec.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
19 *
20 * GPL HEADER END
21 */
22 /*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
25 *
26 * Copyright (c) 2011, 2012, Intel Corporation.
27 */
28 /*
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
31 *
32 * lustre/ptlrpc/sec.c
33 *
34 * Author: Eric Mei <ericm@clusterfs.com>
35 */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include "../../include/linux/libcfs/libcfs.h"
40 #include <linux/crypto.h>
41 #include <linux/key.h>
42
43 #include "../include/obd.h"
44 #include "../include/obd_class.h"
45 #include "../include/obd_support.h"
46 #include "../include/lustre_net.h"
47 #include "../include/lustre_import.h"
48 #include "../include/lustre_dlm.h"
49 #include "../include/lustre_sec.h"
50
51 #include "ptlrpc_internal.h"
52
53 /***********************************************
54 * policy registers *
55 ***********************************************/
56
57 static rwlock_t policy_lock;
58 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
59 NULL,
60 };
61
62 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
63 {
64 __u16 number = policy->sp_policy;
65
66 LASSERT(policy->sp_name);
67 LASSERT(policy->sp_cops);
68 LASSERT(policy->sp_sops);
69
70 if (number >= SPTLRPC_POLICY_MAX)
71 return -EINVAL;
72
73 write_lock(&policy_lock);
74 if (unlikely(policies[number])) {
75 write_unlock(&policy_lock);
76 return -EALREADY;
77 }
78 policies[number] = policy;
79 write_unlock(&policy_lock);
80
81 CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
82 return 0;
83 }
84 EXPORT_SYMBOL(sptlrpc_register_policy);
85
86 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
87 {
88 __u16 number = policy->sp_policy;
89
90 LASSERT(number < SPTLRPC_POLICY_MAX);
91
92 write_lock(&policy_lock);
93 if (unlikely(!policies[number])) {
94 write_unlock(&policy_lock);
95 CERROR("%s: already unregistered\n", policy->sp_name);
96 return -EINVAL;
97 }
98
99 LASSERT(policies[number] == policy);
100 policies[number] = NULL;
101 write_unlock(&policy_lock);
102
103 CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
104 return 0;
105 }
106 EXPORT_SYMBOL(sptlrpc_unregister_policy);
107
108 static
109 struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
110 {
111 static DEFINE_MUTEX(load_mutex);
112 static atomic_t loaded = ATOMIC_INIT(0);
113 struct ptlrpc_sec_policy *policy;
114 __u16 number = SPTLRPC_FLVR_POLICY(flavor);
115 __u16 flag = 0;
116
117 if (number >= SPTLRPC_POLICY_MAX)
118 return NULL;
119
120 while (1) {
121 read_lock(&policy_lock);
122 policy = policies[number];
123 if (policy && !try_module_get(policy->sp_owner))
124 policy = NULL;
125 if (!policy)
126 flag = atomic_read(&loaded);
127 read_unlock(&policy_lock);
128
129 if (policy || flag != 0 ||
130 number != SPTLRPC_POLICY_GSS)
131 break;
132
133 /* try to load gss module, once */
134 mutex_lock(&load_mutex);
135 if (atomic_read(&loaded) == 0) {
136 if (request_module("ptlrpc_gss") == 0)
137 CDEBUG(D_SEC,
138 "module ptlrpc_gss loaded on demand\n");
139 else
140 CERROR("Unable to load module ptlrpc_gss\n");
141
142 atomic_set(&loaded, 1);
143 }
144 mutex_unlock(&load_mutex);
145 }
146
147 return policy;
148 }
149
150 __u32 sptlrpc_name2flavor_base(const char *name)
151 {
152 if (!strcmp(name, "null"))
153 return SPTLRPC_FLVR_NULL;
154 if (!strcmp(name, "plain"))
155 return SPTLRPC_FLVR_PLAIN;
156 if (!strcmp(name, "krb5n"))
157 return SPTLRPC_FLVR_KRB5N;
158 if (!strcmp(name, "krb5a"))
159 return SPTLRPC_FLVR_KRB5A;
160 if (!strcmp(name, "krb5i"))
161 return SPTLRPC_FLVR_KRB5I;
162 if (!strcmp(name, "krb5p"))
163 return SPTLRPC_FLVR_KRB5P;
164
165 return SPTLRPC_FLVR_INVALID;
166 }
167 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
168
169 const char *sptlrpc_flavor2name_base(__u32 flvr)
170 {
171 __u32 base = SPTLRPC_FLVR_BASE(flvr);
172
173 if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
174 return "null";
175 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
176 return "plain";
177 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
178 return "krb5n";
179 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
180 return "krb5a";
181 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
182 return "krb5i";
183 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
184 return "krb5p";
185
186 CERROR("invalid wire flavor 0x%x\n", flvr);
187 return "invalid";
188 }
189 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
190
191 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
192 char *buf, int bufsize)
193 {
194 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
195 snprintf(buf, bufsize, "hash:%s",
196 sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
197 else
198 snprintf(buf, bufsize, "%s",
199 sptlrpc_flavor2name_base(sf->sf_rpc));
200
201 buf[bufsize - 1] = '\0';
202 return buf;
203 }
204 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
205
206 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
207 {
208 strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize);
209
210 /*
211 * currently we don't support customized bulk specification for
212 * flavors other than plain
213 */
214 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
215 char bspec[16];
216
217 bspec[0] = '-';
218 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
219 strlcat(buf, bspec, bufsize);
220 }
221
222 return buf;
223 }
224 EXPORT_SYMBOL(sptlrpc_flavor2name);
225
226 static char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
227 {
228 buf[0] = '\0';
229
230 if (flags & PTLRPC_SEC_FL_REVERSE)
231 strlcat(buf, "reverse,", bufsize);
232 if (flags & PTLRPC_SEC_FL_ROOTONLY)
233 strlcat(buf, "rootonly,", bufsize);
234 if (flags & PTLRPC_SEC_FL_UDESC)
235 strlcat(buf, "udesc,", bufsize);
236 if (flags & PTLRPC_SEC_FL_BULK)
237 strlcat(buf, "bulk,", bufsize);
238 if (buf[0] == '\0')
239 strlcat(buf, "-,", bufsize);
240
241 return buf;
242 }
243
244 /**************************************************
245 * client context APIs *
246 **************************************************/
247
248 static
249 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
250 {
251 struct vfs_cred vcred;
252 int create = 1, remove_dead = 1;
253
254 LASSERT(sec);
255 LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
256
257 if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
258 PTLRPC_SEC_FL_ROOTONLY)) {
259 vcred.vc_uid = 0;
260 vcred.vc_gid = 0;
261 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
262 create = 0;
263 remove_dead = 0;
264 }
265 } else {
266 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
267 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
268 }
269
270 return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
271 create, remove_dead);
272 }
273
274 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
275 {
276 atomic_inc(&ctx->cc_refcount);
277 return ctx;
278 }
279 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
280
281 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
282 {
283 struct ptlrpc_sec *sec = ctx->cc_sec;
284
285 LASSERT(sec);
286 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
287
288 if (!atomic_dec_and_test(&ctx->cc_refcount))
289 return;
290
291 sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
292 }
293 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
294
295 static int import_sec_check_expire(struct obd_import *imp)
296 {
297 int adapt = 0;
298
299 spin_lock(&imp->imp_lock);
300 if (imp->imp_sec_expire &&
301 imp->imp_sec_expire < ktime_get_real_seconds()) {
302 adapt = 1;
303 imp->imp_sec_expire = 0;
304 }
305 spin_unlock(&imp->imp_lock);
306
307 if (!adapt)
308 return 0;
309
310 CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
311 return sptlrpc_import_sec_adapt(imp, NULL, NULL);
312 }
313
314 /**
315 * Get and validate the client side ptlrpc security facilities from
316 * \a imp. There is a race condition on client reconnect when the import is
317 * being destroyed while there are outstanding client bound requests. In
318 * this case do not output any error messages if import secuity is not
319 * found.
320 *
321 * \param[in] imp obd import associated with client
322 * \param[out] sec client side ptlrpc security
323 *
324 * \retval 0 if security retrieved successfully
325 * \retval -ve errno if there was a problem
326 */
327 static int import_sec_validate_get(struct obd_import *imp,
328 struct ptlrpc_sec **sec)
329 {
330 int rc;
331
332 if (unlikely(imp->imp_sec_expire)) {
333 rc = import_sec_check_expire(imp);
334 if (rc)
335 return rc;
336 }
337
338 *sec = sptlrpc_import_sec_ref(imp);
339 /* Only output an error when the import is still active */
340 if (!*sec) {
341 if (list_empty(&imp->imp_zombie_chain))
342 CERROR("import %p (%s) with no sec\n",
343 imp, ptlrpc_import_state_name(imp->imp_state));
344 return -EACCES;
345 }
346
347 if (unlikely((*sec)->ps_dying)) {
348 CERROR("attempt to use dying sec %p\n", sec);
349 sptlrpc_sec_put(*sec);
350 return -EACCES;
351 }
352
353 return 0;
354 }
355
356 /**
357 * Given a \a req, find or allocate a appropriate context for it.
358 * \pre req->rq_cli_ctx == NULL.
359 *
360 * \retval 0 succeed, and req->rq_cli_ctx is set.
361 * \retval -ev error number, and req->rq_cli_ctx == NULL.
362 */
363 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
364 {
365 struct obd_import *imp = req->rq_import;
366 struct ptlrpc_sec *sec;
367 int rc;
368
369 LASSERT(!req->rq_cli_ctx);
370 LASSERT(imp);
371
372 rc = import_sec_validate_get(imp, &sec);
373 if (rc)
374 return rc;
375
376 req->rq_cli_ctx = get_my_ctx(sec);
377
378 sptlrpc_sec_put(sec);
379
380 if (!req->rq_cli_ctx) {
381 CERROR("req %p: fail to get context\n", req);
382 return -ECONNREFUSED;
383 }
384
385 return 0;
386 }
387
388 /**
389 * Drop the context for \a req.
390 * \pre req->rq_cli_ctx != NULL.
391 * \post req->rq_cli_ctx == NULL.
392 *
393 * If \a sync == 0, this function should return quickly without sleep;
394 * otherwise it might trigger and wait for the whole process of sending
395 * an context-destroying rpc to server.
396 */
397 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
398 {
399 LASSERT(req);
400 LASSERT(req->rq_cli_ctx);
401
402 /* request might be asked to release earlier while still
403 * in the context waiting list.
404 */
405 if (!list_empty(&req->rq_ctx_chain)) {
406 spin_lock(&req->rq_cli_ctx->cc_lock);
407 list_del_init(&req->rq_ctx_chain);
408 spin_unlock(&req->rq_cli_ctx->cc_lock);
409 }
410
411 sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
412 req->rq_cli_ctx = NULL;
413 }
414
415 static
416 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
417 struct ptlrpc_cli_ctx *oldctx,
418 struct ptlrpc_cli_ctx *newctx)
419 {
420 struct sptlrpc_flavor old_flvr;
421 char *reqmsg = NULL; /* to workaround old gcc */
422 int reqmsg_size;
423 int rc = 0;
424
425 LASSERT(req->rq_reqmsg);
426 LASSERT(req->rq_reqlen);
427 LASSERT(req->rq_replen);
428
429 CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
430 req,
431 oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
432 newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
433 oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
434 newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
435
436 /* save flavor */
437 old_flvr = req->rq_flvr;
438
439 /* save request message */
440 reqmsg_size = req->rq_reqlen;
441 if (reqmsg_size != 0) {
442 reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS);
443 if (!reqmsg)
444 return -ENOMEM;
445 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
446 }
447
448 /* release old req/rep buf */
449 req->rq_cli_ctx = oldctx;
450 sptlrpc_cli_free_reqbuf(req);
451 sptlrpc_cli_free_repbuf(req);
452 req->rq_cli_ctx = newctx;
453
454 /* recalculate the flavor */
455 sptlrpc_req_set_flavor(req, 0);
456
457 /* alloc new request buffer
458 * we don't need to alloc reply buffer here, leave it to the
459 * rest procedure of ptlrpc
460 */
461 if (reqmsg_size != 0) {
462 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
463 if (!rc) {
464 LASSERT(req->rq_reqmsg);
465 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
466 } else {
467 CWARN("failed to alloc reqbuf: %d\n", rc);
468 req->rq_flvr = old_flvr;
469 }
470
471 kvfree(reqmsg);
472 }
473 return rc;
474 }
475
476 /**
477 * If current context of \a req is dead somehow, e.g. we just switched flavor
478 * thus marked original contexts dead, we'll find a new context for it. if
479 * no switch is needed, \a req will end up with the same context.
480 *
481 * \note a request must have a context, to keep other parts of code happy.
482 * In any case of failure during the switching, we must restore the old one.
483 */
484 static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
485 {
486 struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
487 struct ptlrpc_cli_ctx *newctx;
488 int rc;
489
490 LASSERT(oldctx);
491
492 sptlrpc_cli_ctx_get(oldctx);
493 sptlrpc_req_put_ctx(req, 0);
494
495 rc = sptlrpc_req_get_ctx(req);
496 if (unlikely(rc)) {
497 LASSERT(!req->rq_cli_ctx);
498
499 /* restore old ctx */
500 req->rq_cli_ctx = oldctx;
501 return rc;
502 }
503
504 newctx = req->rq_cli_ctx;
505 LASSERT(newctx);
506
507 if (unlikely(newctx == oldctx &&
508 test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
509 /*
510 * still get the old dead ctx, usually means system too busy
511 */
512 CDEBUG(D_SEC,
513 "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
514 newctx, newctx->cc_flags);
515
516 set_current_state(TASK_INTERRUPTIBLE);
517 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
518 } else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) {
519 /*
520 * new ctx not up to date yet
521 */
522 CDEBUG(D_SEC,
523 "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
524 newctx, newctx->cc_flags);
525 } else {
526 /*
527 * it's possible newctx == oldctx if we're switching
528 * subflavor with the same sec.
529 */
530 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
531 if (rc) {
532 /* restore old ctx */
533 sptlrpc_req_put_ctx(req, 0);
534 req->rq_cli_ctx = oldctx;
535 return rc;
536 }
537
538 LASSERT(req->rq_cli_ctx == newctx);
539 }
540
541 sptlrpc_cli_ctx_put(oldctx, 1);
542 return 0;
543 }
544
545 static
546 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
547 {
548 if (cli_ctx_is_refreshed(ctx))
549 return 1;
550 return 0;
551 }
552
553 static
554 int ctx_refresh_timeout(void *data)
555 {
556 struct ptlrpc_request *req = data;
557 int rc;
558
559 /* conn_cnt is needed in expire_one_request */
560 lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
561
562 rc = ptlrpc_expire_one_request(req, 1);
563 /* if we started recovery, we should mark this ctx dead; otherwise
564 * in case of lgssd died nobody would retire this ctx, following
565 * connecting will still find the same ctx thus cause deadlock.
566 * there's an assumption that expire time of the request should be
567 * later than the context refresh expire time.
568 */
569 if (rc == 0)
570 req->rq_cli_ctx->cc_ops->force_die(req->rq_cli_ctx, 0);
571 return rc;
572 }
573
574 static
575 void ctx_refresh_interrupt(void *data)
576 {
577 struct ptlrpc_request *req = data;
578
579 spin_lock(&req->rq_lock);
580 req->rq_intr = 1;
581 spin_unlock(&req->rq_lock);
582 }
583
584 static
585 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
586 {
587 spin_lock(&ctx->cc_lock);
588 if (!list_empty(&req->rq_ctx_chain))
589 list_del_init(&req->rq_ctx_chain);
590 spin_unlock(&ctx->cc_lock);
591 }
592
593 /**
594 * To refresh the context of \req, if it's not up-to-date.
595 * \param timeout
596 * - < 0: don't wait
597 * - = 0: wait until success or fatal error occur
598 * - > 0: timeout value (in seconds)
599 *
600 * The status of the context could be subject to be changed by other threads
601 * at any time. We allow this race, but once we return with 0, the caller will
602 * suppose it's uptodated and keep using it until the owning rpc is done.
603 *
604 * \retval 0 only if the context is uptodated.
605 * \retval -ev error number.
606 */
607 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
608 {
609 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
610 struct ptlrpc_sec *sec;
611 struct l_wait_info lwi;
612 int rc;
613
614 LASSERT(ctx);
615
616 if (req->rq_ctx_init || req->rq_ctx_fini)
617 return 0;
618
619 /*
620 * during the process a request's context might change type even
621 * (e.g. from gss ctx to null ctx), so each loop we need to re-check
622 * everything
623 */
624 again:
625 rc = import_sec_validate_get(req->rq_import, &sec);
626 if (rc)
627 return rc;
628
629 if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
630 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
631 req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
632 req_off_ctx_list(req, ctx);
633 sptlrpc_req_replace_dead_ctx(req);
634 ctx = req->rq_cli_ctx;
635 }
636 sptlrpc_sec_put(sec);
637
638 if (cli_ctx_is_eternal(ctx))
639 return 0;
640
641 if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
642 LASSERT(ctx->cc_ops->refresh);
643 ctx->cc_ops->refresh(ctx);
644 }
645 LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
646
647 LASSERT(ctx->cc_ops->validate);
648 if (ctx->cc_ops->validate(ctx) == 0) {
649 req_off_ctx_list(req, ctx);
650 return 0;
651 }
652
653 if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
654 spin_lock(&req->rq_lock);
655 req->rq_err = 1;
656 spin_unlock(&req->rq_lock);
657 req_off_ctx_list(req, ctx);
658 return -EPERM;
659 }
660
661 /*
662 * There's a subtle issue for resending RPCs, suppose following
663 * situation:
664 * 1. the request was sent to server.
665 * 2. recovery was kicked start, after finished the request was
666 * marked as resent.
667 * 3. resend the request.
668 * 4. old reply from server received, we accept and verify the reply.
669 * this has to be success, otherwise the error will be aware
670 * by application.
671 * 5. new reply from server received, dropped by LNet.
672 *
673 * Note the xid of old & new request is the same. We can't simply
674 * change xid for the resent request because the server replies on
675 * it for reply reconstruction.
676 *
677 * Commonly the original context should be uptodate because we
678 * have a expiry nice time; server will keep its context because
679 * we at least hold a ref of old context which prevent context
680 * destroying RPC being sent. So server still can accept the request
681 * and finish the RPC. But if that's not the case:
682 * 1. If server side context has been trimmed, a NO_CONTEXT will
683 * be returned, gss_cli_ctx_verify/unseal will switch to new
684 * context by force.
685 * 2. Current context never be refreshed, then we are fine: we
686 * never really send request with old context before.
687 */
688 if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
689 unlikely(req->rq_reqmsg) &&
690 lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
691 req_off_ctx_list(req, ctx);
692 return 0;
693 }
694
695 if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
696 req_off_ctx_list(req, ctx);
697 /*
698 * don't switch ctx if import was deactivated
699 */
700 if (req->rq_import->imp_deactive) {
701 spin_lock(&req->rq_lock);
702 req->rq_err = 1;
703 spin_unlock(&req->rq_lock);
704 return -EINTR;
705 }
706
707 rc = sptlrpc_req_replace_dead_ctx(req);
708 if (rc) {
709 LASSERT(ctx == req->rq_cli_ctx);
710 CERROR("req %p: failed to replace dead ctx %p: %d\n",
711 req, ctx, rc);
712 spin_lock(&req->rq_lock);
713 req->rq_err = 1;
714 spin_unlock(&req->rq_lock);
715 return rc;
716 }
717
718 ctx = req->rq_cli_ctx;
719 goto again;
720 }
721
722 /*
723 * Now we're sure this context is during upcall, add myself into
724 * waiting list
725 */
726 spin_lock(&ctx->cc_lock);
727 if (list_empty(&req->rq_ctx_chain))
728 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
729 spin_unlock(&ctx->cc_lock);
730
731 if (timeout < 0)
732 return -EWOULDBLOCK;
733
734 /* Clear any flags that may be present from previous sends */
735 LASSERT(req->rq_receiving_reply == 0);
736 spin_lock(&req->rq_lock);
737 req->rq_err = 0;
738 req->rq_timedout = 0;
739 req->rq_resend = 0;
740 req->rq_restart = 0;
741 spin_unlock(&req->rq_lock);
742
743 lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
744 ctx_refresh_timeout, ctx_refresh_interrupt,
745 req);
746 rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
747
748 /*
749 * following cases could lead us here:
750 * - successfully refreshed;
751 * - interrupted;
752 * - timedout, and we don't want recover from the failure;
753 * - timedout, and waked up upon recovery finished;
754 * - someone else mark this ctx dead by force;
755 * - someone invalidate the req and call ptlrpc_client_wake_req(),
756 * e.g. ptlrpc_abort_inflight();
757 */
758 if (!cli_ctx_is_refreshed(ctx)) {
759 /* timed out or interrupted */
760 req_off_ctx_list(req, ctx);
761
762 LASSERT(rc != 0);
763 return rc;
764 }
765
766 goto again;
767 }
768
769 /**
770 * Initialize flavor settings for \a req, according to \a opcode.
771 *
772 * \note this could be called in two situations:
773 * - new request from ptlrpc_pre_req(), with proper @opcode
774 * - old request which changed ctx in the middle, with @opcode == 0
775 */
776 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
777 {
778 struct ptlrpc_sec *sec;
779
780 LASSERT(req->rq_import);
781 LASSERT(req->rq_cli_ctx);
782 LASSERT(req->rq_cli_ctx->cc_sec);
783 LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
784
785 /* special security flags according to opcode */
786 switch (opcode) {
787 case OST_READ:
788 case MDS_READPAGE:
789 case MGS_CONFIG_READ:
790 case OBD_IDX_READ:
791 req->rq_bulk_read = 1;
792 break;
793 case OST_WRITE:
794 case MDS_WRITEPAGE:
795 req->rq_bulk_write = 1;
796 break;
797 case SEC_CTX_INIT:
798 req->rq_ctx_init = 1;
799 break;
800 case SEC_CTX_FINI:
801 req->rq_ctx_fini = 1;
802 break;
803 case 0:
804 /* init/fini rpc won't be resend, so can't be here */
805 LASSERT(req->rq_ctx_init == 0);
806 LASSERT(req->rq_ctx_fini == 0);
807
808 /* cleanup flags, which should be recalculated */
809 req->rq_pack_udesc = 0;
810 req->rq_pack_bulk = 0;
811 break;
812 }
813
814 sec = req->rq_cli_ctx->cc_sec;
815
816 spin_lock(&sec->ps_lock);
817 req->rq_flvr = sec->ps_flvr;
818 spin_unlock(&sec->ps_lock);
819
820 /* force SVC_NULL for context initiation rpc, SVC_INTG for context
821 * destruction rpc
822 */
823 if (unlikely(req->rq_ctx_init))
824 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
825 else if (unlikely(req->rq_ctx_fini))
826 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
827
828 /* user descriptor flag, null security can't do it anyway */
829 if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
830 (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
831 req->rq_pack_udesc = 1;
832
833 /* bulk security flag */
834 if ((req->rq_bulk_read || req->rq_bulk_write) &&
835 sptlrpc_flavor_has_bulk(&req->rq_flvr))
836 req->rq_pack_bulk = 1;
837 }
838
839 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
840 {
841 if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
842 return;
843
844 LASSERT(req->rq_clrbuf);
845 if (req->rq_pool || !req->rq_reqbuf)
846 return;
847
848 kfree(req->rq_reqbuf);
849 req->rq_reqbuf = NULL;
850 req->rq_reqbuf_len = 0;
851 }
852
853 /**
854 * Given an import \a imp, check whether current user has a valid context
855 * or not. We may create a new context and try to refresh it, and try
856 * repeatedly try in case of non-fatal errors. Return 0 means success.
857 */
858 int sptlrpc_import_check_ctx(struct obd_import *imp)
859 {
860 struct ptlrpc_sec *sec;
861 struct ptlrpc_cli_ctx *ctx;
862 struct ptlrpc_request *req = NULL;
863 int rc;
864
865 might_sleep();
866
867 sec = sptlrpc_import_sec_ref(imp);
868 ctx = get_my_ctx(sec);
869 sptlrpc_sec_put(sec);
870
871 if (!ctx)
872 return -ENOMEM;
873
874 if (cli_ctx_is_eternal(ctx) ||
875 ctx->cc_ops->validate(ctx) == 0) {
876 sptlrpc_cli_ctx_put(ctx, 1);
877 return 0;
878 }
879
880 if (cli_ctx_is_error(ctx)) {
881 sptlrpc_cli_ctx_put(ctx, 1);
882 return -EACCES;
883 }
884
885 req = ptlrpc_request_cache_alloc(GFP_NOFS);
886 if (!req)
887 return -ENOMEM;
888
889 ptlrpc_cli_req_init(req);
890 atomic_set(&req->rq_refcount, 10000);
891
892 req->rq_import = imp;
893 req->rq_flvr = sec->ps_flvr;
894 req->rq_cli_ctx = ctx;
895
896 rc = sptlrpc_req_refresh_ctx(req, 0);
897 LASSERT(list_empty(&req->rq_ctx_chain));
898 sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
899 ptlrpc_request_cache_free(req);
900
901 return rc;
902 }
903
904 /**
905 * Used by ptlrpc client, to perform the pre-defined security transformation
906 * upon the request message of \a req. After this function called,
907 * req->rq_reqmsg is still accessible as clear text.
908 */
909 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
910 {
911 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
912 int rc = 0;
913
914 LASSERT(ctx);
915 LASSERT(ctx->cc_sec);
916 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
917
918 /* we wrap bulk request here because now we can be sure
919 * the context is uptodate.
920 */
921 if (req->rq_bulk) {
922 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
923 if (rc)
924 return rc;
925 }
926
927 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
928 case SPTLRPC_SVC_NULL:
929 case SPTLRPC_SVC_AUTH:
930 case SPTLRPC_SVC_INTG:
931 LASSERT(ctx->cc_ops->sign);
932 rc = ctx->cc_ops->sign(ctx, req);
933 break;
934 case SPTLRPC_SVC_PRIV:
935 LASSERT(ctx->cc_ops->seal);
936 rc = ctx->cc_ops->seal(ctx, req);
937 break;
938 default:
939 LBUG();
940 }
941
942 if (rc == 0) {
943 LASSERT(req->rq_reqdata_len);
944 LASSERT(req->rq_reqdata_len % 8 == 0);
945 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
946 }
947
948 return rc;
949 }
950
951 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
952 {
953 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
954 int rc;
955
956 LASSERT(ctx);
957 LASSERT(ctx->cc_sec);
958 LASSERT(req->rq_repbuf);
959 LASSERT(req->rq_repdata);
960 LASSERT(!req->rq_repmsg);
961
962 req->rq_rep_swab_mask = 0;
963
964 rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
965 switch (rc) {
966 case 1:
967 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
968 case 0:
969 break;
970 default:
971 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
972 return -EPROTO;
973 }
974
975 if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
976 CERROR("replied data length %d too small\n",
977 req->rq_repdata_len);
978 return -EPROTO;
979 }
980
981 if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
982 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
983 CERROR("reply policy %u doesn't match request policy %u\n",
984 SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
985 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
986 return -EPROTO;
987 }
988
989 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
990 case SPTLRPC_SVC_NULL:
991 case SPTLRPC_SVC_AUTH:
992 case SPTLRPC_SVC_INTG:
993 LASSERT(ctx->cc_ops->verify);
994 rc = ctx->cc_ops->verify(ctx, req);
995 break;
996 case SPTLRPC_SVC_PRIV:
997 LASSERT(ctx->cc_ops->unseal);
998 rc = ctx->cc_ops->unseal(ctx, req);
999 break;
1000 default:
1001 LBUG();
1002 }
1003 LASSERT(rc || req->rq_repmsg || req->rq_resend);
1004
1005 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1006 !req->rq_ctx_init)
1007 req->rq_rep_swab_mask = 0;
1008 return rc;
1009 }
1010
1011 /**
1012 * Used by ptlrpc client, to perform security transformation upon the reply
1013 * message of \a req. After return successfully, req->rq_repmsg points to
1014 * the reply message in clear text.
1015 *
1016 * \pre the reply buffer should have been un-posted from LNet, so nothing is
1017 * going to change.
1018 */
1019 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1020 {
1021 LASSERT(req->rq_repbuf);
1022 LASSERT(!req->rq_repdata);
1023 LASSERT(!req->rq_repmsg);
1024 LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1025
1026 if (req->rq_reply_off == 0 &&
1027 (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1028 CERROR("real reply with offset 0\n");
1029 return -EPROTO;
1030 }
1031
1032 if (req->rq_reply_off % 8 != 0) {
1033 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1034 return -EPROTO;
1035 }
1036
1037 req->rq_repdata = (struct lustre_msg *)
1038 (req->rq_repbuf + req->rq_reply_off);
1039 req->rq_repdata_len = req->rq_nob_received;
1040
1041 return do_cli_unwrap_reply(req);
1042 }
1043
1044 /**
1045 * Used by ptlrpc client, to perform security transformation upon the early
1046 * reply message of \a req. We expect the rq_reply_off is 0, and
1047 * rq_nob_received is the early reply size.
1048 *
1049 * Because the receive buffer might be still posted, the reply data might be
1050 * changed at any time, no matter we're holding rq_lock or not. For this reason
1051 * we allocate a separate ptlrpc_request and reply buffer for early reply
1052 * processing.
1053 *
1054 * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1055 * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1056 * \a *req_ret to release it.
1057 * \retval -ev error number, and \a req_ret will not be set.
1058 */
1059 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1060 struct ptlrpc_request **req_ret)
1061 {
1062 struct ptlrpc_request *early_req;
1063 char *early_buf;
1064 int early_bufsz, early_size;
1065 int rc;
1066
1067 early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1068 if (!early_req)
1069 return -ENOMEM;
1070
1071 ptlrpc_cli_req_init(early_req);
1072
1073 early_size = req->rq_nob_received;
1074 early_bufsz = size_roundup_power2(early_size);
1075 early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
1076 if (!early_buf) {
1077 rc = -ENOMEM;
1078 goto err_req;
1079 }
1080
1081 /* sanity checkings and copy data out, do it inside spinlock */
1082 spin_lock(&req->rq_lock);
1083
1084 if (req->rq_replied) {
1085 spin_unlock(&req->rq_lock);
1086 rc = -EALREADY;
1087 goto err_buf;
1088 }
1089
1090 LASSERT(req->rq_repbuf);
1091 LASSERT(!req->rq_repdata);
1092 LASSERT(!req->rq_repmsg);
1093
1094 if (req->rq_reply_off != 0) {
1095 CERROR("early reply with offset %u\n", req->rq_reply_off);
1096 spin_unlock(&req->rq_lock);
1097 rc = -EPROTO;
1098 goto err_buf;
1099 }
1100
1101 if (req->rq_nob_received != early_size) {
1102 /* even another early arrived the size should be the same */
1103 CERROR("data size has changed from %u to %u\n",
1104 early_size, req->rq_nob_received);
1105 spin_unlock(&req->rq_lock);
1106 rc = -EINVAL;
1107 goto err_buf;
1108 }
1109
1110 if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1111 CERROR("early reply length %d too small\n",
1112 req->rq_nob_received);
1113 spin_unlock(&req->rq_lock);
1114 rc = -EALREADY;
1115 goto err_buf;
1116 }
1117
1118 memcpy(early_buf, req->rq_repbuf, early_size);
1119 spin_unlock(&req->rq_lock);
1120
1121 early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1122 early_req->rq_flvr = req->rq_flvr;
1123 early_req->rq_repbuf = early_buf;
1124 early_req->rq_repbuf_len = early_bufsz;
1125 early_req->rq_repdata = (struct lustre_msg *)early_buf;
1126 early_req->rq_repdata_len = early_size;
1127 early_req->rq_early = 1;
1128 early_req->rq_reqmsg = req->rq_reqmsg;
1129
1130 rc = do_cli_unwrap_reply(early_req);
1131 if (rc) {
1132 DEBUG_REQ(D_ADAPTTO, early_req,
1133 "error %d unwrap early reply", rc);
1134 goto err_ctx;
1135 }
1136
1137 LASSERT(early_req->rq_repmsg);
1138 *req_ret = early_req;
1139 return 0;
1140
1141 err_ctx:
1142 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1143 err_buf:
1144 kvfree(early_buf);
1145 err_req:
1146 ptlrpc_request_cache_free(early_req);
1147 return rc;
1148 }
1149
1150 /**
1151 * Used by ptlrpc client, to release a processed early reply \a early_req.
1152 *
1153 * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1154 */
1155 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1156 {
1157 LASSERT(early_req->rq_repbuf);
1158 LASSERT(early_req->rq_repdata);
1159 LASSERT(early_req->rq_repmsg);
1160
1161 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1162 kvfree(early_req->rq_repbuf);
1163 ptlrpc_request_cache_free(early_req);
1164 }
1165
1166 /**************************************************
1167 * sec ID *
1168 **************************************************/
1169
1170 /*
1171 * "fixed" sec (e.g. null) use sec_id < 0
1172 */
1173 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1174
1175 int sptlrpc_get_next_secid(void)
1176 {
1177 return atomic_inc_return(&sptlrpc_sec_id);
1178 }
1179 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1180
1181 /**************************************************
1182 * client side high-level security APIs *
1183 **************************************************/
1184
1185 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1186 int grace, int force)
1187 {
1188 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1189
1190 LASSERT(policy->sp_cops);
1191 LASSERT(policy->sp_cops->flush_ctx_cache);
1192
1193 return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1194 }
1195
1196 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1197 {
1198 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1199
1200 LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1201 LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1202 LASSERT(policy->sp_cops->destroy_sec);
1203
1204 CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1205
1206 policy->sp_cops->destroy_sec(sec);
1207 sptlrpc_policy_put(policy);
1208 }
1209
1210 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1211 {
1212 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1213
1214 if (sec->ps_policy->sp_cops->kill_sec) {
1215 sec->ps_policy->sp_cops->kill_sec(sec);
1216
1217 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1218 }
1219 }
1220
1221 static struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1222 {
1223 if (sec)
1224 atomic_inc(&sec->ps_refcount);
1225
1226 return sec;
1227 }
1228
1229 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1230 {
1231 if (sec) {
1232 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1233
1234 if (atomic_dec_and_test(&sec->ps_refcount)) {
1235 sptlrpc_gc_del_sec(sec);
1236 sec_cop_destroy_sec(sec);
1237 }
1238 }
1239 }
1240 EXPORT_SYMBOL(sptlrpc_sec_put);
1241
1242 /*
1243 * policy module is responsible for taking reference of import
1244 */
1245 static
1246 struct ptlrpc_sec *sptlrpc_sec_create(struct obd_import *imp,
1247 struct ptlrpc_svc_ctx *svc_ctx,
1248 struct sptlrpc_flavor *sf,
1249 enum lustre_sec_part sp)
1250 {
1251 struct ptlrpc_sec_policy *policy;
1252 struct ptlrpc_sec *sec;
1253 char str[32];
1254
1255 if (svc_ctx) {
1256 LASSERT(imp->imp_dlm_fake == 1);
1257
1258 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1259 imp->imp_obd->obd_type->typ_name,
1260 imp->imp_obd->obd_name,
1261 sptlrpc_flavor2name(sf, str, sizeof(str)));
1262
1263 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1264 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1265 } else {
1266 LASSERT(imp->imp_dlm_fake == 0);
1267
1268 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1269 imp->imp_obd->obd_type->typ_name,
1270 imp->imp_obd->obd_name,
1271 sptlrpc_flavor2name(sf, str, sizeof(str)));
1272
1273 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1274 if (!policy) {
1275 CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1276 return NULL;
1277 }
1278 }
1279
1280 sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1281 if (sec) {
1282 atomic_inc(&sec->ps_refcount);
1283
1284 sec->ps_part = sp;
1285
1286 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1287 sptlrpc_gc_add_sec(sec);
1288 } else {
1289 sptlrpc_policy_put(policy);
1290 }
1291
1292 return sec;
1293 }
1294
1295 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1296 {
1297 struct ptlrpc_sec *sec;
1298
1299 spin_lock(&imp->imp_lock);
1300 sec = sptlrpc_sec_get(imp->imp_sec);
1301 spin_unlock(&imp->imp_lock);
1302
1303 return sec;
1304 }
1305 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1306
1307 static void sptlrpc_import_sec_install(struct obd_import *imp,
1308 struct ptlrpc_sec *sec)
1309 {
1310 struct ptlrpc_sec *old_sec;
1311
1312 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1313
1314 spin_lock(&imp->imp_lock);
1315 old_sec = imp->imp_sec;
1316 imp->imp_sec = sec;
1317 spin_unlock(&imp->imp_lock);
1318
1319 if (old_sec) {
1320 sptlrpc_sec_kill(old_sec);
1321
1322 /* balance the ref taken by this import */
1323 sptlrpc_sec_put(old_sec);
1324 }
1325 }
1326
1327 static inline
1328 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1329 {
1330 return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1331 }
1332
1333 static inline
1334 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1335 {
1336 *dst = *src;
1337 }
1338
1339 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1340 struct ptlrpc_sec *sec,
1341 struct sptlrpc_flavor *sf)
1342 {
1343 char str1[32], str2[32];
1344
1345 if (sec->ps_flvr.sf_flags != sf->sf_flags)
1346 CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
1347 sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1348 str1, sizeof(str1)),
1349 sptlrpc_secflags2str(sf->sf_flags,
1350 str2, sizeof(str2)));
1351
1352 spin_lock(&sec->ps_lock);
1353 flavor_copy(&sec->ps_flvr, sf);
1354 spin_unlock(&sec->ps_lock);
1355 }
1356
1357 /**
1358 * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1359 * configuration. Upon called, imp->imp_sec may or may not be NULL.
1360 *
1361 * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1362 * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1363 */
1364 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1365 struct ptlrpc_svc_ctx *svc_ctx,
1366 struct sptlrpc_flavor *flvr)
1367 {
1368 struct ptlrpc_connection *conn;
1369 struct sptlrpc_flavor sf;
1370 struct ptlrpc_sec *sec, *newsec;
1371 enum lustre_sec_part sp;
1372 char str[24];
1373 int rc = 0;
1374
1375 might_sleep();
1376
1377 if (!imp)
1378 return 0;
1379
1380 conn = imp->imp_connection;
1381
1382 if (!svc_ctx) {
1383 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1384 /*
1385 * normal import, determine flavor from rule set, except
1386 * for mgc the flavor is predetermined.
1387 */
1388 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1389 sf = cliobd->cl_flvr_mgc;
1390 else
1391 sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1392 cliobd->cl_sp_to,
1393 &cliobd->cl_target_uuid,
1394 conn->c_self, &sf);
1395
1396 sp = imp->imp_obd->u.cli.cl_sp_me;
1397 } else {
1398 /* reverse import, determine flavor from incoming request */
1399 sf = *flvr;
1400
1401 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1402 sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1403 PTLRPC_SEC_FL_ROOTONLY;
1404
1405 sp = sptlrpc_target_sec_part(imp->imp_obd);
1406 }
1407
1408 sec = sptlrpc_import_sec_ref(imp);
1409 if (sec) {
1410 char str2[24];
1411
1412 if (flavor_equal(&sf, &sec->ps_flvr))
1413 goto out;
1414
1415 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1416 imp->imp_obd->obd_name,
1417 obd_uuid2str(&conn->c_remote_uuid),
1418 sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1419 sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1420
1421 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1422 SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1423 SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1424 SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1425 sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1426 goto out;
1427 }
1428 } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1429 SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1430 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1431 imp->imp_obd->obd_name,
1432 obd_uuid2str(&conn->c_remote_uuid),
1433 LNET_NIDNET(conn->c_self),
1434 sptlrpc_flavor2name(&sf, str, sizeof(str)));
1435 }
1436
1437 mutex_lock(&imp->imp_sec_mutex);
1438
1439 newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1440 if (newsec) {
1441 sptlrpc_import_sec_install(imp, newsec);
1442 } else {
1443 CERROR("import %s->%s: failed to create new sec\n",
1444 imp->imp_obd->obd_name,
1445 obd_uuid2str(&conn->c_remote_uuid));
1446 rc = -EPERM;
1447 }
1448
1449 mutex_unlock(&imp->imp_sec_mutex);
1450 out:
1451 sptlrpc_sec_put(sec);
1452 return rc;
1453 }
1454
1455 void sptlrpc_import_sec_put(struct obd_import *imp)
1456 {
1457 if (imp->imp_sec) {
1458 sptlrpc_sec_kill(imp->imp_sec);
1459
1460 sptlrpc_sec_put(imp->imp_sec);
1461 imp->imp_sec = NULL;
1462 }
1463 }
1464
1465 static void import_flush_ctx_common(struct obd_import *imp,
1466 uid_t uid, int grace, int force)
1467 {
1468 struct ptlrpc_sec *sec;
1469
1470 if (!imp)
1471 return;
1472
1473 sec = sptlrpc_import_sec_ref(imp);
1474 if (!sec)
1475 return;
1476
1477 sec_cop_flush_ctx_cache(sec, uid, grace, force);
1478 sptlrpc_sec_put(sec);
1479 }
1480
1481 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1482 {
1483 import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1484 1, 1);
1485 }
1486 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1487
1488 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1489 {
1490 import_flush_ctx_common(imp, -1, 1, 1);
1491 }
1492 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1493
1494 /**
1495 * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1496 * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1497 */
1498 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1499 {
1500 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1501 struct ptlrpc_sec_policy *policy;
1502 int rc;
1503
1504 LASSERT(ctx);
1505 LASSERT(ctx->cc_sec);
1506 LASSERT(ctx->cc_sec->ps_policy);
1507 LASSERT(!req->rq_reqmsg);
1508 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1509
1510 policy = ctx->cc_sec->ps_policy;
1511 rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1512 if (!rc) {
1513 LASSERT(req->rq_reqmsg);
1514 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1515
1516 /* zeroing preallocated buffer */
1517 if (req->rq_pool)
1518 memset(req->rq_reqmsg, 0, msgsize);
1519 }
1520
1521 return rc;
1522 }
1523
1524 /**
1525 * Used by ptlrpc client to free request buffer of \a req. After this
1526 * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1527 */
1528 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1529 {
1530 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1531 struct ptlrpc_sec_policy *policy;
1532
1533 LASSERT(ctx);
1534 LASSERT(ctx->cc_sec);
1535 LASSERT(ctx->cc_sec->ps_policy);
1536 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1537
1538 if (!req->rq_reqbuf && !req->rq_clrbuf)
1539 return;
1540
1541 policy = ctx->cc_sec->ps_policy;
1542 policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1543 req->rq_reqmsg = NULL;
1544 }
1545
1546 /*
1547 * NOTE caller must guarantee the buffer size is enough for the enlargement
1548 */
1549 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1550 int segment, int newsize)
1551 {
1552 void *src, *dst;
1553 int oldsize, oldmsg_size, movesize;
1554
1555 LASSERT(segment < msg->lm_bufcount);
1556 LASSERT(msg->lm_buflens[segment] <= newsize);
1557
1558 if (msg->lm_buflens[segment] == newsize)
1559 return;
1560
1561 /* nothing to do if we are enlarging the last segment */
1562 if (segment == msg->lm_bufcount - 1) {
1563 msg->lm_buflens[segment] = newsize;
1564 return;
1565 }
1566
1567 oldsize = msg->lm_buflens[segment];
1568
1569 src = lustre_msg_buf(msg, segment + 1, 0);
1570 msg->lm_buflens[segment] = newsize;
1571 dst = lustre_msg_buf(msg, segment + 1, 0);
1572 msg->lm_buflens[segment] = oldsize;
1573
1574 /* move from segment + 1 to end segment */
1575 LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1576 oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1577 movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg);
1578 LASSERT(movesize >= 0);
1579
1580 if (movesize)
1581 memmove(dst, src, movesize);
1582
1583 /* note we don't clear the ares where old data live, not secret */
1584
1585 /* finally set new segment size */
1586 msg->lm_buflens[segment] = newsize;
1587 }
1588 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1589
1590 /**
1591 * Used by ptlrpc client to enlarge the \a segment of request message pointed
1592 * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
1593 * preserved after the enlargement. this must be called after original request
1594 * buffer being allocated.
1595 *
1596 * \note after this be called, rq_reqmsg and rq_reqlen might have been changed,
1597 * so caller should refresh its local pointers if needed.
1598 */
1599 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1600 int segment, int newsize)
1601 {
1602 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1603 struct ptlrpc_sec_cops *cops;
1604 struct lustre_msg *msg = req->rq_reqmsg;
1605
1606 LASSERT(ctx);
1607 LASSERT(msg);
1608 LASSERT(msg->lm_bufcount > segment);
1609 LASSERT(msg->lm_buflens[segment] <= newsize);
1610
1611 if (msg->lm_buflens[segment] == newsize)
1612 return 0;
1613
1614 cops = ctx->cc_sec->ps_policy->sp_cops;
1615 LASSERT(cops->enlarge_reqbuf);
1616 return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1617 }
1618 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1619
1620 /**
1621 * Used by ptlrpc client to allocate reply buffer of \a req.
1622 *
1623 * \note After this, req->rq_repmsg is still not accessible.
1624 */
1625 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1626 {
1627 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1628 struct ptlrpc_sec_policy *policy;
1629
1630 LASSERT(ctx);
1631 LASSERT(ctx->cc_sec);
1632 LASSERT(ctx->cc_sec->ps_policy);
1633
1634 if (req->rq_repbuf)
1635 return 0;
1636
1637 policy = ctx->cc_sec->ps_policy;
1638 return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize);
1639 }
1640
1641 /**
1642 * Used by ptlrpc client to free reply buffer of \a req. After this
1643 * req->rq_repmsg is set to NULL and should not be accessed anymore.
1644 */
1645 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1646 {
1647 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1648 struct ptlrpc_sec_policy *policy;
1649
1650 LASSERT(ctx);
1651 LASSERT(ctx->cc_sec);
1652 LASSERT(ctx->cc_sec->ps_policy);
1653 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1654
1655 if (!req->rq_repbuf)
1656 return;
1657 LASSERT(req->rq_repbuf_len);
1658
1659 policy = ctx->cc_sec->ps_policy;
1660 policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1661 req->rq_repmsg = NULL;
1662 }
1663
1664 static int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1665 struct ptlrpc_svc_ctx *ctx)
1666 {
1667 struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1668
1669 if (!policy->sp_sops->install_rctx)
1670 return 0;
1671 return policy->sp_sops->install_rctx(imp, ctx);
1672 }
1673
1674 /****************************************
1675 * server side security *
1676 ****************************************/
1677
1678 static int flavor_allowed(struct sptlrpc_flavor *exp,
1679 struct ptlrpc_request *req)
1680 {
1681 struct sptlrpc_flavor *flvr = &req->rq_flvr;
1682
1683 if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1684 return 1;
1685
1686 if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1687 SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1688 SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1689 SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1690 return 1;
1691
1692 return 0;
1693 }
1694
1695 #define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
1696
1697 /**
1698 * Given an export \a exp, check whether the flavor of incoming \a req
1699 * is allowed by the export \a exp. Main logic is about taking care of
1700 * changing configurations. Return 0 means success.
1701 */
1702 int sptlrpc_target_export_check(struct obd_export *exp,
1703 struct ptlrpc_request *req)
1704 {
1705 struct sptlrpc_flavor flavor;
1706
1707 if (!exp)
1708 return 0;
1709
1710 /* client side export has no imp_reverse, skip
1711 * FIXME maybe we should check flavor this as well???
1712 */
1713 if (!exp->exp_imp_reverse)
1714 return 0;
1715
1716 /* don't care about ctx fini rpc */
1717 if (req->rq_ctx_fini)
1718 return 0;
1719
1720 spin_lock(&exp->exp_lock);
1721
1722 /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
1723 * the first req with the new flavor, then treat it as current flavor,
1724 * adapt reverse sec according to it.
1725 * note the first rpc with new flavor might not be with root ctx, in
1726 * which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
1727 */
1728 if (unlikely(exp->exp_flvr_changed) &&
1729 flavor_allowed(&exp->exp_flvr_old[1], req)) {
1730 /* make the new flavor as "current", and old ones as
1731 * about-to-expire
1732 */
1733 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1734 exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1735 flavor = exp->exp_flvr_old[1];
1736 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1737 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1738 exp->exp_flvr_old[0] = exp->exp_flvr;
1739 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1740 EXP_FLVR_UPDATE_EXPIRE;
1741 exp->exp_flvr = flavor;
1742
1743 /* flavor change finished */
1744 exp->exp_flvr_changed = 0;
1745 LASSERT(exp->exp_flvr_adapt == 1);
1746
1747 /* if it's gss, we only interested in root ctx init */
1748 if (req->rq_auth_gss &&
1749 !(req->rq_ctx_init &&
1750 (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1751 req->rq_auth_usr_ost))) {
1752 spin_unlock(&exp->exp_lock);
1753 CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1754 req->rq_auth_gss, req->rq_ctx_init,
1755 req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1756 req->rq_auth_usr_ost);
1757 return 0;
1758 }
1759
1760 exp->exp_flvr_adapt = 0;
1761 spin_unlock(&exp->exp_lock);
1762
1763 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1764 req->rq_svc_ctx, &flavor);
1765 }
1766
1767 /* if it equals to the current flavor, we accept it, but need to
1768 * dealing with reverse sec/ctx
1769 */
1770 if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1771 /* most cases should return here, we only interested in
1772 * gss root ctx init
1773 */
1774 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1775 (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1776 !req->rq_auth_usr_ost)) {
1777 spin_unlock(&exp->exp_lock);
1778 return 0;
1779 }
1780
1781 /* if flavor just changed, we should not proceed, just leave
1782 * it and current flavor will be discovered and replaced
1783 * shortly, and let _this_ rpc pass through
1784 */
1785 if (exp->exp_flvr_changed) {
1786 LASSERT(exp->exp_flvr_adapt);
1787 spin_unlock(&exp->exp_lock);
1788 return 0;
1789 }
1790
1791 if (exp->exp_flvr_adapt) {
1792 exp->exp_flvr_adapt = 0;
1793 CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1794 exp, exp->exp_flvr.sf_rpc,
1795 exp->exp_flvr_old[0].sf_rpc,
1796 exp->exp_flvr_old[1].sf_rpc);
1797 flavor = exp->exp_flvr;
1798 spin_unlock(&exp->exp_lock);
1799
1800 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1801 req->rq_svc_ctx,
1802 &flavor);
1803 } else {
1804 CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
1805 exp, exp->exp_flvr.sf_rpc,
1806 exp->exp_flvr_old[0].sf_rpc,
1807 exp->exp_flvr_old[1].sf_rpc);
1808 spin_unlock(&exp->exp_lock);
1809
1810 return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1811 req->rq_svc_ctx);
1812 }
1813 }
1814
1815 if (exp->exp_flvr_expire[0]) {
1816 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
1817 if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1818 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp,
1819 exp->exp_flvr.sf_rpc,
1820 exp->exp_flvr_old[0].sf_rpc,
1821 exp->exp_flvr_old[1].sf_rpc,
1822 (s64)(exp->exp_flvr_expire[0] -
1823 ktime_get_real_seconds()));
1824 spin_unlock(&exp->exp_lock);
1825 return 0;
1826 }
1827 } else {
1828 CDEBUG(D_SEC, "mark middle expired\n");
1829 exp->exp_flvr_expire[0] = 0;
1830 }
1831 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1832 exp->exp_flvr.sf_rpc,
1833 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1834 req->rq_flvr.sf_rpc);
1835 }
1836
1837 /* now it doesn't match the current flavor, the only chance we can
1838 * accept it is match the old flavors which is not expired.
1839 */
1840 if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1841 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
1842 if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1843 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
1844 exp,
1845 exp->exp_flvr.sf_rpc,
1846 exp->exp_flvr_old[0].sf_rpc,
1847 exp->exp_flvr_old[1].sf_rpc,
1848 (s64)(exp->exp_flvr_expire[1] -
1849 ktime_get_real_seconds()));
1850 spin_unlock(&exp->exp_lock);
1851 return 0;
1852 }
1853 } else {
1854 CDEBUG(D_SEC, "mark oldest expired\n");
1855 exp->exp_flvr_expire[1] = 0;
1856 }
1857 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1858 exp, exp->exp_flvr.sf_rpc,
1859 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1860 req->rq_flvr.sf_rpc);
1861 } else {
1862 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1863 exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1864 exp->exp_flvr_old[1].sf_rpc);
1865 }
1866
1867 spin_unlock(&exp->exp_lock);
1868
1869 CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
1870 exp, exp->exp_obd->obd_name,
1871 req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1872 req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1873 req->rq_flvr.sf_rpc,
1874 exp->exp_flvr.sf_rpc,
1875 exp->exp_flvr_old[0].sf_rpc,
1876 exp->exp_flvr_expire[0] ?
1877 (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
1878 exp->exp_flvr_old[1].sf_rpc,
1879 exp->exp_flvr_expire[1] ?
1880 (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
1881 return -EACCES;
1882 }
1883 EXPORT_SYMBOL(sptlrpc_target_export_check);
1884
1885 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1886 {
1887 /* peer's claim is unreliable unless gss is being used */
1888 if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1889 return svc_rc;
1890
1891 switch (req->rq_sp_from) {
1892 case LUSTRE_SP_CLI:
1893 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
1894 DEBUG_REQ(D_ERROR, req, "faked source CLI");
1895 svc_rc = SECSVC_DROP;
1896 }
1897 break;
1898 case LUSTRE_SP_MDT:
1899 if (!req->rq_auth_usr_mdt) {
1900 DEBUG_REQ(D_ERROR, req, "faked source MDT");
1901 svc_rc = SECSVC_DROP;
1902 }
1903 break;
1904 case LUSTRE_SP_OST:
1905 if (!req->rq_auth_usr_ost) {
1906 DEBUG_REQ(D_ERROR, req, "faked source OST");
1907 svc_rc = SECSVC_DROP;
1908 }
1909 break;
1910 case LUSTRE_SP_MGS:
1911 case LUSTRE_SP_MGC:
1912 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1913 !req->rq_auth_usr_ost) {
1914 DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
1915 svc_rc = SECSVC_DROP;
1916 }
1917 break;
1918 case LUSTRE_SP_ANY:
1919 default:
1920 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
1921 svc_rc = SECSVC_DROP;
1922 }
1923
1924 return svc_rc;
1925 }
1926
1927 /**
1928 * Used by ptlrpc server, to perform transformation upon request message of
1929 * incoming \a req. This must be the first thing to do with a incoming
1930 * request in ptlrpc layer.
1931 *
1932 * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in
1933 * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
1934 * \retval SECSVC_COMPLETE success, the request has been fully processed, and
1935 * reply message has been prepared.
1936 * \retval SECSVC_DROP failed, this request should be dropped.
1937 */
1938 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
1939 {
1940 struct ptlrpc_sec_policy *policy;
1941 struct lustre_msg *msg = req->rq_reqbuf;
1942 int rc;
1943
1944 LASSERT(msg);
1945 LASSERT(!req->rq_reqmsg);
1946 LASSERT(!req->rq_repmsg);
1947 LASSERT(!req->rq_svc_ctx);
1948
1949 req->rq_req_swab_mask = 0;
1950
1951 rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
1952 switch (rc) {
1953 case 1:
1954 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1955 case 0:
1956 break;
1957 default:
1958 CERROR("error unpacking request from %s x%llu\n",
1959 libcfs_id2str(req->rq_peer), req->rq_xid);
1960 return SECSVC_DROP;
1961 }
1962
1963 req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
1964 req->rq_sp_from = LUSTRE_SP_ANY;
1965 req->rq_auth_uid = -1;
1966 req->rq_auth_mapped_uid = -1;
1967
1968 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
1969 if (!policy) {
1970 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
1971 return SECSVC_DROP;
1972 }
1973
1974 LASSERT(policy->sp_sops->accept);
1975 rc = policy->sp_sops->accept(req);
1976 sptlrpc_policy_put(policy);
1977 LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
1978 LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
1979
1980 /*
1981 * if it's not null flavor (which means embedded packing msg),
1982 * reset the swab mask for the coming inner msg unpacking.
1983 */
1984 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
1985 req->rq_req_swab_mask = 0;
1986
1987 /* sanity check for the request source */
1988 rc = sptlrpc_svc_check_from(req, rc);
1989 return rc;
1990 }
1991
1992 /**
1993 * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed,
1994 * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to
1995 * a buffer of \a msglen size.
1996 */
1997 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
1998 {
1999 struct ptlrpc_sec_policy *policy;
2000 struct ptlrpc_reply_state *rs;
2001 int rc;
2002
2003 LASSERT(req->rq_svc_ctx);
2004 LASSERT(req->rq_svc_ctx->sc_policy);
2005
2006 policy = req->rq_svc_ctx->sc_policy;
2007 LASSERT(policy->sp_sops->alloc_rs);
2008
2009 rc = policy->sp_sops->alloc_rs(req, msglen);
2010 if (unlikely(rc == -ENOMEM)) {
2011 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2012
2013 if (svcpt->scp_service->srv_max_reply_size <
2014 msglen + sizeof(struct ptlrpc_reply_state)) {
2015 /* Just return failure if the size is too big */
2016 CERROR("size of message is too big (%zd), %d allowed\n",
2017 msglen + sizeof(struct ptlrpc_reply_state),
2018 svcpt->scp_service->srv_max_reply_size);
2019 return -ENOMEM;
2020 }
2021
2022 /* failed alloc, try emergency pool */
2023 rs = lustre_get_emerg_rs(svcpt);
2024 if (!rs)
2025 return -ENOMEM;
2026
2027 req->rq_reply_state = rs;
2028 rc = policy->sp_sops->alloc_rs(req, msglen);
2029 if (rc) {
2030 lustre_put_emerg_rs(rs);
2031 req->rq_reply_state = NULL;
2032 }
2033 }
2034
2035 LASSERT(rc != 0 ||
2036 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2037
2038 return rc;
2039 }
2040
2041 /**
2042 * Used by ptlrpc server, to perform transformation upon reply message.
2043 *
2044 * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
2045 * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2046 */
2047 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2048 {
2049 struct ptlrpc_sec_policy *policy;
2050 int rc;
2051
2052 LASSERT(req->rq_svc_ctx);
2053 LASSERT(req->rq_svc_ctx->sc_policy);
2054
2055 policy = req->rq_svc_ctx->sc_policy;
2056 LASSERT(policy->sp_sops->authorize);
2057
2058 rc = policy->sp_sops->authorize(req);
2059 LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2060
2061 return rc;
2062 }
2063
2064 /**
2065 * Used by ptlrpc server, to free reply_state.
2066 */
2067 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2068 {
2069 struct ptlrpc_sec_policy *policy;
2070 unsigned int prealloc;
2071
2072 LASSERT(rs->rs_svc_ctx);
2073 LASSERT(rs->rs_svc_ctx->sc_policy);
2074
2075 policy = rs->rs_svc_ctx->sc_policy;
2076 LASSERT(policy->sp_sops->free_rs);
2077
2078 prealloc = rs->rs_prealloc;
2079 policy->sp_sops->free_rs(rs);
2080
2081 if (prealloc)
2082 lustre_put_emerg_rs(rs);
2083 }
2084
2085 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2086 {
2087 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2088
2089 if (ctx)
2090 atomic_inc(&ctx->sc_refcount);
2091 }
2092
2093 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2094 {
2095 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2096
2097 if (!ctx)
2098 return;
2099
2100 LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2101 if (atomic_dec_and_test(&ctx->sc_refcount)) {
2102 if (ctx->sc_policy->sp_sops->free_ctx)
2103 ctx->sc_policy->sp_sops->free_ctx(ctx);
2104 }
2105 req->rq_svc_ctx = NULL;
2106 }
2107
2108 /****************************************
2109 * bulk security *
2110 ****************************************/
2111
2112 /**
2113 * Perform transformation upon bulk data pointed by \a desc. This is called
2114 * before transforming the request message.
2115 */
2116 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2117 struct ptlrpc_bulk_desc *desc)
2118 {
2119 struct ptlrpc_cli_ctx *ctx;
2120
2121 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2122
2123 if (!req->rq_pack_bulk)
2124 return 0;
2125
2126 ctx = req->rq_cli_ctx;
2127 if (ctx->cc_ops->wrap_bulk)
2128 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2129 return 0;
2130 }
2131 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2132
2133 /**
2134 * This is called after unwrap the reply message.
2135 * return nob of actual plain text size received, or error code.
2136 */
2137 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2138 struct ptlrpc_bulk_desc *desc,
2139 int nob)
2140 {
2141 struct ptlrpc_cli_ctx *ctx;
2142 int rc;
2143
2144 LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2145
2146 if (!req->rq_pack_bulk)
2147 return desc->bd_nob_transferred;
2148
2149 ctx = req->rq_cli_ctx;
2150 if (ctx->cc_ops->unwrap_bulk) {
2151 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2152 if (rc < 0)
2153 return rc;
2154 }
2155 return desc->bd_nob_transferred;
2156 }
2157 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2158
2159 /**
2160 * This is called after unwrap the reply message.
2161 * return 0 for success or error code.
2162 */
2163 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2164 struct ptlrpc_bulk_desc *desc)
2165 {
2166 struct ptlrpc_cli_ctx *ctx;
2167 int rc;
2168
2169 LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2170
2171 if (!req->rq_pack_bulk)
2172 return 0;
2173
2174 ctx = req->rq_cli_ctx;
2175 if (ctx->cc_ops->unwrap_bulk) {
2176 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2177 if (rc < 0)
2178 return rc;
2179 }
2180
2181 /*
2182 * if everything is going right, nob should equals to nob_transferred.
2183 * in case of privacy mode, nob_transferred needs to be adjusted.
2184 */
2185 if (desc->bd_nob != desc->bd_nob_transferred) {
2186 CERROR("nob %d doesn't match transferred nob %d\n",
2187 desc->bd_nob, desc->bd_nob_transferred);
2188 return -EPROTO;
2189 }
2190
2191 return 0;
2192 }
2193 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2194
2195 /****************************************
2196 * user descriptor helpers *
2197 ****************************************/
2198
2199 int sptlrpc_current_user_desc_size(void)
2200 {
2201 int ngroups;
2202
2203 ngroups = current_ngroups;
2204
2205 if (ngroups > LUSTRE_MAX_GROUPS)
2206 ngroups = LUSTRE_MAX_GROUPS;
2207 return sptlrpc_user_desc_size(ngroups);
2208 }
2209 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2210
2211 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2212 {
2213 struct ptlrpc_user_desc *pud;
2214
2215 pud = lustre_msg_buf(msg, offset, 0);
2216
2217 if (!pud)
2218 return -EINVAL;
2219
2220 pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2221 pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2222 pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2223 pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2224 pud->pud_cap = cfs_curproc_cap_pack();
2225 pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2226
2227 task_lock(current);
2228 if (pud->pud_ngroups > current_ngroups)
2229 pud->pud_ngroups = current_ngroups;
2230 memcpy(pud->pud_groups, current_cred()->group_info->gid,
2231 pud->pud_ngroups * sizeof(__u32));
2232 task_unlock(current);
2233
2234 return 0;
2235 }
2236 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2237
2238 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2239 {
2240 struct ptlrpc_user_desc *pud;
2241 int i;
2242
2243 pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2244 if (!pud)
2245 return -EINVAL;
2246
2247 if (swabbed) {
2248 __swab32s(&pud->pud_uid);
2249 __swab32s(&pud->pud_gid);
2250 __swab32s(&pud->pud_fsuid);
2251 __swab32s(&pud->pud_fsgid);
2252 __swab32s(&pud->pud_cap);
2253 __swab32s(&pud->pud_ngroups);
2254 }
2255
2256 if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2257 CERROR("%u groups is too large\n", pud->pud_ngroups);
2258 return -EINVAL;
2259 }
2260
2261 if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2262 msg->lm_buflens[offset]) {
2263 CERROR("%u groups are claimed but bufsize only %u\n",
2264 pud->pud_ngroups, msg->lm_buflens[offset]);
2265 return -EINVAL;
2266 }
2267
2268 if (swabbed) {
2269 for (i = 0; i < pud->pud_ngroups; i++)
2270 __swab32s(&pud->pud_groups[i]);
2271 }
2272
2273 return 0;
2274 }
2275 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2276
2277 /****************************************
2278 * misc helpers *
2279 ****************************************/
2280
2281 const char *sec2target_str(struct ptlrpc_sec *sec)
2282 {
2283 if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2284 return "*";
2285 if (sec_is_reverse(sec))
2286 return "c";
2287 return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2288 }
2289 EXPORT_SYMBOL(sec2target_str);
2290
2291 /*
2292 * return true if the bulk data is protected
2293 */
2294 bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2295 {
2296 switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2297 case SPTLRPC_BULK_SVC_INTG:
2298 case SPTLRPC_BULK_SVC_PRIV:
2299 return true;
2300 default:
2301 return false;
2302 }
2303 }
2304 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2305
2306 /****************************************
2307 * crypto API helper/alloc blkciper *
2308 ****************************************/
2309
2310 /****************************************
2311 * initialize/finalize *
2312 ****************************************/
2313
2314 int sptlrpc_init(void)
2315 {
2316 int rc;
2317
2318 rwlock_init(&policy_lock);
2319
2320 rc = sptlrpc_gc_init();
2321 if (rc)
2322 goto out;
2323
2324 rc = sptlrpc_conf_init();
2325 if (rc)
2326 goto out_gc;
2327
2328 rc = sptlrpc_enc_pool_init();
2329 if (rc)
2330 goto out_conf;
2331
2332 rc = sptlrpc_null_init();
2333 if (rc)
2334 goto out_pool;
2335
2336 rc = sptlrpc_plain_init();
2337 if (rc)
2338 goto out_null;
2339
2340 rc = sptlrpc_lproc_init();
2341 if (rc)
2342 goto out_plain;
2343
2344 return 0;
2345
2346 out_plain:
2347 sptlrpc_plain_fini();
2348 out_null:
2349 sptlrpc_null_fini();
2350 out_pool:
2351 sptlrpc_enc_pool_fini();
2352 out_conf:
2353 sptlrpc_conf_fini();
2354 out_gc:
2355 sptlrpc_gc_fini();
2356 out:
2357 return rc;
2358 }
2359
2360 void sptlrpc_fini(void)
2361 {
2362 sptlrpc_lproc_fini();
2363 sptlrpc_plain_fini();
2364 sptlrpc_null_fini();
2365 sptlrpc_enc_pool_fini();
2366 sptlrpc_conf_fini();
2367 sptlrpc_gc_fini();
2368 }