staging: lustre: remove RETURN macro
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_keyring.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

static struct ptlrpc_sec_policy gss_policy_keyring;
static struct ptlrpc_ctx_ops gss_keyring_ctxops;
static struct key_type gss_key_type;

static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                               struct ptlrpc_svc_ctx *svc_ctx);

/*
 * the timeout is only for the case where the upcall child process dies
 * abnormally; in all other cases it should eventually update the kernel
 * key.
 *
 * FIXME we'd better incorporate the client & server side upcall timeouts
 * into the framework of Adaptive Timeouts, but we need to figure out how
 * to make sure the kernel knows whether an upcall process is still in
 * progress or died unexpectedly.
 */
#define KEYRING_UPCALL_TIMEOUT  (obd_timeout + obd_timeout)

/****************************************
 * internal helpers                     *
 ****************************************/

#define DUMP_PROCESS_KEYRINGS(tsk)                                      \
{                                                                       \
        CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): "                 \
              "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n",           \
              tsk->comm, tsk->pid, tsk->uid, tsk->fsuid,                \
              tsk->parent->comm, tsk->parent->pid,                      \
              tsk->parent->uid, tsk->parent->fsuid,                     \
              tsk->request_key_auth ?                                   \
              tsk->request_key_auth->serial : 0,                        \
              key_cred(tsk)->thread_keyring ?                           \
              key_cred(tsk)->thread_keyring->serial : 0,                \
              key_tgcred(tsk)->process_keyring ?                        \
              key_tgcred(tsk)->process_keyring->serial : 0,             \
              key_tgcred(tsk)->session_keyring ?                        \
              key_tgcred(tsk)->session_keyring->serial : 0,             \
              key_cred(tsk)->user->uid_keyring ?                        \
              key_cred(tsk)->user->uid_keyring->serial : 0,             \
              key_cred(tsk)->user->session_keyring ?                    \
              key_cred(tsk)->user->session_keyring->serial : 0,         \
              key_cred(tsk)->jit_keyring                                \
             );                                                         \
}

#define DUMP_KEY(key)                                                   \
{                                                                       \
        CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n",              \
              key, key->serial, atomic_read(&key->usage),               \
              key->uid, key->gid,                                       \
              key->description ? key->description : "n/a"               \
             );                                                         \
}

#define key_cred(tsk)   ((tsk)->cred)
#define key_tgcred(tsk) ((tsk)->cred->tgcred)

static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}

static inline void key_revoke_locked(struct key *key)
{
        set_bit(KEY_FLAG_REVOKED, &key->flags);
}

static void ctx_upcall_timeout_kr(unsigned long data)
{
        struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
        struct key *key = ctx2gctx_keyring(ctx)->gck_key;

        CWARN("ctx %p, key %p\n", ctx, key);

        LASSERT(key);

        cli_ctx_expire(ctx);
        key_revoke_locked(key);
}

static
void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list *timer = gctx_kr->gck_timer;

        LASSERT(timer);

        CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
        timeout = timeout * HZ + cfs_time_current();

        init_timer(timer);
        timer->expires = timeout;
        timer->data = (unsigned long) ctx;
        timer->function = ctx_upcall_timeout_kr;

        add_timer(timer);
}

/*
 * caller should ensure there is no race with other threads
 */
static
void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
        struct timer_list *timer = gctx_kr->gck_timer;

        if (timer == NULL)
                return;

        CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);

        gctx_kr->gck_timer = NULL;

        del_singleshot_timer_sync(timer);

        OBD_FREE_PTR(timer);
}
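
/*
 * How the timer pieces above fit together (a sketch derived from the code
 * in this file, not an authoritative contract): ctx_start_timer_kr() is
 * armed when a context is created for a pending upcall; gss_kt_update()
 * cancels it via ctx_clear_timer_kr() once the userspace helper delivers
 * the downcall; if the helper dies instead, ctx_upcall_timeout_kr() fires,
 * expiring the ctx and revoking the key so waiting requests are not stuck
 * forever.
 */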

static
struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
                                     struct vfs_cred *vcred)
{
        struct ptlrpc_cli_ctx *ctx;
        struct gss_cli_ctx_keyring *gctx_kr;

        OBD_ALLOC_PTR(gctx_kr);
        if (gctx_kr == NULL)
                return NULL;

        OBD_ALLOC_PTR(gctx_kr->gck_timer);
        if (gctx_kr->gck_timer == NULL) {
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }
        init_timer(gctx_kr->gck_timer);

        ctx = &gctx_kr->gck_base.gc_base;

        if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
                OBD_FREE_PTR(gctx_kr->gck_timer);
                OBD_FREE_PTR(gctx_kr);
                return NULL;
        }

        ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
        clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
        atomic_inc(&ctx->cc_refcount); /* for the caller */

        return ctx;
}

static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;
        struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);

        CDEBUG(D_SEC, "destroying ctx %p\n", ctx);

        /* at this point the association with the key has been broken. */
        LASSERT(sec);
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
        LASSERT(gctx_kr->gck_key == NULL);

        ctx_clear_timer_kr(ctx);
        LASSERT(gctx_kr->gck_timer == NULL);

        if (gss_cli_ctx_fini_common(sec, ctx))
                return;

        OBD_FREE_PTR(gctx_kr);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        if (sync) {
                ctx_destroy_kr(ctx);
        } else {
                atomic_inc(&ctx->cc_refcount);
                sptlrpc_gc_add_ctx(ctx);
        }
}

static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        if (atomic_dec_and_test(&ctx->cc_refcount))
                ctx_release_kr(ctx, sync);
}

/*
 * key <-> ctx association and rules:
 * - a ctx might not be bound to any key
 * - key/ctx binding is protected by the key semaphore (if the key is
 *   present)
 * - key and ctx each take a reference on the other
 * - ctx enlist/unlist is protected by the ctx spinlock
 * - never enlist a ctx after it has been unlisted
 * - whoever does enlist should also do bind; lock the key before enlisting:
 *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
 * - whoever does unlist should also do unbind:
 *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
 *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
 */
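
/*
 * As an illustration of the ordering rules above, distilled from
 * gss_sec_lookup_ctx_kr() below (a sketch, not an additional API):
 *
 *      down_write(&key->sem);            lock key
 *      ctx_enlist_kr(ctx, is_root, 0);   lock ctx -> enlist -> unlock ctx
 *      bind_key_ctx(key, ctx);           bind
 *      up_write(&key->sem);              unlock key
 */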

static inline void spin_lock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_lock(lock);
}

static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
        if (condition)
                spin_unlock(lock);
}
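
/*
 * These conditional-lock helpers let ctx_enlist_kr()/ctx_unlist_kr() below
 * be used both standalone and with sec->ps_lock already held by the caller
 * (e.g. rvs_sec_install_root_ctx_kr() and flush_spec_ctx_cache_kr() pass
 * locked = 1 while holding the lock themselves).
 */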

static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        spin_lock_if(&sec->ps_lock, !locked);

        atomic_inc(&ctx->cc_refcount);
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
        if (is_root)
                gsec_kr->gsk_root_ctx = ctx;

        spin_unlock_if(&sec->ps_lock, !locked);
}

/*
 * Note that after this gets called, the caller should not access the ctx
 * again, because it might have been freed, unless the caller holds at
 * least one refcount on the ctx.
 *
 * return non-zero if we indeed unlisted this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        /* if the hashed bit is gone, leave the job to whoever is doing it */
        if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                return 0;

        /* drop the ref inside the spin lock to prevent races with other
         * operations */
        spin_lock_if(&sec->ps_lock, !locked);

        if (gsec_kr->gsk_root_ctx == ctx)
                gsec_kr->gsk_root_ctx = NULL;
        hlist_del_init(&ctx->cc_cache);
        atomic_dec(&ctx->cc_refcount);

        spin_unlock_if(&sec->ps_lock, !locked);

        return 1;
}

/*
 * bind a key and a ctx together.
 * caller must hold the write lock of the key, as well as a ref on both
 * key & ctx.
 */
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(atomic_read(&key->usage) > 0);
        LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
        LASSERT(key->payload.data == NULL);

        /* at this time the context may or may not be in the list. */
        key_get(key);
        atomic_inc(&ctx->cc_refcount);
        ctx2gctx_keyring(ctx)->gck_key = key;
        key->payload.data = ctx;
}

/*
 * unbind a key and a ctx.
 * caller must hold the write lock, as well as a ref on the key.
 */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(key->payload.data == ctx);
        LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

        /* must revoke the key, or others may treat it as newly created */
        key_revoke_locked(key);

        key->payload.data = NULL;
        ctx2gctx_keyring(ctx)->gck_key = NULL;

        /* once the ctx is separated from the key, the timer is meaningless */
        ctx_clear_timer_kr(ctx);

        ctx_put_kr(ctx, 1);
        key_put(key);
}

/*
 * given a ctx, unbind it from its coupled key, if any.
 * unbind can only be called once, so we need not worry about the key
 * being released by someone else.
 */
static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        struct key *key = ctx2gctx_keyring(ctx)->gck_key;

        if (key) {
                LASSERT(key->payload.data == ctx);

                key_get(key);
                down_write(&key->sem);
                unbind_key_ctx(key, ctx);
                up_write(&key->sem);
                key_put(key);
        }
}

/*
 * given a key, unbind it from its coupled ctx, if any.
 * caller must hold the write lock, as well as a ref on the key.
 */
static void unbind_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;

        if (ctx)
                unbind_key_ctx(key, ctx);
}

/*
 * unlist a ctx, and unbind it from its coupled key
 */
static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
{
        if (ctx_unlist_kr(ctx, 0))
                unbind_ctx_kr(ctx);
}

/*
 * given a key, unlist and unbind it from the coupled ctx (if any).
 * caller must hold the write lock, as well as a ref on the key.
 */
static void kill_key_locked(struct key *key)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;

        if (ctx && ctx_unlist_kr(ctx, 0))
                unbind_key_locked(key);
}

/*
 * caller should hold one ref on the contexts in the freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
        struct hlist_node *next;
        struct ptlrpc_cli_ctx *ctx;
        struct gss_cli_ctx *gctx;

        hlist_for_each_entry_safe(ctx, next, freelist, cc_cache) {
                hlist_del_init(&ctx->cc_cache);

                /* reverse ctx: update the current seq to the buddy svcctx
                 * if it exists. ideally this should be done at
                 * gss_cli_ctx_finalize(), but the ctx destroy could be
                 * delayed by:
                 *  1) ctx still has a reference;
                 *  2) ctx destroy is asynchronous;
                 * and the reverse import calling inval_all_ctx() requires
                 * this be done _immediately_, otherwise a newly created
                 * reverse ctx might copy a very old sequence number from
                 * the svcctx. */
                gctx = ctx2gctx(ctx);
                if (!rawobj_empty(&gctx->gc_svc_handle) &&
                    sec_is_reverse(gctx->gc_base.cc_sec)) {
                        gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
                                        (__u32) atomic_read(&gctx->gc_seq));
                }

                /* we need to wake up waiting reqs here. the context might
                 * be forcibly released before the upcall finished, and then
                 * the late-arrived downcall can't even find the ctx. */
                sptlrpc_cli_ctx_wakeup(ctx);

                unbind_ctx_kr(ctx);
                ctx_put_kr(ctx, 0);
        }
}

/*
 * lookup a root context directly in a sec; return the root ctx with a
 * reference taken, or NULL.
 */
static
struct ptlrpc_cli_ctx *sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx *ctx = NULL;

        spin_lock(&sec->ps_lock);

        ctx = gsec_kr->gsk_root_ctx;

        if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
                struct ptlrpc_cli_ctx *tmp;

                /* reverse ctx: search for the root ctx in the list, and
                 * choose the one with the shortest expiry time, which most
                 * probably has an established peer ctx on the client
                 * side. */
                hlist_for_each_entry(tmp, &gsec_kr->gsk_clist, cc_cache) {
                        if (ctx == NULL || ctx->cc_expire == 0 ||
                            ctx->cc_expire > tmp->cc_expire) {
                                ctx = tmp;
                                /* promote to be root_ctx */
                                gsec_kr->gsk_root_ctx = ctx;
                        }
                }
        }

        if (ctx) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);
                LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
                atomic_inc(&ctx->cc_refcount);
        }

        spin_unlock(&sec->ps_lock);

        return ctx;
}

#define RVS_CTX_EXPIRE_NICE     (10)

static
void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
                                 struct ptlrpc_cli_ctx *new_ctx,
                                 struct key *key)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx *ctx;
        cfs_time_t now;

        LASSERT(sec_is_reverse(sec));

        spin_lock(&sec->ps_lock);

        now = cfs_time_current_sec();

        /* set a short expiry on all existing ctxs */
        hlist_for_each_entry(ctx, &gsec_kr->gsk_clist, cc_cache) {
                if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
                        ctx->cc_early_expire = 1;
                        ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
                }
        }

        /* if there's already a root_ctx, instead of obsoleting the current
         * one immediately, we let it keep operating for a little while.
         * hopefully by the time the first backward rpc with the newest ctx
         * is sent out, the client side already has the peer ctx well
         * established. */
        ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

        if (key)
                bind_key_ctx(key, new_ctx);

        spin_unlock(&sec->ps_lock);
}

static void construct_key_desc(void *buf, int bufsize,
                               struct ptlrpc_sec *sec, uid_t uid)
{
        snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
        ((char *)buf)[bufsize - 1] = '\0';
}
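
/*
 * For example, uid 500 under a sec whose ps_id is 0x3a yields the
 * description "500@3a" (illustrative values only). gss_sec_lookup_ctx_kr()
 * and flush_user_ctx_cache_kr() both build the same string, so that
 * request_key() finds the same key for a given (sec, uid) pair.
 */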

/****************************************
 * sec apis                             *
 ****************************************/

static
struct ptlrpc_sec *gss_sec_create_kr(struct obd_import *imp,
                                     struct ptlrpc_svc_ctx *svcctx,
                                     struct sptlrpc_flavor *sf)
{
        struct gss_sec_keyring *gsec_kr;

        OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
        if (gsec_kr == NULL)
                return NULL;

        INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
        gsec_kr->gsk_root_ctx = NULL;
        mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
        mutex_init(&gsec_kr->gsk_uc_lock);
#endif

        if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
                                  imp, svcctx, sf))
                goto err_free;

        if (svcctx != NULL &&
            sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
                gss_sec_destroy_common(&gsec_kr->gsk_base);
                goto err_free;
        }

        return &gsec_kr->gsk_base.gs_base;

err_free:
        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
        return NULL;
}

static
void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec *gsec = sec2gsec(sec);
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);

        LASSERT(hlist_empty(&gsec_kr->gsk_clist));
        LASSERT(gsec_kr->gsk_root_ctx == NULL);

        gss_sec_destroy_common(gsec);

        OBD_FREE(gsec_kr, sizeof(*gsec_kr));
}

static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
{
        /* apart from the ROOTONLY flag, treat it as a root user only if
         * the real uid is 0; euid/fsuid being 0 is handled as a setuid
         * scenario */
        if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
                return 1;
        else
                return 0;
}

/*
 * unlink the request key from its keyring, which it was linked to during
 * request_key(). sadly, we have to 'guess' which keyring it's linked to.
 *
 * FIXME this code is fragile; it depends on how request_key_link() is
 * implemented.
 */
static void request_key_unlink(struct key *key)
{
        struct task_struct *tsk = current;
        struct key *ring;

        switch (key_cred(tsk)->jit_keyring) {
        case KEY_REQKEY_DEFL_DEFAULT:
        case KEY_REQKEY_DEFL_THREAD_KEYRING:
                ring = key_get(key_cred(tsk)->thread_keyring);
                if (ring)
                        break;
                /* fallthrough */
        case KEY_REQKEY_DEFL_PROCESS_KEYRING:
                ring = key_get(key_tgcred(tsk)->process_keyring);
                if (ring)
                        break;
                /* fallthrough */
        case KEY_REQKEY_DEFL_SESSION_KEYRING:
                rcu_read_lock();
                ring = key_get(rcu_dereference(key_tgcred(tsk)
                                               ->session_keyring));
                rcu_read_unlock();
                if (ring)
                        break;
                /* fallthrough */
        case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
                ring = key_get(key_cred(tsk)->user->session_keyring);
                break;
        case KEY_REQKEY_DEFL_USER_KEYRING:
                ring = key_get(key_cred(tsk)->user->uid_keyring);
                break;
        case KEY_REQKEY_DEFL_GROUP_KEYRING:
        default:
                LBUG();
        }

        LASSERT(ring);
        key_unlink(ring, key);
        key_put(ring);
}
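
/*
 * Note the deliberate fall-through between the cases above: if the
 * preferred keyring is absent, the next default is tried. To the best of
 * our understanding this mirrors the destination-keyring selection order
 * used by request_key() itself (thread -> process -> session -> user
 * session), but as the FIXME above says, the guess is fragile.
 */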

static
struct ptlrpc_cli_ctx *gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                                             struct vfs_cred *vcred,
                                             int create, int remove_dead)
{
        struct obd_import *imp = sec->ps_import;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct ptlrpc_cli_ctx *ctx = NULL;
        unsigned int is_root = 0, create_new = 0;
        struct key *key;
        char desc[24];
        char *coinfo;
        int coinfo_size;
        char *co_flags = "";

        LASSERT(imp != NULL);

        is_root = user_is_root(sec, vcred);

        /* a little optimization for the root context */
        if (is_root) {
                ctx = sec_lookup_root_ctx_kr(sec);
                /*
                 * Only lookup directly for REVERSE sec, which should
                 * always succeed.
                 */
                if (ctx || sec_is_reverse(sec))
                        return ctx;
        }

        LASSERT(create != 0);

        /* for the root context, obtain the lock and check again: this time
         * holding the root upcall lock, make sure nobody else populated a
         * new root context after the last check. */
        if (is_root) {
                mutex_lock(&gsec_kr->gsk_root_uc_lock);

                ctx = sec_lookup_root_ctx_kr(sec);
                if (ctx)
                        goto out;

                /* update reverse handle for root user */
                sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();

                switch (sec->ps_part) {
                case LUSTRE_SP_MDT:
                        co_flags = "m";
                        break;
                case LUSTRE_SP_OST:
                        co_flags = "o";
                        break;
                case LUSTRE_SP_MGC:
                        co_flags = "rmo";
                        break;
                case LUSTRE_SP_CLI:
                        co_flags = "r";
                        break;
                case LUSTRE_SP_MGS:
                default:
                        LBUG();
                }
        }

        /* in case of setuid, the key will be constructed as owned by
         * fsuid/fsgid, but we do authentication based on the real uid/gid.
         * the key permission bits will be exactly POS_ALL, so only
         * processes that possess this key can access it, although the
         * quota might be counted against others (fsuid/fsgid).
         *
         * the keyring will use fsuid/fsgid as upcall parameters, so we
         * have to encode the real uid/gid into the callout info.
         */

        construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

        /* callout info format:
         * secid:mech:uid:gid:flags:svc_type:peer_nid:target_uuid
         */
        coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
        OBD_ALLOC(coinfo, coinfo_size);
        if (coinfo == NULL)
                goto out;

        snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s",
                 sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
                 vcred->vc_uid, vcred->vc_gid,
                 co_flags, import_to_gss_svc(imp),
                 imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
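
        /*
         * For illustration, a populated callout string might look like
         * "5:krb5:500:500:r:2:0x200000401:lustre-MDT0000-mdc-...": the
         * fields are secid, mech name, uid, gid, flags, svc type, peer nid
         * and target obd name. All values here are made up.
         */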

        CDEBUG(D_SEC, "requesting key for %s\n", desc);

        keyring_upcall_lock(gsec_kr);
        key = request_key(&gss_key_type, desc, coinfo);
        keyring_upcall_unlock(gsec_kr);

        OBD_FREE(coinfo, coinfo_size);

        if (IS_ERR(key)) {
                CERROR("failed request key: %ld\n", PTR_ERR(key));
                goto out;
        }
        CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);

        /* once payload.data has been pointed to a ctx, it never changes
         * until we de-associate them; but a parallel request_key() may
         * return a key with payload.data == NULL at the same time. so we
         * still need the write lock on key->sem to serialize them. */
        down_write(&key->sem);

        if (likely(key->payload.data != NULL)) {
                ctx = key->payload.data;

                LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
                LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
                LASSERT(atomic_read(&key->usage) >= 2);

                /* simply take a ref and return. it's the upper layer's
                 * responsibility to detect & replace dead ctxs. */
                atomic_inc(&ctx->cc_refcount);
        } else {
                /* pre-initialize with a cli_ctx. this can't be done in
                 * key_instantiate() because we don't have enough
                 * information there. */
                ctx = ctx_create_kr(sec, vcred);
                if (ctx != NULL) {
                        ctx_enlist_kr(ctx, is_root, 0);
                        bind_key_ctx(key, ctx);

                        ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);

                        CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
                               key, ctx, sec);
                } else {
                        /* we'd prefer to call key_revoke(), but we'd rather
                         * revoke it within this key->sem locked period. */
                        key_revoke_locked(key);
                }

                create_new = 1;
        }

        up_write(&key->sem);

        if (is_root && create_new)
                request_key_unlink(key);

        key_put(key);
out:
        if (is_root)
                mutex_unlock(&gsec_kr->gsk_root_uc_lock);
        return ctx;
}

static
void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx,
                            int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        ctx_release_kr(ctx, sync);
}

/*
 * flush the contexts of a normal user; we must resort to the keyring
 * itself to find the contexts which belong to us.
 *
 * Note we are only supposed to flush _my_ contexts; the "uid" will be
 * ignored in the search.
 */
static
void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct key *key;
        char desc[24];

        /* nothing to do for reverse or rootonly sec */
        if (sec_is_reverse(sec) || sec_is_rootonly(sec))
                return;

        construct_key_desc(desc, sizeof(desc), sec, uid);

        /* there should be only one valid key, but we loop anyway in case
         * of any weird cases */
        for (;;) {
                key = request_key(&gss_key_type, desc, NULL);
                if (IS_ERR(key)) {
                        CDEBUG(D_SEC, "No more key found for current user\n");
                        break;
                }

                down_write(&key->sem);

                kill_key_locked(key);

                /* kill_key_locked() should usually revoke the key, but we
                 * revoke it again to make sure, e.g. in some cases the key
                 * may not be well coupled with a context. */
                key_revoke_locked(key);

                up_write(&key->sem);

                key_put(key);
        }
}

/*
 * flush the contexts of root, or of all users; we iterate through the list.
 */
static
void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                             uid_t uid,
                             int grace, int force)
{
        struct gss_sec_keyring *gsec_kr;
        struct hlist_head freelist = HLIST_HEAD_INIT;
        struct hlist_node *next;
        struct ptlrpc_cli_ctx *ctx;

        gsec_kr = sec2gsec_keyring(sec);

        spin_lock(&sec->ps_lock);
        hlist_for_each_entry_safe(ctx, next,
                                  &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                        continue;

                /* at this moment there are at least 2 base references:
                 * key association and in-list. */
                if (atomic_read(&ctx->cc_refcount) > 2) {
                        if (!force)
                                continue;
                        CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              atomic_read(&ctx->cc_refcount) - 2);
                }

                set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                if (!grace)
                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                atomic_inc(&ctx->cc_refcount);

                if (ctx_unlist_kr(ctx, 1)) {
                        hlist_add_head(&ctx->cc_cache, &freelist);
                } else {
                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
                        atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
}

static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
                               uid_t uid, int grace, int force)
{
        CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
               sec, atomic_read(&sec->ps_refcount),
               atomic_read(&sec->ps_nctx),
               uid, grace, force);

        if (uid != -1 && uid != 0)
                flush_user_ctx_cache_kr(sec, uid, grace, force);
        else
                flush_spec_ctx_cache_kr(sec, uid, grace, force);

        return 0;
}

static
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_head freelist = HLIST_HEAD_INIT;
        struct hlist_node *next;
        struct ptlrpc_cli_ctx *ctx;

        CWARN("running gc\n");

        spin_lock(&sec->ps_lock);
        hlist_for_each_entry_safe(ctx, next,
                                  &gsec_kr->gsk_clist, cc_cache) {
                LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                atomic_inc(&ctx->cc_refcount);

                if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
                        hlist_add_head(&ctx->cc_cache, &freelist);
                        CWARN("unhashed ctx %p\n", ctx);
                } else {
                        LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
                        atomic_dec(&ctx->cc_refcount);
                }
        }
        spin_unlock(&sec->ps_lock);

        dispose_ctx_list_kr(&freelist);
}

static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
        struct hlist_node *next;
        struct ptlrpc_cli_ctx *ctx;
        struct gss_cli_ctx *gctx;
        time_t now = cfs_time_current_sec();

        spin_lock(&sec->ps_lock);
        hlist_for_each_entry_safe(ctx, next,
                                  &gsec_kr->gsk_clist, cc_cache) {
                struct key *key;
                char flags_str[40];
                char mech[40];

                gctx = ctx2gctx(ctx);
                key = ctx2gctx_keyring(ctx)->gck_key;

                gss_cli_ctx_flags2str(ctx->cc_flags,
                                      flags_str, sizeof(flags_str));

                if (gctx->gc_mechctx)
                        lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
                else
                        snprintf(mech, sizeof(mech), "N/A");
                mech[sizeof(mech) - 1] = '\0';

                seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
                           "seq %d, win %u, key %08x(ref %d), "
                           "hdl "LPX64":"LPX64", mech: %s\n",
                           ctx, ctx->cc_vcred.vc_uid,
                           atomic_read(&ctx->cc_refcount),
                           ctx->cc_expire,
                           ctx->cc_expire ? ctx->cc_expire - now : 0,
                           flags_str,
                           atomic_read(&gctx->gc_seq),
                           gctx->gc_win,
                           key ? key->serial : 0,
                           key ? atomic_read(&key->usage) : 0,
                           gss_handle_to_u64(&gctx->gc_handle),
                           gss_handle_to_u64(&gctx->gc_svc_handle),
                           mech);
        }
        spin_unlock(&sec->ps_lock);

        return 0;
}

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
{
        /* upcall is already on the way */
        return 0;
}

static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        if (cli_ctx_check_death(ctx)) {
                kill_ctx_kr(ctx);
                return 1;
        }

        if (cli_ctx_is_ready(ctx))
                return 0;
        return 1;
}

static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        cli_ctx_expire(ctx);
        kill_ctx_kr(ctx);
}

/****************************************
 * (reverse) service                    *
 ****************************************/

/*
 * a reverse context need have nothing to do with keyrings; here we still
 * keep the version which binds to a key, for future reference.
 */
#define HAVE_REVERSE_CTX_NOKEY


static
int sec_install_rctx_kr(struct ptlrpc_sec *sec,
                        struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_cli_ctx *cli_ctx;
        struct vfs_cred vcred = { 0, 0 };
        int rc;

        LASSERT(sec);
        LASSERT(svc_ctx);

        cli_ctx = ctx_create_kr(sec, &vcred);
        if (cli_ctx == NULL)
                return -ENOMEM;

        rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
        if (rc) {
                CERROR("failed to copy reverse cli ctx: %d\n", rc);

                ctx_put_kr(cli_ctx, 1);
                return rc;
        }

        rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);

        ctx_put_kr(cli_ctx, 1);

        return 0;
}


/****************************************
 * service apis                         *
 ****************************************/

static
int gss_svc_accept_kr(struct ptlrpc_request *req)
{
        return gss_svc_accept(&gss_policy_keyring, req);
}

static
int gss_svc_install_rctx_kr(struct obd_import *imp,
                            struct ptlrpc_svc_ctx *svc_ctx)
{
        struct ptlrpc_sec *sec;
        int rc;

        sec = sptlrpc_import_sec_ref(imp);
        LASSERT(sec);

        rc = sec_install_rctx_kr(sec, svc_ctx);
        sptlrpc_sec_put(sec);

        return rc;
}

/****************************************
 * key apis                             *
 ****************************************/

static
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
        int rc;

        if (data != NULL || datalen != 0) {
                CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
                return -EINVAL;
        }

        if (key->payload.data != NULL) {
                CERROR("key already has payload\n");
                return -EINVAL;
        }

        /* link the key to the session keyring, so that the subsequent
         * context negotiation rpc fired from user space can find this key.
         * it will be unlinked automatically when the upcall process dies.
         *
         * we can't do this through keyctl from userspace, because the
         * upcall might be neither possessor nor owner of the key (setuid).
         *
         * the session keyring is created upon upcall and doesn't change
         * until the upcall finishes, so the rcu lock is not needed here.
         */
        LASSERT(key_tgcred(current)->session_keyring);

        lockdep_off();
        rc = key_link(key_tgcred(current)->session_keyring, key);
        lockdep_on();
        if (unlikely(rc)) {
                CERROR("failed to link key %08x to keyring %08x: %d\n",
                       key->serial,
                       key_tgcred(current)->session_keyring->serial, rc);
                return rc;
        }

        CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
        return 0;
}
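
/*
 * Layout of the downcall buffer parsed by gss_kt_update() (read off the
 * extraction code below, not from a formal spec):
 *
 *      __u32 seq_win;
 *      if seq_win == 0 (error case):
 *              __u32 nego_rpc_err;
 *              __u32 nego_gss_err;
 *      else (success case):
 *              rawobj_t gc_handle;     (length-prefixed)
 *              rawobj_t mech_token;    (length-prefixed, fed to
 *                                       lgss_import_sec_context())
 */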

/*
 * called with the key semaphore write-locked. this means we can operate
 * on the context without fear of losing the refcount.
 */
static
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
        struct ptlrpc_cli_ctx *ctx = key->payload.data;
        struct gss_cli_ctx *gctx;
        rawobj_t tmpobj = RAWOBJ_EMPTY;
        __u32 datalen32 = (__u32) datalen;
        int rc;

        if (data == NULL || datalen == 0) {
                CWARN("invalid: data %p, len %lu\n", data, (long)datalen);
                return -EINVAL;
        }

        /* if the upcall finished negotiation too fast (most likely because
         * a local error happened) and called kt_update(), the ctx might
         * still be NULL. but the key will eventually be associated with a
         * context, or be revoked. if the key status is fine, return -EAGAIN
         * to let userspace sleep a while and call again. */
        if (ctx == NULL) {
                CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n",
                       key, key->serial, key->flags);

                rc = key_validate(key);
                if (rc == 0)
                        return -EAGAIN;
                else
                        return rc;
        }

        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(ctx->cc_sec);

        ctx_clear_timer_kr(ctx);

        /* don't proceed if already refreshed */
        if (cli_ctx_is_refreshed(ctx)) {
                CWARN("ctx already done refresh\n");
                return 0;
        }

        sptlrpc_cli_ctx_get(ctx);
        gctx = ctx2gctx(ctx);

        rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
                                  sizeof(gctx->gc_win));
        if (rc) {
                CERROR("failed to extract seq_win\n");
                goto out;
        }

        if (gctx->gc_win == 0) {
                __u32 nego_rpc_err, nego_gss_err;

                rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
                                          sizeof(nego_rpc_err));
                if (rc) {
                        CERROR("failed to extract rpc rc\n");
                        goto out;
                }

                rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
                                          sizeof(nego_gss_err));
                if (rc) {
                        CERROR("failed to extract gss rc\n");
                        goto out;
                }

                CERROR("negotiation: rpc err %d, gss err %x\n",
                       nego_rpc_err, nego_gss_err);

                rc = nego_rpc_err ? nego_rpc_err : -EACCES;
        } else {
                rc = rawobj_extract_local_alloc(&gctx->gc_handle,
                                                (__u32 **) &data, &datalen32);
                if (rc) {
                        CERROR("failed to extract handle\n");
                        goto out;
                }

                rc = rawobj_extract_local(&tmpobj,
                                          (__u32 **) &data, &datalen32);
                if (rc) {
                        CERROR("failed to extract mech\n");
                        goto out;
                }

                rc = lgss_import_sec_context(&tmpobj,
                                             sec2gsec(ctx->cc_sec)->gs_mech,
                                             &gctx->gc_mechctx);
                if (rc != GSS_S_COMPLETE)
                        CERROR("failed to import context\n");
                else
                        rc = 0;
        }
out:
        /* we don't care what the current status of this ctx is, even if
         * someone else is operating on it at the same time. we just add
         * our own verdict here. */
        if (rc == 0) {
                gss_cli_ctx_uptodate(gctx);
        } else {
                /* this will also revoke the key. it has to be done before
                 * waking up waiters, otherwise they can find the stale
                 * key */
                kill_key_locked(key);

                cli_ctx_expire(ctx);

                if (rc != -ERESTART)
                        set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
        }

        /* let user space think it's a success */
        sptlrpc_cli_ctx_put(ctx, 1);
        return 0;
}

static
int gss_kt_match(const struct key *key, const void *desc)
{
        return (strcmp(key->description, (const char *) desc) == 0);
}

static
void gss_kt_destroy(struct key *key)
{
        LASSERT(key->payload.data == NULL);
        CDEBUG(D_SEC, "destroy key %p\n", key);
}

static
void gss_kt_describe(const struct key *key, struct seq_file *s)
{
        if (key->description == NULL)
                seq_puts(s, "[null]");
        else
                seq_puts(s, key->description);
}

static struct key_type gss_key_type = {
        .name           = "lgssc",
        .def_datalen    = 0,
        .instantiate    = gss_kt_instantiate,
        .update         = gss_kt_update,
        .match          = gss_kt_match,
        .destroy        = gss_kt_destroy,
        .describe       = gss_kt_describe,
};
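
/*
 * Lifecycle sketch for keys of this type, assuming the standard
 * /sbin/request-key upcall mechanism (the userspace helper itself is not
 * part of this file): request_key(&gss_key_type, desc, coinfo) in
 * gss_sec_lookup_ctx_kr() triggers the upcall, which instantiates the key
 * with an empty payload (gss_kt_instantiate), runs the GSS negotiation,
 * and finally pushes the result back with a keyctl update, landing in
 * gss_kt_update() above.
 */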

/****************************************
 * lustre gss keyring policy            *
 ****************************************/

static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
        .match                  = gss_cli_ctx_match,
        .refresh                = gss_cli_ctx_refresh_kr,
        .validate               = gss_cli_ctx_validate_kr,
        .die                    = gss_cli_ctx_die_kr,
        .sign                   = gss_cli_ctx_sign,
        .verify                 = gss_cli_ctx_verify,
        .seal                   = gss_cli_ctx_seal,
        .unseal                 = gss_cli_ctx_unseal,
        .wrap_bulk              = gss_cli_ctx_wrap_bulk,
        .unwrap_bulk            = gss_cli_ctx_unwrap_bulk,
};

static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
        .create_sec             = gss_sec_create_kr,
        .destroy_sec            = gss_sec_destroy_kr,
        .kill_sec               = gss_sec_kill,
        .lookup_ctx             = gss_sec_lookup_ctx_kr,
        .release_ctx            = gss_sec_release_ctx_kr,
        .flush_ctx_cache        = gss_sec_flush_ctx_cache_kr,
        .gc_ctx                 = gss_sec_gc_ctx_kr,
        .install_rctx           = gss_sec_install_rctx,
        .alloc_reqbuf           = gss_alloc_reqbuf,
        .free_reqbuf            = gss_free_reqbuf,
        .alloc_repbuf           = gss_alloc_repbuf,
        .free_repbuf            = gss_free_repbuf,
        .enlarge_reqbuf         = gss_enlarge_reqbuf,
        .display                = gss_sec_display_kr,
};

static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
        .accept                 = gss_svc_accept_kr,
        .invalidate_ctx         = gss_svc_invalidate_ctx,
        .alloc_rs               = gss_svc_alloc_rs,
        .authorize              = gss_svc_authorize,
        .free_rs                = gss_svc_free_rs,
        .free_ctx               = gss_svc_free_ctx,
        .prep_bulk              = gss_svc_prep_bulk,
        .unwrap_bulk            = gss_svc_unwrap_bulk,
        .wrap_bulk              = gss_svc_wrap_bulk,
        .install_rctx           = gss_svc_install_rctx_kr,
};

static struct ptlrpc_sec_policy gss_policy_keyring = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "gss.keyring",
        .sp_policy              = SPTLRPC_POLICY_GSS,
        .sp_cops                = &gss_sec_keyring_cops,
        .sp_sops                = &gss_sec_keyring_sops,
};


int __init gss_init_keyring(void)
{
        int rc;

        rc = register_key_type(&gss_key_type);
        if (rc) {
                CERROR("failed to register keyring type: %d\n", rc);
                return rc;
        }

        rc = sptlrpc_register_policy(&gss_policy_keyring);
        if (rc) {
                unregister_key_type(&gss_key_type);
                return rc;
        }

        return 0;
}

void __exit gss_exit_keyring(void)
{
        unregister_key_type(&gss_key_type);
        sptlrpc_unregister_policy(&gss_policy_keyring);
}