/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>

struct plain_sec {
	struct ptlrpc_sec	 pls_base;
	rwlock_t		 pls_lock;
	struct ptlrpc_cli_ctx	*pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops	plain_ctx_ops;
static struct ptlrpc_svc_ctx	plain_svc_ctx;

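/* reply-buffer offset of the final reply message when space for an early
 * reply is reserved in front of it; computed once in sptlrpc_plain_init(). */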
static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed four-segment layout.
 */
#define PLAIN_PACK_SEGMENTS	(4)

#define PLAIN_PACK_HDR_OFF	(0)
#define PLAIN_PACK_MSG_OFF	(1)
#define PLAIN_PACK_USER_OFF	(2)
#define PLAIN_PACK_BULK_OFF	(3)

#define PLAIN_FL_USER		(0x01)
#define PLAIN_FL_BULK		(0x02)

struct plain_header {
	__u8 ph_ver;		/* 0 */
	__u8 ph_flags;
	__u8 ph_sp;		/* source */
	__u8 ph_bulk_hash_alg;	/* complete flavor desc */
	__u8 ph_pad[4];
};

struct plain_bulk_token {
	__u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers		*
 ****************************************/

static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (bsd == NULL) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
				  __u8 hash_alg,
				  struct plain_bulk_token *tokenr)
{
	struct plain_bulk_token tokenv;
	int rc;

	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
	rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
				       sizeof(tokenv.pbt_hash));
	if (rc)
		return rc;

	if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
		return -EACCES;
	return 0;
}

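/* Flip a single bit in the first non-empty page of the bulk so that
 * checksum verification fails; used for OBD_FAIL_OSC_CHECKSUM_RECEIVE
 * fault injection. */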
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char *ptr;
	unsigned int off, i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len == 0)
			continue;

		ptr = kmap(desc->bd_iov[i].kiov_page);
		off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
		ptr[off] ^= 0x1;
		kunmap(desc->bd_iov[i].kiov_page);
		return;
	}
}

/****************************************
 * cli_ctx apis				*
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}

static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	ENTRY;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	RETURN(0);
}

static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32 cksum;
	int swabbed;
	ENTRY;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		RETURN(-EPROTO);
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		RETURN(-EPROTO);
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		RETURN(-EPROTO);
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		RETURN(-EPROTO);
	}

	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		RETURN(-EPROTO);
	}

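	/* an early reply buffer can be reused before the final reply
	 * arrives, so its payload is protected by an inline CRC32 that
	 * the server computed in plain_authorize() */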
	if (unlikely(req->rq_early)) {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				NULL, 0, (unsigned char *)&cksum, &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			RETURN(-EINVAL);
		}
	} else {
		/* whether we sent with bulk or not, we expect the same
		 * in reply, except for early reply */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			RETURN(-EPROTO);
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				RETURN(-EPROTO);
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	RETURN(0);
}

static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token *token;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *) bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		RETURN(0);

	if (req->rq_bulk_read)
		RETURN(0);

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * For sending we only compute a wrong checksum instead
		 * of corrupting the data, so it is still correct on a redo.
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}

static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;
	int i, nob;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* trim the kiov lengths to the number of bytes actually transferred */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
			desc->bd_iov[i].kiov_len =
				desc->bd_nob_transferred - nob;
		}
		nob += desc->bd_iov[i].kiov_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}

/****************************************
 * sec apis				*
 ****************************************/

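/* Install the single client context shared by every request on this sec.
 * The context is allocated outside the lock; if another thread installed
 * one first, take a reference on that one and free ours. */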
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx *ctx, *ctx_new;

	OBD_ALLOC_PTR(ctx_new);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		if (ctx_new)
			OBD_FREE_PTR(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1);	/* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount);	/* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec *plsec = sec2plsec(sec);
	ENTRY;

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(plsec->pls_ctx == NULL);

	class_import_put(sec->ps_import);

	OBD_FREE_PTR(plsec);
	EXIT;
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec *plsec;
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;
	ENTRY;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	OBD_ALLOC_PTR(plsec);
	if (plsec == NULL)
		RETURN(NULL);

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (ctx == NULL) {
			plain_destroy_sec(sec);
			RETURN(NULL);
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	RETURN(sec);
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;
	ENTRY;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	if (unlikely(ctx == NULL))
		ctx = plain_sec_install_ctx(plsec);

	RETURN(ctx);
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	OBD_FREE_PTR(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;
	ENTRY;

	/* do nothing unless the caller wants to flush for 'all' */
	if (uid != -1)
		RETURN(0);

	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	RETURN(0);
}

static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;
	ENTRY;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		alloc_len = size_roundup_power2(alloc_len);
		OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
		if (!req->rq_reqbuf)
			RETURN(-ENOMEM);

		req->rq_reqbuf_len = alloc_len;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc)
		sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

	RETURN(0);
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	ENTRY;
	if (!req->rq_pool) {
		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}
	EXIT;
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;
	ENTRY;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
	if (!req->rq_repbuf)
		RETURN(-ENOMEM);

	req->rq_repbuf_len = alloc_len;
	RETURN(0);
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	ENTRY;
	OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
	EXIT;
}

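/* Grow one segment of the embedded request message in place.  The new
 * size is computed twice, once for the embedded message and once for the
 * wrapper, and the buffer is reallocated only when it is too small. */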
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int oldsize;
	int newmsg_size, newbuf_size;
	ENTRY;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size. */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size. */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		OBD_ALLOC_LARGE(newbuf, newbuf_size);
		if (newbuf == NULL)
			RETURN(-ENOMEM);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	RETURN(0);
}

/****************************************
 * service apis				*
 ****************************************/

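/* a single, statically allocated server-side context is shared by all
 * incoming plain requests; there is no per-client state to track */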
static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount = ATOMIC_INIT(1),
	.sc_policy = &plain_policy,
};

static
int plain_accept(struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	int swabbed;
	ENTRY;

	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
		SPTLRPC_POLICY_PLAIN);

	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
	    SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
		CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
		RETURN(SECSVC_DROP);
	}

	if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
		RETURN(SECSVC_DROP);
	}

	swabbed = ptlrpc_req_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (phdr == NULL) {
		CERROR("missing plain header\n");
		RETURN(SECSVC_DROP);
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		RETURN(SECSVC_DROP);
	}

	if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
		CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
		RETURN(SECSVC_DROP);
	}

	req->rq_sp_from = phdr->ph_sp;
	req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

	if (phdr->ph_flags & PLAIN_FL_USER) {
		if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
					     swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			RETURN(SECSVC_DROP);
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
	}

	if (phdr->ph_flags & PLAIN_FL_BULK) {
		if (plain_unpack_bsd(msg, swabbed))
			RETURN(SECSVC_DROP);

		req->rq_pack_bulk = 1;
	}

	req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

	req->rq_svc_ctx = &plain_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

	RETURN(SECSVC_OK);
}

static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rs_size = sizeof(*rs);
	ENTRY;

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		OBD_ALLOC_LARGE(rs, rs_size);
		if (rs == NULL)
			RETURN(-ENOMEM);

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	RETURN(0);
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	ENTRY;

	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		OBD_FREE_LARGE(rs, rs->rs_size);
	EXIT;
}

static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2 *msg = rs->rs_repbuf;
	struct plain_header *phdr;
	int len;
	ENTRY;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

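	/* a non-final (early) reply is protected by an inline CRC32 of
	 * the message, which the client verifies in plain_ctx_verify() */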
	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
			lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
			lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
			NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
		req->rq_reply_off = 0;
	}

	RETURN(0);
}

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenr;
	int rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;

	LASSERT(req->rq_bulk_read);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      tokenv);
	if (rc) {
		CERROR("bulk read: server failed to compute checksum: %d\n",
		       rc);
	} else {
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
			corrupt_bulk_data(desc);
	}

	return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
	.refresh		= plain_ctx_refresh,
	.validate		= plain_ctx_validate,
	.sign			= plain_ctx_sign,
	.verify			= plain_ctx_verify,
	.wrap_bulk		= plain_cli_wrap_bulk,
	.unwrap_bulk		= plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
	.create_sec		= plain_create_sec,
	.destroy_sec		= plain_destroy_sec,
	.kill_sec		= plain_kill_sec,
	.lookup_ctx		= plain_lookup_ctx,
	.release_ctx		= plain_release_ctx,
	.flush_ctx_cache	= plain_flush_ctx_cache,
	.alloc_reqbuf		= plain_alloc_reqbuf,
	.free_reqbuf		= plain_free_reqbuf,
	.alloc_repbuf		= plain_alloc_repbuf,
	.free_repbuf		= plain_free_repbuf,
	.enlarge_reqbuf		= plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
	.accept			= plain_accept,
	.alloc_rs		= plain_alloc_rs,
	.authorize		= plain_authorize,
	.free_rs		= plain_free_rs,
	.unwrap_bulk		= plain_svc_unwrap_bulk,
	.wrap_bulk		= plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "plain",
	.sp_policy		= SPTLRPC_POLICY_PLAIN,
	.sp_cops		= &plain_sec_cops,
	.sp_sops		= &plain_sec_sops,
};

int sptlrpc_plain_init(void)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rc;

	buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
	plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rc = sptlrpc_register_policy(&plain_policy);
	if (rc)
		CERROR("failed to register: %d\n", rc);

	return rc;
}

void sptlrpc_plain_fini(void)
{
	int rc;

	rc = sptlrpc_unregister_policy(&plain_policy);
	if (rc)
		CERROR("cannot unregister: %d\n", rc);
}