/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
struct plain_sec {
        struct ptlrpc_sec       pls_base;
        rwlock_t                pls_lock;
        struct ptlrpc_cli_ctx  *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}
static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

static unsigned int plain_at_offset;
/*
 * For simplicity, plain policy RPCs use a fixed layout.
 */
#define PLAIN_PACK_SEGMENTS             (4)

#define PLAIN_PACK_HDR_OFF              (0)
#define PLAIN_PACK_MSG_OFF              (1)
#define PLAIN_PACK_USER_OFF             (2)
#define PLAIN_PACK_BULK_OFF             (3)

#define PLAIN_FL_USER                   (0x01)
#define PLAIN_FL_BULK                   (0x02)
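
/*
 * A quick map of the fixed layout (summarized from the definitions above
 * and their use below): every plain request/reply is a lustre_msg_v2 with
 * exactly PLAIN_PACK_SEGMENTS buffers:
 *
 *   seg 0 (PLAIN_PACK_HDR_OFF)  - struct plain_header
 *   seg 1 (PLAIN_PACK_MSG_OFF)  - the embedded lustre_msg payload
 *   seg 2 (PLAIN_PACK_USER_OFF) - user descriptor, present only when
 *                                 PLAIN_FL_USER is set in ph_flags
 *   seg 3 (PLAIN_PACK_BULK_OFF) - bulk security descriptor, present only
 *                                 when PLAIN_FL_BULK is set
 */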
struct plain_header {
        __u8    ph_ver;                 /* 0 */
        __u8    ph_flags;
        __u8    ph_sp;                  /* source */
        __u8    ph_bulk_hash_alg;       /* complete flavor desc */
        __u8    ph_pad[4];
};

struct plain_bulk_token {
        __u8    pbt_hash[8];
};
#define PLAIN_BSD_SIZE \
        (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
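
/*
 * The bulk segment thus carries a ptlrpc_bulk_sec_desc header whose
 * bsd_data[] payload is a plain_bulk_token (see the casts in the bulk
 * wrap/unwrap handlers below).
 */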
/****************************************
 * bulk checksum helpers                *
 ****************************************/
static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;

        if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
                return -EPROTO;

        bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
        if (bsd == NULL) {
                CERROR("bulk sec desc has short size %d\n",
                       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
                return -EPROTO;
        }

        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
            bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
                CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}
static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                    __u8 hash_alg,
                                    struct plain_bulk_token *token)
{
        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
        return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
                                         sizeof(token->pbt_hash));
}
static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                  __u8 hash_alg,
                                  struct plain_bulk_token *tokenr)
{
        struct plain_bulk_token tokenv;
        int                     rc;

        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
                                       sizeof(tokenv.pbt_hash));
        if (rc)
                return rc;

        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash,
                   sizeof(tokenr->pbt_hash)))
                return -EACCES;

        return 0;
}
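
/*
 * Deliberately flip one bit in the first non-empty page of the bulk so
 * that checksum verification fails; used for the
 * OBD_FAIL_OSC_CHECKSUM_RECEIVE fault injection in plain_svc_wrap_bulk()
 * below.
 */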
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char            *ptr;
        unsigned int     off, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len == 0)
                        continue;

                ptr = kmap(desc->bd_iov[i].kiov_page);
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr[off] ^= 0x1;
                kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
}
/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}
static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_reqbuf;
        struct plain_header *phdr;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_sp = ctx->cc_sec->ps_part;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_udesc)
                phdr->ph_flags |= PLAIN_FL_USER;
        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        return 0;
}
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_repdata;
        struct plain_header *phdr;
        __u32                cksum;
        int                  swabbed;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                return -EPROTO;
        }

        swabbed = ptlrpc_rep_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        /* expect no user desc in reply */
        if (phdr->ph_flags & PLAIN_FL_USER) {
                CERROR("Unexpected udesc flag in reply\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
                CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
                       req->rq_flvr.u_bulk.hash.hash_alg);
                return -EPROTO;
        }

        if (unlikely(req->rq_early)) {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                                NULL, 0, (unsigned char *)&cksum, &hsize);
                if (cksum != msg->lm_cksum) {
                        CDEBUG(D_SEC,
                               "early reply checksum mismatch: %08x != %08x\n",
                               cpu_to_le32(cksum), msg->lm_cksum);
                        return -EINVAL;
                }
        } else {
                /* whether we sent with bulk or not, we expect the same
                 * in reply, except for early reply */
                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1,
                          phdr->ph_flags & PLAIN_FL_BULK)) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        return -EPROTO;
                }

                if (phdr->ph_flags & PLAIN_FL_BULK) {
                        if (plain_unpack_bsd(msg, swabbed))
                                return -EPROTO;
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        return 0;
}
static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        struct plain_bulk_token     *token;
        int                          rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        token = (struct plain_bulk_token *) bsd->bsd_data;

        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        if (req->rq_bulk_read)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      token);
        if (rc) {
                CERROR("bulk write: failed to compute checksum: %d\n", rc);
        } else {
                /*
                 * for sending we only compute the wrong checksum instead
                 * of corrupting the data so it is still correct on a redo
                 */
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
                        token->pbt_hash[0] ^= 0x1;
        }

        return rc;
}
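
/*
 * Note the asymmetry with the server's read path: above, only the token
 * is falsified, so a resend still carries good data, whereas
 * plain_svc_wrap_bulk() corrupts the data itself via corrupt_bulk_data().
 */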
static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsdv;
        struct plain_bulk_token     *tokenv;
        int                          rc;
        int                          i, nob;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
                        return -EIO;
                return 0;
        }

        /* fix the actual data size */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
                        desc->bd_iov[i].kiov_len =
                                desc->bd_nob_transferred - nob;
                }
                nob += desc->bd_iov[i].kiov_len;
        }

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenv);
        if (rc)
                CERROR("bulk read: client verify failed: %d\n", rc);

        return rc;
}
/****************************************
 * sec apis                             *
 ****************************************/
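
/*
 * A plain sec carries at most one client context (pls_ctx), shared by
 * everyone: plain_lookup_ctx() ignores the supplied credentials and just
 * returns it, installing it on first use.
 */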
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx *ctx, *ctx_new;

        OBD_ALLOC_PTR(ctx_new);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                if (ctx_new)
                        OBD_FREE_PTR(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1);       /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                INIT_LIST_HEAD(&ctx->cc_req_list);
                INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount);          /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}
static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(plsec->pls_ctx == NULL);

        class_import_put(sec->ps_import);

        OBD_FREE_PTR(plsec);
}
static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
}
static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec       *plsec;
        struct ptlrpc_sec      *sec;
        struct ptlrpc_cli_ctx  *ctx;

        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        OBD_ALLOC_PTR(plsec);
        if (plsec == NULL)
                return NULL;

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
        INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (ctx == NULL) {
                        plain_destroy_sec(sec);
                        return NULL;
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        return sec;
}
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(ctx == NULL))
                ctx = plain_sec_install_ctx(plsec);

        return ctx;
}
static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        OBD_FREE_PTR(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}
static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec       *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx  *ctx;

        /* do nothing unless caller wants to flush for 'all' */
        if (uid != -1)
                return 0;

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        return 0;
}
static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
                if (!req->rq_reqbuf)
                        return -ENOMEM;

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

        if (req->rq_pack_udesc)
                sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);

        return 0;
}
static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        if (!req->rq_pool) {
                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
}
static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = alloc_len;
        return 0;
}
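
/*
 * plain_at_offset (computed in sptlrpc_plain_init() below) is the size of
 * a plain wrapper holding a maximal early reply; reserving that much at
 * the front of the reply buffer lets the final reply be written past a
 * possible early reply (see plain_authorize(), which sets rq_reply_off
 * to plain_at_offset).
 */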
static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
}
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg *newbuf;
        int                oldsize;
        int                newmsg_size, newbuf_size;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size. */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size. */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC_LARGE(newbuf, newbuf_size);
                if (newbuf == NULL)
                        return -ENOMEM;

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        return 0;
}
/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};
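
/*
 * The server side is stateless: every accepted plain request pins this
 * one static svc ctx via its refcount instead of allocating a per-client
 * context.
 */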
static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg   *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        int                  swabbed;

        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_PLAIN);

        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return SECSVC_DROP;
        }

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                return SECSVC_DROP;
        }

        swabbed = ptlrpc_req_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (phdr == NULL) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
                return -EPROTO;
        }

        req->rq_sp_from = phdr->ph_sp;
        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

        if (phdr->ph_flags & PLAIN_FL_USER) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
                                             swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        return SECSVC_DROP;
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (phdr->ph_flags & PLAIN_FL_BULK) {
                if (plain_unpack_bsd(msg, swabbed))
                        return SECSVC_DROP;

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        return SECSVC_OK;
}
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state *rs;
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   rs_size = sizeof(*rs);

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC_LARGE(rs, rs_size);
                if (rs == NULL)
                        return -ENOMEM;

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        return 0;
}
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                OBD_FREE_LARGE(rs, rs->rs_size);
}
static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2      *msg = rs->rs_repbuf;
        struct plain_header       *phdr;
        int                        len;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
                                NULL, 0, (unsigned char *)&msg->lm_cksum,
                                &hsize);
                req->rq_reply_off = 0;
        }

        return 0;
}
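
/*
 * For a non-final (early) reply the CRC32 stored in lm_cksum above is the
 * only integrity check a plain RPC reply gets; the client recomputes and
 * compares it in plain_ctx_verify().
 */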
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token     *tokenr;
        int                          rc;

        LASSERT(req->rq_bulk_write);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenr);
        if (rc) {
                bsdv->bsd_flags |= BSD_FL_ERR;
                CERROR("bulk write: server verify failed: %d\n", rc);
        }

        return rc;
}
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state   *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token     *tokenv;
        int                          rc;

        LASSERT(req->rq_bulk_read);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *) bsdv->bsd_data;

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      tokenv);
        if (rc) {
                CERROR("bulk read: server failed to compute checksum: %d\n",
                       rc);
        } else {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        }

        return rc;
}
static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh        = plain_ctx_refresh,
        .validate       = plain_ctx_validate,
        .sign           = plain_ctx_sign,
        .verify         = plain_ctx_verify,
        .wrap_bulk      = plain_cli_wrap_bulk,
        .unwrap_bulk    = plain_cli_unwrap_bulk,
};
static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};
static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept         = plain_accept,
        .alloc_rs       = plain_alloc_rs,
        .authorize      = plain_authorize,
        .free_rs        = plain_free_rs,
        .unwrap_bulk    = plain_svc_unwrap_bulk,
        .wrap_bulk      = plain_svc_wrap_bulk,
};
static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner       = THIS_MODULE,
        .sp_name        = "plain",
        .sp_policy      = SPTLRPC_POLICY_PLAIN,
        .sp_cops        = &plain_sec_cops,
        .sp_sops        = &plain_sec_sops,
};
int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int   rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}
void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}
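
/*
 * Assumed usage (not shown in this file): sptlrpc_plain_init() and
 * sptlrpc_plain_fini() are expected to be called once from the sptlrpc
 * setup/teardown code, keeping the plain policy registered for the
 * lifetime of the ptlrpc module.
 */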