/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */
39 #define DEBUG_SUBSYSTEM S_SEC
40 #include <linux/module.h>
41 #include <linux/slab.h>
42 #include <linux/dcache.h>
44 #include <linux/mutex.h>
45 #include <linux/crypto.h>
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre/lustre_idl.h>
51 #include <lustre_net.h>
52 #include <lustre_import.h>
53 #include <lustre_sec.h>
56 #include "gss_internal.h"
59 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx
*ctx
,
60 struct ptlrpc_request
*req
,
61 struct ptlrpc_bulk_desc
*desc
)
63 struct gss_cli_ctx
*gctx
;
64 struct lustre_msg
*msg
;
65 struct ptlrpc_bulk_sec_desc
*bsd
;
71 LASSERT(req
->rq_pack_bulk
);
72 LASSERT(req
->rq_bulk_read
|| req
->rq_bulk_write
);
74 gctx
= container_of(ctx
, struct gss_cli_ctx
, gc_base
);
75 LASSERT(gctx
->gc_mechctx
);
77 switch (SPTLRPC_FLVR_SVC(req
->rq_flvr
.sf_rpc
)) {
78 case SPTLRPC_SVC_NULL
:
79 LASSERT(req
->rq_reqbuf
->lm_bufcount
>= 3);
81 offset
= msg
->lm_bufcount
- 1;
83 case SPTLRPC_SVC_AUTH
:
84 case SPTLRPC_SVC_INTG
:
85 LASSERT(req
->rq_reqbuf
->lm_bufcount
>= 4);
87 offset
= msg
->lm_bufcount
- 2;
89 case SPTLRPC_SVC_PRIV
:
90 LASSERT(req
->rq_clrbuf
->lm_bufcount
>= 2);
92 offset
= msg
->lm_bufcount
- 1;
98 bsd
= lustre_msg_buf(msg
, offset
, sizeof(*bsd
));
101 bsd
->bsd_type
= SPTLRPC_BULK_DEFAULT
;
102 bsd
->bsd_svc
= SPTLRPC_FLVR_BULK_SVC(req
->rq_flvr
.sf_rpc
);
104 if (bsd
->bsd_svc
== SPTLRPC_BULK_SVC_NULL
)
107 LASSERT(bsd
->bsd_svc
== SPTLRPC_BULK_SVC_INTG
||
108 bsd
->bsd_svc
== SPTLRPC_BULK_SVC_PRIV
);
110 if (req
->rq_bulk_read
) {
112 * bulk read: prepare receiving pages only for privacy mode.
114 if (bsd
->bsd_svc
== SPTLRPC_BULK_SVC_PRIV
)
115 return gss_cli_prep_bulk(req
, desc
);
118 * bulk write: sign or encrypt bulk pages.
120 bsd
->bsd_nob
= desc
->bd_nob
;
122 if (bsd
->bsd_svc
== SPTLRPC_BULK_SVC_INTG
) {
124 token
.data
= bsd
->bsd_data
;
125 token
.len
= lustre_msg_buflen(msg
, offset
) -
128 maj
= lgss_get_mic(gctx
->gc_mechctx
, 0, NULL
,
129 desc
->bd_iov_count
, desc
->bd_iov
,
131 if (maj
!= GSS_S_COMPLETE
) {
132 CWARN("failed to sign bulk data: %x\n", maj
);
137 if (desc
->bd_iov_count
== 0)
140 rc
= sptlrpc_enc_pool_get_pages(desc
);
142 CERROR("bulk write: failed to allocate "
143 "encryption pages: %d\n", rc
);
147 token
.data
= bsd
->bsd_data
;
148 token
.len
= lustre_msg_buflen(msg
, offset
) -
151 maj
= lgss_wrap_bulk(gctx
->gc_mechctx
, desc
, &token
, 0);
152 if (maj
!= GSS_S_COMPLETE
) {
153 CWARN("fail to encrypt bulk data: %x\n", maj
);
162 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx
*ctx
,
163 struct ptlrpc_request
*req
,
164 struct ptlrpc_bulk_desc
*desc
)
166 struct gss_cli_ctx
*gctx
;
167 struct lustre_msg
*rmsg
, *vmsg
;
168 struct ptlrpc_bulk_sec_desc
*bsdr
, *bsdv
;
173 LASSERT(req
->rq_pack_bulk
);
174 LASSERT(req
->rq_bulk_read
|| req
->rq_bulk_write
);
176 switch (SPTLRPC_FLVR_SVC(req
->rq_flvr
.sf_rpc
)) {
177 case SPTLRPC_SVC_NULL
:
178 vmsg
= req
->rq_repdata
;
179 voff
= vmsg
->lm_bufcount
- 1;
180 LASSERT(vmsg
&& vmsg
->lm_bufcount
>= 3);
182 rmsg
= req
->rq_reqbuf
;
183 roff
= rmsg
->lm_bufcount
- 1; /* last segment */
184 LASSERT(rmsg
&& rmsg
->lm_bufcount
>= 3);
186 case SPTLRPC_SVC_AUTH
:
187 case SPTLRPC_SVC_INTG
:
188 vmsg
= req
->rq_repdata
;
189 voff
= vmsg
->lm_bufcount
- 2;
190 LASSERT(vmsg
&& vmsg
->lm_bufcount
>= 4);
192 rmsg
= req
->rq_reqbuf
;
193 roff
= rmsg
->lm_bufcount
- 2; /* second last segment */
194 LASSERT(rmsg
&& rmsg
->lm_bufcount
>= 4);
196 case SPTLRPC_SVC_PRIV
:
197 vmsg
= req
->rq_repdata
;
198 voff
= vmsg
->lm_bufcount
- 1;
199 LASSERT(vmsg
&& vmsg
->lm_bufcount
>= 2);
201 rmsg
= req
->rq_clrbuf
;
202 roff
= rmsg
->lm_bufcount
- 1; /* last segment */
203 LASSERT(rmsg
&& rmsg
->lm_bufcount
>= 2);
209 bsdr
= lustre_msg_buf(rmsg
, roff
, sizeof(*bsdr
));
210 bsdv
= lustre_msg_buf(vmsg
, voff
, sizeof(*bsdv
));
211 LASSERT(bsdr
&& bsdv
);
213 if (bsdr
->bsd_version
!= bsdv
->bsd_version
||
214 bsdr
->bsd_type
!= bsdv
->bsd_type
||
215 bsdr
->bsd_svc
!= bsdv
->bsd_svc
) {
216 CERROR("bulk security descriptor mismatch: "
217 "(%u,%u,%u) != (%u,%u,%u)\n",
218 bsdr
->bsd_version
, bsdr
->bsd_type
, bsdr
->bsd_svc
,
219 bsdv
->bsd_version
, bsdv
->bsd_type
, bsdv
->bsd_svc
);
223 LASSERT(bsdv
->bsd_svc
== SPTLRPC_BULK_SVC_NULL
||
224 bsdv
->bsd_svc
== SPTLRPC_BULK_SVC_INTG
||
225 bsdv
->bsd_svc
== SPTLRPC_BULK_SVC_PRIV
);
228 * in privacy mode if return success, make sure bd_nob_transferred
229 * is the actual size of the clear text, otherwise upper layer
232 if (req
->rq_bulk_write
) {
233 if (bsdv
->bsd_flags
& BSD_FL_ERR
) {
234 CERROR("server reported bulk i/o failure\n");
238 if (bsdv
->bsd_svc
== SPTLRPC_BULK_SVC_PRIV
)
239 desc
->bd_nob_transferred
= desc
->bd_nob
;
242 * bulk read, upon return success, bd_nob_transferred is
243 * the size of plain text actually received.
245 gctx
= container_of(ctx
, struct gss_cli_ctx
, gc_base
);
246 LASSERT(gctx
->gc_mechctx
);
248 if (bsdv
->bsd_svc
== SPTLRPC_BULK_SVC_INTG
) {
251 /* fix the actual data size */
252 for (i
= 0, nob
= 0; i
< desc
->bd_iov_count
; i
++) {
253 if (desc
->bd_iov
[i
].kiov_len
+ nob
>
254 desc
->bd_nob_transferred
) {
255 desc
->bd_iov
[i
].kiov_len
=
256 desc
->bd_nob_transferred
- nob
;
258 nob
+= desc
->bd_iov
[i
].kiov_len
;
261 token
.data
= bsdv
->bsd_data
;
262 token
.len
= lustre_msg_buflen(vmsg
, voff
) -
265 maj
= lgss_verify_mic(gctx
->gc_mechctx
, 0, NULL
,
266 desc
->bd_iov_count
, desc
->bd_iov
,
268 if (maj
!= GSS_S_COMPLETE
) {
269 CERROR("failed to verify bulk read: %x\n", maj
);
272 } else if (bsdv
->bsd_svc
== SPTLRPC_BULK_SVC_PRIV
) {
273 desc
->bd_nob
= bsdv
->bsd_nob
;
274 if (desc
->bd_nob
== 0)
277 token
.data
= bsdv
->bsd_data
;
278 token
.len
= lustre_msg_buflen(vmsg
, voff
) -
281 maj
= lgss_unwrap_bulk(gctx
->gc_mechctx
, desc
,
283 if (maj
!= GSS_S_COMPLETE
) {
284 CERROR("failed to decrypt bulk read: %x\n",
289 desc
->bd_nob_transferred
= desc
->bd_nob
;
296 static int gss_prep_bulk(struct ptlrpc_bulk_desc
*desc
,
297 struct gss_ctx
*mechctx
)
301 if (desc
->bd_iov_count
== 0)
304 rc
= sptlrpc_enc_pool_get_pages(desc
);
308 if (lgss_prep_bulk(mechctx
, desc
) != GSS_S_COMPLETE
)
314 int gss_cli_prep_bulk(struct ptlrpc_request
*req
,
315 struct ptlrpc_bulk_desc
*desc
)
319 LASSERT(req
->rq_cli_ctx
);
320 LASSERT(req
->rq_pack_bulk
);
321 LASSERT(req
->rq_bulk_read
);
323 if (SPTLRPC_FLVR_BULK_SVC(req
->rq_flvr
.sf_rpc
) != SPTLRPC_BULK_SVC_PRIV
)
326 rc
= gss_prep_bulk(desc
, ctx2gctx(req
->rq_cli_ctx
)->gc_mechctx
);
328 CERROR("bulk read: failed to prepare encryption "
334 int gss_svc_prep_bulk(struct ptlrpc_request
*req
,
335 struct ptlrpc_bulk_desc
*desc
)
337 struct gss_svc_reqctx
*grctx
;
338 struct ptlrpc_bulk_sec_desc
*bsd
;
341 LASSERT(req
->rq_svc_ctx
);
342 LASSERT(req
->rq_pack_bulk
);
343 LASSERT(req
->rq_bulk_write
);
345 grctx
= gss_svc_ctx2reqctx(req
->rq_svc_ctx
);
346 LASSERT(grctx
->src_reqbsd
);
347 LASSERT(grctx
->src_repbsd
);
348 LASSERT(grctx
->src_ctx
);
349 LASSERT(grctx
->src_ctx
->gsc_mechctx
);
351 bsd
= grctx
->src_reqbsd
;
352 if (bsd
->bsd_svc
!= SPTLRPC_BULK_SVC_PRIV
)
355 rc
= gss_prep_bulk(desc
, grctx
->src_ctx
->gsc_mechctx
);
357 CERROR("bulk write: failed to prepare encryption "
363 int gss_svc_unwrap_bulk(struct ptlrpc_request
*req
,
364 struct ptlrpc_bulk_desc
*desc
)
366 struct gss_svc_reqctx
*grctx
;
367 struct ptlrpc_bulk_sec_desc
*bsdr
, *bsdv
;
371 LASSERT(req
->rq_svc_ctx
);
372 LASSERT(req
->rq_pack_bulk
);
373 LASSERT(req
->rq_bulk_write
);
375 grctx
= gss_svc_ctx2reqctx(req
->rq_svc_ctx
);
377 LASSERT(grctx
->src_reqbsd
);
378 LASSERT(grctx
->src_repbsd
);
379 LASSERT(grctx
->src_ctx
);
380 LASSERT(grctx
->src_ctx
->gsc_mechctx
);
382 bsdr
= grctx
->src_reqbsd
;
383 bsdv
= grctx
->src_repbsd
;
385 /* bsdr has been sanity checked during unpacking */
386 bsdv
->bsd_version
= 0;
387 bsdv
->bsd_type
= SPTLRPC_BULK_DEFAULT
;
388 bsdv
->bsd_svc
= bsdr
->bsd_svc
;
391 switch (bsdv
->bsd_svc
) {
392 case SPTLRPC_BULK_SVC_INTG
:
393 token
.data
= bsdr
->bsd_data
;
394 token
.len
= grctx
->src_reqbsd_size
- sizeof(*bsdr
);
396 maj
= lgss_verify_mic(grctx
->src_ctx
->gsc_mechctx
, 0, NULL
,
397 desc
->bd_iov_count
, desc
->bd_iov
, &token
);
398 if (maj
!= GSS_S_COMPLETE
) {
399 bsdv
->bsd_flags
|= BSD_FL_ERR
;
400 CERROR("failed to verify bulk signature: %x\n", maj
);
404 case SPTLRPC_BULK_SVC_PRIV
:
405 if (bsdr
->bsd_nob
!= desc
->bd_nob
) {
406 bsdv
->bsd_flags
|= BSD_FL_ERR
;
407 CERROR("prepared nob %d doesn't match the actual "
408 "nob %d\n", desc
->bd_nob
, bsdr
->bsd_nob
);
412 if (desc
->bd_iov_count
== 0) {
413 LASSERT(desc
->bd_nob
== 0);
417 token
.data
= bsdr
->bsd_data
;
418 token
.len
= grctx
->src_reqbsd_size
- sizeof(*bsdr
);
420 maj
= lgss_unwrap_bulk(grctx
->src_ctx
->gsc_mechctx
,
422 if (maj
!= GSS_S_COMPLETE
) {
423 bsdv
->bsd_flags
|= BSD_FL_ERR
;
424 CERROR("failed decrypt bulk data: %x\n", maj
);
433 int gss_svc_wrap_bulk(struct ptlrpc_request
*req
,
434 struct ptlrpc_bulk_desc
*desc
)
436 struct gss_svc_reqctx
*grctx
;
437 struct ptlrpc_bulk_sec_desc
*bsdr
, *bsdv
;
442 LASSERT(req
->rq_svc_ctx
);
443 LASSERT(req
->rq_pack_bulk
);
444 LASSERT(req
->rq_bulk_read
);
446 grctx
= gss_svc_ctx2reqctx(req
->rq_svc_ctx
);
448 LASSERT(grctx
->src_reqbsd
);
449 LASSERT(grctx
->src_repbsd
);
450 LASSERT(grctx
->src_ctx
);
451 LASSERT(grctx
->src_ctx
->gsc_mechctx
);
453 bsdr
= grctx
->src_reqbsd
;
454 bsdv
= grctx
->src_repbsd
;
456 /* bsdr has been sanity checked during unpacking */
457 bsdv
->bsd_version
= 0;
458 bsdv
->bsd_type
= SPTLRPC_BULK_DEFAULT
;
459 bsdv
->bsd_svc
= bsdr
->bsd_svc
;
462 switch (bsdv
->bsd_svc
) {
463 case SPTLRPC_BULK_SVC_INTG
:
464 token
.data
= bsdv
->bsd_data
;
465 token
.len
= grctx
->src_repbsd_size
- sizeof(*bsdv
);
467 maj
= lgss_get_mic(grctx
->src_ctx
->gsc_mechctx
, 0, NULL
,
468 desc
->bd_iov_count
, desc
->bd_iov
, &token
);
469 if (maj
!= GSS_S_COMPLETE
) {
470 bsdv
->bsd_flags
|= BSD_FL_ERR
;
471 CERROR("failed to sign bulk data: %x\n", maj
);
475 case SPTLRPC_BULK_SVC_PRIV
:
476 bsdv
->bsd_nob
= desc
->bd_nob
;
478 if (desc
->bd_iov_count
== 0) {
479 LASSERT(desc
->bd_nob
== 0);
483 rc
= sptlrpc_enc_pool_get_pages(desc
);
485 bsdv
->bsd_flags
|= BSD_FL_ERR
;
486 CERROR("bulk read: failed to allocate encryption "
491 token
.data
= bsdv
->bsd_data
;
492 token
.len
= grctx
->src_repbsd_size
- sizeof(*bsdv
);
494 maj
= lgss_wrap_bulk(grctx
->src_ctx
->gsc_mechctx
,
496 if (maj
!= GSS_S_COMPLETE
) {
497 bsdv
->bsd_flags
|= BSD_FL_ERR
;
498 CERROR("failed to encrypt bulk data: %x\n", maj
);