staging: delete non-required instances of include <linux/init.h>
drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */

#define DEBUG_SUBSYSTEM S_SEC
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/crypto.h>

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

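/*
 * Client side: fill in the bulk security descriptor of an outgoing request.
 * Depending on the bulk service level, this signs (SVC_INTG) or encrypts
 * (SVC_PRIV) the pages of a bulk write, or prepares receive pages for an
 * encrypted bulk read.
 */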
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct gss_cli_ctx *gctx;
	struct lustre_msg *msg;
	struct ptlrpc_bulk_sec_desc *bsd;
	rawobj_t token;
	__u32 maj;
	int offset;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read || req->rq_bulk_write);

	gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
	LASSERT(gctx->gc_mechctx);

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
		LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
		msg = req->rq_reqbuf;
		offset = msg->lm_bufcount - 1;
		break;
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
		msg = req->rq_reqbuf;
		offset = msg->lm_bufcount - 2;
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
		msg = req->rq_clrbuf;
		offset = msg->lm_bufcount - 1;
		break;
	default:
		LBUG();
	}

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
		bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

	if (req->rq_bulk_read) {
		/*
		 * bulk read: prepare receiving pages only for privacy mode.
		 */
		if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
			return gss_cli_prep_bulk(req, desc);
	} else {
		/*
		 * bulk write: sign or encrypt bulk pages.
		 */
		bsd->bsd_nob = desc->bd_nob;

		if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			/* integrity mode */
			token.data = bsd->bsd_data;
			token.len = lustre_msg_buflen(msg, offset) -
				    sizeof(*bsd);

			maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
					   desc->bd_iov_count, desc->bd_iov,
					   &token);
			if (maj != GSS_S_COMPLETE) {
				CWARN("failed to sign bulk data: %x\n", maj);
				return -EACCES;
			}
		} else {
			/* privacy mode */
			if (desc->bd_iov_count == 0)
				return 0;

			rc = sptlrpc_enc_pool_get_pages(desc);
			if (rc) {
				CERROR("bulk write: failed to allocate encryption pages: %d\n",
				       rc);
				return rc;
			}

			token.data = bsd->bsd_data;
			token.len = lustre_msg_buflen(msg, offset) -
				    sizeof(*bsd);

			maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
			if (maj != GSS_S_COMPLETE) {
				CWARN("failed to encrypt bulk data: %x\n", maj);
				return -EACCES;
			}
		}
	}

	return 0;
}

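/*
 * Client side: verify bulk data upon reply.  Cross-checks the reply's bulk
 * security descriptor against the one sent with the request, then, for a
 * bulk read, verifies the signature (SVC_INTG) or decrypts the pages
 * (SVC_PRIV), fixing up bd_nob_transferred so upper layers see the
 * clear-text size.
 */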
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			    struct ptlrpc_request *req,
			    struct ptlrpc_bulk_desc *desc)
{
	struct gss_cli_ctx *gctx;
	struct lustre_msg *rmsg, *vmsg;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;
	int roff, voff;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read || req->rq_bulk_write);

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
		vmsg = req->rq_repdata;
		LASSERT(vmsg && vmsg->lm_bufcount >= 3);
		voff = vmsg->lm_bufcount - 1;

		rmsg = req->rq_reqbuf;
		LASSERT(rmsg && rmsg->lm_bufcount >= 3);
		roff = rmsg->lm_bufcount - 1; /* last segment */
		break;
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		vmsg = req->rq_repdata;
		LASSERT(vmsg && vmsg->lm_bufcount >= 4);
		voff = vmsg->lm_bufcount - 2;

		rmsg = req->rq_reqbuf;
		LASSERT(rmsg && rmsg->lm_bufcount >= 4);
		roff = rmsg->lm_bufcount - 2; /* second last segment */
		break;
	case SPTLRPC_SVC_PRIV:
		vmsg = req->rq_repdata;
		LASSERT(vmsg && vmsg->lm_bufcount >= 2);
		voff = vmsg->lm_bufcount - 1;

		rmsg = req->rq_clrbuf;
		LASSERT(rmsg && rmsg->lm_bufcount >= 2);
		roff = rmsg->lm_bufcount - 1; /* last segment */
		break;
	default:
		LBUG();
	}

	bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
	bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
	LASSERT(bsdr && bsdv);

	if (bsdr->bsd_version != bsdv->bsd_version ||
	    bsdr->bsd_type != bsdv->bsd_type ||
	    bsdr->bsd_svc != bsdv->bsd_svc) {
		CERROR("bulk security descriptor mismatch: (%u,%u,%u) != (%u,%u,%u)\n",
		       bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
		       bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
		return -EPROTO;
	}

	LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
		bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
		bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);

	/*
	 * in privacy mode if return success, make sure bd_nob_transferred
	 * is the actual size of the clear text, otherwise upper layer
	 * may be surprised.
	 */
	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR) {
			CERROR("server reported bulk i/o failure\n");
			return -EIO;
		}

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
			desc->bd_nob_transferred = desc->bd_nob;
	} else {
		/*
		 * bulk read, upon return success, bd_nob_transferred is
		 * the size of plain text actually received.
		 */
		gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
		LASSERT(gctx->gc_mechctx);

		if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
			int i, nob;

			/* fix the actual data size */
			for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
				if (desc->bd_iov[i].kiov_len + nob >
				    desc->bd_nob_transferred) {
					desc->bd_iov[i].kiov_len =
						desc->bd_nob_transferred - nob;
				}
				nob += desc->bd_iov[i].kiov_len;
			}

			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdv);

			maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
					      desc->bd_iov_count, desc->bd_iov,
					      &token);
			if (maj != GSS_S_COMPLETE) {
				CERROR("failed to verify bulk read: %x\n", maj);
				return -EACCES;
			}
		} else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
			desc->bd_nob = bsdv->bsd_nob;
			if (desc->bd_nob == 0)
				return 0;

			token.data = bsdv->bsd_data;
			token.len = lustre_msg_buflen(vmsg, voff) -
				    sizeof(*bsdv);

			maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
					       &token, 1);
			if (maj != GSS_S_COMPLETE) {
				CERROR("failed to decrypt bulk read: %x\n",
				       maj);
				return -EACCES;
			}

			desc->bd_nob_transferred = desc->bd_nob;
		}
	}

	return 0;
}

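/*
 * Common helper: attach pages from the shared encryption pool to the bulk
 * descriptor and give the GSS mechanism a chance to prepare them before an
 * encrypted transfer.
 */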
static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
			 struct gss_ctx *mechctx)
{
	int rc;

	if (desc->bd_iov_count == 0)
		return 0;

	rc = sptlrpc_enc_pool_get_pages(desc);
	if (rc)
		return rc;

	if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
		return -EACCES;

	return 0;
}

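/*
 * Client side: prepare receive pages for a bulk read.  Only privacy mode
 * needs this, since the data arrives encrypted.
 */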
int gss_cli_prep_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	int rc;

	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read);

	if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
		return 0;

	rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
	if (rc)
		CERROR("bulk read: failed to prepare encryption pages: %d\n",
		       rc);

	return rc;
}

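/*
 * Server side counterpart of gss_cli_prep_bulk(): prepare receive pages
 * for a bulk write when the request's bulk descriptor asks for privacy
 * mode.
 */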
int gss_svc_prep_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_bulk_sec_desc *bsd;
	int rc;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_write);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
	LASSERT(grctx->src_reqbsd);
	LASSERT(grctx->src_repbsd);
	LASSERT(grctx->src_ctx);
	LASSERT(grctx->src_ctx->gsc_mechctx);

	bsd = grctx->src_reqbsd;
	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
		return 0;

	rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
	if (rc)
		CERROR("bulk write: failed to prepare encryption pages: %d\n",
		       rc);

	return rc;
}

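/*
 * Server side: called once the pages of a bulk write have arrived.
 * Initializes the reply's bulk security descriptor, then verifies the
 * client's signature (SVC_INTG) or decrypts the received pages (SVC_PRIV).
 * On failure, BSD_FL_ERR is set in the reply descriptor so the client
 * learns of the error.
 */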
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_write);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

	LASSERT(grctx->src_reqbsd);
	LASSERT(grctx->src_repbsd);
	LASSERT(grctx->src_ctx);
	LASSERT(grctx->src_ctx->gsc_mechctx);

	bsdr = grctx->src_reqbsd;
	bsdv = grctx->src_repbsd;

	/* bsdr has been sanity checked during unpacking */
	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	switch (bsdv->bsd_svc) {
	case SPTLRPC_BULK_SVC_INTG:
		token.data = bsdr->bsd_data;
		token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

		maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
				      desc->bd_iov_count, desc->bd_iov,
				      &token);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to verify bulk signature: %x\n", maj);
			return -EACCES;
		}
		break;
	case SPTLRPC_BULK_SVC_PRIV:
		if (bsdr->bsd_nob != desc->bd_nob) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("prepared nob %d doesn't match the actual nob %d\n",
			       bsdr->bsd_nob, desc->bd_nob);
			return -EPROTO;
		}

		if (desc->bd_iov_count == 0) {
			LASSERT(desc->bd_nob == 0);
			break;
		}

		token.data = bsdr->bsd_data;
		token.len = grctx->src_reqbsd_size - sizeof(*bsdr);

		maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
				       desc, &token, 0);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to decrypt bulk data: %x\n", maj);
			return -EACCES;
		}
		break;
	}

	return 0;
}

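/*
 * Server side: called before the pages of a bulk read are sent back.
 * Initializes the reply's bulk security descriptor, then signs (SVC_INTG)
 * or encrypts (SVC_PRIV) the outgoing pages, taking cipher-text pages from
 * the encryption pool in privacy mode.
 */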
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
		      struct ptlrpc_bulk_desc *desc)
{
	struct gss_svc_reqctx *grctx;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	rawobj_t token;
	__u32 maj;
	int rc;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_bulk_read);

	grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

	LASSERT(grctx->src_reqbsd);
	LASSERT(grctx->src_repbsd);
	LASSERT(grctx->src_ctx);
	LASSERT(grctx->src_ctx->gsc_mechctx);

	bsdr = grctx->src_reqbsd;
	bsdv = grctx->src_repbsd;

	/* bsdr has been sanity checked during unpacking */
	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	switch (bsdv->bsd_svc) {
	case SPTLRPC_BULK_SVC_INTG:
		token.data = bsdv->bsd_data;
		token.len = grctx->src_repbsd_size - sizeof(*bsdv);

		maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
				   desc->bd_iov_count, desc->bd_iov, &token);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to sign bulk data: %x\n", maj);
			return -EACCES;
		}
		break;
	case SPTLRPC_BULK_SVC_PRIV:
		bsdv->bsd_nob = desc->bd_nob;

		if (desc->bd_iov_count == 0) {
			LASSERT(desc->bd_nob == 0);
			break;
		}

		rc = sptlrpc_enc_pool_get_pages(desc);
		if (rc) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("bulk read: failed to allocate encryption pages: %d\n",
			       rc);
			return rc;
		}

		token.data = bsdv->bsd_data;
		token.len = grctx->src_repbsd_size - sizeof(*bsdv);

		maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
				     desc, &token, 1);
		if (maj != GSS_S_COMPLETE) {
			bsdv->bsd_flags |= BSD_FL_ERR;
			CERROR("failed to encrypt bulk data: %x\n", maj);
			return -EACCES;
		}
		break;
	}

	return 0;
}