/* drivers/staging/lustre/lustre/osc/osc_request.c */
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/lustre_dlm.h"
#include "../include/lustre_net.h"
#include "../include/lustre/lustre_user.h"
#include "../include/obd_cksum.h"

#include "../include/lustre_ha.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_debug.h"
#include "../include/lustre_param.h"
#include "../include/lustre_fid.h"
#include "../include/obd_class.h"
#include "../include/obd.h"
#include "osc_internal.h"
#include "osc_cl_internal.h"

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

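/*
 * Per-RPC state handed to the reply interpreters below.  These structures
 * are stored inline in ptlrpc_request::rq_async_args via
 * ptlrpc_req_async_args(); the CLASSERT()s at each use check that they
 * fit, so they need no separate allocation or freeing.
 */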
struct osc_brw_async_args {
        struct obdo *aa_oa;
        int aa_requested_nob;
        int aa_nio_count;
        u32 aa_page_count;
        int aa_resends;
        struct brw_page **aa_ppga;
        struct client_obd *aa_cli;
        struct list_head aa_oaps;
        struct list_head aa_exts;
        struct cl_req *aa_clerq;
};

struct osc_async_args {
        struct obd_info *aa_oi;
};

struct osc_setattr_args {
        struct obdo *sa_oa;
        obd_enqueue_update_f sa_upcall;
        void *sa_cookie;
};

struct osc_fsync_args {
        struct obd_info *fa_oi;
        obd_enqueue_update_f fa_upcall;
        void *fa_cookie;
};

struct osc_enqueue_args {
        struct obd_export *oa_exp;
        __u64 *oa_flags;
        obd_enqueue_update_f oa_upcall;
        void *oa_cookie;
        struct ost_lvb *oa_lvb;
        struct lustre_handle *oa_lockh;
        struct ldlm_enqueue_info *oa_ei;
        unsigned int oa_agl:1;
};

static void osc_release_ppga(struct brw_page **ppga, u32 count);
static int brw_interpret(const struct lu_env *env,
                         struct ptlrpc_request *req, void *data, int rc);
static int osc_cleanup(struct obd_device *obd);

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;

        lmm_size = sizeof(**lmmp);
        if (!lmmp)
                return lmm_size;

        if (*lmmp && !lsm) {
                kfree(*lmmp);
                *lmmp = NULL;
                return 0;
        } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) {
                return -EBADF;
        }

        if (!*lmmp) {
                *lmmp = kzalloc(lmm_size, GFP_NOFS);
                if (!*lmmp)
                        return -ENOMEM;
        }

        if (lsm)
                ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);

        return lmm_size;
}

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;
        struct obd_import *imp = class_exp2cliimp(exp);

        if (lmm) {
                if (lmm_bytes < sizeof(*lmm)) {
                        CERROR("%s: lov_mds_md too small: %d, need %d\n",
                               exp->exp_obd->obd_name, lmm_bytes,
                               (int)sizeof(*lmm));
                        return -EINVAL;
                }
                /* XXX LOV_MAGIC etc check? */

                if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
                        CERROR("%s: zero lmm_object_id: rc = %d\n",
                               exp->exp_obd->obd_name, -EINVAL);
                        return -EINVAL;
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (!lsmp)
                return lsm_size;

        if (*lsmp && !lmm) {
                kfree((*lsmp)->lsm_oinfo[0]);
                kfree(*lsmp);
                *lsmp = NULL;
                return 0;
        }

        if (!*lsmp) {
                *lsmp = kzalloc(lsm_size, GFP_NOFS);
                if (unlikely(!*lsmp))
                        return -ENOMEM;
                (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
                                                GFP_NOFS);
                if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
                        kfree(*lsmp);
                        return -ENOMEM;
                }
                loi_init((*lsmp)->lsm_oinfo[0]);
        } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
                return -EBADF;
        }

        if (lmm)
                /* XXX zero *lsmp? */
                ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);

        if (imp &&
            (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
                (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
        else
                (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        return lsm_size;
}

static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oinfo->oi_oa);
}

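/* Reply interpreter for an async OST_GETATTR: unpack the reply into the
 * caller's obdo and hand the result to the oi_cb_up() completion callback.
 */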
static int osc_getattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
{
        struct ost_body *body;

        if (rc != 0)
                goto out;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
                                     aa->aa_oi->oi_oa, &body->oa);

                /* This should really be sent by the OST */
                aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
                aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CDEBUG(D_INFO, "can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oi->oi_oa->o_valid = 0;
        }
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        return rc;
}

static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (!req)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        return 0;
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obd_info *oinfo)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (!req)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                goto out;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (!body) {
                rc = -EPROTO;
                goto out;
        }

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
                             &body->oa);

        oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

out:
        ptlrpc_req_finished(req);
        return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obd_info *oinfo, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        int rc;

        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (!req)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                goto out;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (!body) {
                rc = -EPROTO;
                goto out;
        }

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
                             &body->oa);

out:
        ptlrpc_req_finished(req);
        return rc;
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;

        if (rc != 0)
                goto out;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (!body) {
                rc = -EPROTO;
                goto out;
        }

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        return rc;
}

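/* Send an OST_SETATTR without waiting for the reply.  With a NULL @rqset
 * the request is simply handed to ptlrpcd; otherwise @upcall is invoked
 * with @cookie once the reply has been interpreted.
 */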
int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
                           struct obd_trans_info *oti,
                           obd_enqueue_update_f upcall, void *cookie,
                           struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (!req)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }

        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        /* do MDS-to-OST setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oinfo->oi_oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        return 0;
}

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
{
        return osc_setattr_async_base(exp, oinfo, oti,
                                      oinfo->oi_cb_up, oinfo, rqset);
}

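/* Synchronously create an object on the OST, allocating a memmd for the
 * caller if *@ea is NULL.  On success the reply obdo is copied back into
 * @oa, and the transno and any log cookie are stored in @oti.
 */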
static int osc_real_create(struct obd_export *exp, struct obdo *oa,
                           struct lov_stripe_md **ea,
                           struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct lov_stripe_md *lsm;
        int rc;

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        return rc;
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (!req) {
                rc = -ENOMEM;
                goto out;
        }

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                goto out;
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_DELORPHAN) {
                DEBUG_REQ(D_HA, req, "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(req);
        if (rc)
                goto out_req;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (!body) {
                rc = -EPROTO;
                goto out_req;
        }

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way.
         */
        lsm->lsm_oi = oa->o_oi;
        *ea = lsm;

        if (oti) {
                oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        *oti->oti_logcookies = oa->o_lcookie;
                }
        }

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        return rc;
}

int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
                   obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        if (!req)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oinfo->oi_oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa = oinfo->oi_oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req);
        else
                ptlrpc_set_add_req(rqset, req);

        return 0;
}

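/* OST_SYNC: the start/end of the range to be synced are carried in the
 * size/blocks fields of the oa (see the "overload" comment below); the
 * result is delivered through the osc_fsync_args upcall.
 */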
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_fsync_args *fa = arg;
        struct ost_body *body;

        if (rc)
                goto out;

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (!body) {
                CERROR("can't unpack ost_body\n");
                rc = -EPROTO;
                goto out;
        }

        *fa->fa_oi->oi_oa = body->oa;
out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        return rc;
}

int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request *req;
        struct ost_body *body;
        struct osc_fsync_args *fa;
        int rc;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (!req)
                return -ENOMEM;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oinfo->oi_oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
        fa = ptlrpc_req_async_args(req);
        fa->fa_oi = oinfo;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req);
        else
                ptlrpc_set_add_req(rqset, req);

        return 0;
}

/* Find and cancel, locally, the locks matched by @mode in the resource
 * found by @objid. Found locks are added to the @cancels list. Returns
 * the number of locks added to the list.
 */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   ldlm_mode_t mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes it from the case when ELC is not supported at
         * all, where we still want to cancel locks in advance and just
         * cancel them locally, without sending any RPC.
         */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                return 0;

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (!res)
                return 0;

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        return count;
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);
        return 0;
}

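/* Throttle destroy RPCs: returns 1 (and takes a slot) if fewer than
 * cl_max_rpcs_in_flight destroys are in flight, 0 otherwise.  A caller
 * that gets 0 waits on cl_destroy_waitq, which is woken both here and in
 * osc_destroy_interpret() when a slot frees up.
 */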
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa, struct lov_stripe_md **ea,
                      struct obd_trans_info *oti)
{
        int rc = 0;

        LASSERT(oa);
        LASSERT(ea);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_RECREATE_OBJS) {
                return osc_real_create(exp, oa, ea, oti);
        }

        if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
                return osc_real_create(exp, oa, ea, oti);

        /* we should not get here anymore */
        LBUG();

        return rc;
}

/* Destroy requests can always be async on the client, and we don't even
 * really care about the return code, since the client cannot do anything
 * at all about a destroy failure.
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST
 * reports they were destroyed and sync'd to disk (i.e. transaction
 * committed). If the client dies, or the OST is down when the object
 * should be destroyed, the records are not cancelled, and when the OST
 * reconnects to the MDS next, it will retrieve the llog unlink logs and
 * then send the log cancellation cookies to the MDS after committing
 * destroy transactions.
 */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa, struct lov_stripe_md *ea,
                       struct obd_trans_info *oti, struct obd_export *md_export)
{
        struct client_obd *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body *body;
        LIST_HEAD(cancels);
        int rc, count;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                return -EINVAL;
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (!req) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                return -ENOMEM;
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
                oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        /* If osc_destroy is for destroying the unlink orphan,
         * sent from MDT to OST, it should not be blocked here,
         * because the process might be triggered by ptlrpcd, and
         * it is not good to block a ptlrpcd thread (b=16006)
         */
        if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
                req->rq_interpret_reply = osc_destroy_interpret;
                if (!osc_can_send_destroy(cli)) {
                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
                                                          NULL);

                        /*
                         * Wait until the number of on-going destroy RPCs drops
                         * under max_rpc_in_flight
                         */
                        l_wait_event_exclusive(cli->cl_destroy_waitq,
                                               osc_can_send_destroy(cli),
                                               &lwi);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        return 0;
}

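/* Piggy-back cache accounting on an outgoing RPC: advertise how much dirty
 * data the client holds, how much more it could dirty (o_undirty), and the
 * grant currently available, so the OST can adjust the grant it gives us.
 */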
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u32 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
                     cli->cl_dirty_max)) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_transit,
                       cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_read(&obd_dirty_pages) -
                            atomic_read(&obd_dirty_transit_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read()s allowing the atomic_inc()s are not
                 * covered by a lock, so they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1).
                 */
                CERROR("dirty %d - %d > system dirty_max %d\n",
                       atomic_read(&obd_dirty_pages),
                       atomic_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else {
                long max_in_flight = (cli->cl_max_pages_per_rpc <<
                                      PAGE_CACHE_SHIFT) *
                                     (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant =
                cfs_time_shift(cli->cl_grant_shrink_interval);
        CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
                              u32 keylen, void *key, u32 vallen,
                              void *val, struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_brw_async_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                goto out;
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        kmem_cache_free(obdo_cachep, oa);
        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal.
 */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

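/* Give grant back to the OST by sending a KEY_GRANT_SHRINK set_info RPC;
 * if sending fails, the grant is returned to the local available pool.
 */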
int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int rc = 0;
        struct ost_body *body;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance.
         */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                return 0;
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        body = kzalloc(sizeof(*body), GFP_NOFS);
        if (!body)
                return -ENOMEM;

        osc_announce_cached(cli, &body->oa, 0);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
        kfree(body);
        return rc;
}

static int osc_should_shrink_grant(struct client_obd *client)
{
        unsigned long time = cfs_time_current();
        unsigned long next_shrink = client->cl_next_shrink_grant;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching.
                 */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;

                osc_update_next_shrink(client);
        }
        return 0;
}

static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
        struct client_obd *client;

        list_for_each_entry(client, &item->ti_obd_list,
                            cl_grant_shrink_list) {
                if (osc_should_shrink_grant(client))
                        osc_shrink_grant(client);
        }
        return 0;
}

static int osc_add_shrink_grant(struct client_obd *client)
{
        int rc;

        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
                                       TIMEOUT_GRANT,
                                       osc_grant_shrink_grant_cb, NULL,
                                       &client->cl_grant_shrink_list);
        if (rc) {
                CERROR("add grant client %s error %d\n",
                       client->cl_import->imp_obd->obd_name, rc);
                return rc;
        }
        CDEBUG(D_CACHE, "add grant client %s\n",
               client->cl_import->imp_obd->obd_name);
        osc_update_next_shrink(client);
        return 0;
}

static int osc_del_shrink_grant(struct client_obd *client)
{
        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
                                         TIMEOUT_GRANT);
}

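/* Initialize grant accounting from the connect data at (re)connect time
 * and arm the periodic grant-shrink callback if the server supports it.
 */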
static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we've
         * been evicted, it's the new avail_grant amount, and cl_dirty will
         * drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state has
         * already left EVICTED state, then cl_dirty must be 0 already.
         */
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
                cli->cl_avail_grant = ocd->ocd_grant;
        else
                cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;

        if (cli->cl_avail_grant < 0) {
                CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
                      ocd->ocd_grant, cli->cl_dirty);
                /* workaround for servers which do not have the patch from
                 * LU-2679
                 */
                cli->cl_avail_grant = ocd->ocd_grant;
        }

        /* determine the appropriate chunk size used by osc_extent. */
        cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
               cli->cl_import->imp_obd->obd_name,
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);

        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
            list_empty(&cli->cl_grant_shrink_list))
                osc_add_shrink_grant(cli);
}

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet.
 */
static void handle_short_read(int nob_read, u32 page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                              (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

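/* Sanity-check a BRW_WRITE reply: every per-niobuf return code must be
 * zero and the bulk must have transferred exactly the requested bytes.
 */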
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           u32 page_count, struct brw_page **pga)
{
        int i;
        __u32 *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (!remote_rcs) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return remote_rcs[i];

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC | OBD_BRW_ASYNC |
                                  OBD_BRW_NOQUOTA);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine
                 */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

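/* Checksum the first @nob bytes of the bulk described by @pga with the
 * hash algorithm selected by @cksum_type.  The OBD_FAIL hooks below
 * deliberately corrupt the data (reads) or the checksum (writes) for
 * fault-injection testing.
 */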
static u32 osc_checksum_bulk(int nob, u32 pg_count,
                             struct brw_page **pga, int opc,
                             cksum_type_t cksum_type)
{
        __u32 cksum;
        int i = 0;
        struct cfs_crypto_hash_desc *hdesc;
        unsigned int bufsize;
        int err;
        unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(hdesc)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(hdesc);
        }

        while (nob > 0 && pg_count > 0) {
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error
                 */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~CFS_PAGE_MASK;

                        memcpy(ptr + off, "bad1", min(4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
                                            pga[i]->off & ~CFS_PAGE_MASK,
                                            count);
                CDEBUG(D_PAGE,
                       "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
                       pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
                       (long)pga[i]->pg->flags, page_count(pga[i]->pg),
                       page_private(pga[i]->pg),
                       (int)(pga[i]->off & ~CFS_PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = 4;
        err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);

        if (err)
                cfs_crypto_hash_final(hdesc, NULL, NULL);

        /* For sends we only compute the wrong checksum instead of
         * corrupting the data, so it is still correct on a redo
         */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        return cksum;
}

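/* Build a bulk read/write RPC: pack the ioobj and niobufs (merging
 * contiguous pages into a single niobuf), attach a bulk descriptor for
 * all @page_count pages and, for writes, optionally checksum the data.
 * The osc_brw_async_args stored in the request carry everything needed
 * to verify, and possibly resend, the transfer at completion time.
 */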
static int osc_brw_prep_request(int cmd, struct client_obd *cli,
                                struct obdo *oa,
                                struct lov_stripe_md *lsm, u32 page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                int reserve,
                                int resend)
{
        struct ptlrpc_request *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body *body;
        struct obd_ioobj *ioobj;
        struct niobuf_remote *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule *pill;
        struct brw_page *pg_prev;

        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                return -ENOMEM; /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                return -EINVAL; /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                osc_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (!req)
                return -ENOMEM;

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                return rc;
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);
        /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
         * retry logic
         */
        req->rq_no_retry_einprogress = 1;

        desc = ptlrpc_prep_bulk_imp(req, page_count,
                cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
                opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
                OST_BULK_PORTAL);

        if (!desc) {
                rc = -ENOMEM;
                goto out;
        }
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body && ioobj && niobuf);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        /* The high bits of ioo_max_brw tell the server the _maximum_ number
         * of bulks that might be sent for this request. The actual number
         * is decided when the RPC is finally sent in ptlrpc_register_bulk().
         * It sends "max - 1" for old client compatibility sending "0", and
         * also so that the actual maximum is a power-of-two number, not one
         * less. LU-1431
         */
        ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~CFS_PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: %llu, count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->off;
                        niobuf->len = pg->count;
                        niobuf->flags = pg->flag;
                }
                pg_prev = pg;
        }

        LASSERTF((void *)(niobuf - niocount) ==
                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs
                         */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                                oa->o_flags &= OBD_FL_LOCAL_MASK;
                                body->oa.o_flags = 0;
                        }
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                             page_count, pga,
                                                             OST_WRITE,
                                                             cksum_type);
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238
                         */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                }
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                }
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        aa->aa_resends = 0;
        aa->aa_ppga = pga;
        aa->aa_cli = cli;
        INIT_LIST_HEAD(&aa->aa_oaps);

        *reqp = req;
        return 0;

out:
        ptlrpc_req_finished(req);
        return rc;
}

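/* A write checksum mismatch was reported by the OST.  Re-checksum the
 * pages we still hold to work out where the corruption likely happened
 * (on the client after checksumming, in transit, or a checksum-type
 * mismatch) and log the verdict.  Returns 1 on mismatch so the caller
 * can retry with -EAGAIN.
 */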
static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
                                __u32 client_cksum, __u32 server_cksum, int nob,
                                u32 page_count, struct brw_page **pga,
                                cksum_type_t client_cksum_type)
{
        __u32 new_cksum;
        char *msg;
        cksum_type_t cksum_type;

        if (server_cksum == client_cksum) {
                CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
                return 0;
        }

        cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
                                       oa->o_flags : 0);
        new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
                                      cksum_type);

        if (cksum_type != client_cksum_type)
                msg = "the server did not use the checksum type specified in the original request - likely a protocol problem";
        else if (new_cksum == server_cksum)
                msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)";
        else if (new_cksum == client_cksum)
                msg = "changed in transit before arrival at OST";
        else
                msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)";

        LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
                           " object "DOSTID" extent [%llu-%llu]\n",
                           msg, libcfs_nid2str(peer->nid),
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
                           oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
                           POSTID(&oa->o_oi), pga[0]->off,
                           pga[page_count - 1]->off +
                           pga[page_count - 1]->count - 1);
        CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
               client_cksum, client_cksum_type,
               server_cksum, cksum_type, new_cksum);
        return 1;
}

/* Note rc enters this function as number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
{
        struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
        const lnet_process_id_t *peer =
                        &req->rq_import->imp_connection->c_peer;
        struct client_obd *cli = aa->aa_cli;
        struct ost_body *body;
        __u32 client_cksum = 0;

        if (rc < 0 && rc != -EDQUOT) {
                DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
                return rc;
        }

        LASSERTF(req->rq_repmsg, "rc = %d\n", rc);
        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (!body) {
                DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
                return -EPROTO;
        }

        /* set/clear over quota flag for a uid/gid */
        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
            body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
                unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };

                CDEBUG(D_QUOTA, "setdq for [%u %u] with valid %#llx, flags %x\n",
                       body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
                       body->oa.o_flags);
                osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
        }

        osc_update_grant(cli, body);

        if (rc < 0)
                return rc;

        if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
                client_cksum = aa->aa_oa->o_cksum; /* save for later */

        if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
                if (rc > 0) {
                        CERROR("Unexpected +ve rc %d\n", rc);
                        return -EPROTO;
                }
                LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);

                if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
                        return -EAGAIN;

                if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
                    check_write_checksum(&body->oa, peer, client_cksum,
                                         body->oa.o_cksum,
                                         aa->aa_requested_nob,
                                         aa->aa_page_count, aa->aa_ppga,
                                         cksum_type_unpack(aa->aa_oa->o_flags)))
                        return -EAGAIN;

                rc = check_write_rcs(req, aa->aa_requested_nob,
                                     aa->aa_nio_count,
                                     aa->aa_page_count, aa->aa_ppga);
                goto out;
        }

        /* The rest of this function executes only for OST_READs */

        /* if unwrap_bulk failed, return -EAGAIN to retry */
        rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
        if (rc < 0) {
                rc = -EAGAIN;
                goto out;
        }

        if (rc > aa->aa_requested_nob) {
                CERROR("Unexpected rc %d (%d requested)\n", rc,
                       aa->aa_requested_nob);
                return -EPROTO;
        }

        if (rc != req->rq_bulk->bd_nob_transferred) {
                CERROR("Unexpected rc %d (%d transferred)\n",
                       rc, req->rq_bulk->bd_nob_transferred);
                return -EPROTO;
        }

        if (rc < aa->aa_requested_nob)
                handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

        if (body->oa.o_valid & OBD_MD_FLCKSUM) {
                static int cksum_counter;
                __u32 server_cksum = body->oa.o_cksum;
                char *via = "";
                char *router = "";
                cksum_type_t cksum_type;

                cksum_type = cksum_type_unpack(body->oa.o_valid &
                                               OBD_MD_FLFLAGS ?
                                               body->oa.o_flags : 0);
                client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
                                                 aa->aa_ppga, OST_READ,
                                                 cksum_type);

                if (peer->nid != req->rq_bulk->bd_sender) {
                        via = " via ";
                        router = libcfs_nid2str(req->rq_bulk->bd_sender);
                }

                if (server_cksum != client_cksum) {
                        LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
                                           req->rq_import->imp_obd->obd_name,
                                           libcfs_nid2str(peer->nid),
                                           via, router,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                           body->oa.o_parent_seq : (__u64)0,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                           body->oa.o_parent_oid : 0,
                                           body->oa.o_valid & OBD_MD_FLFID ?
                                           body->oa.o_parent_ver : 0,
                                           POSTID(&body->oa.o_oi),
                                           aa->aa_ppga[0]->off,
                                           aa->aa_ppga[aa->aa_page_count - 1]->off +
                                           aa->aa_ppga[aa->aa_page_count - 1]->count -
                                           1);
                        CERROR("client %x, server %x, cksum_type %x\n",
                               client_cksum, server_cksum, cksum_type);
                        cksum_counter = 0;
                        aa->aa_oa->o_cksum = client_cksum;
                        rc = -EAGAIN;
                } else {
                        cksum_counter++;
                        CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
                        rc = 0;
                }
        } else if (unlikely(client_cksum)) {
                static int cksum_missed;

                cksum_missed++;
                if ((cksum_missed & (-cksum_missed)) == cksum_missed)
                        CERROR("Checksum %u requested from %s but not sent\n",
                               cksum_missed, libcfs_nid2str(peer->nid));
        } else {
                rc = 0;
        }
out:
        if (rc >= 0)
                lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
                                     aa->aa_oa, &body->oa);

        return rc;
}

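/* Resend a BRW that failed with a recoverable error: build a fresh request
 * with the same pages, move the oaps/extents lists over to it (a list_head
 * cannot simply be copied), cap the resend delay, and hand it to ptlrpcd.
 */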
static int osc_brw_redo_request(struct ptlrpc_request *request,
                                struct osc_brw_async_args *aa, int rc)
{
        struct ptlrpc_request *new_req;
        struct osc_brw_async_args *new_aa;
        struct osc_async_page *oap;

        DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
                  "redo for recoverable error %d", rc);

        rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
                                  OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
                                  aa->aa_cli, aa->aa_oa,
                                  NULL /* lsm unused by osc currently */,
                                  aa->aa_page_count, aa->aa_ppga,
                                  &new_req, 0, 1);
        if (rc)
                return rc;

        list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
                if (oap->oap_request) {
                        LASSERTF(request == oap->oap_request,
                                 "request %p != oap_request %p\n",
                                 request, oap->oap_request);
                        if (oap->oap_interrupted) {
                                ptlrpc_req_finished(new_req);
                                return -EINTR;
                        }
                }
        }
        /* New request takes over pga and oaps from old request.
         * Note that copying a list_head doesn't work, need to move it...
         */
        aa->aa_resends++;
        new_req->rq_interpret_reply = request->rq_interpret_reply;
        new_req->rq_async_args = request->rq_async_args;
        /* cap resend delay to the current request timeout, this is similar to
         * what ptlrpc does (see after_reply())
         */
        if (aa->aa_resends > new_req->rq_timeout)
                new_req->rq_sent = ktime_get_real_seconds() +
                                   new_req->rq_timeout;
        else
                new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
        new_req->rq_generation_set = 1;
        new_req->rq_import_generation = request->rq_import_generation;

        new_aa = ptlrpc_req_async_args(new_req);

        INIT_LIST_HEAD(&new_aa->aa_oaps);
        list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
        INIT_LIST_HEAD(&new_aa->aa_exts);
        list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
        new_aa->aa_resends = aa->aa_resends;

        list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
                if (oap->oap_request) {
                        ptlrpc_req_finished(oap->oap_request);
                        oap->oap_request = ptlrpc_request_addref(new_req);
                }
        }

        /* XXX: This code will run into problems if we ever want to support
         * adding a series of BRW RPCs into a self-defined ptlrpc_request_set
         * and waiting for all of them to finish. We should inherit the
         * request set from the old request.
         */
        ptlrpcd_add_req(new_req);

        DEBUG_REQ(D_INFO, new_req, "new request");
        return 0;
}

/*
 * Ugh, we want disk allocation on the target to happen in offset order. We'll
 * follow Sedgewick's advice and stick to the dead-simple shellsort -- it'll do
 * fine for our small page arrays and doesn't require allocation. It's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's '1' and the array is sorted.
 */
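/*
 * The stride sequence used below is Knuth's h = 3h + 1 (1, 4, 13, 40, ...).
 * For example, with num = 20 the first loop grows the stride to 40 and
 * stops; the sort then runs passes with strides 13, 4 and finally 1.
 */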
static void sort_brw_pages(struct brw_page **array, int num)
{
	int stride, i, j;
	struct brw_page *tmp;

	if (num == 1)
		return;
	for (stride = 1; stride < num; stride = (stride * 3) + 1)
		;

	do {
		stride /= 3;
		for (i = stride; i < num; i++) {
			tmp = array[i];
			j = i;
			while (j >= stride && array[j - stride]->off > tmp->off) {
				array[j] = array[j - stride];
				j -= stride;
			}
			array[j] = tmp;
		}
	} while (stride > 1);
}

static void osc_release_ppga(struct brw_page **ppga, u32 count)
{
	LASSERT(ppga);
	kfree(ppga);
}

static int brw_interpret(const struct lu_env *env,
			 struct ptlrpc_request *req, void *data, int rc)
{
	struct osc_brw_async_args *aa = data;
	struct osc_extent *ext;
	struct osc_extent *tmp;
	struct cl_object *obj = NULL;
	struct client_obd *cli = aa->aa_cli;

	rc = osc_brw_fini_request(req, rc);
	CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
	/* When the server returns -EINPROGRESS, the client should always
	 * retry regardless of how many times the bulk was already resent. */
	if (osc_recoverable_error(rc)) {
		if (req->rq_import_generation !=
		    req->rq_import->imp_generation) {
			CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
			       req->rq_import->imp_obd->obd_name,
			       POSTID(&aa->aa_oa->o_oi), rc);
		} else if (rc == -EINPROGRESS ||
			   client_should_resend(aa->aa_resends, aa->aa_cli)) {
			rc = osc_brw_redo_request(req, aa, rc);
		} else {
			CERROR("%s: too many resend retries for object: %llu:%llu, rc = %d.\n",
			       req->rq_import->imp_obd->obd_name,
			       POSTID(&aa->aa_oa->o_oi), rc);
		}

		if (rc == 0)
			return 0;
		else if (rc == -EAGAIN || rc == -EINPROGRESS)
			rc = -EIO;
	}

	list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
		if (!obj && rc == 0) {
			obj = osc2cl(ext->oe_obj);
			cl_object_get(obj);
		}

		list_del_init(&ext->oe_link);
		osc_extent_finish(env, ext, 1, rc);
	}
	LASSERT(list_empty(&aa->aa_exts));
	LASSERT(list_empty(&aa->aa_oaps));

	if (obj) {
		struct obdo *oa = aa->aa_oa;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		unsigned long valid = 0;

		LASSERT(rc == 0);
		if (oa->o_valid & OBD_MD_FLBLOCKS) {
			attr->cat_blocks = oa->o_blocks;
			valid |= CAT_BLOCKS;
		}
		if (oa->o_valid & OBD_MD_FLMTIME) {
			attr->cat_mtime = oa->o_mtime;
			valid |= CAT_MTIME;
		}
		if (oa->o_valid & OBD_MD_FLATIME) {
			attr->cat_atime = oa->o_atime;
			valid |= CAT_ATIME;
		}
		if (oa->o_valid & OBD_MD_FLCTIME) {
			attr->cat_ctime = oa->o_ctime;
			valid |= CAT_CTIME;
		}
		if (valid != 0) {
			cl_object_attr_lock(obj);
			cl_object_attr_set(env, obj, attr, valid);
			cl_object_attr_unlock(obj);
		}
		cl_object_put(env, obj);
	}
	kmem_cache_free(obdo_cachep, aa->aa_oa);

	cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
			  req->rq_bulk->bd_nob_transferred);
	osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
	ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);

	client_obd_list_lock(&cli->cl_loi_list_lock);
	/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
	 * is called, so we know whether to go to sync BRWs or to wait for more
	 * RPCs to complete */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
		cli->cl_w_in_flight--;
	else
		cli->cl_r_in_flight--;
	osc_wake_cache_waiters(cli);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	osc_io_unplug(env, cli, NULL);
	return rc;
}

/**
 * Build an RPC from the list of extents @ext_list. The caller must ensure
 * that the total pages in this list do not exceed the max pages per RPC.
 * Extents in the list must be in OES_RPC state.
 */
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
		  struct list_head *ext_list, int cmd)
{
	struct ptlrpc_request *req = NULL;
	struct osc_extent *ext;
	struct brw_page **pga = NULL;
	struct osc_brw_async_args *aa = NULL;
	struct obdo *oa = NULL;
	struct osc_async_page *oap;
	struct osc_async_page *tmp;
	struct cl_req *clerq = NULL;
	enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
	struct ldlm_lock *lock = NULL;
	struct cl_req_attr *crattr = NULL;
	u64 starting_offset = OBD_OBJECT_EOF;
	u64 ending_offset = 0;
	int mpflag = 0;
	int mem_tight = 0;
	int page_count = 0;
	int i;
	int rc;
	struct ost_body *body;
	LIST_HEAD(rpc_list);

	LASSERT(!list_empty(ext_list));

	/* add pages into rpc_list to build BRW rpc */
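	/*
	 * The assertions in this loop encode a layout invariant: within one
	 * RPC only the lowest-offset page may start at a non-zero in-page
	 * offset and only the highest-offset page may end short of
	 * PAGE_CACHE_SIZE; every page in between must be a full page.
	 */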
	list_for_each_entry(ext, ext_list, oe_link) {
		LASSERT(ext->oe_state == OES_RPC);
		mem_tight |= ext->oe_memalloc;
		list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
			++page_count;
			list_add_tail(&oap->oap_rpc_item, &rpc_list);
			if (starting_offset > oap->oap_obj_off)
				starting_offset = oap->oap_obj_off;
			else
				LASSERT(oap->oap_page_off == 0);
			if (ending_offset < oap->oap_obj_off + oap->oap_count)
				ending_offset = oap->oap_obj_off +
						oap->oap_count;
			else
				LASSERT(oap->oap_page_off + oap->oap_count ==
					PAGE_CACHE_SIZE);
		}
	}

	if (mem_tight)
		mpflag = cfs_memory_pressure_get_and_set();

	crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
	if (!crattr) {
		rc = -ENOMEM;
		goto out;
	}

	pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
	if (!pga) {
		rc = -ENOMEM;
		goto out;
	}

	oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
	if (!oa) {
		rc = -ENOMEM;
		goto out;
	}

	i = 0;
	list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
		struct cl_page *page = oap2cl_page(oap);

		if (!clerq) {
			clerq = cl_req_alloc(env, page, crt,
					     1 /* only 1-object rpcs for now */);
			if (IS_ERR(clerq)) {
				rc = PTR_ERR(clerq);
				goto out;
			}
			lock = oap->oap_ldlm_lock;
		}
		if (mem_tight)
			oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
		pga[i] = &oap->oap_brw_page;
		pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
		CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
		       pga[i]->pg, page_index(oap->oap_page), oap,
		       pga[i]->flag);
		i++;
		cl_req_page_add(env, clerq, page);
	}

	/* always get the data for the obdo for the rpc */
	LASSERT(clerq);
	crattr->cra_oa = oa;
	cl_req_attr_set(env, clerq, crattr, ~0ULL);
	if (lock) {
		oa->o_handle = lock->l_remote_handle;
		oa->o_valid |= OBD_MD_FLHANDLE;
	}

	rc = cl_req_prep(env, clerq);
	if (rc != 0) {
		CERROR("cl_req_prep failed: %d\n", rc);
		goto out;
	}

	sort_brw_pages(pga, page_count);
	rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
				  pga, &req, 1, 0);
	if (rc != 0) {
		CERROR("prep_req failed: %d\n", rc);
		goto out;
	}

	req->rq_interpret_reply = brw_interpret;

	if (mem_tight != 0)
		req->rq_memalloc = 1;

	/* Need to update the timestamps after the request is built in case
	 * we race with setattr (locally or in queue at the OST). If the OST
	 * gets the later setattr before the earlier BRW (as determined by
	 * the request xid), the OST will not use the BRW timestamps. Sadly,
	 * there is no obvious way to do this in a single call. bug 10150 */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	crattr->cra_oa = &body->oa;
	cl_req_attr_set(env, clerq, crattr,
			OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);

	lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	INIT_LIST_HEAD(&aa->aa_oaps);
	list_splice_init(&rpc_list, &aa->aa_oaps);
	INIT_LIST_HEAD(&aa->aa_exts);
	list_splice_init(ext_list, &aa->aa_exts);
	aa->aa_clerq = clerq;

	/* queued sync pages can be torn down while the pages
	 * were between the pending list and the rpc */
	tmp = NULL;
	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
		/* only one oap gets a request reference */
		if (!tmp)
			tmp = oap;
		if (oap->oap_interrupted && !req->rq_intr) {
			CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
			       oap, req);
			ptlrpc_mark_interrupted(req);
		}
	}
	if (tmp)
		tmp->oap_request = ptlrpc_request_addref(req);

	client_obd_list_lock(&cli->cl_loi_list_lock);
	starting_offset >>= PAGE_CACHE_SHIFT;
	if (cmd == OBD_BRW_READ) {
		cli->cl_r_in_flight++;
		lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
		lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
		lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
				      starting_offset + 1);
	} else {
		cli->cl_w_in_flight++;
		lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
		lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
		lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
				      starting_offset + 1);
	}
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
		  page_count, aa, cli->cl_r_in_flight,
		  cli->cl_w_in_flight);

	ptlrpcd_add_req(req);
	rc = 0;

out:
	if (mem_tight != 0)
		cfs_memory_pressure_restore(mpflag);

	kfree(crattr);

	if (rc != 0) {
		LASSERT(!req);

		if (oa)
			kmem_cache_free(obdo_cachep, oa);
		kfree(pga);
		/* this should happen rarely and is pretty bad, it makes the
		 * pending list not follow the dirty order */
		while (!list_empty(ext_list)) {
			ext = list_entry(ext_list->next, struct osc_extent,
					 oe_link);
			list_del_init(&ext->oe_link);
			osc_extent_finish(env, ext, 0, rc);
		}
		if (clerq && !IS_ERR(clerq))
			cl_req_completion(env, clerq, rc);
	}
	return rc;
}

static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
					struct ldlm_enqueue_info *einfo)
{
	void *data = einfo->ei_cbdata;
	int set = 0;

	LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
	LASSERT(lock->l_resource->lr_type == einfo->ei_type);
	LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
	LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);

	lock_res_and_lock(lock);
	spin_lock(&osc_ast_guard);

	if (!lock->l_ast_data)
		lock->l_ast_data = data;
	if (lock->l_ast_data == data)
		set = 1;

	spin_unlock(&osc_ast_guard);
	unlock_res_and_lock(lock);

	return set;
}

static int osc_set_data_with_check(struct lustre_handle *lockh,
				   struct ldlm_enqueue_info *einfo)
{
	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
	int set = 0;

	if (lock) {
		set = osc_set_lock_data_with_check(lock, einfo);
		LDLM_LOCK_PUT(lock);
	} else {
		CERROR("lockh %p, data %p - client evicted?\n",
		       lockh, einfo->ei_cbdata);
	}
	return set;
}

/* find any ldlm lock of the inode in osc
 * return: 0 -- not found
 *	   1 -- found one
 *	 < 0 -- error */
static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
			   ldlm_iterator_t replace, void *data)
{
	struct ldlm_res_id res_id;
	struct obd_device *obd = class_exp2obd(exp);
	int rc = 0;

	ostid_build_res_name(&lsm->lsm_oi, &res_id);
	rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
	if (rc == LDLM_ITER_STOP)
		return 1;
	if (rc == LDLM_ITER_CONTINUE)
		return 0;
	return rc;
}

static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
			    obd_enqueue_update_f upcall, void *cookie,
			    __u64 *flags, int agl, int rc)
{
	int intent = *flags & LDLM_FL_HAS_INTENT;

	if (intent) {
		/* The request was created before the ldlm_cli_enqueue call. */
		if (rc == ELDLM_LOCK_ABORTED) {
			struct ldlm_reply *rep;

			rep = req_capsule_server_get(&req->rq_pill,
						     &RMF_DLM_REP);

			rep->lock_policy_res1 =
				ptlrpc_status_ntoh(rep->lock_policy_res1);
			if (rep->lock_policy_res1)
				rc = rep->lock_policy_res1;
		}
	}

	if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
	    (rc == 0)) {
		*flags |= LDLM_FL_LVB_READY;
		CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
		       lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
	}

	/* Call the update callback. */
	rc = (*upcall)(cookie, rc);
	return rc;
}

static int osc_enqueue_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 struct osc_enqueue_args *aa, int rc)
{
	struct ldlm_lock *lock;
	struct lustre_handle handle;
	__u32 mode;
	struct ost_lvb *lvb;
	__u32 lvb_len;
	__u64 *flags = aa->oa_flags;

	/* Make a local copy of the lock handle and the mode, because aa->oa_*
	 * might be freed anytime after the lock upcall has been called. */
	lustre_handle_copy(&handle, aa->oa_lockh);
	mode = aa->oa_ei->ei_mode;

	/* ldlm_cli_enqueue is holding a reference on the lock, so it must
	 * be valid. */
	lock = ldlm_handle2lock(&handle);

	/* Take an additional reference so that a blocking AST that
	 * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
	 * to arrive after an upcall has been executed by
	 * osc_enqueue_fini(). */
	ldlm_lock_addref(&handle, mode);

	/* Let the CP AST grant the lock first. */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);

	if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
		lvb = NULL;
		lvb_len = 0;
	} else {
		lvb = aa->oa_lvb;
		lvb_len = sizeof(*aa->oa_lvb);
	}

	/* Complete obtaining the lock procedure. */
	rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
				   mode, flags, lvb, lvb_len, &handle, rc);
	/* Complete osc stuff. */
	rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
			      flags, aa->oa_agl, rc);

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);

	/* Release the lock for async request. */
	if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
		/*
		 * Releases a reference taken by ldlm_cli_enqueue(), if it is
		 * not already released by
		 * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
		 */
		ldlm_lock_decref(&handle, mode);

	LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
		 aa->oa_lockh, req, aa);
	ldlm_lock_decref(&handle, mode);
	LDLM_LOCK_PUT(lock);
	return rc;
}

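/* Sentinel value, never dereferenced: passing PTLRPCD_SET as the rqset
 * argument asks osc_enqueue_base() to hand the request to ptlrpcd instead of
 * adding it to a caller-owned request set. */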
struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;

/* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
 * from the 2nd OSC before a lock from the 1st one. This does not deadlock
 * with other synchronous requests; however, keeping some locks while trying
 * to obtain others may take a considerable amount of time in a case of OST
 * failure, and when other sync requests do not get a released lock from a
 * client, the client is excluded from the cluster -- such scenarios make
 * life difficult, so release locks just after they are obtained. */
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
		     __u64 *flags, ldlm_policy_data_t *policy,
		     struct ost_lvb *lvb, int kms_valid,
		     obd_enqueue_update_f upcall, void *cookie,
		     struct ldlm_enqueue_info *einfo,
		     struct lustre_handle *lockh,
		     struct ptlrpc_request_set *rqset, int async, int agl)
{
	struct obd_device *obd = exp->exp_obd;
	struct ptlrpc_request *req = NULL;
	int intent = *flags & LDLM_FL_HAS_INTENT;
	__u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
	ldlm_mode_t mode;
	int rc;

	/* Filesystem lock extents are extended to page boundaries so that
	 * dealing with the page cache is a little smoother. */
	policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
	policy->l_extent.end |= ~CFS_PAGE_MASK;
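	/*
	 * For illustration, with 4 KiB pages ~CFS_PAGE_MASK is 0xfff, so a
	 * byte range [0x1234, 0x5678] is widened to the page-aligned range
	 * [0x1000, 0x5fff].
	 */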

	/*
	 * kms is not valid when either object is completely fresh (so that no
	 * locks are cached), or object was evicted. In the latter case cached
	 * lock cannot be used, because it would prime inode state with
	 * potentially stale LVB.
	 */
	if (!kms_valid)
		goto no_match;

	/* Next, search for already existing extent locks that will cover us */
	/* If we're trying to read, we also search for an existing PW lock. The
	 * VFS and page cache already protect us locally, so lots of readers/
	 * writers can share a single PW lock.
	 *
	 * There are problems with conversion deadlocks, so instead of
	 * converting a read lock to a write lock, we'll just enqueue a new
	 * one.
	 *
	 * At some point we should cancel the read lock instead of making them
	 * send us a blocking callback, but there are problems with canceling
	 * locks out from other users right now, too. */
	mode = einfo->ei_mode;
	if (einfo->ei_mode == LCK_PR)
		mode |= LCK_PW;
	mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
			       einfo->ei_type, policy, mode, lockh, 0);
	if (mode) {
		struct ldlm_lock *matched = ldlm_handle2lock(lockh);

		if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
			/* For AGL, if the enqueue RPC is sent but the lock is
			 * not granted, then skip processing this stripe.
			 * Return -ECANCELED to tell the caller. */
			ldlm_lock_decref(lockh, mode);
			LDLM_LOCK_PUT(matched);
			return -ECANCELED;
		}

		if (osc_set_lock_data_with_check(matched, einfo)) {
			*flags |= LDLM_FL_LVB_READY;
			/* addref the lock only if not async requests and PW
			 * lock is matched whereas we asked for PR. */
			if (!rqset && einfo->ei_mode != mode)
				ldlm_lock_addref(lockh, LCK_PR);
			if (intent) {
				/* I would like to be able to ASSERT here that
				 * rss <= kms, but I can't, for reasons which
				 * are explained in lov_enqueue() */
			}

			/* We already have a lock, and it's referenced.
			 *
			 * At this point, the cl_lock::cll_state is CLS_QUEUING,
			 * AGL upcall may change it to CLS_HELD directly. */
			(*upcall)(cookie, ELDLM_OK);

			if (einfo->ei_mode != mode)
				ldlm_lock_decref(lockh, LCK_PW);
			else if (rqset)
				/* For async requests, decref the lock. */
				ldlm_lock_decref(lockh, einfo->ei_mode);
			LDLM_LOCK_PUT(matched);
			return ELDLM_OK;
		}

		ldlm_lock_decref(lockh, mode);
		LDLM_LOCK_PUT(matched);
	}

no_match:
	if (intent) {
		LIST_HEAD(cancels);

		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
					   &RQF_LDLM_ENQUEUE_LVB);
		if (!req)
			return -ENOMEM;

		rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
		if (rc) {
			ptlrpc_request_free(req);
			return rc;
		}

		req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
				     sizeof(*lvb));
		ptlrpc_request_set_replen(req);
	}

	/* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
	*flags &= ~LDLM_FL_BLOCK_GRANTED;

	rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
			      sizeof(*lvb), LVB_T_OST, lockh, async);
	if (rqset) {
		if (!rc) {
			struct osc_enqueue_args *aa;

			CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
			aa = ptlrpc_req_async_args(req);
			aa->oa_ei = einfo;
			aa->oa_exp = exp;
			aa->oa_flags = flags;
			aa->oa_upcall = upcall;
			aa->oa_cookie = cookie;
			aa->oa_lvb = lvb;
			aa->oa_lockh = lockh;
			aa->oa_agl = !!agl;

			req->rq_interpret_reply =
				(ptlrpc_interpterer_t)osc_enqueue_interpret;
			if (rqset == PTLRPCD_SET)
				ptlrpcd_add_req(req);
			else
				ptlrpc_set_add_req(rqset, req);
		} else if (intent) {
			ptlrpc_req_finished(req);
		}
		return rc;
	}

	rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
	if (intent)
		ptlrpc_req_finished(req);

	return rc;
}

int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
		   __u32 type, ldlm_policy_data_t *policy, __u32 mode,
		   __u64 *flags, void *data, struct lustre_handle *lockh,
		   int unref)
{
	struct obd_device *obd = exp->exp_obd;
	__u64 lflags = *flags;
	ldlm_mode_t rc;

	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
		return -EIO;

	/* Filesystem lock extents are extended to page boundaries so that
	 * dealing with the page cache is a little smoother */
	policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
	policy->l_extent.end |= ~CFS_PAGE_MASK;

	/* Next, search for already existing extent locks that will cover us */
	/* If we're trying to read, we also search for an existing PW lock. The
	 * VFS and page cache already protect us locally, so lots of readers/
	 * writers can share a single PW lock. */
	rc = mode;
	if (mode == LCK_PR)
		rc |= LCK_PW;
	rc = ldlm_lock_match(obd->obd_namespace, lflags,
			     res_id, type, policy, rc, lockh, unref);
	if (rc) {
		if (data) {
			if (!osc_set_data_with_check(lockh, data)) {
				if (!(lflags & LDLM_FL_TEST_LOCK))
					ldlm_lock_decref(lockh, rc);
				return 0;
			}
		}
		if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
			ldlm_lock_addref(lockh, LCK_PR);
			ldlm_lock_decref(lockh, LCK_PW);
		}
		return rc;
	}
	return rc;
}

int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
{
	if (unlikely(mode == LCK_GROUP))
		ldlm_lock_decref_and_cancel(lockh, mode);
	else
		ldlm_lock_decref(lockh, mode);

	return 0;
}

static int osc_statfs_interpret(const struct lu_env *env,
				struct ptlrpc_request *req,
				struct osc_async_args *aa, int rc)
{
	struct obd_statfs *msfs;

	if (rc == -EBADR)
		/* The request has in fact never been sent
		 * due to issues at a higher level (LOV).
		 * Exit immediately since the caller is
		 * aware of the problem and takes care
		 * of the clean up */
		return rc;

	if ((rc == -ENOTCONN || rc == -EAGAIN) &&
	    (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
		rc = 0;
		goto out;
	}

	if (rc != 0)
		goto out;

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (!msfs) {
		rc = -EPROTO;
		goto out;
	}

	*aa->aa_oi->oi_osfs = *msfs;
out:
	rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
	return rc;
}

static int osc_statfs_async(struct obd_export *exp,
			    struct obd_info *oinfo, __u64 max_age,
			    struct ptlrpc_request_set *rqset)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	struct osc_async_args *aa;
	int rc;

	/* We could possibly pass max_age in the request (as an absolute
	 * timestamp or a "seconds.usec ago") so the target can avoid doing
	 * extra calls into the filesystem if that isn't necessary (e.g.
	 * during mount that would help a bit). Having relative timestamps
	 * is not so great if request processing is slow, while absolute
	 * timestamps are not ideal because they need time synchronization. */
	req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
	if (!req)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}
	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
		/* procfs requests must not wait for stats, to avoid a
		 * deadlock */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;
	}

	req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_oi = oinfo;

	ptlrpc_set_add_req(rqset, req);
	return 0;
}

static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
		      struct obd_statfs *osfs, __u64 max_age, __u32 flags)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct obd_statfs *msfs;
	struct ptlrpc_request *req;
	struct obd_import *imp = NULL;
	int rc;

	/* Since the request might also come from lprocfs, we need
	 * to sync this with client_disconnect_export (Bug15684) */
	down_read(&obd->u.cli.cl_sem);
	if (obd->u.cli.cl_import)
		imp = class_import_get(obd->u.cli.cl_import);
	up_read(&obd->u.cli.cl_sem);
	if (!imp)
		return -ENODEV;

	/* We could possibly pass max_age in the request (as an absolute
	 * timestamp or a "seconds.usec ago") so the target can avoid doing
	 * extra calls into the filesystem if that isn't necessary (e.g.
	 * during mount that would help a bit). Having relative timestamps
	 * is not so great if request processing is slow, while absolute
	 * timestamps are not ideal because they need time synchronization. */
	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);

	class_import_put(imp);

	if (!req)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}
	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	if (flags & OBD_STATFS_NODELAY) {
		/* procfs requests must not wait for stats, to avoid a
		 * deadlock */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;
	}

	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (!msfs) {
		rc = -EPROTO;
		goto out;
	}

	*osfs = *msfs;

out:
	ptlrpc_req_finished(req);
	return rc;
}

/* Retrieve object striping information.
 *
 * @lmmu is a pointer to an in-core struct with lmm_ost_count indicating
 * the maximum number of OST indices which will fit in the user buffer.
 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
 */
static int osc_getstripe(struct lov_stripe_md *lsm,
			 struct lov_user_md __user *lump)
{
	/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
	struct lov_user_md_v3 lum, *lumk;
	struct lov_user_ost_data_v1 *lmm_objects;
	int rc = 0, lum_size;

	if (!lsm)
		return -ENODATA;

	/* we only need the header part from user space to get lmm_magic and
	 * lmm_stripe_count; the header part is common to v1 and v3 */
	lum_size = sizeof(struct lov_user_md_v1);
	if (copy_from_user(&lum, lump, lum_size))
		return -EFAULT;

	if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
	    (lum.lmm_magic != LOV_USER_MAGIC_V3))
		return -EINVAL;

	/* lov_user_md_vX and lov_mds_md_vX must have the same size */
	LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
	LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
	LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));

	/* we can use lov_mds_md_size() to compute lum_size
	 * because lov_user_md_vX and lov_mds_md_vX have the same size */
	if (lum.lmm_stripe_count > 0) {
		lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
		lumk = kzalloc(lum_size, GFP_NOFS);
		if (!lumk)
			return -ENOMEM;

		if (lum.lmm_magic == LOV_USER_MAGIC_V1)
			lmm_objects =
			    &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
		else
			lmm_objects = &(lumk->lmm_objects[0]);
		lmm_objects->l_ost_oi = lsm->lsm_oi;
	} else {
		lum_size = lov_mds_md_size(0, lum.lmm_magic);
		lumk = &lum;
	}

	lumk->lmm_oi = lsm->lsm_oi;
	lumk->lmm_stripe_count = 1;

	if (copy_to_user(lump, lumk, lum_size))
		rc = -EFAULT;

	if (lumk != &lum)
		kfree(lumk);

	return rc;
}

static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
			 void *karg, void __user *uarg)
{
	struct obd_device *obd = exp->exp_obd;
	struct obd_ioctl_data *data = karg;
	int err = 0;

	if (!try_module_get(THIS_MODULE)) {
		CERROR("Can't get module. Is it alive?");
		return -EINVAL;
	}
	switch (cmd) {
	case OBD_IOC_LOV_GET_CONFIG: {
		char *buf;
		struct lov_desc *desc;
		struct obd_uuid uuid;

		buf = NULL;
		len = 0;
		if (obd_ioctl_getdata(&buf, &len, uarg)) {
			err = -EINVAL;
			goto out;
		}

		data = (struct obd_ioctl_data *)buf;

		if (sizeof(*desc) > data->ioc_inllen1) {
			obd_ioctl_freedata(buf, len);
			err = -EINVAL;
			goto out;
		}

		if (data->ioc_inllen2 < sizeof(uuid)) {
			obd_ioctl_freedata(buf, len);
			err = -EINVAL;
			goto out;
		}

		desc = (struct lov_desc *)data->ioc_inlbuf1;
		desc->ld_tgt_count = 1;
		desc->ld_active_tgt_count = 1;
		desc->ld_default_stripe_count = 1;
		desc->ld_default_stripe_size = 0;
		desc->ld_default_stripe_offset = 0;
		desc->ld_pattern = 0;
		memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));

		memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));

		err = copy_to_user(uarg, buf, len);
		if (err)
			err = -EFAULT;
		obd_ioctl_freedata(buf, len);
		goto out;
	}
	case LL_IOC_LOV_SETSTRIPE:
		err = obd_alloc_memmd(exp, karg);
		if (err > 0)
			err = 0;
		goto out;
	case LL_IOC_LOV_GETSTRIPE:
		err = osc_getstripe(karg, uarg);
		goto out;
	case OBD_IOC_CLIENT_RECOVER:
		err = ptlrpc_recover_import(obd->u.cli.cl_import,
					    data->ioc_inlbuf1, 0);
		if (err > 0)
			err = 0;
		goto out;
	case IOC_OSC_SET_ACTIVE:
		err = ptlrpc_set_import_active(obd->u.cli.cl_import,
					       data->ioc_offset);
		goto out;
	case OBD_IOC_POLL_QUOTACHECK:
		err = osc_quota_poll_check(exp, karg);
		goto out;
	case OBD_IOC_PING_TARGET:
		err = ptlrpc_obd_ping(obd);
		goto out;
	default:
		CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
		       cmd, current_comm());
		err = -ENOTTY;
		goto out;
	}
out:
	module_put(THIS_MODULE);
	return err;
}

static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
			u32 keylen, void *key, __u32 *vallen, void *val,
			struct lov_stripe_md *lsm)
{
	if (!vallen || !val)
		return -EFAULT;

	if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
		__u32 *stripe = val;
		*vallen = sizeof(*stripe);
		*stripe = 0;
		return 0;
	} else if (KEY_IS(KEY_LAST_ID)) {
		struct ptlrpc_request *req;
		u64 *reply;
		char *tmp;
		int rc;

		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
					   &RQF_OST_GET_INFO_LAST_ID);
		if (!req)
			return -ENOMEM;

		req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
				     RCL_CLIENT, keylen);
		rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
		if (rc) {
			ptlrpc_request_free(req);
			return rc;
		}

		tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
		memcpy(tmp, key, keylen);

		req->rq_no_delay = req->rq_no_resend = 1;
		ptlrpc_request_set_replen(req);
		rc = ptlrpc_queue_wait(req);
		if (rc)
			goto out;

		reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
		if (!reply) {
			rc = -EPROTO;
			goto out;
		}

		*((u64 *)val) = *reply;
out:
		ptlrpc_req_finished(req);
		return rc;
	} else if (KEY_IS(KEY_FIEMAP)) {
		struct ll_fiemap_info_key *fm_key = key;
		struct ldlm_res_id res_id;
		ldlm_policy_data_t policy;
		struct lustre_handle lockh;
		ldlm_mode_t mode = 0;
		struct ptlrpc_request *req;
		struct ll_user_fiemap *reply;
		char *tmp;
		int rc;

		if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
			goto skip_locking;

		policy.l_extent.start = fm_key->fiemap.fm_start &
					CFS_PAGE_MASK;

		if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
		    fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
			policy.l_extent.end = OBD_OBJECT_EOF;
		else
			policy.l_extent.end = (fm_key->fiemap.fm_start +
					       fm_key->fiemap.fm_length +
					       PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;

		ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
		mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
				       LDLM_FL_BLOCK_GRANTED |
				       LDLM_FL_LVB_READY,
				       &res_id, LDLM_EXTENT, &policy,
				       LCK_PR | LCK_PW, &lockh, 0);
		if (mode) { /* lock is cached on client */
			if (mode != LCK_PR) {
				ldlm_lock_addref(&lockh, LCK_PR);
				ldlm_lock_decref(&lockh, LCK_PW);
			}
		} else { /* no cached lock, acquire it on the server side */
			fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
			fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
		}

skip_locking:
		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
					   &RQF_OST_GET_INFO_FIEMAP);
		if (!req) {
			rc = -ENOMEM;
			goto drop_lock;
		}

		req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
				     RCL_CLIENT, keylen);
		req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
				     RCL_CLIENT, *vallen);
		req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
				     RCL_SERVER, *vallen);

		rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
		if (rc) {
			ptlrpc_request_free(req);
			goto drop_lock;
		}

		tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
		memcpy(tmp, key, keylen);
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
		memcpy(tmp, val, *vallen);

		ptlrpc_request_set_replen(req);
		rc = ptlrpc_queue_wait(req);
		if (rc)
			goto fini_req;

		reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
		if (!reply) {
			rc = -EPROTO;
			goto fini_req;
		}

		memcpy(val, reply, *vallen);
fini_req:
		ptlrpc_req_finished(req);
drop_lock:
		if (mode)
			ldlm_lock_decref(&lockh, LCK_PR);
		return rc;
	}

	return -EINVAL;
}

static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
			      u32 keylen, void *key, u32 vallen,
			      void *val, struct ptlrpc_request_set *set)
{
	struct ptlrpc_request *req;
	struct obd_device *obd = exp->exp_obd;
	struct obd_import *imp = class_exp2cliimp(exp);
	char *tmp;
	int rc;

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);

	if (KEY_IS(KEY_CHECKSUM)) {
		if (vallen != sizeof(int))
			return -EINVAL;
		exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
		return 0;
	}

	if (KEY_IS(KEY_SPTLRPC_CONF)) {
		sptlrpc_conf_client_adapt(obd);
		return 0;
	}

	if (KEY_IS(KEY_FLUSH_CTX)) {
		sptlrpc_import_flush_my_ctx(imp);
		return 0;
	}

	if (KEY_IS(KEY_CACHE_SET)) {
		struct client_obd *cli = &obd->u.cli;

		LASSERT(!cli->cl_cache); /* only once */
		cli->cl_cache = val;
		atomic_inc(&cli->cl_cache->ccc_users);
		cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;

		/* add this osc into entity list */
		LASSERT(list_empty(&cli->cl_lru_osc));
		spin_lock(&cli->cl_cache->ccc_lru_lock);
		list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
		spin_unlock(&cli->cl_cache->ccc_lru_lock);

		return 0;
	}

	if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
		struct client_obd *cli = &obd->u.cli;
		int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
		int target = *(int *)val;

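		/*
		 * Shrink at most half of this OSC's cached LRU pages per
		 * call, capped by the caller's target, and report progress
		 * by decrementing the remaining target in place.
		 */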
		nr = osc_lru_shrink(cli, min(nr, target));
		*(int *)val -= nr;
		return 0;
	}

	if (!set && !KEY_IS(KEY_GRANT_SHRINK))
		return -EINVAL;

	/* We pass all other commands directly to OST. Since nobody calls osc
	 * methods directly and everybody is supposed to go through LOV, we
	 * assume lov checked invalid values for us.
	 * The only recognised values so far are evict_by_nid and mds_conn.
	 * Even if something bad goes through, we'd get a -EINVAL from OST
	 * anyway. */

	req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
				   &RQF_OST_SET_GRANT_INFO :
				   &RQF_OBD_SET_INFO);
	if (!req)
		return -ENOMEM;

	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
			     RCL_CLIENT, keylen);
	if (!KEY_IS(KEY_GRANT_SHRINK))
		req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
				     RCL_CLIENT, vallen);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	memcpy(tmp, key, keylen);
	tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
				     &RMF_OST_BODY :
				     &RMF_SETINFO_VAL);
	memcpy(tmp, val, vallen);

	if (KEY_IS(KEY_GRANT_SHRINK)) {
		struct osc_brw_async_args *aa;
		struct obdo *oa;

		CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
		aa = ptlrpc_req_async_args(req);
		oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
		if (!oa) {
			ptlrpc_req_finished(req);
			return -ENOMEM;
		}
		*oa = ((struct ost_body *)val)->oa;
		aa->aa_oa = oa;
		req->rq_interpret_reply = osc_shrink_grant_interpret;
	}

	ptlrpc_request_set_replen(req);
	if (!KEY_IS(KEY_GRANT_SHRINK)) {
		LASSERT(set);
		ptlrpc_set_add_req(set, req);
		ptlrpc_check_set(NULL, set);
	} else {
		ptlrpcd_add_req(req);
	}

	return 0;
}

static int osc_reconnect(const struct lu_env *env,
			 struct obd_export *exp, struct obd_device *obd,
			 struct obd_uuid *cluuid,
			 struct obd_connect_data *data,
			 void *localdata)
{
	struct client_obd *cli = &obd->u.cli;

	if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
		long lost_grant;

		client_obd_list_lock(&cli->cl_loi_list_lock);
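		/*
		 * The GNU "a ?: b" form below keeps whatever grant this
		 * client still holds; a fresh client with nothing cached
		 * asks for roughly two full-sized BRW RPCs' worth instead.
		 */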
		data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
				  2 * cli_brw_size(obd);
		lost_grant = cli->cl_lost_grant;
		cli->cl_lost_grant = 0;
		client_obd_list_unlock(&cli->cl_loi_list_lock);

		CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
		       data->ocd_connect_flags,
		       data->ocd_version, data->ocd_grant, lost_grant);
	}

	return 0;
}

static int osc_disconnect(struct obd_export *exp)
{
	struct obd_device *obd = class_exp2obd(exp);
	int rc;

	rc = client_disconnect_export(exp);
	/**
	 * Initially we put del_shrink_grant before disconnect_export, but it
	 * causes the following problem if setup (connect) and cleanup
	 * (disconnect) are tangled together.
	 *	connect p1		disconnect p2
	 *   ptlrpc_connect_import
	 *     ...............		class_manual_cleanup
	 *				  osc_disconnect
	 *				  del_shrink_grant
	 *   ptlrpc_connect_interrupt
	 *     init_grant_shrink
	 *     add this client to shrink list
	 *				  cleanup_osc
	 * Bang! The pinger triggers the shrink.
	 * So the osc should be removed from the shrink list only after we
	 * are sure the import has been destroyed. BUG18662
	 */
	if (!obd->u.cli.cl_import)
		osc_del_shrink_grant(&obd->u.cli);
	return rc;
}

static int osc_import_event(struct obd_device *obd,
			    struct obd_import *imp,
			    enum obd_import_event event)
{
	struct client_obd *cli;
	int rc = 0;

	LASSERT(imp->imp_obd == obd);

	switch (event) {
	case IMP_EVENT_DISCON: {
		cli = &obd->u.cli;
		client_obd_list_lock(&cli->cl_loi_list_lock);
		cli->cl_avail_grant = 0;
		cli->cl_lost_grant = 0;
		client_obd_list_unlock(&cli->cl_loi_list_lock);
		break;
	}
	case IMP_EVENT_INACTIVE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
		break;
	}
	case IMP_EVENT_INVALIDATE: {
		struct ldlm_namespace *ns = obd->obd_namespace;
		struct lu_env *env;
		int refcheck;

		env = cl_env_get(&refcheck);
		if (!IS_ERR(env)) {
			/* Reset grants */
			cli = &obd->u.cli;
			/* all pages go to failing rpcs due to the invalid
			 * import */
			osc_io_unplug(env, cli, NULL);

			ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
			cl_env_put(env, &refcheck);
		} else {
			rc = PTR_ERR(env);
		}
		break;
	}
	case IMP_EVENT_ACTIVE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
		break;
	}
	case IMP_EVENT_OCD: {
		struct obd_connect_data *ocd = &imp->imp_connect_data;

		if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
			osc_init_grant(&obd->u.cli, ocd);

		/* See bug 7198 */
		if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
			imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;

		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
		break;
	}
	case IMP_EVENT_DEACTIVATE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
		break;
	}
	case IMP_EVENT_ACTIVATE: {
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
		break;
	}
	default:
		CERROR("Unknown import event %d\n", event);
		LBUG();
	}
	return rc;
}

/**
 * Determine whether the lock can be canceled before replaying the lock
 * during recovery, see bug16774 for detailed information.
 *
 * \retval zero the lock can't be canceled
 * \retval other ok to cancel
 */
static int osc_cancel_for_recovery(struct ldlm_lock *lock)
{
	check_res_locked(lock->l_resource);

	/*
	 * Cancel all unused extent locks granted in mode LCK_PR or LCK_CR.
	 *
	 * XXX as a future improvement, we could also cancel an unused write
	 * lock if it doesn't have dirty data and active mmaps.
	 */
	if (lock->l_resource->lr_type == LDLM_EXTENT &&
	    (lock->l_granted_mode == LCK_PR ||
	     lock->l_granted_mode == LCK_CR) &&
	    (osc_dlm_lock_pageref(lock) == 0))
		return 1;

	return 0;
}

static int brw_queue_work(const struct lu_env *env, void *data)
{
	struct client_obd *cli = data;

	CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);

	osc_io_unplug(env, cli, NULL);
	return 0;
}

int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
	struct lprocfs_static_vars lvars = { NULL };
	struct client_obd *cli = &obd->u.cli;
	void *handler;
	int rc;
	int adding;
	int added;
	int req_count;

	rc = ptlrpcd_addref();
	if (rc)
		return rc;

	rc = client_obd_setup(obd, lcfg);
	if (rc)
		goto out_ptlrpcd;

	handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
	if (IS_ERR(handler)) {
		rc = PTR_ERR(handler);
		goto out_client_setup;
	}
	cli->cl_writeback_work = handler;

	rc = osc_quota_setup(obd);
	if (rc)
		goto out_ptlrpcd_work;

	cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
	lprocfs_osc_init_vars(&lvars);
	if (lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars) == 0) {
		lproc_osc_attach_seqstat(obd);
		sptlrpc_lprocfs_cliobd_attach(obd);
		ptlrpc_lprocfs_register_obd(obd);
	}

	/*
	 * We try to control the total number of requests with an upper limit
	 * osc_reqpool_maxreqcount. There might be some race which causes
	 * over-limit allocation, but that is fine.
	 */
	req_count = atomic_read(&osc_pool_req_count);
	if (req_count < osc_reqpool_maxreqcount) {
		adding = cli->cl_max_rpcs_in_flight + 2;
		if (req_count + adding > osc_reqpool_maxreqcount)
			adding = osc_reqpool_maxreqcount - req_count;

		added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
		atomic_add(added, &osc_pool_req_count);
	}

	INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
	ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
	return rc;

out_ptlrpcd_work:
	ptlrpcd_destroy_work(handler);
out_client_setup:
	client_obd_cleanup(obd);
out_ptlrpcd:
	ptlrpcd_decref();
	return rc;
}

static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
	switch (stage) {
	case OBD_CLEANUP_EARLY: {
		struct obd_import *imp;

		imp = obd->u.cli.cl_import;
		CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
		/* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
		ptlrpc_deactivate_import(imp);
		spin_lock(&imp->imp_lock);
		imp->imp_pingable = 0;
		spin_unlock(&imp->imp_lock);
		break;
	}
	case OBD_CLEANUP_EXPORTS: {
		struct client_obd *cli = &obd->u.cli;
		/* LU-464
		 * for echo client, export may be on zombie list, wait for
		 * zombie thread to cull it, because cli.cl_import will be
		 * cleared in client_disconnect_export():
		 *   class_export_destroy() -> obd_cleanup() ->
		 *   echo_device_free() -> echo_client_cleanup() ->
		 *   obd_disconnect() -> osc_disconnect() ->
		 *   client_disconnect_export()
		 */
		obd_zombie_barrier();
		if (cli->cl_writeback_work) {
			ptlrpcd_destroy_work(cli->cl_writeback_work);
			cli->cl_writeback_work = NULL;
		}
		obd_cleanup_client_import(obd);
		ptlrpc_lprocfs_unregister_obd(obd);
		lprocfs_obd_cleanup(obd);
		break;
	}
	}
	return 0;
}

int osc_cleanup(struct obd_device *obd)
{
	struct client_obd *cli = &obd->u.cli;
	int rc;

	/* lru cleanup */
	if (cli->cl_cache) {
		LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
		spin_lock(&cli->cl_cache->ccc_lru_lock);
		list_del_init(&cli->cl_lru_osc);
		spin_unlock(&cli->cl_cache->ccc_lru_lock);
		cli->cl_lru_left = NULL;
		atomic_dec(&cli->cl_cache->ccc_users);
		cli->cl_cache = NULL;
	}

	/* free memory of osc quota cache */
	osc_quota_cleanup(obd);

	rc = client_obd_cleanup(obd);

	ptlrpcd_decref();
	return rc;
}

int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
{
	struct lprocfs_static_vars lvars = { NULL };
	int rc = 0;

	lprocfs_osc_init_vars(&lvars);

	switch (lcfg->lcfg_command) {
	default:
		rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
					      lcfg, obd);
		if (rc > 0)
			rc = 0;
		break;
	}

	return rc;
}

static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
{
	return osc_process_config_base(obd, buf);
}

struct obd_ops osc_obd_ops = {
	.owner		= THIS_MODULE,
	.setup		= osc_setup,
	.precleanup	= osc_precleanup,
	.cleanup	= osc_cleanup,
	.add_conn	= client_import_add_conn,
	.del_conn	= client_import_del_conn,
	.connect	= client_connect_import,
	.reconnect	= osc_reconnect,
	.disconnect	= osc_disconnect,
	.statfs		= osc_statfs,
	.statfs_async	= osc_statfs_async,
	.packmd		= osc_packmd,
	.unpackmd	= osc_unpackmd,
	.create		= osc_create,
	.destroy	= osc_destroy,
	.getattr	= osc_getattr,
	.getattr_async	= osc_getattr_async,
	.setattr	= osc_setattr,
	.setattr_async	= osc_setattr_async,
	.find_cbdata	= osc_find_cbdata,
	.iocontrol	= osc_iocontrol,
	.get_info	= osc_get_info,
	.set_info_async	= osc_set_info_async,
	.import_event	= osc_import_event,
	.process_config	= osc_process_config,
	.quotactl	= osc_quotactl,
	.quotacheck	= osc_quotacheck,
};

extern struct lu_kmem_descr osc_caches[];
extern spinlock_t osc_ast_guard;
extern struct lock_class_key osc_ast_guard_class;

static int __init osc_init(void)
{
	struct lprocfs_static_vars lvars = { NULL };
	unsigned int reqpool_size;
	unsigned int reqsize;
	int rc;

	/* print an address of _any_ initialized kernel symbol from this
	 * module, to allow debugging with gdb that doesn't support data
	 * symbols from modules. */
	CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);

	rc = lu_kmem_init(osc_caches);
	if (rc)
		return rc;

	lprocfs_osc_init_vars(&lvars);

	rc = class_register_type(&osc_obd_ops, NULL,
				 LUSTRE_OSC_NAME, &osc_device_type);
	if (rc)
		goto out_kmem;

	spin_lock_init(&osc_ast_guard);
	lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);

	/* This is obviously too much memory, only prevent overflow here */
	if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
		rc = -EINVAL;
		goto out_type;
	}

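	/*
	 * The 1 << 12 MB bound above is 4 GiB: any larger value would
	 * overflow the 32-bit shift below (osc_reqpool_mem_max << 20).
	 */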
	reqpool_size = osc_reqpool_mem_max << 20;

	reqsize = 1;
	while (reqsize < OST_MAXREQSIZE)
		reqsize = reqsize << 1;
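
	/*
	 * reqsize is now OST_MAXREQSIZE rounded up to the next power of two,
	 * so the request count computed below is a conservative estimate;
	 * for example, if the rounded size were 16 KiB, the default 5 MB cap
	 * would allow 320 pooled requests.
	 */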
	/*
	 * We don't enlarge the request count in the OSC pool according to
	 * cl_max_rpcs_in_flight. Allocation from the pool is only tried
	 * after a normal allocation has failed, so a small OSC pool won't
	 * cause much performance degradation in most cases.
	 */
	osc_reqpool_maxreqcount = reqpool_size / reqsize;

	atomic_set(&osc_pool_req_count, 0);
	osc_rq_pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE,
					  ptlrpc_add_rqs_to_pool);

	if (osc_rq_pool)
		return 0;

	rc = -ENOMEM;

out_type:
	class_unregister_type(LUSTRE_OSC_NAME);
out_kmem:
	lu_kmem_fini(osc_caches);
	return rc;
}

static void /*__exit*/ osc_exit(void)
{
	class_unregister_type(LUSTRE_OSC_NAME);
	lu_kmem_fini(osc_caches);
	ptlrpc_free_rq_pool(osc_rq_pool);
}

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");
MODULE_VERSION(LUSTRE_VERSION_STRING);

module_init(osc_init);
module_exit(osc_exit);