drivers/staging/lustre/lustre/osc/osc_request.c
1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2015, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36
37 #define DEBUG_SUBSYSTEM S_OSC
38
39 #include "../../include/linux/libcfs/libcfs.h"
40
41 #include "../include/lustre_dlm.h"
42 #include "../include/lustre_net.h"
43 #include "../include/lustre/lustre_user.h"
44 #include "../include/obd_cksum.h"
45
46 #include "../include/lustre_ha.h"
47 #include "../include/lprocfs_status.h"
48 #include "../include/lustre_debug.h"
49 #include "../include/lustre_param.h"
50 #include "../include/lustre_fid.h"
51 #include "../include/obd_class.h"
52 #include "../include/obd.h"
53 #include "osc_internal.h"
54 #include "osc_cl_internal.h"
55
56 atomic_t osc_pool_req_count;
57 unsigned int osc_reqpool_maxreqcount;
58 struct ptlrpc_request_pool *osc_rq_pool;
59
60 /* max memory used for the request pool, in MB */
61 static unsigned int osc_reqpool_mem_max = 5;
62 module_param(osc_reqpool_mem_max, uint, 0444);
63
64 struct osc_brw_async_args {
65 struct obdo *aa_oa;
66 int aa_requested_nob;
67 int aa_nio_count;
68 u32 aa_page_count;
69 int aa_resends;
70 struct brw_page **aa_ppga;
71 struct client_obd *aa_cli;
72 struct list_head aa_oaps;
73 struct list_head aa_exts;
74 struct cl_req *aa_clerq;
75 };
76
77 struct osc_async_args {
78 struct obd_info *aa_oi;
79 };
80
81 struct osc_setattr_args {
82 struct obdo *sa_oa;
83 obd_enqueue_update_f sa_upcall;
84 void *sa_cookie;
85 };
86
87 struct osc_fsync_args {
88 struct obd_info *fa_oi;
89 obd_enqueue_update_f fa_upcall;
90 void *fa_cookie;
91 };
92
93 struct osc_enqueue_args {
94 struct obd_export *oa_exp;
95 enum ldlm_type oa_type;
96 enum ldlm_mode oa_mode;
97 __u64 *oa_flags;
98 osc_enqueue_upcall_f oa_upcall;
99 void *oa_cookie;
100 struct ost_lvb *oa_lvb;
101 struct lustre_handle oa_lockh;
102 unsigned int oa_agl:1;
103 };
104
105 static void osc_release_ppga(struct brw_page **ppga, u32 count);
106 static int brw_interpret(const struct lu_env *env,
107 struct ptlrpc_request *req, void *data, int rc);
108
109 /* Pack OSC object metadata for disk storage (LE byte order). */
110 static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
111 struct lov_stripe_md *lsm)
112 {
113 int lmm_size;
114
115 lmm_size = sizeof(**lmmp);
116 if (!lmmp)
117 return lmm_size;
118
119 if (*lmmp && !lsm) {
120 kfree(*lmmp);
121 *lmmp = NULL;
122 return 0;
123 } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) {
124 return -EBADF;
125 }
126
127 if (!*lmmp) {
128 *lmmp = kzalloc(lmm_size, GFP_NOFS);
129 if (!*lmmp)
130 return -ENOMEM;
131 }
132
133 if (lsm)
134 ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);
135
136 return lmm_size;
137 }
138
139 /* Unpack OSC object metadata from disk storage (LE byte order). */
140 static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
141 struct lov_mds_md *lmm, int lmm_bytes)
142 {
143 int lsm_size;
144 struct obd_import *imp = class_exp2cliimp(exp);
145
146 if (lmm) {
147 if (lmm_bytes < sizeof(*lmm)) {
148 CERROR("%s: lov_mds_md too small: %d, need %d\n",
149 exp->exp_obd->obd_name, lmm_bytes,
150 (int)sizeof(*lmm));
151 return -EINVAL;
152 }
153 /* XXX LOV_MAGIC etc check? */
154
155 if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
156 CERROR("%s: zero lmm_object_id: rc = %d\n",
157 exp->exp_obd->obd_name, -EINVAL);
158 return -EINVAL;
159 }
160 }
161
162 lsm_size = lov_stripe_md_size(1);
163 if (!lsmp)
164 return lsm_size;
165
166 if (*lsmp && !lmm) {
167 kfree((*lsmp)->lsm_oinfo[0]);
168 kfree(*lsmp);
169 *lsmp = NULL;
170 return 0;
171 }
172
173 if (!*lsmp) {
174 *lsmp = kzalloc(lsm_size, GFP_NOFS);
175 if (unlikely(!*lsmp))
176 return -ENOMEM;
177 (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
178 GFP_NOFS);
179 if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
180 kfree(*lsmp);
181 return -ENOMEM;
182 }
183 loi_init((*lsmp)->lsm_oinfo[0]);
184 } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
185 return -EBADF;
186 }
187
188 if (lmm)
189 /* XXX zero *lsmp? */
190 ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
191
192 if (imp &&
193 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
194 (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
195 else
196 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
197
198 return lsm_size;
199 }
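
Both osc_packmd() and osc_unpackmd() above follow the common obd convention of doubling as size queries: when the output pointer is NULL they return the buffer size needed instead of packing or unpacking anything, and when the output pointer points at NULL they allocate the buffer themselves. A minimal userspace sketch of that calling convention (pack_thing() and struct wire_thing are invented stand-ins, not Lustre symbols):

#include <stdio.h>
#include <stdlib.h>

struct wire_thing { unsigned long long id; };

/* Returns the required size when "out" is NULL; otherwise allocates
 * the buffer (if needed) and fills it, mirroring osc_packmd(). */
static int pack_thing(struct wire_thing **out, unsigned long long id)
{
	int size = sizeof(**out);

	if (!out)
		return size;		/* pure size query */

	if (!*out) {
		*out = calloc(1, size);
		if (!*out)
			return -1;	/* -ENOMEM in the kernel */
	}
	(*out)->id = id;
	return size;
}

int main(void)
{
	struct wire_thing *buf = NULL;

	printf("need %d bytes\n", pack_thing(NULL, 0));
	if (pack_thing(&buf, 42) > 0)
		printf("packed id %llu\n", buf->id);
	free(buf);
	return 0;
}
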
200
201 static inline void osc_pack_req_body(struct ptlrpc_request *req,
202 struct obd_info *oinfo)
203 {
204 struct ost_body *body;
205
206 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
207 LASSERT(body);
208
209 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
210 oinfo->oi_oa);
211 }
212
213 static int osc_getattr_interpret(const struct lu_env *env,
214 struct ptlrpc_request *req,
215 struct osc_async_args *aa, int rc)
216 {
217 struct ost_body *body;
218
219 if (rc != 0)
220 goto out;
221
222 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
223 if (body) {
224 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
225 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
226 aa->aa_oi->oi_oa, &body->oa);
227
228 /* This should really be sent by the OST */
229 aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
230 aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
231 } else {
232 CDEBUG(D_INFO, "can't unpack ost_body\n");
233 rc = -EPROTO;
234 aa->aa_oi->oi_oa->o_valid = 0;
235 }
236 out:
237 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
238 return rc;
239 }
240
241 static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
242 struct ptlrpc_request_set *set)
243 {
244 struct ptlrpc_request *req;
245 struct osc_async_args *aa;
246 int rc;
247
248 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
249 if (!req)
250 return -ENOMEM;
251
252 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
253 if (rc) {
254 ptlrpc_request_free(req);
255 return rc;
256 }
257
258 osc_pack_req_body(req, oinfo);
259
260 ptlrpc_request_set_replen(req);
261 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
262
263 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
264 aa = ptlrpc_req_async_args(req);
265 aa->aa_oi = oinfo;
266
267 ptlrpc_set_add_req(set, req);
268 return 0;
269 }
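
osc_getattr_async() relies on ptlrpc's trick of storing per-request interpreter arguments inside the request itself: CLASSERT() proves at compile time that struct osc_async_args fits in the fixed rq_async_args scratch area, so no separate allocation or free is needed across the async round trip. A simplified userspace sketch of the same pattern (struct fake_req, its buffer size, and getattr_args are invented for illustration):

#include <stdio.h>

/* Stand-in for ptlrpc_request: a fixed scratch area rides along
 * with every request, big enough for any interpreter's args. */
struct fake_req {
	char rq_async_args[64];
};

struct getattr_args {
	int ga_cookie;
};

static void *req_async_args(struct fake_req *req)
{
	return req->rq_async_args;
}

int main(void)
{
	struct fake_req req;
	struct getattr_args *aa;

	/* compile-time fit check, like CLASSERT() in the kernel */
	_Static_assert(sizeof(struct getattr_args) <=
		       sizeof(req.rq_async_args), "args too big");

	aa = req_async_args(&req);
	aa->ga_cookie = 7;	/* filled in before send ... */

	/* ... read back later in the reply interpreter */
	printf("cookie %d\n",
	       ((struct getattr_args *)req_async_args(&req))->ga_cookie);
	return 0;
}
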
270
271 static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
272 struct obd_info *oinfo)
273 {
274 struct ptlrpc_request *req;
275 struct ost_body *body;
276 int rc;
277
278 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
279 if (!req)
280 return -ENOMEM;
281
282 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
283 if (rc) {
284 ptlrpc_request_free(req);
285 return rc;
286 }
287
288 osc_pack_req_body(req, oinfo);
289
290 ptlrpc_request_set_replen(req);
291
292 rc = ptlrpc_queue_wait(req);
293 if (rc)
294 goto out;
295
296 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
297 if (!body) {
298 rc = -EPROTO;
299 goto out;
300 }
301
302 CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
303 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
304 &body->oa);
305
306 oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
307 oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
308
309 out:
310 ptlrpc_req_finished(req);
311 return rc;
312 }
313
314 static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
315 struct obd_info *oinfo, struct obd_trans_info *oti)
316 {
317 struct ptlrpc_request *req;
318 struct ost_body *body;
319 int rc;
320
321 LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
322
323 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
324 if (!req)
325 return -ENOMEM;
326
327 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
328 if (rc) {
329 ptlrpc_request_free(req);
330 return rc;
331 }
332
333 osc_pack_req_body(req, oinfo);
334
335 ptlrpc_request_set_replen(req);
336
337 rc = ptlrpc_queue_wait(req);
338 if (rc)
339 goto out;
340
341 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
342 if (!body) {
343 rc = -EPROTO;
344 goto out;
345 }
346
347 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
348 &body->oa);
349
350 out:
351 ptlrpc_req_finished(req);
352 return rc;
353 }
354
355 static int osc_setattr_interpret(const struct lu_env *env,
356 struct ptlrpc_request *req,
357 struct osc_setattr_args *sa, int rc)
358 {
359 struct ost_body *body;
360
361 if (rc != 0)
362 goto out;
363
364 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
365 if (!body) {
366 rc = -EPROTO;
367 goto out;
368 }
369
370 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
371 &body->oa);
372 out:
373 rc = sa->sa_upcall(sa->sa_cookie, rc);
374 return rc;
375 }
376
377 int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
378 struct obd_trans_info *oti,
379 obd_enqueue_update_f upcall, void *cookie,
380 struct ptlrpc_request_set *rqset)
381 {
382 struct ptlrpc_request *req;
383 struct osc_setattr_args *sa;
384 int rc;
385
386 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
387 if (!req)
388 return -ENOMEM;
389
390 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
391 if (rc) {
392 ptlrpc_request_free(req);
393 return rc;
394 }
395
396 if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
397 oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
398
399 osc_pack_req_body(req, oinfo);
400
401 ptlrpc_request_set_replen(req);
402
403 /* do MDS-to-OST setattr asynchronously */
404 if (!rqset) {
405 /* Do not wait for response. */
406 ptlrpcd_add_req(req);
407 } else {
408 req->rq_interpret_reply =
409 (ptlrpc_interpterer_t)osc_setattr_interpret;
410
411 CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
412 sa = ptlrpc_req_async_args(req);
413 sa->sa_oa = oinfo->oi_oa;
414 sa->sa_upcall = upcall;
415 sa->sa_cookie = cookie;
416
417 if (rqset == PTLRPCD_SET)
418 ptlrpcd_add_req(req);
419 else
420 ptlrpc_set_add_req(rqset, req);
421 }
422
423 return 0;
424 }
425
426 static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
427 struct obd_trans_info *oti,
428 struct ptlrpc_request_set *rqset)
429 {
430 return osc_setattr_async_base(exp, oinfo, oti,
431 oinfo->oi_cb_up, oinfo, rqset);
432 }
433
434 static int osc_real_create(struct obd_export *exp, struct obdo *oa,
435 struct lov_stripe_md **ea,
436 struct obd_trans_info *oti)
437 {
438 struct ptlrpc_request *req;
439 struct ost_body *body;
440 struct lov_stripe_md *lsm;
441 int rc;
442
443 LASSERT(oa);
444 LASSERT(ea);
445
446 lsm = *ea;
447 if (!lsm) {
448 rc = obd_alloc_memmd(exp, &lsm);
449 if (rc < 0)
450 return rc;
451 }
452
453 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
454 if (!req) {
455 rc = -ENOMEM;
456 goto out;
457 }
458
459 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
460 if (rc) {
461 ptlrpc_request_free(req);
462 goto out;
463 }
464
465 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
466 LASSERT(body);
467
468 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
469
470 ptlrpc_request_set_replen(req);
471
472 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
473 oa->o_flags == OBD_FL_DELORPHAN) {
474 DEBUG_REQ(D_HA, req,
475 "delorphan from OST integration");
476 /* Don't resend the delorphan req */
477 req->rq_no_resend = req->rq_no_delay = 1;
478 }
479
480 rc = ptlrpc_queue_wait(req);
481 if (rc)
482 goto out_req;
483
484 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
485 if (!body) {
486 rc = -EPROTO;
487 goto out_req;
488 }
489
490 CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
491 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
492
493 oa->o_blksize = cli_brw_size(exp->exp_obd);
494 oa->o_valid |= OBD_MD_FLBLKSZ;
495
496 /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
497 * have valid lsm_oinfo data structs, so don't go touching that.
498 * This needs to be fixed in a big way.
499 */
500 lsm->lsm_oi = oa->o_oi;
501 *ea = lsm;
502
503 if (oti) {
504 oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
505
506 if (oa->o_valid & OBD_MD_FLCOOKIE) {
507 if (!oti->oti_logcookies)
508 oti_alloc_cookies(oti, 1);
509 *oti->oti_logcookies = oa->o_lcookie;
510 }
511 }
512
513 CDEBUG(D_HA, "transno: %lld\n",
514 lustre_msg_get_transno(req->rq_repmsg));
515 out_req:
516 ptlrpc_req_finished(req);
517 out:
518 if (rc && !*ea)
519 obd_free_memmd(exp, &lsm);
520 return rc;
521 }
522
523 int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
524 obd_enqueue_update_f upcall, void *cookie,
525 struct ptlrpc_request_set *rqset)
526 {
527 struct ptlrpc_request *req;
528 struct osc_setattr_args *sa;
529 struct ost_body *body;
530 int rc;
531
532 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
533 if (!req)
534 return -ENOMEM;
535
536 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
537 if (rc) {
538 ptlrpc_request_free(req);
539 return rc;
540 }
541 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
542 ptlrpc_at_set_req_timeout(req);
543
544 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
545 LASSERT(body);
546 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
547 oinfo->oi_oa);
548
549 ptlrpc_request_set_replen(req);
550
551 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
552 CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
553 sa = ptlrpc_req_async_args(req);
554 sa->sa_oa = oinfo->oi_oa;
555 sa->sa_upcall = upcall;
556 sa->sa_cookie = cookie;
557 if (rqset == PTLRPCD_SET)
558 ptlrpcd_add_req(req);
559 else
560 ptlrpc_set_add_req(rqset, req);
561
562 return 0;
563 }
564
565 static int osc_sync_interpret(const struct lu_env *env,
566 struct ptlrpc_request *req,
567 void *arg, int rc)
568 {
569 struct osc_fsync_args *fa = arg;
570 struct ost_body *body;
571
572 if (rc)
573 goto out;
574
575 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
576 if (!body) {
577 CERROR("can't unpack ost_body\n");
578 rc = -EPROTO;
579 goto out;
580 }
581
582 *fa->fa_oi->oi_oa = body->oa;
583 out:
584 rc = fa->fa_upcall(fa->fa_cookie, rc);
585 return rc;
586 }
587
588 int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
589 obd_enqueue_update_f upcall, void *cookie,
590 struct ptlrpc_request_set *rqset)
591 {
592 struct ptlrpc_request *req;
593 struct ost_body *body;
594 struct osc_fsync_args *fa;
595 int rc;
596
597 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
598 if (!req)
599 return -ENOMEM;
600
601 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
602 if (rc) {
603 ptlrpc_request_free(req);
604 return rc;
605 }
606
607 /* overload the size and blocks fields in the oa with start/end */
608 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
609 LASSERT(body);
610 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
611 oinfo->oi_oa);
612
613 ptlrpc_request_set_replen(req);
614 req->rq_interpret_reply = osc_sync_interpret;
615
616 CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
617 fa = ptlrpc_req_async_args(req);
618 fa->fa_oi = oinfo;
619 fa->fa_upcall = upcall;
620 fa->fa_cookie = cookie;
621
622 if (rqset == PTLRPCD_SET)
623 ptlrpcd_add_req(req);
624 else
625 ptlrpc_set_add_req(rqset, req);
626
627 return 0;
628 }
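
As the comment in osc_sync_base() notes, OST_SYNC overloads the size and blocks fields of the obdo to carry the byte range being synced. A hedged sketch of how a caller would encode that range before handing the obdo to osc_sync_base() (toy struct and the exact start/end-to-field mapping are assumptions, not taken from this file):

#include <stdio.h>
#include <stdint.h>

/* Toy obdo: OST_SYNC reuses o_size/o_blocks as [start, end]. */
struct toy_obdo {
	uint64_t o_size;	/* sync range start (bytes) */
	uint64_t o_blocks;	/* sync range end (bytes)   */
};

static void pack_sync_range(struct toy_obdo *oa, uint64_t start,
			    uint64_t end)
{
	oa->o_size = start;
	oa->o_blocks = end;
}

int main(void)
{
	struct toy_obdo oa;

	/* flush bytes [0, 1 MiB) of the object */
	pack_sync_range(&oa, 0, (1ULL << 20) - 1);
	printf("sync [%llu, %llu]\n",
	       (unsigned long long)oa.o_size,
	       (unsigned long long)oa.o_blocks);
	return 0;
}
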
629
630 /* Find and cancel locally the locks matched by @mode in the resource found
631 * by @objid. Found locks are added to the @cancels list. Returns the number
632 * of locks added to the @cancels list.
633 */
634 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
635 struct list_head *cancels,
636 enum ldlm_mode mode, __u64 lock_flags)
637 {
638 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
639 struct ldlm_res_id res_id;
640 struct ldlm_resource *res;
641 int count;
642
643 /* Return, i.e. cancel nothing, only if ELC is supported (flag in
644 * export) but disabled through procfs (flag in NS).
645 *
646 * This distinguishes it from the case where ELC was never supported,
647 * in which we still want to cancel locks in advance and just cancel
648 * them locally, without sending any RPC.
649 */
650 if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
651 return 0;
652
653 ostid_build_res_name(&oa->o_oi, &res_id);
654 res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
655 if (!res)
656 return 0;
657
658 LDLM_RESOURCE_ADDREF(res);
659 count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
660 lock_flags, 0, NULL);
661 LDLM_RESOURCE_DELREF(res);
662 ldlm_resource_putref(res);
663 return count;
664 }
665
666 static int osc_destroy_interpret(const struct lu_env *env,
667 struct ptlrpc_request *req, void *data,
668 int rc)
669 {
670 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
671
672 atomic_dec(&cli->cl_destroy_in_flight);
673 wake_up(&cli->cl_destroy_waitq);
674 return 0;
675 }
676
677 static int osc_can_send_destroy(struct client_obd *cli)
678 {
679 if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
680 cli->cl_max_rpcs_in_flight) {
681 /* The destroy request can be sent */
682 return 1;
683 }
684 if (atomic_dec_return(&cli->cl_destroy_in_flight) <
685 cli->cl_max_rpcs_in_flight) {
686 /*
687 * The counter has been modified between the two atomic
688 * operations.
689 */
690 wake_up(&cli->cl_destroy_waitq);
691 }
692 return 0;
693 }
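
osc_can_send_destroy() is a lock-free admission check: increment first, and if that overshoots the limit, undo the increment; the second comparison catches a concurrent decrement that landed between the two atomic operations, so no waiter misses its wake-up. A compact userspace sketch of the same idea using C11 atomics (the limit and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 8

static atomic_int in_flight;

/* Returns 1 if the caller may send now, 0 if it must wait.
 * Mirrors osc_can_send_destroy(): inc, check, undo on failure. */
static int can_send(void)
{
	if (atomic_fetch_add(&in_flight, 1) + 1 <= MAX_IN_FLIGHT)
		return 1;
	if (atomic_fetch_sub(&in_flight, 1) - 1 < MAX_IN_FLIGHT) {
		/* a slot was freed between our inc and dec: in the
		 * kernel this is where the waitqueue gets woken */
	}
	return 0;
}

int main(void)
{
	int sent = 0;

	for (int i = 0; i < 10; i++)
		sent += can_send();
	printf("%d of 10 admitted\n", sent);	/* prints 8 */
	return 0;
}
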
694
695 static int osc_create(const struct lu_env *env, struct obd_export *exp,
696 struct obdo *oa, struct lov_stripe_md **ea,
697 struct obd_trans_info *oti)
698 {
699 int rc = 0;
700
701 LASSERT(oa);
702 LASSERT(ea);
703 LASSERT(oa->o_valid & OBD_MD_FLGROUP);
704
705 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
706 oa->o_flags == OBD_FL_RECREATE_OBJS) {
707 return osc_real_create(exp, oa, ea, oti);
708 }
709
710 if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
711 return osc_real_create(exp, oa, ea, oti);
712
713 /* we should not get here anymore */
714 LBUG();
715
716 return rc;
717 }
718
719 /* Destroy requests can always be async on the client, and we don't even
720 * really care about the return code, since the client cannot do anything
721 * at all about a destroy failure.
722 * When the MDS is unlinking a filename, it saves the file objects into a
723 * recovery llog, and these object records are cancelled when the OST reports
724 * they were destroyed and sync'd to disk (i.e. transaction committed).
725 * If the client dies, or the OST is down when the object should be destroyed,
726 * the records are not cancelled, and when the OST next reconnects to the MDS,
727 * it will retrieve the llog unlink logs and then send the log cancellation
728 * cookies to the MDS after committing the destroy transactions.
729 */
730 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
731 struct obdo *oa, struct lov_stripe_md *ea,
732 struct obd_trans_info *oti, struct obd_export *md_export)
733 {
734 struct client_obd *cli = &exp->exp_obd->u.cli;
735 struct ptlrpc_request *req;
736 struct ost_body *body;
737 LIST_HEAD(cancels);
738 int rc, count;
739
740 if (!oa) {
741 CDEBUG(D_INFO, "oa NULL\n");
742 return -EINVAL;
743 }
744
745 count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
746 LDLM_FL_DISCARD_DATA);
747
748 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
749 if (!req) {
750 ldlm_lock_list_put(&cancels, l_bl_ast, count);
751 return -ENOMEM;
752 }
753
754 rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
755 0, &cancels, count);
756 if (rc) {
757 ptlrpc_request_free(req);
758 return rc;
759 }
760
761 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
762 ptlrpc_at_set_req_timeout(req);
763
764 if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
765 oa->o_lcookie = *oti->oti_logcookies;
766 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
767 LASSERT(body);
768 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
769
770 ptlrpc_request_set_replen(req);
771
772 /* If osc_destroy is for destroying the unlink orphan, it is sent from
773 * the MDT to the OST, and should not be blocked here, because the
774 * process might be triggered by ptlrpcd, and it is not good to block
775 * the ptlrpcd thread (b=16006).
776 */
777 if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
778 req->rq_interpret_reply = osc_destroy_interpret;
779 if (!osc_can_send_destroy(cli)) {
780 struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
781 NULL);
782
783 /*
784 * Wait until the number of ongoing destroy RPCs drops
785 * below max_rpcs_in_flight
786 */
787 l_wait_event_exclusive(cli->cl_destroy_waitq,
788 osc_can_send_destroy(cli), &lwi);
789 }
790 }
791
792 /* Do not wait for response */
793 ptlrpcd_add_req(req);
794 return 0;
795 }
796
797 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
798 long writing_bytes)
799 {
800 u32 bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
801
802 LASSERT(!(oa->o_valid & bits));
803
804 oa->o_valid |= bits;
805 spin_lock(&cli->cl_loi_list_lock);
806 oa->o_dirty = cli->cl_dirty;
807 if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
808 cli->cl_dirty_max)) {
809 CERROR("dirty %lu - %lu > dirty_max %lu\n",
810 cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
811 oa->o_undirty = 0;
812 } else if (unlikely(atomic_read(&obd_unstable_pages) +
813 atomic_read(&obd_dirty_pages) -
814 atomic_read(&obd_dirty_transit_pages) >
815 (long)(obd_max_dirty_pages + 1))) {
816 /* The atomic_read() and the atomic_inc() are not
817 * covered by a lock, thus they may safely race and trip
818 * this CERROR() unless we add in a small fudge factor (+1).
819 */
820 CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
821 cli->cl_import->imp_obd->obd_name,
822 atomic_read(&obd_unstable_pages),
823 atomic_read(&obd_dirty_pages),
824 atomic_read(&obd_dirty_transit_pages),
825 obd_max_dirty_pages);
826 oa->o_undirty = 0;
827 } else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
828 CERROR("dirty %lu - dirty_max %lu too big???\n",
829 cli->cl_dirty, cli->cl_dirty_max);
830 oa->o_undirty = 0;
831 } else {
832 long max_in_flight = (cli->cl_max_pages_per_rpc <<
833 PAGE_SHIFT)*
834 (cli->cl_max_rpcs_in_flight + 1);
835 oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
836 }
837 oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
838 oa->o_dropped = cli->cl_lost_grant;
839 cli->cl_lost_grant = 0;
840 spin_unlock(&cli->cl_loi_list_lock);
841 CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
842 oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
843 }
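
The o_undirty value advertised in the final branch of osc_announce_cached() is "room for a full pipeline": max_in_flight is one RPC more than cl_max_rpcs_in_flight worth of max-sized RPCs, and the client advertises whichever of that and dirty_max is larger. A worked example, assuming a 4 KiB PAGE_SIZE, 256 pages per RPC, 8 RPCs in flight, and a 32 MiB dirty_max (all assumed values):

#include <stdio.h>

int main(void)
{
	long page_size = 4096;			/* 1 << PAGE_SHIFT */
	long max_pages_per_rpc = 256;		/* 1 MiB RPCs      */
	long max_rpcs_in_flight = 8;
	long dirty_max = 32L << 20;		/* 32 MiB          */

	long max_in_flight = (max_pages_per_rpc * page_size) *
			     (max_rpcs_in_flight + 1);	/* 9 MiB */
	long o_undirty = dirty_max > max_in_flight ?
			 dirty_max : max_in_flight;	/* max() */

	printf("max_in_flight = %ld MiB, o_undirty = %ld MiB\n",
	       max_in_flight >> 20, o_undirty >> 20);
	return 0;
}
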
844
845 void osc_update_next_shrink(struct client_obd *cli)
846 {
847 cli->cl_next_shrink_grant =
848 cfs_time_shift(cli->cl_grant_shrink_interval);
849 CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
850 cli->cl_next_shrink_grant);
851 }
852
853 static void __osc_update_grant(struct client_obd *cli, u64 grant)
854 {
855 spin_lock(&cli->cl_loi_list_lock);
856 cli->cl_avail_grant += grant;
857 spin_unlock(&cli->cl_loi_list_lock);
858 }
859
860 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
861 {
862 if (body->oa.o_valid & OBD_MD_FLGRANT) {
863 CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
864 __osc_update_grant(cli, body->oa.o_grant);
865 }
866 }
867
868 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
869 u32 keylen, void *key, u32 vallen,
870 void *val, struct ptlrpc_request_set *set);
871
872 static int osc_shrink_grant_interpret(const struct lu_env *env,
873 struct ptlrpc_request *req,
874 void *aa, int rc)
875 {
876 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
877 struct obdo *oa = ((struct osc_brw_async_args *)aa)->aa_oa;
878 struct ost_body *body;
879
880 if (rc != 0) {
881 __osc_update_grant(cli, oa->o_grant);
882 goto out;
883 }
884
885 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
886 LASSERT(body);
887 osc_update_grant(cli, body);
888 out:
889 kmem_cache_free(obdo_cachep, oa);
890 return rc;
891 }
892
893 static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
894 {
895 spin_lock(&cli->cl_loi_list_lock);
896 oa->o_grant = cli->cl_avail_grant / 4;
897 cli->cl_avail_grant -= oa->o_grant;
898 spin_unlock(&cli->cl_loi_list_lock);
899 if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
900 oa->o_valid |= OBD_MD_FLFLAGS;
901 oa->o_flags = 0;
902 }
903 oa->o_flags |= OBD_FL_SHRINK_GRANT;
904 osc_update_next_shrink(cli);
905 }
906
907 /* Shrink the current grant, either from some large amount to enough for a
908 * full set of in-flight RPCs, or if we have already shrunk to that limit
909 * then to enough for a single RPC. This avoids keeping more grant than
910 * needed, and avoids shrinking the grant piecemeal.
911 */
912 static int osc_shrink_grant(struct client_obd *cli)
913 {
914 __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
915 (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
916
917 spin_lock(&cli->cl_loi_list_lock);
918 if (cli->cl_avail_grant <= target_bytes)
919 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
920 spin_unlock(&cli->cl_loi_list_lock);
921
922 return osc_shrink_grant_to_target(cli, target_bytes);
923 }
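
With the same assumed numbers, the two-level policy described above works out as follows: the first shrink drops avail_grant to a full pipeline's worth (9 MiB), and a later shrink, finding avail_grant already at or below that, drops to a single RPC (1 MiB). A small sketch of the target computation in osc_shrink_grant():

#include <stdio.h>

static long shrink_target(long avail, long rpc_bytes, long in_flight)
{
	long target = (in_flight + 1) * rpc_bytes;	/* full pipeline */

	if (avail <= target)
		target = rpc_bytes;	/* already small: one RPC's worth */
	return target;
}

int main(void)
{
	long rpc = 1L << 20;		/* 1 MiB per RPC, assumed */

	printf("%ld MiB\n", shrink_target(64L << 20, rpc, 8) >> 20); /* 9 */
	printf("%ld MiB\n", shrink_target(9L << 20, rpc, 8) >> 20);  /* 1 */
	return 0;
}
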
924
925 int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
926 {
927 int rc = 0;
928 struct ost_body *body;
929
930 spin_lock(&cli->cl_loi_list_lock);
931 /* Don't shrink if we are already above or below the desired limit
932 * We don't want to shrink below a single RPC, as that will negatively
933 * impact block allocation and long-term performance.
934 */
935 if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
936 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
937
938 if (target_bytes >= cli->cl_avail_grant) {
939 spin_unlock(&cli->cl_loi_list_lock);
940 return 0;
941 }
942 spin_unlock(&cli->cl_loi_list_lock);
943
944 body = kzalloc(sizeof(*body), GFP_NOFS);
945 if (!body)
946 return -ENOMEM;
947
948 osc_announce_cached(cli, &body->oa, 0);
949
950 spin_lock(&cli->cl_loi_list_lock);
951 body->oa.o_grant = cli->cl_avail_grant - target_bytes;
952 cli->cl_avail_grant = target_bytes;
953 spin_unlock(&cli->cl_loi_list_lock);
954 if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
955 body->oa.o_valid |= OBD_MD_FLFLAGS;
956 body->oa.o_flags = 0;
957 }
958 body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
959 osc_update_next_shrink(cli);
960
961 rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
962 sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
963 sizeof(*body), body, NULL);
964 if (rc != 0)
965 __osc_update_grant(cli, body->oa.o_grant);
966 kfree(body);
967 return rc;
968 }
969
970 static int osc_should_shrink_grant(struct client_obd *client)
971 {
972 unsigned long time = cfs_time_current();
973 unsigned long next_shrink = client->cl_next_shrink_grant;
974
975 if ((client->cl_import->imp_connect_data.ocd_connect_flags &
976 OBD_CONNECT_GRANT_SHRINK) == 0)
977 return 0;
978
979 if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
980 /* Get the current RPC size directly, instead of going via:
981 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
982 * Keep comment here so that it can be found by searching.
983 */
984 int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
985
986 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
987 client->cl_avail_grant > brw_size)
988 return 1;
989
990 osc_update_next_shrink(client);
991 }
992 return 0;
993 }
994
995 static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
996 {
997 struct client_obd *client;
998
999 list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
1000 if (osc_should_shrink_grant(client))
1001 osc_shrink_grant(client);
1002 }
1003 return 0;
1004 }
1005
1006 static int osc_add_shrink_grant(struct client_obd *client)
1007 {
1008 int rc;
1009
1010 rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
1011 TIMEOUT_GRANT,
1012 osc_grant_shrink_grant_cb, NULL,
1013 &client->cl_grant_shrink_list);
1014 if (rc) {
1015 CERROR("add grant client %s error %d\n",
1016 client->cl_import->imp_obd->obd_name, rc);
1017 return rc;
1018 }
1019 CDEBUG(D_CACHE, "add grant client %s\n",
1020 client->cl_import->imp_obd->obd_name);
1021 osc_update_next_shrink(client);
1022 return 0;
1023 }
1024
1025 static int osc_del_shrink_grant(struct client_obd *client)
1026 {
1027 return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
1028 TIMEOUT_GRANT);
1029 }
1030
1031 static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1032 {
1033 /*
1034 * ocd_grant is the total grant amount we expect to hold: if we've
1035 * been evicted, it's the new avail_grant amount, cl_dirty will drop
1036 * to 0 as inflight RPCs fail out; otherwise, it's avail_grant + dirty.
1037 *
1038 * race is tolerable here: if we're evicted, but imp_state already
1039 * left EVICTED state, then cl_dirty must be 0 already.
1040 */
1041 spin_lock(&cli->cl_loi_list_lock);
1042 if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
1043 cli->cl_avail_grant = ocd->ocd_grant;
1044 else
1045 cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
1046
1047 if (cli->cl_avail_grant < 0) {
1048 CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
1049 cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
1050 ocd->ocd_grant, cli->cl_dirty);
1051 /* workaround for servers which do not have the patch from
1052 * LU-2679
1053 */
1054 cli->cl_avail_grant = ocd->ocd_grant;
1055 }
1056
1057 /* determine the appropriate chunk size used by osc_extent. */
1058 cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
1059 spin_unlock(&cli->cl_loi_list_lock);
1060
1061 CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
1062 cli->cl_import->imp_obd->obd_name,
1063 cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
1064
1065 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
1066 list_empty(&cli->cl_grant_shrink_list))
1067 osc_add_shrink_grant(cli);
1068 }
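
The distinction in osc_init_grant() is easiest to see with numbers: if the server granted 16 MiB total and the client holds 4 MiB of dirty data, a normal (re)connect leaves 12 MiB available, while a reconnect after eviction resets available grant to the full 16 MiB, since the in-flight RPCs will fail out and cl_dirty will drop to zero. A sketch (all figures assumed):

#include <stdio.h>
#include <stdbool.h>

static long init_avail_grant(long ocd_grant, long dirty, bool evicted)
{
	/* evicted: in-flight RPCs will fail, dirty goes to 0 */
	return evicted ? ocd_grant : ocd_grant - dirty;
}

int main(void)
{
	long grant = 16L << 20, dirty = 4L << 20;

	printf("normal:  %ld MiB\n",
	       init_avail_grant(grant, dirty, false) >> 20);	/* 12 */
	printf("evicted: %ld MiB\n",
	       init_avail_grant(grant, dirty, true) >> 20);	/* 16 */
	return 0;
}
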
1069
1070 /* We assume that this OSC got a short read because it read beyond
1071 * the end of a stripe object; i.e. Lustre is reading a sparse file
1072 * via the LOV, and it _knows_ it's reading inside the file, it's just
1073 * that this stripe never got written at or beyond this stripe offset yet.
1074 */
1075 static void handle_short_read(int nob_read, u32 page_count,
1076 struct brw_page **pga)
1077 {
1078 char *ptr;
1079 int i = 0;
1080
1081 /* skip bytes read OK */
1082 while (nob_read > 0) {
1083 LASSERT(page_count > 0);
1084
1085 if (pga[i]->count > nob_read) {
1086 /* EOF inside this page */
1087 ptr = kmap(pga[i]->pg) +
1088 (pga[i]->off & ~PAGE_MASK);
1089 memset(ptr + nob_read, 0, pga[i]->count - nob_read);
1090 kunmap(pga[i]->pg);
1091 page_count--;
1092 i++;
1093 break;
1094 }
1095
1096 nob_read -= pga[i]->count;
1097 page_count--;
1098 i++;
1099 }
1100
1101 /* zero remaining pages */
1102 while (page_count-- > 0) {
1103 ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
1104 memset(ptr, 0, pga[i]->count);
1105 kunmap(pga[i]->pg);
1106 i++;
1107 }
1108 }
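
For example, a 3-page read that comes back with nob_read = 5000 bytes (assumed figures): page 0 (4096 bytes) is fully valid, page 1 keeps its first 904 bytes and is zeroed from there, and page 2 is zeroed entirely. A standalone sketch of the same tail-zeroing walk, with plain buffers standing in for kmap'd pages:

#include <stdio.h>
#include <string.h>

#define PAGE 4096

int main(void)
{
	char pages[3][PAGE];
	int counts[3] = { PAGE, PAGE, PAGE };
	int nob_read = 5000, i = 0;

	memset(pages, 'x', sizeof(pages));

	/* skip bytes read OK; zero the tail of the boundary page */
	while (nob_read > 0 && i < 3) {
		if (counts[i] > nob_read) {
			memset(pages[i] + nob_read, 0,
			       counts[i] - nob_read);
			i++;
			break;
		}
		nob_read -= counts[i++];
	}
	/* zero the remaining whole pages */
	for (; i < 3; i++)
		memset(pages[i], 0, counts[i]);

	printf("page1[903]=%d page1[904]=%d page2[0]=%d\n",
	       pages[1][903], pages[1][904], pages[2][0]);
	return 0;
}
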
1109
1110 static int check_write_rcs(struct ptlrpc_request *req,
1111 int requested_nob, int niocount,
1112 u32 page_count, struct brw_page **pga)
1113 {
1114 int i;
1115 __u32 *remote_rcs;
1116
1117 remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
1118 sizeof(*remote_rcs) *
1119 niocount);
1120 if (!remote_rcs) {
1121 CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
1122 return -EPROTO;
1123 }
1124
1125 /* return error if any niobuf was in error */
1126 for (i = 0; i < niocount; i++) {
1127 if ((int)remote_rcs[i] < 0)
1128 return remote_rcs[i];
1129
1130 if (remote_rcs[i] != 0) {
1131 CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
1132 i, remote_rcs[i], req);
1133 return -EPROTO;
1134 }
1135 }
1136
1137 if (req->rq_bulk->bd_nob_transferred != requested_nob) {
1138 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
1139 req->rq_bulk->bd_nob_transferred, requested_nob);
1140 return -EPROTO;
1141 }
1142
1143 return 0;
1144 }
1145
1146 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
1147 {
1148 if (p1->flag != p2->flag) {
1149 unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
1150 OBD_BRW_SYNC | OBD_BRW_ASYNC |
1151 OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
1152
1153 /* warn if we try to combine flags that we don't know to be
1154 * safe to combine
1155 */
1156 if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
1157 CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
1158 p1->flag, p2->flag);
1159 }
1160 return 0;
1161 }
1162
1163 return (p1->off + p1->count == p2->off);
1164 }
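
Two pages merge into one remote niobuf only when their flags match exactly and they are byte-contiguous; the mask in can_merge_pages() merely decides whether a flag mismatch is suspicious enough to warn about. A toy illustration (flag values and the "safe to mix" set are invented):

#include <stdio.h>

struct toy_pg { unsigned long long off; unsigned count; unsigned flag; };

#define FLAG_NOISE 0x3	/* flags known safe to combine, assumed */

static int can_merge(const struct toy_pg *p1, const struct toy_pg *p2)
{
	if (p1->flag != p2->flag) {
		/* warn only when flags outside the known-safe set differ */
		if ((p1->flag & ~FLAG_NOISE) != (p2->flag & ~FLAG_NOISE))
			fprintf(stderr, "suspicious flag mix %x/%x\n",
				p1->flag, p2->flag);
		return 0;	/* any difference prevents merging */
	}
	return p1->off + p1->count == p2->off;
}

int main(void)
{
	struct toy_pg a = { 0, 4096, 0x1 };
	struct toy_pg b = { 4096, 4096, 0x1 };	/* contiguous, same flags */
	struct toy_pg c = { 12288, 4096, 0x1 };	/* gap: starts a new niobuf */

	printf("a+b: %d, b+c: %d\n", can_merge(&a, &b), can_merge(&b, &c));
	return 0;
}
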
1165
1166 static u32 osc_checksum_bulk(int nob, u32 pg_count,
1167 struct brw_page **pga, int opc,
1168 enum cksum_type cksum_type)
1169 {
1170 __u32 cksum;
1171 int i = 0;
1172 struct cfs_crypto_hash_desc *hdesc;
1173 unsigned int bufsize;
1174 int err;
1175 unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
1176
1177 LASSERT(pg_count > 0);
1178
1179 hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
1180 if (IS_ERR(hdesc)) {
1181 CERROR("Unable to initialize checksum hash %s\n",
1182 cfs_crypto_hash_name(cfs_alg));
1183 return PTR_ERR(hdesc);
1184 }
1185
1186 while (nob > 0 && pg_count > 0) {
1187 int count = pga[i]->count > nob ? nob : pga[i]->count;
1188
1189 /* corrupt the data before we compute the checksum, to
1190 * simulate an OST->client data error
1191 */
1192 if (i == 0 && opc == OST_READ &&
1193 OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
1194 unsigned char *ptr = kmap(pga[i]->pg);
1195 int off = pga[i]->off & ~PAGE_MASK;
1196
1197 memcpy(ptr + off, "bad1", min(4, nob));
1198 kunmap(pga[i]->pg);
1199 }
1200 cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
1201 pga[i]->off & ~PAGE_MASK,
1202 count);
1203 CDEBUG(D_PAGE,
1204 "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
1205 pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
1206 (long)pga[i]->pg->flags, page_count(pga[i]->pg),
1207 page_private(pga[i]->pg),
1208 (int)(pga[i]->off & ~PAGE_MASK));
1209
1210 nob -= pga[i]->count;
1211 pg_count--;
1212 i++;
1213 }
1214
1215 bufsize = sizeof(cksum);
1216 err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
1217
1218 /* For sending, we only compute a wrong checksum instead
1219 * of corrupting the data, so it is still correct on a redo.
1220 */
1221 if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
1222 cksum++;
1223
1224 return cksum;
1225 }
1226
1227 static int osc_brw_prep_request(int cmd, struct client_obd *cli,
1228 struct obdo *oa,
1229 struct lov_stripe_md *lsm, u32 page_count,
1230 struct brw_page **pga,
1231 struct ptlrpc_request **reqp,
1232 int reserve,
1233 int resend)
1234 {
1235 struct ptlrpc_request *req;
1236 struct ptlrpc_bulk_desc *desc;
1237 struct ost_body *body;
1238 struct obd_ioobj *ioobj;
1239 struct niobuf_remote *niobuf;
1240 int niocount, i, requested_nob, opc, rc;
1241 struct osc_brw_async_args *aa;
1242 struct req_capsule *pill;
1243 struct brw_page *pg_prev;
1244
1245 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1246 return -ENOMEM; /* Recoverable */
1247 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1248 return -EINVAL; /* Fatal */
1249
1250 if ((cmd & OBD_BRW_WRITE) != 0) {
1251 opc = OST_WRITE;
1252 req = ptlrpc_request_alloc_pool(cli->cl_import,
1253 osc_rq_pool,
1254 &RQF_OST_BRW_WRITE);
1255 } else {
1256 opc = OST_READ;
1257 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1258 }
1259 if (!req)
1260 return -ENOMEM;
1261
1262 for (niocount = i = 1; i < page_count; i++) {
1263 if (!can_merge_pages(pga[i - 1], pga[i]))
1264 niocount++;
1265 }
1266
1267 pill = &req->rq_pill;
1268 req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1269 sizeof(*ioobj));
1270 req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1271 niocount * sizeof(*niobuf));
1272
1273 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1274 if (rc) {
1275 ptlrpc_request_free(req);
1276 return rc;
1277 }
1278 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1279 ptlrpc_at_set_req_timeout(req);
1280 /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1281 * retry logic
1282 */
1283 req->rq_no_retry_einprogress = 1;
1284
1285 desc = ptlrpc_prep_bulk_imp(req, page_count,
1286 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1287 opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
1288 OST_BULK_PORTAL);
1289
1290 if (!desc) {
1291 rc = -ENOMEM;
1292 goto out;
1293 }
1294 /* NB request now owns desc and will free it when it gets freed */
1295
1296 body = req_capsule_client_get(pill, &RMF_OST_BODY);
1297 ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1298 niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1299 LASSERT(body && ioobj && niobuf);
1300
1301 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1302
1303 obdo_to_ioobj(oa, ioobj);
1304 ioobj->ioo_bufcnt = niocount;
1305 /* The high bits of ioo_max_brw tell the server the _maximum_ number of
1306 * bulks that might be sent for this request. The actual number is decided
1307 * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
1308 * "max - 1" for compatibility with old clients sending "0", and also so
1309 * that the actual maximum is a power-of-two number, not one less. LU-1431
1310 */
1311 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1312 LASSERT(page_count > 0);
1313 pg_prev = pga[0];
1314 for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1315 struct brw_page *pg = pga[i];
1316 int poff = pg->off & ~PAGE_MASK;
1317
1318 LASSERT(pg->count > 0);
1319 /* make sure there is no gap in the middle of page array */
1320 LASSERTF(page_count == 1 ||
1321 (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1322 ergo(i > 0 && i < page_count - 1,
1323 poff == 0 && pg->count == PAGE_SIZE) &&
1324 ergo(i == page_count - 1, poff == 0)),
1325 "i: %d/%d pg: %p off: %llu, count: %u\n",
1326 i, page_count, pg, pg->off, pg->count);
1327 LASSERTF(i == 0 || pg->off > pg_prev->off,
1328 "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
1329 i, page_count,
1330 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1331 pg_prev->pg, page_private(pg_prev->pg),
1332 pg_prev->pg->index, pg_prev->off);
1333 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1334 (pg->flag & OBD_BRW_SRVLOCK));
1335
1336 ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
1337 requested_nob += pg->count;
1338
1339 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1340 niobuf--;
1341 niobuf->len += pg->count;
1342 } else {
1343 niobuf->offset = pg->off;
1344 niobuf->len = pg->count;
1345 niobuf->flags = pg->flag;
1346 }
1347 pg_prev = pg;
1348 }
1349
1350 LASSERTF((void *)(niobuf - niocount) ==
1351 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1352 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1353 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1354
1355 osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1356 if (resend) {
1357 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1358 body->oa.o_valid |= OBD_MD_FLFLAGS;
1359 body->oa.o_flags = 0;
1360 }
1361 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1362 }
1363
1364 if (osc_should_shrink_grant(cli))
1365 osc_shrink_grant_local(cli, &body->oa);
1366
1367 /* size[REQ_REC_OFF] still sizeof (*body) */
1368 if (opc == OST_WRITE) {
1369 if (cli->cl_checksum &&
1370 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1371 /* store cl_cksum_type in a local variable since
1372 * it can be changed via lprocfs
1373 */
1374 enum cksum_type cksum_type = cli->cl_cksum_type;
1375
1376 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1377 oa->o_flags &= OBD_FL_LOCAL_MASK;
1378 body->oa.o_flags = 0;
1379 }
1380 body->oa.o_flags |= cksum_type_pack(cksum_type);
1381 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1382 body->oa.o_cksum = osc_checksum_bulk(requested_nob,
1383 page_count, pga,
1384 OST_WRITE,
1385 cksum_type);
1386 CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1387 body->oa.o_cksum);
1388 /* save this in 'oa', too, for later checking */
1389 oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1390 oa->o_flags |= cksum_type_pack(cksum_type);
1391 } else {
1392 /* clear out the checksum flag, in case this is a
1393 * resend but cl_checksum is no longer set. b=11238
1394 */
1395 oa->o_valid &= ~OBD_MD_FLCKSUM;
1396 }
1397 oa->o_cksum = body->oa.o_cksum;
1398 /* 1 RC per niobuf */
1399 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1400 sizeof(__u32) * niocount);
1401 } else {
1402 if (cli->cl_checksum &&
1403 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1404 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1405 body->oa.o_flags = 0;
1406 body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
1407 body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1408 }
1409 }
1410 ptlrpc_request_set_replen(req);
1411
1412 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1413 aa = ptlrpc_req_async_args(req);
1414 aa->aa_oa = oa;
1415 aa->aa_requested_nob = requested_nob;
1416 aa->aa_nio_count = niocount;
1417 aa->aa_page_count = page_count;
1418 aa->aa_resends = 0;
1419 aa->aa_ppga = pga;
1420 aa->aa_cli = cli;
1421 INIT_LIST_HEAD(&aa->aa_oaps);
1422
1423 *reqp = req;
1424 return 0;
1425
1426 out:
1427 ptlrpc_req_finished(req);
1428 return rc;
1429 }
1430
1431 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1432 __u32 client_cksum, __u32 server_cksum, int nob,
1433 u32 page_count, struct brw_page **pga,
1434 enum cksum_type client_cksum_type)
1435 {
1436 __u32 new_cksum;
1437 char *msg;
1438 enum cksum_type cksum_type;
1439
1440 if (server_cksum == client_cksum) {
1441 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1442 return 0;
1443 }
1444
1445 cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1446 oa->o_flags : 0);
1447 new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1448 cksum_type);
1449
1450 if (cksum_type != client_cksum_type)
1451 msg = "the server did not use the checksum type specified in the original request - likely a protocol problem"
1452 ;
1453 else if (new_cksum == server_cksum)
1454 msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)"
1455 ;
1456 else if (new_cksum == client_cksum)
1457 msg = "changed in transit before arrival at OST";
1458 else
1459 msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)"
1460 ;
1461
1462 LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1463 " object "DOSTID" extent [%llu-%llu]\n",
1464 msg, libcfs_nid2str(peer->nid),
1465 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1466 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1467 oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1468 POSTID(&oa->o_oi), pga[0]->off,
1469 pga[page_count-1]->off + pga[page_count-1]->count - 1);
1470 CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
1471 client_cksum, client_cksum_type,
1472 server_cksum, cksum_type, new_cksum);
1473 return 1;
1474 }
1475
1476 /* Note rc enters this function as number of bytes transferred */
1477 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1478 {
1479 struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1480 const lnet_process_id_t *peer =
1481 &req->rq_import->imp_connection->c_peer;
1482 struct client_obd *cli = aa->aa_cli;
1483 struct ost_body *body;
1484 __u32 client_cksum = 0;
1485
1486 if (rc < 0 && rc != -EDQUOT) {
1487 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1488 return rc;
1489 }
1490
1491 LASSERTF(req->rq_repmsg, "rc = %d\n", rc);
1492 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1493 if (!body) {
1494 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1495 return -EPROTO;
1496 }
1497
1498 /* set/clear over quota flag for a uid/gid */
1499 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1500 body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1501 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1502
1503 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid %#llx, flags %x\n",
1504 body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1505 body->oa.o_flags);
1506 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
1507 }
1508
1509 osc_update_grant(cli, body);
1510
1511 if (rc < 0)
1512 return rc;
1513
1514 if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1515 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1516
1517 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1518 if (rc > 0) {
1519 CERROR("Unexpected +ve rc %d\n", rc);
1520 return -EPROTO;
1521 }
1522 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1523
1524 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1525 return -EAGAIN;
1526
1527 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1528 check_write_checksum(&body->oa, peer, client_cksum,
1529 body->oa.o_cksum, aa->aa_requested_nob,
1530 aa->aa_page_count, aa->aa_ppga,
1531 cksum_type_unpack(aa->aa_oa->o_flags)))
1532 return -EAGAIN;
1533
1534 rc = check_write_rcs(req, aa->aa_requested_nob,
1535 aa->aa_nio_count,
1536 aa->aa_page_count, aa->aa_ppga);
1537 goto out;
1538 }
1539
1540 /* The rest of this function executes only for OST_READs */
1541
1542 /* if unwrap_bulk failed, return -EAGAIN to retry */
1543 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1544 if (rc < 0) {
1545 rc = -EAGAIN;
1546 goto out;
1547 }
1548
1549 if (rc > aa->aa_requested_nob) {
1550 CERROR("Unexpected rc %d (%d requested)\n", rc,
1551 aa->aa_requested_nob);
1552 return -EPROTO;
1553 }
1554
1555 if (rc != req->rq_bulk->bd_nob_transferred) {
1556 CERROR("Unexpected rc %d (%d transferred)\n",
1557 rc, req->rq_bulk->bd_nob_transferred);
1558 return -EPROTO;
1559 }
1560
1561 if (rc < aa->aa_requested_nob)
1562 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1563
1564 if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1565 static int cksum_counter;
1566 __u32 server_cksum = body->oa.o_cksum;
1567 char *via = "";
1568 char *router = "";
1569 enum cksum_type cksum_type;
1570
1571 cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ?
1572 body->oa.o_flags : 0);
1573 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1574 aa->aa_ppga, OST_READ,
1575 cksum_type);
1576
1577 if (peer->nid != req->rq_bulk->bd_sender) {
1578 via = " via ";
1579 router = libcfs_nid2str(req->rq_bulk->bd_sender);
1580 }
1581
1582 if (server_cksum != client_cksum) {
1583 LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
1584 req->rq_import->imp_obd->obd_name,
1585 libcfs_nid2str(peer->nid),
1586 via, router,
1587 body->oa.o_valid & OBD_MD_FLFID ?
1588 body->oa.o_parent_seq : (__u64)0,
1589 body->oa.o_valid & OBD_MD_FLFID ?
1590 body->oa.o_parent_oid : 0,
1591 body->oa.o_valid & OBD_MD_FLFID ?
1592 body->oa.o_parent_ver : 0,
1593 POSTID(&body->oa.o_oi),
1594 aa->aa_ppga[0]->off,
1595 aa->aa_ppga[aa->aa_page_count-1]->off +
1596 aa->aa_ppga[aa->aa_page_count-1]->count -
1597 1);
1598 CERROR("client %x, server %x, cksum_type %x\n",
1599 client_cksum, server_cksum, cksum_type);
1600 cksum_counter = 0;
1601 aa->aa_oa->o_cksum = client_cksum;
1602 rc = -EAGAIN;
1603 } else {
1604 cksum_counter++;
1605 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1606 rc = 0;
1607 }
1608 } else if (unlikely(client_cksum)) {
1609 static int cksum_missed;
1610
1611 cksum_missed++;
1612 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1613 CERROR("Checksum %u requested from %s but not sent\n",
1614 cksum_missed, libcfs_nid2str(peer->nid));
1615 } else {
1616 rc = 0;
1617 }
1618 out:
1619 if (rc >= 0)
1620 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
1621 aa->aa_oa, &body->oa);
1622
1623 return rc;
1624 }
1625
1626 static int osc_brw_redo_request(struct ptlrpc_request *request,
1627 struct osc_brw_async_args *aa, int rc)
1628 {
1629 struct ptlrpc_request *new_req;
1630 struct osc_brw_async_args *new_aa;
1631 struct osc_async_page *oap;
1632
1633 DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
1634 "redo for recoverable error %d", rc);
1635
1636 rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1637 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
1638 aa->aa_cli, aa->aa_oa,
1639 NULL /* lsm unused by osc currently */,
1640 aa->aa_page_count, aa->aa_ppga,
1641 &new_req, 0, 1);
1642 if (rc)
1643 return rc;
1644
1645 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1646 if (oap->oap_request) {
1647 LASSERTF(request == oap->oap_request,
1648 "request %p != oap_request %p\n",
1649 request, oap->oap_request);
1650 if (oap->oap_interrupted) {
1651 ptlrpc_req_finished(new_req);
1652 return -EINTR;
1653 }
1654 }
1655 }
1656 /* New request takes over pga and oaps from old request.
1657 * Note that copying a list_head doesn't work, need to move it...
1658 */
1659 aa->aa_resends++;
1660 new_req->rq_interpret_reply = request->rq_interpret_reply;
1661 new_req->rq_async_args = request->rq_async_args;
1662 new_req->rq_commit_cb = request->rq_commit_cb;
1663 /* cap resend delay to the current request timeout, this is similar to
1664 * what ptlrpc does (see after_reply())
1665 */
1666 if (aa->aa_resends > new_req->rq_timeout)
1667 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
1668 else
1669 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
1670 new_req->rq_generation_set = 1;
1671 new_req->rq_import_generation = request->rq_import_generation;
1672
1673 new_aa = ptlrpc_req_async_args(new_req);
1674
1675 INIT_LIST_HEAD(&new_aa->aa_oaps);
1676 list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
1677 INIT_LIST_HEAD(&new_aa->aa_exts);
1678 list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
1679 new_aa->aa_resends = aa->aa_resends;
1680
1681 list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1682 if (oap->oap_request) {
1683 ptlrpc_req_finished(oap->oap_request);
1684 oap->oap_request = ptlrpc_request_addref(new_req);
1685 }
1686 }
1687
1688 /* XXX: This code will run into problems if we're going to support
1689 * adding a series of BRW RPCs into a self-defined ptlrpc_request_set
1690 * and waiting for all of them to finish. We should inherit the request
1691 * set from the old request.
1692 */
1693 ptlrpcd_add_req(new_req);
1694
1695 DEBUG_REQ(D_INFO, new_req, "new request");
1696 return 0;
1697 }
1698
1699 /*
1700 * Ugh, we want disk allocation on the target to happen in offset order. We'll
1701 * follow Sedgewick's advice and stick to the dead-simple shellsort -- it'll do
1702 * fine for our small page arrays and doesn't require allocation. It's an
1703 * insertion sort that swaps elements that are strides apart, shrinking the
1704 * stride down until it's 1 and the array is sorted.
1705 */
1706 static void sort_brw_pages(struct brw_page **array, int num)
1707 {
1708 int stride, i, j;
1709 struct brw_page *tmp;
1710
1711 if (num == 1)
1712 return;
1713 for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1714 ;
1715
1716 do {
1717 stride /= 3;
1718 for (i = stride ; i < num ; i++) {
1719 tmp = array[i];
1720 j = i;
1721 while (j >= stride && array[j - stride]->off > tmp->off) {
1722 array[j] = array[j - stride];
1723 j -= stride;
1724 }
1725 array[j] = tmp;
1726 }
1727 } while (stride > 1);
1728 }
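
The stride sequence 1, 4, 13, 40, ... grown by sort_brw_pages() is the 3x+1 gap sequence Sedgewick recommends for small arrays. A quick standalone check of the same shellsort applied to bare offsets (toy types, not the kernel's brw_page):

#include <stdio.h>

static void shellsort(unsigned long long *off, int num)
{
	int stride, i, j;

	for (stride = 1; stride < num; stride = stride * 3 + 1)
		;
	do {
		stride /= 3;
		for (i = stride; i < num; i++) {
			unsigned long long tmp = off[i];

			for (j = i; j >= stride && off[j - stride] > tmp;
			     j -= stride)
				off[j] = off[j - stride];
			off[j] = tmp;
		}
	} while (stride > 1);
}

int main(void)
{
	unsigned long long off[] = { 12288, 0, 8192, 4096 };

	shellsort(off, 4);
	for (int i = 0; i < 4; i++)
		printf("%llu ", off[i]);	/* 0 4096 8192 12288 */
	printf("\n");
	return 0;
}
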
1729
1730 static void osc_release_ppga(struct brw_page **ppga, u32 count)
1731 {
1732 LASSERT(ppga);
1733 kfree(ppga);
1734 }
1735
1736 static int brw_interpret(const struct lu_env *env,
1737 struct ptlrpc_request *req, void *data, int rc)
1738 {
1739 struct osc_brw_async_args *aa = data;
1740 struct osc_extent *ext;
1741 struct osc_extent *tmp;
1742 struct client_obd *cli = aa->aa_cli;
1743
1744 rc = osc_brw_fini_request(req, rc);
1745 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
1746 /* When the server returns -EINPROGRESS, the client should always retry,
1747 * regardless of the number of times the bulk was already resent.
1748 */
1749 if (osc_recoverable_error(rc)) {
1750 if (req->rq_import_generation !=
1751 req->rq_import->imp_generation) {
1752 CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
1753 req->rq_import->imp_obd->obd_name,
1754 POSTID(&aa->aa_oa->o_oi), rc);
1755 } else if (rc == -EINPROGRESS ||
1756 client_should_resend(aa->aa_resends, aa->aa_cli)) {
1757 rc = osc_brw_redo_request(req, aa, rc);
1758 } else {
1759 CERROR("%s: too many resent retries for object: %llu:%llu, rc = %d.\n",
1760 req->rq_import->imp_obd->obd_name,
1761 POSTID(&aa->aa_oa->o_oi), rc);
1762 }
1763
1764 if (rc == 0)
1765 return 0;
1766 else if (rc == -EAGAIN || rc == -EINPROGRESS)
1767 rc = -EIO;
1768 }
1769
1770 if (rc == 0) {
1771 struct obdo *oa = aa->aa_oa;
1772 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
1773 unsigned long valid = 0;
1774 struct cl_object *obj;
1775 struct osc_async_page *last;
1776
1777 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
1778 obj = osc2cl(last->oap_obj);
1779
1780 cl_object_attr_lock(obj);
1781 if (oa->o_valid & OBD_MD_FLBLOCKS) {
1782 attr->cat_blocks = oa->o_blocks;
1783 valid |= CAT_BLOCKS;
1784 }
1785 if (oa->o_valid & OBD_MD_FLMTIME) {
1786 attr->cat_mtime = oa->o_mtime;
1787 valid |= CAT_MTIME;
1788 }
1789 if (oa->o_valid & OBD_MD_FLATIME) {
1790 attr->cat_atime = oa->o_atime;
1791 valid |= CAT_ATIME;
1792 }
1793 if (oa->o_valid & OBD_MD_FLCTIME) {
1794 attr->cat_ctime = oa->o_ctime;
1795 valid |= CAT_CTIME;
1796 }
1797
1798 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1799 struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
1800 loff_t last_off = last->oap_count + last->oap_obj_off;
1801
1802 /* Change the file size if this is an out-of-quota or
1803 * direct-IO write and it extends the file size
1804 */
1805 if (loi->loi_lvb.lvb_size < last_off) {
1806 attr->cat_size = last_off;
1807 valid |= CAT_SIZE;
1808 }
1809 /* Extend KMS if it's not a lockless write */
1810 if (loi->loi_kms < last_off &&
1811 oap2osc_page(last)->ops_srvlock == 0) {
1812 attr->cat_kms = last_off;
1813 valid |= CAT_KMS;
1814 }
1815 }
1816
1817 if (valid != 0)
1818 cl_object_attr_set(env, obj, attr, valid);
1819 cl_object_attr_unlock(obj);
1820 }
1821 kmem_cache_free(obdo_cachep, aa->aa_oa);
1822
1823 list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
1824 list_del_init(&ext->oe_link);
1825 osc_extent_finish(env, ext, 1, rc);
1826 }
1827 LASSERT(list_empty(&aa->aa_exts));
1828 LASSERT(list_empty(&aa->aa_oaps));
1829
1830 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
1831 req->rq_bulk->bd_nob_transferred);
1832 osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
1833 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
1834
1835 spin_lock(&cli->cl_loi_list_lock);
1836 /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
1837 * is called so we know whether to go to sync BRWs or wait for more
1838 * RPCs to complete
1839 */
1840 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
1841 cli->cl_w_in_flight--;
1842 else
1843 cli->cl_r_in_flight--;
1844 osc_wake_cache_waiters(cli);
1845 spin_unlock(&cli->cl_loi_list_lock);
1846
1847 osc_io_unplug(env, cli, NULL);
1848 return rc;
1849 }
1850
1851 static void brw_commit(struct ptlrpc_request *req)
1852 {
1853 spin_lock(&req->rq_lock);
1854 /*
1855 * If osc_inc_unstable_pages (via osc_extent_finish) races with
1856 * this function, called via rq_commit_cb, we need to ensure
1857 * osc_dec_unstable_pages is still called. Otherwise unstable
1858 * pages may be leaked.
1859 */
1860 if (req->rq_unstable) {
1861 spin_unlock(&req->rq_lock);
1862 osc_dec_unstable_pages(req);
1863 spin_lock(&req->rq_lock);
1864 } else {
1865 req->rq_committed = 1;
1866 }
1867 spin_unlock(&req->rq_lock);
1868 }
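
/*
 * Illustrative sketch (not part of the original driver): the pattern
 * brw_commit() uses above -- test a flag under a spinlock, but drop
 * the lock around the slow-path helper so that helper may take other
 * locks itself. struct my_req and my_dec_unstable() are hypothetical.
 */
#include <linux/spinlock.h>

struct my_req {
	spinlock_t lock;
	int unstable;
	int committed;
};

static void my_dec_unstable(struct my_req *r)
{
	/* slow path: may take other locks; r->lock must not be held */
}

static void my_commit(struct my_req *r)
{
	spin_lock(&r->lock);
	if (r->unstable) {
		spin_unlock(&r->lock);
		my_dec_unstable(r);
		spin_lock(&r->lock);
	} else {
		r->committed = 1;
	}
	spin_unlock(&r->lock);
}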
1869
1870 /**
1871 * Build an RPC from the list of extents @ext_list. The caller must ensure
1872 * that the total number of pages in this list does not exceed max pages per RPC.
1873 * Extents in the list must be in OES_RPC state.
1874 */
1875 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
1876 struct list_head *ext_list, int cmd)
1877 {
1878 struct ptlrpc_request *req = NULL;
1879 struct osc_extent *ext;
1880 struct brw_page **pga = NULL;
1881 struct osc_brw_async_args *aa = NULL;
1882 struct obdo *oa = NULL;
1883 struct osc_async_page *oap;
1884 struct osc_async_page *tmp;
1885 struct cl_req *clerq = NULL;
1886 enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
1887 struct ldlm_lock *lock = NULL;
1888 struct cl_req_attr *crattr = NULL;
1889 u64 starting_offset = OBD_OBJECT_EOF;
1890 u64 ending_offset = 0;
1891 int mpflag = 0;
1892 int mem_tight = 0;
1893 int page_count = 0;
1894 int i;
1895 int rc;
1896 struct ost_body *body;
1897 LIST_HEAD(rpc_list);
1898
1899 LASSERT(!list_empty(ext_list));
1900
1901 /* add pages into rpc_list to build BRW rpc */
1902 list_for_each_entry(ext, ext_list, oe_link) {
1903 LASSERT(ext->oe_state == OES_RPC);
1904 mem_tight |= ext->oe_memalloc;
1905 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
1906 ++page_count;
1907 list_add_tail(&oap->oap_rpc_item, &rpc_list);
1908 if (starting_offset > oap->oap_obj_off)
1909 starting_offset = oap->oap_obj_off;
1910 else
1911 LASSERT(oap->oap_page_off == 0);
1912 if (ending_offset < oap->oap_obj_off + oap->oap_count)
1913 ending_offset = oap->oap_obj_off +
1914 oap->oap_count;
1915 else
1916 LASSERT(oap->oap_page_off + oap->oap_count ==
1917 PAGE_SIZE);
1918 }
1919 }
1920
1921 if (mem_tight)
1922 mpflag = cfs_memory_pressure_get_and_set();
1923
1924 crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
1925 if (!crattr) {
1926 rc = -ENOMEM;
1927 goto out;
1928 }
1929
1930 pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
1931 if (!pga) {
1932 rc = -ENOMEM;
1933 goto out;
1934 }
1935
1936 oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
1937 if (!oa) {
1938 rc = -ENOMEM;
1939 goto out;
1940 }
1941
1942 i = 0;
1943 list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
1944 struct cl_page *page = oap2cl_page(oap);
1945
1946 if (!clerq) {
1947 clerq = cl_req_alloc(env, page, crt,
1948 1 /* only 1-object rpcs for now */);
1949 if (IS_ERR(clerq)) {
1950 rc = PTR_ERR(clerq);
1951 goto out;
1952 }
1953 lock = oap->oap_ldlm_lock;
1954 }
1955 if (mem_tight)
1956 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
1957 pga[i] = &oap->oap_brw_page;
1958 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
1959 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
1960 pga[i]->pg, oap->oap_page->index, oap,
1961 pga[i]->flag);
1962 i++;
1963 cl_req_page_add(env, clerq, page);
1964 }
1965
1966 /* always get the data for the obdo for the rpc */
1967 LASSERT(clerq);
1968 crattr->cra_oa = oa;
1969 cl_req_attr_set(env, clerq, crattr, ~0ULL);
1970 if (lock) {
1971 oa->o_handle = lock->l_remote_handle;
1972 oa->o_valid |= OBD_MD_FLHANDLE;
1973 }
1974
1975 rc = cl_req_prep(env, clerq);
1976 if (rc != 0) {
1977 CERROR("cl_req_prep failed: %d\n", rc);
1978 goto out;
1979 }
1980
1981 sort_brw_pages(pga, page_count);
1982 rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
1983 pga, &req, 1, 0);
1984 if (rc != 0) {
1985 CERROR("prep_req failed: %d\n", rc);
1986 goto out;
1987 }
1988
1989 req->rq_commit_cb = brw_commit;
1990 req->rq_interpret_reply = brw_interpret;
1991
1992 if (mem_tight != 0)
1993 req->rq_memalloc = 1;
1994
1995 /* Need to update the timestamps after the request is built in case
1996 * we race with setattr (locally or in queue at OST). If OST gets
1997 * later setattr before earlier BRW (as determined by the request xid),
1998 * the OST will not use BRW timestamps. Sadly, there is no obvious
1999 * way to do this in a single call. bug 10150
2000 */
2001 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2002 crattr->cra_oa = &body->oa;
2003 cl_req_attr_set(env, clerq, crattr,
2004 OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2005
2006 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2007
2008 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2009 aa = ptlrpc_req_async_args(req);
2010 INIT_LIST_HEAD(&aa->aa_oaps);
2011 list_splice_init(&rpc_list, &aa->aa_oaps);
2012 INIT_LIST_HEAD(&aa->aa_exts);
2013 list_splice_init(ext_list, &aa->aa_exts);
2014 aa->aa_clerq = clerq;
2015
2016 /* queued sync pages can be torn down while the pages
2017 * are between the pending list and the rpc
2018 */
2019 tmp = NULL;
2020 list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2021 /* only one oap gets a request reference */
2022 if (!tmp)
2023 tmp = oap;
2024 if (oap->oap_interrupted && !req->rq_intr) {
2025 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2026 oap, req);
2027 ptlrpc_mark_interrupted(req);
2028 }
2029 }
2030 if (tmp)
2031 tmp->oap_request = ptlrpc_request_addref(req);
2032
2033 spin_lock(&cli->cl_loi_list_lock);
2034 starting_offset >>= PAGE_SHIFT;
2035 if (cmd == OBD_BRW_READ) {
2036 cli->cl_r_in_flight++;
2037 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2038 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2039 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2040 starting_offset + 1);
2041 } else {
2042 cli->cl_w_in_flight++;
2043 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2044 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2045 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2046 starting_offset + 1);
2047 }
2048 spin_unlock(&cli->cl_loi_list_lock);
2049
2050 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2051 page_count, aa, cli->cl_r_in_flight,
2052 cli->cl_w_in_flight);
2053
2054 ptlrpcd_add_req(req);
2055 rc = 0;
2056
2057 out:
2058 if (mem_tight != 0)
2059 cfs_memory_pressure_restore(mpflag);
2060
2061 kfree(crattr);
2062
2063 if (rc != 0) {
2064 LASSERT(!req);
2065
2066 if (oa)
2067 kmem_cache_free(obdo_cachep, oa);
2068 kfree(pga);
2069 /* this should rarely happen and is pretty bad: it makes the
2070 * pending list stop following the dirty order
2071 */
2072 while (!list_empty(ext_list)) {
2073 ext = list_entry(ext_list->next, struct osc_extent,
2074 oe_link);
2075 list_del_init(&ext->oe_link);
2076 osc_extent_finish(env, ext, 0, rc);
2077 }
2078 if (clerq && !IS_ERR(clerq))
2079 cl_req_completion(env, clerq, rc);
2080 }
2081 return rc;
2082 }
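
/*
 * Illustrative sketch (not part of the original driver): the
 * rq_async_args idiom used by osc_build_rpc() above -- per-request
 * scratch space reused by casting, guarded by a compile-time size
 * check (CLASSERT in Lustre, BUILD_BUG_ON in mainline).
 * struct my_request and struct my_args are hypothetical.
 */
#include <linux/bug.h>

struct my_request {
	unsigned long long async_args[10];	/* opaque scratch area */
};

struct my_args {
	void *cookie;
	int resends;
};

static struct my_args *my_req_args(struct my_request *req)
{
	BUILD_BUG_ON(sizeof(struct my_args) > sizeof(req->async_args));
	return (struct my_args *)req->async_args;
}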
2083
2084 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
2085 struct ldlm_enqueue_info *einfo)
2086 {
2087 void *data = einfo->ei_cbdata;
2088 int set = 0;
2089
2090 LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
2091 LASSERT(lock->l_resource->lr_type == einfo->ei_type);
2092 LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
2093 LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
2094
2095 lock_res_and_lock(lock);
2096
2097 if (!lock->l_ast_data)
2098 lock->l_ast_data = data;
2099 if (lock->l_ast_data == data)
2100 set = 1;
2101
2102 unlock_res_and_lock(lock);
2103
2104 return set;
2105 }
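
/*
 * Illustrative sketch (not part of the original driver): the set-once
 * idiom of osc_set_lock_data_with_check() above -- install a value
 * into a shared field only if it is unset, and report whether the
 * caller's value is the one in place, all under the owning lock.
 * struct my_obj is hypothetical.
 */
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t lock;
	void *ast_data;
};

static int my_set_data_with_check(struct my_obj *o, void *data)
{
	int set = 0;

	spin_lock(&o->lock);
	if (!o->ast_data)
		o->ast_data = data;
	if (o->ast_data == data)
		set = 1;
	spin_unlock(&o->lock);

	return set;
}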
2106
2107 static int osc_set_data_with_check(struct lustre_handle *lockh,
2108 struct ldlm_enqueue_info *einfo)
2109 {
2110 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2111 int set = 0;
2112
2113 if (lock) {
2114 set = osc_set_lock_data_with_check(lock, einfo);
2115 LDLM_LOCK_PUT(lock);
2116 } else
2117 CERROR("lockh %p, data %p - client evicted?\n",
2118 lockh, einfo->ei_cbdata);
2119 return set;
2120 }
2121
2122 /* Find any LDLM lock of the inode in OSC.
2123 * Return 0 if no lock is found,
2124 * 1 if one lock is found,
2125 * < 0 on error.
2126 */
2127 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
2128 ldlm_iterator_t replace, void *data)
2129 {
2130 struct ldlm_res_id res_id;
2131 struct obd_device *obd = class_exp2obd(exp);
2132 int rc = 0;
2133
2134 ostid_build_res_name(&lsm->lsm_oi, &res_id);
2135 rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
2136 if (rc == LDLM_ITER_STOP)
2137 return 1;
2138 if (rc == LDLM_ITER_CONTINUE)
2139 return 0;
2140 return rc;
2141 }
2142
2143 static int osc_enqueue_fini(struct ptlrpc_request *req,
2144 osc_enqueue_upcall_f upcall, void *cookie,
2145 struct lustre_handle *lockh, enum ldlm_mode mode,
2146 __u64 *flags, int agl, int errcode)
2147 {
2148 bool intent = *flags & LDLM_FL_HAS_INTENT;
2149 int rc;
2150
2151 /* The request was created before ldlm_cli_enqueue call. */
2152 if (intent && errcode == ELDLM_LOCK_ABORTED) {
2153 struct ldlm_reply *rep;
2154
2155 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2156
2157 rep->lock_policy_res1 =
2158 ptlrpc_status_ntoh(rep->lock_policy_res1);
2159 if (rep->lock_policy_res1)
2160 errcode = rep->lock_policy_res1;
2161 if (!agl)
2162 *flags |= LDLM_FL_LVB_READY;
2163 } else if (errcode == ELDLM_OK) {
2164 *flags |= LDLM_FL_LVB_READY;
2165 }
2166
2167 /* Call the update callback. */
2168 rc = (*upcall)(cookie, lockh, errcode);
2169 /* release the reference taken in ldlm_cli_enqueue() */
2170 if (errcode == ELDLM_LOCK_MATCHED)
2171 errcode = ELDLM_OK;
2172 if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2173 ldlm_lock_decref(lockh, mode);
2174
2175 return rc;
2176 }
2177
2178 static int osc_enqueue_interpret(const struct lu_env *env,
2179 struct ptlrpc_request *req,
2180 struct osc_enqueue_args *aa, int rc)
2181 {
2182 struct ldlm_lock *lock;
2183 struct lustre_handle *lockh = &aa->oa_lockh;
2184 enum ldlm_mode mode = aa->oa_mode;
2185 struct ost_lvb *lvb = aa->oa_lvb;
2186 __u32 lvb_len = sizeof(*lvb);
2187 __u64 flags = 0;
2188
2190 /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2191 * be valid.
2192 */
2193 lock = ldlm_handle2lock(lockh);
2194 LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n",
2195 lockh->cookie, req, aa);
2196
2197 /* Take an additional reference so that a blocking AST that
2198 * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2199 * to arrive after an upcall has been executed by
2200 * osc_enqueue_fini().
2201 */
2202 ldlm_lock_addref(lockh, mode);
2203
2204 /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2205 OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2206
2207 /* Let the CP AST grant the lock first. */
2208 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2209
2210 if (aa->oa_agl) {
2211 LASSERT(!aa->oa_lvb);
2212 LASSERT(!aa->oa_flags);
2213 aa->oa_flags = &flags;
2214 }
2215
2216 /* Complete the lock-obtaining procedure. */
2217 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
2218 aa->oa_mode, aa->oa_flags, lvb, lvb_len,
2219 lockh, rc);
2220 /* Complete osc stuff. */
2221 rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2222 aa->oa_flags, aa->oa_agl, rc);
2223
2224 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2225
2226 ldlm_lock_decref(lockh, mode);
2227 LDLM_LOCK_PUT(lock);
2228 return rc;
2229 }
2230
2231 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
2232
2233 /* When enqueuing asynchronously, locks are not ordered: we can obtain a lock
2234 * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2235 * other synchronous requests; however, holding some locks while trying to
2236 * obtain others may take a considerable amount of time in case of OST failure,
2237 * and when other sync requests cannot get a lock released by a client, that
2238 * client is evicted from the cluster -- such scenarios make life difficult, so
2239 * release locks just after they are obtained.
2240 */
2241 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2242 __u64 *flags, ldlm_policy_data_t *policy,
2243 struct ost_lvb *lvb, int kms_valid,
2244 osc_enqueue_upcall_f upcall, void *cookie,
2245 struct ldlm_enqueue_info *einfo,
2246 struct ptlrpc_request_set *rqset, int async, int agl)
2247 {
2248 struct obd_device *obd = exp->exp_obd;
2249 struct lustre_handle lockh = { 0 };
2250 struct ptlrpc_request *req = NULL;
2251 int intent = *flags & LDLM_FL_HAS_INTENT;
2252 __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
2253 enum ldlm_mode mode;
2254 int rc;
2255
2256 /* Filesystem lock extents are extended to page boundaries so that
2257 * dealing with the page cache is a little smoother.
2258 */
2259 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2260 policy->l_extent.end |= ~PAGE_MASK;
2261
2262 /*
2263 * kms is not valid when either object is completely fresh (so that no
2264 * locks are cached), or object was evicted. In the latter case cached
2265 * lock cannot be used, because it would prime inode state with
2266 * potentially stale LVB.
2267 */
2268 if (!kms_valid)
2269 goto no_match;
2270
2271 /* Next, search for already existing extent locks that will cover us */
2272 /* If we're trying to read, we also search for an existing PW lock. The
2273 * VFS and page cache already protect us locally, so lots of readers/
2274 * writers can share a single PW lock.
2275 *
2276 * There are problems with conversion deadlocks, so instead of
2277 * converting a read lock to a write lock, we'll just enqueue a new
2278 * one.
2279 *
2280 * At some point we should cancel the read lock instead of making them
2281 * send us a blocking callback, but there are problems with canceling
2282 * locks out from other users right now, too.
2283 */
2284 mode = einfo->ei_mode;
2285 if (einfo->ei_mode == LCK_PR)
2286 mode |= LCK_PW;
2287 mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
2288 einfo->ei_type, policy, mode, &lockh, 0);
2289 if (mode) {
2290 struct ldlm_lock *matched;
2291
2292 if (*flags & LDLM_FL_TEST_LOCK)
2293 return ELDLM_OK;
2294
2295 matched = ldlm_handle2lock(&lockh);
2296 if (agl) {
2297 /* AGL enqueues DLM locks speculatively. Therefore if
2298 * a DLM lock already exists, it will just inform the
2299 * caller to cancel the AGL process for this stripe.
2300 */
2301 ldlm_lock_decref(&lockh, mode);
2302 LDLM_LOCK_PUT(matched);
2303 return -ECANCELED;
2304 } else if (osc_set_lock_data_with_check(matched, einfo)) {
2305 *flags |= LDLM_FL_LVB_READY;
2306 /* We already have a lock, and it's referenced. */
2307 (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2308
2309 ldlm_lock_decref(&lockh, mode);
2310 LDLM_LOCK_PUT(matched);
2311 return ELDLM_OK;
2312 } else {
2313 ldlm_lock_decref(&lockh, mode);
2314 LDLM_LOCK_PUT(matched);
2315 }
2316 }
2317
2318 no_match:
2319 if (*flags & LDLM_FL_TEST_LOCK)
2320 return -ENOLCK;
2321 if (intent) {
2322 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2323 &RQF_LDLM_ENQUEUE_LVB);
2324 if (!req)
2325 return -ENOMEM;
2326
2327 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
2328 if (rc) {
2329 ptlrpc_request_free(req);
2330 return rc;
2331 }
2332
2333 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2334 sizeof(*lvb));
2335 ptlrpc_request_set_replen(req);
2336 }
2337
2338 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2339 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2340
2341 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2342 sizeof(*lvb), LVB_T_OST, &lockh, async);
2343 if (async) {
2344 if (!rc) {
2345 struct osc_enqueue_args *aa;
2346
2347 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2348 aa = ptlrpc_req_async_args(req);
2349 aa->oa_exp = exp;
2350 aa->oa_mode = einfo->ei_mode;
2351 aa->oa_type = einfo->ei_type;
2352 lustre_handle_copy(&aa->oa_lockh, &lockh);
2353 aa->oa_upcall = upcall;
2354 aa->oa_cookie = cookie;
2355 aa->oa_agl = !!agl;
2356 if (!agl) {
2357 aa->oa_flags = flags;
2358 aa->oa_lvb = lvb;
2359 } else {
2360 /* AGL essentially enqueues a DLM lock
2361 * in advance, so we don't care about the
2362 * result of AGL enqueue.
2363 */
2364 aa->oa_lvb = NULL;
2365 aa->oa_flags = NULL;
2366 }
2367
2368 req->rq_interpret_reply =
2369 (ptlrpc_interpterer_t)osc_enqueue_interpret;
2370 if (rqset == PTLRPCD_SET)
2371 ptlrpcd_add_req(req);
2372 else
2373 ptlrpc_set_add_req(rqset, req);
2374 } else if (intent) {
2375 ptlrpc_req_finished(req);
2376 }
2377 return rc;
2378 }
2379
2380 rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2381 flags, agl, rc);
2382 if (intent)
2383 ptlrpc_req_finished(req);
2384
2385 return rc;
2386 }
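
/*
 * Illustrative sketch (not part of the original driver): the
 * page-boundary rounding applied to lock extents in osc_enqueue_base()
 * above and osc_match_base() below -- round start down to its page
 * boundary and extend end to the last byte of its page.
 */
#include <linux/mm.h>

static void my_round_extent(unsigned long long *start,
			    unsigned long long *end)
{
	*start -= *start & ~PAGE_MASK;	/* i.e. *start &= PAGE_MASK */
	*end |= ~PAGE_MASK;		/* last byte of end's page */
}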
2387
2388 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2389 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
2390 __u64 *flags, void *data, struct lustre_handle *lockh,
2391 int unref)
2392 {
2393 struct obd_device *obd = exp->exp_obd;
2394 __u64 lflags = *flags;
2395 enum ldlm_mode rc;
2396
2397 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2398 return -EIO;
2399
2400 /* Filesystem lock extents are extended to page boundaries so that
2401 * dealing with the page cache is a little smoother
2402 */
2403 policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2404 policy->l_extent.end |= ~PAGE_MASK;
2405
2406 /* Next, search for already existing extent locks that will cover us */
2407 /* If we're trying to read, we also search for an existing PW lock. The
2408 * VFS and page cache already protect us locally, so lots of readers/
2409 * writers can share a single PW lock.
2410 */
2411 rc = mode;
2412 if (mode == LCK_PR)
2413 rc |= LCK_PW;
2414 rc = ldlm_lock_match(obd->obd_namespace, lflags,
2415 res_id, type, policy, rc, lockh, unref);
2416 if (rc) {
2417 if (data) {
2418 if (!osc_set_data_with_check(lockh, data)) {
2419 if (!(lflags & LDLM_FL_TEST_LOCK))
2420 ldlm_lock_decref(lockh, rc);
2421 return 0;
2422 }
2423 }
2424 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
2425 ldlm_lock_addref(lockh, LCK_PR);
2426 ldlm_lock_decref(lockh, LCK_PW);
2427 }
2428 return rc;
2429 }
2430 return rc;
2431 }
2432
2433 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
2434 {
2435 if (unlikely(mode == LCK_GROUP))
2436 ldlm_lock_decref_and_cancel(lockh, mode);
2437 else
2438 ldlm_lock_decref(lockh, mode);
2439
2440 return 0;
2441 }
2442
2443 static int osc_statfs_interpret(const struct lu_env *env,
2444 struct ptlrpc_request *req,
2445 struct osc_async_args *aa, int rc)
2446 {
2447 struct obd_statfs *msfs;
2448
2449 if (rc == -EBADR)
2450 /* The request has in fact never been sent
2451 * due to issues at a higher level (LOV).
2452 * Exit immediately since the caller is
2453 * aware of the problem and takes care
2454 * of the cleanup
2455 */
2456 return rc;
2457
2458 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2459 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
2460 rc = 0;
2461 goto out;
2462 }
2463
2464 if (rc != 0)
2465 goto out;
2466
2467 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2468 if (!msfs) {
2469 rc = -EPROTO;
2470 goto out;
2471 }
2472
2473 *aa->aa_oi->oi_osfs = *msfs;
2474 out:
2475 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2476 return rc;
2477 }
2478
2479 static int osc_statfs_async(struct obd_export *exp,
2480 struct obd_info *oinfo, __u64 max_age,
2481 struct ptlrpc_request_set *rqset)
2482 {
2483 struct obd_device *obd = class_exp2obd(exp);
2484 struct ptlrpc_request *req;
2485 struct osc_async_args *aa;
2486 int rc;
2487
2488 /* We could possibly pass max_age in the request (as an absolute
2489 * timestamp or a "seconds.usec ago") so the target can avoid doing
2490 * extra calls into the filesystem if that isn't necessary (e.g.
2491 * during mount that would help a bit). Having relative timestamps
2492 * is not so great if request processing is slow, while absolute
2493 * timestamps are not ideal because they need time synchronization.
2494 */
2495 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2496 if (!req)
2497 return -ENOMEM;
2498
2499 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2500 if (rc) {
2501 ptlrpc_request_free(req);
2502 return rc;
2503 }
2504 ptlrpc_request_set_replen(req);
2505 req->rq_request_portal = OST_CREATE_PORTAL;
2506 ptlrpc_at_set_req_timeout(req);
2507
2508 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
2509 /* procfs requests must not wait for statfs, to avoid deadlock */
2510 req->rq_no_resend = 1;
2511 req->rq_no_delay = 1;
2512 }
2513
2514 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
2515 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2516 aa = ptlrpc_req_async_args(req);
2517 aa->aa_oi = oinfo;
2518
2519 ptlrpc_set_add_req(rqset, req);
2520 return 0;
2521 }
2522
2523 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
2524 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
2525 {
2526 struct obd_device *obd = class_exp2obd(exp);
2527 struct obd_statfs *msfs;
2528 struct ptlrpc_request *req;
2529 struct obd_import *imp = NULL;
2530 int rc;
2531
2532 /* Since the request might also come from lprocfs, we need to
2533 * sync this with client_disconnect_export() (Bug 15684)
2534 */
2535 down_read(&obd->u.cli.cl_sem);
2536 if (obd->u.cli.cl_import)
2537 imp = class_import_get(obd->u.cli.cl_import);
2538 up_read(&obd->u.cli.cl_sem);
2539 if (!imp)
2540 return -ENODEV;
2541
2542 /* We could possibly pass max_age in the request (as an absolute
2543 * timestamp or a "seconds.usec ago") so the target can avoid doing
2544 * extra calls into the filesystem if that isn't necessary (e.g.
2545 * during mount that would help a bit). Having relative timestamps
2546 * is not so great if request processing is slow, while absolute
2547 * timestamps are not ideal because they need time synchronization.
2548 */
2549 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
2550
2551 class_import_put(imp);
2552
2553 if (!req)
2554 return -ENOMEM;
2555
2556 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2557 if (rc) {
2558 ptlrpc_request_free(req);
2559 return rc;
2560 }
2561 ptlrpc_request_set_replen(req);
2562 req->rq_request_portal = OST_CREATE_PORTAL;
2563 ptlrpc_at_set_req_timeout(req);
2564
2565 if (flags & OBD_STATFS_NODELAY) {
2566 /* procfs requests must not wait for statfs, to avoid deadlock */
2567 req->rq_no_resend = 1;
2568 req->rq_no_delay = 1;
2569 }
2570
2571 rc = ptlrpc_queue_wait(req);
2572 if (rc)
2573 goto out;
2574
2575 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2576 if (!msfs) {
2577 rc = -EPROTO;
2578 goto out;
2579 }
2580
2581 *osfs = *msfs;
2582
2583 out:
2584 ptlrpc_req_finished(req);
2585 return rc;
2586 }
2587
2588 /* Retrieve object striping information.
2589 *
2590 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
2591 * the maximum number of OST indices which will fit in the user buffer.
2592 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
2593 */
2594 static int osc_getstripe(struct lov_stripe_md *lsm,
2595 struct lov_user_md __user *lump)
2596 {
2597 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
2598 struct lov_user_md_v3 lum, *lumk;
2599 struct lov_user_ost_data_v1 *lmm_objects;
2600 int rc = 0, lum_size;
2601
2602 if (!lsm)
2603 return -ENODATA;
2604
2605 /* we only need the header part from user space to get lmm_magic and
2606 * lmm_stripe_count (the header part is common to v1 and v3)
2607 */
2608 lum_size = sizeof(struct lov_user_md_v1);
2609 if (copy_from_user(&lum, lump, lum_size))
2610 return -EFAULT;
2611
2612 if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
2613 (lum.lmm_magic != LOV_USER_MAGIC_V3))
2614 return -EINVAL;
2615
2616 /* lov_user_md_vX and lov_mds_md_vX must have the same size */
2617 LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
2618 LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
2619 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
2620
2621 /* we can use lov_mds_md_size() to compute lum_size
2622 * because lov_user_md_vX and lov_mds_md_vX have the same size
2623 */
2624 if (lum.lmm_stripe_count > 0) {
2625 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
2626 lumk = kzalloc(lum_size, GFP_NOFS);
2627 if (!lumk)
2628 return -ENOMEM;
2629
2630 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
2631 lmm_objects =
2632 &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
2633 else
2634 lmm_objects = &(lumk->lmm_objects[0]);
2635 lmm_objects->l_ost_oi = lsm->lsm_oi;
2636 } else {
2637 lum_size = lov_mds_md_size(0, lum.lmm_magic);
2638 lumk = &lum;
2639 }
2640
2641 lumk->lmm_oi = lsm->lsm_oi;
2642 lumk->lmm_stripe_count = 1;
2643
2644 if (copy_to_user(lump, lumk, lum_size))
2645 rc = -EFAULT;
2646
2647 if (lumk != &lum)
2648 kfree(lumk);
2649
2650 return rc;
2651 }
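
/*
 * Illustrative sketch (not part of the original driver): the two-phase
 * copy_from_user() pattern of osc_getstripe() above -- read only the
 * small common header first to learn the record count, then size the
 * full copy accordingly. struct my_hdr and MY_MAX_COUNT are
 * hypothetical; the bound check guards the size computation.
 */
#include <linux/slab.h>
#include <linux/uaccess.h>

#define MY_MAX_COUNT 2000	/* arbitrary sanity bound */

struct my_hdr {
	unsigned int magic;
	unsigned int count;
};

static int my_read_user_record(void __user *uptr, struct my_hdr *hdr,
			       void **full)
{
	size_t size;
	void *buf;

	if (copy_from_user(hdr, uptr, sizeof(*hdr)))
		return -EFAULT;
	if (hdr->count > MY_MAX_COUNT)
		return -EINVAL;

	size = sizeof(*hdr) + hdr->count * sizeof(unsigned long long);
	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, uptr, size)) {
		kfree(buf);
		return -EFAULT;
	}
	*full = buf;
	return 0;
}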
2652
2653 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2654 void *karg, void __user *uarg)
2655 {
2656 struct obd_device *obd = exp->exp_obd;
2657 struct obd_ioctl_data *data = karg;
2658 int err = 0;
2659
2660 if (!try_module_get(THIS_MODULE)) {
2661 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
2662 module_name(THIS_MODULE));
2663 return -EINVAL;
2664 }
2665 switch (cmd) {
2666 case OBD_IOC_LOV_GET_CONFIG: {
2667 char *buf;
2668 struct lov_desc *desc;
2669 struct obd_uuid uuid;
2670
2671 buf = NULL;
2672 len = 0;
2673 if (obd_ioctl_getdata(&buf, &len, uarg)) {
2674 err = -EINVAL;
2675 goto out;
2676 }
2677
2678 data = (struct obd_ioctl_data *)buf;
2679
2680 if (sizeof(*desc) > data->ioc_inllen1) {
2681 obd_ioctl_freedata(buf, len);
2682 err = -EINVAL;
2683 goto out;
2684 }
2685
2686 if (data->ioc_inllen2 < sizeof(uuid)) {
2687 obd_ioctl_freedata(buf, len);
2688 err = -EINVAL;
2689 goto out;
2690 }
2691
2692 desc = (struct lov_desc *)data->ioc_inlbuf1;
2693 desc->ld_tgt_count = 1;
2694 desc->ld_active_tgt_count = 1;
2695 desc->ld_default_stripe_count = 1;
2696 desc->ld_default_stripe_size = 0;
2697 desc->ld_default_stripe_offset = 0;
2698 desc->ld_pattern = 0;
2699 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2700
2701 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2702
2703 err = copy_to_user(uarg, buf, len);
2704 if (err)
2705 err = -EFAULT;
2706 obd_ioctl_freedata(buf, len);
2707 goto out;
2708 }
2709 case LL_IOC_LOV_SETSTRIPE:
2710 err = obd_alloc_memmd(exp, karg);
2711 if (err > 0)
2712 err = 0;
2713 goto out;
2714 case LL_IOC_LOV_GETSTRIPE:
2715 err = osc_getstripe(karg, uarg);
2716 goto out;
2717 case OBD_IOC_CLIENT_RECOVER:
2718 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2719 data->ioc_inlbuf1, 0);
2720 if (err > 0)
2721 err = 0;
2722 goto out;
2723 case IOC_OSC_SET_ACTIVE:
2724 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2725 data->ioc_offset);
2726 goto out;
2727 case OBD_IOC_POLL_QUOTACHECK:
2728 err = osc_quota_poll_check(exp, karg);
2729 goto out;
2730 case OBD_IOC_PING_TARGET:
2731 err = ptlrpc_obd_ping(obd);
2732 goto out;
2733 default:
2734 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
2735 cmd, current_comm());
2736 err = -ENOTTY;
2737 goto out;
2738 }
2739 out:
2740 module_put(THIS_MODULE);
2741 return err;
2742 }
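
/*
 * Illustrative sketch (not part of the original driver): the
 * module-pin pattern at the top of osc_iocontrol() above -- hold a
 * reference on this module for the duration of the call so it cannot
 * be unloaded while an ioctl is in flight. my_do_ioctl() is a
 * hypothetical worker.
 */
#include <linux/module.h>

static int my_do_ioctl(unsigned int cmd)
{
	return cmd ? -ENOTTY : 0;	/* hypothetical worker */
}

static int my_iocontrol(unsigned int cmd)
{
	int err;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	err = my_do_ioctl(cmd);

	module_put(THIS_MODULE);	/* always drop the reference */
	return err;
}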
2743
2744 static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
2745 u32 keylen, void *key, __u32 *vallen, void *val,
2746 struct lov_stripe_md *lsm)
2747 {
2748 if (!vallen || !val)
2749 return -EFAULT;
2750
2751 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
2752 __u32 *stripe = val;
2753 *vallen = sizeof(*stripe);
2754 *stripe = 0;
2755 return 0;
2756 } else if (KEY_IS(KEY_LAST_ID)) {
2757 struct ptlrpc_request *req;
2758 u64 *reply;
2759 char *tmp;
2760 int rc;
2761
2762 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2763 &RQF_OST_GET_INFO_LAST_ID);
2764 if (!req)
2765 return -ENOMEM;
2766
2767 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2768 RCL_CLIENT, keylen);
2769 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2770 if (rc) {
2771 ptlrpc_request_free(req);
2772 return rc;
2773 }
2774
2775 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2776 memcpy(tmp, key, keylen);
2777
2778 req->rq_no_delay = req->rq_no_resend = 1;
2779 ptlrpc_request_set_replen(req);
2780 rc = ptlrpc_queue_wait(req);
2781 if (rc)
2782 goto out;
2783
2784 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
2785 if (!reply) {
2786 rc = -EPROTO;
2787 goto out;
2788 }
2789
2790 *((u64 *)val) = *reply;
2791 out:
2792 ptlrpc_req_finished(req);
2793 return rc;
2794 } else if (KEY_IS(KEY_FIEMAP)) {
2795 struct ll_fiemap_info_key *fm_key = key;
2796 struct ldlm_res_id res_id;
2797 ldlm_policy_data_t policy;
2798 struct lustre_handle lockh;
2799 enum ldlm_mode mode = 0;
2800 struct ptlrpc_request *req;
2801 struct ll_user_fiemap *reply;
2802 char *tmp;
2803 int rc;
2804
2805 if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
2806 goto skip_locking;
2807
2808 policy.l_extent.start = fm_key->fiemap.fm_start &
2809 PAGE_MASK;
2810
2811 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2812 fm_key->fiemap.fm_start + PAGE_SIZE - 1)
2813 policy.l_extent.end = OBD_OBJECT_EOF;
2814 else
2815 policy.l_extent.end = (fm_key->fiemap.fm_start +
2816 fm_key->fiemap.fm_length +
2817 PAGE_SIZE - 1) & PAGE_MASK;
2818
2819 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2820 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
2821 LDLM_FL_BLOCK_GRANTED |
2822 LDLM_FL_LVB_READY,
2823 &res_id, LDLM_EXTENT, &policy,
2824 LCK_PR | LCK_PW, &lockh, 0);
2825 if (mode) { /* lock is cached on client */
2826 if (mode != LCK_PR) {
2827 ldlm_lock_addref(&lockh, LCK_PR);
2828 ldlm_lock_decref(&lockh, LCK_PW);
2829 }
2830 } else { /* no cached lock; acquire the lock on the server side */
2831 fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
2832 fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
2833 }
2834
2835 skip_locking:
2836 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2837 &RQF_OST_GET_INFO_FIEMAP);
2838 if (!req) {
2839 rc = -ENOMEM;
2840 goto drop_lock;
2841 }
2842
2843 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
2844 RCL_CLIENT, keylen);
2845 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2846 RCL_CLIENT, *vallen);
2847 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2848 RCL_SERVER, *vallen);
2849
2850 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2851 if (rc) {
2852 ptlrpc_request_free(req);
2853 goto drop_lock;
2854 }
2855
2856 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
2857 memcpy(tmp, key, keylen);
2858 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2859 memcpy(tmp, val, *vallen);
2860
2861 ptlrpc_request_set_replen(req);
2862 rc = ptlrpc_queue_wait(req);
2863 if (rc)
2864 goto fini_req;
2865
2866 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2867 if (!reply) {
2868 rc = -EPROTO;
2869 goto fini_req;
2870 }
2871
2872 memcpy(val, reply, *vallen);
2873 fini_req:
2874 ptlrpc_req_finished(req);
2875 drop_lock:
2876 if (mode)
2877 ldlm_lock_decref(&lockh, LCK_PR);
2878 return rc;
2879 }
2880
2881 return -EINVAL;
2882 }
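
/*
 * Illustrative sketch (not part of the original driver): the
 * overflow-safe end computation in the KEY_FIEMAP branch above.
 * Rather than testing start + length directly (which may wrap),
 * compare against eof - length; eof plays the role of OBD_OBJECT_EOF.
 */
#include <linux/mm.h>

static unsigned long long my_lock_end(unsigned long long start,
				      unsigned long long length,
				      unsigned long long eof)
{
	if (eof - length <= start + PAGE_SIZE - 1)
		return eof;	/* would wrap: clamp to end of object */

	return (start + length + PAGE_SIZE - 1) & PAGE_MASK;
}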
2883
2884 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2885 u32 keylen, void *key, u32 vallen,
2886 void *val, struct ptlrpc_request_set *set)
2887 {
2888 struct ptlrpc_request *req;
2889 struct obd_device *obd = exp->exp_obd;
2890 struct obd_import *imp = class_exp2cliimp(exp);
2891 char *tmp;
2892 int rc;
2893
2894 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
2895
2896 if (KEY_IS(KEY_CHECKSUM)) {
2897 if (vallen != sizeof(int))
2898 return -EINVAL;
2899 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
2900 return 0;
2901 }
2902
2903 if (KEY_IS(KEY_SPTLRPC_CONF)) {
2904 sptlrpc_conf_client_adapt(obd);
2905 return 0;
2906 }
2907
2908 if (KEY_IS(KEY_FLUSH_CTX)) {
2909 sptlrpc_import_flush_my_ctx(imp);
2910 return 0;
2911 }
2912
2913 if (KEY_IS(KEY_CACHE_SET)) {
2914 struct client_obd *cli = &obd->u.cli;
2915
2916 LASSERT(!cli->cl_cache); /* only once */
2917 cli->cl_cache = val;
2918 atomic_inc(&cli->cl_cache->ccc_users);
2919 cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
2920
2921 /* add this osc into entity list */
2922 LASSERT(list_empty(&cli->cl_lru_osc));
2923 spin_lock(&cli->cl_cache->ccc_lru_lock);
2924 list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
2925 spin_unlock(&cli->cl_cache->ccc_lru_lock);
2926
2927 return 0;
2928 }
2929
2930 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
2931 struct client_obd *cli = &obd->u.cli;
2932 int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
2933 int target = *(int *)val;
2934
2935 nr = osc_lru_shrink(env, cli, min(nr, target), true);
2936 *(int *)val -= nr;
2937 return 0;
2938 }
2939
2940 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
2941 return -EINVAL;
2942
2943 /* We pass all other commands directly to OST. Since nobody calls osc
2944 * methods directly and everybody is supposed to go through LOV, we
2945 * assume lov checked invalid values for us.
2946 * The only recognised values so far are evict_by_nid and mds_conn.
2947 * Even if something bad goes through, we'd get a -EINVAL from OST
2948 * anyway.
2949 */
2950
2951 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
2952 &RQF_OST_SET_GRANT_INFO :
2953 &RQF_OBD_SET_INFO);
2954 if (!req)
2955 return -ENOMEM;
2956
2957 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2958 RCL_CLIENT, keylen);
2959 if (!KEY_IS(KEY_GRANT_SHRINK))
2960 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
2961 RCL_CLIENT, vallen);
2962 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
2963 if (rc) {
2964 ptlrpc_request_free(req);
2965 return rc;
2966 }
2967
2968 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2969 memcpy(tmp, key, keylen);
2970 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
2971 &RMF_OST_BODY :
2972 &RMF_SETINFO_VAL);
2973 memcpy(tmp, val, vallen);
2974
2975 if (KEY_IS(KEY_GRANT_SHRINK)) {
2976 struct osc_brw_async_args *aa;
2977 struct obdo *oa;
2978
2979 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2980 aa = ptlrpc_req_async_args(req);
2981 oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
2982 if (!oa) {
2983 ptlrpc_req_finished(req);
2984 return -ENOMEM;
2985 }
2986 *oa = ((struct ost_body *)val)->oa;
2987 aa->aa_oa = oa;
2988 req->rq_interpret_reply = osc_shrink_grant_interpret;
2989 }
2990
2991 ptlrpc_request_set_replen(req);
2992 if (!KEY_IS(KEY_GRANT_SHRINK)) {
2993 LASSERT(set);
2994 ptlrpc_set_add_req(set, req);
2995 ptlrpc_check_set(NULL, set);
2996 } else {
2997 ptlrpcd_add_req(req);
2998 }
2999
3000 return 0;
3001 }
3002
3003 static int osc_reconnect(const struct lu_env *env,
3004 struct obd_export *exp, struct obd_device *obd,
3005 struct obd_uuid *cluuid,
3006 struct obd_connect_data *data,
3007 void *localdata)
3008 {
3009 struct client_obd *cli = &obd->u.cli;
3010
3011 if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3012 long lost_grant;
3013
3014 spin_lock(&cli->cl_loi_list_lock);
3015 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
3016 2 * cli_brw_size(obd);
3017 lost_grant = cli->cl_lost_grant;
3018 cli->cl_lost_grant = 0;
3019 spin_unlock(&cli->cl_loi_list_lock);
3020
3021 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
3022 data->ocd_connect_flags,
3023 data->ocd_version, data->ocd_grant, lost_grant);
3024 }
3025
3026 return 0;
3027 }
3028
3029 static int osc_disconnect(struct obd_export *exp)
3030 {
3031 struct obd_device *obd = class_exp2obd(exp);
3032 int rc;
3033
3034 rc = client_disconnect_export(exp);
3035 /**
3036 * Initially we put del_shrink_grant before disconnect_export, but it
3037 * causes the following problem if setup (connect) and cleanup
3038 * (disconnect) are tangled together.
3039 * connect p1 disconnect p2
3040 * ptlrpc_connect_import
3041 * ............... class_manual_cleanup
3042 * osc_disconnect
3043 * del_shrink_grant
3044 * ptlrpc_connect_interrupt
3045 * init_grant_shrink
3046 * add this client to shrink list
3047 * cleanup_osc
3048 * Bang! the pinger triggers the shrink.
3049 * So the osc should be disconnected from the shrink list, after we
3050 * are sure the import has been destroyed. BUG18662
3051 */
3052 if (!obd->u.cli.cl_import)
3053 osc_del_shrink_grant(&obd->u.cli);
3054 return rc;
3055 }
3056
3057 static int osc_import_event(struct obd_device *obd,
3058 struct obd_import *imp,
3059 enum obd_import_event event)
3060 {
3061 struct client_obd *cli;
3062 int rc = 0;
3063
3064 LASSERT(imp->imp_obd == obd);
3065
3066 switch (event) {
3067 case IMP_EVENT_DISCON: {
3068 cli = &obd->u.cli;
3069 spin_lock(&cli->cl_loi_list_lock);
3070 cli->cl_avail_grant = 0;
3071 cli->cl_lost_grant = 0;
3072 spin_unlock(&cli->cl_loi_list_lock);
3073 break;
3074 }
3075 case IMP_EVENT_INACTIVE: {
3076 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
3077 break;
3078 }
3079 case IMP_EVENT_INVALIDATE: {
3080 struct ldlm_namespace *ns = obd->obd_namespace;
3081 struct lu_env *env;
3082 int refcheck;
3083
3084 env = cl_env_get(&refcheck);
3085 if (!IS_ERR(env)) {
3086 /* Reset grants */
3087 cli = &obd->u.cli;
3088 /* all pages go to failing rpcs due to the invalid
3089 * import
3090 */
3091 osc_io_unplug(env, cli, NULL);
3092
3093 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3094 cl_env_put(env, &refcheck);
3095 } else {
3096 rc = PTR_ERR(env);
3097 }
3098 break;
3099 }
3100 case IMP_EVENT_ACTIVE: {
3101 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
3102 break;
3103 }
3104 case IMP_EVENT_OCD: {
3105 struct obd_connect_data *ocd = &imp->imp_connect_data;
3106
3107 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3108 osc_init_grant(&obd->u.cli, ocd);
3109
3110 /* See bug 7198 */
3111 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3112 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3113
3114 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
3115 break;
3116 }
3117 case IMP_EVENT_DEACTIVATE: {
3118 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
3119 break;
3120 }
3121 case IMP_EVENT_ACTIVATE: {
3122 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
3123 break;
3124 }
3125 default:
3126 CERROR("Unknown import event %d\n", event);
3127 LBUG();
3128 }
3129 return rc;
3130 }
3131
3132 /**
3133 * Determine whether the lock can be canceled before replaying the lock
3134 * during recovery, see bug16774 for detailed information.
3135 *
3136 * \retval zero the lock can't be canceled
3137 * \retval other ok to cancel
3138 */
3139 static int osc_cancel_weight(struct ldlm_lock *lock)
3140 {
3141 /*
3142 * Cancel all unused and granted extent lock.
3143 */
3144 if (lock->l_resource->lr_type == LDLM_EXTENT &&
3145 lock->l_granted_mode == lock->l_req_mode &&
3146 osc_ldlm_weigh_ast(lock) == 0)
3147 return 1;
3148
3149 return 0;
3150 }
3151
3152 static int brw_queue_work(const struct lu_env *env, void *data)
3153 {
3154 struct client_obd *cli = data;
3155
3156 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3157
3158 osc_io_unplug(env, cli, NULL);
3159 return 0;
3160 }
3161
3162 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3163 {
3164 struct lprocfs_static_vars lvars = { NULL };
3165 struct client_obd *cli = &obd->u.cli;
3166 void *handler;
3167 int rc;
3168 int adding;
3169 int added;
3170 int req_count;
3171
3172 rc = ptlrpcd_addref();
3173 if (rc)
3174 return rc;
3175
3176 rc = client_obd_setup(obd, lcfg);
3177 if (rc)
3178 goto out_ptlrpcd;
3179
3180 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3181 if (IS_ERR(handler)) {
3182 rc = PTR_ERR(handler);
3183 goto out_client_setup;
3184 }
3185 cli->cl_writeback_work = handler;
3186
3187 handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3188 if (IS_ERR(handler)) {
3189 rc = PTR_ERR(handler);
3190 goto out_ptlrpcd_work;
3191 }
3192
3193 cli->cl_lru_work = handler;
3194
3195 rc = osc_quota_setup(obd);
3196 if (rc)
3197 goto out_ptlrpcd_work;
3198
3199 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3200 lprocfs_osc_init_vars(&lvars);
3201 if (lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars) == 0) {
3202 lproc_osc_attach_seqstat(obd);
3203 sptlrpc_lprocfs_cliobd_attach(obd);
3204 ptlrpc_lprocfs_register_obd(obd);
3205 }
3206
3207 /*
3208 * We try to control the total number of requests with an upper limit,
3209 * osc_reqpool_maxreqcount. There might be a race that causes
3210 * over-limit allocation, but that is fine.
3211 */
3212 req_count = atomic_read(&osc_pool_req_count);
3213 if (req_count < osc_reqpool_maxreqcount) {
3214 adding = cli->cl_max_rpcs_in_flight + 2;
3215 if (req_count + adding > osc_reqpool_maxreqcount)
3216 adding = osc_reqpool_maxreqcount - req_count;
3217
3218 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3219 atomic_add(added, &osc_pool_req_count);
3220 }
3221
3222 INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
3223 ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3224 return rc;
3225
3226 out_ptlrpcd_work:
3227 if (cli->cl_writeback_work) {
3228 ptlrpcd_destroy_work(cli->cl_writeback_work);
3229 cli->cl_writeback_work = NULL;
3230 }
3231 if (cli->cl_lru_work) {
3232 ptlrpcd_destroy_work(cli->cl_lru_work);
3233 cli->cl_lru_work = NULL;
3234 }
3235 out_client_setup:
3236 client_obd_cleanup(obd);
3237 out_ptlrpcd:
3238 ptlrpcd_decref();
3239 return rc;
3240 }
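
/*
 * Illustrative sketch (not part of the original driver): the
 * optimistic pool top-up in osc_setup() above. The read/clamp/add
 * sequence is racy, so concurrent setups may overshoot the cap
 * slightly -- which the comment above explicitly tolerates.
 * my_add_reqs() is hypothetical.
 */
#include <linux/atomic.h>

static atomic_t my_pool_count = ATOMIC_INIT(0);

static int my_add_reqs(int n)
{
	/* pretend all n requests were allocated */
	return n;
}

static void my_pool_topup(int want, int cap)
{
	int count = atomic_read(&my_pool_count);
	int added;

	if (count >= cap)
		return;
	if (count + want > cap)
		want = cap - count;	/* clamp; still racy, but bounded */

	added = my_add_reqs(want);
	atomic_add(added, &my_pool_count);
}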
3241
3242 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
3243 {
3244 switch (stage) {
3245 case OBD_CLEANUP_EARLY: {
3246 struct obd_import *imp;
3247
3248 imp = obd->u.cli.cl_import;
3249 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
3250 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
3251 ptlrpc_deactivate_import(imp);
3252 spin_lock(&imp->imp_lock);
3253 imp->imp_pingable = 0;
3254 spin_unlock(&imp->imp_lock);
3255 break;
3256 }
3257 case OBD_CLEANUP_EXPORTS: {
3258 struct client_obd *cli = &obd->u.cli;
3259 /* LU-464
3260 * for echo client, export may be on zombie list, wait for
3261 * zombie thread to cull it, because cli.cl_import will be
3262 * cleared in client_disconnect_export():
3263 * class_export_destroy() -> obd_cleanup() ->
3264 * echo_device_free() -> echo_client_cleanup() ->
3265 * obd_disconnect() -> osc_disconnect() ->
3266 * client_disconnect_export()
3267 */
3268 obd_zombie_barrier();
3269 if (cli->cl_writeback_work) {
3270 ptlrpcd_destroy_work(cli->cl_writeback_work);
3271 cli->cl_writeback_work = NULL;
3272 }
3273 if (cli->cl_lru_work) {
3274 ptlrpcd_destroy_work(cli->cl_lru_work);
3275 cli->cl_lru_work = NULL;
3276 }
3277 obd_cleanup_client_import(obd);
3278 ptlrpc_lprocfs_unregister_obd(obd);
3279 lprocfs_obd_cleanup(obd);
3280 break;
3281 }
3282 }
3283 return 0;
3284 }
3285
3286 static int osc_cleanup(struct obd_device *obd)
3287 {
3288 struct client_obd *cli = &obd->u.cli;
3289 int rc;
3290
3291 /* lru cleanup */
3292 if (cli->cl_cache) {
3293 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3294 spin_lock(&cli->cl_cache->ccc_lru_lock);
3295 list_del_init(&cli->cl_lru_osc);
3296 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3297 cli->cl_lru_left = NULL;
3298 atomic_dec(&cli->cl_cache->ccc_users);
3299 cli->cl_cache = NULL;
3300 }
3301
3302 /* free memory of osc quota cache */
3303 osc_quota_cleanup(obd);
3304
3305 rc = client_obd_cleanup(obd);
3306
3307 ptlrpcd_decref();
3308 return rc;
3309 }
3310
3311 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
3312 {
3313 struct lprocfs_static_vars lvars = { NULL };
3314 int rc = 0;
3315
3316 lprocfs_osc_init_vars(&lvars);
3317
3318 switch (lcfg->lcfg_command) {
3319 default:
3320 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
3321 lcfg, obd);
3322 if (rc > 0)
3323 rc = 0;
3324 break;
3325 }
3326
3327 return rc;
3328 }
3329
3330 static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
3331 {
3332 return osc_process_config_base(obd, buf);
3333 }
3334
3335 static struct obd_ops osc_obd_ops = {
3336 .owner = THIS_MODULE,
3337 .setup = osc_setup,
3338 .precleanup = osc_precleanup,
3339 .cleanup = osc_cleanup,
3340 .add_conn = client_import_add_conn,
3341 .del_conn = client_import_del_conn,
3342 .connect = client_connect_import,
3343 .reconnect = osc_reconnect,
3344 .disconnect = osc_disconnect,
3345 .statfs = osc_statfs,
3346 .statfs_async = osc_statfs_async,
3347 .packmd = osc_packmd,
3348 .unpackmd = osc_unpackmd,
3349 .create = osc_create,
3350 .destroy = osc_destroy,
3351 .getattr = osc_getattr,
3352 .getattr_async = osc_getattr_async,
3353 .setattr = osc_setattr,
3354 .setattr_async = osc_setattr_async,
3355 .find_cbdata = osc_find_cbdata,
3356 .iocontrol = osc_iocontrol,
3357 .get_info = osc_get_info,
3358 .set_info_async = osc_set_info_async,
3359 .import_event = osc_import_event,
3360 .process_config = osc_process_config,
3361 .quotactl = osc_quotactl,
3362 .quotacheck = osc_quotacheck,
3363 };
3364
3365 extern struct lu_kmem_descr osc_caches[];
3366 extern struct lock_class_key osc_ast_guard_class;
3367
3368 static int __init osc_init(void)
3369 {
3370 struct lprocfs_static_vars lvars = { NULL };
3371 unsigned int reqpool_size;
3372 unsigned int reqsize;
3373 int rc;
3374
3375 /* print an address of _any_ initialized kernel symbol from this
3376 * module, to allow debugging with gdb that doesn't support data
3377 * symbols from modules.
3378 */
3379 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3380
3381 rc = lu_kmem_init(osc_caches);
3382 if (rc)
3383 return rc;
3384
3385 lprocfs_osc_init_vars(&lvars);
3386
3387 rc = class_register_type(&osc_obd_ops, NULL,
3388 LUSTRE_OSC_NAME, &osc_device_type);
3389 if (rc)
3390 goto out_kmem;
3391
3392 /* This is obviously too much memory; we only prevent overflow here */
3393 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
3394 rc = -EINVAL;
3395 goto out_type;
3396 }
3397
3398 reqpool_size = osc_reqpool_mem_max << 20;
3399
3400 reqsize = 1;
3401 while (reqsize < OST_MAXREQSIZE)
3402 reqsize = reqsize << 1;
3403
3404 /*
3405 * We don't enlarge the request count in OSC pool according to
3406 * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3407 * after normal allocation fails, so a small OSC pool won't
3408 * cause much performance degradation in most cases.
3409 */
3410 osc_reqpool_maxreqcount = reqpool_size / reqsize;
3411
3412 atomic_set(&osc_pool_req_count, 0);
3413 osc_rq_pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE,
3414 ptlrpc_add_rqs_to_pool);
3415
3416 if (osc_rq_pool)
3417 return 0;
3418
3419 rc = -ENOMEM;
3420
3421 out_type:
3422 class_unregister_type(LUSTRE_OSC_NAME);
3423 out_kmem:
3424 lu_kmem_fini(osc_caches);
3425 return rc;
3426 }
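
/*
 * Illustrative sketch (not part of the original driver): the pool
 * sizing arithmetic in osc_init() above -- round the request size up
 * to the next power of two, then divide the configured memory budget
 * (in MB) by it. E.g. a 5 MB budget with 16 KiB requests yields a cap
 * of 320 pooled requests.
 */
static unsigned int my_pool_req_count(unsigned int mem_max_mb,
				      unsigned int max_req_size)
{
	unsigned int reqsize = 1;

	while (reqsize < max_req_size)
		reqsize <<= 1;

	return (mem_max_mb << 20) / reqsize;
}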
3427
3428 static void /*__exit*/ osc_exit(void)
3429 {
3430 class_unregister_type(LUSTRE_OSC_NAME);
3431 lu_kmem_fini(osc_caches);
3432 ptlrpc_free_rq_pool(osc_rq_pool);
3433 }
3434
3435 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3436 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3437 MODULE_LICENSE("GPL");
3438 MODULE_VERSION(LUSTRE_VERSION_STRING);
3439
3440 module_init(osc_init);
3441 module_exit(osc_exit);