
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * changes, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);

#if 1
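/*
 * There is no rwsem_is_wrlocked() in the core rwsem API, so emulate it:
 * if down_read_trylock() succeeds, no writer can be holding the lock, so
 * drop the read lock and report "not wrlocked".  A failed trylock is
 * assumed to mean a writer -- that can misfire under heavy read
 * contention, which is fine for these WARN_ON-style debug helpers but
 * would not be for real locking decisions.
 */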
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
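
/*
 * Worked example (illustrative numbers only): with a plain 4M-object
 * layout (fl_object_size = 4M, no fancy striping), off=6M and *plen=4M
 * map to objnum=1, objoff=2M and objlen=2M -- the extent is shortened
 * at the object boundary and *plen becomes 2M.  The caller is expected
 * to issue the remaining 2M against objnum=2 as a separate request.
 */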

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
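
/*
 * E.g. osd_req_op_data(req, 0, extent, osd_data) yields
 * &req->r_ops[0].extent.osd_data after a bounds check.  The statement
 * expression evaluates @oreq and @whch only once, so arguments with
 * side effects are safe.
 */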

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
		unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oid_destroy(&t->target_oid);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     atomic_read(&req->r_kref.refcount));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     atomic_read(&req->r_kref.refcount));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
	request_release_checks(req);

	WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
	WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
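
/*
 * A typical call sequence, as an illustrative sketch only (error
 * handling trimmed; @pool_id and @object_name are hypothetical names):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
 *					 false);
 *	req->r_base_oloc.pool = pool_id;
 *	ceph_oid_printf(&req->r_base_oid, "%s", object_name);
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *
 * after which the request can be submitted with ceph_osdc_start_request()
 * and eventually reclaimed with ceph_osdc_put_request().
 */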

int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));

	/* create request message */
	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
	msg_size += 1 + 8 + 4 + 4; /* pgid */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4; /* retry_attempt */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return; /* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
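
/*
 * For instance, osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock",
 * "lock") would set up a call into an object class (names here purely
 * illustrative); any input/output payload is attached separately with
 * the cls request_data/response_data helpers above.
 */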

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
		       ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = le32_to_cpu(layout->fl_object_size);
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
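
/*
 * E.g. (hypothetical numbers) a CEPH_OSD_OP_READ of 1M at file offset 6M
 * on vino 0x1234 with a 4M object size becomes a 1M read at offset 2M
 * from object "1234.00000001"; *plen would have been trimmed first had
 * the extent crossed into the next object.
 */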

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
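
/*
 * Each DEFINE_RB_FUNCS invocation (see linux/ceph/libceph.h) expands
 * into the insert_request()/lookup_request()/erase_request() and
 * *_request_mc() helpers used throughout this file, keyed by r_tid.
 */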

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	atomic_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->base_oloc.pool);
	return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
	       (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

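/*
 * Map a request target to its OSD: resolve cache tiering and forced
 * resend epochs, hash the object name to a PG, run CRUSH to get the
 * up/acting sets, and pick the acting primary.  The result says whether
 * nothing changed, the request must be resent, or the pool is gone
 * (POOL_DNE).
 */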
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   u32 *last_force_resend,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool need_check_tiering = false;
	bool need_resend = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
					     CEPH_OSDMAP_SORTBITWISE);
	enum calc_target_result ct_res;
	int ret;

	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (last_force_resend &&
		    *last_force_resend < pi->last_force_request_resend) {
			*last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (!last_force_resend) {
			force_resend = true;
		}
	}
	if (ceph_oid_empty(&t->target_oid) || force_resend) {
		ceph_oid_copy(&t->target_oid, &t->base_oid);
		need_check_tiering = true;
	}
	if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
		ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
		need_check_tiering = true;
	}

	if (need_check_tiering &&
	    (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;
	}

	ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
					&t->target_oloc, &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		need_resend = true;
	}

	if (ceph_pg_compare(&t->pgid, &pgid) ||
	    ceph_osds_changed(&t->acting, &acting, any_change) ||
	    force_resend) {
		t->pgid = pgid; /* struct */
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;

		t->osd = acting.primary;
		need_resend = true;
	}

	ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}

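/*
 * Attach each op's data buffers to the right message so the messenger
 * can transfer them: write-side payloads hang off the request @msg,
 * read-side buffers off req->r_reply, and CALL/NOTIFY contribute to
 * both.
 */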
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}

static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	ceph_encode_32(&p, 1); /* client_inc, always 1 */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);
	/* aka reassert_version */
	memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
	p += sizeof(req->r_replay_version);

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	ceph_encode_64(&p, req->r_t.target_oloc.pool);
	ceph_encode_32(&p, -1); /* preferred */
	ceph_encode_32(&p, 0); /* key len */

	/* pgid */
	ceph_encode_8(&p, 1);
	ceph_encode_64(&p, req->r_t.pgid.pool);
	ceph_encode_32(&p, req->r_t.pgid.seed);
	ceph_encode_32(&p, -1); /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_t.target_oid.name_len);
	memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
	p += req->r_t.target_oid.name_len;

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
	     req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
	     req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
}

/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request(req, req->r_request);

	dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.osd, req->r_flags, req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}

static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}

static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	bool need_send = false;
	bool promoted = false;

	WARN_ON(req->r_tid || req->r_got_reply);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	mutex_unlock(&osd->lock);

	if (ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}

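/*
 * Account a request as in-flight and normalize its flags: reads get an
 * implied ACK, while writes must already carry ACK and/or ONDISK so
 * that reply handling knows which completions to expect.
 */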
static void account_request(struct ceph_osd_request *req)
{
	unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;

	if (req->r_flags & CEPH_OSD_FLAG_READ) {
		WARN_ON(req->r_flags & mask);
		req->r_flags |= CEPH_OSD_FLAG_ACK;
	} else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		WARN_ON(!(req->r_flags & mask));
	else
		WARN_ON(1);

	WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
	atomic_inc(&req->r_osdc->num_requests);
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}

static void __finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	unlink_request(osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void finish_request(struct ceph_osd_request *req)
{
	__finish_request(req);
	ceph_osdc_put_request(req);
}

static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback)
		req->r_callback(req);
	else
		complete_all(&req->r_completion);
}

/*
 * Note that this is open-coded in handle_reply(), which has to deal
 * with ack vs commit, dup acks, etc.
 */
static void complete_request(struct ceph_osd_request *req, int err)
{
	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);

	req->r_result = err;
	__finish_request(req);
	__complete_request(req);
	complete_all(&req->r_safe_completion);
	ceph_osdc_put_request(req);
}

static void cancel_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
	erase_request_mc(&osdc->map_checks, req);
	ceph_osdc_put_request(req);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	cancel_map_check(req);
	finish_request(req);
}

static void check_pool_dne(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osdmap *map = osdc->osdmap;

	verify_osdc_wrlocked(osdc);
	WARN_ON(!map->epoch);

	if (req->r_attempts) {
		/*
		 * We sent a request earlier, which means that
		 * previously the pool existed, and now it does not
		 * (i.e., it was deleted).
		 */
		req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
		     req, req->r_tid, req->r_map_dne_bound, map->epoch);
	}

	if (req->r_map_dne_bound) {
		if (map->epoch >= req->r_map_dne_bound) {
			/* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}

static void map_check_cb(struct ceph_mon_generic_request *greq)
{
	struct ceph_osd_client *osdc = &greq->monc->client->osdc;
	struct ceph_osd_request *req;
	u64 tid = greq->private_data;

	WARN_ON(greq->result || !greq->u.newest);

	down_write(&osdc->lock);
	req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}

	dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
	     req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
	if (!req->r_map_dne_bound)
		req->r_map_dne_bound = greq->u.newest;
	erase_request_mc(&osdc->map_checks, req);
	check_pool_dne(req);

	ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}

static void send_map_check(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}

/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	RB_CLEAR_NODE(&lreq->mc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	INIT_LIST_HEAD(&lreq->pending_lworks);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)

/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
2001 }
2002
2003 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2004 {
2005 struct ceph_osd_client *osdc = lreq->osdc;
2006
2007 verify_osdc_wrlocked(osdc);
2008
2009 erase_linger_osdc(&osdc->linger_requests, lreq);
2010 linger_put(lreq);
2011 }
2012
2013 static void cancel_linger_request(struct ceph_osd_request *req)
2014 {
2015 struct ceph_osd_linger_request *lreq = req->r_priv;
2016
2017 WARN_ON(!req->r_linger);
2018 cancel_request(req);
2019 linger_put(lreq);
2020 }
2021
2022 struct linger_work {
2023 struct work_struct work;
2024 struct ceph_osd_linger_request *lreq;
2025 struct list_head pending_item;
2026 unsigned long queued_stamp;
2027
2028 union {
2029 struct {
2030 u64 notify_id;
2031 u64 notifier_id;
2032 void *payload; /* points into @msg front */
2033 size_t payload_len;
2034
2035 struct ceph_msg *msg; /* for ceph_msg_put() */
2036 } notify;
2037 struct {
2038 int err;
2039 } error;
2040 };
2041 };
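
/*
 * A linger_work item carries one watch notification or one watch error
 * from the reply path to process context: lwork_queue() puts it on
 * osdc->notify_wq and lwork_free() tears it down.  pending_item keeps
 * the work discoverable from the lreq, so that ceph_osdc_watch_check()
 * can account for events that have been queued but not yet delivered.
 */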
2042
2043 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2044 work_func_t workfn)
2045 {
2046 struct linger_work *lwork;
2047
2048 lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2049 if (!lwork)
2050 return NULL;
2051
2052 INIT_WORK(&lwork->work, workfn);
2053 INIT_LIST_HEAD(&lwork->pending_item);
2054 lwork->lreq = linger_get(lreq);
2055
2056 return lwork;
2057 }
2058
2059 static void lwork_free(struct linger_work *lwork)
2060 {
2061 struct ceph_osd_linger_request *lreq = lwork->lreq;
2062
2063 mutex_lock(&lreq->lock);
2064 list_del(&lwork->pending_item);
2065 mutex_unlock(&lreq->lock);
2066
2067 linger_put(lreq);
2068 kfree(lwork);
2069 }
2070
2071 static void lwork_queue(struct linger_work *lwork)
2072 {
2073 struct ceph_osd_linger_request *lreq = lwork->lreq;
2074 struct ceph_osd_client *osdc = lreq->osdc;
2075
2076 verify_lreq_locked(lreq);
2077 WARN_ON(!list_empty(&lwork->pending_item));
2078
2079 lwork->queued_stamp = jiffies;
2080 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2081 queue_work(osdc->notify_wq, &lwork->work);
2082 }
2083
2084 static void do_watch_notify(struct work_struct *w)
2085 {
2086 struct linger_work *lwork = container_of(w, struct linger_work, work);
2087 struct ceph_osd_linger_request *lreq = lwork->lreq;
2088
2089 if (!linger_registered(lreq)) {
2090 dout("%s lreq %p not registered\n", __func__, lreq);
2091 goto out;
2092 }
2093
2094 WARN_ON(!lreq->is_watch);
2095 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2096 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2097 lwork->notify.payload_len);
2098 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2099 lwork->notify.notifier_id, lwork->notify.payload,
2100 lwork->notify.payload_len);
2101
2102 out:
2103 ceph_msg_put(lwork->notify.msg);
2104 lwork_free(lwork);
2105 }
2106
2107 static void do_watch_error(struct work_struct *w)
2108 {
2109 struct linger_work *lwork = container_of(w, struct linger_work, work);
2110 struct ceph_osd_linger_request *lreq = lwork->lreq;
2111
2112 if (!linger_registered(lreq)) {
2113 dout("%s lreq %p not registered\n", __func__, lreq);
2114 goto out;
2115 }
2116
2117 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2118 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2119
2120 out:
2121 lwork_free(lwork);
2122 }
2123
2124 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2125 {
2126 struct linger_work *lwork;
2127
2128 lwork = lwork_alloc(lreq, do_watch_error);
2129 if (!lwork) {
2130 pr_err("failed to allocate error-lwork\n");
2131 return;
2132 }
2133
2134 lwork->error.err = lreq->last_error;
2135 lwork_queue(lwork);
2136 }
2137
2138 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2139 int result)
2140 {
2141 if (!completion_done(&lreq->reg_commit_wait)) {
2142 lreq->reg_commit_error = (result <= 0 ? result : 0);
2143 complete_all(&lreq->reg_commit_wait);
2144 }
2145 }
2146
2147 static void linger_commit_cb(struct ceph_osd_request *req)
2148 {
2149 struct ceph_osd_linger_request *lreq = req->r_priv;
2150
2151 mutex_lock(&lreq->lock);
2152 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2153 lreq->linger_id, req->r_result);
2154 WARN_ON(!__linger_registered(lreq));
2155 linger_reg_commit_complete(lreq, req->r_result);
2156 lreq->committed = true;
2157
2158 if (!lreq->is_watch) {
2159 struct ceph_osd_data *osd_data =
2160 osd_req_op_data(req, 0, notify, response_data);
2161 void *p = page_address(osd_data->pages[0]);
2162
2163 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2164 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2165
2166 /* make note of the notify_id */
2167 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2168 lreq->notify_id = ceph_decode_64(&p);
2169 dout("lreq %p notify_id %llu\n", lreq,
2170 lreq->notify_id);
2171 } else {
2172 dout("lreq %p no notify_id\n", lreq);
2173 }
2174 }
2175
2176 mutex_unlock(&lreq->lock);
2177 linger_put(lreq);
2178 }
2179
2180 static int normalize_watch_error(int err)
2181 {
2182 /*
2183 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2184 * notification and a failure to reconnect because we raced with
2185 * the delete appear the same to the user.
2186 */
2187 if (err == -ENOENT)
2188 err = -ENOTCONN;
2189
2190 return err;
2191 }
2192
2193 static void linger_reconnect_cb(struct ceph_osd_request *req)
2194 {
2195 struct ceph_osd_linger_request *lreq = req->r_priv;
2196
2197 mutex_lock(&lreq->lock);
2198 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2199 lreq, lreq->linger_id, req->r_result, lreq->last_error);
2200 if (req->r_result < 0) {
2201 if (!lreq->last_error) {
2202 lreq->last_error = normalize_watch_error(req->r_result);
2203 queue_watch_error(lreq);
2204 }
2205 }
2206
2207 mutex_unlock(&lreq->lock);
2208 linger_put(lreq);
2209 }
2210
2211 static void send_linger(struct ceph_osd_linger_request *lreq)
2212 {
2213 struct ceph_osd_request *req = lreq->reg_req;
2214 struct ceph_osd_req_op *op = &req->r_ops[0];
2215
2216 verify_osdc_wrlocked(req->r_osdc);
2217 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2218
2219 if (req->r_osd)
2220 cancel_linger_request(req);
2221
2222 request_reinit(req);
2223 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2224 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2225 req->r_flags = lreq->t.flags;
2226 req->r_mtime = lreq->mtime;
2227
2228 mutex_lock(&lreq->lock);
2229 if (lreq->is_watch && lreq->committed) {
2230 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2231 op->watch.cookie != lreq->linger_id);
2232 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2233 op->watch.gen = ++lreq->register_gen;
2234 dout("lreq %p reconnect register_gen %u\n", lreq,
2235 op->watch.gen);
2236 req->r_callback = linger_reconnect_cb;
2237 } else {
2238 if (!lreq->is_watch)
2239 lreq->notify_id = 0;
2240 else
2241 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2242 dout("lreq %p register\n", lreq);
2243 req->r_callback = linger_commit_cb;
2244 }
2245 mutex_unlock(&lreq->lock);
2246
2247 req->r_priv = linger_get(lreq);
2248 req->r_linger = true;
2249
2250 submit_request(req, true);
2251 }
2252
2253 static void linger_ping_cb(struct ceph_osd_request *req)
2254 {
2255 struct ceph_osd_linger_request *lreq = req->r_priv;
2256
2257 mutex_lock(&lreq->lock);
2258 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2259 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2260 lreq->last_error);
2261 if (lreq->register_gen == req->r_ops[0].watch.gen) {
2262 if (!req->r_result) {
2263 lreq->watch_valid_thru = lreq->ping_sent;
2264 } else if (!lreq->last_error) {
2265 lreq->last_error = normalize_watch_error(req->r_result);
2266 queue_watch_error(lreq);
2267 }
2268 } else {
2269 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2270 lreq->register_gen, req->r_ops[0].watch.gen);
2271 }
2272
2273 mutex_unlock(&lreq->lock);
2274 linger_put(lreq);
2275 }
2276
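/*
 * A ping is submitted by hand (account_request() + link_request() +
 * send_request()) rather than through submit_request(), so that it is
 * pinned to the OSD that currently serves the watch, i.e. lreq->osd.
 */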
2277 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2278 {
2279 struct ceph_osd_client *osdc = lreq->osdc;
2280 struct ceph_osd_request *req = lreq->ping_req;
2281 struct ceph_osd_req_op *op = &req->r_ops[0];
2282
2283 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
2284 dout("%s PAUSERD\n", __func__);
2285 return;
2286 }
2287
2288 lreq->ping_sent = jiffies;
2289 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2290 __func__, lreq, lreq->linger_id, lreq->ping_sent,
2291 lreq->register_gen);
2292
2293 if (req->r_osd)
2294 cancel_linger_request(req);
2295
2296 request_reinit(req);
2297 target_copy(&req->r_t, &lreq->t);
2298
2299 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2300 op->watch.cookie != lreq->linger_id ||
2301 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2302 op->watch.gen = lreq->register_gen;
2303 req->r_callback = linger_ping_cb;
2304 req->r_priv = linger_get(lreq);
2305 req->r_linger = true;
2306
2307 ceph_osdc_get_request(req);
2308 account_request(req);
2309 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2310 link_request(lreq->osd, req);
2311 send_request(req);
2312 }
2313
2314 static void linger_submit(struct ceph_osd_linger_request *lreq)
2315 {
2316 struct ceph_osd_client *osdc = lreq->osdc;
2317 struct ceph_osd *osd;
2318
2319 calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
2320 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2321 link_linger(osd, lreq);
2322
2323 send_linger(lreq);
2324 }
2325
2326 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2327 {
2328 struct ceph_osd_client *osdc = lreq->osdc;
2329 struct ceph_osd_linger_request *lookup_lreq;
2330
2331 verify_osdc_wrlocked(osdc);
2332
2333 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2334 lreq->linger_id);
2335 if (!lookup_lreq)
2336 return;
2337
2338 WARN_ON(lookup_lreq != lreq);
2339 erase_linger_mc(&osdc->linger_map_checks, lreq);
2340 linger_put(lreq);
2341 }
2342
2343 /*
2344 * @lreq has to be both registered and linked.
2345 */
2346 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2347 {
2348 if (lreq->is_watch && lreq->ping_req->r_osd)
2349 cancel_linger_request(lreq->ping_req);
2350 if (lreq->reg_req->r_osd)
2351 cancel_linger_request(lreq->reg_req);
2352 cancel_linger_map_check(lreq);
2353 unlink_linger(lreq->osd, lreq);
2354 linger_unregister(lreq);
2355 }
2356
2357 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2358 {
2359 struct ceph_osd_client *osdc = lreq->osdc;
2360
2361 down_write(&osdc->lock);
2362 if (__linger_registered(lreq))
2363 __linger_cancel(lreq);
2364 up_write(&osdc->lock);
2365 }
2366
2367 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2368
2369 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2370 {
2371 struct ceph_osd_client *osdc = lreq->osdc;
2372 struct ceph_osdmap *map = osdc->osdmap;
2373
2374 verify_osdc_wrlocked(osdc);
2375 WARN_ON(!map->epoch);
2376
2377 if (lreq->register_gen) {
2378 lreq->map_dne_bound = map->epoch;
2379 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2380 lreq, lreq->linger_id);
2381 } else {
2382 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2383 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2384 map->epoch);
2385 }
2386
2387 if (lreq->map_dne_bound) {
2388 if (map->epoch >= lreq->map_dne_bound) {
2389 /* we had a new enough map */
2390 pr_info("linger_id %llu pool does not exist\n",
2391 lreq->linger_id);
2392 linger_reg_commit_complete(lreq, -ENOENT);
2393 __linger_cancel(lreq);
2394 }
2395 } else {
2396 send_linger_map_check(lreq);
2397 }
2398 }
2399
2400 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2401 {
2402 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2403 struct ceph_osd_linger_request *lreq;
2404 u64 linger_id = greq->private_data;
2405
2406 WARN_ON(greq->result || !greq->u.newest);
2407
2408 down_write(&osdc->lock);
2409 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2410 if (!lreq) {
2411 dout("%s linger_id %llu dne\n", __func__, linger_id);
2412 goto out_unlock;
2413 }
2414
2415 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2416 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2417 greq->u.newest);
2418 if (!lreq->map_dne_bound)
2419 lreq->map_dne_bound = greq->u.newest;
2420 erase_linger_mc(&osdc->linger_map_checks, lreq);
2421 check_linger_pool_dne(lreq);
2422
2423 linger_put(lreq);
2424 out_unlock:
2425 up_write(&osdc->lock);
2426 }
2427
2428 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2429 {
2430 struct ceph_osd_client *osdc = lreq->osdc;
2431 struct ceph_osd_linger_request *lookup_lreq;
2432 int ret;
2433
2434 verify_osdc_wrlocked(osdc);
2435
2436 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2437 lreq->linger_id);
2438 if (lookup_lreq) {
2439 WARN_ON(lookup_lreq != lreq);
2440 return;
2441 }
2442
2443 linger_get(lreq);
2444 insert_linger_mc(&osdc->linger_map_checks, lreq);
2445 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2446 linger_map_check_cb, lreq->linger_id);
2447 WARN_ON(ret);
2448 }
2449
2450 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2451 {
2452 int ret;
2453
2454 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2455 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2456 return ret ?: lreq->reg_commit_error;
2457 }
2458
2459 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2460 {
2461 int ret;
2462
2463 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2464 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2465 return ret ?: lreq->notify_finish_error;
2466 }
2467
2468 /*
2469 * Timeout callback, called every N seconds. When one or more OSD
2470 * requests have been active for more than N seconds, we send a keepalive
2471 * (tag + timestamp) to the OSD in question so that any break in the
2472 * communications channel is detected.
2473 */
2474 static void handle_timeout(struct work_struct *work)
2475 {
2476 struct ceph_osd_client *osdc =
2477 container_of(work, struct ceph_osd_client, timeout_work.work);
2478 struct ceph_options *opts = osdc->client->options;
2479 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2480 LIST_HEAD(slow_osds);
2481 struct rb_node *n, *p;
2482
2483 dout("%s osdc %p\n", __func__, osdc);
2484 down_write(&osdc->lock);
2485
2486 /*
2487 * ping osds that are a bit slow. this ensures that if there
2488 * is a break in the TCP connection we will notice, and reopen
2489 * a connection with that osd (from the fault callback).
2490 */
2491 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2492 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2493 bool found = false;
2494
2495 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
2496 struct ceph_osd_request *req =
2497 rb_entry(p, struct ceph_osd_request, r_node);
2498
2499 if (time_before(req->r_stamp, cutoff)) {
2500 dout(" req %p tid %llu on osd%d is laggy\n",
2501 req, req->r_tid, osd->o_osd);
2502 found = true;
2503 }
2504 }
2505 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2506 struct ceph_osd_linger_request *lreq =
2507 rb_entry(p, struct ceph_osd_linger_request, node);
2508
2509 dout(" lreq %p linger_id %llu is served by osd%d\n",
2510 lreq, lreq->linger_id, osd->o_osd);
2511 found = true;
2512
2513 mutex_lock(&lreq->lock);
2514 if (lreq->is_watch && lreq->committed && !lreq->last_error)
2515 send_linger_ping(lreq);
2516 mutex_unlock(&lreq->lock);
2517 }
2518
2519 if (found)
2520 list_move_tail(&osd->o_keepalive_item, &slow_osds);
2521 }
2522
2523 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2524 maybe_request_map(osdc);
2525
2526 while (!list_empty(&slow_osds)) {
2527 struct ceph_osd *osd = list_first_entry(&slow_osds,
2528 struct ceph_osd,
2529 o_keepalive_item);
2530 list_del_init(&osd->o_keepalive_item);
2531 ceph_con_keepalive(&osd->o_con);
2532 }
2533
2534 up_write(&osdc->lock);
2535 schedule_delayed_work(&osdc->timeout_work,
2536 osdc->client->options->osd_keepalive_timeout);
2537 }
2538
2539 static void handle_osds_timeout(struct work_struct *work)
2540 {
2541 struct ceph_osd_client *osdc =
2542 container_of(work, struct ceph_osd_client,
2543 osds_timeout_work.work);
2544 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2545 struct ceph_osd *osd, *nosd;
2546
2547 dout("%s osdc %p\n", __func__, osdc);
2548 down_write(&osdc->lock);
2549 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2550 if (time_before(jiffies, osd->lru_ttl))
2551 break;
2552
2553 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2554 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2555 close_osd(osd);
2556 }
2557
2558 up_write(&osdc->lock);
2559 schedule_delayed_work(&osdc->osds_timeout_work,
2560 round_jiffies_relative(delay));
2561 }
2562
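/*
 * On-wire ceph_object_locator, as consumed below:
 *
 *   u8 struct_v, u8 struct_cv, u32 len    (encoding header, v >= 3)
 *   s64 pool
 *   u32 preferred       (ignored)
 *   string key          (must be empty)
 *   string nspace       (v5+, must be empty)
 *   s64 hash            (v6+, must be -1)
 *
 * Anything we cannot act on turns the whole reply into -EINVAL.
 */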
2563 static int ceph_oloc_decode(void **p, void *end,
2564 struct ceph_object_locator *oloc)
2565 {
2566 u8 struct_v, struct_cv;
2567 u32 len;
2568 void *struct_end;
2569 int ret = 0;
2570
2571 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2572 struct_v = ceph_decode_8(p);
2573 struct_cv = ceph_decode_8(p);
2574 if (struct_v < 3) {
2575 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2576 struct_v, struct_cv);
2577 goto e_inval;
2578 }
2579 if (struct_cv > 6) {
2580 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2581 struct_v, struct_cv);
2582 goto e_inval;
2583 }
2584 len = ceph_decode_32(p);
2585 ceph_decode_need(p, end, len, e_inval);
2586 struct_end = *p + len;
2587
2588 oloc->pool = ceph_decode_64(p);
2589 *p += 4; /* skip preferred */
2590
2591 len = ceph_decode_32(p);
2592 if (len > 0) {
2593 pr_warn("ceph_object_locator::key is set\n");
2594 goto e_inval;
2595 }
2596
2597 if (struct_v >= 5) {
2598 len = ceph_decode_32(p);
2599 if (len > 0) {
2600 pr_warn("ceph_object_locator::nspace is set\n");
2601 goto e_inval;
2602 }
2603 }
2604
2605 if (struct_v >= 6) {
2606 s64 hash = ceph_decode_64(p);
2607 if (hash != -1) {
2608 pr_warn("ceph_object_locator::hash is set\n");
2609 goto e_inval;
2610 }
2611 }
2612
2613 /* skip the rest */
2614 *p = struct_end;
2615 out:
2616 return ret;
2617
2618 e_inval:
2619 ret = -EINVAL;
2620 goto out;
2621 }
2622
2623 static int ceph_redirect_decode(void **p, void *end,
2624 struct ceph_request_redirect *redir)
2625 {
2626 u8 struct_v, struct_cv;
2627 u32 len;
2628 void *struct_end;
2629 int ret;
2630
2631 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2632 struct_v = ceph_decode_8(p);
2633 struct_cv = ceph_decode_8(p);
2634 if (struct_cv > 1) {
2635 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2636 struct_v, struct_cv);
2637 goto e_inval;
2638 }
2639 len = ceph_decode_32(p);
2640 ceph_decode_need(p, end, len, e_inval);
2641 struct_end = *p + len;
2642
2643 ret = ceph_oloc_decode(p, end, &redir->oloc);
2644 if (ret)
2645 goto out;
2646
2647 len = ceph_decode_32(p);
2648 if (len > 0) {
2649 pr_warn("ceph_request_redirect::object_name is set\n");
2650 goto e_inval;
2651 }
2652
2653 len = ceph_decode_32(p);
2654 *p += len; /* skip osd_instructions */
2655
2656 /* skip the rest */
2657 *p = struct_end;
2658 out:
2659 return ret;
2660
2661 e_inval:
2662 ret = -EINVAL;
2663 goto out;
2664 }
2665
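/*
 * Host-endian, decoded view of an OSD op reply message, filled in by
 * decode_MOSDOpReply() below.  replay_version keeps its on-wire
 * little-endian form (struct ceph_eversion) and is converted at the
 * point of use.
 */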
2666 struct MOSDOpReply {
2667 struct ceph_pg pgid;
2668 u64 flags;
2669 int result;
2670 u32 epoch;
2671 int num_ops;
2672 u32 outdata_len[CEPH_OSD_MAX_OPS];
2673 s32 rval[CEPH_OSD_MAX_OPS];
2674 int retry_attempt;
2675 struct ceph_eversion replay_version;
2676 u64 user_version;
2677 struct ceph_request_redirect redirect;
2678 };
2679
2680 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2681 {
2682 void *p = msg->front.iov_base;
2683 void *const end = p + msg->front.iov_len;
2684 u16 version = le16_to_cpu(msg->hdr.version);
2685 struct ceph_eversion bad_replay_version;
2686 u8 decode_redir;
2687 u32 len;
2688 int ret;
2689 int i;
2690
2691 ceph_decode_32_safe(&p, end, len, e_inval);
2692 ceph_decode_need(&p, end, len, e_inval);
2693 p += len; /* skip oid */
2694
2695 ret = ceph_decode_pgid(&p, end, &m->pgid);
2696 if (ret)
2697 return ret;
2698
2699 ceph_decode_64_safe(&p, end, m->flags, e_inval);
2700 ceph_decode_32_safe(&p, end, m->result, e_inval);
2701 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2702 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2703 p += sizeof(bad_replay_version);
2704 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2705
2706 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2707 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2708 goto e_inval;
2709
2710 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2711 e_inval);
2712 for (i = 0; i < m->num_ops; i++) {
2713 struct ceph_osd_op *op = p;
2714
2715 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2716 p += sizeof(*op);
2717 }
2718
2719 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2720 for (i = 0; i < m->num_ops; i++)
2721 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2722
2723 if (version >= 5) {
2724 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2725 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2726 p += sizeof(m->replay_version);
2727 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2728 } else {
2729 m->replay_version = bad_replay_version; /* struct */
2730 m->user_version = le64_to_cpu(m->replay_version.version);
2731 }
2732
2733 if (version >= 6) {
2734 if (version >= 7)
2735 ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2736 else
2737 decode_redir = 1;
2738 } else {
2739 decode_redir = 0;
2740 }
2741
2742 if (decode_redir) {
2743 ret = ceph_redirect_decode(&p, end, &m->redirect);
2744 if (ret)
2745 return ret;
2746 } else {
2747 ceph_oloc_init(&m->redirect.oloc);
2748 }
2749
2750 return 0;
2751
2752 e_inval:
2753 return -EINVAL;
2754 }
2755
2756 /*
2757 * We are done with @req if
2758 * - @m is a safe reply, or
2759 * - @m is an unsafe reply and we didn't want a safe one
2760 */
2761 static bool done_request(const struct ceph_osd_request *req,
2762 const struct MOSDOpReply *m)
2763 {
2764 return (m->result < 0 ||
2765 (m->flags & CEPH_OSD_FLAG_ONDISK) ||
2766 !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
2767 }
2768
2769 /*
2770 * handle osd op reply. either call the callback if it is specified,
2771 * or do the completion to wake up the waiting thread.
2772 *
2773 * ->r_unsafe_callback is set?    yes                   no
2774 *
2775 * first reply is OK (needed      r_cb/r_completion,    r_cb/r_completion,
2776 * any or needed/got safe)        r_safe_completion     r_safe_completion
2777 *
2778 * first reply is unsafe          r_unsafe_cb(true)     (nothing)
2779 *
2780 * when we get the safe reply     r_unsafe_cb(false),   r_cb/r_completion,
2781 *                                r_safe_completion     r_safe_completion
2782 */
2783 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2784 {
2785 struct ceph_osd_client *osdc = osd->o_osdc;
2786 struct ceph_osd_request *req;
2787 struct MOSDOpReply m;
2788 u64 tid = le64_to_cpu(msg->hdr.tid);
2789 u32 data_len = 0;
2790 bool already_acked;
2791 int ret;
2792 int i;
2793
2794 dout("%s msg %p tid %llu\n", __func__, msg, tid);
2795
2796 down_read(&osdc->lock);
2797 if (!osd_registered(osd)) {
2798 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2799 goto out_unlock_osdc;
2800 }
2801 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2802
2803 mutex_lock(&osd->lock);
2804 req = lookup_request(&osd->o_requests, tid);
2805 if (!req) {
2806 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2807 goto out_unlock_session;
2808 }
2809
2810 ret = decode_MOSDOpReply(msg, &m);
2811 if (ret) {
2812 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2813 req->r_tid, ret);
2814 ceph_msg_dump(msg);
2815 goto fail_request;
2816 }
2817 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2818 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2819 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2820 le64_to_cpu(m.replay_version.version), m.user_version);
2821
2822 if (m.retry_attempt >= 0) {
2823 if (m.retry_attempt != req->r_attempts - 1) {
2824 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2825 req, req->r_tid, m.retry_attempt,
2826 req->r_attempts - 1);
2827 goto out_unlock_session;
2828 }
2829 } else {
2830 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2831 }
2832
2833 if (!ceph_oloc_empty(&m.redirect.oloc)) {
2834 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2835 m.redirect.oloc.pool);
2836 unlink_request(osd, req);
2837 mutex_unlock(&osd->lock);
2838
2839 ceph_oloc_copy(&req->r_t.target_oloc, &m.redirect.oloc);
2840 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2841 req->r_tid = 0;
2842 __submit_request(req, false);
2843 goto out_unlock_osdc;
2844 }
2845
2846 if (m.num_ops != req->r_num_ops) {
2847 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2848 req->r_num_ops, req->r_tid);
2849 goto fail_request;
2850 }
2851 for (i = 0; i < req->r_num_ops; i++) {
2852 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2853 req->r_tid, i, m.rval[i], m.outdata_len[i]);
2854 req->r_ops[i].rval = m.rval[i];
2855 req->r_ops[i].outdata_len = m.outdata_len[i];
2856 data_len += m.outdata_len[i];
2857 }
2858 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
2859 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
2860 le32_to_cpu(msg->hdr.data_len), req->r_tid);
2861 goto fail_request;
2862 }
2863 dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
2864 req, req->r_tid, req->r_got_reply, m.result, data_len);
2865
2866 already_acked = req->r_got_reply;
2867 if (!already_acked) {
2868 req->r_result = m.result ?: data_len;
2869 req->r_replay_version = m.replay_version; /* struct */
2870 req->r_got_reply = true;
2871 } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
2872 dout("req %p tid %llu dup ack\n", req, req->r_tid);
2873 goto out_unlock_session;
2874 }
2875
2876 if (done_request(req, &m)) {
2877 __finish_request(req);
2878 if (req->r_linger) {
2879 WARN_ON(req->r_unsafe_callback);
2880 dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
2881 __complete_request(req);
2882 }
2883 }
2884
2885 mutex_unlock(&osd->lock);
2886 up_read(&osdc->lock);
2887
2888 if (done_request(req, &m)) {
2889 if (already_acked && req->r_unsafe_callback) {
2890 dout("req %p tid %llu safe-cb\n", req, req->r_tid);
2891 req->r_unsafe_callback(req, false);
2892 } else if (!req->r_linger) {
2893 dout("req %p tid %llu cb\n", req, req->r_tid);
2894 __complete_request(req);
2895 }
2896 } else {
2897 if (req->r_unsafe_callback) {
2898 dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
2899 req->r_unsafe_callback(req, true);
2900 } else {
2901 WARN_ON(1);
2902 }
2903 }
2904 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2905 complete_all(&req->r_safe_completion);
2906
2907 ceph_osdc_put_request(req);
2908 return;
2909
2910 fail_request:
2911 complete_request(req, -EIO);
2912 out_unlock_session:
2913 mutex_unlock(&osd->lock);
2914 out_unlock_osdc:
2915 up_read(&osdc->lock);
2916 }
2917
2918 static void set_pool_was_full(struct ceph_osd_client *osdc)
2919 {
2920 struct rb_node *n;
2921
2922 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
2923 struct ceph_pg_pool_info *pi =
2924 rb_entry(n, struct ceph_pg_pool_info, node);
2925
2926 pi->was_full = __pool_full(pi);
2927 }
2928 }
2929
2930 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
2931 {
2932 struct ceph_pg_pool_info *pi;
2933
2934 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
2935 if (!pi)
2936 return false;
2937
2938 return pi->was_full && !__pool_full(pi);
2939 }
2940
2941 static enum calc_target_result
2942 recalc_linger_target(struct ceph_osd_linger_request *lreq)
2943 {
2944 struct ceph_osd_client *osdc = lreq->osdc;
2945 enum calc_target_result ct_res;
2946
2947 ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
2948 if (ct_res == CALC_TARGET_NEED_RESEND) {
2949 struct ceph_osd *osd;
2950
2951 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2952 if (osd != lreq->osd) {
2953 unlink_linger(lreq->osd, lreq);
2954 link_linger(osd, lreq);
2955 }
2956 }
2957
2958 return ct_res;
2959 }
2960
2961 /*
2962 * Requeue requests whose mapping to an OSD has changed.
2963 */
2964 static void scan_requests(struct ceph_osd *osd,
2965 bool force_resend,
2966 bool cleared_full,
2967 bool check_pool_cleared_full,
2968 struct rb_root *need_resend,
2969 struct list_head *need_resend_linger)
2970 {
2971 struct ceph_osd_client *osdc = osd->o_osdc;
2972 struct rb_node *n;
2973 bool force_resend_writes;
2974
2975 for (n = rb_first(&osd->o_linger_requests); n; ) {
2976 struct ceph_osd_linger_request *lreq =
2977 rb_entry(n, struct ceph_osd_linger_request, node);
2978 enum calc_target_result ct_res;
2979
2980 n = rb_next(n); /* recalc_linger_target() */
2981
2982 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
2983 lreq->linger_id);
2984 ct_res = recalc_linger_target(lreq);
2985 switch (ct_res) {
2986 case CALC_TARGET_NO_ACTION:
2987 force_resend_writes = cleared_full ||
2988 (check_pool_cleared_full &&
2989 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
2990 if (!force_resend && !force_resend_writes)
2991 break;
2992
2993 /* fall through */
2994 case CALC_TARGET_NEED_RESEND:
2995 cancel_linger_map_check(lreq);
2996 /*
2997 * scan_requests() for the previous epoch(s)
2998 * may have already added it to the list, since
2999 * it's not unlinked here.
3000 */
3001 if (list_empty(&lreq->scan_item))
3002 list_add_tail(&lreq->scan_item, need_resend_linger);
3003 break;
3004 case CALC_TARGET_POOL_DNE:
3005 check_linger_pool_dne(lreq);
3006 break;
3007 }
3008 }
3009
3010 for (n = rb_first(&osd->o_requests); n; ) {
3011 struct ceph_osd_request *req =
3012 rb_entry(n, struct ceph_osd_request, r_node);
3013 enum calc_target_result ct_res;
3014
3015 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3016
3017 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3018 ct_res = calc_target(osdc, &req->r_t,
3019 &req->r_last_force_resend, false);
3020 switch (ct_res) {
3021 case CALC_TARGET_NO_ACTION:
3022 force_resend_writes = cleared_full ||
3023 (check_pool_cleared_full &&
3024 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3025 if (!force_resend &&
3026 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3027 !force_resend_writes))
3028 break;
3029
3030 /* fall through */
3031 case CALC_TARGET_NEED_RESEND:
3032 cancel_map_check(req);
3033 unlink_request(osd, req);
3034 insert_request(need_resend, req);
3035 break;
3036 case CALC_TARGET_POOL_DNE:
3037 check_pool_dne(req);
3038 break;
3039 }
3040 }
3041 }
3042
3043 static int handle_one_map(struct ceph_osd_client *osdc,
3044 void *p, void *end, bool incremental,
3045 struct rb_root *need_resend,
3046 struct list_head *need_resend_linger)
3047 {
3048 struct ceph_osdmap *newmap;
3049 struct rb_node *n;
3050 bool skipped_map = false;
3051 bool was_full;
3052
3053 was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
3054 set_pool_was_full(osdc);
3055
3056 if (incremental)
3057 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3058 else
3059 newmap = ceph_osdmap_decode(&p, end);
3060 if (IS_ERR(newmap))
3061 return PTR_ERR(newmap);
3062
3063 if (newmap != osdc->osdmap) {
3064 /*
3065 * Preserve ->was_full before destroying the old map.
3066 * For pools that weren't in the old map, ->was_full
3067 * should be false.
3068 */
3069 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3070 struct ceph_pg_pool_info *pi =
3071 rb_entry(n, struct ceph_pg_pool_info, node);
3072 struct ceph_pg_pool_info *old_pi;
3073
3074 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3075 if (old_pi)
3076 pi->was_full = old_pi->was_full;
3077 else
3078 WARN_ON(pi->was_full);
3079 }
3080
3081 if (osdc->osdmap->epoch &&
3082 osdc->osdmap->epoch + 1 < newmap->epoch) {
3083 WARN_ON(incremental);
3084 skipped_map = true;
3085 }
3086
3087 ceph_osdmap_destroy(osdc->osdmap);
3088 osdc->osdmap = newmap;
3089 }
3090
3091 was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
3092 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3093 need_resend, need_resend_linger);
3094
3095 for (n = rb_first(&osdc->osds); n; ) {
3096 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3097
3098 n = rb_next(n); /* close_osd() */
3099
3100 scan_requests(osd, skipped_map, was_full, true, need_resend,
3101 need_resend_linger);
3102 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3103 memcmp(&osd->o_con.peer_addr,
3104 ceph_osd_addr(osdc->osdmap, osd->o_osd),
3105 sizeof(struct ceph_entity_addr)))
3106 close_osd(osd);
3107 }
3108
3109 return 0;
3110 }
3111
3112 static void kick_requests(struct ceph_osd_client *osdc,
3113 struct rb_root *need_resend,
3114 struct list_head *need_resend_linger)
3115 {
3116 struct ceph_osd_linger_request *lreq, *nlreq;
3117 struct rb_node *n;
3118
3119 for (n = rb_first(need_resend); n; ) {
3120 struct ceph_osd_request *req =
3121 rb_entry(n, struct ceph_osd_request, r_node);
3122 struct ceph_osd *osd;
3123
3124 n = rb_next(n);
3125 erase_request(need_resend, req); /* before link_request() */
3126
3127 WARN_ON(req->r_osd);
3128 calc_target(osdc, &req->r_t, NULL, false);
3129 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3130 link_request(osd, req);
3131 if (!req->r_linger) {
3132 if (!osd_homeless(osd) && !req->r_t.paused)
3133 send_request(req);
3134 } else {
3135 cancel_linger_request(req);
3136 }
3137 }
3138
3139 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3140 if (!osd_homeless(lreq->osd))
3141 send_linger(lreq);
3142
3143 list_del_init(&lreq->scan_item);
3144 }
3145 }
3146
3147 /*
3148 * Process updated osd map.
3149 *
3150 * The message contains any number of incremental and full maps, normally
3151 * indicating some sort of topology change in the cluster. Kick requests
3152 * off to different OSDs as needed.
3153 */
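/*
 * MOSDMap layout, as decoded below:
 *
 *   ceph_fsid
 *   u32 nr_inc_maps,  then nr times: u32 epoch, u32 len, <len map bytes>
 *   u32 nr_full_maps, then nr times: u32 epoch, u32 len, <len map bytes>
 *
 * An incremental map is applied only if it chains directly onto our
 * current epoch; otherwise it is ignored and the newest full map, if
 * any, is taken instead.
 */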
3154 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3155 {
3156 void *p = msg->front.iov_base;
3157 void *const end = p + msg->front.iov_len;
3158 u32 nr_maps, maplen;
3159 u32 epoch;
3160 struct ceph_fsid fsid;
3161 struct rb_root need_resend = RB_ROOT;
3162 LIST_HEAD(need_resend_linger);
3163 bool handled_incremental = false;
3164 bool was_pauserd, was_pausewr;
3165 bool pauserd, pausewr;
3166 int err;
3167
3168 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3169 down_write(&osdc->lock);
3170
3171 /* verify fsid */
3172 ceph_decode_need(&p, end, sizeof(fsid), bad);
3173 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3174 if (ceph_check_fsid(osdc->client, &fsid) < 0)
3175 goto bad;
3176
3177 was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
3178 was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
3179 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
3180 have_pool_full(osdc);
3181
3182 /* incremental maps */
3183 ceph_decode_32_safe(&p, end, nr_maps, bad);
3184 dout(" %d inc maps\n", nr_maps);
3185 while (nr_maps > 0) {
3186 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3187 epoch = ceph_decode_32(&p);
3188 maplen = ceph_decode_32(&p);
3189 ceph_decode_need(&p, end, maplen, bad);
3190 if (osdc->osdmap->epoch &&
3191 osdc->osdmap->epoch + 1 == epoch) {
3192 dout("applying incremental map %u len %d\n",
3193 epoch, maplen);
3194 err = handle_one_map(osdc, p, p + maplen, true,
3195 &need_resend, &need_resend_linger);
3196 if (err)
3197 goto bad;
3198 handled_incremental = true;
3199 } else {
3200 dout("ignoring incremental map %u len %d\n",
3201 epoch, maplen);
3202 }
3203 p += maplen;
3204 nr_maps--;
3205 }
3206 if (handled_incremental)
3207 goto done;
3208
3209 /* full maps */
3210 ceph_decode_32_safe(&p, end, nr_maps, bad);
3211 dout(" %d full maps\n", nr_maps);
3212 while (nr_maps) {
3213 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3214 epoch = ceph_decode_32(&p);
3215 maplen = ceph_decode_32(&p);
3216 ceph_decode_need(&p, end, maplen, bad);
3217 if (nr_maps > 1) {
3218 dout("skipping non-latest full map %u len %d\n",
3219 epoch, maplen);
3220 } else if (osdc->osdmap->epoch >= epoch) {
3221 dout("skipping full map %u len %d, "
3222 "older than our %u\n", epoch, maplen,
3223 osdc->osdmap->epoch);
3224 } else {
3225 dout("taking full map %u len %d\n", epoch, maplen);
3226 err = handle_one_map(osdc, p, p + maplen, false,
3227 &need_resend, &need_resend_linger);
3228 if (err)
3229 goto bad;
3230 }
3231 p += maplen;
3232 nr_maps--;
3233 }
3234
3235 done:
3236 /*
3237 * subscribe to subsequent osdmap updates if full to ensure
3238 * we find out when we are no longer full and stop returning
3239 * ENOSPC.
3240 */
3241 pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
3242 pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
3243 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
3244 have_pool_full(osdc);
3245 if (was_pauserd || was_pausewr || pauserd || pausewr)
3246 maybe_request_map(osdc);
3247
3248 kick_requests(osdc, &need_resend, &need_resend_linger);
3249
3250 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3251 osdc->osdmap->epoch);
3252 up_write(&osdc->lock);
3253 wake_up_all(&osdc->client->auth_wq);
3254 return;
3255
3256 bad:
3257 pr_err("osdc handle_map corrupt msg\n");
3258 ceph_msg_dump(msg);
3259 up_write(&osdc->lock);
3260 }
3261
3262 /*
3263 * Resubmit requests pending on the given osd.
3264 */
3265 static void kick_osd_requests(struct ceph_osd *osd)
3266 {
3267 struct rb_node *n;
3268
3269 for (n = rb_first(&osd->o_requests); n; ) {
3270 struct ceph_osd_request *req =
3271 rb_entry(n, struct ceph_osd_request, r_node);
3272
3273 n = rb_next(n); /* cancel_linger_request() */
3274
3275 if (!req->r_linger) {
3276 if (!req->r_t.paused)
3277 send_request(req);
3278 } else {
3279 cancel_linger_request(req);
3280 }
3281 }
3282 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3283 struct ceph_osd_linger_request *lreq =
3284 rb_entry(n, struct ceph_osd_linger_request, node);
3285
3286 send_linger(lreq);
3287 }
3288 }
3289
3290 /*
3291 * If the osd connection drops, we need to resubmit all requests.
3292 */
3293 static void osd_fault(struct ceph_connection *con)
3294 {
3295 struct ceph_osd *osd = con->private;
3296 struct ceph_osd_client *osdc = osd->o_osdc;
3297
3298 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3299
3300 down_write(&osdc->lock);
3301 if (!osd_registered(osd)) {
3302 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3303 goto out_unlock;
3304 }
3305
3306 if (!reopen_osd(osd))
3307 kick_osd_requests(osd);
3308 maybe_request_map(osdc);
3309
3310 out_unlock:
3311 up_write(&osdc->lock);
3312 }
3313
3314 /*
3315 * Process osd watch notifications
3316 */
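/*
 * Message layout, as decoded below:
 *
 *   u8 proto_ver, u8 opcode, u64 cookie, u64 ver (skipped), u64 notify_id
 *   string payload      (proto_ver >= 1)
 *   s32 return_code     (hdr.version >= 2)
 *   u64 notifier_id     (hdr.version >= 3)
 *
 * cookie is the linger_id of the watch or notify that the event is for.
 */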
3317 static void handle_watch_notify(struct ceph_osd_client *osdc,
3318 struct ceph_msg *msg)
3319 {
3320 void *p = msg->front.iov_base;
3321 void *const end = p + msg->front.iov_len;
3322 struct ceph_osd_linger_request *lreq;
3323 struct linger_work *lwork;
3324 u8 proto_ver, opcode;
3325 u64 cookie, notify_id;
3326 u64 notifier_id = 0;
3327 s32 return_code = 0;
3328 void *payload = NULL;
3329 u32 payload_len = 0;
3330
3331 ceph_decode_8_safe(&p, end, proto_ver, bad);
3332 ceph_decode_8_safe(&p, end, opcode, bad);
3333 ceph_decode_64_safe(&p, end, cookie, bad);
3334 p += 8; /* skip ver */
3335 ceph_decode_64_safe(&p, end, notify_id, bad);
3336
3337 if (proto_ver >= 1) {
3338 ceph_decode_32_safe(&p, end, payload_len, bad);
3339 ceph_decode_need(&p, end, payload_len, bad);
3340 payload = p;
3341 p += payload_len;
3342 }
3343
3344 if (le16_to_cpu(msg->hdr.version) >= 2)
3345 ceph_decode_32_safe(&p, end, return_code, bad);
3346
3347 if (le16_to_cpu(msg->hdr.version) >= 3)
3348 ceph_decode_64_safe(&p, end, notifier_id, bad);
3349
3350 down_read(&osdc->lock);
3351 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3352 if (!lreq) {
3353 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3354 cookie);
3355 goto out_unlock_osdc;
3356 }
3357
3358 mutex_lock(&lreq->lock);
3359 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3360 opcode, cookie, lreq, lreq->is_watch);
3361 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3362 if (!lreq->last_error) {
3363 lreq->last_error = -ENOTCONN;
3364 queue_watch_error(lreq);
3365 }
3366 } else if (!lreq->is_watch) {
3367 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3368 if (lreq->notify_id && lreq->notify_id != notify_id) {
3369 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3370 lreq->notify_id, notify_id);
3371 } else if (!completion_done(&lreq->notify_finish_wait)) {
3372 struct ceph_msg_data *data =
3373 list_first_entry_or_null(&msg->data,
3374 struct ceph_msg_data,
3375 links);
3376
3377 if (data) {
3378 if (lreq->preply_pages) {
3379 WARN_ON(data->type !=
3380 CEPH_MSG_DATA_PAGES);
3381 *lreq->preply_pages = data->pages;
3382 *lreq->preply_len = data->length;
3383 } else {
3384 ceph_release_page_vector(data->pages,
3385 calc_pages_for(0, data->length));
3386 }
3387 }
3388 lreq->notify_finish_error = return_code;
3389 complete_all(&lreq->notify_finish_wait);
3390 }
3391 } else {
3392 /* CEPH_WATCH_EVENT_NOTIFY */
3393 lwork = lwork_alloc(lreq, do_watch_notify);
3394 if (!lwork) {
3395 pr_err("failed to allocate notify-lwork\n");
3396 goto out_unlock_lreq;
3397 }
3398
3399 lwork->notify.notify_id = notify_id;
3400 lwork->notify.notifier_id = notifier_id;
3401 lwork->notify.payload = payload;
3402 lwork->notify.payload_len = payload_len;
3403 lwork->notify.msg = ceph_msg_get(msg);
3404 lwork_queue(lwork);
3405 }
3406
3407 out_unlock_lreq:
3408 mutex_unlock(&lreq->lock);
3409 out_unlock_osdc:
3410 up_read(&osdc->lock);
3411 return;
3412
3413 bad:
3414 pr_err("osdc handle_watch_notify corrupt msg\n");
3415 }
3416
3417 /*
3418 * Register request, send initial attempt.
3419 */
3420 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3421 struct ceph_osd_request *req,
3422 bool nofail)
3423 {
3424 down_read(&osdc->lock);
3425 submit_request(req, false);
3426 up_read(&osdc->lock);
3427
3428 return 0;
3429 }
3430 EXPORT_SYMBOL(ceph_osdc_start_request);
3431
3432 /*
3433 * Unregister a registered request. The request is not completed (i.e.
3434 * no callbacks or wakeups) - higher layers are supposed to know what
3435 * they are canceling.
3436 */
3437 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3438 {
3439 struct ceph_osd_client *osdc = req->r_osdc;
3440
3441 down_write(&osdc->lock);
3442 if (req->r_osd)
3443 cancel_request(req);
3444 up_write(&osdc->lock);
3445 }
3446 EXPORT_SYMBOL(ceph_osdc_cancel_request);
3447
3448 /*
3449 * @timeout: in jiffies, 0 means "wait forever"
3450 */
3451 static int wait_request_timeout(struct ceph_osd_request *req,
3452 unsigned long timeout)
3453 {
3454 long left;
3455
3456 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3457 left = wait_for_completion_killable_timeout(&req->r_completion,
3458 ceph_timeout_jiffies(timeout));
3459 if (left <= 0) {
3460 left = left ?: -ETIMEDOUT;
3461 ceph_osdc_cancel_request(req);
3462
3463 /* kludge - need to wake ceph_osdc_sync() */
3464 complete_all(&req->r_safe_completion);
3465 } else {
3466 left = req->r_result; /* completed */
3467 }
3468
3469 return left;
3470 }
3471
3472 /*
3473 * wait for a request to complete
3474 */
3475 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3476 struct ceph_osd_request *req)
3477 {
3478 return wait_request_timeout(req, 0);
3479 }
3480 EXPORT_SYMBOL(ceph_osdc_wait_request);
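
/*
 * Sketch of the usual submit-and-wait pattern (illustrative only;
 * request setup and error handling are elided):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
 *	...set up r_base_oid, r_base_oloc, r_flags and the ops...
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
 *	if (!ret) {
 *		ceph_osdc_start_request(osdc, req, false);
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	}
 *	ceph_osdc_put_request(req);
 *
 * This mirrors what ceph_osdc_notify_ack() below does internally.
 */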
3481
3482 /*
3483 * sync - wait for all in-flight write requests to flush; last_tid is sampled up front so that later submissions can't starve the wait.
3484 */
3485 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3486 {
3487 struct rb_node *n, *p;
3488 u64 last_tid = atomic64_read(&osdc->last_tid);
3489
3490 again:
3491 down_read(&osdc->lock);
3492 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3493 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3494
3495 mutex_lock(&osd->lock);
3496 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3497 struct ceph_osd_request *req =
3498 rb_entry(p, struct ceph_osd_request, r_node);
3499
3500 if (req->r_tid > last_tid)
3501 break;
3502
3503 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3504 continue;
3505
3506 ceph_osdc_get_request(req);
3507 mutex_unlock(&osd->lock);
3508 up_read(&osdc->lock);
3509 dout("%s waiting on req %p tid %llu last_tid %llu\n",
3510 __func__, req, req->r_tid, last_tid);
3511 wait_for_completion(&req->r_safe_completion);
3512 ceph_osdc_put_request(req);
3513 goto again;
3514 }
3515
3516 mutex_unlock(&osd->lock);
3517 }
3518
3519 up_read(&osdc->lock);
3520 dout("%s done last_tid %llu\n", __func__, last_tid);
3521 }
3522 EXPORT_SYMBOL(ceph_osdc_sync);
3523
3524 static struct ceph_osd_request *
3525 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3526 {
3527 struct ceph_osd_request *req;
3528
3529 req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3530 if (!req)
3531 return NULL;
3532
3533 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3534 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3535
3536 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3537 ceph_osdc_put_request(req);
3538 return NULL;
3539 }
3540
3541 return req;
3542 }
3543
3544 /*
3545 * Returns a handle, caller owns a ref.
3546 */
3547 struct ceph_osd_linger_request *
3548 ceph_osdc_watch(struct ceph_osd_client *osdc,
3549 struct ceph_object_id *oid,
3550 struct ceph_object_locator *oloc,
3551 rados_watchcb2_t wcb,
3552 rados_watcherrcb_t errcb,
3553 void *data)
3554 {
3555 struct ceph_osd_linger_request *lreq;
3556 int ret;
3557
3558 lreq = linger_alloc(osdc);
3559 if (!lreq)
3560 return ERR_PTR(-ENOMEM);
3561
3562 lreq->is_watch = true;
3563 lreq->wcb = wcb;
3564 lreq->errcb = errcb;
3565 lreq->data = data;
3566 lreq->watch_valid_thru = jiffies;
3567
3568 ceph_oid_copy(&lreq->t.base_oid, oid);
3569 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3570 lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3571 lreq->mtime = CURRENT_TIME;
3572
3573 lreq->reg_req = alloc_linger_request(lreq);
3574 if (!lreq->reg_req) {
3575 ret = -ENOMEM;
3576 goto err_put_lreq;
3577 }
3578
3579 lreq->ping_req = alloc_linger_request(lreq);
3580 if (!lreq->ping_req) {
3581 ret = -ENOMEM;
3582 goto err_put_lreq;
3583 }
3584
3585 down_write(&osdc->lock);
3586 linger_register(lreq); /* before osd_req_op_* */
3587 osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3588 CEPH_OSD_WATCH_OP_WATCH);
3589 osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3590 CEPH_OSD_WATCH_OP_PING);
3591 linger_submit(lreq);
3592 up_write(&osdc->lock);
3593
3594 ret = linger_reg_commit_wait(lreq);
3595 if (ret) {
3596 linger_cancel(lreq);
3597 goto err_put_lreq;
3598 }
3599
3600 return lreq;
3601
3602 err_put_lreq:
3603 linger_put(lreq);
3604 return ERR_PTR(ret);
3605 }
3606 EXPORT_SYMBOL(ceph_osdc_watch);
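
/*
 * Sketch of watch setup and teardown.  my_watch_cb, my_watch_errcb and
 * my_data are illustrative caller-supplied names, not from this file:
 *
 *	lreq = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
 *			       my_watch_errcb, my_data);
 *	if (IS_ERR(lreq))
 *		return PTR_ERR(lreq);
 *	...
 *	ret = ceph_osdc_unwatch(osdc, lreq);
 *
 * Once my_watch_errcb has fired, the watch is no longer valid and
 * should be torn down with ceph_osdc_unwatch() - see
 * ceph_osdc_watch_check() below.
 */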
3607
3608 /*
3609 * Releases a ref.
3610 *
3611 * Times out after mount_timeout to preserve rbd unmap behaviour
3612 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3613 * with mount_timeout").
3614 */
3615 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3616 struct ceph_osd_linger_request *lreq)
3617 {
3618 struct ceph_options *opts = osdc->client->options;
3619 struct ceph_osd_request *req;
3620 int ret;
3621
3622 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3623 if (!req)
3624 return -ENOMEM;
3625
3626 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3627 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3628 req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3629 req->r_mtime = CURRENT_TIME;
3630 osd_req_op_watch_init(req, 0, lreq->linger_id,
3631 CEPH_OSD_WATCH_OP_UNWATCH);
3632
3633 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3634 if (ret)
3635 goto out_put_req;
3636
3637 ceph_osdc_start_request(osdc, req, false);
3638 linger_cancel(lreq);
3639 linger_put(lreq);
3640 ret = wait_request_timeout(req, opts->mount_timeout);
3641
3642 out_put_req:
3643 ceph_osdc_put_request(req);
3644 return ret;
3645 }
3646 EXPORT_SYMBOL(ceph_osdc_unwatch);
3647
3648 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3649 u64 notify_id, u64 cookie, void *payload,
3650 size_t payload_len)
3651 {
3652 struct ceph_osd_req_op *op;
3653 struct ceph_pagelist *pl;
3654 int ret;
3655
3656 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3657
3658 pl = kmalloc(sizeof(*pl), GFP_NOIO);
3659 if (!pl)
3660 return -ENOMEM;
3661
3662 ceph_pagelist_init(pl);
3663 ret = ceph_pagelist_encode_64(pl, notify_id);
3664 ret |= ceph_pagelist_encode_64(pl, cookie);
3665 if (payload) {
3666 ret |= ceph_pagelist_encode_32(pl, payload_len);
3667 ret |= ceph_pagelist_append(pl, payload, payload_len);
3668 } else {
3669 ret |= ceph_pagelist_encode_32(pl, 0);
3670 }
3671 if (ret) {
3672 ceph_pagelist_release(pl);
3673 return -ENOMEM;
3674 }
3675
3676 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3677 op->indata_len = pl->length;
3678 return 0;
3679 }
3680
3681 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3682 struct ceph_object_id *oid,
3683 struct ceph_object_locator *oloc,
3684 u64 notify_id,
3685 u64 cookie,
3686 void *payload,
3687 size_t payload_len)
3688 {
3689 struct ceph_osd_request *req;
3690 int ret;
3691
3692 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3693 if (!req)
3694 return -ENOMEM;
3695
3696 ceph_oid_copy(&req->r_base_oid, oid);
3697 ceph_oloc_copy(&req->r_base_oloc, oloc);
3698 req->r_flags = CEPH_OSD_FLAG_READ;
3699
3700 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3701 if (ret)
3702 goto out_put_req;
3703
3704 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3705 payload_len);
3706 if (ret)
3707 goto out_put_req;
3708
3709 ceph_osdc_start_request(osdc, req, false);
3710 ret = ceph_osdc_wait_request(osdc, req);
3711
3712 out_put_req:
3713 ceph_osdc_put_request(req);
3714 return ret;
3715 }
3716 EXPORT_SYMBOL(ceph_osdc_notify_ack);
3717
3718 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3719 u64 cookie, u32 prot_ver, u32 timeout,
3720 void *payload, size_t payload_len)
3721 {
3722 struct ceph_osd_req_op *op;
3723 struct ceph_pagelist *pl;
3724 int ret;
3725
3726 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3727 op->notify.cookie = cookie;
3728
3729 pl = kmalloc(sizeof(*pl), GFP_NOIO);
3730 if (!pl)
3731 return -ENOMEM;
3732
3733 ceph_pagelist_init(pl);
3734 ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3735 ret |= ceph_pagelist_encode_32(pl, timeout);
3736 ret |= ceph_pagelist_encode_32(pl, payload_len);
3737 ret |= ceph_pagelist_append(pl, payload, payload_len);
3738 if (ret) {
3739 ceph_pagelist_release(pl);
3740 return -ENOMEM;
3741 }
3742
3743 ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3744 op->indata_len = pl->length;
3745 return 0;
3746 }
3747
3748 /*
3749 * @timeout: in seconds
3750 *
3751 * @preply_{pages,len} are initialized both on success and error.
3752 * The caller is responsible for:
3753 *
3754 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3755 */
3756 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3757 struct ceph_object_id *oid,
3758 struct ceph_object_locator *oloc,
3759 void *payload,
3760 size_t payload_len,
3761 u32 timeout,
3762 struct page ***preply_pages,
3763 size_t *preply_len)
3764 {
3765 struct ceph_osd_linger_request *lreq;
3766 struct page **pages;
3767 int ret;
3768
3769 WARN_ON(!timeout);
3770 if (preply_pages) {
3771 *preply_pages = NULL;
3772 *preply_len = 0;
3773 }
3774
3775 lreq = linger_alloc(osdc);
3776 if (!lreq)
3777 return -ENOMEM;
3778
3779 lreq->preply_pages = preply_pages;
3780 lreq->preply_len = preply_len;
3781
3782 ceph_oid_copy(&lreq->t.base_oid, oid);
3783 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3784 lreq->t.flags = CEPH_OSD_FLAG_READ;
3785
3786 lreq->reg_req = alloc_linger_request(lreq);
3787 if (!lreq->reg_req) {
3788 ret = -ENOMEM;
3789 goto out_put_lreq;
3790 }
3791
3792 /* for notify_id */
3793 pages = ceph_alloc_page_vector(1, GFP_NOIO);
3794 if (IS_ERR(pages)) {
3795 ret = PTR_ERR(pages);
3796 goto out_put_lreq;
3797 }
3798
3799 down_write(&osdc->lock);
3800 linger_register(lreq); /* before osd_req_op_* */
3801 ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3802 timeout, payload, payload_len);
3803 if (ret) {
3804 linger_unregister(lreq);
3805 up_write(&osdc->lock);
3806 ceph_release_page_vector(pages, 1);
3807 goto out_put_lreq;
3808 }
3809 ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3810 response_data),
3811 pages, PAGE_SIZE, 0, false, true);
3812 linger_submit(lreq);
3813 up_write(&osdc->lock);
3814
3815 ret = linger_reg_commit_wait(lreq);
3816 if (!ret)
3817 ret = linger_notify_finish_wait(lreq);
3818 else
3819 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3820
3821 linger_cancel(lreq);
3822 out_put_lreq:
3823 linger_put(lreq);
3824 return ret;
3825 }
3826 EXPORT_SYMBOL(ceph_osdc_notify);
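
/*
 * Sketch of a notify that collects replies.  buf, buf_len and
 * timeout_secs are illustrative caller-supplied names:
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, buf, buf_len,
 *			       timeout_secs, &reply_pages, &reply_len);
 *	...consume reply_pages...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 *
 * Per the comment above ceph_osdc_notify(), the reply buffer must be
 * released on both success and error.
 */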
3827
3828 /*
3829 * Return the number of milliseconds since the watch was last
3830 * confirmed, or an error. If there is an error, the watch is no
3831 * longer valid, and should be destroyed with ceph_osdc_unwatch().
3832 */
3833 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3834 struct ceph_osd_linger_request *lreq)
3835 {
3836 unsigned long stamp, age;
3837 int ret;
3838
3839 down_read(&osdc->lock);
3840 mutex_lock(&lreq->lock);
3841 stamp = lreq->watch_valid_thru;
3842 if (!list_empty(&lreq->pending_lworks)) {
3843 struct linger_work *lwork =
3844 list_first_entry(&lreq->pending_lworks,
3845 struct linger_work,
3846 pending_item);
3847
3848 if (time_before(lwork->queued_stamp, stamp))
3849 stamp = lwork->queued_stamp;
3850 }
3851 age = jiffies - stamp;
3852 dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3853 lreq, lreq->linger_id, age, lreq->last_error);
3854 /* we are truncating to msecs, so return a safe upper bound */
3855 ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3856
3857 mutex_unlock(&lreq->lock);
3858 up_read(&osdc->lock);
3859 return ret;
3860 }
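
/*
 * Usage sketch (hypothetical): deciding whether a watch set up with
 * ceph_osdc_watch() is still usable. The 30-second staleness bound is
 * an illustrative assumption.
 */
static bool example_watch_still_valid(struct ceph_osd_client *osdc,
                                      struct ceph_osd_linger_request *lreq)
{
        int ret = ceph_osdc_watch_check(osdc, lreq);

        if (ret < 0)
                return false;   /* invalid - destroy with ceph_osdc_unwatch() */

        /* ret is a safe upper bound on ms since last confirmation */
        return ret < 30 * 1000;
}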
3861
3862 /*
3863 * Call all pending notify callbacks - for use after a watch is
3864 * unregistered, to make sure no more callbacks for it will be invoked.
3865 */
3866 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
3867 {
3868 flush_workqueue(osdc->notify_wq);
3869 }
3870 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
3871
3872 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
3873 {
3874 down_read(&osdc->lock);
3875 maybe_request_map(osdc);
3876 up_read(&osdc->lock);
3877 }
3878 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
3879
3880 /*
3881 * init, shutdown
3882 */
3883 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
3884 {
3885 int err;
3886
3887 dout("init\n");
3888 osdc->client = client;
3889 init_rwsem(&osdc->lock);
3890 osdc->osds = RB_ROOT;
3891 INIT_LIST_HEAD(&osdc->osd_lru);
3892 spin_lock_init(&osdc->osd_lru_lock);
3893 osd_init(&osdc->homeless_osd);
3894 osdc->homeless_osd.o_osdc = osdc;
3895 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
3896 osdc->linger_requests = RB_ROOT;
3897 osdc->map_checks = RB_ROOT;
3898 osdc->linger_map_checks = RB_ROOT;
3899 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
3900 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
3901
3902 err = -ENOMEM;
3903 osdc->osdmap = ceph_osdmap_alloc();
3904 if (!osdc->osdmap)
3905 goto out;
3906
3907 osdc->req_mempool = mempool_create_slab_pool(10,
3908 ceph_osd_request_cache);
3909 if (!osdc->req_mempool)
3910 goto out_map;
3911
3912 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
3913 PAGE_SIZE, 10, true, "osd_op");
3914 if (err < 0)
3915 goto out_mempool;
3916 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
3917 PAGE_SIZE, 10, true, "osd_op_reply");
3918 if (err < 0)
3919 goto out_msgpool;
3920
3921 err = -ENOMEM;
3922 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
3923 if (!osdc->notify_wq)
3924 goto out_msgpool_reply;
3925
3926 schedule_delayed_work(&osdc->timeout_work,
3927 osdc->client->options->osd_keepalive_timeout);
3928 schedule_delayed_work(&osdc->osds_timeout_work,
3929 round_jiffies_relative(osdc->client->options->osd_idle_ttl));
3930
3931 return 0;
3932
3933 out_msgpool_reply:
3934 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
3935 out_msgpool:
3936 ceph_msgpool_destroy(&osdc->msgpool_op);
3937 out_mempool:
3938 mempool_destroy(osdc->req_mempool);
3939 out_map:
3940 ceph_osdmap_destroy(osdc->osdmap);
3941 out:
3942 return err;
3943 }
3944
3945 void ceph_osdc_stop(struct ceph_osd_client *osdc)
3946 {
3947 flush_workqueue(osdc->notify_wq);
3948 destroy_workqueue(osdc->notify_wq);
3949 cancel_delayed_work_sync(&osdc->timeout_work);
3950 cancel_delayed_work_sync(&osdc->osds_timeout_work);
3951
3952 down_write(&osdc->lock);
3953 while (!RB_EMPTY_ROOT(&osdc->osds)) {
3954 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
3955 struct ceph_osd, o_node);
3956 close_osd(osd);
3957 }
3958 up_write(&osdc->lock);
3959 WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
3960 osd_cleanup(&osdc->homeless_osd);
3961
3962 WARN_ON(!list_empty(&osdc->osd_lru));
3963 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
3964 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
3965 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
3966 WARN_ON(atomic_read(&osdc->num_requests));
3967 WARN_ON(atomic_read(&osdc->num_homeless));
3968
3969 ceph_osdmap_destroy(osdc->osdmap);
3970 mempool_destroy(osdc->req_mempool);
3971 ceph_msgpool_destroy(&osdc->msgpool_op);
3972 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
3973 }
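
/*
 * Lifecycle sketch (hypothetical, condensed): the osd_client embedded in
 * a ceph_client is brought up with ceph_osdc_init() and torn down with
 * ceph_osdc_stop() once all requests and lingers have been cleaned up.
 */
static int example_osdc_lifecycle(struct ceph_client *client)
{
        int ret;

        ret = ceph_osdc_init(&client->osdc, client);
        if (ret)
                return ret;

        /* ... submit requests, wait for completion, unwatch, etc ... */

        ceph_osdc_stop(&client->osdc);
        return 0;
}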
3974
3975 /*
3976 * Read some contiguous pages. If we cross a stripe boundary, shorten
3977 * *plen. Return the number of bytes read, or a negative error code.
3978 */
3979 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
3980 struct ceph_vino vino, struct ceph_file_layout *layout,
3981 u64 off, u64 *plen,
3982 u32 truncate_seq, u64 truncate_size,
3983 struct page **pages, int num_pages, int page_align)
3984 {
3985 struct ceph_osd_request *req;
3986 int rc = 0;
3987
3988 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
3989 vino.snap, off, *plen);
3990 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
3991 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
3992 NULL, truncate_seq, truncate_size,
3993 false);
3994 if (IS_ERR(req))
3995 return PTR_ERR(req);
3996
3997 /* it may be a short read due to an object boundary */
3998 osd_req_op_extent_osd_data_pages(req, 0,
3999 pages, *plen, page_align, false, false);
4000
4001 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
4002 off, *plen, *plen, page_align);
4003
4004 rc = ceph_osdc_start_request(osdc, req, false);
4005 if (!rc)
4006 rc = ceph_osdc_wait_request(osdc, req);
4007
4008 ceph_osdc_put_request(req);
4009 dout("readpages result %d\n", rc);
4010 return rc;
4011 }
4012 EXPORT_SYMBOL(ceph_osdc_readpages);
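
/*
 * Usage sketch for ceph_osdc_readpages() - a hypothetical caller reading
 * a single page with no truncation state (truncate_seq/truncate_size 0).
 */
static int example_read_one_page(struct ceph_osd_client *osdc,
                                 struct ceph_vino vino,
                                 struct ceph_file_layout *layout,
                                 struct page *page, u64 off)
{
        u64 len = PAGE_SIZE;
        int rc;

        rc = ceph_osdc_readpages(osdc, vino, layout, off, &len, 0, 0,
                                 &page, 1, 0 /* page_align */);

        /* rc is the number of bytes read (possibly short), or an error */
        return rc < 0 ? rc : 0;
}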
4013
4014 /*
4015 * Do a synchronous write on N pages; return the number of bytes
4016 * written, or a negative error code.
4016 */
4017 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4018 struct ceph_file_layout *layout,
4019 struct ceph_snap_context *snapc,
4020 u64 off, u64 len,
4021 u32 truncate_seq, u64 truncate_size,
4022 struct timespec *mtime,
4023 struct page **pages, int num_pages)
4024 {
4025 struct ceph_osd_request *req;
4026 int rc = 0;
4027 int page_align = off & ~PAGE_MASK;
4028
4029 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4030 CEPH_OSD_OP_WRITE,
4031 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
4032 snapc, truncate_seq, truncate_size,
4033 true);
4034 if (IS_ERR(req))
4035 return PTR_ERR(req);
4036
4037 /* it may be a short write due to an object boundary */
4038 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4039 false, false);
4040 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4041
4042 req->r_mtime = *mtime;
4043 rc = ceph_osdc_start_request(osdc, req, true);
4044 if (!rc)
4045 rc = ceph_osdc_wait_request(osdc, req);
4046
4047 ceph_osdc_put_request(req);
4048 if (rc == 0)
4049 rc = len;
4050 dout("writepages result %d\n", rc);
4051 return rc;
4052 }
4053 EXPORT_SYMBOL(ceph_osdc_writepages);
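
/*
 * Usage sketch for ceph_osdc_writepages() - hypothetical; snapc would
 * normally come from the caller's snap context, and the mtime here is
 * simply the current kernel time.
 */
static int example_write_one_page(struct ceph_osd_client *osdc,
                                  struct ceph_vino vino,
                                  struct ceph_file_layout *layout,
                                  struct ceph_snap_context *snapc,
                                  struct page *page, u64 off)
{
        struct timespec mtime = CURRENT_TIME;
        int rc;

        rc = ceph_osdc_writepages(osdc, vino, layout, snapc, off, PAGE_SIZE,
                                  0, 0, &mtime, &page, 1);

        /* rc is the number of bytes written, or a negative error */
        return rc < 0 ? rc : 0;
}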
4054
4055 int ceph_osdc_setup(void)
4056 {
4057 size_t size = sizeof(struct ceph_osd_request) +
4058 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4059
4060 BUG_ON(ceph_osd_request_cache);
4061 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4062 0, 0, NULL);
4063
4064 return ceph_osd_request_cache ? 0 : -ENOMEM;
4065 }
4066 EXPORT_SYMBOL(ceph_osdc_setup);
4067
4068 void ceph_osdc_cleanup(void)
4069 {
4070 BUG_ON(!ceph_osd_request_cache);
4071 kmem_cache_destroy(ceph_osd_request_cache);
4072 ceph_osd_request_cache = NULL;
4073 }
4074 EXPORT_SYMBOL(ceph_osdc_cleanup);
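
/*
 * Usage sketch: ceph_osdc_setup() and ceph_osdc_cleanup() bracket all
 * other osd_client activity, once per module load - a condensed,
 * hypothetical version of what libceph's module init/exit does.
 */
static int __init example_module_init(void)
{
        int ret = ceph_osdc_setup();    /* creates ceph_osd_request_cache */

        if (ret)
                return ret;
        /* ... remaining module initialization ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... remaining module teardown ... */
        ceph_osdc_cleanup();            /* destroys ceph_osd_request_cache */
}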
4075
4076 /*
4077 * handle incoming message
4078 */
4079 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4080 {
4081 struct ceph_osd *osd = con->private;
4082 struct ceph_osd_client *osdc = osd->o_osdc;
4083 int type = le16_to_cpu(msg->hdr.type);
4084
4085 switch (type) {
4086 case CEPH_MSG_OSD_MAP:
4087 ceph_osdc_handle_map(osdc, msg);
4088 break;
4089 case CEPH_MSG_OSD_OPREPLY:
4090 handle_reply(osd, msg);
4091 break;
4092 case CEPH_MSG_WATCH_NOTIFY:
4093 handle_watch_notify(osdc, msg);
4094 break;
4095
4096 default:
4097 pr_err("received unknown message type %d %s\n", type,
4098 ceph_msg_type_name(type));
4099 }
4100
4101 ceph_msg_put(msg);
4102 }
4103
4104 /*
4105 * Look up and return the message for an incoming reply. Don't try to
4106 * handle a data portion that is larger than what was preallocated -
4107 * for now, just skip such a message.
4108 */
4109 static struct ceph_msg *get_reply(struct ceph_connection *con,
4110 struct ceph_msg_header *hdr,
4111 int *skip)
4112 {
4113 struct ceph_osd *osd = con->private;
4114 struct ceph_osd_client *osdc = osd->o_osdc;
4115 struct ceph_msg *m = NULL;
4116 struct ceph_osd_request *req;
4117 int front_len = le32_to_cpu(hdr->front_len);
4118 int data_len = le32_to_cpu(hdr->data_len);
4119 u64 tid = le64_to_cpu(hdr->tid);
4120
4121 down_read(&osdc->lock);
4122 if (!osd_registered(osd)) {
4123 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4124 *skip = 1;
4125 goto out_unlock_osdc;
4126 }
4127 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4128
4129 mutex_lock(&osd->lock);
4130 req = lookup_request(&osd->o_requests, tid);
4131 if (!req) {
4132 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4133 osd->o_osd, tid);
4134 *skip = 1;
4135 goto out_unlock_session;
4136 }
4137
4138 ceph_msg_revoke_incoming(req->r_reply);
4139
4140 if (front_len > req->r_reply->front_alloc_len) {
4141 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4142 __func__, osd->o_osd, req->r_tid, front_len,
4143 req->r_reply->front_alloc_len);
4144 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4145 false);
4146 if (!m)
4147 goto out_unlock_session;
4148 ceph_msg_put(req->r_reply);
4149 req->r_reply = m;
4150 }
4151
4152 if (data_len > req->r_reply->data_length) {
4153 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4154 __func__, osd->o_osd, req->r_tid, data_len,
4155 req->r_reply->data_length);
4156 m = NULL;
4157 *skip = 1;
4158 goto out_unlock_session;
4159 }
4160
4161 m = ceph_msg_get(req->r_reply);
4162 dout("get_reply tid %lld %p\n", tid, m);
4163
4164 out_unlock_session:
4165 mutex_unlock(&osd->lock);
4166 out_unlock_osdc:
4167 up_read(&osdc->lock);
4168 return m;
4169 }
4170
4171 /*
4172 * TODO: switch to a msg-owned pagelist
4173 */
4174 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4175 {
4176 struct ceph_msg *m;
4177 int type = le16_to_cpu(hdr->type);
4178 u32 front_len = le32_to_cpu(hdr->front_len);
4179 u32 data_len = le32_to_cpu(hdr->data_len);
4180
4181 m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4182 if (!m)
4183 return NULL;
4184
4185 if (data_len) {
4186 struct page **pages;
4187 struct ceph_osd_data osd_data;
4188
4189 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4190 GFP_NOIO);
4191 if (IS_ERR(pages)) {
4192 ceph_msg_put(m);
4193 return NULL;
4194 }
4195
4196 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4197 false);
4198 ceph_osdc_msg_data_add(m, &osd_data);
4199 }
4200
4201 return m;
4202 }
4203
4204 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4205 struct ceph_msg_header *hdr,
4206 int *skip)
4207 {
4208 struct ceph_osd *osd = con->private;
4209 int type = le16_to_cpu(hdr->type);
4210
4211 *skip = 0;
4212 switch (type) {
4213 case CEPH_MSG_OSD_MAP:
4214 case CEPH_MSG_WATCH_NOTIFY:
4215 return alloc_msg_with_page_vector(hdr);
4216 case CEPH_MSG_OSD_OPREPLY:
4217 return get_reply(con, hdr, skip);
4218 default:
4219 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4220 osd->o_osd, type);
4221 *skip = 1;
4222 return NULL;
4223 }
4224 }
4225
4226 /*
4227 * Wrappers to take and drop a reference on the containing ceph_osd struct
4228 */
4229 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4230 {
4231 struct ceph_osd *osd = con->private;
4232 if (get_osd(osd))
4233 return con;
4234 return NULL;
4235 }
4236
4237 static void put_osd_con(struct ceph_connection *con)
4238 {
4239 struct ceph_osd *osd = con->private;
4240 put_osd(osd);
4241 }
4242
4243 /*
4244 * authentication
4245 */
4246 /*
4247 * Note: returned pointer is the address of a structure that's
4248 * managed separately. Caller must *not* attempt to free it.
4249 */
4250 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4251 int *proto, int force_new)
4252 {
4253 struct ceph_osd *o = con->private;
4254 struct ceph_osd_client *osdc = o->o_osdc;
4255 struct ceph_auth_client *ac = osdc->client->monc.auth;
4256 struct ceph_auth_handshake *auth = &o->o_auth;
4257
4258 if (force_new && auth->authorizer) {
4259 ceph_auth_destroy_authorizer(auth->authorizer);
4260 auth->authorizer = NULL;
4261 }
4262 if (!auth->authorizer) {
4263 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4264 auth);
4265 if (ret)
4266 return ERR_PTR(ret);
4267 } else {
4268 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4269 auth);
4270 if (ret)
4271 return ERR_PTR(ret);
4272 }
4273 *proto = ac->protocol;
4274
4275 return auth;
4276 }
4277
4278
4279 static int verify_authorizer_reply(struct ceph_connection *con, int len)
4280 {
4281 struct ceph_osd *o = con->private;
4282 struct ceph_osd_client *osdc = o->o_osdc;
4283 struct ceph_auth_client *ac = osdc->client->monc.auth;
4284
4285 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4286 }
4287
4288 static int invalidate_authorizer(struct ceph_connection *con)
4289 {
4290 struct ceph_osd *o = con->private;
4291 struct ceph_osd_client *osdc = o->o_osdc;
4292 struct ceph_auth_client *ac = osdc->client->monc.auth;
4293
4294 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4295 return ceph_monc_validate_auth(&osdc->client->monc);
4296 }
4297
4298 static int osd_sign_message(struct ceph_msg *msg)
4299 {
4300 struct ceph_osd *o = msg->con->private;
4301 struct ceph_auth_handshake *auth = &o->o_auth;
4302
4303 return ceph_auth_sign_message(auth, msg);
4304 }
4305
4306 static int osd_check_message_signature(struct ceph_msg *msg)
4307 {
4308 struct ceph_osd *o = msg->con->private;
4309 struct ceph_auth_handshake *auth = &o->o_auth;
4310
4311 return ceph_auth_check_message_signature(auth, msg);
4312 }
4313
4314 static const struct ceph_connection_operations osd_con_ops = {
4315 .get = get_osd_con,
4316 .put = put_osd_con,
4317 .dispatch = dispatch,
4318 .get_authorizer = get_authorizer,
4319 .verify_authorizer_reply = verify_authorizer_reply,
4320 .invalidate_authorizer = invalidate_authorizer,
4321 .alloc_msg = alloc_msg,
4322 .sign_message = osd_sign_message,
4323 .check_message_signature = osd_check_message_signature,
4324 .fault = osd_fault,
4325 };