#include "ceph_debug.h"

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "super.h"
#include "osd_client.h"
#include "messenger.h"
#include "decode.h"
static const struct ceph_connection_operations osd_con_ops;
static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static void calc_layout(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        struct ceph_osd_op *op = (void *)(reqhead + 1);
        u64 orig_len = *plen;
        u64 bno = 0;
        u64 objoff, objlen;    /* extent in object */

        reqhead->snapid = cpu_to_le64(vino.snap);

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, plen, &bno,
                                      &objoff, &objlen);
        if (*plen < orig_len)
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);

        sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
        req->r_oid_len = strlen(req->r_oid);

        op->extent.offset = cpu_to_le64(objoff);
        op->extent.length = cpu_to_le64(objlen);
        req->r_num_pages = calc_pages_for(off, *plen);

        dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
             req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
}
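
/*
 * Worked example (illustrative numbers only, assuming a default layout
 * of 4 MB objects with no striping): a read of off=4190208 (4 MB - 4 KB)
 * with *plen=8192 crosses the boundary of object 0, so
 * ceph_calc_file_object_mapping() returns bno=0, objoff=4190208,
 * objlen=4096 and shortens *plen to 4096.  The caller is expected to
 * issue a second request for the remainder.
 */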
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        dout("osdc put_request %p %d -> %d\n", req, atomic_read(&req->r_ref),
             atomic_read(&req->r_ref)-1);
        BUG_ON(atomic_read(&req->r_ref) <= 0);
        if (atomic_dec_and_test(&req->r_ref)) {
                if (req->r_request)
                        ceph_msg_put(req->r_request);
                if (req->r_reply)
                        ceph_msg_put(req->r_reply);
                if (req->r_own_pages)
                        ceph_release_page_vector(req->r_pages,
                                                 req->r_num_pages);
                ceph_put_snap_context(req->r_snapc);
                if (req->r_mempool)
                        mempool_free(req, req->r_osdc->req_mempool);
                else
                        kfree(req);
        }
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 *
 * if @do_sync, include a 'startsync' command so that the osd will flush
 * data quickly.
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               int do_sync,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               struct timespec *mtime,
                                               bool use_mempool, int num_reply)
{
        struct ceph_osd_request *req;
        struct ceph_msg *msg;
        struct ceph_osd_request_head *head;
        struct ceph_osd_op *op;
        void *p;
        int do_trunc = truncate_seq && (off + *plen > truncate_size);
        int num_op = 1 + do_sync + do_trunc;
        size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
        int err, i;
        u64 prevofs;

        if (use_mempool) {
                req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
                memset(req, 0, sizeof(*req));
        } else {
                req = kzalloc(sizeof(*req), GFP_NOFS);
        }
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
        if (err) {
                ceph_osdc_put_request(req);
                return ERR_PTR(-ENOMEM);
        }

        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        atomic_set(&req->r_ref, 1);
        init_completion(&req->r_completion);
        init_completion(&req->r_safe_completion);
        INIT_LIST_HEAD(&req->r_unsafe_item);
        req->r_flags = flags;

        WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);

        /* create message; allow space for oid */
        msg_size += 40;
        if (snapc)
                msg_size += sizeof(u64) * snapc->num_snaps;
        if (use_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
        else
                msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
        if (IS_ERR(msg)) {
                ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply);
                ceph_osdc_put_request(req);
                return ERR_PTR(PTR_ERR(msg));
        }
        msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
        memset(msg->front.iov_base, 0, msg->front.iov_len);
        head = msg->front.iov_base;
        op = (void *)(head + 1);
        p = (void *)(op + num_op);

        req->r_request = msg;
        req->r_snapc = ceph_get_snap_context(snapc);

        head->client_inc = cpu_to_le32(1); /* always, for now. */
        head->flags = cpu_to_le32(flags);
        if (flags & CEPH_OSD_FLAG_WRITE)
                ceph_encode_timespec(&head->mtime, mtime);
        head->num_ops = cpu_to_le16(num_op);
        op->op = cpu_to_le16(opcode);

        /* calculate max write size */
        calc_layout(osdc, vino, layout, off, plen, req);
        req->r_file_layout = *layout;  /* keep a copy */

        if (flags & CEPH_OSD_FLAG_WRITE) {
                req->r_request->hdr.data_off = cpu_to_le16(off);
                req->r_request->hdr.data_len = cpu_to_le32(*plen);
                op->payload_len = cpu_to_le32(*plen);
        }

        /* fill in oid */
        head->object_len = cpu_to_le32(req->r_oid_len);
        memcpy(p, req->r_oid, req->r_oid_len);
        p += req->r_oid_len;

        if (do_trunc) {
                op++;
                op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ?
                        CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC);
                op->trunc.truncate_seq = cpu_to_le32(truncate_seq);
                prevofs = le64_to_cpu((op-1)->extent.offset);
                op->trunc.truncate_size = cpu_to_le64(truncate_size -
                                                      (off-prevofs));
        }
        if (do_sync) {
                op++;
                op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
        }
        if (snapc) {
                head->snap_seq = cpu_to_le64(snapc->seq);
                head->num_snaps = cpu_to_le32(snapc->num_snaps);
                for (i = 0; i < snapc->num_snaps; i++) {
                        put_unaligned_le64(snapc->snaps[i], p);
                        p += sizeof(u64);
                }
        }

        BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
        msg->front.iov_len = p - msg->front.iov_base;
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
        return req;
}
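
/*
 * The message front built above is laid out, in order:
 *
 *      struct ceph_osd_request_head
 *      num_op x struct ceph_osd_op  (extent op, then optional trunc/sync ops)
 *      object name (r_oid_len bytes)
 *      num_snaps x __le64 snap ids  (only if a snap context was given)
 */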
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
static void __insert_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *new)
{
        struct rb_node **p = &osdc->requests.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd_request *req = NULL;

        while (*p) {
                parent = *p;
                req = rb_entry(parent, struct ceph_osd_request, r_node);
                if (new->r_tid < req->r_tid)
                        p = &(*p)->rb_left;
                else if (new->r_tid > req->r_tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->r_node, parent, p);
        rb_insert_color(&new->r_node, &osdc->requests);
}
static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
                                                 u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid)
                        n = n->rb_left;
                else if (tid > req->r_tid)
                        n = n->rb_right;
                else
                        return req;
        }
        return NULL;
}
static struct ceph_osd_request *
__lookup_request_ge(struct ceph_osd_client *osdc,
                    u64 tid)
{
        struct ceph_osd_request *req;
        struct rb_node *n = osdc->requests.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_osd_request, r_node);
                if (tid < req->r_tid) {
                        if (!n->rb_left)
                                return req;
                        n = n->rb_left;
                } else if (tid > req->r_tid) {
                        n = n->rb_right;
                } else {
                        return req;
                }
        }
        return NULL;
}
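
/*
 * Unlike __lookup_request(), the _ge variant returns the request with
 * the smallest tid >= the given tid.  This lets a caller (e.g.
 * ceph_osdc_sync()) walk the tree in tid order without holding
 * request_mutex across a wait: it only needs to remember the next tid.
 */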
/*
 * If the osd connection drops, we need to resubmit all requests.
 */
static void osd_reset(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;

        if (!osd)
                return;
        dout("osd_reset osd%d\n", osd->o_osd);
        osdc = osd->o_osdc;
        osd->o_incarnation++;
        down_read(&osdc->map_sem);
        kick_requests(osdc, osd);
        up_read(&osdc->map_sem);
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
{
        struct ceph_osd *osd;

        osd = kzalloc(sizeof(*osd), GFP_NOFS);
        if (!osd)
                return NULL;

        atomic_set(&osd->o_ref, 1);
        osd->o_osdc = osdc;
        INIT_LIST_HEAD(&osd->o_requests);
        osd->o_incarnation = 1;

        ceph_con_init(osdc->client->msgr, &osd->o_con);
        osd->o_con.private = osd;
        osd->o_con.ops = &osd_con_ops;
        osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;

        return osd;
}
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (atomic_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
                     atomic_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}
static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
        if (atomic_dec_and_test(&osd->o_ref)) {
                ceph_con_shutdown(&osd->o_con);
                kfree(osd);
        }
}
/*
 * remove an osd from our map
 */
static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        dout("remove_osd %p\n", osd);
        BUG_ON(!list_empty(&osd->o_requests));
        rb_erase(&osd->o_node, &osdc->osds);
        ceph_con_close(&osd->o_con);
        put_osd(osd);
}
/*
 * reset osd connect
 */
static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
        int ret = 0;

        dout("reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests)) {
                remove_osd(osdc, osd);
        } else {
                ceph_con_close(&osd->o_con);
                ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
                osd->o_incarnation++;
        }
        return ret;
}
static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
{
        struct rb_node **p = &osdc->osds.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;

        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
                if (new->o_osd < osd->o_osd)
                        p = &(*p)->rb_left;
                else if (new->o_osd > osd->o_osd)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->o_node, parent, p);
        rb_insert_color(&new->o_node, &osdc->osds);
}
static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
{
        struct ceph_osd *osd;
        struct rb_node *n = osdc->osds.rb_node;

        while (n) {
                osd = rb_entry(n, struct ceph_osd, o_node);
                if (o < osd->o_osd)
                        n = n->rb_left;
                else if (o > osd->o_osd)
                        n = n->rb_right;
                else
                        return osd;
        }
        return NULL;
}
/*
 * Register request, assign tid.  If this is the first request, set up
 * the timeout event.
 */
static void register_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *head = req->r_request->front.iov_base;

        mutex_lock(&osdc->request_mutex);
        req->r_tid = ++osdc->last_tid;
        head->tid = cpu_to_le64(req->r_tid);

        dout("register_request %p tid %lld\n", req, req->r_tid);
        __insert_request(osdc, req);
        ceph_osdc_get_request(req);
        osdc->num_requests++;

        req->r_timeout_stamp =
                jiffies + osdc->client->mount_args.osd_timeout*HZ;

        if (osdc->num_requests == 1) {
                osdc->timeout_tid = req->r_tid;
                dout(" timeout on tid %llu at %lu\n", req->r_tid,
                     req->r_timeout_stamp);
                schedule_delayed_work(&osdc->timeout_work,
                      round_jiffies_relative(req->r_timeout_stamp - jiffies));
        }
        mutex_unlock(&osdc->request_mutex);
}
/*
 * called under osdc->request_mutex
 */
static void __unregister_request(struct ceph_osd_client *osdc,
                                 struct ceph_osd_request *req)
{
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &osdc->requests);
        osdc->num_requests--;

        if (req->r_osd) {
                /* make sure the original request isn't in flight. */
                ceph_con_revoke(&req->r_osd->o_con, req->r_request);

                list_del_init(&req->r_osd_item);
                if (list_empty(&req->r_osd->o_requests))
                        remove_osd(osdc, req->r_osd);
                req->r_osd = NULL;
        }

        ceph_osdc_put_request(req);

        if (req->r_tid == osdc->timeout_tid) {
                if (osdc->num_requests == 0) {
                        dout("no requests, canceling timeout\n");
                        osdc->timeout_tid = 0;
                        cancel_delayed_work(&osdc->timeout_work);
                } else {
                        req = rb_entry(rb_first(&osdc->requests),
                                       struct ceph_osd_request, r_node);
                        osdc->timeout_tid = req->r_tid;
                        dout("rescheduled timeout on tid %llu at %lu\n",
                             req->r_tid, req->r_timeout_stamp);
                        schedule_delayed_work(&osdc->timeout_work,
                              round_jiffies_relative(req->r_timeout_stamp -
                                                     jiffies));
                }
        }
}
/*
 * Cancel a previously queued request message
 */
static void __cancel_request(struct ceph_osd_request *req)
{
        if (req->r_sent) {
                ceph_con_revoke(&req->r_osd->o_con, req->r_request);
                req->r_sent = 0;
        }
}
/*
 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
 * (as needed), and set the request r_osd appropriately.  If there is
 * no up osd, set r_osd to NULL.
 *
 * Return 0 if unchanged, 1 if changed, or negative on error.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static int __map_osds(struct ceph_osd_client *osdc,
                      struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
        union ceph_pg pgid;
        int o = -1;
        int err;
        struct ceph_osd *newosd = NULL;

        dout("map_osds %p tid %lld\n", req, req->r_tid);
        err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
                                      &req->r_file_layout, osdc->osdmap);
        if (err)
                return err;
        pgid.pg64 = le64_to_cpu(reqhead->layout.ol_pgid);
        o = ceph_calc_pg_primary(osdc->osdmap, pgid);

        if ((req->r_osd && req->r_osd->o_osd == o &&
             req->r_sent >= req->r_osd->o_incarnation) ||
            (req->r_osd == NULL && o == -1))
                return 0;  /* no change */

        dout("map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n",
             req->r_tid, pgid.pg64, pgid.pg.pool, o,
             req->r_osd ? req->r_osd->o_osd : -1);

        if (req->r_osd) {
                __cancel_request(req);
                list_del_init(&req->r_osd_item);
                if (list_empty(&req->r_osd->o_requests)) {
                        /* try to re-use r_osd if possible */
                        newosd = get_osd(req->r_osd);
                        remove_osd(osdc, newosd);
                }
                req->r_osd = NULL;
        }

        req->r_osd = __lookup_osd(osdc, o);
        if (!req->r_osd && o >= 0) {
                if (newosd) {
                        req->r_osd = newosd;
                        newosd = NULL;
                } else {
                        err = -ENOMEM;
                        req->r_osd = create_osd(osdc);
                        if (!req->r_osd)
                                goto out;
                }

                dout("map_osds osd %p is osd%d\n", req->r_osd, o);
                req->r_osd->o_osd = o;
                req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
                __insert_osd(osdc, req->r_osd);

                ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
        }

        if (req->r_osd)
                list_add(&req->r_osd_item, &req->r_osd->o_requests);
        err = 1;   /* osd changed */

out:
        if (newosd)
                put_osd(newosd);
        return err;
}
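
/*
 * Note the two-step mapping above: the object name is first mapped to a
 * placement group (ceph_calc_object_layout() fills reqhead->layout), and
 * the pg is then mapped to its current primary osd (ceph_calc_pg_primary()).
 * A request only moves when that final osd assignment changes.
 */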
/*
 * caller should hold map_sem (for read) and request_mutex
 */
static int __send_request(struct ceph_osd_client *osdc,
                          struct ceph_osd_request *req)
{
        struct ceph_osd_request_head *reqhead;
        int err;

        err = __map_osds(osdc, req);
        if (err < 0)
                return err;
        if (req->r_osd == NULL) {
                dout("send_request %p no up osds in pg\n", req);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
                return 0;
        }

        dout("send_request %p tid %llu to osd%d flags %d\n",
             req, req->r_tid, req->r_osd->o_osd, req->r_flags);

        reqhead = req->r_request->front.iov_base;
        reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
        reqhead->flags |= cpu_to_le32(req->r_flags);  /* e.g., RETRY */
        reqhead->reassert_version = req->r_reassert_version;

        req->r_timeout_stamp = jiffies+osdc->client->mount_args.osd_timeout*HZ;

        ceph_msg_get(req->r_request); /* send consumes a ref */
        ceph_con_send(&req->r_osd->o_con, req->r_request);
        req->r_sent = req->r_osd->o_incarnation;
        return 0;
}
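
/*
 * On send, r_sent records the osd's o_incarnation, which is bumped each
 * time the session connection is reset.  __map_osds() treats a request
 * with r_sent < o_incarnation as not yet sent on the current connection,
 * which is what forces a resend after a connection reset.
 */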
/*
 * Timeout callback, called every N seconds when 1 or more osd
 * requests have been active for more than N seconds.  When this
 * happens, we ping all OSDs with requests who have timed out to
 * ensure any communications channel reset is detected.  Reset the
 * request timeouts another N seconds in the future as we go.
 * Reschedule the timeout event another N seconds in future (unless
 * there are no open requests).
 */
static void handle_timeout(struct work_struct *work)
{
        struct ceph_osd_client *osdc =
                container_of(work, struct ceph_osd_client, timeout_work.work);
        struct ceph_osd_request *req;
        struct ceph_osd *osd;
        unsigned long timeout = osdc->client->mount_args.osd_timeout * HZ;
        unsigned long next_timeout = timeout + jiffies;
        struct rb_node *p;

        dout("timeout\n");
        down_read(&osdc->map_sem);

        ceph_monc_request_next_osdmap(&osdc->client->monc);

        mutex_lock(&osdc->request_mutex);
        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_osd_request, r_node);

                if (req->r_resend) {
                        int err;

                        dout("osdc resending prev failed %lld\n", req->r_tid);
                        err = __send_request(osdc, req);
                        if (err)
                                dout("osdc failed again on %lld\n", req->r_tid);
                        else
                                req->r_resend = false;
                        continue;
                }
        }
        for (p = rb_first(&osdc->osds); p; p = rb_next(p)) {
                osd = rb_entry(p, struct ceph_osd, o_node);
                if (list_empty(&osd->o_requests))
                        continue;
                req = list_first_entry(&osd->o_requests,
                                       struct ceph_osd_request, r_osd_item);
                if (time_before(jiffies, req->r_timeout_stamp))
                        continue;

                dout(" tid %llu (at least) timed out on osd%d\n",
                     req->r_tid, osd->o_osd);
                req->r_timeout_stamp = next_timeout;
                ceph_con_keepalive(&osd->o_con);
        }

        if (osdc->timeout_tid)
                schedule_delayed_work(&osdc->timeout_work,
                                      round_jiffies_relative(timeout));

        mutex_unlock(&osdc->request_mutex);

        up_read(&osdc->map_sem);
}
/*
 * handle osd op reply.  either call the callback if it is specified,
 * or do the completion to wake up the waiting thread.
 */
static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        struct ceph_osd_reply_head *rhead = msg->front.iov_base;
        struct ceph_osd_request *req;
        u64 tid;
        int numops, object_len, flags;

        if (msg->front.iov_len < sizeof(*rhead))
                goto bad;
        tid = le64_to_cpu(rhead->tid);
        numops = le32_to_cpu(rhead->num_ops);
        object_len = le32_to_cpu(rhead->object_len);
        if (msg->front.iov_len != sizeof(*rhead) + object_len +
            numops * sizeof(struct ceph_osd_op))
                goto bad;
        dout("handle_reply %p tid %llu\n", msg, tid);

        /* lookup */
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (req == NULL) {
                dout("handle_reply tid %llu dne\n", tid);
                mutex_unlock(&osdc->request_mutex);
                return;
        }
        ceph_osdc_get_request(req);
        flags = le32_to_cpu(rhead->flags);

        if (req->r_reply) {
                /*
                 * once we see the message has been received, we don't
                 * need a ref (which is only needed for revoking
                 * pages)
                 */
                ceph_msg_put(req->r_reply);
                req->r_reply = NULL;
        }

        if (!req->r_got_reply) {
                unsigned bytes;

                req->r_result = le32_to_cpu(rhead->result);
                bytes = le32_to_cpu(msg->hdr.data_len);
                dout("handle_reply result %d bytes %d\n", req->r_result,
                     bytes);
                if (req->r_result == 0)
                        req->r_result = bytes;

                /* in case this is a write and we need to replay, */
                req->r_reassert_version = rhead->reassert_version;

                req->r_got_reply = 1;
        } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
                dout("handle_reply tid %llu dup ack\n", tid);
                mutex_unlock(&osdc->request_mutex);
                goto done;
        }

        dout("handle_reply tid %llu flags %d\n", tid, flags);

        /* either this is a read, or we got the safe response */
        if ((flags & CEPH_OSD_FLAG_ONDISK) ||
            ((flags & CEPH_OSD_FLAG_WRITE) == 0))
                __unregister_request(osdc, req);

        mutex_unlock(&osdc->request_mutex);

        if (req->r_callback)
                req->r_callback(req, msg);
        else
                complete(&req->r_completion);

        if (flags & CEPH_OSD_FLAG_ONDISK) {
                if (req->r_safe_callback)
                        req->r_safe_callback(req, msg);
                complete(&req->r_safe_completion);  /* fsync waiter */
        }

done:
        ceph_osdc_put_request(req);
        return;

bad:
        pr_err("corrupt osd_op_reply got %d %d expected %d\n",
               (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
               (int)sizeof(*rhead));
}
/*
 * Resubmit osd requests whose osd or osd address has changed.  Request
 * a new osd map if osds are down, or we are otherwise unable to determine
 * how to direct a request.
 *
 * Close connections to down osds.
 *
 * If @kickosd is specified, resubmit requests for that specific osd.
 *
 * Caller should hold map_sem for read and request_mutex.
 */
static void kick_requests(struct ceph_osd_client *osdc,
                          struct ceph_osd *kickosd)
{
        struct ceph_osd_request *req;
        struct rb_node *p, *n;
        int needmap = 0;
        int err;

        dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
        mutex_lock(&osdc->request_mutex);
        if (!kickosd) {
                for (p = rb_first(&osdc->osds); p; p = n) {
                        struct ceph_osd *osd =
                                rb_entry(p, struct ceph_osd, o_node);

                        n = rb_next(p);
                        if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
                            !ceph_entity_addr_equal(&osd->o_con.peer_addr,
                                                    ceph_osd_addr(osdc->osdmap,
                                                                  osd->o_osd)))
                                reset_osd(osdc, osd);
                }
        }

        for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
                req = rb_entry(p, struct ceph_osd_request, r_node);

                if (req->r_resend) {
                        dout(" r_resend set on tid %llu\n", req->r_tid);
                        __cancel_request(req);
                        goto kick;
                }
                if (req->r_osd && kickosd == req->r_osd) {
                        __cancel_request(req);
                        goto kick;
                }

                err = __map_osds(osdc, req);
                if (err == 0)
                        continue;  /* no change */
                if (err < 0) {
                        /*
                         * FIXME: really, we should set the request
                         * error and fail if this isn't a 'nofail'
                         * request, but that's a fair bit more
                         * complicated to do.  So retry!
                         */
                        dout(" setting r_resend on %llu\n", req->r_tid);
                        req->r_resend = true;
                        continue;
                }
                if (req->r_osd == NULL) {
                        dout("tid %llu maps to no valid osd\n", req->r_tid);
                        needmap++;  /* request a newer map */
                        continue;
                }

kick:
                dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
                     req->r_osd ? req->r_osd->o_osd : -1);
                req->r_flags |= CEPH_OSD_FLAG_RETRY;
                err = __send_request(osdc, req);
                if (err) {
                        dout(" setting r_resend on %llu\n", req->r_tid);
                        req->r_resend = true;
                }
        }
        mutex_unlock(&osdc->request_mutex);

        if (needmap) {
                dout("%d requests for down osds, need new map\n", needmap);
                ceph_monc_request_next_osdmap(&osdc->client->monc);
        }
}
/*
 * Process updated osd map.
 *
 * The message contains any number of incremental and full maps, normally
 * indicating some sort of topology change in the cluster.  Kick requests
 * off to different OSDs as needed.
 */
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
        void *p, *end, *next;
        u32 nr_maps, maplen;
        u32 epoch;
        struct ceph_osdmap *newmap = NULL, *oldmap;
        int err;
        struct ceph_fsid fsid;

        dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
        p = msg->front.iov_base;
        end = p + msg->front.iov_len;

        /* verify fsid */
        ceph_decode_need(&p, end, sizeof(fsid), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        if (ceph_fsid_compare(&fsid, &osdc->client->monc.monmap->fsid)) {
                pr_err("got osdmap with wrong fsid, ignoring\n");
                return;
        }

        down_write(&osdc->map_sem);

        /* incremental maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d inc maps\n", nr_maps);
        while (nr_maps > 0) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                next = p + maplen;
                if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
                        dout("applying incremental map %u len %d\n",
                             epoch, maplen);
                        newmap = osdmap_apply_incremental(&p, next,
                                                          osdc->osdmap,
                                                          osdc->client->msgr);
                        if (IS_ERR(newmap)) {
                                err = PTR_ERR(newmap);
                                goto bad;
                        }
                        if (newmap != osdc->osdmap) {
                                ceph_osdmap_destroy(osdc->osdmap);
                                osdc->osdmap = newmap;
                        }
                } else {
                        dout("ignoring incremental map %u len %d\n",
                             epoch, maplen);
                }
                p = next;
                nr_maps--;
        }
        if (newmap)
                goto done;

        /* full maps */
        ceph_decode_32_safe(&p, end, nr_maps, bad);
        dout(" %d full maps\n", nr_maps);
        while (nr_maps) {
                ceph_decode_need(&p, end, 2*sizeof(u32), bad);
                epoch = ceph_decode_32(&p);
                maplen = ceph_decode_32(&p);
                ceph_decode_need(&p, end, maplen, bad);
                if (nr_maps > 1) {
                        dout("skipping non-latest full map %u len %d\n",
                             epoch, maplen);
                } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
                        dout("skipping full map %u len %d, "
                             "older than our %u\n", epoch, maplen,
                             osdc->osdmap->epoch);
                } else {
                        dout("taking full map %u len %d\n", epoch, maplen);
                        newmap = osdmap_decode(&p, p+maplen);
                        if (IS_ERR(newmap)) {
                                err = PTR_ERR(newmap);
                                goto bad;
                        }
                        oldmap = osdc->osdmap;
                        osdc->osdmap = newmap;
                        if (oldmap)
                                ceph_osdmap_destroy(oldmap);
                }
                p += maplen;
                nr_maps--;
        }

done:
        downgrade_write(&osdc->map_sem);
        ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
        if (newmap)
                kick_requests(osdc, NULL);
        up_read(&osdc->map_sem);
        return;

bad:
        pr_err("osdc handle_map corrupt msg\n");
        up_write(&osdc->map_sem);
        return;
}
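
/*
 * For reference, the map message decoded above is laid out as:
 *
 *      ceph_fsid
 *      u32 n, then n incremental maps, each: u32 epoch, u32 len, len bytes
 *      u32 m, then m full maps, each:        u32 epoch, u32 len, len bytes
 *
 * Only the newest usable map is applied; the rest are skipped.
 */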
/*
 * A read request prepares specific pages that data is to be read into.
 * When a message is being read off the wire, we call prepare_pages to
 * find those pages.
 *  0 = success, -1 failure.
 */
static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m,
                         int want)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;
        struct ceph_osd_reply_head *rhead = m->front.iov_base;
        struct ceph_osd_request *req;
        u64 tid;
        int ret = -1;
        int type = le16_to_cpu(m->hdr.type);

        if (!osd)
                return -1;
        osdc = osd->o_osdc;

        dout("prepare_pages on msg %p want %d\n", m, want);
        if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
                return -1;  /* hmm! */

        tid = le64_to_cpu(rhead->tid);
        mutex_lock(&osdc->request_mutex);
        req = __lookup_request(osdc, tid);
        if (!req) {
                dout("prepare_pages unknown tid %llu\n", tid);
                goto out;
        }
        dout("prepare_pages tid %llu has %d pages, want %d\n",
             tid, req->r_num_pages, want);
        if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) {
                m->pages = req->r_pages;
                m->nr_pages = req->r_num_pages;
                req->r_reply = m;  /* only for duration of read over socket */
                ceph_msg_get(m);   /* put again in handle_reply */
                req->r_prepared_pages = 1;
                ret = 0; /* success */
        }
out:
        mutex_unlock(&osdc->request_mutex);
        return ret;
}
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
                            struct ceph_osd_request *req,
                            bool nofail)
{
        int rc = 0;

        req->r_request->pages = req->r_pages;
        req->r_request->nr_pages = req->r_num_pages;

        register_request(osdc, req);

        down_read(&osdc->map_sem);
        mutex_lock(&osdc->request_mutex);
        /*
         * a racing kick_requests() may have sent the message for us
         * while we dropped request_mutex above, so only send now if
         * the request still hasn't been touched yet.
         */
        if (req->r_sent == 0) {
                rc = __send_request(osdc, req);
                if (rc) {
                        if (nofail) {
                                dout("osdc_start_request failed send, "
                                     " marking %lld\n", req->r_tid);
                                req->r_resend = true;
                                rc = 0;
                        } else {
                                __unregister_request(osdc, req);
                        }
                }
        }
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        return rc;
}
/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
                           struct ceph_osd_request *req)
{
        int rc;

        rc = wait_for_completion_interruptible(&req->r_completion);
        if (rc < 0) {
                mutex_lock(&osdc->request_mutex);
                __cancel_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("wait_request tid %llu interrupted\n", req->r_tid);
                return rc;
        }

        dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
        return req->r_result;
}
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req;
        u64 last_tid, next_tid = 0;

        mutex_lock(&osdc->request_mutex);
        last_tid = osdc->last_tid;
        while (1) {
                req = __lookup_request_ge(osdc, next_tid);
                if (!req)
                        break;
                if (req->r_tid > last_tid)
                        break;

                next_tid = req->r_tid + 1;
                if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
                        continue;

                ceph_osdc_get_request(req);
                mutex_unlock(&osdc->request_mutex);
                dout("sync waiting on tid %llu (last is %llu)\n",
                     req->r_tid, last_tid);
                wait_for_completion(&req->r_safe_completion);
                mutex_lock(&osdc->request_mutex);
                ceph_osdc_put_request(req);
        }
        mutex_unlock(&osdc->request_mutex);
        dout("sync done (thru tid %llu)\n", last_tid);
}
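
/*
 * ceph_osdc_sync() gives fsync-like semantics: a caller issues its
 * writes, then calls this to block until each write in flight at that
 * point has been committed to disk on the osds (r_safe_completion has
 * fired).  Reads are skipped, and the monotonic next_tid guarantees
 * forward progress even as new requests keep arriving.
 */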
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
        int err;

        dout("init\n");
        osdc->client = client;
        osdc->osdmap = NULL;
        init_rwsem(&osdc->map_sem);
        init_completion(&osdc->map_waiters);
        osdc->last_requested_map = 0;
        mutex_init(&osdc->request_mutex);
        osdc->timeout_tid = 0;
        osdc->last_tid = 0;
        osdc->osds = RB_ROOT;
        osdc->requests = RB_ROOT;
        osdc->num_requests = 0;
        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);

        osdc->req_mempool = mempool_create_kmalloc_pool(10,
                                        sizeof(struct ceph_osd_request));
        if (!osdc->req_mempool)
                return -ENOMEM;

        err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true);
        if (err < 0)
                return -ENOMEM;
        err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false);
        if (err < 0)
                return -ENOMEM;
        return 0;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        cancel_delayed_work_sync(&osdc->timeout_work);
        if (osdc->osdmap) {
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        u32 truncate_seq, u64 truncate_size,
                        struct page **pages, int num_pages)
{
        struct ceph_osd_request *req;
        int rc = 0;

        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
             vino.snap, off, *plen);
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, truncate_seq, truncate_size, NULL,
                                    false, 1);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short read due to an object boundary */
        req->r_pages = pages;
        num_pages = calc_pages_for(off, *plen);
        req->r_num_pages = num_pages;

        dout("readpages final extent is %llu~%llu (%d pages)\n",
             off, *plen, req->r_num_pages);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        dout("readpages result %d\n", rc);
        return rc;
}
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         struct ceph_file_layout *layout,
                         struct ceph_snap_context *snapc,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec *mtime,
                         struct page **pages, int num_pages,
                         int flags, int do_sync, bool nofail)
{
        struct ceph_osd_request *req;
        int rc = 0;

        BUG_ON(vino.snap != CEPH_NOSNAP);
        req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                    CEPH_OSD_OP_WRITE,
                                    flags | CEPH_OSD_FLAG_ONDISK |
                                            CEPH_OSD_FLAG_WRITE,
                                    snapc, do_sync,
                                    truncate_seq, truncate_size, mtime,
                                    nofail, 1);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short write due to an object boundary */
        req->r_pages = pages;
        req->r_num_pages = calc_pages_for(off, len);
        dout("writepages %llu~%llu (%d pages)\n", off, len,
             req->r_num_pages);

        rc = ceph_osdc_start_request(osdc, req, nofail);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        if (rc == 0)
                rc = len;
        dout("writepages result %d\n", rc);
        return rc;
}
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc;
        int type = le16_to_cpu(msg->hdr.type);

        if (!osd)
                return;
        osdc = osd->o_osdc;

        switch (type) {
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(osdc, msg);
                break;
        case CEPH_MSG_OSD_OPREPLY:
                handle_reply(osdc, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
        ceph_msg_put(msg);
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        int type = le16_to_cpu(hdr->type);
        int front = le32_to_cpu(hdr->front_len);

        switch (type) {
        case CEPH_MSG_OSD_OPREPLY:
                return ceph_msgpool_get(&osdc->msgpool_op_reply, front);
        }
        return ceph_alloc_msg(con, hdr);
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
                return con;
        return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        put_osd(osd);
}
static const struct ceph_connection_operations osd_con_ops = {
        .get = get_osd_con,
        .put = put_osd_con,
        .dispatch = dispatch,
        .alloc_msg = alloc_msg,
        .peer_reset = osd_reset,
        .alloc_middle = ceph_alloc_middle,
        .prepare_pages = prepare_pages,
};