/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
                 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameters, defined here */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details;       /* detail level in /proc/drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;
struct list_head drbd_tconns;  /* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;       /* peer requests */
struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
         member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

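/*
 * Illustrative sketch only (not part of the original file): how a page
 * pool chained through page->private, like drbd_pp_pool above, is
 * typically pushed to and popped from.  Wakeups on drbd_pp_wait are
 * omitted for brevity.
 */
#if 0
static void example_pp_push(struct page *page)
{
        spin_lock(&drbd_pp_lock);
        set_page_private(page, (unsigned long)drbd_pp_pool);
        drbd_pp_pool = page;
        drbd_pp_vacant++;
        spin_unlock(&drbd_pp_lock);
}

static struct page *example_pp_pop(void)
{
        struct page *page;

        spin_lock(&drbd_pp_lock);
        page = drbd_pp_pool;
        if (page) {
                drbd_pp_pool = (struct page *)page_private(page);
                drbd_pp_vacant--;
        }
        spin_unlock(&drbd_pp_lock);
        return page;
}
#endif
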
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
        .owner =   THIS_MODULE,
        .open =    drbd_open,
        .release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
        int io_allowed;

        atomic_inc(&mdev->local_cnt);
        io_allowed = (mdev->state.disk >= mins);
        if (!io_allowed) {
                if (atomic_dec_and_test(&mdev->local_cnt))
                        wake_up(&mdev->misc_wait);
        }
        return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points
 * to the tail of the list. There is always at least one &struct drbd_tl_epoch
 * object.
 *
 * Each &struct drbd_tl_epoch has a circular, doubly linked list of requests
 * attached.
 */
static int tl_init(struct drbd_conf *mdev)
{
        struct drbd_tl_epoch *b;

        /* during device minor initialization, we may well use GFP_KERNEL */
        b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
        if (!b)
                return 0;
        INIT_LIST_HEAD(&b->requests);
        INIT_LIST_HEAD(&b->w.list);
        b->next = NULL;
        b->br_number = 4711;
        b->n_writes = 0;
        b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

        mdev->tconn->oldest_tle = b;
        mdev->tconn->newest_tle = b;
        INIT_LIST_HEAD(&mdev->tconn->out_of_sequence_requests);

        return 1;
}

static void tl_cleanup(struct drbd_conf *mdev)
{
        D_ASSERT(mdev->tconn->oldest_tle == mdev->tconn->newest_tle);
        D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
        kfree(mdev->tconn->oldest_tle);
        mdev->tconn->oldest_tle = NULL;
        kfree(mdev->tconn->unused_spare_tle);
        mdev->tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
        struct drbd_tl_epoch *newest_before;

        INIT_LIST_HEAD(&new->requests);
        INIT_LIST_HEAD(&new->w.list);
        new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
        new->next = NULL;
        new->n_writes = 0;

        newest_before = mdev->tconn->newest_tle;
        /* never send a barrier number == 0, because that is special-cased
         * when using TCQ for our write ordering code */
        new->br_number = (newest_before->br_number+1) ?: 1;
        if (mdev->tconn->newest_tle != new) {
                mdev->tconn->newest_tle->next = new;
                mdev->tconn->newest_tle = new;
        }
}

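/*
 * Aside (illustrative, not part of the original file): the expression
 * "(x + 1) ?: 1" above uses the GNU binary conditional; it evaluates to
 * x + 1 unless the increment wrapped around to 0, in which case it
 * yields 1, so barrier number 0 is never put on the wire.
 */
#if 0
static u32 example_next_br_number(u32 cur)
{
        return (cur + 1) ?: 1;  /* 0xffffffff wraps to 1, never to 0 */
}
#endif
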
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
                unsigned int set_size)
{
        struct drbd_tl_epoch *b, *nob; /* next old barrier */
        struct list_head *le, *tle;
        struct drbd_request *r;

        spin_lock_irq(&mdev->tconn->req_lock);

        b = mdev->tconn->oldest_tle;

        /* first some paranoia code */
        if (b == NULL) {
                dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
                        barrier_nr);
                goto bail;
        }
        if (b->br_number != barrier_nr) {
                dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
                        barrier_nr, b->br_number);
                goto bail;
        }
        if (b->n_writes != set_size) {
                dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
                        barrier_nr, set_size, b->n_writes);
                goto bail;
        }

        /* Clean up list of requests processed during current epoch */
        list_for_each_safe(le, tle, &b->requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                _req_mod(r, BARRIER_ACKED);
        }
        /* There could be requests on the list waiting for completion
           of the write to the local disk. To avoid corruptions of
           slab's data structures we have to remove the list's head.

           Also there could have been a barrier ack out of sequence, overtaking
           the write acks - which would be a bug and violating write ordering.
           To not deadlock in case we lose connection while such requests are
           still pending, we need some way to find them for the
           _req_mod(CONNECTION_LOST_WHILE_PENDING).

           These have been list_move'd to the out_of_sequence_requests list in
           _req_mod(, BARRIER_ACKED) above.
         */
        list_del_init(&b->requests);

        nob = b->next;
        if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
                _tl_add_barrier(mdev, b);
                if (nob)
                        mdev->tconn->oldest_tle = nob;
                /* if nob == NULL b was the only barrier, and becomes the new
                   barrier. Therefore mdev->tconn->oldest_tle points already to b */
        } else {
                D_ASSERT(nob != NULL);
                mdev->tconn->oldest_tle = nob;
                kfree(b);
        }

        spin_unlock_irq(&mdev->tconn->req_lock);
        dec_ap_pending(mdev);

        return;

bail:
        spin_unlock_irq(&mdev->tconn->req_lock);
        drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
        struct drbd_tl_epoch *b, *tmp, **pn;
        struct list_head *le, *tle, carry_reads;
        struct drbd_request *req;
        int rv, n_writes, n_reads;

        b = mdev->tconn->oldest_tle;
        pn = &mdev->tconn->oldest_tle;
        while (b) {
                n_writes = 0;
                n_reads = 0;
                INIT_LIST_HEAD(&carry_reads);
                list_for_each_safe(le, tle, &b->requests) {
                        req = list_entry(le, struct drbd_request, tl_requests);
                        rv = _req_mod(req, what);

                        n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
                        n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
                }
                tmp = b->next;

                if (n_writes) {
                        if (what == RESEND) {
                                b->n_writes = n_writes;
                                if (b->w.cb == NULL) {
                                        b->w.cb = w_send_barrier;
                                        inc_ap_pending(mdev);
                                        set_bit(CREATE_BARRIER, &mdev->flags);
                                }

                                drbd_queue_work(&mdev->tconn->data.work, &b->w);
                        }
                        pn = &b->next;
                } else {
                        if (n_reads)
                                list_add(&carry_reads, &b->requests);
                        /* there could still be requests on that ring list,
                         * in case local io is still pending */
                        list_del(&b->requests);

                        /* dec_ap_pending corresponding to queue_barrier.
                         * the newest barrier may not have been queued yet,
                         * in which case w.cb is still NULL. */
                        if (b->w.cb != NULL)
                                dec_ap_pending(mdev);

                        if (b == mdev->tconn->newest_tle) {
                                /* recycle, but reinit! */
                                D_ASSERT(tmp == NULL);
                                INIT_LIST_HEAD(&b->requests);
                                list_splice(&carry_reads, &b->requests);
                                INIT_LIST_HEAD(&b->w.list);
                                b->w.cb = NULL;
                                b->br_number = net_random();
                                b->n_writes = 0;

                                *pn = b;
                                break;
                        }
                        *pn = tmp;
                        kfree(b);
                }
                b = tmp;
                list_splice(&carry_reads, &b->requests);
        }
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from
 * the receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
        struct list_head *le, *tle;
        struct drbd_request *r;

        spin_lock_irq(&mdev->tconn->req_lock);

        _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);

        /* we expect this list to be empty. */
        D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));

        /* but just in case, clean it up anyways! */
        list_for_each_safe(le, tle, &mdev->tconn->out_of_sequence_requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                /* It would be nice to complete outside of spinlock.
                 * But this is easier for now. */
                _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
        }

        /* ensure bit indicating barrier is required is clear */
        clear_bit(CREATE_BARRIER, &mdev->flags);

        spin_unlock_irq(&mdev->tconn->req_lock);
}

void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
        spin_lock_irq(&mdev->tconn->req_lock);
        _tl_restart(mdev, what);
        spin_unlock_irq(&mdev->tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
        struct drbd_thread *thi = (struct drbd_thread *) arg;
        struct drbd_conf *mdev = thi->mdev;
        unsigned long flags;
        int retval;

        snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
                 thi->name[0], thi->mdev->tconn->name);

restart:
        retval = thi->function(thi);

        spin_lock_irqsave(&thi->t_lock, flags);

        /* if the receiver has been "EXITING", the last thing it did
         * was set the conn state to "StandAlone",
         * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
         * and receiver thread will be "started".
         * drbd_thread_start needs to set "RESTARTING" in that case.
         * t_state check and assignment needs to be within the same spinlock,
         * so either thread_start sees EXITING, and can remap to RESTARTING,
         * or thread_start sees NONE, and can proceed as normal.
         */

        if (thi->t_state == RESTARTING) {
                dev_info(DEV, "Restarting %s thread\n", thi->name);
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                goto restart;
        }

        thi->task = NULL;
        thi->t_state = NONE;
        smp_mb();
        complete(&thi->stop);
        spin_unlock_irqrestore(&thi->t_lock, flags);

        dev_info(DEV, "Terminating %s\n", current->comm);

        /* Release mod reference taken when thread was started */
        module_put(THIS_MODULE);
        return retval;
}

static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
                             int (*func) (struct drbd_thread *), char *name)
{
        spin_lock_init(&thi->t_lock);
        thi->task    = NULL;
        thi->t_state = NONE;
        thi->function = func;
        thi->mdev = mdev;
        strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
        struct drbd_conf *mdev = thi->mdev;
        struct task_struct *nt;
        unsigned long flags;

        /* is used from state engine doing drbd_thread_stop_nowait,
         * while holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        switch (thi->t_state) {
        case NONE:
                dev_info(DEV, "Starting %s thread (from %s [%d])\n",
                         thi->name, current->comm, current->pid);

                /* Get ref on module for thread - this is released when thread exits */
                if (!try_module_get(THIS_MODULE)) {
                        dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return false;
                }

                init_completion(&thi->stop);
                D_ASSERT(thi->task == NULL);
                thi->reset_cpu_mask = 1;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */

                nt = kthread_create(drbd_thread_setup, (void *) thi,
                                    "drbd%d_%s", mdev_to_minor(mdev), thi->name);

                if (IS_ERR(nt)) {
                        dev_err(DEV, "Couldn't start thread\n");

                        module_put(THIS_MODULE);
                        return false;
                }
                spin_lock_irqsave(&thi->t_lock, flags);
                thi->task = nt;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                wake_up_process(nt);
                break;
        case EXITING:
                thi->t_state = RESTARTING;
                dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
                         thi->name, current->comm, current->pid);
                /* fall through */
        case RUNNING:
        case RESTARTING:
        default:
                spin_unlock_irqrestore(&thi->t_lock, flags);
                break;
        }

        return true;
}

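/*
 * Illustrative lifecycle sketch (not part of the original file; the
 * "worker" name and the local variable are examples only): a drbd_thread
 * moves NONE -> RUNNING in drbd_thread_start(), RUNNING -> EXITING or
 * RESTARTING in _drbd_thread_stop() below, and back to NONE when the
 * thread function returns in drbd_thread_setup().
 */
#if 0
        struct drbd_thread thi;

        drbd_thread_init(mdev, &thi, drbd_worker, "worker");
        drbd_thread_start(&thi);                /* NONE -> RUNNING */
        /* ... */
        _drbd_thread_stop(&thi, false, true);   /* RUNNING -> EXITING, waits */
#endif
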
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
        unsigned long flags;

        enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

        /* may be called from state engine, holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        if (thi->t_state == NONE) {
                spin_unlock_irqrestore(&thi->t_lock, flags);
                if (restart)
                        drbd_thread_start(thi);
                return;
        }

        if (thi->t_state != ns) {
                if (thi->task == NULL) {
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return;
                }

                thi->t_state = ns;
                smp_mb();
                init_completion(&thi->stop);
                if (thi->task != current)
                        force_sig(DRBD_SIGKILL, thi->task);

        }

        spin_unlock_irqrestore(&thi->t_lock, flags);

        if (wait)
                wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_conf *mdev, struct task_struct *task)
{
        struct drbd_tconn *tconn = mdev->tconn;
        struct drbd_thread *thi =
                task == tconn->receiver.task ? &tconn->receiver :
                task == tconn->asender.task  ? &tconn->asender :
                task == tconn->worker.task   ? &tconn->worker : NULL;

        return thi;
}

char *drbd_task_to_thread_name(struct drbd_conf *mdev, struct task_struct *task)
{
        struct drbd_thread *thi = drbd_task_to_thread(mdev, task);
        return thi ? thi->name : task->comm;
}

#ifdef CONFIG_SMP
static int conn_lowest_minor(struct drbd_tconn *tconn)
{
        int minor = 0;
        idr_get_next(&tconn->volumes, &minor);
        return minor;
}
/**
 * drbd_calc_cpu_mask() - Generates a CPU mask, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
        int ord, cpu;

        /* user override. */
        if (cpumask_weight(tconn->cpu_mask))
                return;

        ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
        for_each_online_cpu(cpu) {
                if (ord-- == 0) {
                        cpumask_set_cpu(cpu, tconn->cpu_mask);
                        return;
                }
        }
        /* should not be reached */
        cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
        struct task_struct *p = current;

        if (!thi->reset_cpu_mask)
                return;
        thi->reset_cpu_mask = 0;
        set_cpus_allowed_ptr(p, thi->mdev->tconn->cpu_mask);
}
#endif

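/*
 * Worked example for drbd_calc_cpu_mask() (illustrative): with four CPUs
 * online, a connection whose lowest minor is 5 gets ord = 5 % 4 = 1, so
 * the loop above pins its threads to the second online CPU; minors 0..3
 * spread over CPUs 0..3 respectively.
 */
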
static void prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be32(DRBD_MAGIC);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be16(size);
}

static void prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be32(size);
}

static void _prepare_header(struct drbd_tconn *tconn, int vnr, struct p_header *h,
                            enum drbd_packet cmd, int size)
{
        if (tconn->agreed_pro_version >= 100 || size > DRBD_MAX_SIZE_H80_PACKET)
                prepare_header95(&h->h95, cmd, size);
        else
                prepare_header80(&h->h80, cmd, size);
}

static void prepare_header(struct drbd_conf *mdev, struct p_header *h,
                           enum drbd_packet cmd, int size)
{
        _prepare_header(mdev->tconn, mdev->vnr, h, cmd, size);
}

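/*
 * Wire layouts implied by the conversions above (shown only as an
 * illustration; both variants occupy 8 bytes):
 *
 *   p_header80:  | magic be32 | command be16 | length be16 |
 *   p_header95:  | magic be16 | command be16 | length be32 |
 *
 * h95 trades half of the magic for a 32 bit length field, which is why
 * it is selected for payloads larger than DRBD_MAX_SIZE_H80_PACKET.
 */
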
/* the appropriate socket mutex must be held already */
int _conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct socket *sock,
                   enum drbd_packet cmd, struct p_header *h, size_t size,
                   unsigned msg_flags)
{
        int sent, ok;

        _prepare_header(tconn, vnr, h, cmd, size - sizeof(struct p_header));

        sent = drbd_send(tconn, sock, h, size, msg_flags);

        ok = (sent == size);
        if (!ok && !signal_pending(current))
                conn_warn(tconn, "short sent %s size=%d sent=%d\n",
                          cmdname(cmd), (int)size, sent);
        return ok;
}

/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
                  enum drbd_packet cmd, struct p_header *h, size_t size)
{
        int ok = 0;
        struct socket *sock;

        if (use_data_socket) {
                mutex_lock(&mdev->tconn->data.mutex);
                sock = mdev->tconn->data.socket;
        } else {
                mutex_lock(&mdev->tconn->meta.mutex);
                sock = mdev->tconn->meta.socket;
        }

        /* drbd_disconnect() could have called drbd_free_sock()
         * while we were waiting in down()... */
        if (likely(sock != NULL))
                ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);

        if (use_data_socket)
                mutex_unlock(&mdev->tconn->data.mutex);
        else
                mutex_unlock(&mdev->tconn->meta.mutex);
        return ok;
}

int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd, char *data,
                   size_t size)
{
        struct p_header80 h;
        int ok;

        prepare_header80(&h, cmd, size);

        if (!drbd_get_data_sock(tconn))
                return 0;

        ok = (sizeof(h) ==
                drbd_send(tconn, tconn->data.socket, &h, sizeof(h), 0));
        ok = ok && (size ==
                drbd_send(tconn, tconn->data.socket, data, size, 0));

        drbd_put_data_sock(tconn);

        return ok;
}

int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
{
        struct p_rs_param_95 *p;
        struct socket *sock;
        int size, rv;
        const int apv = mdev->tconn->agreed_pro_version;

        size = apv <= 87 ? sizeof(struct p_rs_param)
                : apv == 88 ? sizeof(struct p_rs_param)
                        + strlen(mdev->sync_conf.verify_alg) + 1
                : apv <= 94 ? sizeof(struct p_rs_param_89)
                : /* apv >= 95 */ sizeof(struct p_rs_param_95);

        /* used from admin command context and receiver/worker context.
         * to avoid kmalloc, grab the socket right here,
         * then use the pre-allocated sbuf there */
        mutex_lock(&mdev->tconn->data.mutex);
        sock = mdev->tconn->data.socket;

        if (likely(sock != NULL)) {
                enum drbd_packet cmd =
                        apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

                p = &mdev->tconn->data.sbuf.rs_param_95;

                /* initialize verify_alg and csums_alg */
                memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

                p->rate = cpu_to_be32(sc->rate);
                p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
                p->c_delay_target = cpu_to_be32(sc->c_delay_target);
                p->c_fill_target = cpu_to_be32(sc->c_fill_target);
                p->c_max_rate = cpu_to_be32(sc->c_max_rate);

                if (apv >= 88)
                        strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
                if (apv >= 89)
                        strcpy(p->csums_alg, mdev->sync_conf.csums_alg);

                rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
        } else
                rv = 0; /* not ok */

        mutex_unlock(&mdev->tconn->data.mutex);

        return rv;
}

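/*
 * Size ladder used above, spelled out (illustration): for an agreed
 * protocol version apv,
 *
 *   apv <= 87:  sizeof(struct p_rs_param)
 *   apv == 88:  sizeof(struct p_rs_param) + strlen(verify_alg) + 1
 *   89 .. 94:   sizeof(struct p_rs_param_89)
 *   apv >= 95:  sizeof(struct p_rs_param_95)
 *
 * i.e. the algorithm names became fixed-size members with apv 89, and
 * apv 95 appended the c_* resync controller settings.
 */
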
int drbd_send_protocol(struct drbd_tconn *tconn)
{
        struct p_protocol *p;
        int size, cf, rv;

        size = sizeof(struct p_protocol);

        if (tconn->agreed_pro_version >= 87)
                size += strlen(tconn->net_conf->integrity_alg) + 1;

        /* we must not recurse into our own queue,
         * as that is blocked during handshake */
        p = kmalloc(size, GFP_NOIO);
        if (p == NULL)
                return 0;

        p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
        p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
        p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
        p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
        p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);

        cf = 0;
        if (tconn->net_conf->want_lose)
                cf |= CF_WANT_LOSE;
        if (tconn->net_conf->dry_run) {
                if (tconn->agreed_pro_version >= 92)
                        cf |= CF_DRY_RUN;
                else {
                        conn_err(tconn, "--dry-run is not supported by peer");
                        kfree(p);
                        return -1;
                }
        }
        p->conn_flags    = cpu_to_be32(cf);

        if (tconn->agreed_pro_version >= 87)
                strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);

        rv = conn_send_cmd2(tconn, P_PROTOCOL, p->head.payload, size - sizeof(struct p_header));
        kfree(p);
        return rv;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
        struct p_uuids p;
        int i;

        if (!get_ldev_if_state(mdev, D_NEGOTIATING))
                return 1;

        for (i = UI_CURRENT; i < UI_SIZE; i++)
                p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

        mdev->comm_bm_set = drbd_bm_total_weight(mdev);
        p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
        uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
        uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
        uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
        p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

        put_ldev(mdev);

        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, &p.head, sizeof(p));
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
        return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
        return _drbd_send_uuids(mdev, 8);
}

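/*
 * The uuid_flags bits assembled above, spelled out (descriptive labels
 * only, not from the original source): bit 0 (1) - want_lose, i.e.
 * "discard my data" on connect; bit 1 (2) - CRASHED_PRIMARY; bit 2 (4) -
 * disk is D_INCONSISTENT; bit 3 (8) - skip initial sync, as passed in by
 * drbd_send_uuids_skip_initial_sync().
 */
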
void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
        if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
                u64 *uuid = mdev->ldev->md.uuid;
                dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
                         text,
                         (unsigned long long)uuid[UI_CURRENT],
                         (unsigned long long)uuid[UI_BITMAP],
                         (unsigned long long)uuid[UI_HISTORY_START],
                         (unsigned long long)uuid[UI_HISTORY_END]);
                put_ldev(mdev);
        } else {
                dev_info(DEV, "%s effective data uuid: %016llX\n",
                         text,
                         (unsigned long long)mdev->ed_uuid);
        }
}

int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
        struct p_rs_uuid p;
        u64 uuid;

        D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

        uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
        drbd_uuid_set(mdev, UI_BITMAP, uuid);
        drbd_print_uuids(mdev, "updated sync UUID");
        drbd_md_sync(mdev);
        p.uuid = cpu_to_be64(uuid);

        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, &p.head, sizeof(p));
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
        struct p_sizes p;
        sector_t d_size, u_size;
        int q_order_type, max_bio_size;
        int ok;

        if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
                D_ASSERT(mdev->ldev->backing_bdev);
                d_size = drbd_get_max_capacity(mdev->ldev);
                u_size = mdev->ldev->dc.disk_size;
                q_order_type = drbd_queue_order_type(mdev);
                max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
                max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
                put_ldev(mdev);
        } else {
                d_size = 0;
                u_size = 0;
                q_order_type = QUEUE_ORDERED_NONE;
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
        }

        p.d_size = cpu_to_be64(d_size);
        p.u_size = cpu_to_be64(u_size);
        p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
        p.max_bio_size = cpu_to_be32(max_bio_size);
        p.queue_order_type = cpu_to_be16(q_order_type);
        p.dds_flags = cpu_to_be16(flags);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, &p.head, sizeof(p));
        return ok;
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
        struct socket *sock;
        struct p_state p;
        int ok = 0;

        mutex_lock(&mdev->tconn->data.mutex);

        p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
        sock = mdev->tconn->data.socket;

        if (likely(sock != NULL)) {
                ok = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
        }

        mutex_unlock(&mdev->tconn->data.mutex);

        return ok;
}

int drbd_send_state_req(struct drbd_conf *mdev,
                        union drbd_state mask, union drbd_state val)
{
        struct p_req_state p;

        p.mask = cpu_to_be32(mask.i);
        p.val  = cpu_to_be32(val.i);

        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ, &p.head, sizeof(p));
}

int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
        struct p_req_state_reply p;

        p.retcode = cpu_to_be32(retcode);

        return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, &p.head, sizeof(p));
}

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
                         struct p_compressed_bm *p,
                         struct bm_xfer_ctx *c)
{
        struct bitstream bs;
        unsigned long plain_bits;
        unsigned long tmp;
        unsigned long rl;
        unsigned len;
        unsigned toggle;
        int bits;

        /* may we use this feature? */
        if ((mdev->sync_conf.use_rle == 0) ||
            (mdev->tconn->agreed_pro_version < 90))
                return 0;

        if (c->bit_offset >= c->bm_bits)
                return 0; /* nothing to do. */

        /* use at most this many bytes */
        bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
        memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
        /* plain bits covered in this code string */
        plain_bits = 0;

        /* p->encoding & 0x80 stores whether the first run length is set.
         * bit offset is implicit.
         * start with toggle == 2 to be able to tell the first iteration */
        toggle = 2;

        /* see how many plain bits we can stuff into one packet
         * using RLE and VLI. */
        do {
                tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
                                    : _drbd_bm_find_next(mdev, c->bit_offset);
                if (tmp == -1UL)
                        tmp = c->bm_bits;
                rl = tmp - c->bit_offset;

                if (toggle == 2) { /* first iteration */
                        if (rl == 0) {
                                /* the first checked bit was set,
                                 * store start value, */
                                DCBP_set_start(p, 1);
                                /* but skip encoding of zero run length */
                                toggle = !toggle;
                                continue;
                        }
                        DCBP_set_start(p, 0);
                }

                /* paranoia: catch zero runlength.
                 * can only happen if bitmap is modified while we scan it. */
                if (rl == 0) {
                        dev_err(DEV, "unexpected zero runlength while encoding bitmap "
                            "t:%u bo:%lu\n", toggle, c->bit_offset);
                        return -1;
                }

                bits = vli_encode_bits(&bs, rl);
                if (bits == -ENOBUFS) /* buffer full */
                        break;
                if (bits <= 0) {
                        dev_err(DEV, "error while encoding bitmap: %d\n", bits);
                        return 0;
                }

                toggle = !toggle;
                plain_bits += rl;
                c->bit_offset = tmp;
        } while (c->bit_offset < c->bm_bits);

        len = bs.cur.b - p->code + !!bs.cur.bit;

        if (plain_bits < (len << 3)) {
                /* incompressible with this method.
                 * we need to rewind both word and bit position. */
                c->bit_offset -= plain_bits;
                bm_xfer_ctx_bit_to_word_offset(c);
                c->bit_offset = c->word_offset * BITS_PER_LONG;
                return 0;
        }

        /* RLE + VLI was able to compress it just fine.
         * update c->word_offset. */
        bm_xfer_ctx_bit_to_word_offset(c);

        /* store pad_bits */
        DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

        return len;
}

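/*
 * Worked example (illustrative): a chunk whose bits start
 * 1 1 1 0 0 0 0 0 1 1 1 1 ... is encoded as "first run is of set bits"
 * via DCBP_set_start(p, 1) plus the VLI coded run lengths 3, 5, 4, ...
 * The compressed form is only kept when it is actually smaller than the
 * plain bits it covers (plain_bits >= len * 8); otherwise the offsets
 * are rewound above and the caller falls back to a plain P_BITMAP.
 */
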
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
                         struct p_header *h, struct bm_xfer_ctx *c)
{
        struct p_compressed_bm *p = (void*)h;
        unsigned long num_words;
        int len;
        int ok;

        len = fill_bitmap_rle_bits(mdev, p, c);

        if (len < 0)
                return -EIO;

        if (len) {
                DCBP_set_code(p, RLE_VLI_Bits);
                ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_COMPRESSED_BITMAP, h,
                                    sizeof(*p) + len, 0);

                c->packets[0]++;
                c->bytes[0] += sizeof(*p) + len;

                if (c->bit_offset >= c->bm_bits)
                        len = 0; /* DONE */
        } else {
                /* was not compressible.
                 * send a buffer full of plain text bits instead. */
                num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
                len = num_words * sizeof(long);
                if (len)
                        drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
                ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BITMAP,
                                    h, sizeof(struct p_header80) + len, 0);
                c->word_offset += num_words;
                c->bit_offset = c->word_offset * BITS_PER_LONG;

                c->packets[1]++;
                c->bytes[1] += sizeof(struct p_header80) + len;

                if (c->bit_offset > c->bm_bits)
                        c->bit_offset = c->bm_bits;
        }
        if (ok) {
                if (len == 0) {
                        INFO_bm_xfer_stats(mdev, "send", c);
                        return 0;
                } else
                        return 1;
        }
        return -EIO;
}

/* See the comment at receive_bitmap() */
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
        struct bm_xfer_ctx c;
        struct p_header *p;
        int err;

        if (!expect(mdev->bitmap))
                return false;

        /* maybe we should use some per thread scratch page,
         * and allocate that during initial device creation? */
        p = (struct p_header *) __get_free_page(GFP_NOIO);
        if (!p) {
                dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
                return false;
        }

        if (get_ldev(mdev)) {
                if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
                        dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
                        drbd_bm_set_all(mdev);
                        if (drbd_bm_write(mdev)) {
                                /* write_bm did fail! Leave full sync flag set in Meta P_DATA
                                 * but otherwise process as per normal - need to tell other
                                 * side that a full resync is required! */
                                dev_err(DEV, "Failed to write bitmap to disk!\n");
                        } else {
                                drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                                drbd_md_sync(mdev);
                        }
                }
                put_ldev(mdev);
        }

        c = (struct bm_xfer_ctx) {
                .bm_bits = drbd_bm_bits(mdev),
                .bm_words = drbd_bm_words(mdev),
        };

        do {
                err = send_bitmap_rle_or_plain(mdev, p, &c);
        } while (err > 0);

        free_page((unsigned long) p);
        return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
        int err;

        if (!drbd_get_data_sock(mdev->tconn))
                return -1;
        err = !_drbd_send_bitmap(mdev);
        drbd_put_data_sock(mdev->tconn);
        return err;
}

int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
        int ok;
        struct p_barrier_ack p;

        p.barrier  = barrier_nr;
        p.set_size = cpu_to_be32(set_size);

        if (mdev->state.conn < C_CONNECTED)
                return false;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, &p.head, sizeof(p));
        return ok;
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in bytes, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
                          u64 sector, u32 blksize, u64 block_id)
{
        int ok;
        struct p_block_ack p;

        p.sector   = sector;
        p.block_id = block_id;
        p.blksize  = blksize;
        p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));

        if (!mdev->tconn->meta.socket || mdev->state.conn < C_CONNECTED)
                return false;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, &p.head, sizeof(p));
        return ok;
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
                     struct p_data *dp, int data_size)
{
        data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
                crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
        return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
                              dp->block_id);
}

int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
                     struct p_block_req *rp)
{
        return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
                  struct drbd_peer_request *peer_req)
{
        return _drbd_send_ack(mdev, cmd,
                              cpu_to_be64(peer_req->i.sector),
                              cpu_to_be32(peer_req->i.size),
                              peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
                     sector_t sector, int blksize, u64 block_id)
{
        return _drbd_send_ack(mdev, cmd,
                              cpu_to_be64(sector),
                              cpu_to_be32(blksize),
                              cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
                       sector_t sector, int size, u64 block_id)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = block_id;
        p.blksize  = cpu_to_be32(size);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &p.head, sizeof(p));
        return ok;
}

int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
                            void *digest, int digest_size, enum drbd_packet cmd)
{
        int ok;
        struct p_block_req p;

        prepare_header(mdev, &p.head, cmd, sizeof(p) - sizeof(struct p_header) + digest_size);
        p.sector   = cpu_to_be64(sector);
        p.block_id = ID_SYNCER /* unused */;
        p.blksize  = cpu_to_be32(size);

        mutex_lock(&mdev->tconn->data.mutex);

        ok = (sizeof(p) == drbd_send(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), 0));
        ok = ok && (digest_size == drbd_send(mdev->tconn, mdev->tconn->data.socket, digest, digest_size, 0));

        mutex_unlock(&mdev->tconn->data.mutex);

        return ok;
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = ID_SYNCER /* unused */;
        p.blksize  = cpu_to_be32(size);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, &p.head, sizeof(p));
        return ok;
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
        int drop_it;
        /* long elapsed = (long)(jiffies - mdev->last_received); */

        drop_it =   tconn->meta.socket == sock
                || !tconn->asender.task
                || get_t_state(&tconn->asender) != RUNNING
                || tconn->volume0->state.conn < C_CONNECTED;

        if (drop_it)
                return true;

        drop_it = !--tconn->ko_count;
        if (!drop_it) {
                conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
                         current->comm, current->pid, tconn->ko_count);
                request_ping(tconn);
        }

        return drop_it; /* && (mdev->state == R_PRIMARY) */;
}

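/*
 * Example (illustrative, assuming the usual meaning of the ko-count
 * setting): with a socket send timeout of six seconds and ko_count
 * starting at 7, a peer that stops acknowledging has its connection
 * declared dead after roughly 6 * 7 = 42 seconds of stalled sends, with
 * one request_ping() issued per expired timeout until then.
 */
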
static void drbd_update_congested(struct drbd_tconn *tconn)
{
        struct sock *sk = tconn->data.socket->sk;
        if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
                set_bit(NET_CONGESTED, &tconn->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
                              int offset, size_t size, unsigned msg_flags)
{
        int sent = drbd_send(mdev->tconn, mdev->tconn->data.socket, kmap(page) + offset, size, msg_flags);
        kunmap(page);
        if (sent == size)
                mdev->send_cnt += size>>9;
        return sent == size;
}

1403 | static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, | |
ba11ad9a | 1404 | int offset, size_t size, unsigned msg_flags) |
b411b363 PR |
1405 | { |
1406 | mm_segment_t oldfs = get_fs(); | |
1407 | int sent, ok; | |
1408 | int len = size; | |
1409 | ||
1410 | /* e.g. XFS meta- & log-data is in slab pages, which have a | |
1411 | * page_count of 0 and/or have PageSlab() set. | |
1412 | * we cannot use send_page for those, as that does get_page(); | |
1413 | * put_page(); and would cause either a VM_BUG directly, or | |
1414 | * __page_cache_release a page that would actually still be referenced | |
1415 | * by someone, leading to some obscure delayed Oops somewhere else. */ | |
1416 | if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) | |
ba11ad9a | 1417 | return _drbd_no_send_page(mdev, page, offset, size, msg_flags); |
b411b363 | 1418 | |
ba11ad9a | 1419 | msg_flags |= MSG_NOSIGNAL; |
1a7ba646 | 1420 | drbd_update_congested(mdev->tconn); |
b411b363 PR |
1421 | set_fs(KERNEL_DS); |
1422 | do { | |
e42325a5 | 1423 | sent = mdev->tconn->data.socket->ops->sendpage(mdev->tconn->data.socket, page, |
b411b363 | 1424 | offset, len, |
ba11ad9a | 1425 | msg_flags); |
b411b363 | 1426 | if (sent == -EAGAIN) { |
1a7ba646 | 1427 | if (we_should_drop_the_connection(mdev->tconn, |
e42325a5 | 1428 | mdev->tconn->data.socket)) |
b411b363 PR |
1429 | break; |
1430 | else | |
1431 | continue; | |
1432 | } | |
1433 | if (sent <= 0) { | |
1434 | dev_warn(DEV, "%s: size=%d len=%d sent=%d\n", | |
1435 | __func__, (int)size, len, sent); | |
1436 | break; | |
1437 | } | |
1438 | len -= sent; | |
1439 | offset += sent; | |
1440 | } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/); | |
1441 | set_fs(oldfs); | |
01a311a5 | 1442 | clear_bit(NET_CONGESTED, &mdev->tconn->flags); |
b411b363 PR |
1443 | |
1444 | ok = (len == 0); | |
1445 | if (likely(ok)) | |
1446 | mdev->send_cnt += size>>9; | |
1447 | return ok; | |
1448 | } | |
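/* Illustrative userspace model (not driver code) of the reference
 * counting described in the comment above _drbd_no_send_page(): the
 * network layer takes its own reference via get_page() and drops it via
 * put_page() only once the data actually left the machine, so the page
 * must not be recycled while the count is elevated.  struct model_page
 * and the helpers are stand-ins invented for this sketch. */
#include <assert.h>

struct model_page { int count; };

static void model_get_page(struct model_page *p) { p->count++; }
static void model_put_page(struct model_page *p) { p->count--; }

int main(void)
{
	struct model_page page = { .count = 1 };  /* owned by drbd (net_ee) */

	model_get_page(&page);            /* sendpage: the skb references the page */
	assert(page.count == 2);          /* too early to reuse this buffer */

	model_put_page(&page);            /* NIC done, network layer puts the page */
	assert(page.count == 1);          /* now safe to recycle the EE page */
	return 0;
}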
1449 | ||
1450 | static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) | |
1451 | { | |
1452 | struct bio_vec *bvec; | |
1453 | int i; | |
ba11ad9a | 1454 | /* hint all but last page with MSG_MORE */ |
b411b363 PR |
1455 | __bio_for_each_segment(bvec, bio, i, 0) { |
1456 | if (!_drbd_no_send_page(mdev, bvec->bv_page, | |
ba11ad9a LE |
1457 | bvec->bv_offset, bvec->bv_len, |
1458 | i == bio->bi_vcnt -1 ? 0 : MSG_MORE)) | |
b411b363 PR |
1459 | return 0; |
1460 | } | |
1461 | return 1; | |
1462 | } | |
1463 | ||
1464 | static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) | |
1465 | { | |
1466 | struct bio_vec *bvec; | |
1467 | int i; | |
ba11ad9a | 1468 | /* hint all but last page with MSG_MORE */ |
b411b363 PR |
1469 | __bio_for_each_segment(bvec, bio, i, 0) { |
1470 | if (!_drbd_send_page(mdev, bvec->bv_page, | |
ba11ad9a LE |
1471 | bvec->bv_offset, bvec->bv_len, |
1472 | i == bio->bi_vcnt -1 ? 0 : MSG_MORE)) | |
b411b363 PR |
1473 | return 0; |
1474 | } | |
b411b363 PR |
1475 | return 1; |
1476 | } | |
1477 | ||
db830c46 AG |
1478 | static int _drbd_send_zc_ee(struct drbd_conf *mdev, |
1479 | struct drbd_peer_request *peer_req) | |
45bb912b | 1480 | { |
db830c46 AG |
1481 | struct page *page = peer_req->pages; |
1482 | unsigned len = peer_req->i.size; | |
1483 | ||
ba11ad9a | 1484 | /* hint all but last page with MSG_MORE */ |
45bb912b LE |
1485 | page_chain_for_each(page) { |
1486 | unsigned l = min_t(unsigned, len, PAGE_SIZE); | |
ba11ad9a LE |
1487 | if (!_drbd_send_page(mdev, page, 0, l, |
1488 | page_chain_next(page) ? MSG_MORE : 0)) | |
45bb912b LE |
1489 | return 0; |
1490 | len -= l; | |
1491 | } | |
1492 | return 1; | |
1493 | } | |
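/* Userspace sketch of the pattern shared by the three senders above:
 * walk the buffer page by page and pass MSG_MORE for every chunk except
 * the last, so TCP may coalesce the pieces into fewer segments.  The
 * chain layout (next element reachable from the page, cf.
 * page_chain_next()/set_page_private()) is modeled with a plain struct;
 * all names here are stand-ins for this sketch. */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

struct model_page { struct model_page *next; };

static void model_send_chain(struct model_page *page, unsigned int len)
{
	while (page && len) {
		unsigned int l = len < MODEL_PAGE_SIZE ? len : MODEL_PAGE_SIZE;
		int more = page->next != NULL;   /* hint all but the last page */

		printf("send %u bytes%s\n", l, more ? " (MSG_MORE)" : "");
		len -= l;
		page = page->next;
	}
}

int main(void)
{
	struct model_page p2 = { .next = NULL };
	struct model_page p1 = { .next = &p2 };

	model_send_chain(&p1, 6000);     /* -> 4096 (MSG_MORE), then 1904 */
	return 0;
}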
1494 | ||
76d2e7ec PR |
1495 | static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw) |
1496 | { | |
31890f4a | 1497 | if (mdev->tconn->agreed_pro_version >= 95) |
76d2e7ec | 1498 | return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | |
76d2e7ec PR |
1499 | (bi_rw & REQ_FUA ? DP_FUA : 0) | |
1500 | (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | | |
1501 | (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); | |
1502 | else | |
721a9602 | 1503 | return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; |
76d2e7ec PR |
1504 | } |
1505 | ||
b411b363 PR |
1506 | /* Used to send write requests |
1507 | * R_PRIMARY -> Peer (P_DATA) | |
1508 | */ | |
1509 | int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |
1510 | { | |
1511 | int ok = 1; | |
1512 | struct p_data p; | |
1513 | unsigned int dp_flags = 0; | |
1514 | void *dgb; | |
1515 | int dgs; | |
1516 | ||
61120870 | 1517 | if (!drbd_get_data_sock(mdev->tconn)) |
b411b363 PR |
1518 | return 0; |
1519 | ||
a0638456 PR |
1520 | dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ? |
1521 | crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0; | |
b411b363 | 1522 | |
fd340c12 | 1523 | prepare_header(mdev, &p.head, P_DATA, sizeof(p) - sizeof(struct p_header) + dgs + req->i.size); |
ace652ac | 1524 | p.sector = cpu_to_be64(req->i.sector); |
b411b363 | 1525 | p.block_id = (unsigned long)req; |
fd340c12 | 1526 | p.seq_num = cpu_to_be32(req->seq_num = atomic_add_return(1, &mdev->packet_seq)); |
b411b363 | 1527 | |
76d2e7ec PR |
1528 | dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); |
1529 | ||
b411b363 PR |
1530 | if (mdev->state.conn >= C_SYNC_SOURCE && |
1531 | mdev->state.conn <= C_PAUSED_SYNC_T) | |
1532 | dp_flags |= DP_MAY_SET_IN_SYNC; | |
1533 | ||
1534 | p.dp_flags = cpu_to_be32(dp_flags); | |
b411b363 PR |
1535 | set_bit(UNPLUG_REMOTE, &mdev->flags); |
1536 | ok = (sizeof(p) == | |
bedbd2a5 | 1537 | drbd_send(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0)); |
b411b363 | 1538 | if (ok && dgs) { |
a0638456 PR |
1539 | dgb = mdev->tconn->int_dig_out; |
1540 | drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, dgb); | |
bedbd2a5 | 1541 | ok = dgs == drbd_send(mdev->tconn, mdev->tconn->data.socket, dgb, dgs, 0); |
b411b363 PR |
1542 | } |
1543 | if (ok) { | |
470be44a LE |
1544 | /* For protocol A, we have to memcpy the payload into |
1545 | * socket buffers, as we may complete right away | |
1546 | * as soon as we handed it over to tcp, at which point the data | |
1547 | * pages may become invalid. | |
1548 | * | |
1549 | * With data integrity enabled, we copy it as well, so we can be | |
1550 | * sure that even if the bio pages may still be modified, it | |
1551 | * won't change the data on the wire, thus if the digest checks | |
1552 | * out ok after sending on this side, but does not fit on the | |
1553 | * receiving side, we have certainly detected corruption elsewhere. | |
1554 | */ | |
89e58e75 | 1555 | if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs) |
b411b363 PR |
1556 | ok = _drbd_send_bio(mdev, req->master_bio); |
1557 | else | |
1558 | ok = _drbd_send_zc_bio(mdev, req->master_bio); | |
470be44a LE |
1559 | |
1560 | /* double check digest, sometimes buffers have been modified in flight. */ | |
1561 | if (dgs > 0 && dgs <= 64) { | |
24c4830c | 1562 | /* 64 byte, 512 bit, is the largest digest size |
470be44a LE |
1563 | * currently supported in kernel crypto. */ |
1564 | unsigned char digest[64]; | |
a0638456 PR |
1565 | drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest); |
1566 | if (memcmp(mdev->tconn->int_dig_out, digest, dgs)) { | |
470be44a LE |
1567 | dev_warn(DEV, |
1568 | "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", | |
ace652ac | 1569 | (unsigned long long)req->i.sector, req->i.size); |
470be44a LE |
1570 | } |
1571 | } /* else if (dgs > 64) { | |
1572 | ... Be noisy about digest too large ... | |
1573 | } */ | |
b411b363 PR |
1574 | } |
1575 | ||
61120870 | 1576 | drbd_put_data_sock(mdev->tconn); |
bd26bfc5 | 1577 | |
b411b363 PR |
1578 | return ok; |
1579 | } | |
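/* Userspace model of the "double check digest" logic above: compute the
 * digest once for the wire, then recompute after the send; a mismatch
 * means an upper layer modified the buffer while it was in flight.  A
 * trivial XOR checksum stands in for the kernel crypto hash, and the
 * payload is made up for the example. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t toy_digest(const uint8_t *buf, size_t len)
{
	uint8_t d = 0;

	while (len--)
		d ^= *buf++;
	return d;
}

int main(void)
{
	uint8_t payload[16] = "stable payload!";
	uint8_t wire_digest = toy_digest(payload, sizeof(payload));

	payload[0] ^= 0xff;               /* upper layer scribbles mid-flight */

	if (toy_digest(payload, sizeof(payload)) != wire_digest)
		puts("digest mismatch, buffer modified during write");
	return 0;
}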
1580 | ||
1581 | /* answer packet, used to send data back for read requests: | |
1582 | * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY) | |
1583 | * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY) | |
1584 | */ | |
d8763023 | 1585 | int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd, |
db830c46 | 1586 | struct drbd_peer_request *peer_req) |
b411b363 PR |
1587 | { |
1588 | int ok; | |
1589 | struct p_data p; | |
1590 | void *dgb; | |
1591 | int dgs; | |
1592 | ||
a0638456 PR |
1593 | dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ? |
1594 | crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0; | |
b411b363 | 1595 | |
db830c46 AG |
1596 | prepare_header(mdev, &p.head, cmd, sizeof(p) - |
1597 | sizeof(struct p_header80) + | |
1598 | dgs + peer_req->i.size); | |
1599 | p.sector = cpu_to_be64(peer_req->i.sector); | |
1600 | p.block_id = peer_req->block_id; | |
cc378270 | 1601 | p.seq_num = 0; /* unused */ |
b411b363 PR |
1602 | |
1603 | /* Only called by our kernel thread. | |
1604 | * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL | |
1605 | * in response to an admin command or module unload. | |
1606 | */ | |
61120870 | 1607 | if (!drbd_get_data_sock(mdev->tconn)) |
b411b363 PR |
1608 | return 0; |
1609 | ||
bedbd2a5 | 1610 | ok = sizeof(p) == drbd_send(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0); |
b411b363 | 1611 | if (ok && dgs) { |
a0638456 | 1612 | dgb = mdev->tconn->int_dig_out; |
db830c46 | 1613 | drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, dgb); |
bedbd2a5 | 1614 | ok = dgs == drbd_send(mdev->tconn, mdev->tconn->data.socket, dgb, dgs, 0); |
b411b363 PR |
1615 | } |
1616 | if (ok) | |
db830c46 | 1617 | ok = _drbd_send_zc_ee(mdev, peer_req); |
b411b363 | 1618 | |
61120870 | 1619 | drbd_put_data_sock(mdev->tconn); |
bd26bfc5 | 1620 | |
b411b363 PR |
1621 | return ok; |
1622 | } | |
1623 | ||
73a01a18 PR |
1624 | int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req) |
1625 | { | |
1626 | struct p_block_desc p; | |
1627 | ||
ace652ac AG |
1628 | p.sector = cpu_to_be64(req->i.sector); |
1629 | p.blksize = cpu_to_be32(req->i.size); | |
73a01a18 PR |
1630 | |
1631 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p)); | |
1632 | } | |
1633 | ||
b411b363 PR |
1634 | /* |
1635 | drbd_send distinguishes two cases: | |
1636 | ||
1637 | Packets sent via the data socket "sock" | |
1638 | and packets sent via the meta data socket "msock" | |
1639 | ||
1640 | sock msock | |
1641 | -----------------+-------------------------+------------------------------ | |
1642 | timeout conf.timeout / 2 conf.timeout / 2 | |
1643 | timeout action send a ping via msock Abort communication | |
1644 | and close all sockets | |
1645 | */ | |
1646 | ||
1647 | /* | |
1648 | * you must have down()ed the appropriate [m]sock_mutex elsewhere! | |
1649 | */ | |
bedbd2a5 | 1650 | int drbd_send(struct drbd_tconn *tconn, struct socket *sock, |
b411b363 PR |
1651 | void *buf, size_t size, unsigned msg_flags) |
1652 | { | |
1653 | struct kvec iov; | |
1654 | struct msghdr msg; | |
1655 | int rv, sent = 0; | |
1656 | ||
1657 | if (!sock) | |
1658 | return -1000; | |
1659 | ||
1660 | /* THINK if (signal_pending) return ... ? */ | |
1661 | ||
1662 | iov.iov_base = buf; | |
1663 | iov.iov_len = size; | |
1664 | ||
1665 | msg.msg_name = NULL; | |
1666 | msg.msg_namelen = 0; | |
1667 | msg.msg_control = NULL; | |
1668 | msg.msg_controllen = 0; | |
1669 | msg.msg_flags = msg_flags | MSG_NOSIGNAL; | |
1670 | ||
bedbd2a5 PR |
1671 | if (sock == tconn->data.socket) { |
1672 | tconn->ko_count = tconn->net_conf->ko_count; | |
1673 | drbd_update_congested(tconn); | |
b411b363 PR |
1674 | } |
1675 | do { | |
1676 | /* STRANGE | |
1677 | * tcp_sendmsg does _not_ use its size parameter at all ? | |
1678 | * | |
1679 | * -EAGAIN on timeout, -EINTR on signal. | |
1680 | */ | |
1681 | /* THINK | |
1682 | * do we need to block DRBD_SIG if sock == &meta.socket ?? | |
1683 | * otherwise wake_asender() might interrupt some send_*Ack ! | |
1684 | */ | |
1685 | rv = kernel_sendmsg(sock, &msg, &iov, 1, size); | |
1686 | if (rv == -EAGAIN) { | |
bedbd2a5 | 1687 | if (we_should_drop_the_connection(tconn, sock)) |
b411b363 PR |
1688 | break; |
1689 | else | |
1690 | continue; | |
1691 | } | |
b411b363 PR |
1692 | if (rv == -EINTR) { |
1693 | flush_signals(current); | |
1694 | rv = 0; | |
1695 | } | |
1696 | if (rv < 0) | |
1697 | break; | |
1698 | sent += rv; | |
1699 | iov.iov_base += rv; | |
1700 | iov.iov_len -= rv; | |
1701 | } while (sent < size); | |
1702 | ||
bedbd2a5 PR |
1703 | if (sock == tconn->data.socket) |
1704 | clear_bit(NET_CONGESTED, &tconn->flags); | |
b411b363 PR |
1705 | |
1706 | if (rv <= 0) { | |
1707 | if (rv != -EAGAIN) { | |
bedbd2a5 PR |
1708 | conn_err(tconn, "%s_sendmsg returned %d\n", |
1709 | sock == tconn->meta.socket ? "msock" : "sock", | |
1710 | rv); | |
1711 | drbd_force_state(tconn->volume0, NS(conn, C_BROKEN_PIPE)); | |
b411b363 | 1712 | } else |
bedbd2a5 | 1713 | drbd_force_state(tconn->volume0, NS(conn, C_TIMEOUT)); |
b411b363 PR |
1714 | } |
1715 | ||
1716 | return sent; | |
1717 | } | |
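/* Userspace sketch of the short-send loop in drbd_send() above: keep
 * sending until the whole buffer is out, advancing by whatever the
 * kernel accepted, retrying on EINTR (the kernel code instead flushes
 * signals) and giving up on real errors.  sock_fd is assumed to be a
 * connected TCP socket; the names here are stand-ins for this sketch. */
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

static ssize_t send_all(int sock_fd, const void *buf, size_t size)
{
	const char *p = buf;
	size_t sent = 0;

	while (sent < size) {
		ssize_t rv = send(sock_fd, p + sent, size - sent, MSG_NOSIGNAL);

		if (rv == -1 && errno == EINTR)
			continue;         /* cf. flush_signals() above */
		if (rv <= 0)
			return -1;        /* cf. C_BROKEN_PIPE / C_TIMEOUT above */
		sent += rv;
	}
	return (ssize_t)sent;
}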
1718 | ||
1719 | static int drbd_open(struct block_device *bdev, fmode_t mode) | |
1720 | { | |
1721 | struct drbd_conf *mdev = bdev->bd_disk->private_data; | |
1722 | unsigned long flags; | |
1723 | int rv = 0; | |
1724 | ||
2a48fc0a | 1725 | mutex_lock(&drbd_main_mutex); |
87eeee41 | 1726 | spin_lock_irqsave(&mdev->tconn->req_lock, flags); |
b411b363 PR |
1727 | /* to have a stable mdev->state.role |
1728 | * and no race with updating open_cnt */ | |
1729 | ||
1730 | if (mdev->state.role != R_PRIMARY) { | |
1731 | if (mode & FMODE_WRITE) | |
1732 | rv = -EROFS; | |
1733 | else if (!allow_oos) | |
1734 | rv = -EMEDIUMTYPE; | |
1735 | } | |
1736 | ||
1737 | if (!rv) | |
1738 | mdev->open_cnt++; | |
87eeee41 | 1739 | spin_unlock_irqrestore(&mdev->tconn->req_lock, flags); |
2a48fc0a | 1740 | mutex_unlock(&drbd_main_mutex); |
b411b363 PR |
1741 | |
1742 | return rv; | |
1743 | } | |
1744 | ||
1745 | static int drbd_release(struct gendisk *gd, fmode_t mode) | |
1746 | { | |
1747 | struct drbd_conf *mdev = gd->private_data; | |
2a48fc0a | 1748 | mutex_lock(&drbd_main_mutex); |
b411b363 | 1749 | mdev->open_cnt--; |
2a48fc0a | 1750 | mutex_unlock(&drbd_main_mutex); |
b411b363 PR |
1751 | return 0; |
1752 | } | |
1753 | ||
b411b363 PR |
1754 | static void drbd_set_defaults(struct drbd_conf *mdev) |
1755 | { | |
85f4cc17 PR |
1756 | /* This way we get a compile error when sync_conf grows, |
1757 | and we forgot to initialize it here */ | |
1758 | mdev->sync_conf = (struct syncer_conf) { | |
1759 | /* .rate = */ DRBD_RATE_DEF, | |
1760 | /* .after = */ DRBD_AFTER_DEF, | |
1761 | /* .al_extents = */ DRBD_AL_EXTENTS_DEF, | |
85f4cc17 PR |
1762 | /* .verify_alg = */ {}, 0, |
1763 | /* .cpu_mask = */ {}, 0, | |
1764 | /* .csums_alg = */ {}, 0, | |
e756414f | 1765 | /* .use_rle = */ 0, |
9a31d716 PR |
1766 | /* .on_no_data = */ DRBD_ON_NO_DATA_DEF, |
1767 | /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF, | |
1768 | /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF, | |
1769 | /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF, | |
0f0601f4 LE |
1770 | /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF, |
1771 | /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF | |
85f4cc17 PR |
1772 | }; |
1773 | ||
1774 | /* Have to use that way, because the layout differs between | |
1775 | big endian and little endian */ | |
b411b363 PR |
1776 | mdev->state = (union drbd_state) { |
1777 | { .role = R_SECONDARY, | |
1778 | .peer = R_UNKNOWN, | |
1779 | .conn = C_STANDALONE, | |
1780 | .disk = D_DISKLESS, | |
1781 | .pdsk = D_UNKNOWN, | |
fb22c402 PR |
1782 | .susp = 0, |
1783 | .susp_nod = 0, | |
1784 | .susp_fen = 0 | |
b411b363 PR |
1785 | } }; |
1786 | } | |
1787 | ||
1788 | void drbd_init_set_defaults(struct drbd_conf *mdev) | |
1789 | { | |
1790 | /* the memset(,0,) did most of this. | |
1791 | * note: only assignments, no allocation in here */ | |
1792 | ||
1793 | drbd_set_defaults(mdev); | |
1794 | ||
b411b363 PR |
1795 | atomic_set(&mdev->ap_bio_cnt, 0); |
1796 | atomic_set(&mdev->ap_pending_cnt, 0); | |
1797 | atomic_set(&mdev->rs_pending_cnt, 0); | |
1798 | atomic_set(&mdev->unacked_cnt, 0); | |
1799 | atomic_set(&mdev->local_cnt, 0); | |
b411b363 | 1800 | atomic_set(&mdev->pp_in_use, 0); |
435f0740 | 1801 | atomic_set(&mdev->pp_in_use_by_net, 0); |
778f271d | 1802 | atomic_set(&mdev->rs_sect_in, 0); |
0f0601f4 | 1803 | atomic_set(&mdev->rs_sect_ev, 0); |
759fbdfb | 1804 | atomic_set(&mdev->ap_in_flight, 0); |
b411b363 PR |
1805 | |
1806 | mutex_init(&mdev->md_io_mutex); | |
e42325a5 PR |
1807 | mutex_init(&mdev->tconn->data.mutex); |
1808 | mutex_init(&mdev->tconn->meta.mutex); | |
1809 | sema_init(&mdev->tconn->data.work.s, 0); | |
1810 | sema_init(&mdev->tconn->meta.work.s, 0); | |
b411b363 PR |
1811 | mutex_init(&mdev->state_mutex); |
1812 | ||
e42325a5 PR |
1813 | spin_lock_init(&mdev->tconn->data.work.q_lock); |
1814 | spin_lock_init(&mdev->tconn->meta.work.q_lock); | |
b411b363 PR |
1815 | |
1816 | spin_lock_init(&mdev->al_lock); | |
87eeee41 | 1817 | spin_lock_init(&mdev->tconn->req_lock); |
b411b363 PR |
1818 | spin_lock_init(&mdev->peer_seq_lock); |
1819 | spin_lock_init(&mdev->epoch_lock); | |
1820 | ||
1821 | INIT_LIST_HEAD(&mdev->active_ee); | |
1822 | INIT_LIST_HEAD(&mdev->sync_ee); | |
1823 | INIT_LIST_HEAD(&mdev->done_ee); | |
1824 | INIT_LIST_HEAD(&mdev->read_ee); | |
1825 | INIT_LIST_HEAD(&mdev->net_ee); | |
1826 | INIT_LIST_HEAD(&mdev->resync_reads); | |
e42325a5 PR |
1827 | INIT_LIST_HEAD(&mdev->tconn->data.work.q); |
1828 | INIT_LIST_HEAD(&mdev->tconn->meta.work.q); | |
b411b363 PR |
1829 | INIT_LIST_HEAD(&mdev->resync_work.list); |
1830 | INIT_LIST_HEAD(&mdev->unplug_work.list); | |
e9e6f3ec | 1831 | INIT_LIST_HEAD(&mdev->go_diskless.list); |
b411b363 | 1832 | INIT_LIST_HEAD(&mdev->md_sync_work.list); |
c4752ef1 | 1833 | INIT_LIST_HEAD(&mdev->start_resync_work.list); |
b411b363 | 1834 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); |
0ced55a3 | 1835 | |
794abb75 | 1836 | mdev->resync_work.cb = w_resync_timer; |
b411b363 | 1837 | mdev->unplug_work.cb = w_send_write_hint; |
e9e6f3ec | 1838 | mdev->go_diskless.cb = w_go_diskless; |
b411b363 PR |
1839 | mdev->md_sync_work.cb = w_md_sync; |
1840 | mdev->bm_io_work.w.cb = w_bitmap_io; | |
370a43e7 | 1841 | mdev->start_resync_work.cb = w_start_resync; |
a21e9298 PR |
1842 | |
1843 | mdev->resync_work.mdev = mdev; | |
1844 | mdev->unplug_work.mdev = mdev; | |
1845 | mdev->go_diskless.mdev = mdev; | |
1846 | mdev->md_sync_work.mdev = mdev; | |
1847 | mdev->bm_io_work.w.mdev = mdev; | |
1848 | mdev->start_resync_work.mdev = mdev; | |
1849 | ||
b411b363 PR |
1850 | init_timer(&mdev->resync_timer); |
1851 | init_timer(&mdev->md_sync_timer); | |
370a43e7 | 1852 | init_timer(&mdev->start_resync_timer); |
7fde2be9 | 1853 | init_timer(&mdev->request_timer); |
b411b363 PR |
1854 | mdev->resync_timer.function = resync_timer_fn; |
1855 | mdev->resync_timer.data = (unsigned long) mdev; | |
1856 | mdev->md_sync_timer.function = md_sync_timer_fn; | |
1857 | mdev->md_sync_timer.data = (unsigned long) mdev; | |
370a43e7 PR |
1858 | mdev->start_resync_timer.function = start_resync_timer_fn; |
1859 | mdev->start_resync_timer.data = (unsigned long) mdev; | |
7fde2be9 PR |
1860 | mdev->request_timer.function = request_timer_fn; |
1861 | mdev->request_timer.data = (unsigned long) mdev; | |
b411b363 PR |
1862 | |
1863 | init_waitqueue_head(&mdev->misc_wait); | |
1864 | init_waitqueue_head(&mdev->state_wait); | |
1865 | init_waitqueue_head(&mdev->ee_wait); | |
1866 | init_waitqueue_head(&mdev->al_wait); | |
1867 | init_waitqueue_head(&mdev->seq_wait); | |
1868 | ||
bed879ae PR |
1869 | drbd_thread_init(mdev, &mdev->tconn->receiver, drbdd_init, "receiver"); |
1870 | drbd_thread_init(mdev, &mdev->tconn->worker, drbd_worker, "worker"); | |
1871 | drbd_thread_init(mdev, &mdev->tconn->asender, drbd_asender, "asender"); | |
b411b363 | 1872 | |
fd340c12 | 1873 | /* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */ |
2451fc3b | 1874 | mdev->write_ordering = WO_bdev_flush; |
b411b363 | 1875 | mdev->resync_wenr = LC_FREE; |
99432fcc PR |
1876 | mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; |
1877 | mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; | |
b411b363 PR |
1878 | } |
1879 | ||
1880 | void drbd_mdev_cleanup(struct drbd_conf *mdev) | |
1881 | { | |
1d7734a0 | 1882 | int i; |
e6b3ea83 | 1883 | if (mdev->tconn->receiver.t_state != NONE) |
b411b363 | 1884 | dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", |
e6b3ea83 | 1885 | mdev->tconn->receiver.t_state); |
b411b363 PR |
1886 | |
1887 | /* no need to lock it, I'm the only thread alive */ | |
1888 | if (atomic_read(&mdev->current_epoch->epoch_size) != 0) | |
1889 | dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size)); | |
1890 | mdev->al_writ_cnt = | |
1891 | mdev->bm_writ_cnt = | |
1892 | mdev->read_cnt = | |
1893 | mdev->recv_cnt = | |
1894 | mdev->send_cnt = | |
1895 | mdev->writ_cnt = | |
1896 | mdev->p_size = | |
1897 | mdev->rs_start = | |
1898 | mdev->rs_total = | |
1d7734a0 LE |
1899 | mdev->rs_failed = 0; |
1900 | mdev->rs_last_events = 0; | |
0f0601f4 | 1901 | mdev->rs_last_sect_ev = 0; |
1d7734a0 LE |
1902 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
1903 | mdev->rs_mark_left[i] = 0; | |
1904 | mdev->rs_mark_time[i] = 0; | |
1905 | } | |
89e58e75 | 1906 | D_ASSERT(mdev->tconn->net_conf == NULL); |
b411b363 PR |
1907 | |
1908 | drbd_set_my_capacity(mdev, 0); | |
1909 | if (mdev->bitmap) { | |
1910 | /* maybe never allocated. */ | |
02d9a94b | 1911 | drbd_bm_resize(mdev, 0, 1); |
b411b363 PR |
1912 | drbd_bm_cleanup(mdev); |
1913 | } | |
1914 | ||
1915 | drbd_free_resources(mdev); | |
0778286a | 1916 | clear_bit(AL_SUSPENDED, &mdev->flags); |
b411b363 PR |
1917 | |
1918 | /* | |
1919 | * currently we call drbd_init_ee only on module load, so | |
1920 | * we may call drbd_release_ee only on module unload! | |
1921 | */ | |
1922 | D_ASSERT(list_empty(&mdev->active_ee)); | |
1923 | D_ASSERT(list_empty(&mdev->sync_ee)); | |
1924 | D_ASSERT(list_empty(&mdev->done_ee)); | |
1925 | D_ASSERT(list_empty(&mdev->read_ee)); | |
1926 | D_ASSERT(list_empty(&mdev->net_ee)); | |
1927 | D_ASSERT(list_empty(&mdev->resync_reads)); | |
e42325a5 PR |
1928 | D_ASSERT(list_empty(&mdev->tconn->data.work.q)); |
1929 | D_ASSERT(list_empty(&mdev->tconn->meta.work.q)); | |
b411b363 PR |
1930 | D_ASSERT(list_empty(&mdev->resync_work.list)); |
1931 | D_ASSERT(list_empty(&mdev->unplug_work.list)); | |
e9e6f3ec | 1932 | D_ASSERT(list_empty(&mdev->go_diskless.list)); |
2265b473 LE |
1933 | |
1934 | drbd_set_defaults(mdev); | |
b411b363 PR |
1935 | } |
1936 | ||
1937 | ||
1938 | static void drbd_destroy_mempools(void) | |
1939 | { | |
1940 | struct page *page; | |
1941 | ||
1942 | while (drbd_pp_pool) { | |
1943 | page = drbd_pp_pool; | |
1944 | drbd_pp_pool = (struct page *)page_private(page); | |
1945 | __free_page(page); | |
1946 | drbd_pp_vacant--; | |
1947 | } | |
1948 | ||
1949 | /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */ | |
1950 | ||
1951 | if (drbd_ee_mempool) | |
1952 | mempool_destroy(drbd_ee_mempool); | |
1953 | if (drbd_request_mempool) | |
1954 | mempool_destroy(drbd_request_mempool); | |
1955 | if (drbd_ee_cache) | |
1956 | kmem_cache_destroy(drbd_ee_cache); | |
1957 | if (drbd_request_cache) | |
1958 | kmem_cache_destroy(drbd_request_cache); | |
1959 | if (drbd_bm_ext_cache) | |
1960 | kmem_cache_destroy(drbd_bm_ext_cache); | |
1961 | if (drbd_al_ext_cache) | |
1962 | kmem_cache_destroy(drbd_al_ext_cache); | |
1963 | ||
1964 | drbd_ee_mempool = NULL; | |
1965 | drbd_request_mempool = NULL; | |
1966 | drbd_ee_cache = NULL; | |
1967 | drbd_request_cache = NULL; | |
1968 | drbd_bm_ext_cache = NULL; | |
1969 | drbd_al_ext_cache = NULL; | |
1970 | ||
1971 | return; | |
1972 | } | |
1973 | ||
1974 | static int drbd_create_mempools(void) | |
1975 | { | |
1976 | struct page *page; | |
1816a2b4 | 1977 | const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; |
b411b363 PR |
1978 | int i; |
1979 | ||
1980 | /* prepare our caches and mempools */ | |
1981 | drbd_request_mempool = NULL; | |
1982 | drbd_ee_cache = NULL; | |
1983 | drbd_request_cache = NULL; | |
1984 | drbd_bm_ext_cache = NULL; | |
1985 | drbd_al_ext_cache = NULL; | |
1986 | drbd_pp_pool = NULL; | |
1987 | ||
1988 | /* caches */ | |
1989 | drbd_request_cache = kmem_cache_create( | |
1990 | "drbd_req", sizeof(struct drbd_request), 0, 0, NULL); | |
1991 | if (drbd_request_cache == NULL) | |
1992 | goto Enomem; | |
1993 | ||
1994 | drbd_ee_cache = kmem_cache_create( | |
f6ffca9f | 1995 | "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL); |
b411b363 PR |
1996 | if (drbd_ee_cache == NULL) |
1997 | goto Enomem; | |
1998 | ||
1999 | drbd_bm_ext_cache = kmem_cache_create( | |
2000 | "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL); | |
2001 | if (drbd_bm_ext_cache == NULL) | |
2002 | goto Enomem; | |
2003 | ||
2004 | drbd_al_ext_cache = kmem_cache_create( | |
2005 | "drbd_al", sizeof(struct lc_element), 0, 0, NULL); | |
2006 | if (drbd_al_ext_cache == NULL) | |
2007 | goto Enomem; | |
2008 | ||
2009 | /* mempools */ | |
2010 | drbd_request_mempool = mempool_create(number, | |
2011 | mempool_alloc_slab, mempool_free_slab, drbd_request_cache); | |
2012 | if (drbd_request_mempool == NULL) | |
2013 | goto Enomem; | |
2014 | ||
2015 | drbd_ee_mempool = mempool_create(number, | |
2016 | mempool_alloc_slab, mempool_free_slab, drbd_ee_cache); | |
2027ae1f | 2017 | if (drbd_ee_mempool == NULL) |
b411b363 PR |
2018 | goto Enomem; |
2019 | ||
2020 | /* drbd's page pool */ | |
2021 | spin_lock_init(&drbd_pp_lock); | |
2022 | ||
2023 | for (i = 0; i < number; i++) { | |
2024 | page = alloc_page(GFP_HIGHUSER); | |
2025 | if (!page) | |
2026 | goto Enomem; | |
2027 | set_page_private(page, (unsigned long)drbd_pp_pool); | |
2028 | drbd_pp_pool = page; | |
2029 | } | |
2030 | drbd_pp_vacant = number; | |
2031 | ||
2032 | return 0; | |
2033 | ||
2034 | Enomem: | |
2035 | drbd_destroy_mempools(); /* in case we allocated some */ | |
2036 | return -ENOMEM; | |
2037 | } | |
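/* Userspace model of the drbd_pp_pool free list built above: each page
 * stores the previous list head in its private word (set_page_private()
 * / page_private()), so the pool is a simple LIFO stack.  struct
 * model_page and the helpers are stand-ins invented for this sketch. */
#include <stddef.h>

struct model_page {
	unsigned long private;                 /* plays the role of page_private() */
};

static struct model_page *model_pool;      /* plays the role of drbd_pp_pool */

static void model_pool_push(struct model_page *page)
{
	page->private = (unsigned long)model_pool;  /* cf. set_page_private() */
	model_pool = page;
}

static struct model_page *model_pool_pop(void)
{
	struct model_page *page = model_pool;

	if (page)
		model_pool = (struct model_page *)page->private;
	return page;
}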
2038 | ||
2039 | static int drbd_notify_sys(struct notifier_block *this, unsigned long code, | |
2040 | void *unused) | |
2041 | { | |
2042 | /* just so we have it. you never know what interesting things we | |
2043 | * might want to do here some day... | |
2044 | */ | |
2045 | ||
2046 | return NOTIFY_DONE; | |
2047 | } | |
2048 | ||
2049 | static struct notifier_block drbd_notifier = { | |
2050 | .notifier_call = drbd_notify_sys, | |
2051 | }; | |
2052 | ||
2053 | static void drbd_release_ee_lists(struct drbd_conf *mdev) | |
2054 | { | |
2055 | int rr; | |
2056 | ||
2057 | rr = drbd_release_ee(mdev, &mdev->active_ee); | |
2058 | if (rr) | |
2059 | dev_err(DEV, "%d EEs in active list found!\n", rr); | |
2060 | ||
2061 | rr = drbd_release_ee(mdev, &mdev->sync_ee); | |
2062 | if (rr) | |
2063 | dev_err(DEV, "%d EEs in sync list found!\n", rr); | |
2064 | ||
2065 | rr = drbd_release_ee(mdev, &mdev->read_ee); | |
2066 | if (rr) | |
2067 | dev_err(DEV, "%d EEs in read list found!\n", rr); | |
2068 | ||
2069 | rr = drbd_release_ee(mdev, &mdev->done_ee); | |
2070 | if (rr) | |
2071 | dev_err(DEV, "%d EEs in done list found!\n", rr); | |
2072 | ||
2073 | rr = drbd_release_ee(mdev, &mdev->net_ee); | |
2074 | if (rr) | |
2075 | dev_err(DEV, "%d EEs in net list found!\n", rr); | |
2076 | } | |
2077 | ||
2078 | /* caution. no locking. | |
2079 | * currently only used from module cleanup code. */ | |
2080 | static void drbd_delete_device(unsigned int minor) | |
2081 | { | |
2082 | struct drbd_conf *mdev = minor_to_mdev(minor); | |
2083 | ||
2084 | if (!mdev) | |
2085 | return; | |
2086 | ||
2087 | /* paranoia asserts */ | |
70dc65e1 | 2088 | D_ASSERT(mdev->open_cnt == 0); |
e42325a5 | 2089 | D_ASSERT(list_empty(&mdev->tconn->data.work.q)); |
b411b363 PR |
2090 | /* end paranoia asserts */ |
2091 | ||
2092 | del_gendisk(mdev->vdisk); | |
2093 | ||
2094 | /* cleanup stuff that may have been allocated during | |
2095 | * device (re-)configuration or state changes */ | |
2096 | ||
2097 | if (mdev->this_bdev) | |
2098 | bdput(mdev->this_bdev); | |
2099 | ||
2100 | drbd_free_resources(mdev); | |
2111438b | 2101 | drbd_free_tconn(mdev->tconn); |
b411b363 PR |
2102 | |
2103 | drbd_release_ee_lists(mdev); | |
2104 | ||
b411b363 PR |
2105 | lc_destroy(mdev->act_log); |
2106 | lc_destroy(mdev->resync); | |
2107 | ||
2108 | kfree(mdev->p_uuid); | |
2109 | /* mdev->p_uuid = NULL; */ | |
2110 | ||
b411b363 PR |
2111 | /* cleanup the rest that has been |
2112 | * allocated from drbd_new_device | |
2113 | * and actually free the mdev itself */ | |
2114 | drbd_free_mdev(mdev); | |
2115 | } | |
2116 | ||
2117 | static void drbd_cleanup(void) | |
2118 | { | |
2119 | unsigned int i; | |
2120 | ||
2121 | unregister_reboot_notifier(&drbd_notifier); | |
2122 | ||
17a93f30 LE |
2123 | /* first remove proc, |
2124 | * drbdsetup uses its presence to detect | |
2125 | * whether DRBD is loaded. | |
2126 | * If we get stuck in proc removal, | |
2127 | * but have netlink already deregistered, | |
2128 | * some drbdsetup commands may wait forever | |
2129 | * for an answer. | |
2130 | */ | |
2131 | if (drbd_proc) | |
2132 | remove_proc_entry("drbd", NULL); | |
2133 | ||
b411b363 PR |
2134 | drbd_nl_cleanup(); |
2135 | ||
2136 | if (minor_table) { | |
b411b363 PR |
2137 | i = minor_count; |
2138 | while (i--) | |
2139 | drbd_delete_device(i); | |
2140 | drbd_destroy_mempools(); | |
2141 | } | |
2142 | ||
2143 | kfree(minor_table); | |
2144 | ||
2145 | unregister_blkdev(DRBD_MAJOR, "drbd"); | |
2146 | ||
2147 | printk(KERN_INFO "drbd: module cleanup done.\n"); | |
2148 | } | |
2149 | ||
2150 | /** | |
2151 | * drbd_congested() - Callback for pdflush | |
2152 | * @congested_data: User data | |
2153 | * @bdi_bits: Bits pdflush is currently interested in | |
2154 | * | |
2155 | * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. | |
2156 | */ | |
2157 | static int drbd_congested(void *congested_data, int bdi_bits) | |
2158 | { | |
2159 | struct drbd_conf *mdev = congested_data; | |
2160 | struct request_queue *q; | |
2161 | char reason = '-'; | |
2162 | int r = 0; | |
2163 | ||
1b881ef7 | 2164 | if (!may_inc_ap_bio(mdev)) { |
b411b363 PR |
2165 | /* DRBD has frozen IO */ |
2166 | r = bdi_bits; | |
2167 | reason = 'd'; | |
2168 | goto out; | |
2169 | } | |
2170 | ||
2171 | if (get_ldev(mdev)) { | |
2172 | q = bdev_get_queue(mdev->ldev->backing_bdev); | |
2173 | r = bdi_congested(&q->backing_dev_info, bdi_bits); | |
2174 | put_ldev(mdev); | |
2175 | if (r) | |
2176 | reason = 'b'; | |
2177 | } | |
2178 | ||
01a311a5 | 2179 | if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) { |
b411b363 PR |
2180 | r |= (1 << BDI_async_congested); |
2181 | reason = reason == 'b' ? 'a' : 'n'; | |
2182 | } | |
2183 | ||
2184 | out: | |
2185 | mdev->congestion_reason = reason; | |
2186 | return r; | |
2187 | } | |
2188 | ||
2111438b PR |
2189 | struct drbd_tconn *drbd_new_tconn(char *name) |
2190 | { | |
2191 | struct drbd_tconn *tconn; | |
2192 | ||
2193 | tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL); | |
2194 | if (!tconn) | |
2195 | return NULL; | |
2196 | ||
2197 | tconn->name = kstrdup(name, GFP_KERNEL); | |
2198 | if (!tconn->name) | |
2199 | goto fail; | |
2200 | ||
b2fb6dbe PR |
2201 | atomic_set(&tconn->net_cnt, 0); |
2202 | init_waitqueue_head(&tconn->net_cnt_wait); | |
062e879c | 2203 | idr_init(&tconn->volumes); |
b2fb6dbe | 2204 | |
2111438b PR |
2205 | write_lock_irq(&global_state_lock); |
2206 | list_add(&tconn->all_tconn, &drbd_tconns); | |
2207 | write_unlock_irq(&global_state_lock); | |
2208 | ||
2209 | return tconn; | |
2210 | ||
2211 | fail: | |
2212 | kfree(tconn->name); | |
2213 | kfree(tconn); | |
2214 | ||
2215 | return NULL; | |
2216 | } | |
2217 | ||
2218 | void drbd_free_tconn(struct drbd_tconn *tconn) | |
2219 | { | |
2220 | write_lock_irq(&global_state_lock); | |
2221 | list_del(&tconn->all_tconn); | |
2222 | write_unlock_irq(&global_state_lock); | |
062e879c | 2223 | idr_destroy(&tconn->volumes); |
2111438b PR |
2224 | |
2225 | kfree(tconn->name); | |
b42a70ad PR |
2226 | kfree(tconn->int_dig_out); |
2227 | kfree(tconn->int_dig_in); | |
2228 | kfree(tconn->int_dig_vv); | |
2111438b PR |
2229 | kfree(tconn); |
2230 | } | |
2231 | ||
b411b363 PR |
2232 | struct drbd_conf *drbd_new_device(unsigned int minor) |
2233 | { | |
2234 | struct drbd_conf *mdev; | |
2235 | struct gendisk *disk; | |
2236 | struct request_queue *q; | |
60ae4966 | 2237 | char conn_name[9]; /* drbd1234N */ |
062e879c | 2238 | int vnr; |
b411b363 PR |
2239 | |
2240 | /* GFP_KERNEL, we are outside of all write-out paths */ | |
2241 | mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL); | |
2242 | if (!mdev) | |
2243 | return NULL; | |
60ae4966 PR |
2244 | sprintf(conn_name, "drbd%d", minor); |
2245 | mdev->tconn = drbd_new_tconn(conn_name); | |
2111438b PR |
2246 | if (!mdev->tconn) |
2247 | goto out_no_tconn; | |
062e879c PR |
2248 | if (!idr_pre_get(&mdev->tconn->volumes, GFP_KERNEL)) |
2249 | goto out_no_cpumask; | |
2250 | if (idr_get_new(&mdev->tconn->volumes, mdev, &vnr)) | |
2251 | goto out_no_cpumask; | |
2252 | if (vnr != 0) { | |
2253 | dev_err(DEV, "vnr = %d\n", vnr); | |
2254 | goto out_no_cpumask; | |
2255 | } | |
80822284 | 2256 | if (!zalloc_cpumask_var(&mdev->tconn->cpu_mask, GFP_KERNEL)) |
b411b363 PR |
2257 | goto out_no_cpumask; |
2258 | ||
2111438b | 2259 | mdev->tconn->volume0 = mdev; |
b411b363 PR |
2260 | mdev->minor = minor; |
2261 | ||
2262 | drbd_init_set_defaults(mdev); | |
2263 | ||
2264 | q = blk_alloc_queue(GFP_KERNEL); | |
2265 | if (!q) | |
2266 | goto out_no_q; | |
2267 | mdev->rq_queue = q; | |
2268 | q->queuedata = mdev; | |
b411b363 PR |
2269 | |
2270 | disk = alloc_disk(1); | |
2271 | if (!disk) | |
2272 | goto out_no_disk; | |
2273 | mdev->vdisk = disk; | |
2274 | ||
81e84650 | 2275 | set_disk_ro(disk, true); |
b411b363 PR |
2276 | |
2277 | disk->queue = q; | |
2278 | disk->major = DRBD_MAJOR; | |
2279 | disk->first_minor = minor; | |
2280 | disk->fops = &drbd_ops; | |
2281 | sprintf(disk->disk_name, "drbd%d", minor); | |
2282 | disk->private_data = mdev; | |
2283 | ||
2284 | mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor)); | |
2285 | /* we have no partitions. we contain only ourselves. */ | |
2286 | mdev->this_bdev->bd_contains = mdev->this_bdev; | |
2287 | ||
2288 | q->backing_dev_info.congested_fn = drbd_congested; | |
2289 | q->backing_dev_info.congested_data = mdev; | |
2290 | ||
2f58dcfc | 2291 | blk_queue_make_request(q, drbd_make_request); |
99432fcc PR |
2292 | /* Setting the max_hw_sectors to an odd value of 8 KiB here; | |
2293 | this triggers a max_bio_size message upon first attach or connect. */ | |
2294 | blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); | |
b411b363 PR |
2295 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
2296 | blk_queue_merge_bvec(q, drbd_merge_bvec); | |
87eeee41 | 2297 | q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */ |
b411b363 PR |
2298 | |
2299 | mdev->md_io_page = alloc_page(GFP_KERNEL); | |
2300 | if (!mdev->md_io_page) | |
2301 | goto out_no_io_page; | |
2302 | ||
2303 | if (drbd_bm_init(mdev)) | |
2304 | goto out_no_bitmap; | |
2305 | /* no need to lock access, we are still initializing this minor device. */ | |
2306 | if (!tl_init(mdev)) | |
2307 | goto out_no_tl; | |
dac1389c | 2308 | mdev->read_requests = RB_ROOT; |
de696716 | 2309 | mdev->write_requests = RB_ROOT; |
b411b363 | 2310 | |
b411b363 PR |
2311 | mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); |
2312 | if (!mdev->current_epoch) | |
2313 | goto out_no_epoch; | |
2314 | ||
2315 | INIT_LIST_HEAD(&mdev->current_epoch->list); | |
2316 | mdev->epochs = 1; | |
2317 | ||
2318 | return mdev; | |
2319 | ||
2320 | /* out_whatever_else: | |
2321 | kfree(mdev->current_epoch); */ | |
2322 | out_no_epoch: | |
b411b363 PR |
2323 | tl_cleanup(mdev); |
2324 | out_no_tl: | |
2325 | drbd_bm_cleanup(mdev); | |
2326 | out_no_bitmap: | |
2327 | __free_page(mdev->md_io_page); | |
2328 | out_no_io_page: | |
2329 | put_disk(disk); | |
2330 | out_no_disk: | |
2331 | blk_cleanup_queue(q); | |
2332 | out_no_q: | |
80822284 | 2333 | free_cpumask_var(mdev->tconn->cpu_mask); |
b411b363 | 2334 | out_no_cpumask: |
2111438b PR |
2335 | drbd_free_tconn(mdev->tconn); |
2336 | out_no_tconn: | |
b411b363 PR |
2337 | kfree(mdev); |
2338 | return NULL; | |
2339 | } | |
2340 | ||
2341 | /* counterpart of drbd_new_device. | |
2342 | * last part of drbd_delete_device. */ | |
2343 | void drbd_free_mdev(struct drbd_conf *mdev) | |
2344 | { | |
2345 | kfree(mdev->current_epoch); | |
b411b363 PR |
2346 | tl_cleanup(mdev); |
2347 | if (mdev->bitmap) /* should no longer be there. */ | |
2348 | drbd_bm_cleanup(mdev); | |
2349 | __free_page(mdev->md_io_page); | |
2350 | put_disk(mdev->vdisk); | |
2351 | blk_cleanup_queue(mdev->rq_queue); | |
b411b363 PR |
2352 | kfree(mdev); |
2353 | } | |
2354 | ||
2355 | ||
2356 | int __init drbd_init(void) | |
2357 | { | |
2358 | int err; | |
2359 | ||
fd340c12 PR |
2360 | BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95)); |
2361 | BUILD_BUG_ON(sizeof(struct p_handshake) != 80); | |
b411b363 | 2362 | |
2b8a90b5 | 2363 | if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { |
b411b363 PR |
2364 | printk(KERN_ERR |
2365 | "drbd: invalid minor_count (%d)\n", minor_count); | |
2366 | #ifdef MODULE | |
2367 | return -EINVAL; | |
2368 | #else | |
2369 | minor_count = 8; | |
2370 | #endif | |
2371 | } | |
2372 | ||
2373 | err = drbd_nl_init(); | |
2374 | if (err) | |
2375 | return err; | |
2376 | ||
2377 | err = register_blkdev(DRBD_MAJOR, "drbd"); | |
2378 | if (err) { | |
2379 | printk(KERN_ERR | |
2380 | "drbd: unable to register block device major %d\n", | |
2381 | DRBD_MAJOR); | |
2382 | return err; | |
2383 | } | |
2384 | ||
2385 | register_reboot_notifier(&drbd_notifier); | |
2386 | ||
2387 | /* | |
2388 | * allocate all necessary structs | |
2389 | */ | |
2390 | err = -ENOMEM; | |
2391 | ||
2392 | init_waitqueue_head(&drbd_pp_wait); | |
2393 | ||
2394 | drbd_proc = NULL; /* play safe for drbd_cleanup */ | |
2395 | minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count, | |
2396 | GFP_KERNEL); | |
2397 | if (!minor_table) | |
2398 | goto Enomem; | |
2399 | ||
2400 | err = drbd_create_mempools(); | |
2401 | if (err) | |
2402 | goto Enomem; | |
2403 | ||
8c484ee4 | 2404 | drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); |
b411b363 PR |
2405 | if (!drbd_proc) { |
2406 | printk(KERN_ERR "drbd: unable to register proc file\n"); | |
2407 | goto Enomem; | |
2408 | } | |
2409 | ||
2410 | rwlock_init(&global_state_lock); | |
2111438b | 2411 | INIT_LIST_HEAD(&drbd_tconns); |
b411b363 PR |
2412 | |
2413 | printk(KERN_INFO "drbd: initialized. " | |
2414 | "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", | |
2415 | API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX); | |
2416 | printk(KERN_INFO "drbd: %s\n", drbd_buildtag()); | |
2417 | printk(KERN_INFO "drbd: registered as block device major %d\n", | |
2418 | DRBD_MAJOR); | |
2419 | printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table); | |
2420 | ||
2421 | return 0; /* Success! */ | |
2422 | ||
2423 | Enomem: | |
2424 | drbd_cleanup(); | |
2425 | if (err == -ENOMEM) | |
2426 | /* currently always the case */ | |
2427 | printk(KERN_ERR "drbd: ran out of memory\n"); | |
2428 | else | |
2429 | printk(KERN_ERR "drbd: initialization failure\n"); | |
2430 | return err; | |
2431 | } | |
2432 | ||
2433 | void drbd_free_bc(struct drbd_backing_dev *ldev) | |
2434 | { | |
2435 | if (ldev == NULL) | |
2436 | return; | |
2437 | ||
e525fd89 TH |
2438 | blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); |
2439 | blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); | |
b411b363 PR |
2440 | |
2441 | kfree(ldev); | |
2442 | } | |
2443 | ||
360cc740 PR |
2444 | void drbd_free_sock(struct drbd_tconn *tconn) |
2445 | { | |
2446 | if (tconn->data.socket) { | |
2447 | mutex_lock(&tconn->data.mutex); | |
2448 | kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR); | |
2449 | sock_release(tconn->data.socket); | |
2450 | tconn->data.socket = NULL; | |
2451 | mutex_unlock(&tconn->data.mutex); | |
b411b363 | 2452 | } |
360cc740 PR |
2453 | if (tconn->meta.socket) { |
2454 | mutex_lock(&tconn->meta.mutex); | |
2455 | kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR); | |
2456 | sock_release(tconn->meta.socket); | |
2457 | tconn->meta.socket = NULL; | |
2458 | mutex_unlock(&tconn->meta.mutex); | |
b411b363 PR |
2459 | } |
2460 | } | |
2461 | ||
2462 | ||
2463 | void drbd_free_resources(struct drbd_conf *mdev) | |
2464 | { | |
2465 | crypto_free_hash(mdev->csums_tfm); | |
2466 | mdev->csums_tfm = NULL; | |
2467 | crypto_free_hash(mdev->verify_tfm); | |
2468 | mdev->verify_tfm = NULL; | |
a0638456 PR |
2469 | crypto_free_hash(mdev->tconn->cram_hmac_tfm); |
2470 | mdev->tconn->cram_hmac_tfm = NULL; | |
2471 | crypto_free_hash(mdev->tconn->integrity_w_tfm); | |
2472 | mdev->tconn->integrity_w_tfm = NULL; | |
2473 | crypto_free_hash(mdev->tconn->integrity_r_tfm); | |
2474 | mdev->tconn->integrity_r_tfm = NULL; | |
b411b363 | 2475 | |
360cc740 | 2476 | drbd_free_sock(mdev->tconn); |
b411b363 PR |
2477 | |
2478 | __no_warn(local, | |
2479 | drbd_free_bc(mdev->ldev); | |
2480 | mdev->ldev = NULL;); | |
2481 | } | |
2482 | ||
2483 | /* meta data management */ | |
2484 | ||
2485 | struct meta_data_on_disk { | |
2486 | u64 la_size; /* last agreed size. */ | |
2487 | u64 uuid[UI_SIZE]; /* UUIDs. */ | |
2488 | u64 device_uuid; | |
2489 | u64 reserved_u64_1; | |
2490 | u32 flags; /* MDF */ | |
2491 | u32 magic; | |
2492 | u32 md_size_sect; | |
2493 | u32 al_offset; /* offset to this block */ | |
2494 | u32 al_nr_extents; /* important for restoring the AL */ | |
2495 | /* `-- act_log->nr_elements <-- sync_conf.al_extents */ | |
2496 | u32 bm_offset; /* offset to the bitmap, from here */ | |
2497 | u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ | |
99432fcc PR |
2498 | u32 la_peer_max_bio_size; /* last peer max_bio_size */ |
2499 | u32 reserved_u32[3]; | |
b411b363 PR |
2500 | |
2501 | } __packed; | |
2502 | ||
2503 | /** | |
2504 | * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set | |
2505 | * @mdev: DRBD device. | |
2506 | */ | |
2507 | void drbd_md_sync(struct drbd_conf *mdev) | |
2508 | { | |
2509 | struct meta_data_on_disk *buffer; | |
2510 | sector_t sector; | |
2511 | int i; | |
2512 | ||
ee15b038 LE |
2513 | del_timer(&mdev->md_sync_timer); |
2514 | /* timer may be rearmed by drbd_md_mark_dirty() now. */ | |
b411b363 PR |
2515 | if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) |
2516 | return; | |
b411b363 PR |
2517 | |
2518 | /* We use D_FAILED here and not D_ATTACHING because we try to write | |
2519 | * metadata even if we detach due to a disk failure! */ | |
2520 | if (!get_ldev_if_state(mdev, D_FAILED)) | |
2521 | return; | |
2522 | ||
b411b363 PR |
2523 | mutex_lock(&mdev->md_io_mutex); |
2524 | buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); | |
2525 | memset(buffer, 0, 512); | |
2526 | ||
2527 | buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); | |
2528 | for (i = UI_CURRENT; i < UI_SIZE; i++) | |
2529 | buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]); | |
2530 | buffer->flags = cpu_to_be32(mdev->ldev->md.flags); | |
2531 | buffer->magic = cpu_to_be32(DRBD_MD_MAGIC); | |
2532 | ||
2533 | buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect); | |
2534 | buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset); | |
2535 | buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements); | |
2536 | buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE); | |
2537 | buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); | |
2538 | ||
2539 | buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); | |
99432fcc | 2540 | buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size); |
b411b363 PR |
2541 | |
2542 | D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); | |
2543 | sector = mdev->ldev->md.md_offset; | |
2544 | ||
3f3a9b84 | 2545 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { |
b411b363 PR |
2546 | /* this was a try anyways ... */ |
2547 | dev_err(DEV, "meta data update failed!\n"); | |
81e84650 | 2548 | drbd_chk_io_error(mdev, 1, true); |
b411b363 PR |
2549 | } |
2550 | ||
2551 | /* Update mdev->ldev->md.la_size_sect, | |
2552 | * since we updated it on metadata. */ | |
2553 | mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev); | |
2554 | ||
2555 | mutex_unlock(&mdev->md_io_mutex); | |
2556 | put_ldev(mdev); | |
2557 | } | |
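/* Sketch of the byte-order discipline in drbd_md_sync()/drbd_md_read()
 * above: every multi-byte field of struct meta_data_on_disk is stored
 * big-endian, hence the cpu_to_be*()/be*_to_cpu() pairs.  Modeled here
 * with htonl()/ntohl(), which perform the same conversion for 32-bit
 * values; the field value is made up for the example. */
#include <arpa/inet.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t al_offset = 8;                /* value in CPU byte order */
	uint32_t on_disk = htonl(al_offset);   /* as written into the 512-byte block */

	assert(ntohl(on_disk) == al_offset);   /* drbd_md_read() recovers it */
	return 0;
}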
2558 | ||
2559 | /** | |
2560 | * drbd_md_read() - Reads in the meta data super block | |
2561 | * @mdev: DRBD device. | |
2562 | * @bdev: Device from which the meta data should be read in. | |
2563 | * | |
116676ca | 2564 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case |
b411b363 PR |
2565 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. |
2566 | */ | |
2567 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | |
2568 | { | |
2569 | struct meta_data_on_disk *buffer; | |
2570 | int i, rv = NO_ERROR; | |
2571 | ||
2572 | if (!get_ldev_if_state(mdev, D_ATTACHING)) | |
2573 | return ERR_IO_MD_DISK; | |
2574 | ||
b411b363 PR |
2575 | mutex_lock(&mdev->md_io_mutex); |
2576 | buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); | |
2577 | ||
2578 | if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) { | |
25985edc | 2579 | /* NOTE: can't do normal error processing here as this is |
b411b363 PR |
2580 | called BEFORE disk is attached */ |
2581 | dev_err(DEV, "Error while reading metadata.\n"); | |
2582 | rv = ERR_IO_MD_DISK; | |
2583 | goto err; | |
2584 | } | |
2585 | ||
e7fad8af | 2586 | if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) { |
b411b363 PR |
2587 | dev_err(DEV, "Error while reading metadata, magic not found.\n"); |
2588 | rv = ERR_MD_INVALID; | |
2589 | goto err; | |
2590 | } | |
2591 | if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) { | |
2592 | dev_err(DEV, "unexpected al_offset: %d (expected %d)\n", | |
2593 | be32_to_cpu(buffer->al_offset), bdev->md.al_offset); | |
2594 | rv = ERR_MD_INVALID; | |
2595 | goto err; | |
2596 | } | |
2597 | if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { | |
2598 | dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n", | |
2599 | be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); | |
2600 | rv = ERR_MD_INVALID; | |
2601 | goto err; | |
2602 | } | |
2603 | if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { | |
2604 | dev_err(DEV, "unexpected md_size: %u (expected %u)\n", | |
2605 | be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); | |
2606 | rv = ERR_MD_INVALID; | |
2607 | goto err; | |
2608 | } | |
2609 | ||
2610 | if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) { | |
2611 | dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n", | |
2612 | be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE); | |
2613 | rv = ERR_MD_INVALID; | |
2614 | goto err; | |
2615 | } | |
2616 | ||
2617 | bdev->md.la_size_sect = be64_to_cpu(buffer->la_size); | |
2618 | for (i = UI_CURRENT; i < UI_SIZE; i++) | |
2619 | bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); | |
2620 | bdev->md.flags = be32_to_cpu(buffer->flags); | |
2621 | mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents); | |
2622 | bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); | |
2623 | ||
87eeee41 | 2624 | spin_lock_irq(&mdev->tconn->req_lock); |
99432fcc PR |
2625 | if (mdev->state.conn < C_CONNECTED) { |
2626 | int peer; | |
2627 | peer = be32_to_cpu(buffer->la_peer_max_bio_size); | |
2628 | peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); | |
2629 | mdev->peer_max_bio_size = peer; | |
2630 | } | |
87eeee41 | 2631 | spin_unlock_irq(&mdev->tconn->req_lock); |
99432fcc | 2632 | |
b411b363 PR |
2633 | if (mdev->sync_conf.al_extents < 7) |
2634 | mdev->sync_conf.al_extents = 127; | |
2635 | ||
2636 | err: | |
2637 | mutex_unlock(&mdev->md_io_mutex); | |
2638 | put_ldev(mdev); | |
2639 | ||
2640 | return rv; | |
2641 | } | |
2642 | ||
2643 | /** | |
2644 | * drbd_md_mark_dirty() - Mark meta data super block as dirty | |
2645 | * @mdev: DRBD device. | |
2646 | * | |
2647 | * Call this function if you change anything that should be written to | |
2648 | * the meta-data super block. This function sets MD_DIRTY, and starts a | |
2649 | * timer that ensures drbd_md_sync() gets called within five seconds. | |
2650 | */ | |
ca0e6098 | 2651 | #ifdef DEBUG |
ee15b038 LE |
2652 | void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func) |
2653 | { | |
2654 | if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) { | |
2655 | mod_timer(&mdev->md_sync_timer, jiffies + HZ); | |
2656 | mdev->last_md_mark_dirty.line = line; | |
2657 | mdev->last_md_mark_dirty.func = func; | |
2658 | } | |
2659 | } | |
2660 | #else | |
b411b363 PR |
2661 | void drbd_md_mark_dirty(struct drbd_conf *mdev) |
2662 | { | |
ee15b038 | 2663 | if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) |
ca0e6098 | 2664 | mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); |
b411b363 | 2665 | } |
ee15b038 | 2666 | #endif |
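/* Userspace model of the MD_DIRTY debounce above: only the first
 * mark-dirty after a sync arms the timer (test_and_set_bit() returns
 * the old value), and drbd_md_sync() writes only if the flag was still
 * set (test_and_clear_bit()).  The printfs stand in for mod_timer()
 * and the actual super block write in this sketch. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool md_dirty;

static void model_mark_dirty(void)
{
	if (!atomic_exchange(&md_dirty, true))
		printf("arm md_sync_timer (5s)\n");  /* first mark since last sync */
}

static void model_md_sync(void)
{
	if (atomic_exchange(&md_dirty, false))
		printf("write meta data super block\n");
}

int main(void)
{
	model_mark_dirty();   /* arms the timer */
	model_mark_dirty();   /* no-op: already dirty */
	model_md_sync();      /* writes, clears the flag */
	model_md_sync();      /* no-op: nothing dirty */
	return 0;
}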
b411b363 PR |
2667 | |
2668 | static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) | |
2669 | { | |
2670 | int i; | |
2671 | ||
62b0da3a | 2672 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) |
b411b363 | 2673 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; |
b411b363 PR |
2674 | } |
2675 | ||
2676 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |
2677 | { | |
2678 | if (idx == UI_CURRENT) { | |
2679 | if (mdev->state.role == R_PRIMARY) | |
2680 | val |= 1; | |
2681 | else | |
2682 | val &= ~((u64)1); | |
2683 | ||
2684 | drbd_set_ed_uuid(mdev, val); | |
2685 | } | |
2686 | ||
2687 | mdev->ldev->md.uuid[idx] = val; | |
b411b363 PR |
2688 | drbd_md_mark_dirty(mdev); |
2689 | } | |
2690 | ||
2691 | ||
2692 | void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |
2693 | { | |
2694 | if (mdev->ldev->md.uuid[idx]) { | |
2695 | drbd_uuid_move_history(mdev); | |
2696 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; | |
b411b363 PR |
2697 | } |
2698 | _drbd_uuid_set(mdev, idx, val); | |
2699 | } | |
2700 | ||
2701 | /** | |
2702 | * drbd_uuid_new_current() - Creates a new current UUID | |
2703 | * @mdev: DRBD device. | |
2704 | * | |
2705 | * Creates a new current UUID, and rotates the old current UUID into | |
2706 | * the bitmap slot. Causes an incremental resync upon next connect. | |
2707 | */ | |
2708 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) | |
2709 | { | |
2710 | u64 val; | |
62b0da3a LE |
2711 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; |
2712 | ||
2713 | if (bm_uuid) | |
2714 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | |
b411b363 | 2715 | |
b411b363 | 2716 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; |
b411b363 PR |
2717 | |
2718 | get_random_bytes(&val, sizeof(u64)); | |
2719 | _drbd_uuid_set(mdev, UI_CURRENT, val); | |
62b0da3a | 2720 | drbd_print_uuids(mdev, "new current UUID"); |
aaa8e2b3 LE |
2721 | /* get it to stable storage _now_ */ |
2722 | drbd_md_sync(mdev); | |
b411b363 PR |
2723 | } |
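/* Sketch of the UUID rotation performed above: the old current UUID is
 * parked in the bitmap slot, a fresh random value becomes current, and
 * the lowest bit of the current UUID mirrors the primary role (cf.
 * _drbd_uuid_set()).  The slot names and the RNG are stand-ins for this
 * sketch; the driver uses get_random_bytes(). */
#include <stdint.h>
#include <stdlib.h>

enum { SLOT_CURRENT, SLOT_BITMAP, SLOT_COUNT };

static uint64_t model_uuid[SLOT_COUNT];

static void model_uuid_new_current(int primary)
{
	uint64_t val = ((uint64_t)rand() << 32) ^ (uint64_t)rand();

	model_uuid[SLOT_BITMAP] = model_uuid[SLOT_CURRENT]; /* rotate out */
	if (primary)
		val |= 1;                  /* role flag in the lowest bit */
	else
		val &= ~(uint64_t)1;
	model_uuid[SLOT_CURRENT] = val;    /* causes an incremental resync
					    * upon the next connect */
}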
2724 | ||
2725 | void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) | |
2726 | { | |
2727 | if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) | |
2728 | return; | |
2729 | ||
2730 | if (val == 0) { | |
2731 | drbd_uuid_move_history(mdev); | |
2732 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; | |
2733 | mdev->ldev->md.uuid[UI_BITMAP] = 0; | |
b411b363 | 2734 | } else { |
62b0da3a LE |
2735 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; |
2736 | if (bm_uuid) | |
2737 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | |
b411b363 | 2738 | |
62b0da3a | 2739 | mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); |
b411b363 PR |
2740 | } |
2741 | drbd_md_mark_dirty(mdev); | |
2742 | } | |
2743 | ||
2744 | /** | |
2745 | * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() | |
2746 | * @mdev: DRBD device. | |
2747 | * | |
2748 | * Sets all bits in the bitmap and writes the whole bitmap to stable storage. | |
2749 | */ | |
2750 | int drbd_bmio_set_n_write(struct drbd_conf *mdev) | |
2751 | { | |
2752 | int rv = -EIO; | |
2753 | ||
2754 | if (get_ldev_if_state(mdev, D_ATTACHING)) { | |
2755 | drbd_md_set_flag(mdev, MDF_FULL_SYNC); | |
2756 | drbd_md_sync(mdev); | |
2757 | drbd_bm_set_all(mdev); | |
2758 | ||
2759 | rv = drbd_bm_write(mdev); | |
2760 | ||
2761 | if (!rv) { | |
2762 | drbd_md_clear_flag(mdev, MDF_FULL_SYNC); | |
2763 | drbd_md_sync(mdev); | |
2764 | } | |
2765 | ||
2766 | put_ldev(mdev); | |
2767 | } | |
2768 | ||
2769 | return rv; | |
2770 | } | |
2771 | ||
2772 | /** | |
2773 | * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() | |
2774 | * @mdev: DRBD device. | |
2775 | * | |
2776 | * Clears all bits in the bitmap and writes the whole bitmap to stable storage. | |
2777 | */ | |
2778 | int drbd_bmio_clear_n_write(struct drbd_conf *mdev) | |
2779 | { | |
2780 | int rv = -EIO; | |
2781 | ||
0778286a | 2782 | drbd_resume_al(mdev); |
b411b363 PR |
2783 | if (get_ldev_if_state(mdev, D_ATTACHING)) { |
2784 | drbd_bm_clear_all(mdev); | |
2785 | rv = drbd_bm_write(mdev); | |
2786 | put_ldev(mdev); | |
2787 | } | |
2788 | ||
2789 | return rv; | |
2790 | } | |
2791 | ||
2792 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |
2793 | { | |
2794 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); | |
02851e9f | 2795 | int rv = -EIO; |
b411b363 PR |
2796 | |
2797 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); | |
2798 | ||
02851e9f | 2799 | if (get_ldev(mdev)) { |
20ceb2b2 | 2800 | drbd_bm_lock(mdev, work->why, work->flags); |
02851e9f LE |
2801 | rv = work->io_fn(mdev); |
2802 | drbd_bm_unlock(mdev); | |
2803 | put_ldev(mdev); | |
2804 | } | |
b411b363 PR |
2805 | |
2806 | clear_bit(BITMAP_IO, &mdev->flags); | |
127b3178 | 2807 | smp_mb__after_clear_bit(); |
b411b363 PR |
2808 | wake_up(&mdev->misc_wait); |
2809 | ||
2810 | if (work->done) | |
2811 | work->done(mdev, rv); | |
2812 | ||
2813 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); | |
2814 | work->why = NULL; | |
20ceb2b2 | 2815 | work->flags = 0; |
b411b363 PR |
2816 | |
2817 | return 1; | |
2818 | } | |
2819 | ||
82f59cc6 LE |
2820 | void drbd_ldev_destroy(struct drbd_conf *mdev) |
2821 | { | |
2822 | lc_destroy(mdev->resync); | |
2823 | mdev->resync = NULL; | |
2824 | lc_destroy(mdev->act_log); | |
2825 | mdev->act_log = NULL; | |
2826 | __no_warn(local, | |
2827 | drbd_free_bc(mdev->ldev); | |
2828 | mdev->ldev = NULL;); | |
2829 | ||
2830 | if (mdev->md_io_tmpp) { | |
2831 | __free_page(mdev->md_io_tmpp); | |
2832 | mdev->md_io_tmpp = NULL; | |
2833 | } | |
2834 | clear_bit(GO_DISKLESS, &mdev->flags); | |
2835 | } | |
2836 | ||
e9e6f3ec LE |
2837 | static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) |
2838 | { | |
2839 | D_ASSERT(mdev->state.disk == D_FAILED); | |
9d282875 LE |
2840 | /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will |
2841 | * inc/dec it frequently. Once we are D_DISKLESS, no one will touch | |
82f59cc6 LE |
2842 | * the protected members anymore, though, so once put_ldev reaches zero |
2843 | * again, it will be safe to free them. */ | |
e9e6f3ec | 2844 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); |
e9e6f3ec LE |
2845 | return 1; |
2846 | } | |
2847 | ||
2848 | void drbd_go_diskless(struct drbd_conf *mdev) | |
2849 | { | |
2850 | D_ASSERT(mdev->state.disk == D_FAILED); | |
2851 | if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) | |
e42325a5 | 2852 | drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless); |
e9e6f3ec LE |
2853 | } |
2854 | ||
b411b363 PR |
2855 | /** |
2856 | * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap | |
2857 | * @mdev: DRBD device. | |
2858 | * @io_fn: IO callback to be called when bitmap IO is possible | |
2859 | * @done: callback to be called after the bitmap IO was performed | |
2860 | * @why: Descriptive text of the reason for doing the IO | |
2861 | * | |
2862 | * While IO on the bitmap happens we freeze application IO, thus ensuring | |
2863 | * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be | |
2864 | * called from worker context. It MUST NOT be used while a previous such | |
2865 | * work is still pending! | |
2866 | */ | |
2867 | void drbd_queue_bitmap_io(struct drbd_conf *mdev, | |
2868 | int (*io_fn)(struct drbd_conf *), | |
2869 | void (*done)(struct drbd_conf *, int), | |
20ceb2b2 | 2870 | char *why, enum bm_flag flags) |
b411b363 | 2871 | { |
e6b3ea83 | 2872 | D_ASSERT(current == mdev->tconn->worker.task); |
b411b363 PR |
2873 | |
2874 | D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags)); | |
2875 | D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags)); | |
2876 | D_ASSERT(list_empty(&mdev->bm_io_work.w.list)); | |
2877 | if (mdev->bm_io_work.why) | |
2878 | dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n", | |
2879 | why, mdev->bm_io_work.why); | |
2880 | ||
2881 | mdev->bm_io_work.io_fn = io_fn; | |
2882 | mdev->bm_io_work.done = done; | |
2883 | mdev->bm_io_work.why = why; | |
20ceb2b2 | 2884 | mdev->bm_io_work.flags = flags; |
b411b363 | 2885 | |
87eeee41 | 2886 | spin_lock_irq(&mdev->tconn->req_lock); |
b411b363 PR |
2887 | set_bit(BITMAP_IO, &mdev->flags); |
2888 | if (atomic_read(&mdev->ap_bio_cnt) == 0) { | |
127b3178 | 2889 | if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) |
e42325a5 | 2890 | drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w); |
b411b363 | 2891 | } |
87eeee41 | 2892 | spin_unlock_irq(&mdev->tconn->req_lock); |
b411b363 PR |
2893 | } |
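/*
 * Illustrative sketch, not part of the original source: queueing a bitmap
 * IO from worker context.  example_io_fn and example_done are hypothetical
 * placeholder callbacks.
 */
#if 0
static int example_io_fn(struct drbd_conf *mdev)
{
	return 0;	/* would perform the actual IO on the bitmap */
}

static void example_done(struct drbd_conf *mdev, int rv)
{
	dev_info(DEV, "bitmap IO finished: %d\n", rv);
}

	/* on the worker, with no other bitmap IO pending: */
	drbd_queue_bitmap_io(mdev, example_io_fn, example_done,
			     "example bitmap IO", BM_LOCKED_SET_ALLOWED);
#endif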
2894 | ||
2895 | /** | |
2896 | * drbd_bitmap_io() - Does an IO operation on the whole bitmap | |
2897 | * @mdev: DRBD device. | |
2898 | * @io_fn: IO callback to be called when bitmap IO is possible | |
2899 | * @why: Descriptive text of the reason for doing the IO | |
 | * @flags: bm_flag bits, passed through to drbd_bm_lock() | |
2900 | * | |
2901 | * Freezes application IO while the actual IO operation runs, unless | |
2902 | * BM_LOCKED_SET_ALLOWED is set in @flags. This function MAY NOT be called | |
 | * from worker context. | |
2903 | */ | |
20ceb2b2 LE |
2904 | int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), |
2905 | char *why, enum bm_flag flags) | |
b411b363 PR |
2906 | { |
2907 | int rv; | |
2908 | ||
e6b3ea83 | 2909 | D_ASSERT(current != mdev->tconn->worker.task); |
b411b363 | 2910 | |
20ceb2b2 LE |
2911 | if ((flags & BM_LOCKED_SET_ALLOWED) == 0) |
2912 | drbd_suspend_io(mdev); | |
b411b363 | 2913 | |
20ceb2b2 | 2914 | drbd_bm_lock(mdev, why, flags); |
b411b363 PR |
2915 | rv = io_fn(mdev); |
2916 | drbd_bm_unlock(mdev); | |
2917 | ||
20ceb2b2 LE |
2918 | if ((flags & BM_LOCKED_SET_ALLOWED) == 0) |
2919 | drbd_resume_io(mdev); | |
b411b363 PR |
2920 | |
2921 | return rv; | |
2922 | } | |
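/*
 * Illustrative sketch, not part of the original source: the synchronous
 * variant, from non-worker context, reusing the hypothetical example_io_fn
 * from above.  Without BM_LOCKED_SET_ALLOWED in the flags, application IO
 * is suspended around the call.
 */
#if 0
	int rv = drbd_bitmap_io(mdev, example_io_fn, "example sync bitmap IO", 0);
	if (rv)
		dev_err(DEV, "bitmap IO failed: %d\n", rv);
#endif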
2923 | ||
2924 | void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local) | |
2925 | { | |
2926 | if ((mdev->ldev->md.flags & flag) != flag) { | |
2927 | drbd_md_mark_dirty(mdev); | |
2928 | mdev->ldev->md.flags |= flag; | |
2929 | } | |
2930 | } | |
2931 | ||
2932 | void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local) | |
2933 | { | |
2934 | if ((mdev->ldev->md.flags & flag) != 0) { | |
2935 | drbd_md_mark_dirty(mdev); | |
2936 | mdev->ldev->md.flags &= ~flag; | |
2937 | } | |
2938 | } | |
 ||
2939 | int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) | |
2940 | { | |
2941 | return (bdev->md.flags & flag) != 0; | |
2942 | } | |
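/*
 * Illustrative sketch, not part of the original source:
 * drbd_md_set_flag()/drbd_md_clear_flag() above are annotated
 * __must_hold(local), so a caller brackets them with a local-disk
 * reference, then uses drbd_md_sync() to flush the dirtied meta-data.
 * MDF_FULL_SYNC stands in for one of the MDF_* flag bits.
 */
#if 0
	if (get_ldev(mdev)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		put_ldev(mdev);
	}
#endif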
2943 | ||
2944 | static void md_sync_timer_fn(unsigned long data) | |
2945 | { | |
2946 | struct drbd_conf *mdev = (struct drbd_conf *) data; | |
2947 | ||
e42325a5 | 2948 | drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work); |
b411b363 PR |
2949 | } |
2950 | ||
2951 | static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused) | |
2952 | { | |
2953 | dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); | |
ee15b038 LE |
2954 | #ifdef DEBUG |
2955 | dev_warn(DEV, "last md_mark_dirty: %s:%u\n", | |
2956 | mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line); | |
2957 | #endif | |
b411b363 | 2958 | drbd_md_sync(mdev); |
b411b363 PR |
2959 | return 1; |
2960 | } | |
2961 | ||
d8763023 | 2962 | const char *cmdname(enum drbd_packet cmd) |
f2ad9063 AG |
2963 | { |
2964 | /* THINK may need to become several global tables | |
2965 | * when we want to support more than | |
2966 | * one PRO_VERSION */ | |
2967 | static const char *cmdnames[] = { | |
2968 | [P_DATA] = "Data", | |
2969 | [P_DATA_REPLY] = "DataReply", | |
2970 | [P_RS_DATA_REPLY] = "RSDataReply", | |
2971 | [P_BARRIER] = "Barrier", | |
2972 | [P_BITMAP] = "ReportBitMap", | |
2973 | [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget", | |
2974 | [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource", | |
2975 | [P_UNPLUG_REMOTE] = "UnplugRemote", | |
2976 | [P_DATA_REQUEST] = "DataRequest", | |
2977 | [P_RS_DATA_REQUEST] = "RSDataRequest", | |
2978 | [P_SYNC_PARAM] = "SyncParam", | |
2979 | [P_SYNC_PARAM89] = "SyncParam89", | |
2980 | [P_PROTOCOL] = "ReportProtocol", | |
2981 | [P_UUIDS] = "ReportUUIDs", | |
2982 | [P_SIZES] = "ReportSizes", | |
2983 | [P_STATE] = "ReportState", | |
2984 | [P_SYNC_UUID] = "ReportSyncUUID", | |
2985 | [P_AUTH_CHALLENGE] = "AuthChallenge", | |
2986 | [P_AUTH_RESPONSE] = "AuthResponse", | |
2987 | [P_PING] = "Ping", | |
2988 | [P_PING_ACK] = "PingAck", | |
2989 | [P_RECV_ACK] = "RecvAck", | |
2990 | [P_WRITE_ACK] = "WriteAck", | |
2991 | [P_RS_WRITE_ACK] = "RSWriteAck", | |
2992 | [P_DISCARD_ACK] = "DiscardAck", | |
2993 | [P_NEG_ACK] = "NegAck", | |
2994 | [P_NEG_DREPLY] = "NegDReply", | |
2995 | [P_NEG_RS_DREPLY] = "NegRSDReply", | |
2996 | [P_BARRIER_ACK] = "BarrierAck", | |
2997 | [P_STATE_CHG_REQ] = "StateChgRequest", | |
2998 | [P_STATE_CHG_REPLY] = "StateChgReply", | |
2999 | [P_OV_REQUEST] = "OVRequest", | |
3000 | [P_OV_REPLY] = "OVReply", | |
3001 | [P_OV_RESULT] = "OVResult", | |
3002 | [P_CSUM_RS_REQUEST] = "CsumRSRequest", | |
3003 | [P_RS_IS_IN_SYNC] = "CsumRSIsInSync", | |
3004 | [P_COMPRESSED_BITMAP] = "CBitmap", | |
3005 | [P_DELAY_PROBE] = "DelayProbe", | |
3006 | [P_OUT_OF_SYNC] = "OutOfSync", | |
3007 | [P_MAX_CMD] = NULL, | |
3008 | }; | |
3009 | ||
3010 | if (cmd == P_HAND_SHAKE_M) | |
3011 | return "HandShakeM"; | |
3012 | if (cmd == P_HAND_SHAKE_S) | |
3013 | return "HandShakeS"; | |
3014 | if (cmd == P_HAND_SHAKE) | |
3015 | return "HandShake"; | |
3016 | if (cmd >= P_MAX_CMD) | |
3017 | return "Unknown"; | |
3018 | return cmdnames[cmd]; | |
3019 | } | |
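/*
 * Illustrative use, not part of the original source: cmdname() is meant
 * for diagnostics in the receive paths, e.g. a hypothetical log site like:
 */
#if 0
	dev_warn(DEV, "unexpected packet %s (0x%04x)\n", cmdname(cmd), cmd);
#endif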
3020 | ||
b411b363 PR |
3021 | #ifdef CONFIG_DRBD_FAULT_INJECTION |
3022 | /* Fault insertion support including random number generator shamelessly | |
3023 | * stolen from kernel/rcutorture.c */ | |
3024 | struct fault_random_state { | |
3025 | unsigned long state; | |
3026 | unsigned long count; | |
3027 | }; | |
3028 | ||
3029 | #define FAULT_RANDOM_MULT 39916801 /* prime */ | |
3030 | #define FAULT_RANDOM_ADD 479001701 /* prime */ | |
3031 | #define FAULT_RANDOM_REFRESH 10000 | |
3032 | ||
3033 | /* | |
3034 | * Crude but fast random-number generator: a linear congruential generator, | |
3035 | * reseeded from get_random_bytes() every FAULT_RANDOM_REFRESH calls; | |
3036 | * swahw32() swaps halfwords to expose the LCG's stronger high bits. */ | |
3037 | static unsigned long | |
3038 | _drbd_fault_random(struct fault_random_state *rsp) | |
3039 | { | |
3040 | long refresh; | |
3041 | ||
49829ea7 | 3042 | if (!rsp->count--) { |
b411b363 PR |
3043 | get_random_bytes(&refresh, sizeof(refresh)); |
3044 | rsp->state += refresh; | |
3045 | rsp->count = FAULT_RANDOM_REFRESH; | |
3046 | } | |
3047 | rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD; | |
3048 | return swahw32(rsp->state); | |
3049 | } | |
3050 | ||
3051 | static char *_drbd_fault_str(unsigned int type) | |
3052 | { | |
3053 | static char *_faults[] = { | |
3054 | [DRBD_FAULT_MD_WR] = "Meta-data write", | |
3055 | [DRBD_FAULT_MD_RD] = "Meta-data read", | |
3056 | [DRBD_FAULT_RS_WR] = "Resync write", | |
3057 | [DRBD_FAULT_RS_RD] = "Resync read", | |
3058 | [DRBD_FAULT_DT_WR] = "Data write", | |
3059 | [DRBD_FAULT_DT_RD] = "Data read", | |
3060 | [DRBD_FAULT_DT_RA] = "Data read ahead", | |
3061 | [DRBD_FAULT_BM_ALLOC] = "BM allocation", | |
6b4388ac PR |
3062 | [DRBD_FAULT_AL_EE] = "EE allocation", |
3063 | [DRBD_FAULT_RECEIVE] = "receive data corruption", | |
b411b363 PR |
3064 | }; |
3065 | ||
3066 | return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**"; | |
3067 | } | |
3068 | ||
3069 | unsigned int | |
3070 | _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) | |
3071 | { | |
3072 | static struct fault_random_state rrs = {0, 0}; | |
3073 | ||
3074 | unsigned int ret = ( | |
3075 | (fault_devs == 0 || | |
3076 | ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) && | |
3077 | (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate)); | |
3078 | ||
3079 | if (ret) { | |
3080 | fault_count++; | |
3081 | ||
7383506c | 3082 | if (__ratelimit(&drbd_ratelimit_state)) |
b411b363 PR |
3083 | dev_warn(DEV, "***Simulating %s failure\n", |
3084 | _drbd_fault_str(type)); | |
3085 | } | |
3086 | ||
3087 | return ret; | |
3088 | } | |
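/*
 * Illustrative sketch, not part of the original source: how a submission
 * path can consult the fault injector; "bio" stands for a hypothetical
 * request about to be issued to the lower-level device.  (Real call sites
 * are assumed to go through an inline wrapper that first checks fault_rate.)
 */
#if 0
	if (_drbd_insert_fault(mdev, DRBD_FAULT_DT_WR))
		bio_endio(bio, -EIO);	/* simulate a failed data write */
	else
		generic_make_request(bio);
#endif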
3089 | #endif | |
3090 | ||
3091 | const char *drbd_buildtag(void) | |
3092 | { | |
3093 | /* When DRBD is built from external sources, this carries a reference | |
3094 | to the git hash of the source code. */ | |
3095 | ||
3096 | static char buildtag[38] = "\0uilt-in"; | |
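	/* starts with a NUL on purpose: for a module build it is overwritten
	 * with the srcversion below; for a built-in build only the first byte
	 * is patched to 'b', turning "\0uilt-in" into "built-in".  38 bytes
	 * leave room for "srcversion: " plus a 24-character field and the
	 * trailing NUL. */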
3097 | ||
3098 | if (buildtag[0] == 0) { | |
3099 | #ifdef CONFIG_MODULES | |
3100 | if (THIS_MODULE != NULL) | |
3101 | sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion); | |
3102 | else | |
3103 | #endif | |
3104 | buildtag[0] = 'b'; | |
3105 | } | |
3106 | ||
3107 | return buildtag; | |
3108 | } | |
3109 | ||
3110 | module_init(drbd_init) | |
3111 | module_exit(drbd_cleanup) | |
3112 | ||
b411b363 PR |
3113 | EXPORT_SYMBOL(drbd_conn_str); |
3114 | EXPORT_SYMBOL(drbd_role_str); | |
3115 | EXPORT_SYMBOL(drbd_disk_str); | |
3116 | EXPORT_SYMBOL(drbd_set_st_err_str); |