drbd: cleanup, drop unused struct
[deliverable/linux.git] / drivers / block / drbd / drbd_main.c
/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;	/* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
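
/*
 * Illustrative sketch (not part of the driver): the pool above is a LIFO
 * chained through page->private, so a pop under drbd_pp_lock looks roughly
 * like the hypothetical helper below.  The real alloc/free paths live in
 * drbd_receiver.c.
 *
 *	static struct page *example_pp_pop(void)
 *	{
 *		struct page *page;
 *
 *		spin_lock(&drbd_pp_lock);
 *		page = drbd_pp_pool;
 *		if (page) {
 *			drbd_pp_pool = (struct page *)page_private(page);
 *			set_page_private(page, 0);
 *			drbd_pp_vacant--;
 *		}
 *		spin_unlock(&drbd_pp_lock);
 *		return page;
 *	}
 */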

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&tconn->req_lock);

	/* find latest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&tconn->req_lock);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req, *r;

	spin_lock_irq(&tconn->req_lock);
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.mdev != mdev)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

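/*
 * Rough wire-layout sketch of the three header generations (field sizes
 * follow from the cpu_to_be*() casts in the prepare_header*() helpers below;
 * the exact struct definitions live in drbd_int.h):
 *
 *	p_header80:  u32 magic, u16 command, u16 length            =>  8 bytes
 *	p_header95:  u16 magic, u16 command, u32 length            =>  8 bytes
 *	p_header100: u32 magic, u16 volume, u16 command,
 *	             u32 length, u32 pad                           => 16 bytes
 */
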
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

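/*
 * Typical calling pattern for the helpers above (hand-written sketch,
 * mirroring e.g. drbd_send_ping() below): conn_prepare_command() takes
 * sock->mutex and returns a pointer just past the header inside sock->sbuf;
 * conn_send_command() transmits header plus payload and drops the mutex, so
 * the per-socket buffer is never filled by two senders at once.  The
 * double-underscore variants assume the caller already holds the mutex.
 *
 *	p = conn_prepare_command(tconn, sock);
 *	if (!p)
 *		return -EIO;
 *	p->some_field = cpu_to_be32(value);	(hypothetical payload field)
 *	return conn_send_command(tconn, sock, P_SOME_PACKET, sizeof(*p), NULL, 0);
 */
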
int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_current_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @mdev:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}

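/*
 * Putting the three setters above together, the p_compressed_bm encoding
 * byte is laid out as:
 *
 *	bits 3..0: bitmap encoding code (e.g. RLE_VLI_Bits)
 *	bits 6..4: number of unused pad bits at the end of the code string
 *	bit  7   : whether the first run describes set bits
 */
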
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}

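/*
 * Worked example (illustration only, under the encoding rules above): for a
 * bitmap starting 00000111 1... at bit_offset 0, the first
 * _drbd_bm_find_next() hits bit 5, so the encoder calls dcbp_set_start(p, 0)
 * (the first run is of clear bits) and feeds run lengths 5, 4, ... through
 * vli_encode_bits().  The compressed form is only used if the resulting VLI
 * stream ends up shorter than the plain bitmap chunk it covers.
 */
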
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (tconn->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return -EIO;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (mdev->tconn->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer.  */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
				 digest, digest_size);
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--tconn->ko_count;
	if (!drop_it) {
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
}

static void drbd_update_congested(struct drbd_tconn *tconn)
{
	struct sock *sk = tconn->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = mdev->tconn->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		mdev->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = mdev->tconn->data.socket;
	mm_segment_t oldfs = get_fs();
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev->tconn);
	set_fs(KERNEL_DS);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(mdev->tconn, socket))
					break;
				continue;
			}
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
				 __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->tconn->flags);

	if (len == 0) {
		err = 0;
		mdev->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		int err;

		err = _drbd_no_send_page(mdev, bvec->bv_page,
					 bvec->bv_offset, bvec->bv_len,
					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		int err;

		err = _drbd_send_page(mdev, bvec->bv_page,
				      bvec->bv_offset, bvec->bv_len,
				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_conf *mdev,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(mdev, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->tconn->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}

/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	unsigned int dp_flags = 0;
	int dgs;
	int err;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (mdev->tconn->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		if (req->rq_state & RQ_EXP_WRITE_ACK)
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);
	if (dgs)
		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
			err = _drbd_send_bio(mdev, req->master_bio);
		else
			err = _drbd_send_zc_bio(mdev, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (dgs > 0 && dgs <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, dgs)) {
				dev_warn(DEV,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (dgs > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

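/* Illustrative summary (comment only): the payload path above is picked by
 * ACK expectation and integrity settings; protocol A writes (no peer ACK
 * expected) and any write with a data digest get copied into the socket
 * buffers via _drbd_send_bio(), while protocol B/C writes without a digest
 * may take the zero-copy _drbd_send_zc_bio() path.
 */
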
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int dgs;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);

	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (dgs)
		drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(mdev, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == tconn->data.socket) {
		rcu_read_lock();
		tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(tconn);
	}
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(tconn, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == tconn->data.socket)
		clear_bit(NET_CONGESTED, &tconn->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			conn_err(tconn, "%s_sendmsg returned %d\n",
				 sock == tconn->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}

/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(tconn, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}

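/* Illustrative note (comment only): drbd_send() returns the byte count it
 * actually queued, which may be short on a broken or timed-out socket;
 * drbd_send_all() folds that into the usual 0-or-negative-errno contract,
 * e.g. a short write of 100 of 4096 bytes comes back as -EIO.
 */
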
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		mdev->open_cnt++;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static int drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mdev->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
	return 0;
}

static void drbd_set_defaults(struct drbd_conf *mdev)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	mdev->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}

void drbd_init_set_defaults(struct drbd_conf *mdev)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(mdev);

	atomic_set(&mdev->ap_bio_cnt, 0);
	atomic_set(&mdev->ap_pending_cnt, 0);
	atomic_set(&mdev->rs_pending_cnt, 0);
	atomic_set(&mdev->unacked_cnt, 0);
	atomic_set(&mdev->local_cnt, 0);
	atomic_set(&mdev->pp_in_use_by_net, 0);
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	atomic_set(&mdev->ap_in_flight, 0);
	atomic_set(&mdev->md_io_in_use, 0);

	mutex_init(&mdev->own_state_mutex);
	mdev->state_mutex = &mdev->own_state_mutex;

	spin_lock_init(&mdev->al_lock);
	spin_lock_init(&mdev->peer_seq_lock);

	INIT_LIST_HEAD(&mdev->active_ee);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->resync_reads);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	INIT_LIST_HEAD(&mdev->start_resync_work.list);
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

	mdev->resync_work.cb  = w_resync_timer;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->go_diskless.cb  = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	mdev->start_resync_work.cb = w_start_resync;

	mdev->resync_work.mdev  = mdev;
	mdev->unplug_work.mdev  = mdev;
	mdev->go_diskless.mdev  = mdev;
	mdev->md_sync_work.mdev = mdev;
	mdev->bm_io_work.w.mdev = mdev;
	mdev->start_resync_work.mdev = mdev;

	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	init_timer(&mdev->start_resync_timer);
	init_timer(&mdev->request_timer);
	mdev->resync_timer.function = resync_timer_fn;
	mdev->resync_timer.data = (unsigned long) mdev;
	mdev->md_sync_timer.function = md_sync_timer_fn;
	mdev->md_sync_timer.data = (unsigned long) mdev;
	mdev->start_resync_timer.function = start_resync_timer_fn;
	mdev->start_resync_timer.data = (unsigned long) mdev;
	mdev->request_timer.function = request_timer_fn;
	mdev->request_timer.data = (unsigned long) mdev;

	init_waitqueue_head(&mdev->misc_wait);
	init_waitqueue_head(&mdev->state_wait);
	init_waitqueue_head(&mdev->ee_wait);
	init_waitqueue_head(&mdev->al_wait);
	init_waitqueue_head(&mdev->seq_wait);

	mdev->resync_wenr = LC_FREE;
	mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}

void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
	int i;
	if (mdev->tconn->receiver.t_state != NONE)
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				mdev->tconn->receiver.t_state);

	mdev->al_writ_cnt  =
	mdev->bm_writ_cnt  =
	mdev->read_cnt     =
	mdev->recv_cnt     =
	mdev->send_cnt     =
	mdev->writ_cnt     =
	mdev->p_size       =
	mdev->rs_start     =
	mdev->rs_total     =
	mdev->rs_failed    = 0;
	mdev->rs_last_events = 0;
	mdev->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		mdev->rs_mark_left[i] = 0;
		mdev->rs_mark_time[i] = 0;
	}
	D_ASSERT(mdev->tconn->net_conf == NULL);

	drbd_set_my_capacity(mdev, 0);
	if (mdev->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(mdev, 0, 1);
		drbd_bm_cleanup(mdev);
	}

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	clear_bit(AL_SUSPENDED, &mdev->flags);

	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));

	drbd_set_defaults(mdev);
}


static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;

	return;
}

static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}

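/* Sizing example (comment only, assuming 4 KiB pages and the usual 1 MiB
 * DRBD_MAX_BIO_SIZE): number = (1 MiB / 4 KiB) * minor_count = 256 pages per
 * configured minor, so a typical minor_count of 32 preallocates 8192 pages
 * (roughly 32 MiB) for drbd_pp_pool, plus number-element request/ee mempools.
 */
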
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};

static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
	int rr;

	rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
	if (rr)
		dev_err(DEV, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
	if (rr)
		dev_err(DEV, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
	if (rr)
		dev_err(DEV, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
	if (rr)
		dev_err(DEV, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (rr)
		dev_err(DEV, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_minor_destroy(struct kref *kref)
{
	struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
	struct drbd_tconn *tconn = mdev->tconn;

	del_timer_sync(&mdev->request_timer);

	/* paranoia asserts */
	D_ASSERT(mdev->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (mdev->this_bdev)
		bdput(mdev->this_bdev);

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	drbd_release_all_peer_reqs(mdev);

	lc_destroy(mdev->act_log);
	lc_destroy(mdev->resync);

	kfree(mdev->p_uuid);
	/* mdev->p_uuid = NULL; */

	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev->rs_plan_s);
	kfree(mdev);

	kref_put(&tconn->kref, &conn_destroy);
}

/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;

static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_conf *mdev = req->w.mdev;
		struct bio *bio = req->master_bio;
		unsigned long start_time = req->start_time;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
				(req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
				req, atomic_read(&req->completion_ref),
				req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(mdev);
		__drbd_make_request(mdev, bio, start_time);
	}
}

void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->w.mdev);

	queue_work(retry.wq, &retry.worker);
}


static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&minors, mdev, i) {
		idr_remove(&minors, mdev_to_minor(mdev));
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		del_gendisk(mdev->vdisk);
		/* synchronize_rcu(); No other threads running at this point */
		kref_put(&mdev->kref, &drbd_minor_destroy);
	}

	/* not _rcu since, no other updater anymore. Genl already unregistered */
	list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
		list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
		/* synchronize_rcu(); */
		kref_put(&tconn->kref, &conn_destroy);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&minors);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_conf *mdev = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(mdev)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
			r |= (1 << BDI_sync_congested);
		else
			put_ldev(mdev);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(mdev);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	mdev->congestion_reason = reason;
	return r;
}

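/* Legend for congestion_reason (comment only, derived from the code above):
 * 'd' = IO frozen by DRBD, 'c' = usermode helper callback pending,
 * 'b' = backing device congested, 'n' = network send buffer congested,
 * 'a' = both backing device and network congested, '-' = not congested.
 */
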
static void drbd_init_workqueue(struct drbd_work_queue* wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

struct drbd_tconn *conn_get_by_name(const char *name)
{
	struct drbd_tconn *tconn;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (!strcmp(tconn->name, name)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}

struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
				     void *peer_addr, int peer_addr_len)
{
	struct drbd_tconn *tconn;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (tconn->my_addr_len == my_addr_len &&
		    tconn->peer_addr_len == peer_addr_len &&
		    !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
		    !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}

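/* Usage sketch (comment only): both lookups take a reference while still
 * under rcu_read_lock(), so the caller owns that reference and must drop it:
 *
 *	struct drbd_tconn *tconn = conn_get_by_name("r0");
 *	if (tconn) {
 *		... use tconn ...
 *		kref_put(&tconn->kref, &conn_destroy);
 *	}
 */
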
static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_tconn *tconn)
{
	drbd_free_sock(tconn);

	crypto_free_hash(tconn->csums_tfm);
	crypto_free_hash(tconn->verify_tfm);
	crypto_free_hash(tconn->cram_hmac_tfm);
	crypto_free_hash(tconn->integrity_tfm);
	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);

	tconn->csums_tfm = NULL;
	tconn->verify_tfm = NULL;
	tconn->cram_hmac_tfm = NULL;
	tconn->integrity_tfm = NULL;
	tconn->peer_integrity_tfm = NULL;
	tconn->int_dig_in = NULL;
	tconn->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
{
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
	/*
	retcode = ERR_NOMEM;
	drbd_msg_put_info("unable to allocate cpumask");
	*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		/* FIXME: Get rid of constant 32 here */
		err = bitmap_parse(res_opts->cpu_mask, 32,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	tconn->res_opts = *res_opts;
	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;

}

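/* Example (comment only): bitmap_parse() expects a hexadecimal mask, so a
 * resource configured with cpu-mask "3" pins the receiver/asender/worker
 * threads to CPUs 0 and 1, while "f0" would select CPUs 4-7.
 */
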
/* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_tconn *tconn;

	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
	if (!tconn)
		return NULL;

	tconn->name = kstrdup(name, GFP_KERNEL);
	if (!tconn->name)
		goto fail;

	if (drbd_alloc_socket(&tconn->data))
		goto fail;
	if (drbd_alloc_socket(&tconn->meta))
		goto fail;

	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
		goto fail;

	if (set_resource_options(tconn, res_opts))
		goto fail;

	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!tconn->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&tconn->transfer_log);

	INIT_LIST_HEAD(&tconn->current_epoch->list);
	tconn->epochs = 1;
	spin_lock_init(&tconn->epoch_lock);
	tconn->write_ordering = WO_bdev_flush;

	tconn->send.seen_any_write_yet = false;
	tconn->send.current_epoch_nr = 0;
	tconn->send.current_epoch_writes = 0;

	tconn->cstate = C_STANDALONE;
	mutex_init(&tconn->cstate_mutex);
	spin_lock_init(&tconn->req_lock);
	mutex_init(&tconn->conf_update);
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->sender_work);
	mutex_init(&tconn->data.mutex);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

	kref_init(&tconn->kref);
	list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);

	return tconn;

fail:
	kfree(tconn->current_epoch);
	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn);

	return NULL;
}

void conn_destroy(struct kref *kref)
{
	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

	if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
	kfree(tconn->current_epoch);

	idr_destroy(&tconn->volumes);

	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	kfree(tconn);
}

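/* Lifecycle note (comment only): conn_create() starts the tconn refcount at 1
 * via kref_init(); every minor created on it takes an extra reference in
 * conn_new_minor(), dropped again from drbd_minor_destroy(), so conn_destroy()
 * only runs once the resource itself and all of its volumes are gone.
 */
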
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
	struct drbd_conf *mdev;
	struct gendisk *disk;
	struct request_queue *q;
	int vnr_got = vnr;
	int minor_got = minor;
	enum drbd_ret_code err = ERR_NOMEM;

	mdev = minor_to_mdev(minor);
	if (mdev)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
	if (!mdev)
		return ERR_NOMEM;

	kref_get(&tconn->kref);
	mdev->tconn = tconn;

	mdev->minor = minor;
	mdev->vnr = vnr;

	drbd_init_set_defaults(mdev);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	mdev->rq_queue = q;
	q->queuedata   = mdev;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	mdev->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;

	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	mdev->this_bdev->bd_contains = mdev->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	/* Setting the max_hw_sectors to an odd value of 8 KiB here
	 * triggers a max_bio_size message upon first attach or connect. */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);
	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(mdev);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	kref_put(&tconn->kref, &conn_destroy);
	return err;
}

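/* Note (comment only): the pre-3.9 idr API used above is the two-step
 * idr_pre_get()/idr_get_new_above() dance; "already exists" is detected by
 * asking for `minor` and getting a different id back (minor_got != minor),
 * and the same pattern guards the per-connection volume number.
 */
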
int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc)	{
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
		DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;           /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	drbd_md_put_buffer(mdev);
out:
	put_ldev(mdev);
}

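/* Layout note (comment only): every field of struct meta_data_on_disk is
 * stored big-endian via the cpu_to_be*() conversions above, so the superblock
 * stays portable; e.g. an la_size of 0x1000 sectors is written as the byte
 * sequence 00 00 00 00 00 00 10 00 regardless of host endianness.
 */
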
/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

 err:
	drbd_md_put_buffer(mdev);
 out:
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}


void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}

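/* Rotation example (comment only): starting from current=C1, bitmap=0,
 * drbd_uuid_new_current() moves C1 into the bitmap slot and installs a fresh
 * random C2 as current (low bit forced to 1 on a primary); on the next
 * connect the peer can use the bitmap UUID to run an incremental resync.
 */
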
3075void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3076{
3077 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3078 return;
3079
3080 if (val == 0) {
3081 drbd_uuid_move_history(mdev);
3082 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3083 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3084 } else {
62b0da3a
LE
3085 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3086 if (bm_uuid)
3087 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3088
62b0da3a 3089 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
b411b363
PR
3090 }
3091 drbd_md_mark_dirty(mdev);
3092}
3093
3094/**
3095 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3096 * @mdev: DRBD device.
3097 *
3098 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3099 */
3100int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3101{
3102 int rv = -EIO;
3103
3104 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3105 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3106 drbd_md_sync(mdev);
3107 drbd_bm_set_all(mdev);
3108
3109 rv = drbd_bm_write(mdev);
3110
3111 if (!rv) {
3112 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3113 drbd_md_sync(mdev);
3114 }
3115
3116 put_ldev(mdev);
3117 }
3118
3119 return rv;
3120}
3121
3122/**
3123 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3124 * @mdev: DRBD device.
3125 *
3126 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3127 */
3128int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3129{
3130 int rv = -EIO;
3131
0778286a 3132 drbd_resume_al(mdev);
b411b363
PR
3133 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3134 drbd_bm_clear_all(mdev);
3135 rv = drbd_bm_write(mdev);
3136 put_ldev(mdev);
3137 }
3138
3139 return rv;
3140}
3141
99920dc5 3142static int w_bitmap_io(struct drbd_work *w, int unused)
b411b363
PR
3143{
3144 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
00d56944 3145 struct drbd_conf *mdev = w->mdev;
02851e9f 3146 int rv = -EIO;
b411b363
PR
3147
3148 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3149
02851e9f 3150 if (get_ldev(mdev)) {
20ceb2b2 3151 drbd_bm_lock(mdev, work->why, work->flags);
02851e9f
LE
3152 rv = work->io_fn(mdev);
3153 drbd_bm_unlock(mdev);
3154 put_ldev(mdev);
3155 }
b411b363 3156
4738fa16 3157 clear_bit_unlock(BITMAP_IO, &mdev->flags);
b411b363
PR
3158 wake_up(&mdev->misc_wait);
3159
3160 if (work->done)
3161 work->done(mdev, rv);
3162
3163 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3164 work->why = NULL;
20ceb2b2 3165 work->flags = 0;
b411b363 3166
99920dc5 3167 return 0;
b411b363
PR
3168}
3169
82f59cc6
LE
3170void drbd_ldev_destroy(struct drbd_conf *mdev)
3171{
3172 lc_destroy(mdev->resync);
3173 mdev->resync = NULL;
3174 lc_destroy(mdev->act_log);
3175 mdev->act_log = NULL;
3176 __no_warn(local,
3177 drbd_free_bc(mdev->ldev);
3178 mdev->ldev = NULL;);
3179
82f59cc6
LE
3180 clear_bit(GO_DISKLESS, &mdev->flags);
3181}
3182
99920dc5 3183static int w_go_diskless(struct drbd_work *w, int unused)
e9e6f3ec 3184{
00d56944
PR
3185 struct drbd_conf *mdev = w->mdev;
3186
e9e6f3ec 3187 D_ASSERT(mdev->state.disk == D_FAILED);
9d282875
LE
3188 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3189 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
82f59cc6
LE
3190 * the protected members anymore, though, so once put_ldev reaches zero
3191 * again, it will be safe to free them. */
e9e6f3ec 3192 drbd_force_state(mdev, NS(disk, D_DISKLESS));
99920dc5 3193 return 0;
e9e6f3ec
LE
3194}
3195
3196void drbd_go_diskless(struct drbd_conf *mdev)
3197{
3198 D_ASSERT(mdev->state.disk == D_FAILED);
3199 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
d5b27b01 3200 drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
e9e6f3ec
LE
3201}
3202
b411b363
PR
3203/**
3204 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3205 * @mdev: DRBD device.
3206 * @io_fn: IO callback to be called when bitmap IO is possible
3207 * @done: callback to be called after the bitmap IO was performed
3208 * @why: Descriptive text of the reason for doing the IO
3209 *
3210 * While IO on the bitmap happens we freeze application IO thus we ensure
3211 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3212 * called from worker context. It MUST NOT be used while a previous such
3213 * work is still pending!
3214 */
3215void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3216 int (*io_fn)(struct drbd_conf *),
3217 void (*done)(struct drbd_conf *, int),
20ceb2b2 3218 char *why, enum bm_flag flags)
b411b363 3219{
e6b3ea83 3220 D_ASSERT(current == mdev->tconn->worker.task);
b411b363
PR
3221
3222 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3223 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3224 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3225 if (mdev->bm_io_work.why)
3226 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3227 why, mdev->bm_io_work.why);
3228
3229 mdev->bm_io_work.io_fn = io_fn;
3230 mdev->bm_io_work.done = done;
3231 mdev->bm_io_work.why = why;
20ceb2b2 3232 mdev->bm_io_work.flags = flags;
b411b363 3233
87eeee41 3234 spin_lock_irq(&mdev->tconn->req_lock);
b411b363
PR
3235 set_bit(BITMAP_IO, &mdev->flags);
3236 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
127b3178 3237 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
d5b27b01 3238 drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
b411b363 3239 }
87eeee41 3240 spin_unlock_irq(&mdev->tconn->req_lock);
b411b363
PR
3241}
3242
3243/**
3244 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3245 * @mdev: DRBD device.
3246 * @io_fn: IO callback to be called when bitmap IO is possible
3247 * @why: Descriptive text of the reason for doing the IO
3248 *
3249 * freezes application IO while that the actual IO operations runs. This
3250 * functions MAY NOT be called from worker context.
3251 */
20ceb2b2
LE
3252int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
3253 char *why, enum bm_flag flags)
b411b363
PR
3254{
3255 int rv;
3256
e6b3ea83 3257 D_ASSERT(current != mdev->tconn->worker.task);
b411b363 3258
20ceb2b2
LE
3259 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3260 drbd_suspend_io(mdev);
b411b363 3261
20ceb2b2 3262 drbd_bm_lock(mdev, why, flags);
b411b363
PR
3263 rv = io_fn(mdev);
3264 drbd_bm_unlock(mdev);
3265
20ceb2b2
LE
3266 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3267 drbd_resume_io(mdev);
b411b363
PR
3268
3269 return rv;
3270}
3271
3272void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3273{
3274 if ((mdev->ldev->md.flags & flag) != flag) {
3275 drbd_md_mark_dirty(mdev);
3276 mdev->ldev->md.flags |= flag;
3277 }
3278}
3279
3280void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3281{
3282 if ((mdev->ldev->md.flags & flag) != 0) {
3283 drbd_md_mark_dirty(mdev);
3284 mdev->ldev->md.flags &= ~flag;
3285 }
3286}
3287int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3288{
3289 return (bdev->md.flags & flag) != 0;
3290}
3291
3292static void md_sync_timer_fn(unsigned long data)
3293{
3294 struct drbd_conf *mdev = (struct drbd_conf *) data;
3295
d5b27b01 3296 drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
b411b363
PR
3297}
3298
99920dc5 3299static int w_md_sync(struct drbd_work *w, int unused)
b411b363 3300{
00d56944
PR
3301 struct drbd_conf *mdev = w->mdev;
3302
b411b363 3303 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
ee15b038
LE
3304#ifdef DEBUG
3305 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3306 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3307#endif
b411b363 3308 drbd_md_sync(mdev);
99920dc5 3309 return 0;
b411b363
PR
3310}
3311
d8763023 3312const char *cmdname(enum drbd_packet cmd)
f2ad9063
AG
3313{
3314 /* THINK may need to become several global tables
3315 * when we want to support more than
3316 * one PRO_VERSION */
3317 static const char *cmdnames[] = {
3318 [P_DATA] = "Data",
3319 [P_DATA_REPLY] = "DataReply",
3320 [P_RS_DATA_REPLY] = "RSDataReply",
3321 [P_BARRIER] = "Barrier",
3322 [P_BITMAP] = "ReportBitMap",
3323 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3324 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3325 [P_UNPLUG_REMOTE] = "UnplugRemote",
3326 [P_DATA_REQUEST] = "DataRequest",
3327 [P_RS_DATA_REQUEST] = "RSDataRequest",
3328 [P_SYNC_PARAM] = "SyncParam",
3329 [P_SYNC_PARAM89] = "SyncParam89",
3330 [P_PROTOCOL] = "ReportProtocol",
3331 [P_UUIDS] = "ReportUUIDs",
3332 [P_SIZES] = "ReportSizes",
3333 [P_STATE] = "ReportState",
3334 [P_SYNC_UUID] = "ReportSyncUUID",
3335 [P_AUTH_CHALLENGE] = "AuthChallenge",
3336 [P_AUTH_RESPONSE] = "AuthResponse",
3337 [P_PING] = "Ping",
3338 [P_PING_ACK] = "PingAck",
3339 [P_RECV_ACK] = "RecvAck",
3340 [P_WRITE_ACK] = "WriteAck",
3341 [P_RS_WRITE_ACK] = "RSWriteAck",
7be8da07 3342 [P_DISCARD_WRITE] = "DiscardWrite",
f2ad9063
AG
3343 [P_NEG_ACK] = "NegAck",
3344 [P_NEG_DREPLY] = "NegDReply",
3345 [P_NEG_RS_DREPLY] = "NegRSDReply",
3346 [P_BARRIER_ACK] = "BarrierAck",
3347 [P_STATE_CHG_REQ] = "StateChgRequest",
3348 [P_STATE_CHG_REPLY] = "StateChgReply",
3349 [P_OV_REQUEST] = "OVRequest",
3350 [P_OV_REPLY] = "OVReply",
3351 [P_OV_RESULT] = "OVResult",
3352 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3353 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3354 [P_COMPRESSED_BITMAP] = "CBitmap",
3355 [P_DELAY_PROBE] = "DelayProbe",
3356 [P_OUT_OF_SYNC] = "OutOfSync",
7be8da07 3357 [P_RETRY_WRITE] = "RetryWrite",
ae25b336
LE
3358 [P_RS_CANCEL] = "RSCancel",
3359 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3360 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
036b17ea
PR
3362 [P_PROTOCOL_UPDATE] = "protocol_update",
ae25b336
LE
3363
3364 /* enum drbd_packet, but not commands - obsoleted flags:
3365 * P_MAY_IGNORE
3366 * P_MAX_OPT_CMD
3367 */
f2ad9063
AG
3368 };
3369
ae25b336 3370 /* too big for the array: 0xfffX */
e5d6f33a
AG
3371 if (cmd == P_INITIAL_META)
3372 return "InitialMeta";
3373 if (cmd == P_INITIAL_DATA)
3374 return "InitialData";
6038178e
AG
3375 if (cmd == P_CONNECTION_FEATURES)
3376 return "ConnectionFeatures";
6e849ce8 3377	if (cmd >= ARRAY_SIZE(cmdnames) || !cmdnames[cmd]) /* also guard holes in the table */
f2ad9063
AG
3378 return "Unknown";
3379 return cmdnames[cmd];
3380}
3381
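/* Illustrative sketch, not part of the upstream file: cmdname() exists for
 * log messages in the receiver paths, along the lines of:
 *
 *	dev_err(DEV, "unexpected packet type %s (0x%04x)\n",
 *		cmdname(pi.cmd), pi.cmd);
 *
 * where pi would be a struct packet_info filled in by the header decoder.
 */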
7be8da07
AG
3382/**
3383 * drbd_wait_misc - wait for a request to make progress
3384 * @mdev: device associated with the request
3385 * @i: the struct drbd_interval embedded in struct drbd_request or
3386 * struct drbd_peer_request
3387 */
3388int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
3389{
44ed167d 3390 struct net_conf *nc;
7be8da07
AG
3391 DEFINE_WAIT(wait);
3392 long timeout;
3393
44ed167d
PR
3394 rcu_read_lock();
3395 nc = rcu_dereference(mdev->tconn->net_conf);
3396 if (!nc) {
3397 rcu_read_unlock();
7be8da07 3398 return -ETIMEDOUT;
44ed167d
PR
3399 }
3400 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3401 rcu_read_unlock();
7be8da07
AG
3402
3403	/* Indicate that mdev->misc_wait should be woken up on progress. */
3404 i->waiting = true;
3405 prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
3406 spin_unlock_irq(&mdev->tconn->req_lock);
3407 timeout = schedule_timeout(timeout);
3408 finish_wait(&mdev->misc_wait, &wait);
3409 spin_lock_irq(&mdev->tconn->req_lock);
3410 if (!timeout || mdev->state.conn < C_CONNECTED)
3411 return -ETIMEDOUT;
3412 if (signal_pending(current))
3413 return -ERESTARTSYS;
3414 return 0;
3415}
3416
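/* Illustrative sketch, not part of the upstream file: drbd_wait_misc() is
 * entered with tconn->req_lock held and drops/retakes it around the sleep,
 * so a caller waiting out a conflicting request must retry its lookup.
 * find_conflict() below is a hypothetical helper, not a real DRBD function:
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	while ((i = find_conflict(mdev, sector, size))) {
 *		err = drbd_wait_misc(mdev, i);
 *		if (err)
 *			break;
 *	}
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 */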
b411b363
PR
3417#ifdef CONFIG_DRBD_FAULT_INJECTION
3418/* Fault insertion support including random number generator shamelessly
3419 * stolen from kernel/rcutorture.c */
3420struct fault_random_state {
3421 unsigned long state;
3422 unsigned long count;
3423};
3424
3425#define FAULT_RANDOM_MULT 39916801 /* prime */
3426#define FAULT_RANDOM_ADD 479001701 /* prime */
3427#define FAULT_RANDOM_REFRESH 10000
3428
3429/*
3430 * Crude but fast random-number generator. Uses a linear congruential
3431 * generator, with occasional help from get_random_bytes().
3432 */
3433static unsigned long
3434_drbd_fault_random(struct fault_random_state *rsp)
3435{
3436 long refresh;
3437
49829ea7 3438 if (!rsp->count--) {
b411b363
PR
3439 get_random_bytes(&refresh, sizeof(refresh));
3440 rsp->state += refresh;
3441 rsp->count = FAULT_RANDOM_REFRESH;
3442 }
3443 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3444 return swahw32(rsp->state);
3445}
3446
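/* Note, not part of the upstream file: this is the classic LCG step
 *
 *	state = state * 39916801 + 479001701	(mod 2^BITS_PER_LONG)
 *
 * and swahw32() swaps the 16-bit halves of the low 32 bits, so callers see
 * the better-mixed upper half first.  The quality only needs to be good
 * enough for fault injection, hence no stronger PRNG.
 */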
3447static char *
3448_drbd_fault_str(unsigned int type) {
3449 static char *_faults[] = {
3450 [DRBD_FAULT_MD_WR] = "Meta-data write",
3451 [DRBD_FAULT_MD_RD] = "Meta-data read",
3452 [DRBD_FAULT_RS_WR] = "Resync write",
3453 [DRBD_FAULT_RS_RD] = "Resync read",
3454 [DRBD_FAULT_DT_WR] = "Data write",
3455 [DRBD_FAULT_DT_RD] = "Data read",
3456 [DRBD_FAULT_DT_RA] = "Data read ahead",
3457 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
6b4388ac
PR
3458 [DRBD_FAULT_AL_EE] = "EE allocation",
3459 [DRBD_FAULT_RECEIVE] = "receive data corruption",
b411b363
PR
3460 };
3461
3462 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3463}
3464
3465unsigned int
3466_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3467{
3468 static struct fault_random_state rrs = {0, 0};
3469
3470 unsigned int ret = (
3471 (fault_devs == 0 ||
3472 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3473 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3474
3475 if (ret) {
3476 fault_count++;
3477
7383506c 3478 if (__ratelimit(&drbd_ratelimit_state))
b411b363
PR
3479 dev_warn(DEV, "***Simulating %s failure\n",
3480 _drbd_fault_str(type));
3481 }
3482
3483 return ret;
3484}
3485#endif
3486
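/* Illustrative sketch, not part of the upstream file: call sites use the
 * drbd_insert_fault() wrapper from drbd_int.h, which compiles away when
 * CONFIG_DRBD_FAULT_INJECTION is off, e.g. when submitting meta-data IO:
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);
 *	else
 *		submit_bio(rw, bio);
 */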
3487const char *drbd_buildtag(void)
3488{
3489	/* When DRBD is built from external sources, this carries a
3490	   reference to the git hash of that source code. */
3491
3492	static char buildtag[38] = "\0uilt-in"; /* leading '\0' means "not yet initialized"; byte 0 becomes 'b' below, yielding "built-in" */
3493
3494 if (buildtag[0] == 0) {
3495#ifdef CONFIG_MODULES
3496 if (THIS_MODULE != NULL)
3497 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3498 else
3499#endif
3500 buildtag[0] = 'b';
3501 }
3502
3503 return buildtag;
3504}
3505
3506module_init(drbd_init)
3507module_exit(drbd_cleanup)
3508
b411b363
PR
3509EXPORT_SYMBOL(drbd_conn_str);
3510EXPORT_SYMBOL(drbd_role_str);
3511EXPORT_SYMBOL(drbd_disk_str);
3512EXPORT_SYMBOL(drbd_set_st_err_str);