/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

/* module parameter, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes
 * I choose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;


/* to shorten dev_warn(DEV, "msg"); and related statements */
#define DEV (disk_to_dev(device->vdisk))

#define conn_printk(LEVEL, TCONN, FMT, ARGS...) \
        printk(LEVEL "d-con %s: " FMT, TCONN->resource->name , ## ARGS)
#define conn_alert(TCONN, FMT, ARGS...)  conn_printk(KERN_ALERT, TCONN, FMT, ## ARGS)
#define conn_crit(TCONN, FMT, ARGS...)   conn_printk(KERN_CRIT, TCONN, FMT, ## ARGS)
#define conn_err(TCONN, FMT, ARGS...)    conn_printk(KERN_ERR, TCONN, FMT, ## ARGS)
#define conn_warn(TCONN, FMT, ARGS...)   conn_printk(KERN_WARNING, TCONN, FMT, ## ARGS)
#define conn_notice(TCONN, FMT, ARGS...) conn_printk(KERN_NOTICE, TCONN, FMT, ## ARGS)
#define conn_info(TCONN, FMT, ARGS...)   conn_printk(KERN_INFO, TCONN, FMT, ## ARGS)
#define conn_dbg(TCONN, FMT, ARGS...)    conn_printk(KERN_DEBUG, TCONN, FMT, ## ARGS)

#define D_ASSERT(exp)   if (!(exp)) \
         dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({                                                  \
                bool _bool = (exp);                                     \
                if (!_bool)                                             \
                        dev_err(DEV, "ASSERTION %s FAILED in %s\n",     \
                                #exp, __func__);                        \
                _bool;                                                  \
                })

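/* Example (illustrative, hypothetical caller): unlike D_ASSERT, expect()
 * yields the tested value, so it can guard a bail-out path while still
 * logging the failed assertion:
 *
 *      if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *              return -EINVAL;
 */
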
/* Defines to control fault insertion */
enum {
        DRBD_FAULT_MD_WR = 0,   /* meta data write */
        DRBD_FAULT_MD_RD = 1,   /*           read  */
        DRBD_FAULT_RS_WR = 2,   /* resync          */
        DRBD_FAULT_RS_RD = 3,
        DRBD_FAULT_DT_WR = 4,   /* data            */
        DRBD_FAULT_DT_RD = 5,
        DRBD_FAULT_DT_RA = 6,   /* data read ahead */
        DRBD_FAULT_BM_ALLOC = 7,        /* bitmap allocation */
        DRBD_FAULT_AL_EE = 8,   /* alloc ee */
        DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

        DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
        return fault_rate &&
                (enable_faults & (1<<type)) &&
                _drbd_insert_fault(device, type);
#else
        return 0;
#endif
}

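/* Illustrative sketch of the intended use (hypothetical caller): complete a
 * bio with -EIO instead of submitting it, to simulate a failing meta data
 * write.  With CONFIG_DRBD_FAULT_INJECTION disabled this compiles to a
 * constant 0 and the branch is optimized away:
 *
 *      if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *              bio_endio(bio, -EIO);
 *      else
 *              submit_bio(WRITE, bio);
 */
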
/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

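/* For instance, div_ceil(7, 2) == 4 while div_floor(7, 2) == 3.
 * Note that div_ceil evaluates A and B twice, so the arguments
 * must not have side effects. */
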
extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices;         /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
        /* "const"
         * stores total bits and long words
         * of the bitmap, so we don't need to
         * call the accessor functions over and again. */
        unsigned long bm_bits;
        unsigned long bm_words;
        /* during xfer, current position within the bitmap */
        unsigned long bit_offset;
        unsigned long word_offset;

        /* statistics; index: (h->command == P_BITMAP) */
        unsigned packets[2];
        unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
                const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
        /* word_offset counts "native long words" (32 or 64 bit),
         * aligned at 64 bit.
         * Encoded packet may end at an unaligned bit offset.
         * In case a fallback clear text packet is transmitted in
         * between, we adjust this offset back to the last 64bit
         * aligned "native long word", which makes coding and decoding
         * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
        c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
        c->word_offset = c->bit_offset >> 5;
        c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
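
/* Worked example: bit_offset = 100.
 * On 64 bit: word_offset = 100 >> 6 = 1 (the long word starting at bit 64).
 * On 32 bit: 100 >> 5 = 3, and "&= ~1UL" rounds down to word 2; word 2 of
 * 32 bit words starts at bit 64 again, so both platforms agree. */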

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
        NONE,
        RUNNING,
        EXITING,
        RESTARTING
};

struct drbd_thread {
        spinlock_t t_lock;
        struct task_struct *task;
        struct completion stop;
        enum drbd_thread_state t_state;
        int (*function) (struct drbd_thread *);
        struct drbd_connection *connection;
        int reset_cpu_mask;
        char name[9];
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
        /* THINK testing the t_state seems to be uncritical in all cases
         * (but thread_{start,stop}), so we can read it *without* the lock.
         *      --lge */

        smp_rmb();
        return thi->t_state;
}

struct drbd_work {
        struct list_head list;
        int (*cb)(struct drbd_work *, int cancel);
        union {
                struct drbd_device *device;
                struct drbd_connection *connection;
        };
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

struct drbd_request {
        struct drbd_work w;

        /* if local IO is not allowed, will be NULL.
         * if local IO _is_ allowed, holds the locally submitted bio clone,
         * or, after local IO completion, the ERR_PTR(error).
         * see drbd_request_endio(). */
        struct bio *private_bio;

        struct drbd_interval i;

        /* epoch: used to check on "completion" whether this req was in
         * the current epoch, and we therefore have to close it,
         * causing a p_barrier packet to be sent, starting a new epoch.
         *
         * This corresponds to "barrier" in struct p_barrier[_ack],
         * and to "barrier_nr" in struct drbd_epoch (and various
         * comments/function parameters/local variable names).
         */
        unsigned int epoch;

        struct list_head tl_requests; /* ring list in the transfer log */
        struct bio *master_bio;       /* master bio pointer */
        unsigned long start_time;

        /* once it hits 0, we may complete the master_bio */
        atomic_t completion_ref;
        /* once it hits 0, we may destroy this drbd_request object */
        struct kref kref;

        unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
        struct drbd_connection *connection;
        struct list_head list;
        unsigned int barrier_nr;
        atomic_t epoch_size; /* increased on every request added. */
        atomic_t active;     /* increased on every req. added, and dec on every finished. */
        unsigned long flags;
};

/* Prototype declarations of functions defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
        DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
        EV_PUT,
        EV_GOT_BARRIER_NR,
        EV_BECAME_LAST,
        EV_CLEANUP = 32, /* used as flag */
};

struct drbd_wq_barrier {
        struct drbd_work w;
        struct completion done;
};

struct digest_info {
        int digest_size;
        void *digest;
};

struct drbd_peer_request {
        struct drbd_work w;
        struct drbd_epoch *epoch; /* for writes */
        struct page *pages;
        atomic_t pending_bios;
        struct drbd_interval i;
        /* see comments on ee flag bits below */
        unsigned long flags;
        union {
                u64 block_id;
                struct digest_info *digest;
        };
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
        __EE_CALL_AL_COMPLETE_IO,
        __EE_MAY_SET_IN_SYNC,

        /* In case a barrier failed,
         * we need to resubmit without the barrier flag. */
        __EE_RESUBMITTED,

        /* we may have several bios per peer request.
         * if any of those fail, we set this flag atomically
         * from the endio callback */
        __EE_WAS_ERROR,

        /* This ee has a pointer to a digest instead of a block id */
        __EE_HAS_DIGEST,

        /* Conflicting local requests need to be restarted after this request */
        __EE_RESTART_REQUESTS,

        /* The peer wants a write ACK for this (wire proto C) */
        __EE_SEND_WRITE_ACK,

        /* Is set when net_conf had two_primaries set while creating this peer_req */
        __EE_IN_INTERVAL_TREE,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS    (1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK      (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE    (1<<__EE_IN_INTERVAL_TREE)

/* flag bits per device */
enum {
        UNPLUG_REMOTE,          /* sending a "UnplugRemote" could help */
        MD_DIRTY,               /* current uuids and flags not yet on disk */
        USE_DEGR_WFC_T,         /* degr-wfc-timeout instead of wfc-timeout. */
        CL_ST_CHG_SUCCESS,
        CL_ST_CHG_FAIL,
        CRASHED_PRIMARY,        /* This node was a crashed primary.
                                 * Gets cleared when the state.conn
                                 * goes into C_CONNECTED state. */
        CONSIDER_RESYNC,

        MD_NO_FUA,              /* User wants us to not use FUA/FLUSH on meta data dev */
        SUSPEND_IO,             /* suspend application io */
        BITMAP_IO,              /* suspend application io;
                                   once no more io in flight, start bitmap io */
        BITMAP_IO_QUEUED,       /* Started bitmap IO */
        GO_DISKLESS,            /* Disk is being detached, on io-error or admin request. */
        WAS_IO_ERROR,           /* Local disk failed, returned IO error */
        WAS_READ_ERROR,         /* Local disk READ failed (set additionally to the above) */
        FORCE_DETACH,           /* Force-detach from local disk, aborting any pending local IO */
        RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
        RESIZE_PENDING,         /* Size change detected locally, waiting for the response from
                                 * the peer, if it changed there as well. */
        NEW_CUR_UUID,           /* Create new current UUID when thawing IO */
        AL_SUSPENDED,           /* Activity logging is currently suspended. */
        AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
        B_RS_H_DONE,            /* Before resync handler done (already executed) */
        DISCARD_MY_DATA,        /* discard_my_data flag per volume */
        READ_BALANCE_RR,
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
        /* do we need to kfree, or vfree bm_pages? */
        BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */

        /* currently locked for bulk operation */
        BM_LOCKED_MASK = 0xf,

        /* in detail, that is: */
        BM_DONT_CLEAR = 0x1,
        BM_DONT_SET   = 0x2,
        BM_DONT_TEST  = 0x4,

        /* so we can mark it locked for bulk operation,
         * and still allow all non-bulk operations */
        BM_IS_LOCKED  = 0x8,

        /* (test bit, count bit) allowed (common case) */
        BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

        /* testing bits, as well as setting new bits allowed, but clearing bits
         * would be unexpected.  Used during bitmap receive.  Setting new bits
         * requires sending of "out-of-sync" information, though. */
        BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

        /* for drbd_bm_write_copy_pages, everything is allowed,
         * only concurrent bulk operations are locked out. */
        BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
        struct list_head q;
        spinlock_t q_lock;  /* to protect the list. */
        wait_queue_head_t q_wait;
};

struct drbd_socket {
        struct mutex mutex;
        struct socket *socket;
        /* this way we get our
         * send/receive buffers off the stack */
        void *sbuf;
        void *rbuf;
};

struct drbd_md {
        u64 md_offset;          /* sector offset to 'super' block */

        u64 la_size_sect;       /* last agreed size, unit sectors */
        spinlock_t uuid_lock;
        u64 uuid[UI_SIZE];
        u64 device_uuid;
        u32 flags;
        u32 md_size_sect;

        s32 al_offset;  /* signed relative sector offset to activity log */
        s32 bm_offset;  /* signed relative sector offset to bitmap */

        /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
        s32 meta_dev_idx;

        /* see al_tr_number_to_on_disk_sector() */
        u32 al_stripes;
        u32 al_stripe_size_4k;
        u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
        struct block_device *backing_bdev;
        struct block_device *md_bdev;
        struct drbd_md md;
        struct disk_conf *disk_conf; /* RCU, for updates: first_peer_device(device)->connection->conf_update */
        sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
        unsigned int done;
        int error;
};

struct bm_io_work {
        struct drbd_work w;
        char *why;
        enum bm_flag flags;
        int (*io_fn)(struct drbd_device *device);
        void (*done)(struct drbd_device *device, int rv);
};

enum write_ordering_e {
        WO_none,
        WO_drain_io,
        WO_bdev_flush,
};

struct fifo_buffer {
        unsigned int head_index;
        unsigned int size;
        int total; /* sum of all values */
        int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);

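/* values[] is a flexible array member; fifo_alloc() is expected to reserve
 * room for fifo_size ints in a single allocation, along the lines of this
 * sketch (the real definition lives elsewhere in drbd):
 *
 *      struct fifo_buffer *fb;
 *      fb = kzalloc(sizeof(struct fifo_buffer) + fifo_size * sizeof(int), GFP_NOIO);
 *      if (fb)
 *              fb->size = fifo_size;
 */
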
/* flag bits per connection */
enum {
        NET_CONGESTED,          /* The data socket is congested */
        RESOLVE_CONFLICTS,      /* Set on one node, cleared on the peer! */
        SEND_PING,              /* whether asender should send a ping asap */
        SIGNAL_ASENDER,         /* whether asender wants to be interrupted */
        GOT_PING_ACK,           /* set when we receive a ping_ack packet, ping_wait gets woken */
        CONN_WD_ST_CHG_REQ,     /* A cluster wide state change on the connection is active */
        CONN_WD_ST_CHG_OKAY,
        CONN_WD_ST_CHG_FAIL,
        CONN_DRY_RUN,           /* Expect disconnect after resync handshake. */
        CREATE_BARRIER,         /* next P_DATA is preceded by a P_BARRIER */
        STATE_SENT,             /* Do not change state/UUIDs while this is set */
        CALLBACK_PENDING,       /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
                                 * pending, from drbd worker context.
                                 * If set, bdi_write_congested() returns true,
                                 * so shrink_page_list() would not recurse into,
                                 * and potentially deadlock on, this drbd worker.
                                 */
        DISCONNECT_SENT,
};

struct drbd_resource {
        char *name;
        struct kref kref;
        struct list_head connections;
        struct list_head resources;
};

struct drbd_connection {
        struct list_head connections;
        struct drbd_resource *resource;
        struct kref kref;
        struct idr volumes;             /* <connection, vnr> to device mapping */
        enum drbd_conns cstate;         /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
        unsigned susp:1;                /* IO suspended by user */
        unsigned susp_nod:1;            /* IO suspended because no data */
        unsigned susp_fen:1;            /* IO suspended because fence peer handler runs */
        struct mutex cstate_mutex;      /* Protects graceful disconnects */
        unsigned int connect_cnt;       /* Inc each time a connection is established */

        unsigned long flags;
        struct net_conf *net_conf;      /* content protected by rcu */
        struct mutex conf_update;       /* mutex for read-copy-update of net_conf and disk_conf */
        wait_queue_head_t ping_wait;    /* Woken upon reception of a ping, and a state change */
        struct res_opts res_opts;

        struct sockaddr_storage my_addr;
        int my_addr_len;
        struct sockaddr_storage peer_addr;
        int peer_addr_len;

        struct drbd_socket data;        /* data/barrier/cstate/parameter packets */
        struct drbd_socket meta;        /* ping/ack (metadata) packets */
        int agreed_pro_version;         /* actually used protocol version */
        unsigned long last_received;    /* in jiffies, either socket */
        unsigned int ko_count;

        spinlock_t req_lock;

        struct list_head transfer_log;  /* all requests not yet fully processed */

        struct crypto_hash *cram_hmac_tfm;
        struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
        struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
        struct crypto_hash *csums_tfm;
        struct crypto_hash *verify_tfm;
        void *int_dig_in;
        void *int_dig_vv;

        /* receiver side */
        struct drbd_epoch *current_epoch;
        spinlock_t epoch_lock;
        unsigned int epochs;
        enum write_ordering_e write_ordering;
        atomic_t current_tle_nr;        /* transfer log epoch number */
        unsigned current_tle_writes;    /* writes seen within this tl epoch */

        unsigned long last_reconnect_jif;
        struct drbd_thread receiver;
        struct drbd_thread worker;
        struct drbd_thread asender;
        cpumask_var_t cpu_mask;

        /* sender side */
        struct drbd_work_queue sender_work;

        struct {
                /* whether this sender thread
                 * has processed a single write yet. */
                bool seen_any_write_yet;

                /* Which barrier number to send with the next P_BARRIER */
                int current_epoch_nr;

                /* how many write requests have been sent
                 * with req->epoch == current_epoch_nr.
                 * If none, no P_BARRIER will be sent. */
                unsigned current_epoch_writes;
        } send;
};

struct submit_worker {
        struct workqueue_struct *wq;
        struct work_struct worker;

        spinlock_t lock;
        struct list_head writes;
};

struct drbd_peer_device {
        struct list_head peer_devices;
        struct drbd_device *device;
        struct drbd_connection *connection;
};

struct drbd_device {
        struct list_head peer_devices;
        int vnr;                        /* volume number within the connection */
        struct kref kref;

        /* things that are stored as / read from meta data on disk */
        unsigned long flags;

        /* configured by drbdsetup */
        struct drbd_backing_dev *ldev __protected_by(local);

        sector_t p_size;                /* partner's disk size */
        struct request_queue *rq_queue;
        struct block_device *this_bdev;
        struct gendisk *vdisk;

        unsigned long last_reattach_jif;
        struct drbd_work resync_work,
                         unplug_work,
                         go_diskless,
                         md_sync_work,
                         start_resync_work;
        struct timer_list resync_timer;
        struct timer_list md_sync_timer;
        struct timer_list start_resync_timer;
        struct timer_list request_timer;
#ifdef DRBD_DEBUG_MD_SYNC
        struct {
                unsigned int line;
                const char* func;
        } last_md_mark_dirty;
#endif

        /* Used after attach while negotiating new disk state. */
        union drbd_state new_state_tmp;

        union drbd_dev_state state;
        wait_queue_head_t misc_wait;
        wait_queue_head_t state_wait;   /* upon each state change. */
        unsigned int send_cnt;
        unsigned int recv_cnt;
        unsigned int read_cnt;
        unsigned int writ_cnt;
        unsigned int al_writ_cnt;
        unsigned int bm_writ_cnt;
        atomic_t ap_bio_cnt;            /* Requests we need to complete */
        atomic_t ap_pending_cnt;        /* AP data packets on the wire, ack expected */
        atomic_t rs_pending_cnt;        /* RS request/data packets on the wire */
        atomic_t unacked_cnt;           /* Need to send replies for */
        atomic_t local_cnt;             /* Waiting for local completion */

        /* Interval tree of pending local requests */
        struct rb_root read_requests;
        struct rb_root write_requests;

        /* blocks to resync in this run [unit BM_BLOCK_SIZE] */
        unsigned long rs_total;
        /* number of resync blocks that failed in this run */
        unsigned long rs_failed;
        /* Syncer's start time [unit jiffies] */
        unsigned long rs_start;
        /* cumulated time in PausedSyncX state [unit jiffies] */
        unsigned long rs_paused;
        /* skipped because csum was equal [unit BM_BLOCK_SIZE] */
        unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
        /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
        unsigned long rs_mark_left[DRBD_SYNC_MARKS];
        /* marks' time [unit jiffies] */
        unsigned long rs_mark_time[DRBD_SYNC_MARKS];
        /* current index into rs_mark_{left,time} */
        int rs_last_mark;
        unsigned long rs_last_bcast; /* [unit jiffies] */

        /* where does the admin want us to start? (sector) */
        sector_t ov_start_sector;
        sector_t ov_stop_sector;
        /* where are we now? (sector) */
        sector_t ov_position;
        /* Start sector of out of sync range (to merge printk reporting). */
        sector_t ov_last_oos_start;
        /* size of out-of-sync range in sectors. */
        sector_t ov_last_oos_size;
        unsigned long ov_left; /* in bits */

        struct drbd_bitmap *bitmap;
        unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

        /* Used to track operations of resync... */
        struct lru_cache *resync;
        /* Number of locked elements in resync LRU */
        unsigned int resync_locked;
        /* resync extent number waiting for application requests */
        unsigned int resync_wenr;

        int open_cnt;
        u64 *p_uuid;

        struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
        struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
        struct list_head done_ee;   /* need to send P_WRITE_ACK */
        struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
        struct list_head net_ee;    /* zero-copy network send in progress */

        int next_barrier_nr;
        struct list_head resync_reads;
        atomic_t pp_in_use;             /* allocated from page pool */
        atomic_t pp_in_use_by_net;      /* sendpage()d, still referenced by tcp */
        wait_queue_head_t ee_wait;
        struct page *md_io_page;        /* one page buffer for md_io */
        struct drbd_md_io md_io;
        atomic_t md_io_in_use;          /* protects the md_io, md_io_page and md_io_tmpp */
        spinlock_t al_lock;
        wait_queue_head_t al_wait;
        struct lru_cache *act_log;      /* activity log */
        unsigned int al_tr_number;
        int al_tr_cycle;
        wait_queue_head_t seq_wait;
        atomic_t packet_seq;
        unsigned int peer_seq;
        spinlock_t peer_seq_lock;
        unsigned int minor;
        unsigned long comm_bm_set; /* communicated number of set bits. */
        struct bm_io_work bm_io_work;
        u64 ed_uuid; /* UUID of the exposed data */
        struct mutex own_state_mutex;
        struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
        char congestion_reason;  /* Why we were congested... */
        atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
        atomic_t rs_sect_ev; /* for submitted resync data rate, both */
        int rs_last_sect_ev; /* counter to compare with */
        int rs_last_events;  /* counter of read or write "events" (unit sectors)
                              * on the lower level device when we last looked. */
        int c_sync_rate; /* current resync rate after syncer throttle magic */
        struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
        int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
        atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
        unsigned int peer_max_bio_size;
        unsigned int local_max_bio_size;

        /* any requests that would block in drbd_make_request()
         * are deferred to this single-threaded work queue */
        struct submit_worker submit;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
        return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
        return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

#define for_each_resource(resource, _resources) \
        list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
        list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
        list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
        list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
        list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
        list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
        list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
        list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
        list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

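/* Example (illustrative): iterating all connections of a resource.  The
 * plain variants want the appropriate lock held, the _rcu variants an
 * rcu_read_lock() section, and the _safe variants tolerate removal of
 * the current entry:
 *
 *      rcu_read_lock();
 *      for_each_connection_rcu(connection, resource)
 *              conn_info(connection, "connection seen\n");
 *      rcu_read_unlock();
 */
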
static inline unsigned int device_to_minor(struct drbd_device *device)
{
        return device->minor;
}

static inline struct drbd_device *vnr_to_device(struct drbd_connection *connection, int vnr)
{
        return (struct drbd_device *)idr_find(&connection->volumes, vnr);
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
        DDSF_FORCED    = 1,
        DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
extern char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
extern void drbd_calc_cpu_mask(struct drbd_connection *connection);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
                       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
                     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
                         unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_device *device);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *device);
extern void drbd_gen_and_send_sync_uuid(struct drbd_device *device);
extern int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_device *device, union drbd_state s);
extern int drbd_send_current_state(struct drbd_device *device);
extern int drbd_send_sync_param(struct drbd_device *device);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
                            u32 set_size);
extern int drbd_send_ack(struct drbd_device *, enum drbd_packet,
                         struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_device *device, enum drbd_packet cmd,
                             struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
                             struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_device *device, enum drbd_packet cmd,
                            sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_device *, enum drbd_packet,
                           struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_device *device, int cmd,
                              sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector,
                                   int size, void *digest, int digest_size,
                                   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
extern void drbd_md_mark_dirty(struct drbd_device *device);
#else
#define drbd_md_mark_dirty(m)   drbd_md_mark_dirty_(m, __LINE__ , __func__ )
extern void drbd_md_mark_dirty_(struct drbd_device *device,
                unsigned int line, const char *func);
#endif
extern void drbd_queue_bitmap_io(struct drbd_device *device,
                                 int (*io_fn)(struct drbd_device *),
                                 void (*done)(struct drbd_device *, int),
                                 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
                int (*io_fn)(struct drbd_device *),
                char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
                int (*io_fn)(struct drbd_device *),
                char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device);
extern int drbd_bmio_clear_n_write(struct drbd_device *device);
extern void drbd_ldev_destroy(struct drbd_device *device);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * Variants:
 * old, indexed fixed size meta data:
 *
 * internal:
 *   |----------- md_size_sect ------------------|
 *   [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                          | al_offset < 0 |
 *              | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like fashion.
 *  Its on-disk size used to be fixed at 32kB,
 *  but is about to become configurable.
 */

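/* Worked example for the "external" layout above, using the default fixed
 * sizes defined below (MD_128MB_SECT, MD_32kB_SECT, MD_4kB_SECT):
 * al_offset = 8 (the 4k superblock occupies sectors 0..7), X = 64 sectors
 * of activity log, so bm_offset = 8 + 64 = 72, and with
 * md_size_sect = 128 << 11 = 262144 that leaves
 * 262144 - 72 = 262072 sectors for the bitmap. */
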
/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11) /* 128 MB, unit sectors */
#define MD_4kB_SECT      8
#define MD_32kB_SECT    64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Which will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *      This many changes to the active set can be logged with one transaction.
 *      This number is arbitrary.
 * context per transaction:
 *      This many context extent numbers are logged with each transaction.
 *      This number is resulting from the transaction block size (4k), the layout
 *      of the transaction header, and the number of updates per transaction.
 *      See drbd_actlog.c:struct al_transaction_on_disk
 * */
#define AL_UPDATES_PER_TRANSACTION 64   // arbitrary
#define AL_CONTEXT_PER_TRANSACTION 919  // (4096 - 36 - 6*64)/4

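/* Sanity check of the 919 above: of the 4096 byte transaction block,
 * 36 bytes are header, and each of the 64 update slots presumably takes
 * 6 bytes (a 2-byte slot number plus a 4-byte extent number, see
 * struct al_transaction_on_disk); (4096 - 36 - 6*64)/4 = 3676/4 = 919
 * remaining 4-byte context extent numbers. */
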
#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
        int rs_left;    /* number of bits set (out of sync) in this extent. */
        int rs_failed;  /* number of failed resync requests in this extent. */
        unsigned long flags;
        struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT  12                       /* 4k per bit */
#define BM_BLOCK_SIZE    (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT     24     /* 16 MiB per resync extent */
#define BM_EXT_SIZE      (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))

/* how many _storage_ sectors we have per bitmap sector */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)

/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
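
/* Worked example for the conversions above: storage sector 1000000
 *   -> bit:           BM_SECT_TO_BIT(1000000) = 1000000 >> 3  = 125000
 *   -> resync extent: BM_SECT_TO_EXT(1000000) = 1000000 >> 15 = 30
 * One bit spans BM_SECT_PER_BIT = 8 sectors (4k), one resync extent
 * BM_SECT_PER_EXT = 32768 sectors (16 MiB), and AL_EXT_PER_BM_SECT is
 * 1 << (24 - 22) = 4 activity log extents per on-disk bitmap sector. */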

#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 *                   of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit   0        bit 37   bit 38            bit (512*8)-1
 *           ...|........|........|.. // ..|........|
 * sect. 0       `296     `304                     ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT     ( 512 / BM_BYTES_PER_EXTENT )        //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
/* we have a certain meta data variant that has a fixed on-disk size of 128
 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
 * log, leaving this many sectors for the bitmap.
 */

#define DRBD_MAX_SECTORS_FIXED_BM \
          ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
#endif

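/* The arithmetic behind DRBD_MAX_SECTORS_FIXED_BM, spelled out:
 * 262144 - 64 - 8 = 262072 bitmap sectors remain, each describing
 * 1LL << (BM_EXT_SHIFT - 9) = 32768 storage sectors (16 MiB), which gives
 * 262072 * 32768 = 8587575296 sectors of 512 byte, on the order of 4 TB
 * (cf. the "about 3.8TB" note next to the fixed size layout above). */
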
/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
 * Since we may live in a mixed-platform cluster,
 * we limit us to a platform agnostic constant here for now.
 * A followup commit may allow even bigger BIO sizes,
 * once we thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)    /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */

extern int  drbd_bm_init(struct drbd_device *device);
extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_device *device);
extern void drbd_bm_set_all(struct drbd_device *device);
extern void drbd_bm_clear_all(struct drbd_device *device);
/* set/clear/test only a few bits at a time */
extern int  drbd_bm_set_bits(
                struct drbd_device *device, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
                struct drbd_device *device, unsigned long s, unsigned long e);
extern int drbd_bm_count_bits(
        struct drbd_device *device, const unsigned long s, const unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock,
 * may process the whole bitmap in one go */
extern void _drbd_bm_set_bits(struct drbd_device *device,
                const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int  drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local);
extern int  drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int  drbd_bm_write(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
extern int  drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
extern size_t        drbd_bm_words(struct drbd_device *device);
extern unsigned long drbd_bm_bits(struct drbd_device *device);
extern sector_t      drbd_bm_capacity(struct drbd_device *device);

#define DRBD_END_OF_BITMAP      (~(unsigned long)0)
extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
extern int drbd_bm_rs_done(struct drbd_device *device);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
                size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap */
extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
                size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
extern void drbd_bm_unlock(struct drbd_device *device);
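
/* Example (illustrative sketch): a full bitmap rewrite, locked against any
 * concurrent set/clear/test during the bulk operation, cf. the bm_flag
 * comments further up:
 *
 *      drbd_bm_lock(device, "invalidate", BM_LOCKED_MASK);
 *      drbd_bm_set_all(device);
 *      (void) drbd_bm_write(device);
 *      drbd_bm_unlock(device);
 */
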
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;        /* peer requests */
extern struct kmem_cache *drbd_bm_ext_cache;    /* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;    /* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

/* drbd's page pool, used to buffer data received from the peer,
 * or data requested by the peer.
 *
 * This does not have an emergency reserve.
 *
 * When allocating from this pool, it first takes pages from the pool.
 * Only if the pool is depleted will it try to allocate from the system.
 *
 * The assumption is that pages taken from this pool will be processed,
 * and given back, "quickly", and then can be recycled, so we can avoid
 * frequent calls to alloc_page(), and still will be able to make progress even
 * under memory pressure.
 */
extern struct page *drbd_pp_pool;
extern spinlock_t   drbd_pp_lock;
extern int          drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

/* We also need a standard (emergency-reserve backed) page pool
 * for meta data IO (activity log, bitmap).
 * We can keep it global, as long as it is used as "N pages at a time".
 * 128 should be plenty, currently we probably can get away with as few as 1.
 */
#define DRBD_MIN_POOL_PAGES     128
extern mempool_t *drbd_md_io_page_pool;

/* We also need to make sure we get a bio
 * when we need it for housekeeping purposes */
extern struct bio_set *drbd_md_io_bio_set;
/* to allocate from that set */
extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);

extern rwlock_t global_state_lock;

extern int conn_lowest_minor(struct drbd_connection *connection);
enum drbd_ret_code drbd_create_minor(struct drbd_connection *connection, unsigned int minor, int vnr);
extern void drbd_destroy_device(struct kref *kref);

extern struct drbd_resource *drbd_create_resource(const char *name);
extern void drbd_free_resource(struct drbd_resource *resource);

extern int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts);
extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
extern void drbd_destroy_connection(struct kref *kref);
struct drbd_connection *conn_get_by_name(const char *name);
extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
                                            void *peer_addr, int peer_addr_len);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);

extern int proc_details;

/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
extern void drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


/* drbd_nl.c */
extern int drbd_msg_put_info(const char *info);
extern void drbd_suspend_io(struct drbd_device *device);
extern void drbd_resume_io(struct drbd_device *device);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
enum determine_dev_size {
        DS_ERROR_SHRINK = -3,
        DS_ERROR_SPACE_MD = -2,
        DS_ERROR = -1,
        DS_UNCHANGED = 0,
        DS_SHRUNK = 1,
        DS_GREW = 2,
        DS_GREW_FROM_ZERO = 3,
};
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
                                        enum drbd_role new_role,
                                        int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
extern int drbd_khelper(struct drbd_device *device, char *cmd);

/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
extern void resume_next_sg(struct drbd_device *device);
extern void suspend_other_sg(struct drbd_device *device);
extern int drbd_resync_finished(struct drbd_device *device);
/* maybe rather drbd_main.c ? */
extern void *drbd_md_get_buffer(struct drbd_device *device);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
                struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
                struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_rs_controller_reset(struct drbd_device *device);

b30ab791 1272static inline void ov_out_of_sync_print(struct drbd_device *device)
b411b363 1273{
b30ab791 1274 if (device->ov_last_oos_size) {
b411b363 1275 dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
b30ab791
AG
1276 (unsigned long long)device->ov_last_oos_start,
1277 (unsigned long)device->ov_last_oos_size);
b411b363 1278 }
b30ab791 1279 device->ov_last_oos_size = 0;
b411b363
PR
1280}
1281
1282
extern void drbd_csum_bio(struct drbd_device *, struct crypto_hash *, struct bio *, void *);
extern void drbd_csum_ee(struct drbd_device *, struct crypto_hash *,
                         struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_work *, int);
extern int w_ov_finished(struct drbd_work *, int);
extern int w_resync_timer(struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_work *, int);
extern int w_make_resync_request(struct drbd_work *, int);
extern int w_send_dblock(struct drbd_work *, int);
extern int w_send_read_req(struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);

1307/* drbd_receiver.c */
b30ab791 1308extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector);
54761697 1309extern int drbd_submit_peer_request(struct drbd_device *,
fbe29dec
AG
1310 struct drbd_peer_request *, const unsigned,
1311 const int);
54761697
AG
1312extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1313extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_device *, u64,
0db55363
AG
1314 sector_t, unsigned int,
1315 gfp_t) __must_hold(local);
54761697 1316extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
3967deb1
AG
1317 int);
1318#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1319#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
54761697 1320extern struct page *drbd_alloc_pages(struct drbd_device *, unsigned int, bool);
b30ab791
AG
1321extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1322extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
bde89a9e 1323extern void conn_flush_workqueue(struct drbd_connection *connection);
b30ab791
AG
1324extern int drbd_connected(struct drbd_device *device);
1325static inline void drbd_flush_workqueue(struct drbd_device *device)
0e29d163 1326{
a6b32bc3 1327 conn_flush_workqueue(first_peer_device(device)->connection);
0e29d163 1328}
b411b363 1329
ed439848
LE
1330/* Yes, there is kernel_setsockopt, but only since 2.6.18.
1331 * So we have our own copy of it here. */
b411b363 1332static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
ed439848 1333 char *optval, int optlen)
b411b363 1334{
ed439848
LE
1335 mm_segment_t oldfs = get_fs();
1336 char __user *uoptval;
b411b363 1337 int err;
ed439848
LE
1338
1339 uoptval = (char __user __force *)optval;
1340
1341 set_fs(KERNEL_DS);
b411b363 1342 if (level == SOL_SOCKET)
ed439848 1343 err = sock_setsockopt(sock, level, optname, uoptval, optlen);
b411b363 1344 else
ed439848 1345 err = sock->ops->setsockopt(sock, level, optname, uoptval,
b411b363 1346 optlen);
ed439848 1347 set_fs(oldfs);
b411b363
PR
1348 return err;
1349}
1350
1351static inline void drbd_tcp_cork(struct socket *sock)
1352{
ed439848 1353 int val = 1;
b411b363 1354 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
ed439848 1355 (char*)&val, sizeof(val));
b411b363
PR
1356}
1357
1358static inline void drbd_tcp_uncork(struct socket *sock)
1359{
ed439848 1360 int val = 0;
b411b363 1361 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
ed439848 1362 (char*)&val, sizeof(val));
b411b363
PR
1363}
1364
1365static inline void drbd_tcp_nodelay(struct socket *sock)
1366{
ed439848 1367 int val = 1;
b411b363 1368 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
ed439848 1369 (char*)&val, sizeof(val));
b411b363
PR
1370}
1371
1372static inline void drbd_tcp_quickack(struct socket *sock)
1373{
ed439848 1374 int val = 2;
b411b363 1375 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
ed439848 1376 (char*)&val, sizeof(val));
b411b363
PR
1377}
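
/* Usage sketch (illustration only, not a DRBD API contract): the cork
 * helpers are meant to bracket a burst of related sends, so the kernel
 * coalesces them into fewer TCP segments:
 *
 *	drbd_tcp_cork(sock);		// hold back partial frames
 *	// ... several sends on the same socket ...
 *	drbd_tcp_uncork(sock);		// push out whatever is still queued
 *
 * drbd_tcp_nodelay() is the opposite trade-off: it disables Nagle so
 * that small packets (e.g. acks) leave immediately.
 */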

void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c */
extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate);
extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate);
extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_device *device);
extern int drbd_rs_del_all(struct drbd_device *device);
extern void drbd_rs_failed_io(struct drbd_device *device,
		sector_t sector, int size);
extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_device *device, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(device, sector, size) \
	__drbd_set_in_sync(device, sector, size, __FILE__, __LINE__)
extern int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(device, sector, size) \
	__drbd_set_out_of_sync(device, sector, size, __FILE__, __LINE__)
extern void drbd_al_shrink(struct drbd_device *device);
extern int drbd_initialize_al(struct drbd_device *, void *);

/* drbd_nl.c */
/* state info broadcast */
struct sib_info {
	enum drbd_state_info_bcast_reason sib_reason;
	union {
		struct {
			char *helper_name;
			unsigned helper_exit_code;
		};
		struct {
			union drbd_state os;
			union drbd_state ns;
		};
	};
};
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);

/*
 * inline helper functions
 *************************/

/* see also page_chain_add and friends in drbd_receiver.c */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
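
/* Illustration only (not the actual teardown in drbd_receiver.c):
 * pages are chained through their ->private field, so walking and
 * unlinking looks much like list_for_each_entry_safe().  A sketch of
 * releasing a chain:
 *
 *	struct page *tmp;
 *	page_chain_for_each_safe(page, tmp) {
 *		set_page_private(page, 0);	// unlink from the chain
 *		put_page(page);			// drop our reference
 *	}
 *
 * The "safe" variant caches the next pointer before the loop body
 * runs, so the current page may be freed inside it.
 */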

static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}

static inline enum drbd_state_rv
_drbd_set_state(struct drbd_device *device, union drbd_state ns,
		enum chg_state_flags flags, struct completion *done)
{
	enum drbd_state_rv rv;

	read_lock(&global_state_lock);
	rv = __drbd_set_state(device, ns, flags, done);
	read_unlock(&global_state_lock);

	return rv;
}

static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
	union drbd_state rv;

	rv.i = device->state.i;
	rv.susp = first_peer_device(device)->connection->susp;
	rv.susp_nod = first_peer_device(device)->connection->susp_nod;
	rv.susp_fen = first_peer_device(device)->connection->susp_fen;

	return rv;
}

enum drbd_force_detach_flags {
	DRBD_READ_ERROR,
	DRBD_WRITE_ERROR,
	DRBD_META_IO_ERROR,
	DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_device *device,
		enum drbd_force_detach_flags df,
		const char *where)
{
	enum drbd_io_error_p ep;

	rcu_read_lock();
	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
	rcu_read_unlock();
	switch (ep) {
	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s.\n", where);
			if (device->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		/* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
	case EP_DETACH:
	case EP_CALL_HELPER:
		/* Remember whether we saw a READ or WRITE error.
		 *
		 * Recovery of the affected area for WRITE failure is covered
		 * by the activity log.
		 * READ errors may fall outside that area though.  Certain READ
		 * errors can be "healed" by writing good data to the affected
		 * blocks, which triggers block re-allocation in lower layers.
		 *
		 * If we can not write the bitmap after a READ error,
		 * we may need to trigger a full sync (see w_go_diskless()).
		 *
		 * Force-detach is not really an IO error, but rather a
		 * desperate measure to try to deal with a completely
		 * unresponsive lower level IO stack.
		 * Still it should be treated as a WRITE error.
		 *
		 * Meta IO error is always a WRITE error:
		 * we read meta data only once during attach,
		 * which will fail in case of errors.
		 */
		set_bit(WAS_IO_ERROR, &device->flags);
		if (df == DRBD_READ_ERROR)
			set_bit(WAS_READ_ERROR, &device->flags);
		if (df == DRBD_FORCE_DETACH)
			set_bit(FORCE_DETACH, &device->flags);
		if (device->state.disk > D_FAILED) {
			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error() - Handle the on_io_error setting, should be called from all io completion handlers
 * @device:	 DRBD device.
 * @error:	 Error code passed to the IO completion callback.
 * @forcedetach: Force detach, i.e. the error happened while accessing the meta data.
 *
 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_device *device,
	int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags);
		__drbd_chk_io_error_(device, forcedetach, where);
		spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags);
	}
}


/**
 * drbd_md_first_sector() - Returns the first sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * BTW, for internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_4kB_SECT - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * Returns the capacity we announce to our peer.  We clip ourselves at the
 * various MAX_SECTORS, because if we don't, the current implementation will
 * oops sooner or later.
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;

	switch (bdev->md.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

/**
 * drbd_md_ss() - Return the sector number of our meta data super block
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
{
	const int meta_dev_idx = bdev->md.meta_dev_idx;

	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
		return 0;

	/* Since drbd08, internal meta data is always "flexible".
	 * position: last 4k aligned block of 4k size */
	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;

	/* external, some index; this is the old fixed size layout */
	return MD_128MB_SECT * bdev->md.meta_dev_idx;
}
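
/* Worked example (illustration only): with internal meta data on a
 * backing device of 1048577 sectors, "& ~7ULL" rounds down to the last
 * 4k boundary (1048576), and "- 8" steps back one 4k block (8 sectors
 * of 512 bytes), so the superblock occupies sectors 1048568..1048575:
 * the last 4k-aligned block of 4k size that still fits on the device.
 */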

static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	spin_unlock_irqrestore(&q->q_lock, flags);
	wake_up(&q->q_wait);
}
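
/* Usage sketch (illustration only): a drbd_work is embedded in a larger
 * object and carries a callback of the w_*() signature declared above;
 * it is handed to the worker thread roughly like
 *
 *	w->cb = w_send_out_of_sync;
 *	drbd_queue_work(&connection->sender_work, w);
 *
 * The only difference in drbd_queue_work_front() is list_add() vs.
 * list_add_tail(): front-queued work runs before anything already
 * pending.
 */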

static inline void wake_asender(struct drbd_connection *connection)
{
	if (test_bit(SIGNAL_ASENDER, &connection->flags))
		force_sig(DRBD_SIG, connection->asender.task);
}

static inline void request_ping(struct drbd_connection *connection)
{
	set_bit(SEND_PING, &connection->flags);
	wake_asender(connection);
}

extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
extern void *drbd_prepare_command(struct drbd_device *, struct drbd_socket *);
extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);
extern int drbd_send_command(struct drbd_device *, struct drbd_socket *,
			     enum drbd_packet, unsigned int, void *,
			     unsigned int);

extern int drbd_send_ping(struct drbd_connection *connection);
extern int drbd_send_ping_ack(struct drbd_connection *connection);
extern int drbd_send_state_req(struct drbd_device *, union drbd_state, union drbd_state);
extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 *    (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
 *  _req_mod(req, DATA_RECEIVED)
 *     [from receive_DataReply]
 *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 *     for some reason it is NOT decreased in got_NegAck,
 *     but in the resulting cleanup code from report_params.
 *     we should try to remember the reason for that...
 *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
 *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
 *     [from tl_clear_barrier]
 */
static inline void inc_ap_pending(struct drbd_device *device)
{
	atomic_inc(&device->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which, func, line)			\
	if (atomic_read(&device->which) < 0)				\
		dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",	\
			func, line,					\
			atomic_read(&device->which))

#define dec_ap_pending(device) _dec_ap_pending(device, __FUNCTION__, __LINE__)
static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
{
	if (atomic_dec_and_test(&device->ap_pending_cnt))
		wake_up(&device->misc_wait);
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
}

/* counts how many resync-related answers we still expect from the peer
 * increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					   (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_device *device)
{
	atomic_inc(&device->rs_pending_cnt);
}

#define dec_rs_pending(device) _dec_rs_pending(device, __FUNCTION__, __LINE__)
static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->rs_pending_cnt);
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
}

/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_device *device)
{
	atomic_inc(&device->unacked_cnt);
}

#define dec_unacked(device) _dec_unacked(device, __FUNCTION__, __LINE__)
static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
{
	atomic_dec(&device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

#define sub_unacked(device, n) _sub_unacked(device, n, __FUNCTION__, __LINE__)
static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
{
	atomic_sub(n, &device->unacked_cnt);
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}

/**
 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
 * @M:	DRBD device.
 *
 * You have to call put_ldev() when finished working with device->ldev.
 */
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
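
/* Typical pattern (illustration only):
 *
 *	if (get_ldev_if_state(device, D_UP_TO_DATE)) {
 *		// device->ldev may safely be dereferenced here
 *		put_ldev(device);
 *	}
 *
 * Between get and put the backing device cannot go away under us:
 * drbd_ldev_destroy() only runs once local_cnt has dropped to zero
 * (see put_ldev() below).
 */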

static inline void put_ldev(struct drbd_device *device)
{
	int i = atomic_dec_return(&device->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(i >= 0);
	if (i == 0) {
		if (device->state.disk == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_ldev_destroy(device);
		if (device->state.disk == D_FAILED) {
			/* all application IO references gone. */
			if (!test_and_set_bit(GO_DISKLESS, &device->flags))
				drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->go_diskless);
		}
		wake_up(&device->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (device->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed)
		put_ldev(device);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
#endif

/* you must have a "get_ldev" reference */
static inline void drbd_get_syncer_progress(struct drbd_device *device,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	/* this is to break it at compile time when we change that, in case we
	 * want to support more than (1<<32) bits on a 32bit arch. */
	typecheck(unsigned long, device->rs_total);

	/* note: both rs_total and rs_left are in bits, i.e. in
	 * units of BM_BLOCK_SIZE.
	 * for the percentage, we don't care. */

	if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
		*bits_left = device->ov_left;
	else
		*bits_left = drbd_bm_total_weight(device) - device->rs_failed;
	/* >> 10 to prevent overflow,
	 * +1 to prevent division by zero */
	if (*bits_left > device->rs_total) {
		/* D'oh.  Maybe a logic bug somewhere.  May also be just a
		 * race condition between this and a disconnect during sync.
		 * For now, just prevent in-kernel buffer overflow.
		 */
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(device->state.conn),
				*bits_left, device->rs_total, device->rs_failed);
		*per_mil_done = 0;
	} else {
		/* Make sure the division happens in long context.
		 * We allow up to one petabyte storage right now,
		 * at a granularity of 4k per bit that is 2**38 bits.
		 * After shift right and multiplication by 1000,
		 * this should still fit easily into a 32bit long,
		 * so we don't need a 64bit division on 32bit arch.
		 * Note: currently we don't support such large bitmaps on 32bit
		 * arch anyways, but no harm done to be prepared for it here.
		 */
		unsigned int shift = device->rs_total > UINT_MAX ? 16 : 10;
		unsigned long left = *bits_left >> shift;
		unsigned long total = 1UL + (device->rs_total >> shift);
		unsigned long tmp = 1000UL - left * 1000UL/total;
		*per_mil_done = tmp;
	}
}
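
/* Worked example (illustration only): rs_total = 1000000 bits and
 * bits_left = 250000.  shift is 10, so left = 250000 >> 10 = 244 and
 * total = 1 + (1000000 >> 10) = 977; then
 * per_mil_done = 1000 - 244 * 1000 / 977 = 1000 - 249 = 751,
 * i.e. about 75.1% done, which is accurate enough for a progress
 * display.
 */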


/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_device *device)
{
	struct net_conf *nc;
	int mxb;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
	rcu_read_unlock();

	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_device *device)
{
	union drbd_dev_state s = device->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
		/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

		/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (first_peer_device(device)->connection->agreed_pro_version < 96)
			return 0;
		break;

		/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
	case D_FAILED:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int drbd_suspended(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;

	return connection->susp || connection->susp_fen || connection->susp_nod;
}

static inline bool may_inc_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);

	if (drbd_suspended(device))
		return false;
	if (test_bit(SUSPEND_IO, &device->flags))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(device))
		return false;

	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyways, we have this workaround. */
	if (atomic_read(&device->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &device->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_device *device)
{
	bool rv = false;

	spin_lock_irq(&first_peer_device(device)->connection->req_lock);
	rv = may_inc_ap_bio(device);
	if (rv)
		atomic_inc(&device->ap_bio_cnt);
	spin_unlock_irq(&first_peer_device(device)->connection->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_device *device)
{
	/* we wait here
	 *    as long as the device is suspended,
	 *    while the bitmap is on the fly during connection handshake,
	 *    and while we would exceed the max_buffers limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	wait_event(device->misc_wait, inc_ap_bio_cond(device));
}

static inline void dec_ap_bio(struct drbd_device *device)
{
	int mxb = drbd_get_max_buffers(device);
	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);

	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
	}

	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&device->misc_wait);
}
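
/* Illustration only: application IO is bracketed by these counters,
 * roughly
 *
 *	inc_ap_bio(device);	// may block until new IO is allowed
 *	// ... submit the application bio ...
 *	dec_ap_bio(device);	// from the completion path
 *
 * which lets suspend/resume and queued bitmap IO drain in-flight
 * requests simply by waiting for ap_bio_cnt to reach zero.
 */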

static inline bool verify_can_do_stop_sector(struct drbd_device *device)
{
	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
		first_peer_device(device)->connection->agreed_pro_version != 100;
}

static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
{
	int changed = device->ed_uuid != val;
	device->ed_uuid = val;
	return changed;
}

static inline int drbd_queue_order_type(struct drbd_device *device)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline void drbd_md_flush(struct drbd_device *device)
{
	int r;

	if (device->ldev == NULL) {
		dev_warn(DEV, "device->ldev == NULL in drbd_md_flush\n");
		return;
	}

	if (test_bit(MD_NO_FUA, &device->flags))
		return;

	r = blkdev_issue_flush(device->ldev->md_bdev, GFP_NOIO, NULL);
	if (r) {
		set_bit(MD_NO_FUA, &device->flags);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}

static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
{
	return list_first_entry(&resource->connections,
				struct drbd_connection, connections);
}

#endif