/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

/* module parameter, defined in drbd_main.c */
extern unsigned int minor_count;
extern bool disable_sendpage;
extern bool allow_oos;
void tl_abort_disk_io(struct drbd_device *device);

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_device;
struct drbd_connection;

#define __drbd_printk_device(level, device, fmt, args...) \
	dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
#define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
	dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
#define __drbd_printk_resource(level, resource, fmt, args...) \
	printk(level "drbd %s: " fmt, (resource)->name, ## args)
#define __drbd_printk_connection(level, connection, fmt, args...) \
	printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)

void drbd_printk_with_wrong_object_type(void);

#define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
	(__builtin_types_compatible_p(typeof(obj), type) || \
	 __builtin_types_compatible_p(typeof(obj), const type)), \
	func(level, (const type)(obj), fmt, ## args)

#define drbd_printk(level, obj, fmt, args...) \
	__builtin_choose_expr( \
	  __drbd_printk_if_same_type(obj, struct drbd_device *, \
			     __drbd_printk_device, level, fmt, ## args), \
	  __builtin_choose_expr( \
	    __drbd_printk_if_same_type(obj, struct drbd_resource *, \
			       __drbd_printk_resource, level, fmt, ## args), \
	    __builtin_choose_expr( \
	      __drbd_printk_if_same_type(obj, struct drbd_connection *, \
				 __drbd_printk_connection, level, fmt, ## args), \
	      __builtin_choose_expr( \
		__drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
				 __drbd_printk_peer_device, level, fmt, ## args), \
		drbd_printk_with_wrong_object_type()))))

#define drbd_dbg(obj, fmt, args...) \
	drbd_printk(KERN_DEBUG, obj, fmt, ## args)
#define drbd_alert(obj, fmt, args...) \
	drbd_printk(KERN_ALERT, obj, fmt, ## args)
#define drbd_err(obj, fmt, args...) \
	drbd_printk(KERN_ERR, obj, fmt, ## args)
#define drbd_warn(obj, fmt, args...) \
	drbd_printk(KERN_WARNING, obj, fmt, ## args)
#define drbd_info(obj, fmt, args...) \
	drbd_printk(KERN_INFO, obj, fmt, ## args)
#define drbd_emerg(obj, fmt, args...) \
	drbd_printk(KERN_EMERG, obj, fmt, ## args)

#define dynamic_drbd_dbg(device, fmt, args...) \
	dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
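
/* Illustrative usage (not part of the original header): drbd_printk()
 * dispatches on the pointer type at compile time, so the same logging
 * helpers work for all four object kinds, e.g. in a hypothetical caller:
 *
 *	drbd_warn(device, "found %lu out-of-sync bits\n", bits);
 *	drbd_info(connection, "connection established\n");
 *
 * Passing any other pointer type leaves only the call to the deliberately
 * undefined drbd_printk_with_wrong_object_type(), producing a link error.
 */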

#define D_ASSERT(device, exp)	do { \
	if (!(exp)) \
		drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
	} while (0)

/**
 * expect  -  Make an assertion
 *
 * Unlike the assert macro, this macro returns a boolean result.
 */
#define expect(exp) ({ \
		bool _bool = (exp); \
		if (!_bool) \
			drbd_err(device, "ASSERTION %s FAILED in %s\n", \
				#exp, __func__); \
		_bool; \
	})
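
/* Illustrative usage (not part of the original header): since expect()
 * evaluates to the tested condition, it can both log and guard a code
 * path.  Note that it implicitly references a local variable "device":
 *
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		return -EINVAL;
 */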

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(device, type);
#else
	return 0;
#endif
}
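
/* Illustrative usage (not part of the original header): callers test for
 * an injected fault right before performing the corresponding real I/O,
 * along the lines of:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		return -EIO;	// pretend the meta data write failed
 *
 * With CONFIG_DRBD_FAULT_INJECTION disabled this folds to "if (0)", so
 * the hooks cost nothing in production builds.
 */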

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))
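
/* Worked example (not in the original source): div_floor(7, 2) == 3,
 * while div_ceil(7, 2) == 3 + (7 % 2 ? 1 : 0) == 4; div_ceil only rounds
 * up when the division leaves a remainder, so div_ceil(8, 2) == 4.
 */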

extern struct ratelimit_state drbd_ratelimit_state;
extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */

extern const char *cmdname(enum drbd_packet cmd);

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and over again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_device *device,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
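
/* Worked example (not in the original source): with bit_offset == 130,
 * a 64-bit host computes word_offset = 130 >> 6 == 2 (byte offset 16);
 * a 32-bit host computes 130 >> 5 == 4, and clearing the low bit keeps 4
 * (also byte offset 16), so both word sizes resume at the same 64-bit
 * aligned position.
 */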

extern unsigned int drbd_header_size(struct drbd_connection *connection);

/**********************************************************************/
enum drbd_thread_state {
	NONE,
	RUNNING,
	EXITING,
	RESTARTING
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	int reset_cpu_mask;
	const char *name;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work {
	struct list_head list;
	int (*cb)(struct drbd_work *, int cancel);
};

struct drbd_device_work {
	struct drbd_work w;
	struct drbd_device *device;
};

#include "drbd_interval.h"

extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);

extern void lock_all_resources(void);
extern void unlock_all_resources(void);

struct drbd_request {
	struct drbd_work w;
	struct drbd_device *device;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_request_endio(). */
	struct bio *private_bio;

	struct drbd_interval i;

	/* epoch: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * causing a p_barrier packet to be sent, starting a new epoch.
	 *
	 * This corresponds to "barrier" in struct p_barrier[_ack],
	 * and to "barrier_nr" in struct drbd_epoch (and various
	 * comments/function parameters/local variable names).
	 */
	unsigned int epoch;

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */

	/* see struct drbd_device */
	struct list_head req_pending_master_completion;
	struct list_head req_pending_local;

	/* for generic IO accounting */
	unsigned long start_jif;

	/* for DRBD internal statistics */

	/* Minimal set of time stamps to determine if we wait for activity log
	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
	 * we don't expect a DRBD request to be stalled for several months.
	 */

	/* before actual request processing */
	unsigned long in_actlog_jif;

	/* local disk */
	unsigned long pre_submit_jif;

	/* per connection */
	unsigned long pre_send_jif;
	unsigned long acked_jif;
	unsigned long net_done_jif;

	/* Possibly even more detail to track each phase:
	 *  master_completion_jif
	 *      how long did it take to complete the master bio
	 *      (application visible latency)
	 *  allocated_jif
	 *      how long the master bio was blocked until we finally allocated
	 *      a tracking struct
	 *  in_actlog_jif
	 *      how long did we wait for activity log transactions
	 *
	 *  net_queued_jif
	 *      when did we finally queue it for sending
	 *  pre_send_jif
	 *      when did we start sending it
	 *  post_send_jif
	 *      how long did we block in the network stack trying to send it
	 *  acked_jif
	 *      when did we receive (or fake, in protocol A) a remote ACK
	 *  net_done_jif
	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
	 *      or decide, e.g. on connection loss, that we no longer expect
	 *      anything from this peer for this request.
	 *
	 *  pre_submit_jif
	 *  post_sub_jif
	 *      when did we start submitting to the lower level device,
	 *      and how long did we block in that submit function
	 *  local_completion_jif
	 *      how long did it take the lower level device to complete this request
	 */


	/* once it hits 0, we may complete the master_bio */
	atomic_t completion_ref;
	/* once it hits 0, we may destroy this drbd_request object */
	struct kref kref;

	unsigned rq_state; /* see comments above _req_mod() */
};

struct drbd_epoch {
	struct drbd_connection *connection;
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* Prototype declaration of function defined in drbd_receiver.c */
int drbdd_init(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_peer_request {
	struct drbd_work w;
	struct drbd_peer_device *peer_device;
	struct drbd_epoch *epoch; /* for writes */
	struct page *pages;
	atomic_t pending_bios;
	struct drbd_interval i;
	/* see comments on ee flag bits below */
	unsigned long flags;
	unsigned long submit_jif;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* is this a TRIM aka REQ_DISCARD? */
	__EE_IS_TRIM,
	/* our lower level cannot handle trim,
	 * and we want to fall back to zeroout instead */
	__EE_IS_TRIM_USE_ZEROOUT,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per peer request.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,

	/* Conflicting local requests need to be restarted after this request */
	__EE_RESTART_REQUESTS,

	/* The peer wants a write ACK for this (wire proto C) */
	__EE_SEND_WRITE_ACK,

	/* Is set when net_conf had two_primaries set while creating this peer_req */
	__EE_IN_INTERVAL_TREE,

	/* for debugfs: */
	/* has this been submitted, or does it still wait for something else? */
	__EE_SUBMITTED,

	/* this is/was a write request */
	__EE_WRITE,

	/* this originates from application on peer
	 * (not some resync or verify or other DRBD internal request) */
	__EE_APPLICATION,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_TRIM             (1<<__EE_IS_TRIM)
#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
#define EE_RESTART_REQUESTS    (1<<__EE_RESTART_REQUESTS)
#define EE_SEND_WRITE_ACK      (1<<__EE_SEND_WRITE_ACK)
#define EE_IN_INTERVAL_TREE    (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED           (1<<__EE_SUBMITTED)
#define EE_WRITE               (1<<__EE_WRITE)
#define EE_APPLICATION         (1<<__EE_APPLICATION)

/* flag bits per device */
enum {
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */

	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
	B_RS_H_DONE,		/* Before resync handler done (already executed) */
	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
	READ_BALANCE_RR,

	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
				 * from drbd_flush_after_epoch() */

	/* cleared only after backing device related structures have been destroyed. */
	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */

	/* to be used in drbd_device_post_work() */
	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
	MD_SYNC,		/* tell worker to call drbd_md_sync() */
	RS_START,		/* tell worker to start resync/OV */
	RS_PROGRESS,		/* tell worker that resync made significant progress */
	RS_DONE,		/* tell worker that resync is done */
};

struct drbd_bitmap; /* opaque for drbd_device */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0xf,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	/* so we can mark it locked for bulk operation,
	 * and still allow all non-bulk operations */
	BM_IS_LOCKED = 0x8,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,

	/* for drbd_bm_write_copy_pages, everything is allowed,
	 * only concurrent bulk operations are locked out. */
	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};

struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;  /* to protect the list. */
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	void *sbuf;
	void *rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	spinlock_t uuid_lock;
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to activity log */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
	s32 meta_dev_idx;

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;
	u32 al_size_4k; /* cached product of the above */
};

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct page *page;
	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
	const char *current_use;
	atomic_t in_use;
	unsigned int done;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_device *device);
	void (*done)(struct drbd_device *device, int rv);
};

struct fifo_buffer {
	unsigned int head_index;
	unsigned int size;
	int total; /* sum of all values */
	int values[0];
};
extern struct fifo_buffer *fifo_alloc(int fifo_size);
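
/* Minimal sketch of what fifo_alloc() is expected to do (the actual
 * implementation lives elsewhere in drbd): values[] is a flexible array
 * member, so a single allocation covers the header plus fifo_size slots:
 *
 *	struct fifo_buffer *fb;
 *
 *	fb = kzalloc(sizeof(struct fifo_buffer) + sizeof(int) * fifo_size,
 *		     GFP_NOIO);
 *	if (fb)
 *		fb->size = fifo_size;	// head_index and total stay zeroed
 *	return fb;
 */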

/* flag bits per connection */
enum {
	NET_CONGESTED,		/* The data socket is congested */
	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
	SEND_PING,
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
	CONN_WD_ST_CHG_OKAY,
	CONN_WD_ST_CHG_FAIL,
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	STATE_SENT,		/* Do not change state/UUIDs while this is set */
	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
				 * pending, from drbd worker context.
				 * If set, bdi_write_congested() returns true,
				 * so shrink_page_list() would not recurse into,
				 * and potentially deadlock on, this drbd worker.
				 */
	DISCONNECT_SENT,

	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
};

enum which_state { NOW, OLD = NOW, NEW };

struct drbd_resource {
	char *name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_res;
	struct dentry *debugfs_res_volumes;
	struct dentry *debugfs_res_connections;
	struct dentry *debugfs_res_in_flight_summary;
#endif
	struct kref kref;
	struct idr devices;		/* volume number to device mapping */
	struct list_head connections;
	struct list_head resources;
	struct res_opts res_opts;
	struct mutex conf_update;	/* mutex for read-copy-update of net_conf and disk_conf */
	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
	spinlock_t req_lock;

	unsigned susp:1;		/* IO suspended by user */
	unsigned susp_nod:1;		/* IO suspended because no data */
	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */

	enum write_ordering_e write_ordering;

	cpumask_var_t cpu_mask;
};

struct drbd_thread_timing_details
{
	unsigned long start_jif;
	void *cb_addr;
	const char *caller_fn;
	unsigned int line;
	unsigned int cb_nr;
};

struct drbd_connection {
	struct list_head connections;
	struct drbd_resource *resource;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_conn;
	struct dentry *debugfs_conn_callback_history;
	struct dentry *debugfs_conn_oldest_requests;
#endif
	struct kref kref;
	struct idr peer_devices;	/* volume number to peer device mapping */
	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
	struct mutex cstate_mutex;	/* Protects graceful disconnects */
	unsigned int connect_cnt;	/* Inc each time a connection is established */

	unsigned long flags;
	struct net_conf *net_conf;	/* content protected by rcu */
	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */

	struct sockaddr_storage my_addr;
	int my_addr_len;
	struct sockaddr_storage peer_addr;
	int peer_addr_len;

	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
	struct drbd_socket meta;	/* ping/ack (metadata) packets */
	int agreed_pro_version;		/* actually used protocol version */
	u32 agreed_features;
	unsigned long last_received;	/* in jiffies, either socket */
	unsigned int ko_count;

	struct list_head transfer_log;	/* all requests not yet fully processed */

	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
	struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;
	void *int_dig_in;
	void *int_dig_vv;

	/* receiver side */
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	atomic_t current_tle_nr;	/* transfer log epoch number */
	unsigned current_tle_writes;	/* writes seen within this tl epoch */

	unsigned long last_reconnect_jif;
	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread ack_receiver;
	struct workqueue_struct *ack_sender;

	/* cached pointers,
	 * so we can look up the oldest pending requests more quickly.
	 * protected by resource->req_lock */
	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
	struct drbd_request *req_ack_pending;
	struct drbd_request *req_not_net_done;

	/* sender side */
	struct drbd_work_queue sender_work;

#define DRBD_THREAD_DETAILS_HIST	16
	unsigned int w_cb_nr; /* keeps counting up */
	unsigned int r_cb_nr; /* keeps counting up */
	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];

	struct {
		unsigned long last_sent_barrier_jif;

		/* whether this sender thread
		 * has processed a single write yet. */
		bool seen_any_write_yet;

		/* Which barrier number to send with the next P_BARRIER */
		int current_epoch_nr;

		/* how many write requests have been sent
		 * with req->epoch == current_epoch_nr.
		 * If none, no P_BARRIER will be sent. */
		unsigned current_epoch_writes;
	} send;
};

static inline bool has_net_conf(struct drbd_connection *connection)
{
	bool has_net_conf;

	rcu_read_lock();
	has_net_conf = rcu_dereference(connection->net_conf);
	rcu_read_unlock();

	return has_net_conf;
}

void __update_timing_details(
		struct drbd_thread_timing_details *tdp,
		unsigned int *cb_nr,
		void *cb,
		const char *fn, const unsigned int line);

#define update_worker_timing_details(c, cb) \
	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
#define update_receiver_timing_details(c, cb) \
	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )

struct submit_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	/* protected by ..->resource->req_lock */
	struct list_head writes;
};

struct drbd_peer_device {
	struct list_head peer_devices;
	struct drbd_device *device;
	struct drbd_connection *connection;
	struct work_struct send_acks_work;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_peer_dev;
#endif
};

struct drbd_device {
	struct drbd_resource *resource;
	struct list_head peer_devices;
	struct list_head pending_bitmap_io;

	unsigned long flush_jif;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_minor;
	struct dentry *debugfs_vol;
	struct dentry *debugfs_vol_oldest_requests;
	struct dentry *debugfs_vol_act_log_extents;
	struct dentry *debugfs_vol_resync_extents;
	struct dentry *debugfs_vol_data_gen_id;
	struct dentry *debugfs_vol_ed_gen_id;
#endif

	unsigned int vnr;	/* volume number within the connection */
	unsigned int minor;	/* device minor number */

	struct kref kref;

	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	unsigned long last_reattach_jif;
	struct drbd_work resync_work;
	struct drbd_work unplug_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_dev_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t suspend_cnt;

	/* Interval tree of pending local requests */
	struct rb_root read_requests;
	struct rb_root write_requests;

	/* for statistics and timeouts */
	/* [0] read, [1] write */
	struct list_head pending_master_completion[2];
	struct list_head pending_completion[2];

	/* use checksums for *this* resync */
	bool use_csums;
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;
	unsigned long rs_last_bcast; /* [unit jiffies] */

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	sector_t ov_stop_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */

	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;

	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* need to send P_WRITE_ACK */
	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
	struct list_head net_ee;    /* zero-copy network send in progress */

	int next_barrier_nr;
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct drbd_md_io md_io;
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex own_state_mutex;
	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device, when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer *rs_plan_s; /* correction values of resync planner (RCU, connection->conn_update) */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	unsigned int peer_max_bio_size;
	unsigned int local_max_bio_size;

	/* any requests that would block in drbd_make_request()
	 * are deferred to this single-threaded work queue */
	struct submit_worker submit;
};

struct drbd_bm_aio_ctx {
	struct drbd_device *device;
	struct list_head list; /* on device->pending_bitmap_io */
	unsigned long start_jif;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_AIO_WRITE_ALL_PAGES	4
#define BM_AIO_READ		8
	int error;
	struct kref kref;
};

struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_device *device;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
};

static inline struct drbd_device *minor_to_device(unsigned int minor)
{
	return (struct drbd_device *)idr_find(&drbd_devices, minor);
}

static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
{
	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
}

static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
	return idr_find(&connection->peer_devices, volume_number);
}

#define for_each_resource(resource, _resources) \
	list_for_each_entry(resource, _resources, resources)

#define for_each_resource_rcu(resource, _resources) \
	list_for_each_entry_rcu(resource, _resources, resources)

#define for_each_resource_safe(resource, tmp, _resources) \
	list_for_each_entry_safe(resource, tmp, _resources, resources)

#define for_each_connection(connection, resource) \
	list_for_each_entry(connection, &resource->connections, connections)

#define for_each_connection_rcu(connection, resource) \
	list_for_each_entry_rcu(connection, &resource->connections, connections)

#define for_each_connection_safe(connection, tmp, resource) \
	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)

#define for_each_peer_device(peer_device, device) \
	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_rcu(peer_device, device) \
	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)

#define for_each_peer_device_safe(peer_device, tmp, device) \
	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)

static inline unsigned int device_to_minor(struct drbd_device *device)
{
	return device->minor;
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_device *device);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
#else
#define drbd_thread_current_set_cpu(A) ({})
#endif
extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_connection *);
extern void drbd_free_sock(struct drbd_connection *connection);
extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
			 unsigned);

extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
extern int drbd_send_protocol(struct drbd_connection *connection);
extern int drbd_send_uuids(struct drbd_peer_device *);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
extern int drbd_send_current_state(struct drbd_peer_device *);
extern int drbd_send_sync_param(struct drbd_peer_device *);
extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
			    u32 set_size);
extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
			 struct drbd_peer_request *);
extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_block_req *rp);
extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
			     struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
			   struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
				   int size, void *digest, int digest_size,
				   enum drbd_packet cmd);
extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);

extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
extern void drbd_md_sync(struct drbd_device *device);
extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_device *device);
extern void drbd_queue_bitmap_io(struct drbd_device *device,
				 int (*io_fn)(struct drbd_device *),
				 void (*done)(struct drbd_device *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);

/* Meta data layout
 *
 * We currently have two possible layouts.
 * Offsets in (512 byte) sectors.
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 *  Variants:
 *     old, indexed fixed size meta data:
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
 *  end of the device, so that the [4k superblock] will be 4k aligned.
 *
 *  The activity log consists of 4k transaction blocks,
 *  which are written in a ring-buffer, or striped ring-buffer like fashion.
 *  Its size used to be fixed at 32kB, but is about to become configurable.
 */

/* Our old fixed size meta data layout
 * allows up to about 3.8TB, so if you want more,
 * you need to use the "flexible" meta data format. */
#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
#define MD_4kB_SECT	 8
#define MD_32kB_SECT	64

/* One activity log extent represents 4M of storage */
#define AL_EXTENT_SHIFT 22
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
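
/* Worked example (not in the original source): AL_EXTENT_SHIFT == 22, so
 * AL_EXTENT_SIZE == 1 << 22 == 4 MiB of storage per activity log extent;
 * a fixed 32 kB on-disk activity log of 4k transaction blocks thus holds
 * 8 transactions.
 */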

/* We could make these currently hardcoded constants configurable
 * variables at create-md time (or even re-configurable at runtime?).
 * Which will require some more changes to the DRBD "super block"
 * and attach code.
 *
 * updates per transaction:
 *   This many changes to the active set can be logged with one transaction.
 *   This number is arbitrary.
 * context per transaction:
 *   This many context extent numbers are logged with each transaction.
 *   This number is resulting from the transaction block size (4k), the layout
 *   of the transaction header, and the number of updates per transaction.
 *   See drbd_actlog.c:struct al_transaction_on_disk
 * */
#define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
#define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

/* We do bitmap IO in units of 4k blocks.
 * We also still have a hardcoded 4k per bit relation. */
#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* mostly arbitrarily set the represented size of one bitmap extent,
 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
 * at 4k per bit resolution) */
#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
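
/* Worked example (not in the original source): one bit covers one 4k
 * block, i.e. 4096/512 == 8 storage sectors.  Hence BM_SECT_PER_BIT == 8,
 * BM_SECT_TO_BIT(16) == 16 >> 3 == 2, and Bit2KB(1) == 1 << 2 == 4 KiB.
 */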
1271
1272/* in which _bitmap_ extent (resp. sector) the bit for a certain
1273 * _storage_ sector is located in */
1274#define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
5ab7d2c0 1275#define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
b411b363 1276
5ab7d2c0 1277/* first storage sector a bitmap extent corresponds to */
b411b363 1278#define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
5ab7d2c0 1279/* how much _storage_ sectors we have per bitmap extent */
b411b363 1280#define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
5ab7d2c0
LE
1281/* how many bits are covered by one bitmap extent (resync extent) */
1282#define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1283
1284#define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
1285
b411b363
PR
1286
1287/* in one sector of the bitmap, we have this many activity_log extents. */
1288#define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
b411b363 1289
b411b363
PR
1290/* the extent in "PER_EXTENT" below is an activity log extent
1291 * we need that many (long words/bytes) to store the bitmap
1292 * of one AL_EXTENT_SIZE chunk of storage.
1293 * we can store the bitmap for that many AL_EXTENTS within
1294 * one sector of the _on_disk_ bitmap:
1295 * bit 0 bit 37 bit 38 bit (512*8)-1
1296 * ...|........|........|.. // ..|........|
1297 * sect. 0 `296 `304 ^(512*8*8)-1
1298 *
1299#define BM_WORDS_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1300#define BM_BYTES_PER_EXT ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 ) // 128
1301#define BM_EXT_PER_SECT ( 512 / BM_BYTES_PER_EXTENT ) // 4
1302 */
1303
1304#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
ae8bf312
LE
1305/* we have a certain meta data variant that has a fixed on-disk size of 128
1306 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
1307 * log, leaving this many sectors for the bitmap.
1308 */
1309
1310#define DRBD_MAX_SECTORS_FIXED_BM \
1311 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1312#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
b411b363
PR
1313#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
1314#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
1315#else
ae8bf312 1316#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
b411b363
PR
1317/* 16 TB in units of sectors */
1318#if BITS_PER_LONG == 32
1319/* adjust by one page worth of bitmap,
1320 * so we won't wrap around in drbd_bm_find_next_bit.
1321 * you should use 64bit OS for that much storage, anyways. */
1322#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1323#else
4b0715f0
LE
1324/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
1325#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1326/* corresponds to (1UL << 38) bits right now. */
b411b363
PR
1327#endif
1328#endif
1329
23361cf3
LE
1330/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
1331 * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
1332 * Since we may live in a mixed-platform cluster,
1333 * we limit us to a platform agnostic constant here for now.
1334 * A followup commit may allow even bigger BIO sizes,
1335 * once we thought that through. */
98683650 1336#define DRBD_MAX_BIO_SIZE (1U << 20)
23361cf3
LE
1337#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1338#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1339#endif
db141b2f 1340#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
b411b363 1341
98683650
PR
1342#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
1343#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
b411b363 1344
a0fb3c47
LE
1345/* For now, don't allow more than one activity log extent worth of data
1346 * to be discarded in one go. We may need to rework drbd_al_begin_io()
1347 * to allow for even larger discard ranges */
1348#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE
1349#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)
1350
b30ab791
AG
1351extern int drbd_bm_init(struct drbd_device *device);
1352extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1353extern void drbd_bm_cleanup(struct drbd_device *device);
1354extern void drbd_bm_set_all(struct drbd_device *device);
1355extern void drbd_bm_clear_all(struct drbd_device *device);
4b0715f0 1356/* set/clear/test only a few bits at a time */
b411b363 1357extern int drbd_bm_set_bits(
b30ab791 1358 struct drbd_device *device, unsigned long s, unsigned long e);
b411b363 1359extern int drbd_bm_clear_bits(
b30ab791 1360 struct drbd_device *device, unsigned long s, unsigned long e);
4b0715f0 1361extern int drbd_bm_count_bits(
b30ab791 1362 struct drbd_device *device, const unsigned long s, const unsigned long e);
4b0715f0
LE
1363/* bm_set_bits variant for use while holding drbd_bm_lock,
1364 * may process the whole bitmap in one go */
b30ab791 1365extern void _drbd_bm_set_bits(struct drbd_device *device,
b411b363 1366 const unsigned long s, const unsigned long e);
b30ab791
AG
1367extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1368extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
b30ab791
AG
1369extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1370extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1371extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1372extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
c7a58db4 1373extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
b30ab791
AG
1374extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1375extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1376extern size_t drbd_bm_words(struct drbd_device *device);
1377extern unsigned long drbd_bm_bits(struct drbd_device *device);
1378extern sector_t drbd_bm_capacity(struct drbd_device *device);
4b0715f0
LE
1379
1380#define DRBD_END_OF_BITMAP (~(unsigned long)0)
b30ab791 1381extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
b411b363 1382/* bm_find_next variants for use while you hold drbd_bm_lock() */
b30ab791
AG
1383extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1384extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1385extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1386extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
b411b363 1387/* for receive_bitmap */
b30ab791 1388extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
b411b363 1389 size_t number, unsigned long *buffer);
19f843aa 1390/* for _drbd_send_bitmap */
b30ab791 1391extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
b411b363
PR
1392 size_t number, unsigned long *buffer);
1393
b30ab791
AG
1394extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1395extern void drbd_bm_unlock(struct drbd_device *device);
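Callers bracket whole-bitmap operations between drbd_bm_lock() and drbd_bm_unlock(); a minimal sketch of the idiom follows (the BM_LOCKED_MASK flag is assumed from the bm_flag enum earlier in this header, and the function is illustrative, not driver code):

static void example_bm_set_range_locked(struct drbd_device *device,
					unsigned long s, unsigned long e)
{
	drbd_bm_lock(device, "example_bm_set_range_locked", BM_LOCKED_MASK);
	_drbd_bm_set_bits(device, s, e);	/* may process the whole bitmap */
	drbd_bm_unlock(device);
}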
b411b363
PR
1396/* drbd_main.c */
1397
1398extern struct kmem_cache *drbd_request_cache;
6c852bec 1399extern struct kmem_cache *drbd_ee_cache; /* peer requests */
b411b363
PR
1400extern struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
1401extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
1402extern mempool_t *drbd_request_mempool;
1403extern mempool_t *drbd_ee_mempool;
1404
4281808f
LE
1405/* drbd's page pool, used to buffer data received from the peer,
1406 * or data requested by the peer.
1407 *
1408 * This does not have an emergency reserve.
1409 *
1410 * When allocating from this pool, it first takes pages from the pool.
1411 * Only if the pool is depleted will it try to allocate from the system.
1412 *
1413 * The assumption is that pages taken from this pool will be processed,
1414 * and given back, "quickly", and then can be recycled, so we can avoid
1415 * frequent calls to alloc_page(), and will still be able to make progress even
1416 * under memory pressure.
1417 */
1418extern struct page *drbd_pp_pool;
b411b363
PR
1419extern spinlock_t drbd_pp_lock;
1420extern int drbd_pp_vacant;
1421extern wait_queue_head_t drbd_pp_wait;
1422
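As an illustration of the pool-first strategy described above, taking a single page might look like the following sketch (this is not the driver's actual allocator; the helper name, the GFP_NOIO fallback, and chaining pages through page_private() are assumptions for the sketch):

/* hedged sketch: prefer the pre-allocated pool, else hit the allocator */
static struct page *example_pp_take_one(void)
{
	struct page *page = NULL;

	spin_lock(&drbd_pp_lock);
	if (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		drbd_pp_vacant--;
	}
	spin_unlock(&drbd_pp_lock);

	/* only if the pool is depleted, fall back to the system */
	if (!page)
		page = alloc_page(GFP_NOIO);
	return page;
}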
4281808f
LE
1423/* We also need a standard (emergency-reserve backed) page pool
1424 * for meta data IO (activity log, bitmap).
1425 * We can keep it global, as long as it is used as "N pages at a time".
1426 * 128 should be plenty; currently we could probably get away with as few as 1.
1427 */
1428#define DRBD_MIN_POOL_PAGES 128
1429extern mempool_t *drbd_md_io_page_pool;
1430
9476f39d
LE
1431/* We also need to make sure we get a bio
1432 * when we need it for housekeeping purposes */
1433extern struct bio_set *drbd_md_io_bio_set;
1434/* to allocate from that set */
1435extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1436
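A minimal sketch of such an allocation, assuming the usual bio_alloc_bioset() fall-back idiom of this kernel generation (the real bio_alloc_drbd() lives in drbd_main.c and may differ in detail):

static struct bio *example_bio_alloc_drbd(gfp_t gfp_mask)
{
	/* fall back to the generic allocator while the set is not set up */
	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	/* the bio_set backs housekeeping IO with a guaranteed reserve */
	return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
}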
28bc3b8c 1437extern struct mutex resources_mutex;
b411b363 1438
bde89a9e 1439extern int conn_lowest_minor(struct drbd_connection *connection);
a910b123 1440extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
05a10ec7 1441extern void drbd_destroy_device(struct kref *kref);
a910b123 1442extern void drbd_delete_device(struct drbd_device *device);
b411b363 1443
77c556f6
AG
1444extern struct drbd_resource *drbd_create_resource(const char *name);
1445extern void drbd_free_resource(struct drbd_resource *resource);
1446
eb6bea67 1447extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
bde89a9e 1448extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
05a10ec7 1449extern void drbd_destroy_connection(struct kref *kref);
bde89a9e 1450extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
089c075d 1451 void *peer_addr, int peer_addr_len);
4bc76048 1452extern struct drbd_resource *drbd_find_resource(const char *name);
77c556f6 1453extern void drbd_destroy_resource(struct kref *kref);
bde89a9e 1454extern void conn_free_crypto(struct drbd_connection *connection);
b411b363
PR
1455
1456extern int proc_details;
1457
1458/* drbd_req */
113fef9e 1459extern void do_submit(struct work_struct *ws);
54761697 1460extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
dece1635 1461extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
b30ab791 1462extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
b411b363
PR
1463extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1464
1465
1466/* drbd_nl.c */
a2972846
AG
1467
1468extern struct mutex notification_mutex;
1469
b30ab791
AG
1470extern void drbd_suspend_io(struct drbd_device *device);
1471extern void drbd_resume_io(struct drbd_device *device);
b411b363 1472extern char *ppsize(char *buf, unsigned long long size);
54761697 1473extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
e96c9633 1474enum determine_dev_size {
d752b269
PR
1475 DS_ERROR_SHRINK = -3,
1476 DS_ERROR_SPACE_MD = -2,
e96c9633
PR
1477 DS_ERROR = -1,
1478 DS_UNCHANGED = 0,
1479 DS_SHRUNK = 1,
57737adc
PR
1480 DS_GREW = 2,
1481 DS_GREW_FROM_ZERO = 3,
e96c9633 1482};
d752b269 1483extern enum determine_dev_size
54761697
AG
1484drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1485extern void resync_after_online_grow(struct drbd_device *);
8fe39aac 1486extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
b30ab791 1487extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
bf885f8a
AG
1488 enum drbd_role new_role,
1489 int force);
bde89a9e
AG
1490extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1491extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
b30ab791 1492extern int drbd_khelper(struct drbd_device *device, char *cmd);
b411b363
PR
1493
1494/* drbd_worker.c */
d40e5671 1495/* bi_end_io handlers */
4246a0b6
CH
1496extern void drbd_md_endio(struct bio *bio);
1497extern void drbd_peer_request_endio(struct bio *bio);
1498extern void drbd_request_endio(struct bio *bio);
b411b363 1499extern int drbd_worker(struct drbd_thread *thi);
b30ab791
AG
1500enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1501void drbd_resync_after_changed(struct drbd_device *device);
1502extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1503extern void resume_next_sg(struct drbd_device *device);
1504extern void suspend_other_sg(struct drbd_device *device);
1505extern int drbd_resync_finished(struct drbd_device *device);
b411b363 1506/* maybe rather drbd_main.c ? */
e37d2438 1507extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
b30ab791
AG
1508extern void drbd_md_put_buffer(struct drbd_device *device);
1509extern int drbd_md_sync_page_io(struct drbd_device *device,
b411b363 1510 struct drbd_backing_dev *bdev, sector_t sector, int rw);
54761697 1511extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
b30ab791 1512extern void wait_until_done_or_force_detached(struct drbd_device *device,
44edfb0d 1513 struct drbd_backing_dev *bdev, unsigned int *done);
b30ab791 1514extern void drbd_rs_controller_reset(struct drbd_device *device);
b411b363 1515
b30ab791 1516static inline void ov_out_of_sync_print(struct drbd_device *device)
b411b363 1517{
b30ab791 1518 if (device->ov_last_oos_size) {
d0180171 1519 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
b30ab791
AG
1520 (unsigned long long)device->ov_last_oos_start,
1521 (unsigned long)device->ov_last_oos_size);
b411b363 1522 }
b30ab791 1523 device->ov_last_oos_size = 0;
b411b363
PR
1524}
1525
1526
79a3c8d3
AG
1527extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
1528extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
b411b363 1529/* worker callbacks */
99920dc5
AG
1530extern int w_e_end_data_req(struct drbd_work *, int);
1531extern int w_e_end_rsdata_req(struct drbd_work *, int);
1532extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1533extern int w_e_end_ov_reply(struct drbd_work *, int);
1534extern int w_e_end_ov_req(struct drbd_work *, int);
1535extern int w_ov_finished(struct drbd_work *, int);
1536extern int w_resync_timer(struct drbd_work *, int);
1537extern int w_send_write_hint(struct drbd_work *, int);
99920dc5 1538extern int w_send_dblock(struct drbd_work *, int);
99920dc5 1539extern int w_send_read_req(struct drbd_work *, int);
99920dc5
AG
1540extern int w_e_reissue(struct drbd_work *, int);
1541extern int w_restart_disk_io(struct drbd_work *, int);
8f7bed77 1542extern int w_send_out_of_sync(struct drbd_work *, int);
99920dc5 1543extern int w_start_resync(struct drbd_work *, int);
b411b363
PR
1544
1545extern void resync_timer_fn(unsigned long data);
370a43e7 1546extern void start_resync_timer_fn(unsigned long data);
b411b363 1547
a0fb3c47
LE
1548extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1549
b411b363 1550/* drbd_receiver.c */
753c6191 1551extern int drbd_receiver(struct drbd_thread *thi);
1c03e520 1552extern int drbd_ack_receiver(struct drbd_thread *thi);
668700b4
PR
1553extern void drbd_send_ping_wf(struct work_struct *ws);
1554extern void drbd_send_acks_wf(struct work_struct *ws);
e8299874 1555extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
ad3fee79
LE
1556extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1557 bool throttle_if_app_is_waiting);
54761697 1558extern int drbd_submit_peer_request(struct drbd_device *,
fbe29dec
AG
1559 struct drbd_peer_request *, const unsigned,
1560 const int);
54761697 1561extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
69a22773 1562extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
0db55363 1563 sector_t, unsigned int,
a0fb3c47 1564 bool,
0db55363 1565 gfp_t) __must_hold(local);
54761697 1566extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
3967deb1
AG
1567 int);
1568#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1569#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
69a22773 1570extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
b30ab791
AG
1571extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1572extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
69a22773 1573extern int drbd_connected(struct drbd_peer_device *);
b411b363 1574
b411b363
PR
1575static inline void drbd_tcp_cork(struct socket *sock)
1576{
ed439848 1577 int val = 1;
e805b983 1578 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
ed439848 1579 (char*)&val, sizeof(val));
b411b363
PR
1580}
1581
1582static inline void drbd_tcp_uncork(struct socket *sock)
1583{
ed439848 1584 int val = 0;
e805b983 1585 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
ed439848 1586 (char*)&val, sizeof(val));
b411b363
PR
1587}
1588
1589static inline void drbd_tcp_nodelay(struct socket *sock)
1590{
ed439848 1591 int val = 1;
e805b983 1592 (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
ed439848 1593 (char*)&val, sizeof(val));
b411b363
PR
1594}
1595
1596static inline void drbd_tcp_quickack(struct socket *sock)
1597{
ed439848 1598 int val = 2;
e805b983 1599 (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
ed439848 1600 (char*)&val, sizeof(val));
b411b363
PR
1601}
1602
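Typical use brackets a batch of related sends so the network stack coalesces them into fewer segments (an illustrative sketch, not driver code):

static void example_send_batch(struct socket *sock)
{
	drbd_tcp_cork(sock);	/* hold back partial frames */
	/* ... queue several small sends on sock here ... */
	drbd_tcp_uncork(sock);	/* push out the coalesced data */
}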
d40e5671
PR
1603/* sets the number of 512 byte sectors of our virtual device */
1604static inline void drbd_set_my_capacity(struct drbd_device *device,
1605 sector_t size)
1606{
1607 /* set_capacity(device->this_bdev->bd_disk, size); */
1608 set_capacity(device->vdisk, size);
1609 device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
1610}
1611
1612/*
1613 * used to submit our private bio
1614 */
1615static inline void drbd_generic_make_request(struct drbd_device *device,
1616 int fault_type, struct bio *bio)
1617{
1618 __release(local);
1619 if (!bio->bi_bdev) {
f88c5d90 1620 drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
4246a0b6
CH
1621 bio->bi_error = -ENODEV;
1622 bio_endio(bio);
d40e5671
PR
1623 return;
1624 }
1625
1626 if (drbd_insert_fault(device, fault_type))
4246a0b6 1627 bio_io_error(bio);
d40e5671
PR
1628 else
1629 generic_make_request(bio);
1630}
1631
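A caller submitting, say, a meta-data write would pass the matching fault-injection type (a sketch; it assumes DRBD_FAULT_MD_WR from the fault type enum, and a bio that is already fully prepared):

static void example_submit_md_write(struct drbd_device *device,
				    struct bio *bio)
{
	/* bio assumed prepared: bi_bdev, sector, pages, bi_end_io set */
	drbd_generic_make_request(device, DRBD_FAULT_MD_WR, bio);
}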
8fe39aac
PR
1632void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1633 enum write_ordering_e wo);
b411b363
PR
1634
1635/* drbd_proc.c */
1636extern struct proc_dir_entry *drbd_proc;
7d4e9d09 1637extern const struct file_operations drbd_proc_fops;
b411b363
PR
1638extern const char *drbd_conn_str(enum drbd_conns s);
1639extern const char *drbd_role_str(enum drbd_role s);
1640
1641/* drbd_actlog.c */
e4d7d6f4 1642extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
b30ab791 1643extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
4dd726f0 1644extern void drbd_al_begin_io_commit(struct drbd_device *device);
b30ab791 1645extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
4dd726f0 1646extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
b30ab791
AG
1647extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1648extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1649extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1650extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1651extern void drbd_rs_cancel_all(struct drbd_device *device);
1652extern int drbd_rs_del_all(struct drbd_device *device);
1653extern void drbd_rs_failed_io(struct drbd_device *device,
b411b363 1654 sector_t sector, int size);
b30ab791 1655extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
5ab7d2c0
LE
1656
1657enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1658extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
179e20b8 1659 enum update_sync_bits_mode mode);
b30ab791 1660#define drbd_set_in_sync(device, sector, size) \
179e20b8 1661 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
b30ab791 1662#define drbd_set_out_of_sync(device, sector, size) \
179e20b8 1663 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
5ab7d2c0 1664#define drbd_rs_failed_io(device, sector, size) \
179e20b8 1665 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
b30ab791 1666extern void drbd_al_shrink(struct drbd_device *device);
5f7c0124 1667extern int drbd_al_initialize(struct drbd_device *, void *);
b411b363 1668
b411b363 1669/* drbd_nl.c */
3b98c0c2
LE
1670/* state info broadcast */
1671struct sib_info {
1672 enum drbd_state_info_bcast_reason sib_reason;
1673 union {
1674 struct {
1675 char *helper_name;
1676 unsigned helper_exit_code;
1677 };
1678 struct {
1679 union drbd_state os;
1680 union drbd_state ns;
1681 };
1682 };
1683};
b30ab791 1684void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
b411b363 1685
a2972846
AG
1686extern void notify_resource_state(struct sk_buff *,
1687 unsigned int,
1688 struct drbd_resource *,
1689 struct resource_info *,
1690 enum drbd_notification_type);
1691extern void notify_device_state(struct sk_buff *,
1692 unsigned int,
1693 struct drbd_device *,
1694 struct device_info *,
1695 enum drbd_notification_type);
1696extern void notify_connection_state(struct sk_buff *,
1697 unsigned int,
1698 struct drbd_connection *,
1699 struct connection_info *,
1700 enum drbd_notification_type);
1701extern void notify_peer_device_state(struct sk_buff *,
1702 unsigned int,
1703 struct drbd_peer_device *,
1704 struct peer_device_info *,
1705 enum drbd_notification_type);
1706extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1707 struct drbd_connection *, const char *, int);
1708
b411b363
PR
1709/*
1710 * inline helper functions
1711 *************************/
1712
45bb912b
LE
1713/* see also page_chain_add and friends in drbd_receiver.c */
1714static inline struct page *page_chain_next(struct page *page)
1715{
1716 return (struct page *)page_private(page);
1717}
1718#define page_chain_for_each(page) \
1719 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1720 page = page_chain_next(page))
1721#define page_chain_for_each_safe(page, n) \
1722 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1723
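For illustration, walking a chain with these macros might look like this (a sketch; the chain head would come from drbd_alloc_pages()):

/* count the pages in a chain; page may be NULL for an empty chain */
static unsigned int example_page_chain_count(struct page *page)
{
	unsigned int n = 0;

	page_chain_for_each(page)
		n++;
	return n;
}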
45bb912b 1724
045417f7 1725static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
45bb912b 1726{
db830c46 1727 struct page *page = peer_req->pages;
45bb912b
LE
1728 page_chain_for_each(page) {
1729 if (page_count(page) > 1)
1730 return 1;
1731 }
1732 return 0;
1733}
1734
b30ab791 1735static inline union drbd_state drbd_read_state(struct drbd_device *device)
b411b363 1736{
6bbf53ca 1737 struct drbd_resource *resource = device->resource;
78bae59b
PR
1738 union drbd_state rv;
1739
b30ab791 1740 rv.i = device->state.i;
6bbf53ca
AG
1741 rv.susp = resource->susp;
1742 rv.susp_nod = resource->susp_nod;
1743 rv.susp_fen = resource->susp_fen;
78bae59b
PR
1744
1745 return rv;
b411b363
PR
1746}
1747
383606e0 1748enum drbd_force_detach_flags {
a2a3c74f
LE
1749 DRBD_READ_ERROR,
1750 DRBD_WRITE_ERROR,
383606e0
LE
1751 DRBD_META_IO_ERROR,
1752 DRBD_FORCE_DETACH,
1753};
1754
b411b363 1755#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
b30ab791 1756static inline void __drbd_chk_io_error_(struct drbd_device *device,
a2a3c74f 1757 enum drbd_force_detach_flags df,
383606e0 1758 const char *where)
b411b363 1759{
daeda1cc
PR
1760 enum drbd_io_error_p ep;
1761
1762 rcu_read_lock();
b30ab791 1763 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
daeda1cc
PR
1764 rcu_read_unlock();
1765 switch (ep) {
1766 case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
a2a3c74f 1767 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
7383506c 1768 if (__ratelimit(&drbd_ratelimit_state))
d0180171 1769 drbd_err(device, "Local IO failed in %s.\n", where);
b30ab791
AG
1770 if (device->state.disk > D_INCONSISTENT)
1771 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
b411b363
PR
1772 break;
1773 }
a2a3c74f 1774 /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
b411b363
PR
1775 case EP_DETACH:
1776 case EP_CALL_HELPER:
a2a3c74f
LE
1777 /* Remember whether we saw a READ or WRITE error.
1778 *
1779 * Recovery of the affected area for WRITE failure is covered
1780 * by the activity log.
1781 * READ errors may fall outside that area though. Certain READ
1782 * errors can be "healed" by writing good data to the affected
1783 * blocks, which triggers block re-allocation in lower layers.
1784 *
1785 * If we can not write the bitmap after a READ error,
1786 * we may need to trigger a full sync (see w_go_diskless()).
1787 *
1788 * Force-detach is not really an IO error, but rather a
1789 * desperate measure to try to deal with a completely
1790 * unresponsive lower level IO stack.
1791 * Still it should be treated as a WRITE error.
1792 *
1793 * Meta IO error is always WRITE error:
1794 * we read meta data only once during attach,
1795 * which will fail in case of errors.
1796 */
b30ab791 1797 set_bit(WAS_IO_ERROR, &device->flags);
a2a3c74f 1798 if (df == DRBD_READ_ERROR)
b30ab791 1799 set_bit(WAS_READ_ERROR, &device->flags);
a2a3c74f 1800 if (df == DRBD_FORCE_DETACH)
b30ab791
AG
1801 set_bit(FORCE_DETACH, &device->flags);
1802 if (device->state.disk > D_FAILED) {
1803 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
d0180171 1804 drbd_err(device,
82f59cc6 1805 "Local IO failed in %s. Detaching...\n", where);
b411b363
PR
1806 }
1807 break;
1808 }
1809}
1810
1811/**
1812 * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
b30ab791 1813 * @device: DRBD device.
b411b363
PR
1814 * @error: Error code passed to the IO completion callback
1815 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1816 *
1817 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1818 */
1819#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
b30ab791 1820static inline void drbd_chk_io_error_(struct drbd_device *device,
383606e0 1821 int error, enum drbd_force_detach_flags forcedetach, const char *where)
b411b363
PR
1822{
1823 if (error) {
1824 unsigned long flags;
0500813f 1825 spin_lock_irqsave(&device->resource->req_lock, flags);
b30ab791 1826 __drbd_chk_io_error_(device, forcedetach, where);
0500813f 1827 spin_unlock_irqrestore(&device->resource->req_lock, flags);
b411b363
PR
1828 }
1829}
1830
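From a completion handler, the wrapper is used roughly like this (hedged sketch; the handler and the bi_private convention are illustrative assumptions, not driver code):

static void example_write_endio(struct bio *bio)
{
	struct drbd_device *device = bio->bi_private;

	/* a non-zero bi_error triggers the on_io_error policy above */
	if (bio->bi_error)
		drbd_chk_io_error(device, bio->bi_error, DRBD_WRITE_ERROR);
	bio_put(bio);
}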
1831
1832/**
1833 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1834 * @bdev: Meta data block device.
1835 *
1836 * BTW, for internal meta data, this happens to be the maximum capacity
1837 * we could agree upon with our peer node.
1838 */
68e41a43 1839static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
b411b363 1840{
68e41a43 1841 switch (bdev->md.meta_dev_idx) {
b411b363
PR
1842 case DRBD_MD_INDEX_INTERNAL:
1843 case DRBD_MD_INDEX_FLEX_INT:
1844 return bdev->md.md_offset + bdev->md.bm_offset;
1845 case DRBD_MD_INDEX_FLEX_EXT:
1846 default:
1847 return bdev->md.md_offset;
1848 }
1849}
1850
1851/**
1852 * drbd_md_last_sector() - Return the last sector number of the meta data area
1853 * @bdev: Meta data block device.
1854 */
1855static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1856{
68e41a43 1857 switch (bdev->md.meta_dev_idx) {
b411b363
PR
1858 case DRBD_MD_INDEX_INTERNAL:
1859 case DRBD_MD_INDEX_FLEX_INT:
ae8bf312 1860 return bdev->md.md_offset + MD_4kB_SECT -1;
b411b363
PR
1861 case DRBD_MD_INDEX_FLEX_EXT:
1862 default:
ae8bf312 1863 return bdev->md.md_offset + bdev->md.md_size_sect -1;
b411b363
PR
1864 }
1865}
1866
1867/* Returns the number of 512 byte sectors of the device */
1868static inline sector_t drbd_get_capacity(struct block_device *bdev)
1869{
1870 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
77304d2a 1871 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
b411b363
PR
1872}
1873
1874/**
1875 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
1876 * @bdev: Meta data block device.
1877 *
1878 * Returns the capacity we announce to our peer. We clip ourselves at the
1879 * various MAX_SECTORS, because if we don't, the current implementation will
1880 * oops sooner or later.
1881 */
1882static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1883{
1884 sector_t s;
daeda1cc 1885
68e41a43 1886 switch (bdev->md.meta_dev_idx) {
b411b363
PR
1887 case DRBD_MD_INDEX_INTERNAL:
1888 case DRBD_MD_INDEX_FLEX_INT:
1889 s = drbd_get_capacity(bdev->backing_bdev)
1890 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
68e41a43 1891 drbd_md_first_sector(bdev))
b411b363
PR
1892 : 0;
1893 break;
1894 case DRBD_MD_INDEX_FLEX_EXT:
1895 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1896 drbd_get_capacity(bdev->backing_bdev));
1897 /* clip at maximum size the meta device can support */
1898 s = min_t(sector_t, s,
1899 BM_EXT_TO_SECT(bdev->md.md_size_sect
1900 - bdev->md.bm_offset));
1901 break;
1902 default:
1903 s = min_t(sector_t, DRBD_MAX_SECTORS,
1904 drbd_get_capacity(bdev->backing_bdev));
1905 }
1906 return s;
1907}
1908
1909/**
3a4d4eb3 1910 * drbd_md_ss() - Return the sector number of our meta data super block
b411b363
PR
1911 * @bdev: Meta data block device.
1912 */
3a4d4eb3 1913static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
b411b363 1914{
3a4d4eb3 1915 const int meta_dev_idx = bdev->md.meta_dev_idx;
daeda1cc 1916
3a4d4eb3
LE
1917 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1918 return 0;
daeda1cc 1919
3a4d4eb3
LE
1920 /* Since drbd08, internal meta data is always "flexible".
1921 * position: last 4k aligned block of 4k size */
1922 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1923 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
ae8bf312 1924 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
3a4d4eb3
LE
1925
1926 /* external, some index; this is the old fixed size layout */
1927 return MD_128MB_SECT * bdev->md.meta_dev_idx;
b411b363
PR
1928}
1929
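To make the internal-meta-data case concrete: capacities are counted in 512-byte sectors, so masking with ~7ULL rounds down to 4k alignment (8 sectors), and subtracting 8 steps back exactly one 4k block. With an invented backing device of 1000005 sectors, 1000005 & ~7ULL == 1000000, and minus 8 places the super block at sector 999992, i.e. in the last fully aligned 4k block of the device. For the external fixed-size layout, index N simply places the super block at N * 128MB.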
b411b363
PR
1930static inline void
1931drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1932{
1933 unsigned long flags;
1934 spin_lock_irqsave(&q->q_lock, flags);
1935 list_add_tail(&w->list, &q->q);
b411b363 1936 spin_unlock_irqrestore(&q->q_lock, flags);
8c0785a5 1937 wake_up(&q->q_wait);
b411b363
PR
1938}
1939
15e26f6a
LE
1940static inline void
1941drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1942{
1943 unsigned long flags;
1944 spin_lock_irqsave(&q->q_lock, flags);
1945 if (list_empty_careful(&w->list))
1946 list_add_tail(&w->list, &q->q);
1947 spin_unlock_irqrestore(&q->q_lock, flags);
1948 wake_up(&q->q_wait);
1949}
1950
e334f550
LE
1951static inline void
1952drbd_device_post_work(struct drbd_device *device, int work_bit)
1953{
1954 if (!test_and_set_bit(work_bit, &device->flags)) {
1955 struct drbd_connection *connection =
1956 first_peer_device(device)->connection;
1957 struct drbd_work_queue *q = &connection->sender_work;
1958 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1959 wake_up(&q->q_wait);
1960 }
1961}
1962
b5043c5e
AG
1963extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1964
668700b4
PR
1965/* To get the ack_receiver out of the blocking network stack,
1966 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1967 * and send a ping, we need to send a signal.
1968 * Which signal we send is irrelevant. */
1969static inline void wake_ack_receiver(struct drbd_connection *connection)
b411b363 1970{
668700b4
PR
1971 struct task_struct *task = connection->ack_receiver.task;
1972 if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1973 force_sig(SIGXCPU, task);
b411b363
PR
1974}
1975
bde89a9e 1976static inline void request_ping(struct drbd_connection *connection)
b411b363 1977{
bde89a9e 1978 set_bit(SEND_PING, &connection->flags);
668700b4 1979 wake_ack_receiver(connection);
b411b363
PR
1980}
1981
bde89a9e 1982extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
69a22773 1983extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
bde89a9e 1984extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
dba58587
AG
1985 enum drbd_packet, unsigned int, void *,
1986 unsigned int);
69a22773 1987extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
dba58587
AG
1988 enum drbd_packet, unsigned int, void *,
1989 unsigned int);
b411b363 1990
bde89a9e
AG
1991extern int drbd_send_ping(struct drbd_connection *connection);
1992extern int drbd_send_ping_ack(struct drbd_connection *connection);
69a22773 1993extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
bde89a9e 1994extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
b411b363
PR
1995
1996static inline void drbd_thread_stop(struct drbd_thread *thi)
1997{
81e84650 1998 _drbd_thread_stop(thi, false, true);
b411b363
PR
1999}
2000
2001static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
2002{
81e84650 2003 _drbd_thread_stop(thi, false, false);
b411b363
PR
2004}
2005
2006static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
2007{
81e84650 2008 _drbd_thread_stop(thi, true, false);
b411b363
PR
2009}
2010
2011/* counts how many answer packets we expect from our peer,
2012 * for either explicit application requests,
2013 * or implicit barrier packets as necessary.
2014 * increased:
2015 * w_send_barrier
8554df1c 2016 * _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
b411b363
PR
2017 * it is much easier and equally valid to count what we queue for the
2018 * worker, even before it actually was queued or sent.
2019 * (drbd_make_request_common; recovery path on read io-error)
2020 * decreased:
2021 * got_BarrierAck (respective tl_clear, tl_clear_barrier)
8554df1c 2022 * _req_mod(req, DATA_RECEIVED)
b411b363 2023 * [from receive_DataReply]
8554df1c 2024 * _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
b411b363
PR
2025 * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
2026 * for some reason it is NOT decreased in got_NegAck,
2027 * but in the resulting cleanup code from report_params.
2028 * we should try to remember the reason for that...
8554df1c
AG
2029 * _req_mod(req, SEND_FAILED or SEND_CANCELED)
2030 * _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
b411b363
PR
2031 * [from tl_clear_barrier]
2032 */
b30ab791 2033static inline void inc_ap_pending(struct drbd_device *device)
b411b363 2034{
b30ab791 2035 atomic_inc(&device->ap_pending_cnt);
b411b363
PR
2036}
2037
49559d87 2038#define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
b30ab791 2039 if (atomic_read(&device->which) < 0) \
d0180171 2040 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
49559d87 2041 func, line, \
b30ab791 2042 atomic_read(&device->which))
b411b363 2043
659b2e3b 2044#define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
b30ab791 2045static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
49559d87 2046{
b30ab791
AG
2047 if (atomic_dec_and_test(&device->ap_pending_cnt))
2048 wake_up(&device->misc_wait);
49559d87
PR
2049 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2050}
b411b363
PR
2051
2052/* counts how many resync-related answers we still expect from the peer
2053 * increase decrease
2054 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
25985edc 2055 * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER)
b411b363
PR
2056 * (or P_NEG_ACK with ID_SYNCER)
2057 */
b30ab791 2058static inline void inc_rs_pending(struct drbd_device *device)
b411b363 2059{
b30ab791 2060 atomic_inc(&device->rs_pending_cnt);
b411b363
PR
2061}
2062
659b2e3b 2063#define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
b30ab791 2064static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
49559d87 2065{
b30ab791 2066 atomic_dec(&device->rs_pending_cnt);
49559d87
PR
2067 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2068}
b411b363
PR
2069
2070/* counts how many answers we still need to send to the peer.
2071 * increased on
2072 * receive_Data unless protocol A;
2073 * we need to send a P_RECV_ACK (proto B)
2074 * or P_WRITE_ACK (proto C)
2075 * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
2076 * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
2077 * receive_Barrier_* we need to send a P_BARRIER_ACK
2078 */
b30ab791 2079static inline void inc_unacked(struct drbd_device *device)
b411b363 2080{
b30ab791 2081 atomic_inc(&device->unacked_cnt);
b411b363
PR
2082}
2083
659b2e3b 2084#define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
b30ab791 2085static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
b411b363 2086{
b30ab791 2087 atomic_dec(&device->unacked_cnt);
49559d87 2088 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
b411b363
PR
2089}
2090
659b2e3b 2091#define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
b30ab791 2092static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
b411b363 2093{
b30ab791 2094 atomic_sub(n, &device->unacked_cnt);
49559d87 2095 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
b411b363
PR
2096}
2097
5ab7d2c0
LE
2098static inline bool is_sync_state(enum drbd_conns connection_state)
2099{
2100 return
2101 (connection_state == C_SYNC_SOURCE
2102 || connection_state == C_SYNC_TARGET
2103 || connection_state == C_PAUSED_SYNC_S
2104 || connection_state == C_PAUSED_SYNC_T);
2105}
2106
b411b363 2107/**
b30ab791 2108 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
d1b80853
AG
2109 * @_device: DRBD device.
2110 * @_min_state: Minimum device state required for success.
b411b363 2111 *
b30ab791 2112 * You have to call put_ldev() when finished working with device->ldev.
b411b363 2113 */
d1b80853
AG
2114#define get_ldev_if_state(_device, _min_state) \
2115 (_get_ldev_if_state((_device), (_min_state)) ? \
2116 ({ __acquire(x); true; }) : false)
2117#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
b411b363 2118
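The canonical usage is a guarded critical section (a sketch of the idiom, not code taken from the driver):

static void example_touch_ldev(struct drbd_device *device)
{
	if (!get_ldev(device))
		return;	/* no local disk, or disk state below D_INCONSISTENT */

	/* device->ldev may be dereferenced safely in here */

	put_ldev(device);
}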
b30ab791 2119static inline void put_ldev(struct drbd_device *device)
b411b363 2120{
11f8b2b6 2121 enum drbd_disk_state disk_state = device->state.disk;
ba3c6fb8
LE
2122 /* We must check the state *before* the atomic_dec becomes visible,
2123 * or we have a theoretical race where someone hitting zero,
2124 * while the state is still D_FAILED, will then see D_DISKLESS in the
2125 * condition below and call into destroy, which it must not do, yet. */
b30ab791 2126 int i = atomic_dec_return(&device->local_cnt);
9a0d9d03
LE
2127
2128 /* This may be called from some endio handler,
2129 * so we must not sleep here. */
2130
b411b363 2131 __release(local);
0b0ba1ef 2132 D_ASSERT(device, i >= 0);
e9e6f3ec 2133 if (i == 0) {
11f8b2b6 2134 if (disk_state == D_DISKLESS)
82f59cc6 2135 /* even internal references gone, safe to destroy */
e334f550 2136 drbd_device_post_work(device, DESTROY_DISK);
11f8b2b6 2137 if (disk_state == D_FAILED)
82f59cc6 2138 /* all application IO references gone. */
e334f550
LE
2139 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2140 drbd_device_post_work(device, GO_DISKLESS);
b30ab791 2141 wake_up(&device->misc_wait);
e9e6f3ec 2142 }
b411b363
PR
2143}
2144
2145#ifndef __CHECKER__
b30ab791 2146static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
b411b363
PR
2147{
2148 int io_allowed;
2149
82f59cc6 2150 /* never get a reference while D_DISKLESS */
b30ab791 2151 if (device->state.disk == D_DISKLESS)
82f59cc6
LE
2152 return 0;
2153
b30ab791
AG
2154 atomic_inc(&device->local_cnt);
2155 io_allowed = (device->state.disk >= mins);
b411b363 2156 if (!io_allowed)
b30ab791 2157 put_ldev(device);
b411b363
PR
2158 return io_allowed;
2159}
2160#else
b30ab791 2161extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
b411b363
PR
2162#endif
2163
b411b363
PR
2164/* this throttles on-the-fly application requests
2165 * according to max_buffers settings;
2166 * maybe re-implement using semaphores? */
b30ab791 2167static inline int drbd_get_max_buffers(struct drbd_device *device)
b411b363 2168{
44ed167d
PR
2169 struct net_conf *nc;
2170 int mxb;
2171
2172 rcu_read_lock();
a6b32bc3 2173 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
44ed167d
PR
2174 mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
2175 rcu_read_unlock();
2176
b411b363
PR
2177 return mxb;
2178}
2179
b30ab791 2180static inline int drbd_state_is_stable(struct drbd_device *device)
b411b363 2181{
b30ab791 2182 union drbd_dev_state s = device->state;
b411b363
PR
2183
2184 /* DO NOT add a default clause, we want the compiler to warn us
2185 * for any newly introduced state we may have forgotten to add here */
2186
2187 switch ((enum drbd_conns)s.conn) {
2188 /* new io only accepted when there is no connection, ... */
2189 case C_STANDALONE:
2190 case C_WF_CONNECTION:
2191 /* ... or there is a well established connection. */
2192 case C_CONNECTED:
2193 case C_SYNC_SOURCE:
2194 case C_SYNC_TARGET:
2195 case C_VERIFY_S:
2196 case C_VERIFY_T:
2197 case C_PAUSED_SYNC_S:
2198 case C_PAUSED_SYNC_T:
67531718
PR
2199 case C_AHEAD:
2200 case C_BEHIND:
3719094e 2201 /* transitional states, IO allowed */
b411b363
PR
2202 case C_DISCONNECTING:
2203 case C_UNCONNECTED:
2204 case C_TIMEOUT:
2205 case C_BROKEN_PIPE:
2206 case C_NETWORK_FAILURE:
2207 case C_PROTOCOL_ERROR:
2208 case C_TEAR_DOWN:
2209 case C_WF_REPORT_PARAMS:
2210 case C_STARTING_SYNC_S:
2211 case C_STARTING_SYNC_T:
3719094e
PR
2212 break;
2213
2214 /* Allow IO in BM exchange states with new protocols */
b411b363 2215 case C_WF_BITMAP_S:
a6b32bc3 2216 if (first_peer_device(device)->connection->agreed_pro_version < 96)
3719094e
PR
2217 return 0;
2218 break;
2219
2220 /* no new io accepted in these states */
b411b363
PR
2221 case C_WF_BITMAP_T:
2222 case C_WF_SYNC_UUID:
2223 case C_MASK:
2224 /* not "stable" */
2225 return 0;
2226 }
2227
2228 switch ((enum drbd_disk_state)s.disk) {
2229 case D_DISKLESS:
2230 case D_INCONSISTENT:
2231 case D_OUTDATED:
2232 case D_CONSISTENT:
2233 case D_UP_TO_DATE:
5ca1de03 2234 case D_FAILED:
b411b363
PR
2235 /* disk state is stable as well. */
2236 break;
2237
d942ae44 2238 /* no new io accepted during transitional states */
b411b363 2239 case D_ATTACHING:
b411b363
PR
2240 case D_NEGOTIATING:
2241 case D_UNKNOWN:
2242 case D_MASK:
2243 /* not "stable" */
2244 return 0;
2245 }
2246
2247 return 1;
2248}
2249
b30ab791 2250static inline int drbd_suspended(struct drbd_device *device)
fb22c402 2251{
6bbf53ca 2252 struct drbd_resource *resource = device->resource;
8e0af25f 2253
6bbf53ca 2254 return resource->susp || resource->susp_fen || resource->susp_nod;
fb22c402
PR
2255}
2256
b30ab791 2257static inline bool may_inc_ap_bio(struct drbd_device *device)
b411b363 2258{
b30ab791 2259 int mxb = drbd_get_max_buffers(device);
b411b363 2260
b30ab791 2261 if (drbd_suspended(device))
1b881ef7 2262 return false;
7dbb4386 2263 if (atomic_read(&device->suspend_cnt))
1b881ef7 2264 return false;
b411b363
PR
2265
2266 /* to avoid potential deadlock or bitmap corruption,
2267 * in various places, we only allow new application io
2268 * to start during "stable" states. */
2269
2270 /* no new io accepted when attaching or detaching the disk */
b30ab791 2271 if (!drbd_state_is_stable(device))
1b881ef7 2272 return false;
b411b363
PR
2273
2274 /* since some older kernels don't have atomic_add_unless,
2275 * and we are within the spinlock anyway, we have this workaround. */
b30ab791 2276 if (atomic_read(&device->ap_bio_cnt) > mxb)
1b881ef7 2277 return false;
b30ab791 2278 if (test_bit(BITMAP_IO, &device->flags))
1b881ef7
AG
2279 return false;
2280 return true;
b411b363
PR
2281}
2282
b30ab791 2283static inline bool inc_ap_bio_cond(struct drbd_device *device)
b411b363 2284{
1b881ef7 2285 bool rv = false;
8869d683 2286
0500813f 2287 spin_lock_irq(&device->resource->req_lock);
b30ab791 2288 rv = may_inc_ap_bio(device);
8869d683 2289 if (rv)
b30ab791 2290 atomic_inc(&device->ap_bio_cnt);
0500813f 2291 spin_unlock_irq(&device->resource->req_lock);
8869d683
PR
2292
2293 return rv;
2294}
b411b363 2295
b30ab791 2296static inline void inc_ap_bio(struct drbd_device *device)
8869d683 2297{
b411b363
PR
2298 /* we wait here
2299 * as long as the device is suspended,
2300 * until the bitmap is no longer on the fly during the connection
d942ae44 2301 * handshake, and as long as we would exceed the max_buffers limit.
b411b363
PR
2302 *
2303 * to avoid races with the reconnect code,
2304 * we need to atomic_inc within the spinlock. */
2305
b30ab791 2306 wait_event(device->misc_wait, inc_ap_bio_cond(device));
b411b363
PR
2307}
2308
b30ab791 2309static inline void dec_ap_bio(struct drbd_device *device)
b411b363 2310{
b30ab791
AG
2311 int mxb = drbd_get_max_buffers(device);
2312 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
b411b363 2313
0b0ba1ef 2314 D_ASSERT(device, ap_bio >= 0);
7ee1fb93 2315
b30ab791
AG
2316 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2317 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
84b8c06b
AG
2318 drbd_queue_work(&first_peer_device(device)->
2319 connection->sender_work,
2320 &device->bm_io_work.w);
7ee1fb93
LE
2321 }
2322
b411b363
PR
2323 /* this currently does wake_up for every dec_ap_bio!
2324 * maybe rather introduce some type of hysteresis?
2325 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2326 if (ap_bio < mxb)
b30ab791 2327 wake_up(&device->misc_wait);
b411b363
PR
2328}
2329
b30ab791 2330static inline bool verify_can_do_stop_sector(struct drbd_device *device)
58ffa580 2331{
a6b32bc3
AG
2332 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2333 first_peer_device(device)->connection->agreed_pro_version != 100;
58ffa580
LE
2334}
2335
b30ab791 2336static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
b411b363 2337{
b30ab791
AG
2338 int changed = device->ed_uuid != val;
2339 device->ed_uuid = val;
62b0da3a 2340 return changed;
b411b363
PR
2341}
2342
b30ab791 2343static inline int drbd_queue_order_type(struct drbd_device *device)
b411b363
PR
2344{
2345 /* sorry, we currently have no working implementation
2346 * of distributed TCQ stuff */
2347#ifndef QUEUE_ORDERED_NONE
2348#define QUEUE_ORDERED_NONE 0
2349#endif
2350 return QUEUE_ORDERED_NONE;
2351}
2352
77c556f6
AG
2353static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2354{
ec4a3407 2355 return list_first_entry_or_null(&resource->connections,
77c556f6
AG
2356 struct drbd_connection, connections);
2357}
2358
b411b363 2359#endif