/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)          __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern int disable_sendpage;
extern int allow_oos;
extern unsigned int cn_idx;

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];

/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)
#define ID_SYNCER       (-1ULL)

#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)

struct drbd_conf;

/* to shorten dev_warn(DEV, "msg"); and related statements */
#define DEV (disk_to_dev(mdev->vdisk))

#define D_ASSERT(exp)	if (!(exp)) \
	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)

#define ERR_IF(exp) if (({						\
	int _b = (exp) != 0;						\
	if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n",	\
			__func__, #exp, __FILE__, __LINE__);		\
	_b;								\
	}))

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);

static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(mdev, type);
#else
	return 0;
#endif
}

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

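/* A minimal usage sketch (editor's illustration, modeled on the bio
 * submit path; the surrounding bio/mdev variables are assumed context,
 * not defined here):
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);	 (pretend the meta data write failed)
 *	else
 *		generic_make_request(bio);
 *
 * With CONFIG_DRBD_FAULT_INJECTION disabled, drbd_insert_fault() is a
 * constant 0 and the fault branch compiles away entirely.
 */
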
/* drbd_meta-data.c (still in drbd_main.c) */
/* 4th incarnation of the disk layout. */
#define DRBD_MD_MAGIC (DRBD_MAGIC+4)

extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;

/* on the wire */
enum drbd_packets {
	/* receiver (data socket) */
	P_DATA                = 0x00,
	P_DATA_REPLY          = 0x01, /* Response to P_DATA_REQUEST */
	P_RS_DATA_REPLY       = 0x02, /* Response to P_RS_DATA_REQUEST */
	P_BARRIER             = 0x03,
	P_BITMAP              = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE       = 0x07, /* Used at various times to hint the peer */
	P_DATA_REQUEST        = 0x08, /* Used to ask for a data block */
	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
	P_SYNC_PARAM          = 0x0a,
	P_PROTOCOL            = 0x0b,
	P_UUIDS               = 0x0c,
	P_SIZES               = 0x0d,
	P_STATE               = 0x0e,
	P_SYNC_UUID           = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE       = 0x11,
	P_STATE_CHG_REQ       = 0x12,

	/* asender (meta socket) */
	P_PING                = 0x13,
	P_PING_ACK            = 0x14,
	P_RECV_ACK            = 0x15, /* Used in protocol B */
	P_WRITE_ACK           = 0x16, /* Used in protocol C */
	P_RS_WRITE_ACK        = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
	P_DISCARD_ACK         = 0x18, /* Used in proto C, two-primaries conflict detection */
	P_NEG_ACK             = 0x19, /* Sent if local disk is unusable */
	P_NEG_DREPLY          = 0x1a, /* Local disk is broken... */
	P_NEG_RS_DREPLY       = 0x1b, /* Local disk is broken... */
	P_BARRIER_ACK         = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	/* "new" commands, no longer fitting into the ordering scheme above */

	P_OV_REQUEST          = 0x1e, /* data socket */
	P_OV_REPLY            = 0x1f,
	P_OV_RESULT           = 0x20, /* meta socket */
	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
	P_RS_IS_IN_SYNC       = 0x22, /* meta socket */
	P_SYNC_PARAM89        = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
	/* P_CKPT_FENCE_REQ   = 0x25,  * currently reserved for protocol D */
	/* P_CKPT_DISABLE_REQ = 0x26,  * currently reserved for protocol D */
	P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
	P_OUT_OF_SYNC         = 0x28, /* Mark as out of sync (Outrunning), data socket */
	P_RS_CANCEL           = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */

	P_MAX_CMD             = 0x2A,
	P_MAY_IGNORE          = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
	P_MAX_OPT_CMD         = 0x101,

	/* special command ids for handshake */

	P_HAND_SHAKE_M        = 0xfff1, /* First Packet on the MetaSock */
	P_HAND_SHAKE_S        = 0xfff2, /* First Packet on the Socket */

	P_HAND_SHAKE          = 0xfffe	/* FIXED for the next century! */
};

static inline const char *cmdname(enum drbd_packets cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]               = "Data",
		[P_DATA_REPLY]         = "DataReply",
		[P_RS_DATA_REPLY]      = "RSDataReply",
		[P_BARRIER]            = "Barrier",
		[P_BITMAP]             = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]      = "UnplugRemote",
		[P_DATA_REQUEST]       = "DataRequest",
		[P_RS_DATA_REQUEST]    = "RSDataRequest",
		[P_SYNC_PARAM]         = "SyncParam",
		[P_SYNC_PARAM89]       = "SyncParam89",
		[P_PROTOCOL]           = "ReportProtocol",
		[P_UUIDS]              = "ReportUUIDs",
		[P_SIZES]              = "ReportSizes",
		[P_STATE]              = "ReportState",
		[P_SYNC_UUID]          = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]     = "AuthChallenge",
		[P_AUTH_RESPONSE]      = "AuthResponse",
		[P_PING]               = "Ping",
		[P_PING_ACK]           = "PingAck",
		[P_RECV_ACK]           = "RecvAck",
		[P_WRITE_ACK]          = "WriteAck",
		[P_RS_WRITE_ACK]       = "RSWriteAck",
		[P_DISCARD_ACK]        = "DiscardAck",
		[P_NEG_ACK]            = "NegAck",
		[P_NEG_DREPLY]         = "NegDReply",
		[P_NEG_RS_DREPLY]      = "NegRSDReply",
		[P_BARRIER_ACK]        = "BarrierAck",
		[P_STATE_CHG_REQ]      = "StateChgRequest",
		[P_STATE_CHG_REPLY]    = "StateChgReply",
		[P_OV_REQUEST]         = "OVRequest",
		[P_OV_REPLY]           = "OVReply",
		[P_OV_RESULT]          = "OVResult",
		[P_CSUM_RS_REQUEST]    = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]      = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]  = "CBitmap",
		[P_DELAY_PROBE]        = "DelayProbe",
		[P_OUT_OF_SYNC]        = "OutOfSync",
		[P_MAX_CMD]            = NULL,
	};

	if (cmd == P_HAND_SHAKE_M)
		return "HandShakeM";
	if (cmd == P_HAND_SHAKE_S)
		return "HandShakeS";
	if (cmd == P_HAND_SHAKE)
		return "HandShake";
	if (cmd >= P_MAX_CMD)
		return "Unknown";
	return cmdnames[cmd];
}

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
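
/* Worked example (editor's sketch): with bit_offset == 100, a 64-bit
 * host computes word_offset = 100 >> 6 = 1, i.e. the native word that
 * starts at bit 64.  A 32-bit host computes 100 >> 5 = 3 and then
 * clears the low bit, giving word_offset = 2, which is the 32-bit word
 * that also starts at bit 64 -- both architectures land on the same
 * 64-bit aligned boundary. */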

#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* This is the layout for a packet on the wire.
 * The byteorder is the network byte order.
 *     (except block_id and barrier fields.
 *	these are pointers to local structs
 *	and have no relevance for the partner,
 *	which just echoes them as received.)
 *
 * NOTE that the payload starts at a long aligned offset,
 * regardless of 32 or 64 bit arch!
 */
struct p_header80 {
	u32	  magic;
	u16	  command;
	u16	  length;	/* bytes of data after this header */
	u8	  payload[0];
} __packed;

/* Header for big packets, used for data packets exceeding 64kB */
struct p_header95 {
	u16	  magic;	/* use DRBD_MAGIC_BIG here */
	u16	  command;
	u32	  length;	/* Use only 24 bits of that. Ignore the highest 8 bit. */
	u8	  payload[0];
} __packed;

union p_header {
	struct p_header80 h80;
	struct p_header95 h95;
};
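
/* How a receiver can tell the two headers apart (editor's sketch; the
 * magic constants DRBD_MAGIC and DRBD_MAGIC_BIG come from linux/drbd.h,
 * and the 24-bit length mask follows the comment on p_header95.length):
 *
 *	union p_header *h = ... received bytes ...;
 *	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
 *		cmd  = be16_to_cpu(h->h80.command);
 *		size = be16_to_cpu(h->h80.length);
 *	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
 *		cmd  = be16_to_cpu(h->h95.command);
 *		size = be32_to_cpu(h->h95.length) & 0x00ffffff;
 *	}
 */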

/*
 * short commands, packets without payload, plain p_header:
 *   P_PING
 *   P_PING_ACK
 *   P_BECOME_SYNC_TARGET
 *   P_BECOME_SYNC_SOURCE
 *   P_UNPLUG_REMOTE
 */

/*
 * commands with out-of-struct payload:
 *   P_BITMAP    (no additional fields)
 *   P_DATA, P_DATA_REPLY (see p_data)
 *   P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
 */

/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER	      1 /* deprecated */
#define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
#define DP_MAY_SET_IN_SYNC    4
#define DP_UNPLUG             8 /* not used anymore   */
#define DP_FUA               16 /* equals REQ_FUA     */
#define DP_FLUSH             32 /* equals REQ_FLUSH   */
#define DP_DISCARD           64 /* equals REQ_DISCARD */

struct p_data {
	union p_header head;
	u64	    sector;    /* 64 bits sector number */
	u64	    block_id;  /* to identify the request in protocol B&C */
	u32	    seq_num;
	u32	    dp_flags;
} __packed;

/*
 * commands which share a struct:
 *  p_block_ack:
 *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *   P_DISCARD_ACK (proto C, two-primaries conflict detection)
 *  p_block_req:
 *   P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {
	struct p_header80 head;
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;


struct p_block_req {
	struct p_header80 head;
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

/*
 * commands with their own struct for additional fields:
 *   P_HAND_SHAKE
 *   P_BARRIER
 *   P_BARRIER_ACK
 *   P_SYNC_PARAM
 *   ReportParams
 */

struct p_handshake {
	struct p_header80 head;	/* 8 bytes */
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	/* should be more than enough for future enhancements
	 * for now, feature_flags and the reserved array shall be zero.
	 */

	u32 _pad;
	u64 reserved[7];
} __packed;
/* 80 bytes, FIXED for the next century */

struct p_barrier {
	struct p_header80 head;
	u32 barrier;	/* barrier number _handle_ only */
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

struct p_barrier_ack {
	struct p_header80 head;
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	struct p_header80 head;
	u32 rate;

	/* Since protocol version 88 and higher. */
	char verify_alg[0];
} __packed;

struct p_rs_param_89 {
	struct p_header80 head;
	u32 rate;
	/* protocol version 89: */
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

struct p_rs_param_95 {
	struct p_header80 head;
	u32 rate;
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
	u32 c_plan_ahead;
	u32 c_delay_target;
	u32 c_fill_target;
	u32 c_max_rate;
} __packed;

enum drbd_conn_flags {
	CF_WANT_LOSE = 1,
	CF_DRY_RUN = 2,
};

struct p_protocol {
	struct p_header80 head;
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

	/* Since protocol version 87 and higher. */
	char integrity_alg[0];

} __packed;

struct p_uuids {
	struct p_header80 head;
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	struct p_header80 head;
	u64	    uuid;
} __packed;

struct p_sizes {
	struct p_header80 head;
	u64	    d_size;  /* size of disk */
	u64	    u_size;  /* user requested size */
	u64	    c_size;  /* current exported size */
	u32	    max_bio_size;  /* Maximal size of a BIO */
	u16	    queue_order_type;  /* not yet implemented in DRBD */
	u16	    dds_flags; /* use enum dds_flags here. */
} __packed;

struct p_state {
	struct p_header80 head;
	u32	    state;
} __packed;

struct p_req_state {
	struct p_header80 head;
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	struct p_header80 head;
	u32	    retcode;
} __packed;

struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_discard {
	struct p_header80 head;
	u64	    block_id;
	u32	    seq_num;
	u32	    pad;
} __packed;

struct p_block_desc {
	struct p_header80 head;
	u64 sector;
	u32 blksize;
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

/* Valid values for the encoding field.
 * Bump proto version when changing this. */
enum drbd_bitmap_code {
	/* RLE_VLI_Bytes = 0,
	 * and other bit variants had been defined during
	 * algorithm evaluation. */
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	struct p_header80 head;
	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
	 * (encoding & 0x80): polarity (set/unset) of first runlength
	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
	 * used to pad up to head.length bytes
	 */
	u8 encoding;

	u8 code[0];
} __packed;

struct p_delay_probe93 {
	struct p_header80 head;
	u32     seq_num; /* sequence number to match the two probe packets */
	u32     offset;  /* usecs the probe got sent after the reference time point */
} __packed;

/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static inline void
DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static inline int
DCBP_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static inline void
DCBP_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static inline int
DCBP_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}

static inline void
DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & ~(0x7 << 4)) | (n << 4);
}
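
/* Encoding-byte roundtrip (editor's sketch): setting code RLE_VLI_Bits (2),
 * start polarity 1 and 3 pad bits yields
 *	encoding = 0x80 | (3 << 4) | 2 = 0xb2
 * and the getters recover exactly those values:
 *	DCBP_get_code     -> 0xb2 & 0x0f        = 2
 *	DCBP_get_start    -> (0xb2 & 0x80) != 0 = 1
 *	DCBP_get_pad_bits -> (0xb2 >> 4) & 0x7  = 3
 */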

/* one bitmap packet, including the p_header,
 * should fit within one _architecture independent_ page.
 * so we need to use the fixed size 4KiB page size
 * most architectures have used for a long time.
 */
#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
#if (PAGE_SIZE < 4096)
/* drbd_send_bitmap / receive_bitmap would break horribly */
#error "PAGE_SIZE too small"
#endif
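
/* For scale (editor's arithmetic): struct p_header80 is 8 bytes, so one
 * bitmap packet carries 4096 - 8 = 4088 payload bytes, i.e. 511 longs on
 * a 64-bit host.  At 4 KiB of storage per bit, that is 4088 * 8 * 4 KiB,
 * roughly 128 MiB of storage described per plain-text bitmap packet. */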

union p_polymorph {
	union p_header           header;
	struct p_handshake       handshake;
	struct p_data            data;
	struct p_block_ack       block_ack;
	struct p_barrier         barrier;
	struct p_barrier_ack     barrier_ack;
	struct p_rs_param_89     rs_param_89;
	struct p_rs_param_95     rs_param_95;
	struct p_protocol        protocol;
	struct p_sizes           sizes;
	struct p_uuids           uuids;
	struct p_state           state;
	struct p_req_state       req_state;
	struct p_req_state_reply req_state_reply;
	struct p_block_req       block_req;
	struct p_delay_probe93   delay_probe93;
	struct p_rs_uuid         rs_uuid;
	struct p_block_desc      block_desc;
} __packed;

/**********************************************************************/
enum drbd_thread_state {
	None,
	Running,
	Exiting,
	Restarting
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_conf *mdev;
	int reset_cpu_mask;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}

struct drbd_work;
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
	struct list_head list;
	drbd_work_cb cb;
};

struct drbd_request {
	struct drbd_work w;
	struct drbd_conf *mdev;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_endio_pri(). */
	struct bio *private_bio;

	struct hlist_node collision;
	sector_t sector;
	unsigned int size;
	unsigned int epoch; /* barrier_nr */

	/* barrier_nr: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * starting a new epoch...
	 */

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */
	unsigned long rq_state; /* see comments above _req_mod() */
	int seq_num;
	unsigned long start_time;
};

struct drbd_tl_epoch {
	struct drbd_work w;
	struct list_head requests; /* requests before */
	struct drbd_tl_epoch *next; /* pointer to the next barrier */
	unsigned int br_number;  /* the barrier's identifier. */
	int n_writes;	/* number of requests attached before this barrier */
};

struct drbd_request;

/* These Tl_epoch_entries may be on one of these lists:
   active_ee .. data packet being written
   sync_ee   .. syncer block being written
   done_ee   .. block written, need to send P_WRITE_ACK
   read_ee   .. [RS]P_DATA_REQUEST being read
*/

struct drbd_epoch {
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct drbd_wq_barrier {
	struct drbd_work w;
	struct completion done;
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_epoch_entry {
	struct drbd_work w;
	struct hlist_node collision;
	struct drbd_epoch *epoch; /* for writes */
	struct drbd_conf *mdev;
	struct page *pages;
	atomic_t pending_bios;
	unsigned int size;
	/* see comments on ee flag bits below */
	unsigned long flags;
	sector_t sector;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per epoch entry.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)

/* global flag bits */
enum {
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SEND_PING,		/* whether asender should send a ping asap */

	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CLUSTER_ST_CHANGE,	/* Cluster wide state change going on... */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */
	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
	WAS_IO_ERROR,		/* Local disk failed, returned an IO error */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	NET_CONGESTED,		/* The data socket is congested */

	CONFIG_PENDING,		/* serialization of (re)configuration requests.
				 * if set, also prevents the device from dying */
	DEVICE_DYING,		/* device became unconfigured,
				 * but worker thread is still handling the cleanup.
				 * reconfiguring (nl_disk_conf, nl_net_conf) is disallowed,
				 * while this is set. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, misc wait gets woken */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
};

struct drbd_bitmap; /* opaque for drbd_conf */

/* definition of bits in bm_flags to be used in drbd_bm_lock
 * and drbd_bitmap_io and friends. */
enum bm_flag {
	/* do we need to kfree, or vfree bm_pages? */
	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */

	/* currently locked for bulk operation */
	BM_LOCKED_MASK = 0x7,

	/* in detail, that is: */
	BM_DONT_CLEAR = 0x1,
	BM_DONT_SET = 0x2,
	BM_DONT_TEST = 0x4,

	/* (test bit, count bit) allowed (common case) */
	BM_LOCKED_TEST_ALLOWED = 0x3,

	/* testing bits, as well as setting new bits allowed, but clearing bits
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
	 * requires sending of "out-of-sync" information, though. */
	BM_LOCKED_SET_ALLOWED = 0x1,

	/* clear is not expected while bitmap is locked for bulk operation */
};

/* TODO sort members for performance
 * MAYBE group them further */

/* THINK maybe we actually want to use the default "event/%s" worker threads
 * or similar in linux 2.6, which uses per cpu data and threads.
 */
struct drbd_work_queue {
	struct list_head q;
	struct semaphore s; /* producers up it, worker down()s it */
	spinlock_t q_lock;  /* to protect the list. */
};

struct drbd_socket {
	struct drbd_work_queue work;
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	union p_polymorph sbuf;
	union p_polymorph rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to al area */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* u32 al_nr_extents;	   important for restoring the AL
	 * is stored into  sync_conf.al_extents, which in turn
	 * gets applied to act_log->nr_elements
	 */
};

/* for sync_conf and other types... */
#define NL_PACKET(name, number, fields) struct name { fields };
#define NL_INTEGER(pn,pr,member) int member;
#define NL_INT64(pn,pr,member) __u64 member;
#define NL_BIT(pn,pr,member) unsigned member:1;
#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
#include "linux/drbd_nl.h"

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf dc; /* The user provided config... */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct drbd_conf *mdev;
	struct completion event;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	enum bm_flag flags;
	int (*io_fn)(struct drbd_conf *mdev);
	void (*done)(struct drbd_conf *mdev, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
};

struct fifo_buffer {
	int *values;
	unsigned int head_index;
	unsigned int size;
};

struct drbd_conf {
	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
	struct syncer_conf sync_conf;
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	struct drbd_socket data; /* data/barrier/cstate/parameter packets */
	struct drbd_socket meta; /* ping/ack (metadata) packets */
	int agreed_pro_version;  /* actually used protocol version */
	unsigned long last_received; /* in jiffies, either socket */
	unsigned int ko_count;
	struct drbd_work  resync_work,
			  unplug_work,
			  go_diskless,
			  md_sync_work,
			  start_resync_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
	struct timer_list start_resync_timer;
	struct timer_list request_timer;
#ifdef DRBD_DEBUG_MD_SYNC
	struct {
		unsigned int line;
		const char* func;
	} last_md_mark_dirty;
#endif

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	wait_queue_head_t net_cnt_wait;
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t net_cnt;	 /* Users of net_conf */
	spinlock_t req_lock;
	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
	struct drbd_tl_epoch *newest_tle;
	struct drbd_tl_epoch *oldest_tle;
	struct list_head out_of_sequence_requests;
	struct hlist_head *tl_hash;
	unsigned int tl_hash_s;

	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of resync blocks that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* marks' time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;

	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;
	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	enum write_ordering_e write_ordering;
	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* send ack */
	struct list_head read_ee;   /* IO in progress (any read) */
	struct list_head net_ee;    /* zero-copy network send in progress */
	struct hlist_head *ee_hash; /* is protected by req_lock! */
	unsigned int ee_hash_s;

	/* this one is protected by ee_lock, single thread */
	struct drbd_epoch_entry *last_write_w_barrier;

	int next_barrier_nr;
	struct hlist_head *app_reads_hash; /* is protected by req_lock */
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct page *md_io_page;	/* one page buffer for md_io */
	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
	struct mutex md_io_mutex;	/* protects the md_io_buffer */
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	int al_tr_pos;   /* position of the next transaction in the journal */
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
	struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
	void *int_dig_out;
	void *int_dig_in;
	void *int_dig_vv;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	cpumask_var_t cpu_mask;
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex state_mutex;
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer rs_plan_s; /* correction values of resync planner */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	int rs_planed;    /* resync sectors already planned */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
	int peer_max_bio_size;
	int local_max_bio_size;
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
	struct drbd_conf *mdev;

	mdev = minor < minor_count ? minor_table[minor] : NULL;

	return mdev;
}

static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}

/* returns 1 if it was successful,
 * returns 0 if there was no data socket.
 * so wherever you are going to use the data.socket, e.g. do
 *	if (!drbd_get_data_sock(mdev))
 *		return 0;
 *	CODE();
 *	drbd_put_data_sock(mdev);
 */
static inline int drbd_get_data_sock(struct drbd_conf *mdev)
{
	mutex_lock(&mdev->data.mutex);
	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (unlikely(mdev->data.socket == NULL)) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}
	return 1;
}

static inline void drbd_put_data_sock(struct drbd_conf *mdev)
{
	mutex_unlock(&mdev->data.mutex);
}
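
/* A concrete instance of that pattern (editor's sketch; "buf", "size"
 * and "ok" are assumed locals, not part of this header):
 *
 *	if (!drbd_get_data_sock(mdev))
 *		return 0;
 *	ok = (drbd_send(mdev, mdev->data.socket, buf, size, 0) == size);
 *	drbd_put_data_sock(mdev);
 *	return ok;
 *
 * Holding data.mutex between get and put also serializes use of the
 * sbuf send buffer embedded in struct drbd_socket.
 */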

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum chg_state_flags {
	CS_HARD	= 1,
	CS_VERBOSE = 2,
	CS_WAIT_COMPLETE = 4,
	CS_SERIALIZE    = 8,
	CS_ORDERED      = CS_WAIT_COMPLETE + CS_SERIALIZE,
};

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
					    enum chg_state_flags f,
					    union drbd_state mask,
					    union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
			union drbd_state);
extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
					      union drbd_state,
					      union drbd_state,
					      enum chg_state_flags);
extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
					   enum chg_state_flags,
					   struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
			union drbd_state, int);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
extern void drbd_free_resources(struct drbd_conf *mdev);
extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_conf *mdev);
extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
extern void drbd_free_sock(struct drbd_conf *mdev);
extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
			void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
			enum drbd_packets cmd, struct p_header80 *h,
			size_t size, unsigned msg_flags);
#define USE_DATA_SOCKET 1
#define USE_META_SOCKET 0
extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
			enum drbd_packets cmd, struct p_header80 *h,
			size_t size);
extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
			char *data, size_t size);
extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
			u32 set_size);
extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct drbd_epoch_entry *e);
extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_block_req *rp);
extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
			   struct drbd_epoch_entry *e);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
				   sector_t sector, int size,
				   void *digest, int digest_size,
				   enum drbd_packets cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_conf *mdev);
extern int _drbd_send_bitmap(struct drbd_conf *mdev);
extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);

extern void drbd_md_sync(struct drbd_conf *mdev);
extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
#else
#define drbd_md_mark_dirty(m)	drbd_md_mark_dirty_(m, __LINE__ , __func__ )
extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
		unsigned int line, const char *func);
#endif
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
				 int (*io_fn)(struct drbd_conf *),
				 void (*done)(struct drbd_conf *, int),
				 char *why, enum bm_flag flags);
extern int drbd_bitmap_io(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);

/* Meta data layout
   We reserve a 128MB Block (4k aligned)
   * either at the end of the backing device
   * or on a separate meta data device. */

#define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
/* The following numbers are sectors */
#define MD_AL_OFFSET 8	    /* 8 Sectors after start of meta area */
#define MD_AL_MAX_SIZE 64   /* = 32 kB AL log; ~3776 extents; ~14 GB of storage */
/* Allows up to about 3.8TB */
#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
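
/* Resulting layout, in sectors from the start of the meta data area
 * (editor's arithmetic from the defines above):
 *	sector      0 .. super block
 *	sector      8 .. activity log, 64 sectors (MD_AL_OFFSET, MD_AL_MAX_SIZE)
 *	sector     72 .. bitmap (MD_BM_OFFSET)
 *	sector 262144 .. end of the 128 MB reservation (MD_RESERVED_SECT)
 */
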
/* Since the smallest IO unit is usually 512 bytes */
#define MD_SECTOR_SHIFT	 9
#define MD_SECTOR_SIZE	 (1<<MD_SECTOR_SHIFT)

/* activity log */
#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1)   /* 61 ; Extents per 512B sector */
#define AL_EXTENT_SHIFT 22		 /* One extent represents 4M Storage */
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
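
/* Editor's arithmetic: one 512-byte AL sector carries a 12-byte header,
 * the rest is divided into 8-byte slots with one slot held back by the
 * formula, so (512 - 12)/8 - 1 = 61 extent numbers fit per sector; at
 * 4 MB per extent, one AL sector covers 61 * 4 MB = 244 MB of storage. */
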
#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
 * per sector of on disk bitmap */
#define BM_EXT_SHIFT	 (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3)  /* = 24 */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to kilobyte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

1371 | /* in which _bitmap_ extent (resp. sector) the bit for a certain | |
1372 | * _storage_ sector is located in */ | |
1373 | #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9)) | |
1374 | ||
1375 | /* how much _storage_ sectors we have per bitmap sector */ | |
1376 | #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9)) | |
1377 | #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1) | |
1378 | ||
1379 | /* in one sector of the bitmap, we have this many activity_log extents. */ | |
1380 | #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT)) | |
1381 | #define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) | |
1382 | ||
1383 | #define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT) | |
1384 | #define BM_BLOCKS_PER_BM_EXT_MASK ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1) | |
1385 | ||
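/* Worked conversions (editorial sketch): one bit covers BM_SECT_PER_BIT = 8
 * storage sectors (4k). One 512 byte bitmap sector holds 4096 bits and thus
 * describes 4096 * 4k = 16M of storage (BM_EXT_SIZE). So, e.g., storage
 * sector 40000 maps to bit BM_SECT_TO_BIT(40000) = 40000 >> 3 = 5000,
 * which lives in bitmap extent BM_SECT_TO_EXT(40000) = 40000 >> 15 = 1.
 */
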
/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 * of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	    bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	   `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXT )		 //   4
*/

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
#define DRBD_MAX_SECTORS_BM \
	  ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
#endif

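/* Editorial note on the arithmetic above: with the fixed-size meta data
 * area, DRBD_MAX_SECTORS_BM evaluates to
 *
 *	(262144 - 72) * (1LL << 15) = 8587575296 sectors,
 *
 * i.e. each of the ~262k bitmap sectors describes 16M of storage, about
 * 4 TiB of 512 byte sectors in total; that is the limit the "3.8TB"
 * comment above refers to, and anything larger needs the "flexible"
 * layout governed by DRBD_MAX_SECTORS_FLEX.
 */
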
/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
 * With a value of 8, all IO in one 128K block makes it to the same slot of
 * the hash table. */
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32 KiB of data */

/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15

1436 | extern int drbd_bm_init(struct drbd_conf *mdev); | |
02d9a94b | 1437 | extern int drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits); |
b411b363 PR |
1438 | extern void drbd_bm_cleanup(struct drbd_conf *mdev); |
1439 | extern void drbd_bm_set_all(struct drbd_conf *mdev); | |
1440 | extern void drbd_bm_clear_all(struct drbd_conf *mdev); | |
4b0715f0 | 1441 | /* set/clear/test only a few bits at a time */ |
b411b363 PR |
1442 | extern int drbd_bm_set_bits( |
1443 | struct drbd_conf *mdev, unsigned long s, unsigned long e); | |
1444 | extern int drbd_bm_clear_bits( | |
1445 | struct drbd_conf *mdev, unsigned long s, unsigned long e); | |
4b0715f0 LE |
1446 | extern int drbd_bm_count_bits( |
1447 | struct drbd_conf *mdev, const unsigned long s, const unsigned long e); | |
1448 | /* bm_set_bits variant for use while holding drbd_bm_lock, | |
1449 | * may process the whole bitmap in one go */ | |
b411b363 PR |
1450 | extern void _drbd_bm_set_bits(struct drbd_conf *mdev, |
1451 | const unsigned long s, const unsigned long e); | |
1452 | extern int drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr); | |
1453 | extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); | |
19f843aa | 1454 | extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); |
b411b363 PR |
1455 | extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); |
1456 | extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); | |
1457 | extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, | |
1458 | unsigned long al_enr); | |
1459 | extern size_t drbd_bm_words(struct drbd_conf *mdev); | |
1460 | extern unsigned long drbd_bm_bits(struct drbd_conf *mdev); | |
1461 | extern sector_t drbd_bm_capacity(struct drbd_conf *mdev); | |
4b0715f0 LE |
1462 | |
1463 | #define DRBD_END_OF_BITMAP (~(unsigned long)0) | |
b411b363 PR |
1464 | extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); |
1465 | /* bm_find_next variants for use while you hold drbd_bm_lock() */ | |
1466 | extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); | |
1467 | extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo); | |
0778286a | 1468 | extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev); |
b411b363 PR |
1469 | extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev); |
1470 | extern int drbd_bm_rs_done(struct drbd_conf *mdev); | |
1471 | /* for receive_bitmap */ | |
1472 | extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, | |
1473 | size_t number, unsigned long *buffer); | |
19f843aa | 1474 | /* for _drbd_send_bitmap */ |
b411b363 PR |
1475 | extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, |
1476 | size_t number, unsigned long *buffer); | |
1477 | ||
20ceb2b2 | 1478 | extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags); |
b411b363 | 1479 | extern void drbd_bm_unlock(struct drbd_conf *mdev); |
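
/* An illustrative walk over all out-of-sync bits using the locked variants
 * above (editorial sketch, not a call sequence taken from the drivers;
 * handle_oos_sector() is a hypothetical callback, and we assume the
 * BM_LOCKED_TEST_ALLOWED flag from enum bm_flag):
 *
 *	unsigned long bit;
 *
 *	drbd_bm_lock(mdev, "oos walk", BM_LOCKED_TEST_ALLOWED);
 *	for (bit = _drbd_bm_find_next(mdev, 0);
 *	     bit != DRBD_END_OF_BITMAP;
 *	     bit = _drbd_bm_find_next(mdev, bit + 1))
 *		handle_oos_sector(mdev, BM_BIT_TO_SECT(bit));
 *	drbd_bm_unlock(mdev);
 */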

/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* epoch entries */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

extern struct page *drbd_pp_pool; /* drbd's page pool */
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

extern rwlock_t global_state_lock;

extern struct drbd_conf *drbd_new_device(unsigned int minor);
extern void drbd_free_mdev(struct drbd_conf *mdev);

extern int proc_details;

/* drbd_req */
extern int drbd_make_request(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


/* drbd_nl.c */
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
		enum drbd_role new_role, int force);
extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);

/* drbd_worker.c */
extern int drbd_worker(struct drbd_thread *thi);
extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);
/* maybe rather drbd_main.c ? */
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
		struct drbd_backing_dev *bdev, sector_t sector, int rw);
extern void drbd_ov_oos_found(struct drbd_conf *, sector_t, int);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);

static inline void ov_oos_print(struct drbd_conf *mdev)
{
	if (mdev->ov_last_oos_size) {
		dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
			(unsigned long long)mdev->ov_last_oos_start,
			(unsigned long)mdev->ov_last_oos_size);
	}
	mdev->ov_last_oos_size = 0;
}


extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);
extern void start_resync_timer_fn(unsigned long data);

/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
					      u64 id,
					      sector_t sector,
					      unsigned int data_size,
					      gfp_t gfp_mask) __must_hold(local);
extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		int is_net);
#define drbd_free_ee(m, e)	drbd_free_some_ee(m, e, 0)
#define drbd_free_net_ee(m, e)	drbd_free_some_ee(m, e, 1)
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
		struct list_head *head);
extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
		struct list_head *head);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
extern void drbd_flush_workqueue(struct drbd_conf *mdev);

/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
 * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
			char __user *optval, int optlen)
{
	int err;
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, optval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, optval,
					    optlen);
	return err;
}

static inline void drbd_tcp_cork(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int __user val = 0;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	int __user val = 2;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char __user *)&val, sizeof(val));
}
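
/* Typical batching pattern with the cork helpers above (editorial sketch;
 * the send_*() names are hypothetical):
 *
 *	drbd_tcp_cork(mdev->data.socket);
 *	send_block_header(mdev);
 *	send_block_payload(mdev);
 *	drbd_tcp_uncork(mdev->data.socket);
 *
 * Corking keeps TCP from pushing the small header out in its own segment;
 * uncorking flushes whatever is left in the send queue.
 */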

void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c */
extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
		sector_t sector, int size);
extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);


/* drbd_nl.c */

void drbd_nl_cleanup(void);
int __init drbd_nl_init(void);
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
void drbd_bcast_sync_progress(struct drbd_conf *mdev);
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e);


/**
 * DOC: DRBD State macros
 *
 * These macros are used to express state changes in easily readable form.
 *
 * The NS macros expand to a mask and a value that can be bit-or'ed onto the
 * current state as soon as the spinlock (req_lock) was taken.
 *
 * The _NS macros are used for state functions that get called with the
 * spinlock held. These macros expand directly to the new state value.
 *
 * Besides the basic forms NS() and _NS(), additional _?NS[23] variants are
 * defined to express state changes that affect more than one aspect of the
 * state.
 *
 * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
 * means that the network connection was established and that the peer
 * is in secondary role.
 */
#define role_MASK R_MASK
#define peer_MASK R_MASK
#define disk_MASK D_MASK
#define pdsk_MASK D_MASK
#define conn_MASK C_MASK
#define susp_MASK 1
#define user_isp_MASK 1
#define aftr_isp_MASK 1
#define susp_nod_MASK 1
#define susp_fen_MASK 1

#define NS(T, S) \
	({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val.T3 = (S3); val; })

#define _NS(D, T, S) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
#define _NS2(D, T1, S1, T2, S2) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	__ns.T2 = (S2); __ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	__ns.T2 = (S2); __ns.T3 = (S3); __ns; })

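/* Editorial example: a call such as
 *
 *	drbd_force_state(mdev, NS(disk, D_FAILED));
 *
 * expands to a (mask, val) pair where only the disk field of the mask is
 * set to D_MASK and only the disk field of the value is D_FAILED, so all
 * other aspects of the state are left untouched by the change.
 */
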
/*
 * inline helper functions
 *************************/

/* see also page_chain_add and friends in drbd_receiver.c */
static inline struct page *page_chain_next(struct page *page)
{
	return (struct page *)page_private(page);
}
#define page_chain_for_each(page) \
	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
			page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
	for (; page && ({ n = page_chain_next(page); 1; }); page = n)

static inline int drbd_bio_has_active_page(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, bio, i, 0) {
		if (page_count(bvec->bv_page) > 1)
			return 1;
	}

	return 0;
}

static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
{
	struct page *page = e->pages;
	page_chain_for_each(page) {
		if (page_count(page) > 1)
			return 1;
	}
	return 0;
}


static inline void drbd_state_lock(struct drbd_conf *mdev)
{
	wait_event(mdev->misc_wait,
		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
}

static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
1784 | ||
bf885f8a AG |
1785 | static inline enum drbd_state_rv |
1786 | _drbd_set_state(struct drbd_conf *mdev, union drbd_state ns, | |
1787 | enum chg_state_flags flags, struct completion *done) | |
b411b363 | 1788 | { |
bf885f8a | 1789 | enum drbd_state_rv rv; |
b411b363 PR |
1790 | |
1791 | read_lock(&global_state_lock); | |
1792 | rv = __drbd_set_state(mdev, ns, flags, done); | |
1793 | read_unlock(&global_state_lock); | |
1794 | ||
1795 | return rv; | |
1796 | } | |
1797 | ||
1798 | /** | |
1799 | * drbd_request_state() - Reqest a state change | |
1800 | * @mdev: DRBD device. | |
1801 | * @mask: mask of state bits to change. | |
1802 | * @val: value of new state bits. | |
1803 | * | |
1804 | * This is the most graceful way of requesting a state change. It is verbose | |
1805 | * quite verbose in case the state change is not possible, and all those | |
1806 | * state changes are globally serialized. | |
1807 | */ | |
1808 | static inline int drbd_request_state(struct drbd_conf *mdev, | |
1809 | union drbd_state mask, | |
1810 | union drbd_state val) | |
1811 | { | |
1812 | return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED); | |
1813 | } | |
1814 | ||
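/* Editorial example of a graceful request: trying to promote the device
 * and checking the enum drbd_state_rv result for success (the -EIO error
 * handling is hypothetical):
 *
 *	if (drbd_request_state(mdev, NS(role, R_PRIMARY)) < SS_SUCCESS)
 *		return -EIO;
 */
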
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
{
	switch (mdev->ldev->dc.on_io_error) {
	case EP_PASS_ON:
		if (!forcedetach) {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Local IO failed in %s.\n", where);
			if (mdev->state.disk > D_INCONSISTENT)
				_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
			break;
		}
		/* NOTE fall through to detach case if forcedetach set */
	case EP_DETACH:
	case EP_CALL_HELPER:
		set_bit(WAS_IO_ERROR, &mdev->flags);
		if (mdev->state.disk > D_FAILED) {
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV,
				"Local IO failed in %s. Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
 * @mdev:	 DRBD device.
 * @error:	 Error code passed to the IO completion callback
 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
 *
 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
	int error, int forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&mdev->req_lock, flags);
		__drbd_chk_io_error_(mdev, forcedetach, where);
		spin_unlock_irqrestore(&mdev->req_lock, flags);
	}
}


/**
 * drbd_md_first_sector() - Returns the first sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * BTW, for internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_AL_OFFSET - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset + bdev->md.md_size_sect;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
}

/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * returns the capacity we announce to our peer. we clip ourselves at the
 * various MAX_SECTORS, because if we don't, current implementation will
 * oops sooner or later
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

/**
 * drbd_md_ss__() - Return the sector number of our meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
				    struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	default: /* external, some index */
		return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
	case DRBD_MD_INDEX_INTERNAL:
		/* with drbd08, internal meta data is always "flexible" */
	case DRBD_MD_INDEX_FLEX_INT:
		/* sizeof(struct md_on_disk_07) == 4k
		 * position: last 4k aligned block of 4k size */
		if (!bdev->backing_bdev) {
			if (__ratelimit(&drbd_ratelimit_state)) {
				dev_err(DEV, "bdev->backing_bdev==NULL\n");
				dump_stack();
			}
			return 0;
		}
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
			- MD_AL_OFFSET;
	case DRBD_MD_INDEX_FLEX_EXT:
		return 0;
	}
}
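
/* Editorial worked example for the internal/flex-internal case above:
 * a backing device of 2000005 sectors is first rounded down to a 4k
 * boundary (2000005 & ~7ULL == 2000000), then the superblock is placed
 * MD_AL_OFFSET = 8 sectors (one 4k block) before that:
 *
 *	drbd_md_ss__() == 2000000 - 8 == 1999992
 */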

static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);
	up(&q->s); /* within the spinlock,
		      see comment near end of drbd_worker() */
	spin_unlock_irqrestore(&q->q_lock, flags);
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	up(&q->s); /* within the spinlock,
		      see comment near end of drbd_worker() */
	spin_unlock_irqrestore(&q->q_lock, flags);
}

static inline void wake_asender(struct drbd_conf *mdev)
{
	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
		force_sig(DRBD_SIG, mdev->asender.task);
}

static inline void request_ping(struct drbd_conf *mdev)
{
	set_bit(SEND_PING, &mdev->flags);
	wake_asender(mdev);
}

static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
	enum drbd_packets cmd)
{
	struct p_header80 h;
	return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
}

static inline int drbd_send_ping(struct drbd_conf *mdev)
{
	struct p_header80 h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
}

static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
{
	struct p_header80 h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
}

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, true);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, false, false);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, true, false);
}

/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, queue_for_net_write or queue_for_net_read);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 *    (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
 *  _req_mod(req, data_received)
 *     [from receive_DataReply]
 *  _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 *     for some reason it is NOT decreased in got_NegAck,
 *     but in the resulting cleanup code from report_params.
 *     we should try to remember the reason for that...
 *  _req_mod(req, send_failed or send_canceled)
 *  _req_mod(req, connection_lost_while_pending)
 *     [from tl_clear_barrier]
 */
static inline void inc_ap_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which)				\
	if (atomic_read(&mdev->which) < 0)			\
		dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",	\
			__func__ , __LINE__ ,			\
			atomic_read(&mdev->which))

#define dec_ap_pending(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	if (atomic_dec_and_test(&mdev->ap_pending_cnt))		\
		wake_up(&mdev->misc_wait);			\
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)

/* counts how many resync-related answers we still expect from the peer
 * increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					 (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->rs_pending_cnt);
}

#define dec_rs_pending(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->rs_pending_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)

/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->unacked_cnt);
}

#define dec_unacked(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->unacked_cnt);				\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)

#define sub_unacked(mdev, n)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_sub(n, &mdev->unacked_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)


static inline void put_net_conf(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->net_cnt))
		wake_up(&mdev->net_cnt_wait);
}

/**
 * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
 * @mdev:	DRBD device.
 *
 * You have to call put_net_conf() when finished working with mdev->net_conf.
 */
static inline int get_net_conf(struct drbd_conf *mdev)
{
	int have_net_conf;

	atomic_inc(&mdev->net_cnt);
	have_net_conf = mdev->state.conn >= C_UNCONNECTED;
	if (!have_net_conf)
		put_net_conf(mdev);
	return have_net_conf;
}

/**
 * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
 * @M:		DRBD device.
 *
 * You have to call put_ldev() when finished working with mdev->ldev.
 */
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))

static inline void put_ldev(struct drbd_conf *mdev)
{
	int i = atomic_dec_return(&mdev->local_cnt);

	/* This may be called from some endio handler,
	 * so we must not sleep here. */

	__release(local);
	D_ASSERT(i >= 0);
	if (i == 0) {
		if (mdev->state.disk == D_DISKLESS)
			/* even internal references gone, safe to destroy */
			drbd_ldev_destroy(mdev);
		if (mdev->state.disk == D_FAILED)
			/* all application IO references gone. */
			drbd_go_diskless(mdev);
		wake_up(&mdev->misc_wait);
	}
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	/* never get a reference while D_DISKLESS */
	if (mdev->state.disk == D_DISKLESS)
		return 0;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed)
		put_ldev(mdev);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
#endif
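
/* Canonical reference pattern for local disk access (editorial sketch):
 *
 *	if (get_ldev_if_state(mdev, D_UP_TO_DATE)) {
 *		... read from or write to mdev->ldev ...
 *		put_ldev(mdev);
 *	}
 *
 * get_ldev(M) is the common shorthand, requiring merely D_INCONSISTENT.
 */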

/* you must have a "get_ldev" reference */
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	/* this is to break it at compile time when we change that, in case we
	 * want to support more than (1<<32) bits on a 32bit arch. */
	typecheck(unsigned long, mdev->rs_total);

	/* note: both rs_total and rs_left are in bits, i.e. in
	 * units of BM_BLOCK_SIZE.
	 * for the percentage, we don't care. */

	if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
		*bits_left = mdev->ov_left;
	else
		*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
	/* >> 10 to prevent overflow,
	 * +1 to prevent division by zero */
	if (*bits_left > mdev->rs_total) {
		/* doh. maybe a logic bug somewhere.
		 * may also be just a race condition
		 * between this and a disconnect during sync.
		 * for now, just prevent in-kernel buffer overflow.
		 */
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(mdev->state.conn),
				*bits_left, mdev->rs_total, mdev->rs_failed);
		*per_mil_done = 0;
	} else {
		/* Make sure the division happens in long context.
		 * We allow up to one petabyte storage right now,
		 * at a granularity of 4k per bit that is 2**38 bits.
		 * After shift right and multiplication by 1000,
		 * this should still fit easily into a 32bit long,
		 * so we don't need a 64bit division on 32bit arch.
		 * Note: currently we don't support such large bitmaps on 32bit
		 * arch anyways, but no harm done to be prepared for it here.
		 */
		unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
		unsigned long left = *bits_left >> shift;
		unsigned long total = 1UL + (mdev->rs_total >> shift);
		unsigned long tmp = 1000UL - left * 1000UL/total;
		*per_mil_done = tmp;
	}
}
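
/* Editorial worked example of the per-mil computation above: with
 * rs_total = 1000000 bits and bits_left = 300000 (rs_total is below
 * 1ULL << 32, so shift = 10):
 *
 *	left  = 300000 >> 10		=  292
 *	total = 1 + (1000000 >> 10)	=  977
 *	tmp   = 1000 - 292 * 1000 / 977	=  702
 *
 * i.e. 70.2% done, a close integer approximation of the exact 70%.
 */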


/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
	int mxb = 1000000; /* arbitrary limit on open requests */
	if (get_net_conf(mdev)) {
		mxb = mdev->net_conf->max_buffers;
		put_net_conf(mdev);
	}
	return mxb;
}

static inline int drbd_state_is_stable(struct drbd_conf *mdev)
{
	union drbd_state s = mdev->state;

	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
	case C_AHEAD:
	case C_BEHIND:
		/* transitional states, IO allowed */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
		break;

		/* Allow IO in BM exchange states with new protocols */
	case C_WF_BITMAP_S:
		if (mdev->agreed_pro_version < 96)
			return 0;
		break;

		/* no new io accepted in these states */
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_FAILED:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}

static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);

	if (is_susp(mdev->state))
		return false;
	if (test_bit(SUSPEND_IO, &mdev->flags))
		return false;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(mdev))
		return false;

	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyways, we have this workaround. */
	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
		return false;
	if (test_bit(BITMAP_IO, &mdev->flags))
		return false;
	return true;
}

static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
{
	bool rv = false;

	spin_lock_irq(&mdev->req_lock);
	rv = may_inc_ap_bio(mdev);
	if (rv)
		atomic_add(count, &mdev->ap_bio_cnt);
	spin_unlock_irq(&mdev->req_lock);

	return rv;
}

static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
{
	/* we wait here
	 *    as long as the device is suspended,
	 *    until the bitmap is no longer on the fly during connection
	 *    handshake, and as long as we would exceed the max_buffers limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
}

static inline void dec_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);
	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&mdev->misc_wait);
	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}
}

static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
	int changed = mdev->ed_uuid != val;
	mdev->ed_uuid = val;
	return changed;
}

static inline int seq_cmp(u32 a, u32 b)
{
	/* we assume wrap around at 32bit.
	 * for wrap around at 24bit (old atomic_t),
	 * we'd have to
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)(a) - (s32)(b);
}
#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
/* CAUTION: please no side effects in arguments! */
#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))

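/* Editorial worked example of the wrap-around handling: for a = 5 and
 * b = 0xfffffffe (the counter wrapped between the two samples),
 * seq_cmp(5, 0xfffffffe) = 5 - (-2) = 7 > 0, so seq_gt() correctly
 * reports a as the newer sequence number even though a < b numerically.
 */
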
static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
{
	unsigned int m;
	spin_lock(&mdev->peer_seq_lock);
	m = seq_max(mdev->peer_seq, new_seq);
	mdev->peer_seq = m;
	spin_unlock(&mdev->peer_seq_lock);
	if (m == new_seq)
		wake_up(&mdev->seq_wait);
}

static inline void drbd_update_congested(struct drbd_conf *mdev)
{
	struct sock *sk = mdev->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &mdev->flags);
}

static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline void drbd_md_flush(struct drbd_conf *mdev)
{
	int r;

	if (test_bit(MD_NO_FUA, &mdev->flags))
		return;

	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
	if (r) {
		set_bit(MD_NO_FUA, &mdev->flags);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}

#endif /* _DRBD_INT_H */