/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)          __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern int disable_sendpage;
extern int allow_oos;
extern unsigned int cn_idx;

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

/* All EEs on the free list should have ID_VACANT (== 0)
 * freshly allocated EEs get !ID_VACANT (== 1)
 * so if it says "cannot dereference null pointer at address 0x00000001",
 * it is most likely one of these :( */

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)

#define ID_SYNCER (-1ULL)
#define ID_VACANT 0
#define is_syncer_block_id(id) ((id) == ID_SYNCER)

struct drbd_conf;


/* to shorten dev_warn(DEV, "msg"); and related statements */
#define DEV (disk_to_dev(mdev->vdisk))

#define D_ASSERT(exp)	if (!(exp)) \
	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)

#define ERR_IF(exp) if (({						\
	int _b = (exp) != 0;						\
	if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n",	\
			__func__, #exp, __FILE__, __LINE__);		\
	_b;								\
	}))
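
/* Illustrative usage (editor's sketch, not part of the original header):
 * D_ASSERT() only logs and continues, so it must not guard dereferences;
 * ERR_IF() evaluates to the (logged) condition and can drive control flow:
 *
 *	D_ASSERT(mdev->state.role == R_PRIMARY);
 *	ERR_IF(!mdev->bitmap) return 0;
 */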

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
	DRBD_FAULT_RECEIVE = 9,	/* Changes some bytes upon receiving a [rs]data block */

	DRBD_FAULT_MAX,
};

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(mdev, type);
}
#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))

#else
#define FAULT_ACTIVE(_m, _t) (0)
#endif
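
/* Editor's sketch of the intended call pattern (the real call sites live
 * in the .c files): callers gate an artificial failure on the fault_rate
 * and enable_faults module parameters, e.g.
 *
 *	if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR))
 *		return -EIO;	(pretend the meta data write failed)
 *
 * With CONFIG_DRBD_FAULT_INJECTION disabled this compiles away to (0).
 */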

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

/* drbd_meta-data.c (still in drbd_main.c) */
/* 4th incarnation of the disk layout. */
#define DRBD_MD_MAGIC (DRBD_MAGIC+4)

extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;

/* on the wire */
enum drbd_packets {
	/* receiver (data socket) */
	P_DATA		      = 0x00,
	P_DATA_REPLY	      = 0x01, /* Response to P_DATA_REQUEST */
	P_RS_DATA_REPLY	      = 0x02, /* Response to P_RS_DATA_REQUEST */
	P_BARRIER	      = 0x03,
	P_BITMAP	      = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE	      = 0x07, /* Used at various times to hint the peer */
	P_DATA_REQUEST	      = 0x08, /* Used to ask for a data block */
	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
	P_SYNC_PARAM	      = 0x0a,
	P_PROTOCOL	      = 0x0b,
	P_UUIDS		      = 0x0c,
	P_SIZES		      = 0x0d,
	P_STATE		      = 0x0e,
	P_SYNC_UUID	      = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE	      = 0x11,
	P_STATE_CHG_REQ	      = 0x12,

	/* asender (meta socket) */
	P_PING		      = 0x13,
	P_PING_ACK	      = 0x14,
	P_RECV_ACK	      = 0x15, /* Used in protocol B */
	P_WRITE_ACK	      = 0x16, /* Used in protocol C */
	P_RS_WRITE_ACK	      = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
	P_DISCARD_ACK	      = 0x18, /* Used in proto C, two-primaries conflict detection */
	P_NEG_ACK	      = 0x19, /* Sent if local disk is unusable */
	P_NEG_DREPLY	      = 0x1a, /* Local disk is broken... */
	P_NEG_RS_DREPLY	      = 0x1b, /* Local disk is broken... */
	P_BARRIER_ACK	      = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	/* "new" commands, no longer fitting into the ordering scheme above */

	P_OV_REQUEST	      = 0x1e, /* data socket */
	P_OV_REPLY	      = 0x1f,
	P_OV_RESULT	      = 0x20, /* meta socket */
	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
	/* P_CKPT_FENCE_REQ    = 0x25, * currently reserved for protocol D */
	/* P_CKPT_DISABLE_REQ  = 0x26, * currently reserved for protocol D */
	P_DELAY_PROBE	      = 0x27, /* is used on BOTH sockets */

	P_MAX_CMD	      = 0x28,
	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
	P_MAX_OPT_CMD	      = 0x101,

	/* special command ids for handshake */

	P_HAND_SHAKE_M	      = 0xfff1, /* First Packet on the MetaSock */
	P_HAND_SHAKE_S	      = 0xfff2, /* First Packet on the Socket */

	P_HAND_SHAKE	      = 0xfffe	/* FIXED for the next century! */
};

static inline const char *cmdname(enum drbd_packets cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]		= "Data",
		[P_DATA_REPLY]		= "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]		= "Barrier",
		[P_BITMAP]		= "ReportBitMap",
		[P_BECOME_SYNC_TARGET]	= "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]	= "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]	= "RSDataRequest",
		[P_SYNC_PARAM]		= "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]		= "ReportProtocol",
		[P_UUIDS]		= "ReportUUIDs",
		[P_SIZES]		= "ReportSizes",
		[P_STATE]		= "ReportState",
		[P_SYNC_UUID]		= "ReportSyncUUID",
		[P_AUTH_CHALLENGE]	= "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]		= "PingAck",
		[P_RECV_ACK]		= "RecvAck",
		[P_WRITE_ACK]		= "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_DISCARD_ACK]		= "DiscardAck",
		[P_NEG_ACK]		= "NegAck",
		[P_NEG_DREPLY]		= "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]		= "BarrierAck",
		[P_STATE_CHG_REQ]	= "StateChgRequest",
		[P_STATE_CHG_REPLY]	= "StateChgReply",
		[P_OV_REQUEST]		= "OVRequest",
		[P_OV_REPLY]		= "OVReply",
		[P_OV_RESULT]		= "OVResult",
		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]	= "CBitmap",
		[P_DELAY_PROBE]		= "DelayProbe",
		[P_MAX_CMD]		= NULL,
	};

	if (cmd == P_HAND_SHAKE_M)
		return "HandShakeM";
	if (cmd == P_HAND_SHAKE_S)
		return "HandShakeS";
	if (cmd == P_HAND_SHAKE)
		return "HandShake";
	if (cmd >= P_MAX_CMD)
		return "Unknown";
	return cmdnames[cmd];
}

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}
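
/* Worked example (editor's note): with bit_offset == 100 on a 32 bit
 * arch, 100 >> 5 == 3, and clearing the lowest bit yields word_offset 2,
 * i.e. back to the last 64 bit aligned long word; on a 64 bit arch,
 * 100 >> 6 == 1 is 64 bit aligned already. */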

#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* This is the layout for a packet on the wire.
 * The byteorder is the network byte order.
 *     (except block_id and barrier fields.
 *	these are pointers to local structs
 *	and have no relevance for the partner,
 *	which just echoes them as received.)
 *
 * NOTE that the payload starts at a long aligned offset,
 * regardless of 32 or 64 bit arch!
 */
struct p_header80 {
	u32	  magic;
	u16	  command;
	u16	  length;	/* bytes of data after this header */
	u8	  payload[0];
} __packed;

/* Header for big packets, used for data packets exceeding 64kB */
struct p_header95 {
	u16	  magic;	/* use DRBD_MAGIC_BIG here */
	u16	  command;
	u32	  length;	/* Use only 24 bits of that. Ignore the highest 8 bit. */
	u8	  payload[0];
} __packed;

union p_header {
	struct p_header80 h80;
	struct p_header95 h95;
};
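
/* Editor's note on the layout: packed, both headers are exactly 8 bytes
 * (4+2+2 for p_header80, 2+2+4 for p_header95), so payload[] starts at
 * offset 8 either way, which is what keeps the payload long aligned as
 * promised above. */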

/*
 * short commands, packets without payload, plain p_header:
 *   P_PING
 *   P_PING_ACK
 *   P_BECOME_SYNC_TARGET
 *   P_BECOME_SYNC_SOURCE
 *   P_UNPLUG_REMOTE
 */

/*
 * commands with out-of-struct payload:
 *   P_BITMAP    (no additional fields)
 *   P_DATA, P_DATA_REPLY (see p_data)
 *   P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
 */

/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER	      1 /* deprecated */
#define DP_RW_SYNC	      2 /* equals REQ_SYNC    */
#define DP_MAY_SET_IN_SYNC    4
#define DP_UNPLUG	      8 /* not used anymore   */
#define DP_FUA	             16 /* equals REQ_FUA     */
#define DP_FLUSH             32 /* equals REQ_FLUSH   */
#define DP_DISCARD           64 /* equals REQ_DISCARD */

struct p_data {
	union p_header head;
	u64	    sector;    /* 64 bits sector number */
	u64	    block_id;  /* to identify the request in protocol B&C */
	u32	    seq_num;
	u32	    dp_flags;
} __packed;

/*
 * commands which share a struct:
 *   p_block_ack:
 *	P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *	P_DISCARD_ACK (proto C, two-primaries conflict detection)
 *   p_block_req:
 *	P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {
	struct p_header80 head;
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;


struct p_block_req {
	struct p_header80 head;
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;	/* to multiple of 8 bytes */
} __packed;

/*
 * commands with their own struct for additional fields:
 *   P_HAND_SHAKE
 *   P_BARRIER
 *   P_BARRIER_ACK
 *   P_SYNC_PARAM
 *   ReportParams
 */

struct p_handshake {
	struct p_header80 head;	/* 8 bytes */
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	/* should be more than enough for future enhancements
	 * for now, feature_flags and the reserved array shall be zero.
	 */

	u32 _pad;
	u64 reserverd[7];
} __packed;
/* 80 bytes, FIXED for the next century */

struct p_barrier {
	struct p_header80 head;
	u32 barrier;	/* barrier number _handle_ only */
	u32 pad;	/* to multiple of 8 bytes */
} __packed;

struct p_barrier_ack {
	struct p_header80 head;
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	struct p_header80 head;
	u32 rate;

	/* Since protocol version 88 and higher. */
	char verify_alg[0];
} __packed;

struct p_rs_param_89 {
	struct p_header80 head;
	u32 rate;
	/* protocol version 89: */
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

struct p_rs_param_95 {
	struct p_header80 head;
	u32 rate;
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
	u32 c_plan_ahead;
	u32 c_delay_target;
	u32 c_fill_target;
	u32 c_max_rate;
} __packed;

enum drbd_conn_flags {
	CF_WANT_LOSE = 1,
	CF_DRY_RUN = 2,
};

struct p_protocol {
	struct p_header80 head;
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

	/* Since protocol version 87 and higher. */
	char integrity_alg[0];

} __packed;

struct p_uuids {
	struct p_header80 head;
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	struct p_header80 head;
	u64	    uuid;
} __packed;

struct p_sizes {
	struct p_header80 head;
	u64	    d_size;  /* size of disk */
	u64	    u_size;  /* user requested size */
	u64	    c_size;  /* current exported size */
	u32	    max_bio_size;  /* Maximal size of a BIO */
	u16	    queue_order_type;  /* not yet implemented in DRBD */
	u16	    dds_flags; /* use enum dds_flags here. */
} __packed;

struct p_state {
	struct p_header80 head;
	u32	    state;
} __packed;

struct p_req_state {
	struct p_header80 head;
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	struct p_header80 head;
	u32	    retcode;
} __packed;

struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_discard {
	struct p_header80 head;
	u64	    block_id;
	u32	    seq_num;
	u32	    pad;
} __packed;

/* Valid values for the encoding field.
 * Bump proto version when changing this. */
enum drbd_bitmap_code {
	/* RLE_VLI_Bytes = 0,
	 * and other bit variants had been defined during
	 * algorithm evaluation. */
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	struct p_header80 head;
	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
	 * (encoding & 0x80): polarity (set/unset) of first runlength
	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
	 * used to pad up to head.length bytes
	 */
	u8 encoding;

	u8 code[0];
} __packed;

struct p_delay_probe93 {
	struct p_header80 head;
	u32	seq_num; /* sequence number to match the two probe packets */
	u32	offset;	 /* usecs the probe got sent after the reference time point */
} __packed;

/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static inline void
DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static inline int
DCBP_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static inline void
DCBP_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static inline int
DCBP_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}

static inline void
DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	/* clear only bits 4-6 (pad_bits), keep code and start bits */
	p->encoding = (p->encoding & ~(0x7 << 4)) | (n << 4);
}
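
/* Editor's sketch of how a sender might fill the encoding byte (the
 * actual call sites live in drbd_main.c); first_run_set and pad are
 * placeholder values:
 *
 *	DCBP_set_start(p, first_run_set);	(polarity of first run)
 *	DCBP_set_pad_bits(p, pad);		(trailing zero bits)
 *	DCBP_set_code(p, RLE_VLI_Bits);
 *
 * The receiver inverts this with DCBP_get_code(), DCBP_get_start() and
 * DCBP_get_pad_bits() before decoding p->code[].
 */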

/* one bitmap packet, including the p_header,
 * should fit within one _architecture independent_ page.
 * so we need to use the fixed size 4KiB page size
 * most architectures have used for a long time.
 */
#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80))
#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
#if (PAGE_SIZE < 4096)
/* drbd_send_bitmap / receive_bitmap would break horribly */
#error "PAGE_SIZE too small"
#endif
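
/* Worked numbers (editor's note): with the 8 byte p_header80 this gives
 * BM_PACKET_PAYLOAD_BYTES == 4088, i.e. 511 long words on 64 bit resp.
 * 1022 on 32 bit; the packed p_compressed_bm is 9 bytes, leaving
 * BM_PACKET_VLI_BYTES_MAX == 4087 code bytes. */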

union p_polymorph {
	union p_header		 header;
	struct p_handshake	 handshake;
	struct p_data		 data;
	struct p_block_ack	 block_ack;
	struct p_barrier	 barrier;
	struct p_barrier_ack	 barrier_ack;
	struct p_rs_param_89	 rs_param_89;
	struct p_rs_param_95	 rs_param_95;
	struct p_protocol	 protocol;
	struct p_sizes		 sizes;
	struct p_uuids		 uuids;
	struct p_state		 state;
	struct p_req_state	 req_state;
	struct p_req_state_reply req_state_reply;
	struct p_block_req	 block_req;
	struct p_delay_probe93	 delay_probe93;
	struct p_rs_uuid	 rs_uuid;
} __packed;

/**********************************************************************/
enum drbd_thread_state {
	None,
	Running,
	Exiting,
	Restarting
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_conf *mdev;
	int reset_cpu_mask;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}


/*
 * Having this as the first member of a struct provides sort of "inheritance".
 * "derived" structs can be "drbd_queue_work()"ed.
 * The callback should know and cast back to the descendant struct.
 * drbd_request and drbd_epoch_entry are descendants of drbd_work.
 */
struct drbd_work;
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
	struct list_head list;
	drbd_work_cb cb;
};

struct drbd_tl_epoch;
struct drbd_request {
	struct drbd_work w;
	struct drbd_conf *mdev;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_endio_pri(). */
	struct bio *private_bio;

	struct hlist_node colision;
	sector_t sector;
	unsigned int size;
	unsigned int epoch; /* barrier_nr */

	/* barrier_nr: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * starting a new epoch...
	 */

	/* up to here, the struct layout is identical to drbd_epoch_entry;
	 * we might be able to use that to our advantage...  */

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */
	unsigned long rq_state; /* see comments above _req_mod() */
	int seq_num;
	unsigned long start_time;
};

struct drbd_tl_epoch {
	struct drbd_work w;
	struct list_head requests; /* requests before */
	struct drbd_tl_epoch *next; /* pointer to the next barrier */
	unsigned int br_number;  /* the barrier's identifier. */
	int n_writes;	/* number of requests attached before this barrier */
};

struct drbd_request;

/* These Tl_epoch_entries may be in one of 6 lists:
   active_ee .. data packet being written
   sync_ee   .. syncer block being written
   done_ee   .. block written, need to send P_WRITE_ACK
   read_ee   .. [RS]P_DATA_REQUEST being read
*/

struct drbd_epoch {
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* drbd_epoch flag bits */
enum {
	DE_HAVE_BARRIER_NUMBER,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct drbd_wq_barrier {
	struct drbd_work w;
	struct completion done;
};

struct digest_info {
	int digest_size;
	void *digest;
};

struct drbd_epoch_entry {
	struct drbd_work w;
	struct hlist_node colision;
	struct drbd_epoch *epoch; /* for writes */
	struct drbd_conf *mdev;
	struct page *pages;
	atomic_t pending_bios;
	unsigned int size;
	/* see comments on ee flag bits below */
	unsigned long flags;
	sector_t sector;
	union {
		u64 block_id;
		struct digest_info *digest;
	};
};

/* ee flag bits.
 * While corresponding bios are in flight, the only modification will be
 * set_bit WAS_ERROR, which has to be atomic.
 * If no bios are in flight yet, or all have been completed,
 * non-atomic modification to ee->flags is ok.
 */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_MAY_SET_IN_SYNC,

	/* In case a barrier failed,
	 * we need to resubmit without the barrier flag. */
	__EE_RESUBMITTED,

	/* we may have several bios per epoch entry.
	 * if any of those fail, we set this flag atomically
	 * from the endio callback */
	__EE_WAS_ERROR,

	/* This ee has a pointer to a digest instead of a block id */
	__EE_HAS_DIGEST,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)

/* global flag bits */
enum {
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SEND_PING,		/* whether asender should send a ping asap */

	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CLUSTER_ST_CHANGE,	/* Cluster wide state change going on... */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
	CONSIDER_RESYNC,

	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */
	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	NET_CONGESTED,		/* The data socket is congested */

	CONFIG_PENDING,		/* serialization of (re)configuration requests.
				 * if set, also prevents the device from dying */
	DEVICE_DYING,		/* device became unconfigured,
				 * but worker thread is still handling the cleanup.
				 * reconfiguring (nl_disk_conf, nl_net_conf) is disallowed,
				 * while this is set. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, misc wait gets woken */
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
};

struct drbd_bitmap; /* opaque for drbd_conf */

/* TODO sort members for performance
 * MAYBE group them further */

/* THINK maybe we actually want to use the default "event/%s" worker threads
 * or similar in linux 2.6, which uses per cpu data and threads.
 */
struct drbd_work_queue {
	struct list_head q;
	struct semaphore s; /* producers up it, worker down()s it */
	spinlock_t q_lock;  /* to protect the list. */
};

struct drbd_socket {
	struct drbd_work_queue work;
	struct mutex mutex;
	struct socket	 *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	union p_polymorph sbuf;
	union p_polymorph rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to al area */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* u32 al_nr_extents;	   important for restoring the AL
	 * is stored into  sync_conf.al_extents, which in turn
	 * gets applied to act_log->nr_elements
	 */
};

/* for sync_conf and other types... */
#define NL_PACKET(name, number, fields) struct name { fields };
#define NL_INTEGER(pn,pr,member) int member;
#define NL_INT64(pn,pr,member) __u64 member;
#define NL_BIT(pn,pr,member) unsigned member:1;
#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
#include "linux/drbd_nl.h"
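
/* Editor's note: this is the classic x-macro trick.  As a simplified
 * illustration (not the literal drbd_nl.h contents), an entry like
 *
 *	NL_PACKET(syncer_conf, 8,
 *		NL_INTEGER(30, T_MAY_IGNORE, rate)
 *		NL_STRING(52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX)
 *	)
 *
 * expands into
 *
 *	struct syncer_conf {
 *		int rate;
 *		unsigned char verify_alg[SHARED_SECRET_MAX];
 *		int verify_alg_len;
 *	};
 *
 * which is where types like struct syncer_conf and struct disk_conf,
 * used below, come from.
 */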

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct drbd_md md;
	struct disk_conf dc; /* The user provided config... */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct drbd_conf *mdev;
	struct completion event;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	int (*io_fn)(struct drbd_conf *mdev);
	void (*done)(struct drbd_conf *mdev, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
};

struct fifo_buffer {
	int *values;
	unsigned int head_index;
	unsigned int size;
};

struct drbd_conf {
	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
	struct syncer_conf sync_conf;
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	struct drbd_socket data; /* data/barrier/cstate/parameter packets */
	struct drbd_socket meta; /* ping/ack (metadata) packets */
	int agreed_pro_version;  /* actually used protocol version */
	unsigned long last_received; /* in jiffies, either socket */
	unsigned int ko_count;
	struct drbd_work  resync_work,
			  unplug_work,
			  go_diskless,
			  md_sync_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;
#ifdef DRBD_DEBUG_MD_SYNC
	struct {
		unsigned int line;
		const char* func;
	} last_md_mark_dirty;
#endif

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	wait_queue_head_t net_cnt_wait;
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t net_cnt;	 /* Users of net_conf */
	spinlock_t req_lock;
	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
	struct drbd_tl_epoch *newest_tle;
	struct drbd_tl_epoch *oldest_tle;
	struct list_head out_of_sequence_requests;
	struct hlist_head *tl_hash;
	unsigned int tl_hash_s;

	/* blocks to sync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of sync IOs that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;
#define DRBD_SYNC_MARKS 8
#define DRBD_SYNC_MARK_STEP (3*HZ)
	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
	/* mark's time [unit jiffies] */
	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
	/* current index into rs_mark_{left,time} */
	int rs_last_mark;

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;

	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;
	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	enum write_ordering_e write_ordering;
	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
	struct list_head done_ee;   /* send ack */
	struct list_head read_ee;   /* IO in progress (any read) */
	struct list_head net_ee;    /* zero-copy network send in progress */
	struct hlist_head *ee_hash; /* is protected by req_lock! */
	unsigned int ee_hash_s;

	/* this one is protected by ee_lock, single thread */
	struct drbd_epoch_entry *last_write_w_barrier;

	int next_barrier_nr;
	struct hlist_head *app_reads_hash; /* is protected by req_lock */
	struct list_head resync_reads;
	atomic_t pp_in_use;		/* allocated from page pool */
	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
	wait_queue_head_t ee_wait;
	struct page *md_io_page;	/* one page buffer for md_io */
	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
	struct mutex md_io_mutex;	/* protects the md_io_buffer */
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	int al_tr_pos;   /* position of the next transaction in the journal */
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
	struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
	void *int_dig_out;
	void *int_dig_in;
	void *int_dig_vv;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	cpumask_var_t cpu_mask;
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex state_mutex;
	char congestion_reason;  /* Why we were congested... */
	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
	int rs_last_sect_ev; /* counter to compare with */
	int rs_last_events;  /* counter of read or write "events" (unit sectors)
			      * on the lower level device when we last looked. */
	int c_sync_rate; /* current resync rate after syncer throttle magic */
	struct fifo_buffer rs_plan_s; /* correction values of resync planner */
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
	int rs_planed;    /* resync sectors already planned */
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
	struct drbd_conf *mdev;

	mdev = minor < minor_count ? minor_table[minor] : NULL;

	return mdev;
}

static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}

/* returns 1 if it was successful,
 * returns 0 if there was no data socket.
 * so wherever you are going to use the data.socket, e.g. do
 *	if (!drbd_get_data_sock(mdev))
 *		return 0;
 *	CODE();
 *	drbd_put_data_sock(mdev);
 */
static inline int drbd_get_data_sock(struct drbd_conf *mdev)
{
	mutex_lock(&mdev->data.mutex);
	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (unlikely(mdev->data.socket == NULL)) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}
	return 1;
}

static inline void drbd_put_data_sock(struct drbd_conf *mdev)
{
	mutex_unlock(&mdev->data.mutex);
}

/*
 * function declarations
 *************************/

/* drbd_main.c */

enum chg_state_flags {
	CS_HARD		 = 1,
	CS_VERBOSE	 = 2,
	CS_WAIT_COMPLETE = 4,
	CS_SERIALIZE	 = 8,
	CS_ORDERED	 = CS_WAIT_COMPLETE + CS_SERIALIZE,
};

enum dds_flags {
	DDSF_FORCED    = 1,
	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
};

extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
			union drbd_state mask, union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
			union drbd_state);
extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
			union drbd_state, enum chg_state_flags);
extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
			enum chg_state_flags, struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
			union drbd_state, int);
extern int drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
extern void drbd_free_resources(struct drbd_conf *mdev);
extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_conf *mdev);
enum drbd_req_event;
extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
extern void drbd_free_sock(struct drbd_conf *mdev);
extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
		     void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
			enum drbd_packets cmd, struct p_header80 *h,
			size_t size, unsigned msg_flags);
#define USE_DATA_SOCKET 1
#define USE_META_SOCKET 0
extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
			enum drbd_packets cmd, struct p_header80 *h,
			size_t size);
extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
			char *data, size_t size);
extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
			u32 set_size);
extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct drbd_epoch_entry *e);
extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_block_req *rp);
extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_data *dp, int data_size);
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
			   struct drbd_epoch_entry *e);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int _drbd_send_barrier(struct drbd_conf *mdev,
			      struct drbd_tl_epoch *barrier);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
				   sector_t sector, int size,
				   void *digest, int digest_size,
				   enum drbd_packets cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_conf *mdev);
extern int _drbd_send_bitmap(struct drbd_conf *mdev);
extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);

/* drbd_meta-data.c (still in drbd_main.c) */
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
/* maybe define them below as inline? */
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
#ifndef DRBD_DEBUG_MD_SYNC
extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
#else
#define drbd_md_mark_dirty(m)	drbd_md_mark_dirty_(m, __LINE__ , __func__ )
extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
		unsigned int line, const char *func);
#endif
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
				 int (*io_fn)(struct drbd_conf *),
				 void (*done)(struct drbd_conf *, int),
				 char *why);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
extern void drbd_go_diskless(struct drbd_conf *mdev);
extern void drbd_ldev_destroy(struct drbd_conf *mdev);


/* Meta data layout
   We reserve a 128MB Block (4k aligned)
   * either at the end of the backing device
   * or on a separate meta data device. */

#define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
/* The following numbers are sectors */
#define MD_AL_OFFSET 8	    /* 8 Sectors after start of meta area */
#define MD_AL_MAX_SIZE 64   /* = 32 kb LOG  ~ 3776 extents ~ 14 GB Storage */
/* Allows up to about 3.8TB */
#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)

/* Since the smallest IO unit is usually 512 bytes */
#define MD_SECTOR_SHIFT	 9
#define MD_SECTOR_SIZE	 (1<<MD_SECTOR_SHIFT)

/* activity log */
#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1)   /* 61 ; Extents per 512B sector */
#define AL_EXTENT_SHIFT 22		 /* One extent represents 4M Storage */
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
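
/* Worked numbers (editor's note): MD_RESERVED_SECT == 128 << 11 ==
 * 262144 sectors of 512 bytes == 128 MB; the AL occupies sectors 8..71,
 * so the bitmap starts at sector MD_BM_OFFSET == 72; and
 * AL_EXTENTS_PT == (512-12)/8 - 1 == 61, matching the comment above. */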

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define SLEEP_TIME (HZ/10)

#define BM_BLOCK_SHIFT	12	/* 4k per bit */
#define BM_BLOCK_SIZE	(1<<BM_BLOCK_SHIFT)
/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
 * per sector of on disk bitmap */
#define BM_EXT_SHIFT	(BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3)  /* = 24 */
#define BM_EXT_SIZE	(1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* bit to represented kilo byte conversion */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
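
/* Worked example (editor's note): one bit covers a 4 KiB block, i.e.
 * BM_SECT_PER_BIT == 8 storage sectors; BM_SECT_TO_BIT(16) == 2,
 * BM_BIT_TO_SECT(2) == 16, and Bit2KB(1) == 4. */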

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))

/* how many _storage_ sectors we have per bitmap sector */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)

/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 * of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	  `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )	 //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
#define DRBD_MAX_SECTORS_BM \
	  ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
#endif
#endif

/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
 * With a value of 8 all IO in one 128K block makes it to the same slot of the
 * hash table. */
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
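
/* Worked number (editor's note): DRBD_MAX_BIO_SIZE == 1U << (9+8) ==
 * 131072 bytes == 128 KiB, matching the 128K block mentioned above. */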

#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32 KiB of data */

/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15

extern int  drbd_bm_init(struct drbd_conf *mdev);
extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
extern int  drbd_bm_set_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock */
extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
		unsigned long al_enr);
extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev);
extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
extern int drbd_bm_rs_done(struct drbd_conf *mdev);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap and drbd_bm_write_sect */
extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
extern void drbd_bm_unlock(struct drbd_conf *mdev);

extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* epoch entries */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

extern struct page *drbd_pp_pool; /* drbd's page pool */
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

extern rwlock_t global_state_lock;

extern struct drbd_conf *drbd_new_device(unsigned int minor);
extern void drbd_free_mdev(struct drbd_conf *mdev);

extern int proc_details;

/* drbd_req */
extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
1473 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); | |
1474 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); | |
1475 | ||
1476 | ||
1477 | /* drbd_nl.c */ | |
1478 | extern void drbd_suspend_io(struct drbd_conf *mdev); | |
1479 | extern void drbd_resume_io(struct drbd_conf *mdev); | |
1480 | extern char *ppsize(char *buf, unsigned long long size); | |
1481 | extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); | |
1482 | enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; | |
1483 | extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); | |
1484 | extern void resync_after_online_grow(struct drbd_conf *); | |
1485 | extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); | |
1486 | extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, | |
1487 | int force); | |
1488 | extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); | |
1489 | extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); | |
1490 | extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); | |
1491 | ||
1492 | /* drbd_worker.c */ | |
1493 | extern int drbd_worker(struct drbd_thread *thi); | |
1494 | extern int drbd_alter_sa(struct drbd_conf *mdev, int na); | |
1495 | extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side); | |
1496 | extern void resume_next_sg(struct drbd_conf *mdev); | |
1497 | extern void suspend_other_sg(struct drbd_conf *mdev); | |
1498 | extern int drbd_resync_finished(struct drbd_conf *mdev); | |
1499 | /* maybe rather drbd_main.c ? */ | |
1500 | extern int drbd_md_sync_page_io(struct drbd_conf *mdev, | |
1501 | struct drbd_backing_dev *bdev, sector_t sector, int rw); | |
1502 | extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); | |
1503 | extern void drbd_rs_controller_reset(struct drbd_conf *mdev); | |
1504 | ||
1505 | static inline void ov_oos_print(struct drbd_conf *mdev) | |
1506 | { | |
1507 | if (mdev->ov_last_oos_size) { | |
1508 | dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n", | |
1509 | (unsigned long long)mdev->ov_last_oos_start, | |
1510 | (unsigned long)mdev->ov_last_oos_size); | |
1511 | } | |
1512 | mdev->ov_last_oos_size = 0; | |
1513 | } | |
1514 | ||
1515 | ||
1516 | extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *); | |
1517 | extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *); | |
1518 | /* worker callbacks */ | |
1519 | extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int); | |
1520 | extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int); | |
1521 | extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int); | |
1522 | extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int); | |
1523 | extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int); | |
1524 | extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int); | |
1525 | extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int); | |
1526 | extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int); | |
1527 | extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int); | |
1528 | extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int); | |
1529 | extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int); | |
1530 | extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int); | |
1531 | extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int); | |
1532 | extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); | |
1533 | extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); | |
1534 | extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); | |
1535 | extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); | |
1536 | extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); | |
1537 | ||
1538 | extern void resync_timer_fn(unsigned long data); | |
1539 | ||
1540 | /* drbd_receiver.c */ | |
1541 | extern int drbd_rs_should_slow_down(struct drbd_conf *mdev); | |
1542 | extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | |
1543 | const unsigned rw, const int fault_type); | |
1544 | extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); | |
1545 | extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, | |
1546 | u64 id, | |
1547 | sector_t sector, | |
1548 | unsigned int data_size, | |
1549 | gfp_t gfp_mask) __must_hold(local); | |
1550 | extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, | |
1551 | int is_net); | |
1552 | #define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0) | |
1553 | #define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1) | |
1554 | extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev, | |
1555 | struct list_head *head); | |
1556 | extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, | |
1557 | struct list_head *head); | |
1558 | extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled); | |
1559 | extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed); | |
1560 | extern void drbd_flush_workqueue(struct drbd_conf *mdev); | |
1561 | extern void drbd_free_tl_hash(struct drbd_conf *mdev); | |
1562 | ||
1563 | /* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to | |
1564 | * mess with get_fs/set_fs, we know we are KERNEL_DS always. */ | |
1565 | static inline int drbd_setsockopt(struct socket *sock, int level, int optname, | |
1566 | char __user *optval, int optlen) | |
1567 | { | |
1568 | int err; | |
1569 | if (level == SOL_SOCKET) | |
1570 | err = sock_setsockopt(sock, level, optname, optval, optlen); | |
1571 | else | |
1572 | err = sock->ops->setsockopt(sock, level, optname, optval, | |
1573 | optlen); | |
1574 | return err; | |
1575 | } | |
1576 | ||
1577 | static inline void drbd_tcp_cork(struct socket *sock) | |
1578 | { | |
1579 | int __user val = 1; | |
1580 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, | |
1581 | (char __user *)&val, sizeof(val)); | |
1582 | } | |
1583 | ||
1584 | static inline void drbd_tcp_uncork(struct socket *sock) | |
1585 | { | |
1586 | int __user val = 0; | |
1587 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, | |
1588 | (char __user *)&val, sizeof(val)); | |
1589 | } | |
1590 | ||
1591 | static inline void drbd_tcp_nodelay(struct socket *sock) | |
1592 | { | |
1593 | int __user val = 1; | |
1594 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY, | |
1595 | (char __user *)&val, sizeof(val)); | |
1596 | } | |
1597 | ||
1598 | static inline void drbd_tcp_quickack(struct socket *sock) | |
1599 | { | |
1600 | int __user val = 2; | |
1601 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, | |
1602 | (char __user *)&val, sizeof(val)); | |
1603 | } | |
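| /* Illustrative corking pattern (a sketch, not a call site in this header): | |
| * batch several sends between cork and uncork, so the stack may coalesce | |
| * them into fewer TCP segments: | |
| * | |
| * drbd_tcp_cork(mdev->data.socket); | |
| * ... send several packets back to back ... | |
| * drbd_tcp_uncork(mdev->data.socket); | |
| */ | |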
1604 | ||
1605 | void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo); | |
1606 | ||
1607 | /* drbd_proc.c */ | |
1608 | extern struct proc_dir_entry *drbd_proc; | |
1609 | extern const struct file_operations drbd_proc_fops; | |
1610 | extern const char *drbd_conn_str(enum drbd_conns s); | |
1611 | extern const char *drbd_role_str(enum drbd_role s); | |
1612 | ||
1613 | /* drbd_actlog.c */ | |
1614 | extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector); | |
1615 | extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector); | |
1616 | extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector); | |
1617 | extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector); | |
1618 | extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector); | |
1619 | extern void drbd_rs_cancel_all(struct drbd_conf *mdev); | |
1620 | extern int drbd_rs_del_all(struct drbd_conf *mdev); | |
1621 | extern void drbd_rs_failed_io(struct drbd_conf *mdev, | |
1622 | sector_t sector, int size); | |
1623 | extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *); | |
1624 | extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go); | |
1625 | extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, | |
1626 | int size, const char *file, const unsigned int line); | |
1627 | #define drbd_set_in_sync(mdev, sector, size) \ | |
1628 | __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__) | |
1629 | extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, | |
1630 | int size, const char *file, const unsigned int line); | |
1631 | #define drbd_set_out_of_sync(mdev, sector, size) \ | |
1632 | __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__) | |
1633 | extern void drbd_al_apply_to_bm(struct drbd_conf *mdev); | |
1634 | extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev); | |
1635 | extern void drbd_al_shrink(struct drbd_conf *mdev); | |
1636 | ||
1637 | ||
1638 | /* drbd_nl.c */ | |
1639 | ||
1640 | void drbd_nl_cleanup(void); | |
1641 | int __init drbd_nl_init(void); | |
1642 | void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state); | |
1643 | void drbd_bcast_sync_progress(struct drbd_conf *mdev); | |
1644 | void drbd_bcast_ee(struct drbd_conf *mdev, | |
1645 | const char *reason, const int dgs, | |
1646 | const char* seen_hash, const char* calc_hash, | |
1647 | const struct drbd_epoch_entry* e); | |
1648 | ||
1649 | ||
1650 | /** | |
1651 | * DOC: DRBD State macros | |
1652 | * | |
1653 | * These macros are used to express state changes in easily readable form. | |
1654 | * | |
1655 | * The NS macros expand to a mask and a value that can be bit-ORed onto the | |
1656 | * current state once the spinlock (req_lock) has been taken. | |
1657 | * | |
1658 | * The _NS macros are used for state functions that get called with the | |
1659 | * spinlock held. These macros expand directly to the new state value. | |
1660 | * | |
1661 | * Besides the basic forms NS() and _NS(), additional _?NS[23] variants are defined | |
1662 | * to express state changes that affect more than one aspect of the state. | |
1663 | * | |
1664 | * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY) | |
1665 | * means that the network connection was established and that the peer | |
1666 | * is in secondary role. | |
1667 | */ | |
1668 | #define role_MASK R_MASK | |
1669 | #define peer_MASK R_MASK | |
1670 | #define disk_MASK D_MASK | |
1671 | #define pdsk_MASK D_MASK | |
1672 | #define conn_MASK C_MASK | |
1673 | #define susp_MASK 1 | |
1674 | #define user_isp_MASK 1 | |
1675 | #define aftr_isp_MASK 1 | |
1676 | #define susp_nod_MASK 1 | |
1677 | #define susp_fen_MASK 1 | |
1678 | ||
1679 | #define NS(T, S) \ | |
1680 | ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \ | |
1681 | ({ union drbd_state val; val.i = 0; val.T = (S); val; }) | |
1682 | #define NS2(T1, S1, T2, S2) \ | |
1683 | ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \ | |
1684 | mask.T2 = T2##_MASK; mask; }), \ | |
1685 | ({ union drbd_state val; val.i = 0; val.T1 = (S1); \ | |
1686 | val.T2 = (S2); val; }) | |
1687 | #define NS3(T1, S1, T2, S2, T3, S3) \ | |
1688 | ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \ | |
1689 | mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \ | |
1690 | ({ union drbd_state val; val.i = 0; val.T1 = (S1); \ | |
1691 | val.T2 = (S2); val.T3 = (S3); val; }) | |
1692 | ||
1693 | #define _NS(D, T, S) \ | |
1694 | D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; }) | |
1695 | #define _NS2(D, T1, S1, T2, S2) \ | |
1696 | D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \ | |
1697 | __ns.T2 = (S2); __ns; }) | |
1698 | #define _NS3(D, T1, S1, T2, S2, T3, S3) \ | |
1699 | D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \ | |
1700 | __ns.T2 = (S2); __ns.T3 = (S3); __ns; }) | |
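| /* Usage sketch for the macros above (illustrative): | |
| * | |
| * request a disconnect, outside of req_lock: | |
| * drbd_request_state(mdev, NS(conn, C_DISCONNECTING)); | |
| * | |
| * force the disk to D_FAILED while already serialized, compare | |
| * __drbd_chk_io_error_() below: | |
| * _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); | |
| */ | |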
1701 | ||
1702 | /* | |
1703 | * inline helper functions | |
1704 | *************************/ | |
1705 | ||
1706 | /* see also page_chain_add and friends in drbd_receiver.c */ | |
1707 | static inline struct page *page_chain_next(struct page *page) | |
1708 | { | |
1709 | return (struct page *)page_private(page); | |
1710 | } | |
1711 | #define page_chain_for_each(page) \ | |
1712 | for (; page && ({ prefetch(page_chain_next(page)); 1; }); \ | |
1713 | page = page_chain_next(page)) | |
1714 | #define page_chain_for_each_safe(page, n) \ | |
1715 | for (; page && ({ n = page_chain_next(page); 1; }); page = n) | |
1716 | ||
1717 | static inline int drbd_bio_has_active_page(struct bio *bio) | |
1718 | { | |
1719 | struct bio_vec *bvec; | |
1720 | int i; | |
1721 | ||
1722 | __bio_for_each_segment(bvec, bio, i, 0) { | |
1723 | if (page_count(bvec->bv_page) > 1) | |
1724 | return 1; | |
1725 | } | |
1726 | ||
1727 | return 0; | |
1728 | } | |
1729 | ||
1730 | static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e) | |
1731 | { | |
1732 | struct page *page = e->pages; | |
1733 | page_chain_for_each(page) { | |
1734 | if (page_count(page) > 1) | |
1735 | return 1; | |
1736 | } | |
1737 | return 0; | |
1738 | } | |
1739 | ||
1740 | ||
1741 | static inline void drbd_state_lock(struct drbd_conf *mdev) | |
1742 | { | |
1743 | wait_event(mdev->misc_wait, | |
1744 | !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags)); | |
1745 | } | |
1746 | ||
1747 | static inline void drbd_state_unlock(struct drbd_conf *mdev) | |
1748 | { | |
1749 | clear_bit(CLUSTER_ST_CHANGE, &mdev->flags); | |
1750 | wake_up(&mdev->misc_wait); | |
1751 | } | |
1752 | ||
1753 | static inline int _drbd_set_state(struct drbd_conf *mdev, | |
1754 | union drbd_state ns, enum chg_state_flags flags, | |
1755 | struct completion *done) | |
1756 | { | |
1757 | int rv; | |
1758 | ||
1759 | read_lock(&global_state_lock); | |
1760 | rv = __drbd_set_state(mdev, ns, flags, done); | |
1761 | read_unlock(&global_state_lock); | |
1762 | ||
1763 | return rv; | |
1764 | } | |
1765 | ||
1766 | /** | |
1767 | * drbd_request_state() - Request a state change | |
1768 | * @mdev: DRBD device. | |
1769 | * @mask: mask of state bits to change. | |
1770 | * @val: value of new state bits. | |
1771 | * | |
1772 | * This is the most graceful way of requesting a state change. It is | |
1773 | * quite verbose in case the state change is not possible, and all those | |
1774 | * state changes are globally serialized. | |
1775 | */ | |
1776 | static inline int drbd_request_state(struct drbd_conf *mdev, | |
1777 | union drbd_state mask, | |
1778 | union drbd_state val) | |
1779 | { | |
1780 | return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED); | |
1781 | } | |
1782 | ||
1783 | #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__) | |
1784 | static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where) | |
1785 | { | |
1786 | switch (mdev->ldev->dc.on_io_error) { | |
1787 | case EP_PASS_ON: | |
1788 | if (!forcedetach) { | |
1789 | if (__ratelimit(&drbd_ratelimit_state)) | |
1790 | dev_err(DEV, "Local IO failed in %s.\n", where); | |
1791 | break; | |
1792 | } | |
1793 | /* NOTE fall through to detach case if forcedetach set */ | |
1794 | case EP_DETACH: | |
1795 | case EP_CALL_HELPER: | |
1796 | set_bit(WAS_IO_ERROR, &mdev->flags); | |
1797 | if (mdev->state.disk > D_FAILED) { | |
1798 | _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); | |
1799 | dev_err(DEV, | |
1800 | "Local IO failed in %s. Detaching...\n", where); | |
1801 | } | |
1802 | break; | |
1803 | } | |
1804 | } | |
1805 | ||
1806 | /** | |
1807 | * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers | |
1808 | * @mdev: DRBD device. | |
1809 | * @error: Error code passed to the IO completion callback | |
1810 | * @forcedetach: Force detach. I.e. the error happened while accessing the meta data | |
1811 | * | |
1812 | * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED) | |
1813 | */ | |
1814 | #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__) | |
1815 | static inline void drbd_chk_io_error_(struct drbd_conf *mdev, | |
1816 | int error, int forcedetach, const char *where) | |
1817 | { | |
1818 | if (error) { | |
1819 | unsigned long flags; | |
1820 | spin_lock_irqsave(&mdev->req_lock, flags); | |
1821 | __drbd_chk_io_error_(mdev, forcedetach, where); | |
1822 | spin_unlock_irqrestore(&mdev->req_lock, flags); | |
1823 | } | |
1824 | } | |
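| /* Call site sketch (illustrative): a bio completion handler passes its | |
| * error code through, forcing detach only for meta data IO: | |
| * | |
| * drbd_chk_io_error(mdev, error, FALSE); // data IO: honor on_io_error | |
| * drbd_chk_io_error(mdev, error, TRUE); // meta data IO: force detach | |
| */ | |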
1825 | ||
1826 | ||
1827 | /** | |
1828 | * drbd_md_first_sector() - Returns the first sector number of the meta data area | |
1829 | * @bdev: Meta data block device. | |
1830 | * | |
1831 | * BTW, for internal meta data, this happens to be the maximum capacity | |
1832 | * we could agree upon with our peer node. | |
1833 | */ | |
1834 | static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev) | |
1835 | { | |
1836 | switch (bdev->dc.meta_dev_idx) { | |
1837 | case DRBD_MD_INDEX_INTERNAL: | |
1838 | case DRBD_MD_INDEX_FLEX_INT: | |
1839 | return bdev->md.md_offset + bdev->md.bm_offset; | |
1840 | case DRBD_MD_INDEX_FLEX_EXT: | |
1841 | default: | |
1842 | return bdev->md.md_offset; | |
1843 | } | |
1844 | } | |
1845 | ||
1846 | /** | |
1847 | * drbd_md_last_sector() - Returns the last sector number of the meta data area | |
1848 | * @bdev: Meta data block device. | |
1849 | */ | |
1850 | static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) | |
1851 | { | |
1852 | switch (bdev->dc.meta_dev_idx) { | |
1853 | case DRBD_MD_INDEX_INTERNAL: | |
1854 | case DRBD_MD_INDEX_FLEX_INT: | |
1855 | return bdev->md.md_offset + MD_AL_OFFSET - 1; | |
1856 | case DRBD_MD_INDEX_FLEX_EXT: | |
1857 | default: | |
1858 | return bdev->md.md_offset + bdev->md.md_size_sect; | |
1859 | } | |
1860 | } | |
1861 | ||
1862 | /* Returns the number of 512 byte sectors of the device */ | |
1863 | static inline sector_t drbd_get_capacity(struct block_device *bdev) | |
1864 | { | |
1865 | /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ | |
1866 | return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0; | |
1867 | } | |
1868 | ||
1869 | /** | |
1870 | * drbd_get_max_capacity() - Returns the capacity we announce to our peer | |
1871 | * @bdev: Meta data block device. | |
1872 | * | |
1873 | * returns the capacity we announce to our peer. We clip ourselves at the | |
1874 | * various MAX_SECTORS, because if we don't, the current implementation | |
1875 | * will oops sooner or later. | |
1876 | */ | |
1877 | static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev) | |
1878 | { | |
1879 | sector_t s; | |
1880 | switch (bdev->dc.meta_dev_idx) { | |
1881 | case DRBD_MD_INDEX_INTERNAL: | |
1882 | case DRBD_MD_INDEX_FLEX_INT: | |
1883 | s = drbd_get_capacity(bdev->backing_bdev) | |
1884 | ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX, | |
1885 | drbd_md_first_sector(bdev)) | |
1886 | : 0; | |
1887 | break; | |
1888 | case DRBD_MD_INDEX_FLEX_EXT: | |
1889 | s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX, | |
1890 | drbd_get_capacity(bdev->backing_bdev)); | |
1891 | /* clip at maximum size the meta device can support */ | |
1892 | s = min_t(sector_t, s, | |
1893 | BM_EXT_TO_SECT(bdev->md.md_size_sect | |
1894 | - bdev->md.bm_offset)); | |
1895 | break; | |
1896 | default: | |
1897 | s = min_t(sector_t, DRBD_MAX_SECTORS, | |
1898 | drbd_get_capacity(bdev->backing_bdev)); | |
1899 | } | |
1900 | return s; | |
1901 | } | |
1902 | ||
1903 | /** | |
1904 | * drbd_md_ss__() - Return the sector number of our meta data super block | |
1905 | * @mdev: DRBD device. | |
1906 | * @bdev: Meta data block device. | |
1907 | */ | |
1908 | static inline sector_t drbd_md_ss__(struct drbd_conf *mdev, | |
1909 | struct drbd_backing_dev *bdev) | |
1910 | { | |
1911 | switch (bdev->dc.meta_dev_idx) { | |
1912 | default: /* external, some index */ | |
1913 | return MD_RESERVED_SECT * bdev->dc.meta_dev_idx; | |
1914 | case DRBD_MD_INDEX_INTERNAL: | |
1915 | /* with drbd08, internal meta data is always "flexible" */ | |
1916 | case DRBD_MD_INDEX_FLEX_INT: | |
1917 | /* sizeof(struct md_on_disk_07) == 4k | |
1918 | * position: last 4k aligned block of 4k size */ | |
1919 | if (!bdev->backing_bdev) { | |
1920 | if (__ratelimit(&drbd_ratelimit_state)) { | |
1921 | dev_err(DEV, "bdev->backing_bdev==NULL\n"); | |
1922 | dump_stack(); | |
1923 | } | |
1924 | return 0; | |
1925 | } | |
1926 | return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) | |
1927 | - MD_AL_OFFSET; | |
1928 | case DRBD_MD_INDEX_FLEX_EXT: | |
1929 | return 0; | |
1930 | } | |
1931 | } | |
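| /* Example for the flexible internal case (assuming MD_AL_OFFSET == 8, i.e. | |
| * a 4k superblock, as the comment above implies): a backing device of | |
| * 1000005 sectors is aligned down to 1000000 by "& ~7ULL", so the super | |
| * block starts at sector 999992, the last 4k aligned block of 4k size. */ | |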
1932 | ||
1933 | static inline void | |
1934 | drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w) | |
1935 | { | |
1936 | unsigned long flags; | |
1937 | spin_lock_irqsave(&q->q_lock, flags); | |
1938 | list_add(&w->list, &q->q); | |
1939 | up(&q->s); /* within the spinlock, | |
1940 | see comment near end of drbd_worker() */ | |
1941 | spin_unlock_irqrestore(&q->q_lock, flags); | |
1942 | } | |
1943 | ||
1944 | static inline void | |
1945 | drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) | |
1946 | { | |
1947 | unsigned long flags; | |
1948 | spin_lock_irqsave(&q->q_lock, flags); | |
1949 | list_add_tail(&w->list, &q->q); | |
1950 | up(&q->s); /* within the spinlock, | |
1951 | see comment near end of drbd_worker() */ | |
1952 | spin_unlock_irqrestore(&q->q_lock, flags); | |
1953 | } | |
1954 | ||
1955 | static inline void wake_asender(struct drbd_conf *mdev) | |
1956 | { | |
1957 | if (test_bit(SIGNAL_ASENDER, &mdev->flags)) | |
1958 | force_sig(DRBD_SIG, mdev->asender.task); | |
1959 | } | |
1960 | ||
1961 | static inline void request_ping(struct drbd_conf *mdev) | |
1962 | { | |
1963 | set_bit(SEND_PING, &mdev->flags); | |
1964 | wake_asender(mdev); | |
1965 | } | |
1966 | ||
1967 | static inline int drbd_send_short_cmd(struct drbd_conf *mdev, | |
1968 | enum drbd_packets cmd) | |
1969 | { | |
1970 | struct p_header80 h; | |
1971 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h)); | |
1972 | } | |
1973 | ||
1974 | static inline int drbd_send_ping(struct drbd_conf *mdev) | |
1975 | { | |
1976 | struct p_header80 h; | |
1977 | return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h)); | |
1978 | } | |
1979 | ||
1980 | static inline int drbd_send_ping_ack(struct drbd_conf *mdev) | |
1981 | { | |
1982 | struct p_header80 h; | |
1983 | return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h)); | |
1984 | } | |
1985 | ||
1986 | static inline void drbd_thread_stop(struct drbd_thread *thi) | |
1987 | { | |
1988 | _drbd_thread_stop(thi, FALSE, TRUE); | |
1989 | } | |
1990 | ||
1991 | static inline void drbd_thread_stop_nowait(struct drbd_thread *thi) | |
1992 | { | |
1993 | _drbd_thread_stop(thi, FALSE, FALSE); | |
1994 | } | |
1995 | ||
1996 | static inline void drbd_thread_restart_nowait(struct drbd_thread *thi) | |
1997 | { | |
1998 | _drbd_thread_stop(thi, TRUE, FALSE); | |
1999 | } | |
2000 | ||
2001 | /* counts how many answer packets we expect from our peer, | |
2002 | * for either explicit application requests, | |
2003 | * or implicit barrier packets as necessary. | |
2004 | * increased: | |
2005 | * w_send_barrier | |
2006 | * _req_mod(req, queue_for_net_write or queue_for_net_read); | |
2007 | * it is much easier and equally valid to count what we queue for the | |
2008 | * worker, even before it actually was queued or sent. | |
2009 | * (drbd_make_request_common; recovery path on read io-error) | |
2010 | * decreased: | |
2011 | * got_BarrierAck (respective tl_clear, tl_clear_barrier) | |
2012 | * _req_mod(req, data_received) | |
2013 | * [from receive_DataReply] | |
2014 | * _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked) | |
2015 | * [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)] | |
2016 | * for some reason it is NOT decreased in got_NegAck, | |
2017 | * but in the resulting cleanup code from report_params. | |
2018 | * we should try to remember the reason for that... | |
2019 | * _req_mod(req, send_failed or send_canceled) | |
2020 | * _req_mod(req, connection_lost_while_pending) | |
2021 | * [from tl_clear_barrier] | |
2022 | */ | |
2023 | static inline void inc_ap_pending(struct drbd_conf *mdev) | |
2024 | { | |
2025 | atomic_inc(&mdev->ap_pending_cnt); | |
2026 | } | |
2027 | ||
2028 | #define ERR_IF_CNT_IS_NEGATIVE(which) \ | |
2029 | if (atomic_read(&mdev->which) < 0) \ | |
2030 | dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n", \ | |
2031 | __func__ , __LINE__ , \ | |
2032 | atomic_read(&mdev->which)) | |
2033 | ||
2034 | #define dec_ap_pending(mdev) do { \ | |
2035 | typecheck(struct drbd_conf *, mdev); \ | |
2036 | if (atomic_dec_and_test(&mdev->ap_pending_cnt)) \ | |
2037 | wake_up(&mdev->misc_wait); \ | |
2038 | ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0) | |
2039 | ||
2040 | /* counts how many resync-related answers we still expect from the peer | |
2041 | * increase decrease | |
2042 | * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY) | |
2043 | * C_SYNC_SOURCE sends P_RS_DATA_REPLY (and expects P_WRITE_ACK with ID_SYNCER) | |
2044 | * (or P_NEG_ACK with ID_SYNCER) | |
2045 | */ | |
2046 | static inline void inc_rs_pending(struct drbd_conf *mdev) | |
2047 | { | |
2048 | atomic_inc(&mdev->rs_pending_cnt); | |
2049 | } | |
2050 | ||
2051 | #define dec_rs_pending(mdev) do { \ | |
2052 | typecheck(struct drbd_conf *, mdev); \ | |
2053 | atomic_dec(&mdev->rs_pending_cnt); \ | |
2054 | ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0) | |
2055 | ||
2056 | /* counts how many answers we still need to send to the peer. | |
2057 | * increased on | |
2058 | * receive_Data unless protocol A; | |
2059 | * we need to send a P_RECV_ACK (proto B) | |
2060 | * or P_WRITE_ACK (proto C) | |
2061 | * receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK | |
2062 | * receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA | |
2063 | * receive_Barrier_* we need to send a P_BARRIER_ACK | |
2064 | */ | |
2065 | static inline void inc_unacked(struct drbd_conf *mdev) | |
2066 | { | |
2067 | atomic_inc(&mdev->unacked_cnt); | |
2068 | } | |
2069 | ||
2070 | #define dec_unacked(mdev) do { \ | |
2071 | typecheck(struct drbd_conf *, mdev); \ | |
2072 | atomic_dec(&mdev->unacked_cnt); \ | |
2073 | ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0) | |
2074 | ||
2075 | #define sub_unacked(mdev, n) do { \ | |
2076 | typecheck(struct drbd_conf *, mdev); \ | |
2077 | atomic_sub(n, &mdev->unacked_cnt); \ | |
2078 | ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0) | |
2079 | ||
2080 | ||
2081 | static inline void put_net_conf(struct drbd_conf *mdev) | |
2082 | { | |
2083 | if (atomic_dec_and_test(&mdev->net_cnt)) | |
2084 | wake_up(&mdev->net_cnt_wait); | |
2085 | } | |
2086 | ||
2087 | /** | |
2088 | * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there | |
2089 | * @mdev: DRBD device. | |
2090 | * | |
2091 | * You have to call put_net_conf() when finished working with mdev->net_conf. | |
2092 | */ | |
2093 | static inline int get_net_conf(struct drbd_conf *mdev) | |
2094 | { | |
2095 | int have_net_conf; | |
2096 | ||
2097 | atomic_inc(&mdev->net_cnt); | |
2098 | have_net_conf = mdev->state.conn >= C_UNCONNECTED; | |
2099 | if (!have_net_conf) | |
2100 | put_net_conf(mdev); | |
2101 | return have_net_conf; | |
2102 | } | |
2103 | ||
2104 | /** | |
2105 | * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev | |
2106 | * @M: DRBD device. | |
2107 | * | |
2108 | * You have to call put_ldev() when finished working with mdev->ldev. | |
2109 | */ | |
2110 | #define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT)) | |
2111 | #define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS)) | |
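| /* Canonical usage pattern (sketch): | |
| * | |
| * if (get_ldev(mdev)) { | |
| * ... access mdev->ldev ... | |
| * put_ldev(mdev); | |
| * } | |
| */ | |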
2112 | ||
2113 | static inline void put_ldev(struct drbd_conf *mdev) | |
2114 | { | |
2115 | int i = atomic_dec_return(&mdev->local_cnt); | |
2116 | __release(local); | |
2117 | D_ASSERT(i >= 0); | |
2118 | if (i == 0) { | |
2119 | if (mdev->state.disk == D_DISKLESS) | |
2120 | /* even internal references gone, safe to destroy */ | |
2121 | drbd_ldev_destroy(mdev); | |
2122 | if (mdev->state.disk == D_FAILED) | |
2123 | /* all application IO references gone. */ | |
2124 | drbd_go_diskless(mdev); | |
2125 | wake_up(&mdev->misc_wait); | |
2126 | } | |
2127 | } | |
2128 | ||
2129 | #ifndef __CHECKER__ | |
2130 | static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) | |
2131 | { | |
2132 | int io_allowed; | |
2133 | ||
2134 | /* never get a reference while D_DISKLESS */ | |
2135 | if (mdev->state.disk == D_DISKLESS) | |
2136 | return 0; | |
2137 | ||
2138 | atomic_inc(&mdev->local_cnt); | |
2139 | io_allowed = (mdev->state.disk >= mins); | |
2140 | if (!io_allowed) | |
2141 | put_ldev(mdev); | |
2142 | return io_allowed; | |
2143 | } | |
2144 | #else | |
2145 | extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins); | |
2146 | #endif | |
2147 | ||
2148 | /* you must have a "get_ldev" reference */ | |
2149 | static inline void drbd_get_syncer_progress(struct drbd_conf *mdev, | |
2150 | unsigned long *bits_left, unsigned int *per_mil_done) | |
2151 | { | |
2152 | /* | |
2153 | * this is here to break the build at compile time if we change that | |
2154 | * (we may feel 4TB maximum storage per drbd is not enough) | |
2155 | */ | |
2156 | typecheck(unsigned long, mdev->rs_total); | |
2157 | ||
2158 | /* note: both rs_total and rs_left are in bits, i.e. in | |
2159 | * units of BM_BLOCK_SIZE. | |
2160 | * for the percentage, we don't care. */ | |
2161 | ||
2162 | if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) | |
2163 | *bits_left = mdev->ov_left; | |
2164 | else | |
2165 | *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; | |
2166 | /* >> 10 to prevent overflow, | |
2167 | * +1 to prevent division by zero */ | |
2168 | if (*bits_left > mdev->rs_total) { | |
2169 | /* doh. maybe a logic bug somewhere. | |
2170 | * may also be just a race condition | |
2171 | * between this and a disconnect during sync. | |
2172 | * for now, just prevent in-kernel buffer overflow. | |
2173 | */ | |
2174 | smp_rmb(); | |
2175 | dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n", | |
2176 | drbd_conn_str(mdev->state.conn), | |
2177 | *bits_left, mdev->rs_total, mdev->rs_failed); | |
2178 | *per_mil_done = 0; | |
2179 | } else { | |
2180 | /* make sure the calculation happens in long context */ | |
2181 | unsigned long tmp = 1000UL - | |
2182 | (*bits_left >> 10)*1000UL | |
2183 | / ((mdev->rs_total >> 10) + 1UL); | |
2184 | *per_mil_done = tmp; | |
2185 | } | |
2186 | } | |
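| /* Worked example for the per mille calculation above: with | |
| * rs_total == 1<<20 bits (4 GiB to sync at 4 KiB per bit) and | |
| * bits_left == 1<<19, tmp == 1000 - (512*1000)/1025 == 1000 - 499 == 501, | |
| * i.e. 50.1% done. */ | |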
2187 | ||
2188 | ||
2189 | /* this throttles on-the-fly application requests | |
2190 | * according to max_buffers settings; | |
2191 | * maybe re-implement using semaphores? */ | |
2192 | static inline int drbd_get_max_buffers(struct drbd_conf *mdev) | |
2193 | { | |
2194 | int mxb = 1000000; /* arbitrary limit on open requests */ | |
2195 | if (get_net_conf(mdev)) { | |
2196 | mxb = mdev->net_conf->max_buffers; | |
2197 | put_net_conf(mdev); | |
2198 | } | |
2199 | return mxb; | |
2200 | } | |
2201 | ||
2202 | static inline int drbd_state_is_stable(union drbd_state s) | |
2203 | { | |
2204 | ||
2205 | /* DO NOT add a default clause, we want the compiler to warn us | |
2206 | * for any newly introduced state we may have forgotten to add here */ | |
2207 | ||
2208 | switch ((enum drbd_conns)s.conn) { | |
2209 | /* new io only accepted when there is no connection, ... */ | |
2210 | case C_STANDALONE: | |
2211 | case C_WF_CONNECTION: | |
2212 | /* ... or there is a well established connection. */ | |
2213 | case C_CONNECTED: | |
2214 | case C_SYNC_SOURCE: | |
2215 | case C_SYNC_TARGET: | |
2216 | case C_VERIFY_S: | |
2217 | case C_VERIFY_T: | |
2218 | case C_PAUSED_SYNC_S: | |
2219 | case C_PAUSED_SYNC_T: | |
2220 | case C_AHEAD: | |
2221 | case C_BEHIND: | |
2222 | /* maybe stable, look at the disk state */ | |
2223 | break; | |
2224 | ||
2225 | /* no new io accepted during transitional states | |
2226 | * like handshake or teardown */ | |
2227 | case C_DISCONNECTING: | |
2228 | case C_UNCONNECTED: | |
2229 | case C_TIMEOUT: | |
2230 | case C_BROKEN_PIPE: | |
2231 | case C_NETWORK_FAILURE: | |
2232 | case C_PROTOCOL_ERROR: | |
2233 | case C_TEAR_DOWN: | |
2234 | case C_WF_REPORT_PARAMS: | |
2235 | case C_STARTING_SYNC_S: | |
2236 | case C_STARTING_SYNC_T: | |
2237 | case C_WF_BITMAP_S: | |
2238 | case C_WF_BITMAP_T: | |
2239 | case C_WF_SYNC_UUID: | |
2240 | case C_MASK: | |
2241 | /* not "stable" */ | |
2242 | return 0; | |
2243 | } | |
2244 | ||
2245 | switch ((enum drbd_disk_state)s.disk) { | |
2246 | case D_DISKLESS: | |
2247 | case D_INCONSISTENT: | |
2248 | case D_OUTDATED: | |
2249 | case D_CONSISTENT: | |
2250 | case D_UP_TO_DATE: | |
2251 | /* disk state is stable as well. */ | |
2252 | break; | |
2253 | ||
2254 | /* no new io accepted during transitional states */ | |
2255 | case D_ATTACHING: | |
2256 | case D_FAILED: | |
2257 | case D_NEGOTIATING: | |
2258 | case D_UNKNOWN: | |
2259 | case D_MASK: | |
2260 | /* not "stable" */ | |
2261 | return 0; | |
2262 | } | |
2263 | ||
2264 | return 1; | |
2265 | } | |
2266 | ||
2267 | static inline int is_susp(union drbd_state s) | |
2268 | { | |
2269 | return s.susp || s.susp_nod || s.susp_fen; | |
2270 | } | |
2271 | ||
2272 | static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) | |
2273 | { | |
2274 | int mxb = drbd_get_max_buffers(mdev); | |
2275 | ||
2276 | if (is_susp(mdev->state)) | |
2277 | return 0; | |
2278 | if (test_bit(SUSPEND_IO, &mdev->flags)) | |
2279 | return 0; | |
2280 | ||
2281 | /* to avoid potential deadlock or bitmap corruption, | |
2282 | * in various places, we only allow new application io | |
2283 | * to start during "stable" states. */ | |
2284 | ||
2285 | /* no new io accepted when attaching or detaching the disk */ | |
2286 | if (!drbd_state_is_stable(mdev->state)) | |
2287 | return 0; | |
2288 | ||
2289 | /* since some older kernels don't have atomic_add_unless, | |
2290 | * and we are within the spinlock anyway, we have this workaround. */ | |
2291 | if (atomic_read(&mdev->ap_bio_cnt) > mxb) | |
2292 | return 0; | |
2293 | if (test_bit(BITMAP_IO, &mdev->flags)) | |
2294 | return 0; | |
2295 | return 1; | |
2296 | } | |
2297 | ||
2298 | /* I'd like to use wait_event_lock_irq, | |
2299 | * but I'm not sure when it got introduced, | |
2300 | * and not sure whether it takes 3 or 4 arguments */ | |
2301 | static inline void inc_ap_bio(struct drbd_conf *mdev, int count) | |
2302 | { | |
2303 | /* compare with after_state_ch, | |
2304 | * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */ | |
2305 | DEFINE_WAIT(wait); | |
2306 | ||
2307 | /* we wait here | |
2308 | * as long as the device is suspended, | |
2309 | * while the bitmap is on the fly during connection handshake, | |
2310 | * and as long as we would exceed the max_buffers limit. | |
2311 | * | |
2312 | * to avoid races with the reconnect code, | |
2313 | * we need to atomic_inc within the spinlock. */ | |
2314 | ||
2315 | spin_lock_irq(&mdev->req_lock); | |
2316 | while (!__inc_ap_bio_cond(mdev)) { | |
2317 | prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE); | |
2318 | spin_unlock_irq(&mdev->req_lock); | |
2319 | schedule(); | |
2320 | finish_wait(&mdev->misc_wait, &wait); | |
2321 | spin_lock_irq(&mdev->req_lock); | |
2322 | } | |
2323 | atomic_add(count, &mdev->ap_bio_cnt); | |
2324 | spin_unlock_irq(&mdev->req_lock); | |
2325 | } | |
2326 | ||
2327 | static inline void dec_ap_bio(struct drbd_conf *mdev) | |
2328 | { | |
2329 | int mxb = drbd_get_max_buffers(mdev); | |
2330 | int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt); | |
2331 | ||
2332 | D_ASSERT(ap_bio >= 0); | |
2333 | /* this currently does wake_up for every dec_ap_bio! | |
2334 | * maybe rather introduce some type of hysteresis? | |
2335 | * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */ | |
2336 | if (ap_bio < mxb) | |
2337 | wake_up(&mdev->misc_wait); | |
2338 | if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) { | |
2339 | if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags)) | |
2340 | drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); | |
2341 | } | |
2342 | } | |
2343 | ||
2344 | static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val) | |
2345 | { | |
2346 | mdev->ed_uuid = val; | |
2347 | } | |
2348 | ||
2349 | static inline int seq_cmp(u32 a, u32 b) | |
2350 | { | |
2351 | /* we assume wrap around at 32bit. | |
2352 | * for wrap around at 24bit (old atomic_t), | |
2353 | * we'd have to | |
2354 | * a <<= 8; b <<= 8; | |
2355 | */ | |
2356 | return (s32)(a) - (s32)(b); | |
2357 | } | |
2358 | #define seq_lt(a, b) (seq_cmp((a), (b)) < 0) | |
2359 | #define seq_gt(a, b) (seq_cmp((a), (b)) > 0) | |
2360 | #define seq_ge(a, b) (seq_cmp((a), (b)) >= 0) | |
2361 | #define seq_le(a, b) (seq_cmp((a), (b)) <= 0) | |
2362 | /* CAUTION: please no side effects in arguments! */ | |
2363 | #define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b))) | |
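| /* Wrap around example: seq_cmp(5, 0xffffffff) == 5 - (-1) == 6 > 0, so | |
| * sequence number 5 is treated as newer than 0xffffffff, and | |
| * seq_max(5, 0xffffffff) yields 5. */ | |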
2364 | ||
2365 | static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq) | |
2366 | { | |
2367 | unsigned int m; | |
2368 | spin_lock(&mdev->peer_seq_lock); | |
2369 | m = seq_max(mdev->peer_seq, new_seq); | |
2370 | mdev->peer_seq = m; | |
2371 | spin_unlock(&mdev->peer_seq_lock); | |
2372 | if (m == new_seq) | |
2373 | wake_up(&mdev->seq_wait); | |
2374 | } | |
2375 | ||
2376 | static inline void drbd_update_congested(struct drbd_conf *mdev) | |
2377 | { | |
2378 | struct sock *sk = mdev->data.socket->sk; | |
2379 | if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5) | |
2380 | set_bit(NET_CONGESTED, &mdev->flags); | |
2381 | } | |
2382 | ||
2383 | static inline int drbd_queue_order_type(struct drbd_conf *mdev) | |
2384 | { | |
2385 | /* sorry, we currently have no working implementation | |
2386 | * of distributed TCQ stuff */ | |
2387 | #ifndef QUEUE_ORDERED_NONE | |
2388 | #define QUEUE_ORDERED_NONE 0 | |
2389 | #endif | |
2390 | return QUEUE_ORDERED_NONE; | |
2391 | } | |
2392 | ||
2393 | static inline void drbd_md_flush(struct drbd_conf *mdev) | |
2394 | { | |
2395 | int r; | |
2396 | ||
2397 | if (test_bit(MD_NO_FUA, &mdev->flags)) | |
2398 | return; | |
2399 | ||
2400 | r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); | |
2401 | if (r) { | |
2402 | set_bit(MD_NO_FUA, &mdev->flags); | |
2403 | dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); | |
2404 | } | |
2405 | } | |
2406 | ||
2407 | #endif |