/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;	/* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
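
/* Illustrative sketch, not part of the original file: walking the epoch
 * list described above from oldest to newest via the singly linked ->next
 * pointer. Assumes the caller holds tconn->req_lock, like the tl_*
 * helpers below do. */
static inline unsigned int tl_epoch_count(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;
	unsigned int n = 0;

	for (b = tconn->oldest_tle; b; b = b->next)
		n++;
	return n; /* always >= 1: there is at least one epoch object */
}
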
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
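	/* note (added): 4711 is just an arbitrary nonzero start value;
	 * barrier number 0 is special-cased, see _tl_add_barrier() */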
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}
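
/* Note (added): the GNU "x ?: y" shorthand above evaluates to x unless x is
 * zero; when the 32 bit barrier number wraps, (0xffffffff + 1) yields 0 and
 * the "?: 1" restarts the sequence at 1, skipping the reserved value 0. */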

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_del_init(&b->requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int minor;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	idr_for_each_entry(&tconn->volumes, mdev, minor)
		clear_bit(CREATE_BARRIER, &mdev->flags);

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}
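
/* Note (added): typical users of the two entry points above, per the
 * _tl_restart() description: tl_clear() passes CONNECTION_LOST_WHILE_PENDING
 * once the peer is gone, while a reconnect path would requeue still-pending
 * writes with tl_restart(tconn, RESEND). */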

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and the receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}

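/* Summary (added) of the t_state transitions implemented in
 * drbd_thread_setup() above and drbd_thread_start()/_drbd_thread_stop() below:
 *   NONE -> RUNNING        drbd_thread_start() spawns the kthread
 *   RUNNING -> EXITING     _drbd_thread_stop() (or RESTARTING, if restart)
 *   EXITING -> RESTARTING  drbd_thread_start() racing with thread exit
 *   RESTARTING -> RUNNING  drbd_thread_setup() loops back to restart:
 *   otherwise -> NONE      the thread really terminates */
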
static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	int minor = 0;

	if (!idr_get_next(&tconn->volumes, &minor))
		return -1;
	return minor;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}
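
/* Example (added): with four CPUs online, the connection whose lowest
 * device minor is 5 gets ord = 5 % 4 = 1 and is pinned to the second
 * online CPU, so connections spread round-robin over the online CPUs. */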

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

static void prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
}

static void prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
}

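/* Note (added): the h80 header has only a 16 bit length field, while h95
 * carries 32 bits; _prepare_header() below therefore switches to the h95
 * layout for payloads above DRBD_MAX_SIZE_H80_PACKET or for peers speaking
 * protocol version 100 and up. */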
static void _prepare_header(struct drbd_tconn *tconn, int vnr, struct p_header *h,
			    enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100 || size > DRBD_MAX_SIZE_H80_PACKET)
		prepare_header95(&h->h95, cmd, size);
	else
		prepare_header80(&h->h80, cmd, size);
}

static void prepare_header(struct drbd_conf *mdev, struct p_header *h,
			   enum drbd_packet cmd, int size)
{
	_prepare_header(mdev->tconn, mdev->vnr, h, cmd, size);
}

/* the appropriate socket mutex must be held already */
int _conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct socket *sock,
		   enum drbd_packet cmd, struct p_header *h, size_t size,
		   unsigned msg_flags)
{
	int sent, ok;

	_prepare_header(tconn, vnr, h, cmd, size - sizeof(struct p_header));

	sent = drbd_send(tconn, sock, h, size, msg_flags);

	ok = (sent == size);
	if (!ok && !signal_pending(current))
		conn_warn(tconn, "short sent %s size=%d sent=%d\n",
			  cmdname(cmd), (int)size, sent);
	return ok;
}

/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int conn_send_cmd(struct drbd_tconn *tconn, int vnr, int use_data_socket,
		  enum drbd_packet cmd, struct p_header *h, size_t size)
{
	int ok = 0;
	struct socket *sock;

	if (use_data_socket) {
		mutex_lock(&tconn->data.mutex);
		sock = tconn->data.socket;
	} else {
		mutex_lock(&tconn->meta.mutex);
		sock = tconn->meta.socket;
	}

	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (likely(sock != NULL))
		ok = _conn_send_cmd(tconn, vnr, sock, cmd, h, size, 0);

	if (use_data_socket)
		mutex_unlock(&tconn->data.mutex);
	else
		mutex_unlock(&tconn->meta.mutex);
	return ok;
}

int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd, char *data,
		   size_t size)
{
	struct p_header80 h;
	int ok;

	prepare_header80(&h, cmd, size);

	if (!drbd_get_data_sock(tconn))
		return 0;

	ok = (sizeof(h) ==
		drbd_send(tconn, tconn->data.socket, &h, sizeof(h), 0));
	ok = ok && (size ==
		drbd_send(tconn, tconn->data.socket, data, size, 0));

	drbd_put_data_sock(tconn);

	return ok;
}

int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
{
	struct p_rs_param_95 *p;
	struct socket *sock;
	int size, rv;
	const int apv = mdev->tconn->agreed_pro_version;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->sync_conf.verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	/* used from admin command context and receiver/worker context.
	 * to avoid kmalloc, grab the socket right here,
	 * then use the pre-allocated sbuf there */
	mutex_lock(&mdev->tconn->data.mutex);
	sock = mdev->tconn->data.socket;

	if (likely(sock != NULL)) {
		enum drbd_packet cmd =
			apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

		p = &mdev->tconn->data.sbuf.rs_param_95;

		/* initialize verify_alg and csums_alg */
		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

		p->rate = cpu_to_be32(sc->rate);
		p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(sc->c_delay_target);
		p->c_fill_target = cpu_to_be32(sc->c_fill_target);
		p->c_max_rate = cpu_to_be32(sc->c_max_rate);

		if (apv >= 88)
			strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
		if (apv >= 89)
			strcpy(p->csums_alg, mdev->sync_conf.csums_alg);

		rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
	} else
		rv = 0; /* not ok */

	mutex_unlock(&mdev->tconn->data.mutex);

	return rv;
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	struct p_protocol *p;
	int size, cf, rv;

	size = sizeof(struct p_protocol);

	if (tconn->agreed_pro_version >= 87)
		size += strlen(tconn->net_conf->integrity_alg) + 1;

	/* we must not recurse into our own queue,
	 * as that is blocked during handshake */
	p = kmalloc(size, GFP_NOIO);
	if (p == NULL)
		return 0;

	p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);

	cf = 0;
	if (tconn->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (tconn->net_conf->dry_run) {
		if (tconn->agreed_pro_version >= 92)
			cf |= CF_DRY_RUN;
		else {
			conn_err(tconn, "--dry-run is not supported by peer");
			kfree(p);
			return -1;
		}
	}
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);

	rv = conn_send_cmd2(tconn, P_PROTOCOL, p->head.payload, size - sizeof(struct p_header));
	kfree(p);
	return rv;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct p_uuids p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 1;

	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, &p.head, sizeof(p));
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct p_rs_uuid p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);
	p.uuid = cpu_to_be64(uuid);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, &p.head, sizeof(p));
}

926 | ||
e89b591c | 927 | int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags) |
b411b363 PR |
928 | { |
929 | struct p_sizes p; | |
930 | sector_t d_size, u_size; | |
99432fcc | 931 | int q_order_type, max_bio_size; |
b411b363 PR |
932 | int ok; |
933 | ||
934 | if (get_ldev_if_state(mdev, D_NEGOTIATING)) { | |
935 | D_ASSERT(mdev->ldev->backing_bdev); | |
936 | d_size = drbd_get_max_capacity(mdev->ldev); | |
937 | u_size = mdev->ldev->dc.disk_size; | |
938 | q_order_type = drbd_queue_order_type(mdev); | |
99432fcc PR |
939 | max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; |
940 | max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE); | |
b411b363 PR |
941 | put_ldev(mdev); |
942 | } else { | |
943 | d_size = 0; | |
944 | u_size = 0; | |
945 | q_order_type = QUEUE_ORDERED_NONE; | |
99432fcc | 946 | max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */ |
b411b363 PR |
947 | } |
948 | ||
949 | p.d_size = cpu_to_be64(d_size); | |
950 | p.u_size = cpu_to_be64(u_size); | |
951 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); | |
99432fcc | 952 | p.max_bio_size = cpu_to_be32(max_bio_size); |
e89b591c PR |
953 | p.queue_order_type = cpu_to_be16(q_order_type); |
954 | p.dds_flags = cpu_to_be16(flags); | |
b411b363 | 955 | |
c012949a | 956 | ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, &p.head, sizeof(p)); |
b411b363 PR |
957 | return ok; |
958 | } | |
959 | ||
/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct socket *sock;
	struct p_state p;
	int ok = 0;

	mutex_lock(&mdev->tconn->data.mutex);

	p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	sock = mdev->tconn->data.socket;

	if (likely(sock != NULL)) {
		ok = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
	}

	mutex_unlock(&mdev->tconn->data.mutex);

	return ok;
}

int _conn_send_state_req(struct drbd_tconn *tconn, int vnr, enum drbd_packet cmd,
			 union drbd_state mask, union drbd_state val)
{
	struct p_req_state p;

	p.mask = cpu_to_be32(mask.i);
	p.val  = cpu_to_be32(val.i);

	return conn_send_cmd(tconn, vnr, USE_DATA_SOCKET, cmd, &p.head, sizeof(p));
}

int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct p_req_state_reply p;

	p.retcode = cpu_to_be32(retcode);

	return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, &p.head, sizeof(p));
}

int conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct p_req_state_reply p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	p.retcode = cpu_to_be32(retcode);

	return conn_send_cmd(tconn, 0, USE_META_SOCKET, cmd, &p.head, sizeof(p));
}

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits;

	/* may we use this feature? */
	if ((mdev->sync_conf.use_rle == 0) ||
	    (mdev->tconn->agreed_pro_version < 90))
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
	memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				DCBP_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			DCBP_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
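
/* Worked example (added, illustrative): a stretch of bitmap starting with
 * four clear bits followed by six set bits is encoded as DCBP start = 0
 * (the first run describes clear bits) plus the VLI-coded run lengths
 * 4, 6, ... The compressed form is only used when those code bytes cover
 * more plain bits than they occupy, i.e. when the
 * "plain_bits < (len << 3)" rewind above did not trigger. */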

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
			 struct p_header *h, struct bm_xfer_ctx *c)
{
	struct p_compressed_bm *p = (void*)h;
	unsigned long num_words;
	int len;
	int ok;

	len = fill_bitmap_rle_bits(mdev, p, c);

	if (len < 0)
		return -EIO;

	if (len) {
		DCBP_set_code(p, RLE_VLI_Bits);
		ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_COMPRESSED_BITMAP, h,
				    sizeof(*p) + len, 0);

		c->packets[0]++;
		c->bytes[0] += sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
		len = num_words * sizeof(long);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
		ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BITMAP,
				    h, sizeof(struct p_header80) + len, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += sizeof(struct p_header80) + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (ok) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	struct p_header *p;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	p = (struct p_header *) __get_free_page(GFP_NOIO);
	if (!p) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		return false;
	}

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, p, &c);
	} while (err > 0);

	free_page((unsigned long) p);
	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	int err;

	if (!drbd_get_data_sock(mdev->tconn))
		return -1;
	err = !_drbd_send_bitmap(mdev);
	drbd_put_data_sock(mdev->tconn);
	return err;
}

int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
	int ok;
	struct p_barrier_ack p;

	p.barrier  = barrier_nr;
	p.set_size = cpu_to_be32(set_size);

	if (mdev->state.conn < C_CONNECTED)
		return false;
	ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, &p.head, sizeof(p));
	return ok;
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in bytes, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	int ok;
	struct p_block_ack p;

	p.sector   = sector;
	p.block_id = block_id;
	p.blksize  = blksize;
	p.seq_num  = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));

	if (!mdev->tconn->meta.socket || mdev->state.conn < C_CONNECTED)
		return false;
	ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, &p.head, sizeof(p));
	return ok;
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		     struct p_data *dp, int data_size)
{
	data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
	return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
			      dp->block_id);
}

int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		     struct p_block_req *rp)
{
	return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	int ok;
	struct p_block_req p;

	p.sector   = cpu_to_be64(sector);
	p.block_id = block_id;
	p.blksize  = cpu_to_be32(size);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &p.head, sizeof(p));
	return ok;
}

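/* Note (added): unlike drbd_send_drequest() above, the csum variant below
 * issues two drbd_send() calls, header then digest; holding data.mutex
 * across both keeps them back to back in the data stream. */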
int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	int ok;
	struct p_block_req p;

	prepare_header(mdev, &p.head, cmd, sizeof(p) - sizeof(struct p_header) + digest_size);
	p.sector   = cpu_to_be64(sector);
	p.block_id = ID_SYNCER /* unused */;
	p.blksize  = cpu_to_be32(size);

	mutex_lock(&mdev->tconn->data.mutex);

	ok = (sizeof(p) == drbd_send(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), 0));
	ok = ok && (digest_size == drbd_send(mdev->tconn, mdev->tconn->data.socket, digest, digest_size, 0));

	mutex_unlock(&mdev->tconn->data.mutex);

	return ok;
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	int ok;
	struct p_block_req p;

	p.sector   = cpu_to_be64(sector);
	p.block_id = ID_SYNCER /* unused */;
	p.blksize  = cpu_to_be32(size);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, &p.head, sizeof(p));
	return ok;
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--tconn->ko_count;
	if (!drop_it) {
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
}

static void drbd_update_congested(struct drbd_tconn *tconn)
{
	struct sock *sk = tconn->data.socket->sk;
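	/* note (added): consider the connection congested once the socket
	 * send queue has filled 4/5 (80%) of the configured send buffer */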
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
1411 | static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, | |
ba11ad9a | 1412 | int offset, size_t size, unsigned msg_flags) |
b411b363 | 1413 | { |
bedbd2a5 | 1414 | int sent = drbd_send(mdev->tconn, mdev->tconn->data.socket, kmap(page) + offset, size, msg_flags); |
b411b363 PR |
1415 | kunmap(page); |
1416 | if (sent == size) | |
1417 | mdev->send_cnt += size>>9; | |
1418 | return sent == size; | |
1419 | } | |
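
/*
 * Illustrative sketch (not part of the driver): the "is sendpage safe for
 * this page?" test that _drbd_send_page() below applies before falling
 * back to the kmap()-based _drbd_no_send_page() above, pulled out into a
 * hypothetical helper.  Slab pages and pages with page_count == 0 must
 * not go through sendpage, because its get_page()/put_page() pair would
 * corrupt their lifetime.
 */
static inline bool drbd_may_use_sendpage(struct page *page) /* hypothetical */
{
	return !disable_sendpage && page_count(page) >= 1 && !PageSlab(page);
}
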
1420 | ||
1421 | static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, | |
ba11ad9a | 1422 | int offset, size_t size, unsigned msg_flags) |
b411b363 PR |
1423 | { |
1424 | mm_segment_t oldfs = get_fs(); | |
1425 | int sent, ok; | |
1426 | int len = size; | |
1427 | ||
1428 | /* e.g. XFS meta- & log-data is in slab pages, which have a | |
1429 | * page_count of 0 and/or have PageSlab() set. | |
1430 | * we cannot use send_page for those, as that does get_page(); | |
1431 | * put_page(); and would cause either a VM_BUG directly, or | |
1432 | * __page_cache_release a page that would actually still be referenced | |
1433 | * by someone, leading to some obscure delayed Oops somewhere else. */ | |
1434 | if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) | |
ba11ad9a | 1435 | return _drbd_no_send_page(mdev, page, offset, size, msg_flags); |
b411b363 | 1436 | |
ba11ad9a | 1437 | msg_flags |= MSG_NOSIGNAL; |
1a7ba646 | 1438 | drbd_update_congested(mdev->tconn); |
b411b363 PR |
1439 | set_fs(KERNEL_DS); |
1440 | do { | |
e42325a5 | 1441 | sent = mdev->tconn->data.socket->ops->sendpage(mdev->tconn->data.socket, page, |
b411b363 | 1442 | offset, len, |
ba11ad9a | 1443 | msg_flags); |
b411b363 | 1444 | if (sent == -EAGAIN) { |
1a7ba646 | 1445 | if (we_should_drop_the_connection(mdev->tconn, |
e42325a5 | 1446 | mdev->tconn->data.socket)) |
b411b363 PR |
1447 | break; |
1448 | else | |
1449 | continue; | |
1450 | } | |
1451 | if (sent <= 0) { | |
1452 | dev_warn(DEV, "%s: size=%d len=%d sent=%d\n", | |
1453 | __func__, (int)size, len, sent); | |
1454 | break; | |
1455 | } | |
1456 | len -= sent; | |
1457 | offset += sent; | |
1458 | } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/); | |
1459 | set_fs(oldfs); | |
01a311a5 | 1460 | clear_bit(NET_CONGESTED, &mdev->tconn->flags); |
b411b363 PR |
1461 | |
1462 | ok = (len == 0); | |
1463 | if (likely(ok)) | |
1464 | mdev->send_cnt += size>>9; | |
1465 | return ok; | |
1466 | } | |
1467 | ||
1468 | static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) | |
1469 | { | |
1470 | struct bio_vec *bvec; | |
1471 | int i; | |
ba11ad9a | 1472 | /* hint all but last page with MSG_MORE */ |
b411b363 PR |
1473 | __bio_for_each_segment(bvec, bio, i, 0) { |
1474 | if (!_drbd_no_send_page(mdev, bvec->bv_page, | |
ba11ad9a LE |
1475 | bvec->bv_offset, bvec->bv_len, |
1476 | i == bio->bi_vcnt -1 ? 0 : MSG_MORE)) | |
b411b363 PR |
1477 | return 0; |
1478 | } | |
1479 | return 1; | |
1480 | } | |
1481 | ||
1482 | static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) | |
1483 | { | |
1484 | struct bio_vec *bvec; | |
1485 | int i; | |
ba11ad9a | 1486 | /* hint all but last page with MSG_MORE */ |
b411b363 PR |
1487 | __bio_for_each_segment(bvec, bio, i, 0) { |
1488 | if (!_drbd_send_page(mdev, bvec->bv_page, | |
ba11ad9a LE |
1489 | bvec->bv_offset, bvec->bv_len, |
1490 | i == bio->bi_vcnt -1 ? 0 : MSG_MORE)) | |
b411b363 PR |
1491 | return 0; |
1492 | } | |
b411b363 PR |
1493 | return 1; |
1494 | } | |
1495 | ||
db830c46 AG |
1496 | static int _drbd_send_zc_ee(struct drbd_conf *mdev, |
1497 | struct drbd_peer_request *peer_req) | |
45bb912b | 1498 | { |
db830c46 AG |
1499 | struct page *page = peer_req->pages; |
1500 | unsigned len = peer_req->i.size; | |
1501 | ||
ba11ad9a | 1502 | /* hint all but last page with MSG_MORE */ |
45bb912b LE |
1503 | page_chain_for_each(page) { |
1504 | unsigned l = min_t(unsigned, len, PAGE_SIZE); | |
ba11ad9a LE |
1505 | if (!_drbd_send_page(mdev, page, 0, l, |
1506 | page_chain_next(page) ? MSG_MORE : 0)) | |
45bb912b LE |
1507 | return 0; |
1508 | len -= l; | |
1509 | } | |
1510 | return 1; | |
1511 | } | |
1512 | ||
76d2e7ec PR |
1513 | static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw) |
1514 | { | |
31890f4a | 1515 | if (mdev->tconn->agreed_pro_version >= 95) |
76d2e7ec | 1516 | return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | |
76d2e7ec PR |
1517 | (bi_rw & REQ_FUA ? DP_FUA : 0) | |
1518 | (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | | |
1519 | (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); | |
1520 | else | |
721a9602 | 1521 | return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; |
76d2e7ec PR |
1522 | } |
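
/*
 * Illustrative sketch (not part of the driver): what bio_flags_to_wire()
 * yields for a flush+FUA write.  example_wire_flags() is a hypothetical
 * helper, not a function of the driver.
 */
static u32 example_wire_flags(struct drbd_conf *mdev) /* hypothetical */
{
	/* a barrier-style write: synchronous, flush before, FUA after */
	unsigned long bi_rw = REQ_SYNC | REQ_FLUSH | REQ_FUA;

	/* with agreed_pro_version >= 95 this yields
	 * DP_RW_SYNC | DP_FLUSH | DP_FUA;
	 * an older peer only ever sees DP_RW_SYNC */
	return bio_flags_to_wire(mdev, bi_rw);
}
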
1523 | ||
b411b363 PR |
1524 | /* Used to send write requests |
1525 | * R_PRIMARY -> Peer (P_DATA) | |
1526 | */ | |
1527 | int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | |
1528 | { | |
1529 | int ok = 1; | |
1530 | struct p_data p; | |
1531 | unsigned int dp_flags = 0; | |
1532 | void *dgb; | |
1533 | int dgs; | |
1534 | ||
61120870 | 1535 | if (!drbd_get_data_sock(mdev->tconn)) |
b411b363 PR |
1536 | return 0; |
1537 | ||
a0638456 PR |
1538 | dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ? |
1539 | crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0; | |
b411b363 | 1540 | |
fd340c12 | 1541 | prepare_header(mdev, &p.head, P_DATA, sizeof(p) - sizeof(struct p_header) + dgs + req->i.size); |
ace652ac | 1542 | p.sector = cpu_to_be64(req->i.sector); |
b411b363 | 1543 | p.block_id = (unsigned long)req; |
8ccf218e | 1544 | p.seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq)); |
b411b363 | 1545 | |
76d2e7ec PR |
1546 | dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); |
1547 | ||
b411b363 PR |
1548 | if (mdev->state.conn >= C_SYNC_SOURCE && |
1549 | mdev->state.conn <= C_PAUSED_SYNC_T) | |
1550 | dp_flags |= DP_MAY_SET_IN_SYNC; | |
1551 | ||
1552 | p.dp_flags = cpu_to_be32(dp_flags); | |
b411b363 PR |
1553 | set_bit(UNPLUG_REMOTE, &mdev->flags); |
1554 | ok = (sizeof(p) == | |
bedbd2a5 | 1555 | drbd_send(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0)); |
b411b363 | 1556 | if (ok && dgs) { |
a0638456 PR |
1557 | dgb = mdev->tconn->int_dig_out; |
1558 | drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, dgb); | |
bedbd2a5 | 1559 | ok = dgs == drbd_send(mdev->tconn, mdev->tconn->data.socket, dgb, dgs, 0); |
b411b363 PR |
1560 | } |
1561 | if (ok) { | |
470be44a LE |
1562 | /* For protocol A, we have to memcpy the payload into |
1563 | * socket buffers, as the request may complete as soon |
1564 | * as we have handed it over to tcp, at which point the data |
1565 | * pages may become invalid. |
1566 | * |
1567 | * With data integrity enabled, we copy it as well, so we can be |
1568 | * sure that even if the bio pages are still being modified, this |
1569 | * won't change the data on the wire; thus if the digest checks |
1570 | * out ok after sending on this side, but does not match on the |
1571 | * receiving side, we have certainly detected corruption elsewhere. |
1572 | */ | |
89e58e75 | 1573 | if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs) |
b411b363 PR |
1574 | ok = _drbd_send_bio(mdev, req->master_bio); |
1575 | else | |
1576 | ok = _drbd_send_zc_bio(mdev, req->master_bio); | |
470be44a LE |
1577 | |
1578 | /* double check digest, sometimes buffers have been modified in flight. */ | |
1579 | if (dgs > 0 && dgs <= 64) { | |
24c4830c | 1580 | /* 64 byte, 512 bit, is the largest digest size |
470be44a LE |
1581 | * currently supported in kernel crypto. */ |
1582 | unsigned char digest[64]; | |
a0638456 PR |
1583 | drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest); |
1584 | if (memcmp(mdev->tconn->int_dig_out, digest, dgs)) { | |
470be44a LE |
1585 | dev_warn(DEV, |
1586 | "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", | |
ace652ac | 1587 | (unsigned long long)req->i.sector, req->i.size); |
470be44a LE |
1588 | } |
1589 | } /* else if (dgs > 64) { | |
1590 | ... Be noisy about digest too large ... | |
1591 | } */ | |
b411b363 PR |
1592 | } |
1593 | ||
61120870 | 1594 | drbd_put_data_sock(mdev->tconn); |
bd26bfc5 | 1595 | |
b411b363 PR |
1596 | return ok; |
1597 | } | |
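
/*
 * Illustrative sketch (not part of the driver): on-the-wire layout of a
 * P_DATA packet as assembled by drbd_send_dblock() above, assuming a
 * negotiated digest size dgs > 0:
 *
 *   +----------------------+--------------------+------------------------+
 *   | struct p_data        | digest, dgs bytes  | payload, req->i.size   |
 *   | (includes p_header)  | csum of bio pages  | bytes of bio pages     |
 *   +----------------------+--------------------+------------------------+
 *
 * The header's length field covers digest plus payload, which is why it
 * is computed as sizeof(p) - sizeof(struct p_header) + dgs + req->i.size.
 */
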
1598 | ||
1599 | /* answer packet, used to send data back for read requests: | |
1600 | * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY) | |
1601 | * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY) | |
1602 | */ | |
d8763023 | 1603 | int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd, |
db830c46 | 1604 | struct drbd_peer_request *peer_req) |
b411b363 PR |
1605 | { |
1606 | int ok; | |
1607 | struct p_data p; | |
1608 | void *dgb; | |
1609 | int dgs; | |
1610 | ||
a0638456 PR |
1611 | dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ? |
1612 | crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0; | |
b411b363 | 1613 | |
db830c46 AG |
1614 | prepare_header(mdev, &p.head, cmd, sizeof(p) - |
1615 | sizeof(struct p_header80) + | |
1616 | dgs + peer_req->i.size); | |
1617 | p.sector = cpu_to_be64(peer_req->i.sector); | |
1618 | p.block_id = peer_req->block_id; | |
cc378270 | 1619 | p.seq_num = 0; /* unused */ |
b411b363 PR |
1620 | |
1621 | /* Only called by our kernel thread. | |
1622 | * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL | |
1623 | * in response to admin command or module unload. | |
1624 | */ | |
61120870 | 1625 | if (!drbd_get_data_sock(mdev->tconn)) |
b411b363 PR |
1626 | return 0; |
1627 | ||
bedbd2a5 | 1628 | ok = sizeof(p) == drbd_send(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0); |
b411b363 | 1629 | if (ok && dgs) { |
a0638456 | 1630 | dgb = mdev->tconn->int_dig_out; |
db830c46 | 1631 | drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, dgb); |
bedbd2a5 | 1632 | ok = dgs == drbd_send(mdev->tconn, mdev->tconn->data.socket, dgb, dgs, 0); |
b411b363 PR |
1633 | } |
1634 | if (ok) | |
db830c46 | 1635 | ok = _drbd_send_zc_ee(mdev, peer_req); |
b411b363 | 1636 | |
61120870 | 1637 | drbd_put_data_sock(mdev->tconn); |
bd26bfc5 | 1638 | |
b411b363 PR |
1639 | return ok; |
1640 | } | |
1641 | ||
73a01a18 PR |
1642 | int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req) |
1643 | { | |
1644 | struct p_block_desc p; | |
1645 | ||
ace652ac AG |
1646 | p.sector = cpu_to_be64(req->i.sector); |
1647 | p.blksize = cpu_to_be32(req->i.size); | |
73a01a18 PR |
1648 | |
1649 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p)); | |
1650 | } | |
1651 | ||
b411b363 PR |
1652 | /* |
1653 | drbd_send distinguishes two cases: | |
1654 | ||
1655 | Packets sent via the data socket "sock" | |
1656 | and packets sent via the meta data socket "msock" | |
1657 | ||
1658 |                   sock                      msock |
1659 | -----------------+-------------------------+------------------------------ |
1660 | timeout           conf.timeout / 2          conf.timeout / 2 |
1661 | timeout action    send a ping via msock     Abort communication |
1662 |                                             and close all sockets |
1663 | */ | |
1664 | ||
1665 | /* | |
1666 | * you must have down()ed the appropriate [m]sock_mutex elsewhere! | |
1667 | */ | |
bedbd2a5 | 1668 | int drbd_send(struct drbd_tconn *tconn, struct socket *sock, |
b411b363 PR |
1669 | void *buf, size_t size, unsigned msg_flags) |
1670 | { | |
1671 | struct kvec iov; | |
1672 | struct msghdr msg; | |
1673 | int rv, sent = 0; | |
1674 | ||
1675 | if (!sock) | |
1676 | return -1000; | |
1677 | ||
1678 | /* THINK if (signal_pending) return ... ? */ | |
1679 | ||
1680 | iov.iov_base = buf; | |
1681 | iov.iov_len = size; | |
1682 | ||
1683 | msg.msg_name = NULL; | |
1684 | msg.msg_namelen = 0; | |
1685 | msg.msg_control = NULL; | |
1686 | msg.msg_controllen = 0; | |
1687 | msg.msg_flags = msg_flags | MSG_NOSIGNAL; | |
1688 | ||
bedbd2a5 PR |
1689 | if (sock == tconn->data.socket) { |
1690 | tconn->ko_count = tconn->net_conf->ko_count; | |
1691 | drbd_update_congested(tconn); | |
b411b363 PR |
1692 | } |
1693 | do { | |
1694 | /* STRANGE | |
1695 | * tcp_sendmsg does _not_ use its size parameter at all ? | |
1696 | * | |
1697 | * -EAGAIN on timeout, -EINTR on signal. | |
1698 | */ | |
1699 | /* THINK | |
1700 | * do we need to block DRBD_SIG if sock == &meta.socket ?? | |
1701 | * otherwise wake_asender() might interrupt some send_*Ack ! | |
1702 | */ | |
1703 | rv = kernel_sendmsg(sock, &msg, &iov, 1, size); | |
1704 | if (rv == -EAGAIN) { | |
bedbd2a5 | 1705 | if (we_should_drop_the_connection(tconn, sock)) |
b411b363 PR |
1706 | break; |
1707 | else | |
1708 | continue; | |
1709 | } | |
b411b363 PR |
1710 | if (rv == -EINTR) { |
1711 | flush_signals(current); | |
1712 | rv = 0; | |
1713 | } | |
1714 | if (rv < 0) | |
1715 | break; | |
1716 | sent += rv; | |
1717 | iov.iov_base += rv; | |
1718 | iov.iov_len -= rv; | |
1719 | } while (sent < size); | |
1720 | ||
bedbd2a5 PR |
1721 | if (sock == tconn->data.socket) |
1722 | clear_bit(NET_CONGESTED, &tconn->flags); | |
b411b363 PR |
1723 | |
1724 | if (rv <= 0) { | |
1725 | if (rv != -EAGAIN) { | |
bedbd2a5 PR |
1726 | conn_err(tconn, "%s_sendmsg returned %d\n", |
1727 | sock == tconn->meta.socket ? "msock" : "sock", | |
1728 | rv); | |
bbeb641c | 1729 | conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD); |
b411b363 | 1730 | } else |
bbeb641c | 1731 | conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD); |
b411b363 PR |
1732 | } |
1733 | ||
1734 | return sent; | |
1735 | } | |
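
/*
 * Illustrative sketch (not part of the driver): the calling convention of
 * drbd_send().  Per the comment above, the caller must hold the mutex of
 * the queue it sends on; compare drbd_send_drequest_csum() further up.
 * example_send_locked() is a hypothetical helper.
 */
static int example_send_locked(struct drbd_tconn *tconn,
			       void *buf, size_t size)
{
	int sent;

	mutex_lock(&tconn->data.mutex);
	sent = drbd_send(tconn, tconn->data.socket, buf, size, 0);
	mutex_unlock(&tconn->data.mutex);

	/* drbd_send() returns the number of bytes sent, or < 0 on error */
	return sent == size;
}
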
1736 | ||
1737 | static int drbd_open(struct block_device *bdev, fmode_t mode) | |
1738 | { | |
1739 | struct drbd_conf *mdev = bdev->bd_disk->private_data; | |
1740 | unsigned long flags; | |
1741 | int rv = 0; | |
1742 | ||
2a48fc0a | 1743 | mutex_lock(&drbd_main_mutex); |
87eeee41 | 1744 | spin_lock_irqsave(&mdev->tconn->req_lock, flags); |
b411b363 PR |
1745 | /* to have a stable mdev->state.role |
1746 | * and no race with updating open_cnt */ | |
1747 | ||
1748 | if (mdev->state.role != R_PRIMARY) { | |
1749 | if (mode & FMODE_WRITE) | |
1750 | rv = -EROFS; | |
1751 | else if (!allow_oos) | |
1752 | rv = -EMEDIUMTYPE; | |
1753 | } | |
1754 | ||
1755 | if (!rv) | |
1756 | mdev->open_cnt++; | |
87eeee41 | 1757 | spin_unlock_irqrestore(&mdev->tconn->req_lock, flags); |
2a48fc0a | 1758 | mutex_unlock(&drbd_main_mutex); |
b411b363 PR |
1759 | |
1760 | return rv; | |
1761 | } | |
1762 | ||
1763 | static int drbd_release(struct gendisk *gd, fmode_t mode) | |
1764 | { | |
1765 | struct drbd_conf *mdev = gd->private_data; | |
2a48fc0a | 1766 | mutex_lock(&drbd_main_mutex); |
b411b363 | 1767 | mdev->open_cnt--; |
2a48fc0a | 1768 | mutex_unlock(&drbd_main_mutex); |
b411b363 PR |
1769 | return 0; |
1770 | } | |
1771 | ||
b411b363 PR |
1772 | static void drbd_set_defaults(struct drbd_conf *mdev) |
1773 | { | |
85f4cc17 PR |
1774 | /* This way we get a compile error when sync_conf grows |
1775 | and we forget to initialize it here */ |
1776 | mdev->sync_conf = (struct syncer_conf) { | |
1777 | /* .rate = */ DRBD_RATE_DEF, | |
1778 | /* .after = */ DRBD_AFTER_DEF, | |
1779 | /* .al_extents = */ DRBD_AL_EXTENTS_DEF, | |
85f4cc17 PR |
1780 | /* .verify_alg = */ {}, 0, |
1781 | /* .cpu_mask = */ {}, 0, | |
1782 | /* .csums_alg = */ {}, 0, | |
e756414f | 1783 | /* .use_rle = */ 0, |
9a31d716 PR |
1784 | /* .on_no_data = */ DRBD_ON_NO_DATA_DEF, |
1785 | /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF, | |
1786 | /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF, | |
1787 | /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF, | |
0f0601f4 LE |
1788 | /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF, |
1789 | /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF | |
85f4cc17 PR |
1790 | }; |
1791 | ||
1792 | /* Have to do it this way, because the bitfield layout differs |
1793 | between big-endian and little-endian machines */ |
b411b363 PR |
1794 | mdev->state = (union drbd_state) { |
1795 | { .role = R_SECONDARY, | |
1796 | .peer = R_UNKNOWN, | |
1797 | .conn = C_STANDALONE, | |
1798 | .disk = D_DISKLESS, | |
1799 | .pdsk = D_UNKNOWN, | |
fb22c402 PR |
1800 | .susp = 0, |
1801 | .susp_nod = 0, | |
1802 | .susp_fen = 0 | |
b411b363 PR |
1803 | } }; |
1804 | } | |
1805 | ||
1806 | void drbd_init_set_defaults(struct drbd_conf *mdev) | |
1807 | { | |
1808 | /* the memset(,0,) did most of this. | |
1809 | * note: only assignments, no allocation in here */ | |
1810 | ||
1811 | drbd_set_defaults(mdev); | |
1812 | ||
b411b363 PR |
1813 | atomic_set(&mdev->ap_bio_cnt, 0); |
1814 | atomic_set(&mdev->ap_pending_cnt, 0); | |
1815 | atomic_set(&mdev->rs_pending_cnt, 0); | |
1816 | atomic_set(&mdev->unacked_cnt, 0); | |
1817 | atomic_set(&mdev->local_cnt, 0); | |
b411b363 | 1818 | atomic_set(&mdev->pp_in_use, 0); |
435f0740 | 1819 | atomic_set(&mdev->pp_in_use_by_net, 0); |
778f271d | 1820 | atomic_set(&mdev->rs_sect_in, 0); |
0f0601f4 | 1821 | atomic_set(&mdev->rs_sect_ev, 0); |
759fbdfb | 1822 | atomic_set(&mdev->ap_in_flight, 0); |
b411b363 PR |
1823 | |
1824 | mutex_init(&mdev->md_io_mutex); | |
8410da8f PR |
1825 | mutex_init(&mdev->own_state_mutex); |
1826 | mdev->state_mutex = &mdev->own_state_mutex; | |
b411b363 | 1827 | |
b411b363 | 1828 | spin_lock_init(&mdev->al_lock); |
b411b363 PR |
1829 | spin_lock_init(&mdev->peer_seq_lock); |
1830 | spin_lock_init(&mdev->epoch_lock); | |
1831 | ||
1832 | INIT_LIST_HEAD(&mdev->active_ee); | |
1833 | INIT_LIST_HEAD(&mdev->sync_ee); | |
1834 | INIT_LIST_HEAD(&mdev->done_ee); | |
1835 | INIT_LIST_HEAD(&mdev->read_ee); | |
1836 | INIT_LIST_HEAD(&mdev->net_ee); | |
1837 | INIT_LIST_HEAD(&mdev->resync_reads); | |
b411b363 PR |
1838 | INIT_LIST_HEAD(&mdev->resync_work.list); |
1839 | INIT_LIST_HEAD(&mdev->unplug_work.list); | |
e9e6f3ec | 1840 | INIT_LIST_HEAD(&mdev->go_diskless.list); |
b411b363 | 1841 | INIT_LIST_HEAD(&mdev->md_sync_work.list); |
c4752ef1 | 1842 | INIT_LIST_HEAD(&mdev->start_resync_work.list); |
b411b363 | 1843 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); |
0ced55a3 | 1844 | |
794abb75 | 1845 | mdev->resync_work.cb = w_resync_timer; |
b411b363 | 1846 | mdev->unplug_work.cb = w_send_write_hint; |
e9e6f3ec | 1847 | mdev->go_diskless.cb = w_go_diskless; |
b411b363 PR |
1848 | mdev->md_sync_work.cb = w_md_sync; |
1849 | mdev->bm_io_work.w.cb = w_bitmap_io; | |
370a43e7 | 1850 | mdev->start_resync_work.cb = w_start_resync; |
a21e9298 PR |
1851 | |
1852 | mdev->resync_work.mdev = mdev; | |
1853 | mdev->unplug_work.mdev = mdev; | |
1854 | mdev->go_diskless.mdev = mdev; | |
1855 | mdev->md_sync_work.mdev = mdev; | |
1856 | mdev->bm_io_work.w.mdev = mdev; | |
1857 | mdev->start_resync_work.mdev = mdev; | |
1858 | ||
b411b363 PR |
1859 | init_timer(&mdev->resync_timer); |
1860 | init_timer(&mdev->md_sync_timer); | |
370a43e7 | 1861 | init_timer(&mdev->start_resync_timer); |
7fde2be9 | 1862 | init_timer(&mdev->request_timer); |
b411b363 PR |
1863 | mdev->resync_timer.function = resync_timer_fn; |
1864 | mdev->resync_timer.data = (unsigned long) mdev; | |
1865 | mdev->md_sync_timer.function = md_sync_timer_fn; | |
1866 | mdev->md_sync_timer.data = (unsigned long) mdev; | |
370a43e7 PR |
1867 | mdev->start_resync_timer.function = start_resync_timer_fn; |
1868 | mdev->start_resync_timer.data = (unsigned long) mdev; | |
7fde2be9 PR |
1869 | mdev->request_timer.function = request_timer_fn; |
1870 | mdev->request_timer.data = (unsigned long) mdev; | |
b411b363 PR |
1871 | |
1872 | init_waitqueue_head(&mdev->misc_wait); | |
1873 | init_waitqueue_head(&mdev->state_wait); | |
1874 | init_waitqueue_head(&mdev->ee_wait); | |
1875 | init_waitqueue_head(&mdev->al_wait); | |
1876 | init_waitqueue_head(&mdev->seq_wait); | |
1877 | ||
fd340c12 | 1878 | /* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */ |
2451fc3b | 1879 | mdev->write_ordering = WO_bdev_flush; |
b411b363 | 1880 | mdev->resync_wenr = LC_FREE; |
99432fcc PR |
1881 | mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; |
1882 | mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; | |
b411b363 PR |
1883 | } |
1884 | ||
1885 | void drbd_mdev_cleanup(struct drbd_conf *mdev) | |
1886 | { | |
1d7734a0 | 1887 | int i; |
e6b3ea83 | 1888 | if (mdev->tconn->receiver.t_state != NONE) |
b411b363 | 1889 | dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", |
e6b3ea83 | 1890 | mdev->tconn->receiver.t_state); |
b411b363 PR |
1891 | |
1892 | /* no need to lock it, I'm the only thread alive */ | |
1893 | if (atomic_read(&mdev->current_epoch->epoch_size) != 0) | |
1894 | dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size)); | |
1895 | mdev->al_writ_cnt = | |
1896 | mdev->bm_writ_cnt = | |
1897 | mdev->read_cnt = | |
1898 | mdev->recv_cnt = | |
1899 | mdev->send_cnt = | |
1900 | mdev->writ_cnt = | |
1901 | mdev->p_size = | |
1902 | mdev->rs_start = | |
1903 | mdev->rs_total = | |
1d7734a0 LE |
1904 | mdev->rs_failed = 0; |
1905 | mdev->rs_last_events = 0; | |
0f0601f4 | 1906 | mdev->rs_last_sect_ev = 0; |
1d7734a0 LE |
1907 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
1908 | mdev->rs_mark_left[i] = 0; | |
1909 | mdev->rs_mark_time[i] = 0; | |
1910 | } | |
89e58e75 | 1911 | D_ASSERT(mdev->tconn->net_conf == NULL); |
b411b363 PR |
1912 | |
1913 | drbd_set_my_capacity(mdev, 0); | |
1914 | if (mdev->bitmap) { | |
1915 | /* maybe never allocated. */ | |
02d9a94b | 1916 | drbd_bm_resize(mdev, 0, 1); |
b411b363 PR |
1917 | drbd_bm_cleanup(mdev); |
1918 | } | |
1919 | ||
1920 | drbd_free_resources(mdev); | |
0778286a | 1921 | clear_bit(AL_SUSPENDED, &mdev->flags); |
b411b363 PR |
1922 | |
1923 | /* | |
1924 | * currently we call drbd_init_ee only on module load, so |
1925 | * we may call drbd_release_ee only on module unload! |
1926 | */ | |
1927 | D_ASSERT(list_empty(&mdev->active_ee)); | |
1928 | D_ASSERT(list_empty(&mdev->sync_ee)); | |
1929 | D_ASSERT(list_empty(&mdev->done_ee)); | |
1930 | D_ASSERT(list_empty(&mdev->read_ee)); | |
1931 | D_ASSERT(list_empty(&mdev->net_ee)); | |
1932 | D_ASSERT(list_empty(&mdev->resync_reads)); | |
e42325a5 PR |
1933 | D_ASSERT(list_empty(&mdev->tconn->data.work.q)); |
1934 | D_ASSERT(list_empty(&mdev->tconn->meta.work.q)); | |
b411b363 PR |
1935 | D_ASSERT(list_empty(&mdev->resync_work.list)); |
1936 | D_ASSERT(list_empty(&mdev->unplug_work.list)); | |
e9e6f3ec | 1937 | D_ASSERT(list_empty(&mdev->go_diskless.list)); |
2265b473 LE |
1938 | |
1939 | drbd_set_defaults(mdev); | |
b411b363 PR |
1940 | } |
1941 | ||
1942 | ||
1943 | static void drbd_destroy_mempools(void) | |
1944 | { | |
1945 | struct page *page; | |
1946 | ||
1947 | while (drbd_pp_pool) { | |
1948 | page = drbd_pp_pool; | |
1949 | drbd_pp_pool = (struct page *)page_private(page); | |
1950 | __free_page(page); | |
1951 | drbd_pp_vacant--; | |
1952 | } | |
1953 | ||
1954 | /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */ | |
1955 | ||
35abf594 LE |
1956 | if (drbd_md_io_page_pool) |
1957 | mempool_destroy(drbd_md_io_page_pool); | |
b411b363 PR |
1958 | if (drbd_ee_mempool) |
1959 | mempool_destroy(drbd_ee_mempool); | |
1960 | if (drbd_request_mempool) | |
1961 | mempool_destroy(drbd_request_mempool); | |
1962 | if (drbd_ee_cache) | |
1963 | kmem_cache_destroy(drbd_ee_cache); | |
1964 | if (drbd_request_cache) | |
1965 | kmem_cache_destroy(drbd_request_cache); | |
1966 | if (drbd_bm_ext_cache) | |
1967 | kmem_cache_destroy(drbd_bm_ext_cache); | |
1968 | if (drbd_al_ext_cache) | |
1969 | kmem_cache_destroy(drbd_al_ext_cache); | |
1970 | ||
35abf594 | 1971 | drbd_md_io_page_pool = NULL; |
b411b363 PR |
1972 | drbd_ee_mempool = NULL; |
1973 | drbd_request_mempool = NULL; | |
1974 | drbd_ee_cache = NULL; | |
1975 | drbd_request_cache = NULL; | |
1976 | drbd_bm_ext_cache = NULL; | |
1977 | drbd_al_ext_cache = NULL; | |
1978 | ||
1979 | return; | |
1980 | } | |
1981 | ||
1982 | static int drbd_create_mempools(void) | |
1983 | { | |
1984 | struct page *page; | |
1816a2b4 | 1985 | const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; |
b411b363 PR |
1986 | int i; |
1987 | ||
1988 | /* prepare our caches and mempools */ | |
1989 | drbd_request_mempool = NULL; | |
1990 | drbd_ee_cache = NULL; | |
1991 | drbd_request_cache = NULL; | |
1992 | drbd_bm_ext_cache = NULL; | |
1993 | drbd_al_ext_cache = NULL; | |
1994 | drbd_pp_pool = NULL; | |
35abf594 | 1995 | drbd_md_io_page_pool = NULL; |
b411b363 PR |
1996 | |
1997 | /* caches */ | |
1998 | drbd_request_cache = kmem_cache_create( | |
1999 | "drbd_req", sizeof(struct drbd_request), 0, 0, NULL); | |
2000 | if (drbd_request_cache == NULL) | |
2001 | goto Enomem; | |
2002 | ||
2003 | drbd_ee_cache = kmem_cache_create( | |
f6ffca9f | 2004 | "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL); |
b411b363 PR |
2005 | if (drbd_ee_cache == NULL) |
2006 | goto Enomem; | |
2007 | ||
2008 | drbd_bm_ext_cache = kmem_cache_create( | |
2009 | "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL); | |
2010 | if (drbd_bm_ext_cache == NULL) | |
2011 | goto Enomem; | |
2012 | ||
2013 | drbd_al_ext_cache = kmem_cache_create( | |
2014 | "drbd_al", sizeof(struct lc_element), 0, 0, NULL); | |
2015 | if (drbd_al_ext_cache == NULL) | |
2016 | goto Enomem; | |
2017 | ||
2018 | /* mempools */ | |
35abf594 LE |
2019 | drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0); |
2020 | if (drbd_md_io_page_pool == NULL) | |
2021 | goto Enomem; | |
2022 | ||
b411b363 PR |
2023 | drbd_request_mempool = mempool_create(number, |
2024 | mempool_alloc_slab, mempool_free_slab, drbd_request_cache); | |
2025 | if (drbd_request_mempool == NULL) | |
2026 | goto Enomem; | |
2027 | ||
2028 | drbd_ee_mempool = mempool_create(number, | |
2029 | mempool_alloc_slab, mempool_free_slab, drbd_ee_cache); | |
2027ae1f | 2030 | if (drbd_ee_mempool == NULL) |
b411b363 PR |
2031 | goto Enomem; |
2032 | ||
2033 | /* drbd's page pool */ | |
2034 | spin_lock_init(&drbd_pp_lock); | |
2035 | ||
2036 | for (i = 0; i < number; i++) { | |
2037 | page = alloc_page(GFP_HIGHUSER); | |
2038 | if (!page) | |
2039 | goto Enomem; | |
2040 | set_page_private(page, (unsigned long)drbd_pp_pool); | |
2041 | drbd_pp_pool = page; | |
2042 | } | |
2043 | drbd_pp_vacant = number; | |
2044 | ||
2045 | return 0; | |
2046 | ||
2047 | Enomem: | |
2048 | drbd_destroy_mempools(); /* in case we allocated some */ | |
2049 | return -ENOMEM; | |
2050 | } | |
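
/*
 * Illustrative sketch (not part of the driver): the page-pool sizing used
 * by drbd_create_mempools() above.  With the usual 1 MiB DRBD_MAX_BIO_SIZE
 * and 4 KiB pages, every configured minor reserves 256 pages up front.
 * All numbers below are example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long max_bio_size = 1024 * 1024; /* assumed DRBD_MAX_BIO_SIZE */
	unsigned long page_size = 4096;           /* assumed PAGE_SIZE */
	unsigned int minors = 32;                 /* example minor_count */

	unsigned long number = (max_bio_size / page_size) * minors;

	printf("preallocated pool: %lu pages (%lu KiB)\n",
	       number, number * page_size / 1024);
	return 0;
}
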
2051 | ||
2052 | static int drbd_notify_sys(struct notifier_block *this, unsigned long code, | |
2053 | void *unused) | |
2054 | { | |
2055 | /* just so we have it. you never know what interesting things we | |
2056 | * might want to do here some day... | |
2057 | */ | |
2058 | ||
2059 | return NOTIFY_DONE; | |
2060 | } | |
2061 | ||
2062 | static struct notifier_block drbd_notifier = { | |
2063 | .notifier_call = drbd_notify_sys, | |
2064 | }; | |
2065 | ||
2066 | static void drbd_release_ee_lists(struct drbd_conf *mdev) | |
2067 | { | |
2068 | int rr; | |
2069 | ||
2070 | rr = drbd_release_ee(mdev, &mdev->active_ee); | |
2071 | if (rr) | |
2072 | dev_err(DEV, "%d EEs in active list found!\n", rr); | |
2073 | ||
2074 | rr = drbd_release_ee(mdev, &mdev->sync_ee); | |
2075 | if (rr) | |
2076 | dev_err(DEV, "%d EEs in sync list found!\n", rr); | |
2077 | ||
2078 | rr = drbd_release_ee(mdev, &mdev->read_ee); | |
2079 | if (rr) | |
2080 | dev_err(DEV, "%d EEs in read list found!\n", rr); | |
2081 | ||
2082 | rr = drbd_release_ee(mdev, &mdev->done_ee); | |
2083 | if (rr) | |
2084 | dev_err(DEV, "%d EEs in done list found!\n", rr); | |
2085 | ||
2086 | rr = drbd_release_ee(mdev, &mdev->net_ee); | |
2087 | if (rr) | |
2088 | dev_err(DEV, "%d EEs in net list found!\n", rr); | |
2089 | } | |
2090 | ||
774b3055 PR |
2091 | /* caution. no locking. */ |
2092 | void drbd_delete_device(unsigned int minor) | |
b411b363 PR |
2093 | { |
2094 | struct drbd_conf *mdev = minor_to_mdev(minor); | |
2095 | ||
2096 | if (!mdev) | |
2097 | return; | |
2098 | ||
774b3055 PR |
2099 | idr_remove(&mdev->tconn->volumes, minor); |
2100 | ||
b411b363 | 2101 | /* paranoia asserts */ |
70dc65e1 | 2102 | D_ASSERT(mdev->open_cnt == 0); |
e42325a5 | 2103 | D_ASSERT(list_empty(&mdev->tconn->data.work.q)); |
b411b363 PR |
2104 | /* end paranoia asserts */ |
2105 | ||
2106 | del_gendisk(mdev->vdisk); | |
2107 | ||
2108 | /* cleanup stuff that may have been allocated during | |
2109 | * device (re-)configuration or state changes */ | |
2110 | ||
2111 | if (mdev->this_bdev) | |
2112 | bdput(mdev->this_bdev); | |
2113 | ||
2114 | drbd_free_resources(mdev); | |
2115 | ||
2116 | drbd_release_ee_lists(mdev); | |
2117 | ||
b411b363 PR |
2118 | lc_destroy(mdev->act_log); |
2119 | lc_destroy(mdev->resync); | |
2120 | ||
2121 | kfree(mdev->p_uuid); | |
2122 | /* mdev->p_uuid = NULL; */ | |
2123 | ||
b411b363 PR |
2124 | /* cleanup the rest that has been |
2125 | * allocated from drbd_new_device | |
2126 | * and actually free the mdev itself */ | |
2127 | drbd_free_mdev(mdev); | |
81a5d60e | 2128 | idr_remove(&minors, minor); |
b411b363 PR |
2129 | } |
2130 | ||
2131 | static void drbd_cleanup(void) | |
2132 | { | |
2133 | unsigned int i; | |
81a5d60e | 2134 | struct drbd_conf *mdev; |
b411b363 PR |
2135 | |
2136 | unregister_reboot_notifier(&drbd_notifier); | |
2137 | ||
17a93f30 LE |
2138 | /* first remove proc, |
2139 | * drbdsetup uses its presence to detect |
2140 | * whether DRBD is loaded. |
2141 | * If we got stuck in proc removal, |
2142 | * but had netlink already deregistered, |
2143 | * some drbdsetup commands could wait forever |
2144 | * for an answer. |
2145 | */ | |
2146 | if (drbd_proc) | |
2147 | remove_proc_entry("drbd", NULL); | |
2148 | ||
b411b363 PR |
2149 | drbd_nl_cleanup(); |
2150 | ||
81a5d60e PR |
2151 | idr_for_each_entry(&minors, mdev, i) |
2152 | drbd_delete_device(i); | |
2153 | drbd_destroy_mempools(); | |
b411b363 PR |
2154 | unregister_blkdev(DRBD_MAJOR, "drbd"); |
2155 | ||
81a5d60e PR |
2156 | idr_destroy(&minors); |
2157 | ||
b411b363 PR |
2158 | printk(KERN_INFO "drbd: module cleanup done.\n"); |
2159 | } | |
2160 | ||
2161 | /** | |
2162 | * drbd_congested() - Callback for pdflush | |
2163 | * @congested_data: User data | |
2164 | * @bdi_bits: Bits pdflush is currently interested in | |
2165 | * | |
2166 | * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. | |
2167 | */ | |
2168 | static int drbd_congested(void *congested_data, int bdi_bits) | |
2169 | { | |
2170 | struct drbd_conf *mdev = congested_data; | |
2171 | struct request_queue *q; | |
2172 | char reason = '-'; | |
2173 | int r = 0; | |
2174 | ||
1b881ef7 | 2175 | if (!may_inc_ap_bio(mdev)) { |
b411b363 PR |
2176 | /* DRBD has frozen IO */ |
2177 | r = bdi_bits; | |
2178 | reason = 'd'; | |
2179 | goto out; | |
2180 | } | |
2181 | ||
2182 | if (get_ldev(mdev)) { | |
2183 | q = bdev_get_queue(mdev->ldev->backing_bdev); | |
2184 | r = bdi_congested(&q->backing_dev_info, bdi_bits); | |
2185 | put_ldev(mdev); | |
2186 | if (r) | |
2187 | reason = 'b'; | |
2188 | } | |
2189 | ||
01a311a5 | 2190 | if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) { |
b411b363 PR |
2191 | r |= (1 << BDI_async_congested); |
2192 | reason = reason == 'b' ? 'a' : 'n'; | |
2193 | } | |
2194 | ||
2195 | out: | |
2196 | mdev->congestion_reason = reason; | |
2197 | return r; | |
2198 | } | |
2199 | ||
6699b655 PR |
2200 | static void drbd_init_workqueue(struct drbd_work_queue* wq) |
2201 | { | |
2202 | sema_init(&wq->s, 0); | |
2203 | spin_lock_init(&wq->q_lock); | |
2204 | INIT_LIST_HEAD(&wq->q); | |
2205 | } | |
2206 | ||
1aba4d7f PR |
2207 | struct drbd_tconn *conn_by_name(const char *name) |
2208 | { | |
2209 | struct drbd_tconn *tconn; | |
2210 | ||
2211 | write_lock_irq(&global_state_lock); | |
2212 | list_for_each_entry(tconn, &drbd_tconns, all_tconn) { | |
2213 | if (!strcmp(tconn->name, name)) | |
2214 | goto found; | |
2215 | } | |
2216 | tconn = NULL; | |
2217 | found: | |
2218 | write_unlock_irq(&global_state_lock); | |
2219 | return tconn; | |
2220 | } | |
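
/*
 * Illustrative sketch (not part of the driver): resolving a resource name
 * to its connection object, as a configuration handler might.  "r0" is
 * only an example name; conn_by_name() walks drbd_tconns under
 * global_state_lock, so the caller gets back a pointer it must not use
 * past the connection's lifetime.
 */
static struct drbd_tconn *example_find_resource(void) /* hypothetical */
{
	struct drbd_tconn *tconn = conn_by_name("r0");

	if (!tconn)
		printk(KERN_INFO "drbd: resource r0 not configured\n");
	return tconn;
}
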
2221 | ||
2111438b PR |
2222 | struct drbd_tconn *drbd_new_tconn(char *name) |
2223 | { | |
2224 | struct drbd_tconn *tconn; | |
2225 | ||
2226 | tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL); | |
2227 | if (!tconn) | |
2228 | return NULL; | |
2229 | ||
2230 | tconn->name = kstrdup(name, GFP_KERNEL); | |
2231 | if (!tconn->name) | |
2232 | goto fail; | |
2233 | ||
774b3055 PR |
2234 | if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL)) |
2235 | goto fail; | |
2236 | ||
2f5cdd0b PR |
2237 | if (!tl_init(tconn)) |
2238 | goto fail; | |
2239 | ||
bbeb641c | 2240 | tconn->cstate = C_STANDALONE; |
8410da8f | 2241 | mutex_init(&tconn->cstate_mutex); |
6699b655 | 2242 | spin_lock_init(&tconn->req_lock); |
b2fb6dbe PR |
2243 | atomic_set(&tconn->net_cnt, 0); |
2244 | init_waitqueue_head(&tconn->net_cnt_wait); | |
2a67d8b9 | 2245 | init_waitqueue_head(&tconn->ping_wait); |
062e879c | 2246 | idr_init(&tconn->volumes); |
b2fb6dbe | 2247 | |
6699b655 PR |
2248 | drbd_init_workqueue(&tconn->data.work); |
2249 | mutex_init(&tconn->data.mutex); | |
2250 | ||
2251 | drbd_init_workqueue(&tconn->meta.work); | |
2252 | mutex_init(&tconn->meta.mutex); | |
2253 | ||
392c8801 PR |
2254 | drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver"); |
2255 | drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker"); | |
2256 | drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender"); | |
2257 | ||
2111438b PR |
2258 | write_lock_irq(&global_state_lock); |
2259 | list_add(&tconn->all_tconn, &drbd_tconns); | |
2260 | write_unlock_irq(&global_state_lock); | |
2261 | ||
2262 | return tconn; | |
2263 | ||
2264 | fail: | |
2f5cdd0b | 2265 | tl_cleanup(tconn); |
774b3055 | 2266 | free_cpumask_var(tconn->cpu_mask); |
2111438b PR |
2267 | kfree(tconn->name); |
2268 | kfree(tconn); | |
2269 | ||
2270 | return NULL; | |
2271 | } | |
2272 | ||
2273 | void drbd_free_tconn(struct drbd_tconn *tconn) | |
2274 | { | |
2275 | write_lock_irq(&global_state_lock); | |
2276 | list_del(&tconn->all_tconn); | |
2277 | write_unlock_irq(&global_state_lock); | |
062e879c | 2278 | idr_destroy(&tconn->volumes); |
2111438b | 2279 | |
774b3055 | 2280 | free_cpumask_var(tconn->cpu_mask); |
2111438b | 2281 | kfree(tconn->name); |
b42a70ad PR |
2282 | kfree(tconn->int_dig_out); |
2283 | kfree(tconn->int_dig_in); | |
2284 | kfree(tconn->int_dig_vv); | |
2111438b PR |
2285 | kfree(tconn); |
2286 | } | |
2287 | ||
774b3055 | 2288 | enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr) |
b411b363 PR |
2289 | { |
2290 | struct drbd_conf *mdev; | |
2291 | struct gendisk *disk; | |
2292 | struct request_queue *q; | |
774b3055 | 2293 | int vnr_got = vnr; |
81a5d60e | 2294 | int minor_got = minor; |
774b3055 PR |
2295 | |
2296 | mdev = minor_to_mdev(minor); | |
2297 | if (mdev) | |
2298 | return ERR_MINOR_EXISTS; | |
b411b363 PR |
2299 | |
2300 | /* GFP_KERNEL, we are outside of all write-out paths */ | |
2301 | mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL); | |
2302 | if (!mdev) | |
774b3055 PR |
2303 | return ERR_NOMEM; |
2304 | ||
2305 | mdev->tconn = tconn; | |
2306 | if (!idr_pre_get(&tconn->volumes, GFP_KERNEL)) | |
2307 | goto out_no_idr; | |
2308 | if (idr_get_new(&tconn->volumes, mdev, &vnr_got)) | |
2309 | goto out_no_idr; | |
2310 | if (vnr_got != vnr) { | |
2311 | dev_err(DEV, "vnr_got (%d) != vnr (%d)\n", vnr_got, vnr); | |
2312 | goto out_no_q; | |
062e879c | 2313 | } |
b411b363 PR |
2314 | |
2315 | mdev->minor = minor; | |
2316 | ||
2317 | drbd_init_set_defaults(mdev); | |
2318 | ||
2319 | q = blk_alloc_queue(GFP_KERNEL); | |
2320 | if (!q) | |
2321 | goto out_no_q; | |
2322 | mdev->rq_queue = q; | |
2323 | q->queuedata = mdev; | |
b411b363 PR |
2324 | |
2325 | disk = alloc_disk(1); | |
2326 | if (!disk) | |
2327 | goto out_no_disk; | |
2328 | mdev->vdisk = disk; | |
2329 | ||
81e84650 | 2330 | set_disk_ro(disk, true); |
b411b363 PR |
2331 | |
2332 | disk->queue = q; | |
2333 | disk->major = DRBD_MAJOR; | |
2334 | disk->first_minor = minor; | |
2335 | disk->fops = &drbd_ops; | |
2336 | sprintf(disk->disk_name, "drbd%d", minor); | |
2337 | disk->private_data = mdev; | |
2338 | ||
2339 | mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor)); | |
2340 | /* we have no partitions. we contain only ourselves. */ | |
2341 | mdev->this_bdev->bd_contains = mdev->this_bdev; | |
2342 | ||
2343 | q->backing_dev_info.congested_fn = drbd_congested; | |
2344 | q->backing_dev_info.congested_data = mdev; | |
2345 | ||
2f58dcfc | 2346 | blk_queue_make_request(q, drbd_make_request); |
99432fcc PR |
2347 | /* Setting max_hw_sectors to an intentionally small 8 KiB here; |
2348 | this triggers a max_bio_size message upon first attach or connect */ |
2349 | blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); | |
b411b363 PR |
2350 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); |
2351 | blk_queue_merge_bvec(q, drbd_merge_bvec); | |
87eeee41 | 2352 | q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */ |
b411b363 PR |
2353 | |
2354 | mdev->md_io_page = alloc_page(GFP_KERNEL); | |
2355 | if (!mdev->md_io_page) | |
2356 | goto out_no_io_page; | |
2357 | ||
2358 | if (drbd_bm_init(mdev)) | |
2359 | goto out_no_bitmap; | |
dac1389c | 2360 | mdev->read_requests = RB_ROOT; |
de696716 | 2361 | mdev->write_requests = RB_ROOT; |
b411b363 | 2362 | |
b411b363 PR |
2363 | mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); |
2364 | if (!mdev->current_epoch) | |
2365 | goto out_no_epoch; | |
2366 | ||
2367 | INIT_LIST_HEAD(&mdev->current_epoch->list); | |
2368 | mdev->epochs = 1; | |
2369 | ||
81a5d60e PR |
2370 | if (!idr_pre_get(&minors, GFP_KERNEL)) |
2371 | goto out_no_minor_idr; | |
2372 | if (idr_get_new(&minors, mdev, &minor_got)) | |
2373 | goto out_no_minor_idr; | |
2374 | if (minor_got != minor) { | |
2375 | idr_remove(&minors, minor_got); | |
2376 | goto out_no_minor_idr; | |
2377 | } | |
774b3055 PR |
2378 | add_disk(disk); |
2379 | ||
2380 | return NO_ERROR; | |
b411b363 | 2381 | |
81a5d60e PR |
2382 | out_no_minor_idr: |
2383 | kfree(mdev->current_epoch); | |
b411b363 | 2384 | out_no_epoch: |
b411b363 PR |
2385 | drbd_bm_cleanup(mdev); |
2386 | out_no_bitmap: | |
2387 | __free_page(mdev->md_io_page); | |
2388 | out_no_io_page: | |
2389 | put_disk(disk); | |
2390 | out_no_disk: | |
2391 | blk_cleanup_queue(q); | |
2392 | out_no_q: | |
774b3055 PR |
2393 | idr_remove(&tconn->volumes, vnr_got); |
2394 | out_no_idr: | |
b411b363 | 2395 | kfree(mdev); |
774b3055 | 2396 | return ERR_NOMEM; |
b411b363 PR |
2397 | } |
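
/*
 * Illustrative sketch (not part of the driver): the two-step idr idiom
 * used twice in conn_new_minor() above.  idr_pre_get() only preloads
 * memory; the id is assigned by idr_get_new(), which reports the id it
 * actually picked back through its third argument.
 * example_idr_insert() is a hypothetical helper.
 */
static int example_idr_insert(struct idr *idr, void *ptr, int *id)
{
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;              /* preload failed */
	return idr_get_new(idr, ptr, id);    /* 0 on success */
}
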
2398 | ||
2399 | /* counterpart of drbd_new_device. | |
2400 | * last part of drbd_delete_device. */ | |
2401 | void drbd_free_mdev(struct drbd_conf *mdev) | |
2402 | { | |
2403 | kfree(mdev->current_epoch); | |
b411b363 PR |
2404 | if (mdev->bitmap) /* should no longer be there. */ |
2405 | drbd_bm_cleanup(mdev); | |
2406 | __free_page(mdev->md_io_page); | |
2407 | put_disk(mdev->vdisk); | |
2408 | blk_cleanup_queue(mdev->rq_queue); | |
b411b363 PR |
2409 | kfree(mdev); |
2410 | } | |
2411 | ||
2412 | ||
2413 | int __init drbd_init(void) | |
2414 | { | |
2415 | int err; | |
2416 | ||
fd340c12 PR |
2417 | BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95)); |
2418 | BUILD_BUG_ON(sizeof(struct p_handshake) != 80); | |
b411b363 | 2419 | |
2b8a90b5 | 2420 | if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { |
b411b363 | 2421 | printk(KERN_ERR |
81a5d60e | 2422 | "drbd: invalid minor_count (%d)\n", minor_count); |
b411b363 PR |
2423 | #ifdef MODULE |
2424 | return -EINVAL; | |
2425 | #else | |
2426 | minor_count = 8; | |
2427 | #endif | |
2428 | } | |
2429 | ||
2430 | err = drbd_nl_init(); | |
2431 | if (err) | |
2432 | return err; | |
2433 | ||
2434 | err = register_blkdev(DRBD_MAJOR, "drbd"); | |
2435 | if (err) { | |
2436 | printk(KERN_ERR | |
2437 | "drbd: unable to register block device major %d\n", | |
2438 | DRBD_MAJOR); | |
2439 | return err; | |
2440 | } | |
2441 | ||
2442 | register_reboot_notifier(&drbd_notifier); | |
2443 | ||
2444 | /* | |
2445 | * allocate all necessary structs | |
2446 | */ | |
2447 | err = -ENOMEM; | |
2448 | ||
2449 | init_waitqueue_head(&drbd_pp_wait); | |
2450 | ||
2451 | drbd_proc = NULL; /* play safe for drbd_cleanup */ | |
81a5d60e | 2452 | idr_init(&minors); |
b411b363 PR |
2453 | |
2454 | err = drbd_create_mempools(); | |
2455 | if (err) | |
2456 | goto Enomem; | |
2457 | ||
8c484ee4 | 2458 | drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL); |
b411b363 PR |
2459 | if (!drbd_proc) { |
2460 | printk(KERN_ERR "drbd: unable to register proc file\n"); | |
2461 | goto Enomem; | |
2462 | } | |
2463 | ||
2464 | rwlock_init(&global_state_lock); | |
2111438b | 2465 | INIT_LIST_HEAD(&drbd_tconns); |
b411b363 PR |
2466 | |
2467 | printk(KERN_INFO "drbd: initialized. " | |
2468 | "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", | |
2469 | API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX); | |
2470 | printk(KERN_INFO "drbd: %s\n", drbd_buildtag()); | |
2471 | printk(KERN_INFO "drbd: registered as block device major %d\n", | |
2472 | DRBD_MAJOR); | |
b411b363 PR |
2473 | |
2474 | return 0; /* Success! */ | |
2475 | ||
2476 | Enomem: | |
2477 | drbd_cleanup(); | |
2478 | if (err == -ENOMEM) | |
2479 | /* currently always the case */ | |
2480 | printk(KERN_ERR "drbd: ran out of memory\n"); | |
2481 | else | |
2482 | printk(KERN_ERR "drbd: initialization failure\n"); | |
2483 | return err; | |
2484 | } | |
2485 | ||
2486 | void drbd_free_bc(struct drbd_backing_dev *ldev) | |
2487 | { | |
2488 | if (ldev == NULL) | |
2489 | return; | |
2490 | ||
e525fd89 TH |
2491 | blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); |
2492 | blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); | |
b411b363 PR |
2493 | |
2494 | kfree(ldev); | |
2495 | } | |
2496 | ||
360cc740 PR |
2497 | void drbd_free_sock(struct drbd_tconn *tconn) |
2498 | { | |
2499 | if (tconn->data.socket) { | |
2500 | mutex_lock(&tconn->data.mutex); | |
2501 | kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR); | |
2502 | sock_release(tconn->data.socket); | |
2503 | tconn->data.socket = NULL; | |
2504 | mutex_unlock(&tconn->data.mutex); | |
b411b363 | 2505 | } |
360cc740 PR |
2506 | if (tconn->meta.socket) { |
2507 | mutex_lock(&tconn->meta.mutex); | |
2508 | kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR); | |
2509 | sock_release(tconn->meta.socket); | |
2510 | tconn->meta.socket = NULL; | |
2511 | mutex_unlock(&tconn->meta.mutex); | |
b411b363 PR |
2512 | } |
2513 | } | |
2514 | ||
2515 | ||
2516 | void drbd_free_resources(struct drbd_conf *mdev) | |
2517 | { | |
2518 | crypto_free_hash(mdev->csums_tfm); | |
2519 | mdev->csums_tfm = NULL; | |
2520 | crypto_free_hash(mdev->verify_tfm); | |
2521 | mdev->verify_tfm = NULL; | |
a0638456 PR |
2522 | crypto_free_hash(mdev->tconn->cram_hmac_tfm); |
2523 | mdev->tconn->cram_hmac_tfm = NULL; | |
2524 | crypto_free_hash(mdev->tconn->integrity_w_tfm); | |
2525 | mdev->tconn->integrity_w_tfm = NULL; | |
2526 | crypto_free_hash(mdev->tconn->integrity_r_tfm); | |
2527 | mdev->tconn->integrity_r_tfm = NULL; | |
b411b363 | 2528 | |
360cc740 | 2529 | drbd_free_sock(mdev->tconn); |
b411b363 PR |
2530 | |
2531 | __no_warn(local, | |
2532 | drbd_free_bc(mdev->ldev); | |
2533 | mdev->ldev = NULL;); | |
2534 | } | |
2535 | ||
2536 | /* meta data management */ | |
2537 | ||
2538 | struct meta_data_on_disk { | |
2539 | u64 la_size; /* last agreed size. */ | |
2540 | u64 uuid[UI_SIZE]; /* UUIDs. */ | |
2541 | u64 device_uuid; | |
2542 | u64 reserved_u64_1; | |
2543 | u32 flags; /* MDF */ | |
2544 | u32 magic; | |
2545 | u32 md_size_sect; | |
2546 | u32 al_offset; /* offset to this block */ | |
2547 | u32 al_nr_extents; /* important for restoring the AL */ | |
2548 | /* `-- act_log->nr_elements <-- sync_conf.al_extents */ | |
2549 | u32 bm_offset; /* offset to the bitmap, from here */ | |
2550 | u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ | |
99432fcc PR |
2551 | u32 la_peer_max_bio_size; /* last peer max_bio_size */ |
2552 | u32 reserved_u32[3]; | |
b411b363 PR |
2553 | |
2554 | } __packed; | |
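
/*
 * Illustrative sketch (not part of the driver): every field of struct
 * meta_data_on_disk is stored big-endian, so drbd_md_sync()/drbd_md_read()
 * below convert with cpu_to_be*()/be*_to_cpu() on each access.  This
 * stand-alone demo uses htonl()/ntohl(), the userspace equivalents of
 * cpu_to_be32()/be32_to_cpu(); the value is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t al_offset = 8;              /* example in-core value */
	uint32_t on_disk = htonl(al_offset); /* what lands on disk */

	printf("in core: %u, on-disk bytes: %08x, read back: %u\n",
	       (unsigned)al_offset, (unsigned)on_disk,
	       (unsigned)ntohl(on_disk));
	return 0;
}
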
2555 | ||
2556 | /** | |
2557 | * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set | |
2558 | * @mdev: DRBD device. | |
2559 | */ | |
2560 | void drbd_md_sync(struct drbd_conf *mdev) | |
2561 | { | |
2562 | struct meta_data_on_disk *buffer; | |
2563 | sector_t sector; | |
2564 | int i; | |
2565 | ||
ee15b038 LE |
2566 | del_timer(&mdev->md_sync_timer); |
2567 | /* timer may be rearmed by drbd_md_mark_dirty() now. */ | |
b411b363 PR |
2568 | if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) |
2569 | return; | |
b411b363 PR |
2570 | |
2571 | /* We use D_FAILED here, and not D_ATTACHING, because we try to write |
2572 | * metadata even if we detach due to a disk failure! */ |
2573 | if (!get_ldev_if_state(mdev, D_FAILED)) | |
2574 | return; | |
2575 | ||
b411b363 PR |
2576 | mutex_lock(&mdev->md_io_mutex); |
2577 | buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); | |
2578 | memset(buffer, 0, 512); | |
2579 | ||
2580 | buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); | |
2581 | for (i = UI_CURRENT; i < UI_SIZE; i++) | |
2582 | buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]); | |
2583 | buffer->flags = cpu_to_be32(mdev->ldev->md.flags); | |
2584 | buffer->magic = cpu_to_be32(DRBD_MD_MAGIC); | |
2585 | ||
2586 | buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect); | |
2587 | buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset); | |
2588 | buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements); | |
2589 | buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE); | |
2590 | buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); | |
2591 | ||
2592 | buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); | |
99432fcc | 2593 | buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size); |
b411b363 PR |
2594 | |
2595 | D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); | |
2596 | sector = mdev->ldev->md.md_offset; | |
2597 | ||
3f3a9b84 | 2598 | if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { |
b411b363 PR |
2599 | /* this was a try anyways ... */ |
2600 | dev_err(DEV, "meta data update failed!\n"); | |
81e84650 | 2601 | drbd_chk_io_error(mdev, 1, true); |
b411b363 PR |
2602 | } |
2603 | ||
2604 | /* Update mdev->ldev->md.la_size_sect, | |
2605 | * since we updated it on metadata. */ | |
2606 | mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev); | |
2607 | ||
2608 | mutex_unlock(&mdev->md_io_mutex); | |
2609 | put_ldev(mdev); | |
2610 | } | |
2611 | ||
2612 | /** | |
2613 | * drbd_md_read() - Reads in the meta data super block | |
2614 | * @mdev: DRBD device. | |
2615 | * @bdev: Device from which the meta data should be read in. | |
2616 | * | |
116676ca | 2617 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case |
b411b363 PR |
2618 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. |
2619 | */ | |
2620 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | |
2621 | { | |
2622 | struct meta_data_on_disk *buffer; | |
2623 | int i, rv = NO_ERROR; | |
2624 | ||
2625 | if (!get_ldev_if_state(mdev, D_ATTACHING)) | |
2626 | return ERR_IO_MD_DISK; | |
2627 | ||
b411b363 PR |
2628 | mutex_lock(&mdev->md_io_mutex); |
2629 | buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); | |
2630 | ||
2631 | if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) { | |
25985edc | 2632 | /* NOTE: can't do normal error processing here as this is |
b411b363 PR |
2633 | called BEFORE disk is attached */ |
2634 | dev_err(DEV, "Error while reading metadata.\n"); | |
2635 | rv = ERR_IO_MD_DISK; | |
2636 | goto err; | |
2637 | } | |
2638 | ||
e7fad8af | 2639 | if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) { |
b411b363 PR |
2640 | dev_err(DEV, "Error while reading metadata, magic not found.\n"); |
2641 | rv = ERR_MD_INVALID; | |
2642 | goto err; | |
2643 | } | |
2644 | if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) { | |
2645 | dev_err(DEV, "unexpected al_offset: %d (expected %d)\n", | |
2646 | be32_to_cpu(buffer->al_offset), bdev->md.al_offset); | |
2647 | rv = ERR_MD_INVALID; | |
2648 | goto err; | |
2649 | } | |
2650 | if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { | |
2651 | dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n", | |
2652 | be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); | |
2653 | rv = ERR_MD_INVALID; | |
2654 | goto err; | |
2655 | } | |
2656 | if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { | |
2657 | dev_err(DEV, "unexpected md_size: %u (expected %u)\n", | |
2658 | be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); | |
2659 | rv = ERR_MD_INVALID; | |
2660 | goto err; | |
2661 | } | |
2662 | ||
2663 | if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) { | |
2664 | dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n", | |
2665 | be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE); | |
2666 | rv = ERR_MD_INVALID; | |
2667 | goto err; | |
2668 | } | |
2669 | ||
2670 | bdev->md.la_size_sect = be64_to_cpu(buffer->la_size); | |
2671 | for (i = UI_CURRENT; i < UI_SIZE; i++) | |
2672 | bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); | |
2673 | bdev->md.flags = be32_to_cpu(buffer->flags); | |
2674 | mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents); | |
2675 | bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); | |
2676 | ||
87eeee41 | 2677 | spin_lock_irq(&mdev->tconn->req_lock); |
99432fcc PR |
2678 | if (mdev->state.conn < C_CONNECTED) { |
2679 | int peer; | |
2680 | peer = be32_to_cpu(buffer->la_peer_max_bio_size); | |
2681 | peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); | |
2682 | mdev->peer_max_bio_size = peer; | |
2683 | } | |
87eeee41 | 2684 | spin_unlock_irq(&mdev->tconn->req_lock); |
99432fcc | 2685 | |
b411b363 PR |
2686 | if (mdev->sync_conf.al_extents < 7) |
2687 | mdev->sync_conf.al_extents = 127; | |
2688 | ||
2689 | err: | |
2690 | mutex_unlock(&mdev->md_io_mutex); | |
2691 | put_ldev(mdev); | |
2692 | ||
2693 | return rv; | |
2694 | } | |
2695 | ||
2696 | /** | |
2697 | * drbd_md_mark_dirty() - Mark meta data super block as dirty | |
2698 | * @mdev: DRBD device. | |
2699 | * | |
2700 | * Call this function if you change anything that should be written to | |
2701 | * the meta-data super block. This function sets MD_DIRTY, and starts a | |
2702 | * timer that ensures drbd_md_sync() gets called within five seconds. |
2703 | */ | |
ca0e6098 | 2704 | #ifdef DEBUG |
ee15b038 LE |
2705 | void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func) |
2706 | { | |
2707 | if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) { | |
2708 | mod_timer(&mdev->md_sync_timer, jiffies + HZ); | |
2709 | mdev->last_md_mark_dirty.line = line; | |
2710 | mdev->last_md_mark_dirty.func = func; | |
2711 | } | |
2712 | } | |
2713 | #else | |
b411b363 PR |
2714 | void drbd_md_mark_dirty(struct drbd_conf *mdev) |
2715 | { | |
ee15b038 | 2716 | if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) |
ca0e6098 | 2717 | mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); |
b411b363 | 2718 | } |
ee15b038 | 2719 | #endif |
b411b363 PR |
2720 | |
2721 | static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) | |
2722 | { | |
2723 | int i; | |
2724 | ||
62b0da3a | 2725 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) |
b411b363 | 2726 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; |
b411b363 PR |
2727 | } |
2728 | ||
2729 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |
2730 | { | |
2731 | if (idx == UI_CURRENT) { | |
2732 | if (mdev->state.role == R_PRIMARY) | |
2733 | val |= 1; | |
2734 | else | |
2735 | val &= ~((u64)1); | |
2736 | ||
2737 | drbd_set_ed_uuid(mdev, val); | |
2738 | } | |
2739 | ||
2740 | mdev->ldev->md.uuid[idx] = val; | |
b411b363 PR |
2741 | drbd_md_mark_dirty(mdev); |
2742 | } | |
2743 | ||
2744 | ||
2745 | void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | |
2746 | { | |
2747 | if (mdev->ldev->md.uuid[idx]) { | |
2748 | drbd_uuid_move_history(mdev); | |
2749 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; | |
b411b363 PR |
2750 | } |
2751 | _drbd_uuid_set(mdev, idx, val); | |
2752 | } | |
2753 | ||
2754 | /** | |
2755 | * drbd_uuid_new_current() - Creates a new current UUID | |
2756 | * @mdev: DRBD device. | |
2757 | * | |
2758 | * Creates a new current UUID, and rotates the old current UUID into | |
2759 | * the bitmap slot. Causes an incremental resync upon next connect. | |
2760 | */ | |
2761 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) | |
2762 | { | |
2763 | u64 val; | |
62b0da3a LE |
2764 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; |
2765 | ||
2766 | if (bm_uuid) | |
2767 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | |
b411b363 | 2768 | |
b411b363 | 2769 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; |
b411b363 PR |
2770 | |
2771 | get_random_bytes(&val, sizeof(u64)); | |
2772 | _drbd_uuid_set(mdev, UI_CURRENT, val); | |
62b0da3a | 2773 | drbd_print_uuids(mdev, "new current UUID"); |
aaa8e2b3 LE |
2774 | /* get it to stable storage _now_ */ |
2775 | drbd_md_sync(mdev); | |
b411b363 PR |
2776 | } |
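
/*
 * Illustrative sketch (not part of the driver): the slot rotation that
 * drbd_uuid_new_current() above performs, with made-up values A and B:
 *
 *   before:  UI_CURRENT = A   UI_BITMAP = 0
 *   after:   UI_CURRENT = B   UI_BITMAP = A    (B freshly random)
 *
 * On the next connect the peer still presents A as its current UUID;
 * finding A in our bitmap slot identifies the peer as our direct
 * predecessor, so the bitmap-based incremental resync is sufficient.
 */
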
2777 | ||
2778 | void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) | |
2779 | { | |
2780 | if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) | |
2781 | return; | |
2782 | ||
2783 | if (val == 0) { | |
2784 | drbd_uuid_move_history(mdev); | |
2785 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; | |
2786 | mdev->ldev->md.uuid[UI_BITMAP] = 0; | |
b411b363 | 2787 | } else { |
62b0da3a LE |
2788 | unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP]; |
2789 | if (bm_uuid) | |
2790 | dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid); | |
b411b363 | 2791 | |
62b0da3a | 2792 | mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); |
b411b363 PR |
2793 | } |
2794 | drbd_md_mark_dirty(mdev); | |
2795 | } | |
2796 | ||
2797 | /** | |
2798 | * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() | |
2799 | * @mdev: DRBD device. | |
2800 | * | |
2801 | * Sets all bits in the bitmap and writes the whole bitmap to stable storage. | |
2802 | */ | |
2803 | int drbd_bmio_set_n_write(struct drbd_conf *mdev) | |
2804 | { | |
2805 | int rv = -EIO; | |
2806 | ||
2807 | if (get_ldev_if_state(mdev, D_ATTACHING)) { | |
2808 | drbd_md_set_flag(mdev, MDF_FULL_SYNC); | |
2809 | drbd_md_sync(mdev); | |
2810 | drbd_bm_set_all(mdev); | |
2811 | ||
2812 | rv = drbd_bm_write(mdev); | |
2813 | ||
2814 | if (!rv) { | |
2815 | drbd_md_clear_flag(mdev, MDF_FULL_SYNC); | |
2816 | drbd_md_sync(mdev); | |
2817 | } | |
2818 | ||
2819 | put_ldev(mdev); | |
2820 | } | |
2821 | ||
2822 | return rv; | |
2823 | } | |
2824 | ||
2825 | /** | |
2826 | * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() | |
2827 | * @mdev: DRBD device. | |
2828 | * | |
2829 | * Clears all bits in the bitmap and writes the whole bitmap to stable storage. | |
2830 | */ | |
2831 | int drbd_bmio_clear_n_write(struct drbd_conf *mdev) | |
2832 | { | |
2833 | int rv = -EIO; | |
2834 | ||
0778286a | 2835 | drbd_resume_al(mdev); |
b411b363 PR |
2836 | if (get_ldev_if_state(mdev, D_ATTACHING)) { |
2837 | drbd_bm_clear_all(mdev); | |
2838 | rv = drbd_bm_write(mdev); | |
2839 | put_ldev(mdev); | |
2840 | } | |
2841 | ||
2842 | return rv; | |
2843 | } | |
2844 | ||
00d56944 | 2845 | static int w_bitmap_io(struct drbd_work *w, int unused) |
b411b363 PR |
2846 | { |
2847 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); | |
00d56944 | 2848 | struct drbd_conf *mdev = w->mdev; |
02851e9f | 2849 | int rv = -EIO; |
b411b363 PR |
2850 | |
2851 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); | |
2852 | ||
02851e9f | 2853 | if (get_ldev(mdev)) { |
20ceb2b2 | 2854 | drbd_bm_lock(mdev, work->why, work->flags); |
02851e9f LE |
2855 | rv = work->io_fn(mdev); |
2856 | drbd_bm_unlock(mdev); | |
2857 | put_ldev(mdev); | |
2858 | } | |
b411b363 | 2859 | |
4738fa16 | 2860 | clear_bit_unlock(BITMAP_IO, &mdev->flags); |
b411b363 PR |
2861 | wake_up(&mdev->misc_wait); |
2862 | ||
2863 | if (work->done) | |
2864 | work->done(mdev, rv); | |
2865 | ||
2866 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); | |
2867 | work->why = NULL; | |
20ceb2b2 | 2868 | work->flags = 0; |
b411b363 PR |
2869 | |
2870 | return 1; | |
2871 | } | |
2872 | ||
82f59cc6 LE |
void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* We cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently.  Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though; so once local_cnt reaches
	 * zero again, it is safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 1;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev: DRBD device.
 * @io_fn: IO callback to be called when bitmap IO is possible
 * @done: callback to be called after the bitmap IO was performed
 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags (enum bm_flag), passed on to drbd_bm_lock()
 *
 * While IO on the bitmap happens, application IO is frozen; this ensures
 * that drbd_set_out_of_sync() cannot be called concurrently.  This
 * function MAY ONLY be called from worker context.  It MUST NOT be used
 * while a previous such work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}
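/*
 * Illustrative call (sketch, not a call site in this file; the done
 * callback name and why-string are made up):
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_clear_n_write,
 *			     my_done_fn, "example: write out bitmap",
 *			     BM_LOCKED_SET_ALLOWED);
 *
 * where my_done_fn matches void (*done)(struct drbd_conf *, int) and
 * receives the io_fn return value once w_bitmap_io() has run.
 */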

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev: DRBD device.
 * @io_fn: IO callback to be called when bitmap IO is possible
 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags (enum bm_flag), passed on to drbd_bm_lock()
 *
 * Freezes application IO while the actual IO operation runs, unless
 * @flags contains BM_LOCKED_SET_ALLOWED.  This function MUST NOT be
 * called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		   char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
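/*
 * Illustrative call (sketch; the why-string is made up): synchronously
 * set all bits and write them out, e.g. to force a full sync:
 *
 *	int err = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *				 "example: full sync", BM_LOCKED_SET_ALLOWED);
 *
 * Passing BM_LOCKED_SET_ALLOWED skips the application IO freeze (see
 * above); by its name it also allows the io_fn to set bits while the
 * bitmap is locked.
 */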

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
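/*
 * Illustrative use (sketch), mirroring drbd_bmio_set_n_write() above:
 *
 *	if (get_ldev_if_state(mdev, D_ATTACHING)) {
 *		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
 *		drbd_md_sync(mdev);
 *		put_ldev(mdev);
 *	}
 *
 * The set/clear variants only mark the meta-data dirty; the change hits
 * stable storage once drbd_md_sync() runs.
 */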

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 1;
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]               = "Data",
		[P_DATA_REPLY]         = "DataReply",
		[P_RS_DATA_REPLY]      = "RSDataReply",
		[P_BARRIER]            = "Barrier",
		[P_BITMAP]             = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]      = "UnplugRemote",
		[P_DATA_REQUEST]       = "DataRequest",
		[P_RS_DATA_REQUEST]    = "RSDataRequest",
		[P_SYNC_PARAM]         = "SyncParam",
		[P_SYNC_PARAM89]       = "SyncParam89",
		[P_PROTOCOL]           = "ReportProtocol",
		[P_UUIDS]              = "ReportUUIDs",
		[P_SIZES]              = "ReportSizes",
		[P_STATE]              = "ReportState",
		[P_SYNC_UUID]          = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]     = "AuthChallenge",
		[P_AUTH_RESPONSE]      = "AuthResponse",
		[P_PING]               = "Ping",
		[P_PING_ACK]           = "PingAck",
		[P_RECV_ACK]           = "RecvAck",
		[P_WRITE_ACK]          = "WriteAck",
		[P_RS_WRITE_ACK]       = "RSWriteAck",
		[P_DISCARD_WRITE]      = "DiscardWrite",
		[P_NEG_ACK]            = "NegAck",
		[P_NEG_DREPLY]         = "NegDReply",
		[P_NEG_RS_DREPLY]      = "NegRSDReply",
		[P_BARRIER_ACK]        = "BarrierAck",
		[P_STATE_CHG_REQ]      = "StateChgRequest",
		[P_STATE_CHG_REPLY]    = "StateChgReply",
		[P_OV_REQUEST]         = "OVRequest",
		[P_OV_REPLY]           = "OVReply",
		[P_OV_RESULT]          = "OVResult",
		[P_CSUM_RS_REQUEST]    = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]      = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]  = "CBitmap",
		[P_DELAY_PROBE]        = "DelayProbe",
		[P_OUT_OF_SYNC]        = "OutOfSync",
		[P_RETRY_WRITE]        = "RetryWrite",
	};

	if (cmd == P_HAND_SHAKE_M)
		return "HandShakeM";
	if (cmd == P_HAND_SHAKE_S)
		return "HandShakeS";
	if (cmd == P_HAND_SHAKE)
		return "HandShake";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
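/*
 * Illustrative use (sketch): mainly useful for logging, e.g.
 *
 *	dev_err(DEV, "unexpected packet type %s\n", cmdname(cmd));
 */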

/**
 * drbd_wait_misc - wait for a request to make progress
 * @mdev: device associated with the request
 * @i: the struct drbd_interval embedded in struct drbd_request or
 *     struct drbd_peer_request
 *
 * Must be called with mdev->tconn->req_lock held; the lock is dropped
 * while sleeping and re-acquired before returning.  With ko-count set,
 * the wait times out after ko-count * timeout (the configured timeout
 * is in tenths of a second).
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *net_conf = mdev->tconn->net_conf;
	DEFINE_WAIT(wait);
	long timeout;

	if (!net_conf)
		return -ETIMEDOUT;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (net_conf->ko_count)
		timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;

	/* Indicate to wake up mdev->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

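	/* Fire iff this minor is selected by the fault_devs bitmask
	 * (0 selects all devices) and a random 1..100 roll falls within
	 * the configured fault_rate percentage. */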
	unsigned int ret = (
		(fault_devs == 0 ||
		 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
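/*
 * Illustrative use (sketch; the bio handling shown is hypothetical):
 *
 *	if (_drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);	// simulate a failed meta-data write
 *	else
 *		submit_bio(WRITE, bio);
 */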
#endif

const char *drbd_buildtag(void)
{
	/* When DRBD is built from external sources, this holds a reference
	   to the git hash of that source code. */

	static char buildtag[38] = "\0uilt-in";
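	/* Note the "\0uilt-in" trick: the leading NUL keeps buildtag[0] == 0
	 * until the string is filled in below.  For a modular build the
	 * buffer is overwritten with the srcversion; otherwise the NUL is
	 * replaced by 'b', turning the buffer into "built-in". */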

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);