/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_vli.h"

#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_connection *connection);
static int drbd_do_auth(struct drbd_connection *connection);
static int drbd_disconnected(struct drbd_peer_device *);
static void conn_wait_active_ee_empty(struct drbd_connection *connection);
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
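
/* Example: three pages linked through page->private,
 *
 *	*head -> A -> B -> C -> (0)
 *
 * page_chain_del(&head, 2) unlinks and returns the chain A -> B
 * (B becomes the new end-of-list marker), leaving *head == C.
 */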

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_device *device,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req, *tmp;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(&peer_req->w.list, to_be_freed);
	}
}

static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	spin_unlock_irq(&device->resource->req_lock);
	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);
}

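/* Walk all peer devices under rcu_read_lock(). To do the actual reclaim
 * work outside the RCU read section, take a temporary kref on the device
 * (so it cannot vanish), drop the RCU lock, reclaim, then re-acquire the
 * lock and continue the idr walk. */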
static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (!atomic_read(&device->pp_in_use_by_net))
			continue;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_reclaim_net_peer_reqs(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @peer_device:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * If this allocation would exceed the max_buffers setting, we throttle
 * allocation (schedule_timeout) to give the system some room to breathe.
 *
 * We do not use max-buffers as hard limit, because it could lead to
 * congestion and further to a distributed deadlock during online-verify or
 * (checksum based) resync, if the max-buffers, socket buffer sizes and
 * resync-rate settings are mis-configured.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
			      bool retry)
{
	struct drbd_device *device = peer_device->device;
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	unsigned int mxb;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&device->pp_in_use) < mxb)
		page = __drbd_alloc_pages(device, number);

	/* Try to keep the fast path fast, but occasionally we need
	 * to reclaim the pages we lent to the network stack. */
	if (page && atomic_read(&device->pp_in_use_by_net) > 512)
		drbd_reclaim_net_peer_reqs(device);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_reclaim_net_peer_reqs(device);

		if (atomic_read(&device->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(device, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			drbd_warn(device, "drbd_alloc_pages interrupted!\n");
			break;
		}

		if (schedule_timeout(HZ/10) == 0)
			mxb = UINT_MAX;
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &device->pp_in_use);
	return page;
}
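
/* Typical calling pattern (illustrative sketch only, not actual calling
 * code from this file; page_chain_for_each() is from drbd_int.h):
 *
 *	struct page *page = drbd_alloc_pages(peer_device, nr_pages, true);
 *	struct page *p = page;
 *	if (page) {
 *		page_chain_for_each(p) {
 *			... fill or read the page ...
 *		}
 *		drbd_free_pages(device, page, 0);
 *	}
 *
 * In practice the chain usually travels inside a drbd_peer_request and is
 * returned through the drbd_free_peer_req() paths below. */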

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

/* normal: payload_size == request size (bi_size)
 * w_same: payload_size == logical_block_size
 * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (payload_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			drbd_err(device, "%s: allocation failed\n", __func__);
		return NULL;
	}

	if (nr_pages) {
		page = drbd_alloc_pages(peer_device, nr_pages,
					gfpflags_allow_blocking(gfp_mask));
		if (!page)
			goto fail;
	}

	memset(peer_req, 0, sizeof(*peer_req));
	INIT_LIST_HEAD(&peer_req->w.list);
	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = request_size;
	peer_req->i.sector = sector;
	peer_req->submit_jif = jiffies;
	peer_req->peer_device = peer_device;
	peer_req->pages = page;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}

void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
			  int is_net)
{
	might_sleep();
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(device, peer_req->pages, is_net);
	D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(device, drbd_interval_empty(&peer_req->i));
	if (!expect(!(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
		peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
		drbd_al_complete_io(device, &peer_req->i);
	}
	mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &device->net_ee;

	spin_lock_irq(&device->resource->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(device, peer_req, is_net);
		count++;
	}
	return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&device->resource->req_lock);
	reclaim_finished_net_peer_reqs(device, &reclaimed);
	list_splice_init(&device->done_ee, &work_list);
	spin_unlock_irq(&device->resource->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(device, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_peer_req(device, peer_req);
	}
	wake_up(&device->ee_wait);

	return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_device *device,
				     struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&device->resource->req_lock);
		io_schedule();
		finish_wait(&device->ee_wait, &wait);
		spin_lock_irq(&device->resource->req_lock);
	}
}

static void drbd_wait_ee_list_empty(struct drbd_device *device,
				    struct list_head *head)
{
	spin_lock_irq(&device->resource->req_lock);
	_drbd_wait_ee_list_empty(device, head);
	spin_unlock_irq(&device->resource->req_lock);
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
}

static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
{
	int rv;

	rv = drbd_recv_short(connection->data.socket, buf, size, 0);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			drbd_info(connection, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			drbd_err(connection, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (test_bit(DISCONNECT_SENT, &connection->flags)) {
			long t;
			rcu_read_lock();
			t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
			rcu_read_unlock();

			t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);

			if (t)
				goto out;
		}
		drbd_info(connection, "sock was shut down by peer\n");
	}

	if (rv != size)
		conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
	return rv;
}

static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv(connection, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}

static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(connection, buf, size);
	if (err && !signal_pending(current))
		drbd_warn(connection, "short read (expected size %d)\n", (int)size);
	return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_connection *connection)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	struct net_conf *nc;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &connection->my_addr, my_addr_len);

	if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			drbd_err(connection, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
	}

	return sock;
}

struct accept_wait_data {
	struct drbd_connection *connection;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

};

static void drbd_incoming_connection(struct sock *sk)
{
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);
	state_change(sk);
}

static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;
	struct net_conf *nc;
	const char *what;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	rcu_read_unlock();

	my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &connection->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
	if (err < 0)
		goto out;

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	what = "listen";
	err = s_listen->ops->listen(s_listen, 5);
	if (err < 0)
		goto out;

	return 0;
out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "%s failed, err = %d\n", what, err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
{
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return NULL;
	}
	connect_int = nc->connect_int;
	rcu_read_unlock();

	timeo = connect_int * HZ;
	/* 28.5% random jitter */
	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
	if (err <= 0)
		return NULL;

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			drbd_err(connection, "accept failed, err = %d\n", err);
			conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}

	if (s_estab)
		unregister_state_change(s_estab->sk, ad);

	return s_estab;
}

static int decode_header(struct drbd_connection *, void *, struct packet_info *);

static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
			     enum drbd_packet cmd)
{
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
{
	unsigned int header_size = drbd_header_size(connection);
	struct packet_info pi;
	struct net_conf *nc;
	int err;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -EIO;
	}
	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
	rcu_read_unlock();

	err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
	if (err != header_size) {
		if (err >= 0)
			err = -EIO;
		return err;
	}
	err = decode_header(connection, connection->data.rbuf, &pi);
	if (err)
		return err;
	return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static bool drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

static bool connection_established(struct drbd_connection *connection,
				   struct socket **sock1,
				   struct socket **sock2)
{
	struct net_conf *nc;
	int timeout;
	bool ok;

	if (!*sock1 || !*sock2)
		return false;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
	rcu_read_unlock();
	schedule_timeout_interruptible(timeout);

	ok = drbd_socket_okay(sock1);
	ok = drbd_socket_okay(sock2) && ok;

	return ok;
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	int err;

	atomic_set(&device->packet_seq, 0);
	device->peer_seq = 0;

	device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
		&peer_device->connection->cstate_mutex :
		&device->own_state_mutex;

	err = drbd_send_sync_param(peer_device);
	if (!err)
		err = drbd_send_sizes(peer_device, 0, 0);
	if (!err)
		err = drbd_send_uuids(peer_device);
	if (!err)
		err = drbd_send_current_state(peer_device);
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	clear_bit(RESIZE_PENDING, &device->flags);
	atomic_set(&device->ap_in_flight, 0);
	mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
	return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_connection *connection)
{
	struct drbd_socket sock, msock;
	struct drbd_peer_device *peer_device;
	struct net_conf *nc;
	int vnr, timeout, h;
	bool discard_my_data, ok;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.connection = connection,
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	clear_bit(DISCONNECT_SENT, &connection->flags);
	if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	mutex_init(&sock.mutex);
	sock.sbuf = connection->data.sbuf;
	sock.rbuf = connection->data.rbuf;
	sock.socket = NULL;
	mutex_init(&msock.mutex);
	msock.sbuf = connection->meta.sbuf;
	msock.rbuf = connection->meta.rbuf;
	msock.socket = NULL;

	/* Assume that the peer only understands protocol 80 until we know better. */
	connection->agreed_pro_version = 80;

	if (prepare_listen_socket(connection, &ad))
		return 0;

	do {
		struct socket *s;

		s = drbd_try_connect(connection);
		if (s) {
			if (!sock.socket) {
				sock.socket = s;
				send_first_packet(connection, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &connection->flags);
				msock.socket = s;
				send_first_packet(connection, &msock, P_INITIAL_META);
			} else {
				drbd_err(connection, "Logic error in conn_connect()\n");
				goto out_release_sockets;
			}
		}

		if (connection_established(connection, &sock.socket, &msock.socket))
			break;

retry:
		s = drbd_wait_for_connect(connection, &ad);
		if (s) {
			int fp = receive_first_packet(connection, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
			switch (fp) {
			case P_INITIAL_DATA:
				if (sock.socket) {
					drbd_warn(connection, "initial packet S crossed\n");
					sock_release(sock.socket);
					sock.socket = s;
					goto randomize;
				}
				sock.socket = s;
				break;
			case P_INITIAL_META:
				set_bit(RESOLVE_CONFLICTS, &connection->flags);
				if (msock.socket) {
					drbd_warn(connection, "initial packet M crossed\n");
					sock_release(msock.socket);
					msock.socket = s;
					goto randomize;
				}
				msock.socket = s;
				break;
			default:
				drbd_warn(connection, "Error receiving initial packet\n");
				sock_release(s);
randomize:
				if (prandom_u32() & 1)
					goto retry;
			}
		}

		if (connection->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&connection->receiver) == EXITING)
				goto out_release_sockets;
		}

		ok = connection_established(connection, &sock.socket, &msock.socket);
	} while (!ok);

	if (ad.s_listen)
		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;
	rcu_read_unlock();

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	connection->data.socket = sock.socket;
	connection->meta.socket = msock.socket;
	connection->last_received = jiffies;

	h = drbd_do_features(connection);
	if (h <= 0)
		return h;

	if (connection->cram_hmac_tfm) {
		/* drbd_request_state(device, NS(conn, WFAuth)); */
		switch (drbd_do_auth(connection)) {
		case -1:
			drbd_err(connection, "Authentication of peer failed\n");
			return -1;
		case 0:
			drbd_err(connection, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	connection->data.socket->sk->sk_sndtimeo = timeout;
	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(connection) == -EOPNOTSUPP)
		return -1;

	/* Prevent a race between resync-handshake and
	 * being promoted to Primary.
	 *
	 * Grab and release the state mutex, so we know that any current
	 * drbd_set_role() is finished, and any incoming drbd_set_role
	 * will see the STATE_SENT flag, and wait for it to be cleared.
	 */
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_lock(peer_device->device->state_mutex);

	set_bit(STATE_SENT, &connection->flags);

	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		mutex_unlock(peer_device->device->state_mutex);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		kref_get(&device->kref);
		rcu_read_unlock();

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &device->flags);
		else
			clear_bit(DISCARD_MY_DATA, &device->flags);

		drbd_connected(peer_device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
		clear_bit(STATE_SENT, &connection->flags);
		return 0;
	}

	drbd_thread_start(&connection->ack_receiver);
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use format string arguments */
	connection->ack_sender =
		alloc_ordered_workqueue("drbd_as_%s", WQ_MEM_RECLAIM, connection->resource->name);
	if (!connection->ack_sender) {
		drbd_err(connection, "Failed to create workqueue ack_sender\n");
		return 0;
	}

	mutex_lock(&connection->resource->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	connection->net_conf->discard_my_data = 0;
	mutex_unlock(&connection->resource->conf_update);

	return h;

out_release_sockets:
	if (ad.s_listen)
		sock_release(ad.s_listen);
	if (sock.socket)
		sock_release(sock.socket);
	if (msock.socket)
		sock_release(msock.socket);
	return -1;
}

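/* The three on-the-wire header layouts told apart below (field order as
 * defined in drbd_protocol.h, all fields big-endian):
 *
 *	p_header100: u32 magic; u16 volume; u16 command; u32 length; u32 pad;
 *	p_header95:  u16 magic; u16 command; u32 length;
 *	p_header80:  u32 magic; u16 command; u16 length;
 *
 * p_header80 and p_header95 have the same size, so the magic value, not
 * the size alone, distinguishes them; only the protocol 100 header carries
 * a volume number (vnr), it defaults to 0 for the older formats. */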
static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
{
	unsigned int header_size = drbd_header_size(connection);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
		if (h->pad != 0) {
			drbd_err(connection, "Header padding is not zero\n");
			return -EINVAL;
		}
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
		pi->vnr = 0;
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		pi->vnr = 0;
	} else {
		drbd_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 connection->agreed_pro_version);
		return -EINVAL;
	}
	pi->data = header + header_size;
	return 0;
}

static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
	void *buffer = connection->data.rbuf;
	int err;

	err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
	if (err)
		return err;

	err = decode_header(connection, buffer, pi);
	connection->last_received = jiffies;

	return err;
}

/* This is blkdev_issue_flush, but asynchronous.
 * We want to submit to all component volumes in parallel,
 * then wait for all completions.
 */
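/* Completion counting scheme used below: ctx->pending starts at 1 (a bias
 * held by drbd_flush() itself), submit_one_flush() increments it once per
 * submitted bio, and one_flush_endio() decrements it once per completion.
 * drbd_flush() drops its bias last, so ctx->done can only complete after
 * every flush bio has finished. */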
struct issue_flush_context {
	atomic_t pending;
	int error;
	struct completion done;
};
struct one_flush_context {
	struct drbd_device *device;
	struct issue_flush_context *ctx;
};

void one_flush_endio(struct bio *bio)
{
	struct one_flush_context *octx = bio->bi_private;
	struct drbd_device *device = octx->device;
	struct issue_flush_context *ctx = octx->ctx;

	if (bio->bi_error) {
		ctx->error = bio->bi_error;
		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
	}
	kfree(octx);
	bio_put(bio);

	clear_bit(FLUSH_PENDING, &device->flags);
	put_ldev(device);
	kref_put(&device->kref, drbd_destroy_device);

	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->done);
}

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
	if (!bio || !octx) {
		drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
		/* FIXME: what else can I do now?  disconnecting or detaching
		 * really does not help to improve the state of the world, either.
		 */
		kfree(octx);
		if (bio)
			bio_put(bio);

		ctx->error = -ENOMEM;
		put_ldev(device);
		kref_put(&device->kref, drbd_destroy_device);
		return;
	}

	octx->device = device;
	octx->ctx = ctx;
	bio->bi_bdev = device->ldev->backing_bdev;
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;
	bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
	atomic_inc(&ctx->pending);
	submit_bio(bio);
}

static void drbd_flush(struct drbd_connection *connection)
{
	if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
		struct drbd_peer_device *peer_device;
		struct issue_flush_context ctx;
		int vnr;

		atomic_set(&ctx.pending, 1);
		ctx.error = 0;
		init_completion(&ctx.done);

		rcu_read_lock();
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
			struct drbd_device *device = peer_device->device;

			if (!get_ldev(device))
				continue;
			kref_get(&device->kref);
			rcu_read_unlock();

			submit_one_flush(device, &ctx);

			rcu_read_lock();
		}
		rcu_read_unlock();

		/* Do we want to add a timeout,
		 * if disk-timeout is set? */
		if (!atomic_dec_and_test(&ctx.pending))
			wait_for_completion(&ctx.done);

		if (ctx.error) {
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			/* Any error is already reported by bio_endio callback. */
			drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
		}
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @connection:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:	Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&connection->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&connection->epoch_lock);
				drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
				spin_lock(&connection->epoch_lock);
			}
#if 0
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->connection);
#endif

			if (connection->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				connection->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&connection->epoch_lock);

	return rv;
}

static enum write_ordering_e
max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
{
	struct disk_conf *dc;

	dc = rcu_dereference(bdev->disk_conf);

	if (wo == WO_BDEV_FLUSH && !dc->disk_flushes)
		wo = WO_DRAIN_IO;
	if (wo == WO_DRAIN_IO && !dc->disk_drain)
		wo = WO_NONE;

	return wo;
}

b411b363 PR |
1409 | /** |
1410 | * drbd_bump_write_ordering() - Fall back to another write ordering method | |
bde89a9e | 1411 | * @resource: DRBD resource. |
b411b363 PR |
1412 | * @wo: Write ordering method to try. |
1413 | */ | |
8fe39aac PR |
1414 | void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev, |
1415 | enum write_ordering_e wo) | |
b411b363 | 1416 | { |
e9526580 | 1417 | struct drbd_device *device; |
b411b363 | 1418 | enum write_ordering_e pwo; |
4b0007c0 | 1419 | int vnr; |
b411b363 | 1420 | static char *write_ordering_str[] = { |
f6ba8636 AG |
1421 | [WO_NONE] = "none", |
1422 | [WO_DRAIN_IO] = "drain", | |
1423 | [WO_BDEV_FLUSH] = "flush", | |
b411b363 PR |
1424 | }; |
1425 | ||
e9526580 | 1426 | pwo = resource->write_ordering; |
f6ba8636 | 1427 | if (wo != WO_BDEV_FLUSH) |
70df7092 | 1428 | wo = min(pwo, wo); |
daeda1cc | 1429 | rcu_read_lock(); |
e9526580 | 1430 | idr_for_each_entry(&resource->devices, device, vnr) { |
8fe39aac PR |
1431 | if (get_ldev(device)) { |
1432 | wo = max_allowed_wo(device->ldev, wo); | |
1433 | if (device->ldev == bdev) | |
1434 | bdev = NULL; | |
1435 | put_ldev(device); | |
1436 | } | |
4b0007c0 | 1437 | } |
8fe39aac PR |
1438 | |
1439 | if (bdev) | |
1440 | wo = max_allowed_wo(bdev, wo); | |
1441 | ||
70df7092 LE |
1442 | rcu_read_unlock(); |
1443 | ||
e9526580 | 1444 | resource->write_ordering = wo; |
f6ba8636 | 1445 | if (pwo != resource->write_ordering || wo == WO_BDEV_FLUSH) |
e9526580 | 1446 | drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]); |
b411b363 PR |
1447 | } |
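The rule implemented above is that the effective method is only ever degraded: min() against the current method unless a flush is explicitly requested, then clamped by each attached device's configuration. A userspace sketch with hypothetical toy_* names, mirroring max_allowed_wo():

#include <stdio.h>

enum toy_wo { TOY_WO_NONE, TOY_WO_DRAIN_IO, TOY_WO_BDEV_FLUSH };

/* per-device clamp, as in max_allowed_wo() */
static enum toy_wo toy_max_allowed_wo(enum toy_wo wo, int disk_flushes, int disk_drain)
{
	if (wo == TOY_WO_BDEV_FLUSH && !disk_flushes)
		wo = TOY_WO_DRAIN_IO;
	if (wo == TOY_WO_DRAIN_IO && !disk_drain)
		wo = TOY_WO_NONE;
	return wo;
}

int main(void)
{
	static const char *names[] = { "none", "drain", "flush" };
	enum toy_wo pwo = TOY_WO_BDEV_FLUSH;     /* current method */
	enum toy_wo wo = TOY_WO_DRAIN_IO;        /* method to try */

	if (wo != TOY_WO_BDEV_FLUSH && wo > pwo) /* wo = min(pwo, wo) */
		wo = pwo;
	wo = toy_max_allowed_wo(wo, 0, 1);       /* flushes disabled in config */
	printf("method: %s\n", names[wo]);       /* prints "drain" */
	return 0;
}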
1448 | ||
dd4f699d LE |
1449 | /* |
1450 | * We *may* ignore the discard-zeroes-data setting, if so configured. | |
1451 | * | |
1452 | * Assumption is that "discard_zeroes_data=0" is only set because the backend | |
1453 | * may ignore partial unaligned discards. | |
1454 | * | |
1455 | * LVM/DM thin as of at least | |
1456 | * LVM version: 2.02.115(2)-RHEL7 (2015-01-28) | |
1457 | * Library version: 1.02.93-RHEL7 (2015-01-28) | |
1458 | * Driver version: 4.29.0 | |
1459 | * still behaves this way. | |
1460 | * | |
1461 | * For unaligned (wrt. alignment and granularity) or too small discards, | |
1462 | * we zero-out the initial and/or trailing unaligned partial chunks, | |
1463 | * but discard all the aligned full chunks. | |
1464 | * | |
1465 | * At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1". | |
1466 | */ | |
1467 | int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard) | |
1468 | { | |
1469 | struct block_device *bdev = device->ldev->backing_bdev; | |
1470 | struct request_queue *q = bdev_get_queue(bdev); | |
1471 | sector_t tmp, nr; | |
1472 | unsigned int max_discard_sectors, granularity; | |
1473 | int alignment; | |
1474 | int err = 0; | |
1475 | ||
1476 | if (!discard) | |
1477 | goto zero_out; | |
1478 | ||
1479 | /* Zero-sector (unknown) and one-sector granularities are the same. */ | |
1480 | granularity = max(q->limits.discard_granularity >> 9, 1U); | |
1481 | alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; | |
1482 | ||
1483 | max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22)); | |
1484 | max_discard_sectors -= max_discard_sectors % granularity; | |
1485 | if (unlikely(!max_discard_sectors)) | |
1486 | goto zero_out; | |
1487 | ||
1488 | if (nr_sectors < granularity) | |
1489 | goto zero_out; | |
1490 | ||
1491 | tmp = start; | |
1492 | if (sector_div(tmp, granularity) != alignment) { | |
1493 | if (nr_sectors < 2*granularity) | |
1494 | goto zero_out; | |
1495 | /* start + gran - (start + gran - align) % gran */ | |
1496 | tmp = start + granularity - alignment; | |
1497 | tmp = start + granularity - sector_div(tmp, granularity); | |
1498 | ||
1499 | nr = tmp - start; | |
1500 | err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0); | |
1501 | nr_sectors -= nr; | |
1502 | start = tmp; | |
1503 | } | |
1504 | while (nr_sectors >= granularity) { | |
1505 | nr = min_t(sector_t, nr_sectors, max_discard_sectors); | |
1506 | err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0); | |
1507 | nr_sectors -= nr; | |
1508 | start += nr; | |
1509 | } | |
1510 | zero_out: | |
1511 | if (nr_sectors) { | |
1512 | err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0); | |
1513 | } | |
1514 | return err != 0; | |
1515 | } | |
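Worked through numerically, the head/body/tail split above is easier to see in a minimal userspace sketch; it assumes plain 64-bit modulo in place of sector_div() and printf() in place of the blkdev_issue_*() calls, and all toy names are hypothetical:

#include <stdio.h>

typedef unsigned long long sector_t;

static void toy_split_range(sector_t start, sector_t nr_sectors,
			    sector_t granularity, sector_t alignment)
{
	/* too small, or unaligned with no room for a full chunk: zero-out all */
	if (nr_sectors < granularity ||
	    ((start % granularity) != alignment && nr_sectors < 2 * granularity)) {
		printf("zero-out all:  %llu +%llu\n", start, nr_sectors);
		return;
	}
	if ((start % granularity) != alignment) {
		sector_t tmp = start + granularity - alignment;
		tmp = start + granularity - (tmp % granularity);
		printf("zero-out head: %llu +%llu\n", start, tmp - start);
		nr_sectors -= tmp - start;
		start = tmp;
	}
	if (nr_sectors >= granularity) {
		sector_t nr = nr_sectors - nr_sectors % granularity;
		printf("discard body:  %llu +%llu\n", start, nr);
		nr_sectors -= nr;
		start += nr;
	}
	if (nr_sectors)
		printf("zero-out tail: %llu +%llu\n", start, nr_sectors);
}

int main(void)
{
	/* 8-sector granularity, alignment 0, range starting mid-chunk */
	toy_split_range(5, 27, 8, 0); /* head 5 +3, discard 8 +24 */
	return 0;
}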
1516 | ||
1517 | static bool can_do_reliable_discards(struct drbd_device *device) | |
1518 | { | |
1519 | struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); | |
1520 | struct disk_conf *dc; | |
1521 | bool can_do; | |
1522 | ||
1523 | if (!blk_queue_discard(q)) | |
1524 | return false; | |
1525 | ||
1526 | if (q->limits.discard_zeroes_data) | |
1527 | return true; | |
1528 | ||
1529 | rcu_read_lock(); | |
1530 | dc = rcu_dereference(device->ldev->disk_conf); | |
1531 | can_do = dc->discard_zeroes_if_aligned; | |
1532 | rcu_read_unlock(); | |
1533 | return can_do; | |
1534 | } | |
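The decision above is a small truth table: discards are used only if the queue supports them and either the device reports reliable zeroing or the administrator opted in. A userspace sketch with hypothetical toy names:

#include <stdbool.h>
#include <stdio.h>

static bool toy_can_discard(bool queue_discard, bool zeroes_data,
			    bool zeroes_if_aligned)
{
	if (!queue_discard)
		return false;
	return zeroes_data || zeroes_if_aligned;
}

int main(void)
{
	printf("%d\n", toy_can_discard(true, false, true));  /* 1: config opt-in */
	printf("%d\n", toy_can_discard(true, false, false)); /* 0: fall back to zero-out */
	return 0;
}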
1535 | ||
9104d31a | 1536 | static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req) |
dd4f699d LE |
1537 | { |
1538 | /* If the backend cannot discard, or does not guarantee | |
1539 | * read-back zeroes in discarded ranges, we fall back to | |
1540 | * zero-out. Unless configuration specifically requested | |
1541 | * otherwise. */ | |
1542 | if (!can_do_reliable_discards(device)) | |
1543 | peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; | |
1544 | ||
1545 | if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, | |
1546 | peer_req->i.size >> 9, !(peer_req->flags & EE_IS_TRIM_USE_ZEROOUT))) | |
1547 | peer_req->flags |= EE_WAS_ERROR; | |
1548 | drbd_endio_write_sec_final(peer_req); | |
1549 | } | |
1550 | ||
9104d31a LE |
1551 | static void drbd_issue_peer_wsame(struct drbd_device *device, |
1552 | struct drbd_peer_request *peer_req) | |
1553 | { | |
1554 | struct block_device *bdev = device->ldev->backing_bdev; | |
1555 | sector_t s = peer_req->i.sector; | |
1556 | sector_t nr = peer_req->i.size >> 9; | |
1557 | if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages)) | |
1558 | peer_req->flags |= EE_WAS_ERROR; | |
1559 | drbd_endio_write_sec_final(peer_req); | |
1560 | } | |
1561 | ||
1562 | ||
45bb912b | 1563 | /** |
fbe29dec | 1564 | * drbd_submit_peer_request() - submit a peer request to the local backing device |
b30ab791 | 1565 | * @device: DRBD device. |
db830c46 | 1566 | * @peer_req: peer request |
45bb912b | 1567 | * @op, @op_flags: request operation and flags, see bio_set_op_attrs() |
10f6d992 LE |
1568 | * |
1569 | * May spread the pages to multiple bios, | |
1570 | * depending on bio_add_page restrictions. | |
1571 | * | |
1572 | * Returns 0 if all bios have been submitted, | |
1573 | * -ENOMEM if we could not allocate enough bios, | |
1574 | * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a | |
1575 | * single page to an empty bio (which should never happen and likely indicates | |
1576 | * that the lower level IO stack is in some way broken). This has been observed | |
1577 | * on certain Xen deployments. | |
45bb912b LE |
1578 | */ |
1579 | /* TODO allocate from our own bio_set. */ | |
b30ab791 | 1580 | int drbd_submit_peer_request(struct drbd_device *device, |
fbe29dec | 1581 | struct drbd_peer_request *peer_req, |
bb3cc85e MC |
1582 | const unsigned op, const unsigned op_flags, |
1583 | const int fault_type) | |
45bb912b LE |
1584 | { |
1585 | struct bio *bios = NULL; | |
1586 | struct bio *bio; | |
db830c46 AG |
1587 | struct page *page = peer_req->pages; |
1588 | sector_t sector = peer_req->i.sector; | |
11f8b2b6 | 1589 | unsigned data_size = peer_req->i.size; |
45bb912b | 1590 | unsigned n_bios = 0; |
11f8b2b6 | 1591 | unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; |
10f6d992 | 1592 | int err = -ENOMEM; |
45bb912b | 1593 | |
dd4f699d LE |
1594 | /* TRIM/DISCARD: for now, always use the helper function |
1595 | * blkdev_issue_zeroout(..., discard=true). | |
1596 | * It's synchronous, but it does the right thing wrt. bio splitting. | |
1597 | * Correctness first, performance later. Next step is to code an | |
1598 | * asynchronous variant of the same. | |
1599 | */ | |
9104d31a | 1600 | if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) { |
a0fb3c47 LE |
1601 | /* wait for all pending IO completions, before we start |
1602 | * zeroing things out. */ | |
5dd2ca19 | 1603 | conn_wait_active_ee_empty(peer_req->peer_device->connection); |
45d2933c LE |
1604 | /* add it to the active list now, |
1605 | * so we can find it to present it in debugfs */ | |
21ae5d7f LE |
1606 | peer_req->submit_jif = jiffies; |
1607 | peer_req->flags |= EE_SUBMITTED; | |
700ca8c0 PR |
1608 | |
1609 | /* If this was a resync request from receive_rs_deallocated(), | |
1610 | * it is already on the sync_ee list */ | |
1611 | if (list_empty(&peer_req->w.list)) { | |
1612 | spin_lock_irq(&device->resource->req_lock); | |
1613 | list_add_tail(&peer_req->w.list, &device->active_ee); | |
1614 | spin_unlock_irq(&device->resource->req_lock); | |
1615 | } | |
1616 | ||
9104d31a LE |
1617 | if (peer_req->flags & EE_IS_TRIM) |
1618 | drbd_issue_peer_discard(device, peer_req); | |
1619 | else /* EE_WRITE_SAME */ | |
1620 | drbd_issue_peer_wsame(device, peer_req); | |
a0fb3c47 LE |
1621 | return 0; |
1622 | } | |
1623 | ||
45bb912b LE |
1624 | /* In most cases, we will only need one bio. But in case the lower |
1625 | * level restrictions happen to be different at this offset on this | |
1626 | * side than those of the sending peer, we may need to submit the | |
9476f39d LE |
1627 | * request in more than one bio. |
1628 | * | |
1629 | * Plain bio_alloc is good enough here, this is no DRBD internally | |
1630 | * generated bio, but a bio allocated on behalf of the peer. | |
1631 | */ | |
45bb912b LE |
1632 | next_bio: |
1633 | bio = bio_alloc(GFP_NOIO, nr_pages); | |
1634 | if (!bio) { | |
a0fb3c47 | 1635 | drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages); |
45bb912b LE |
1636 | goto fail; |
1637 | } | |
db830c46 | 1638 | /* > peer_req->i.sector, unless this is the first bio */ |
4f024f37 | 1639 | bio->bi_iter.bi_sector = sector; |
b30ab791 | 1640 | bio->bi_bdev = device->ldev->backing_bdev; |
bb3cc85e | 1641 | bio_set_op_attrs(bio, op, op_flags); |
db830c46 | 1642 | bio->bi_private = peer_req; |
fcefa62e | 1643 | bio->bi_end_io = drbd_peer_request_endio; |
45bb912b LE |
1644 | |
1645 | bio->bi_next = bios; | |
1646 | bios = bio; | |
1647 | ++n_bios; | |
1648 | ||
1649 | page_chain_for_each(page) { | |
11f8b2b6 | 1650 | unsigned len = min_t(unsigned, data_size, PAGE_SIZE); |
45bb912b | 1651 | if (!bio_add_page(bio, page, len, 0)) { |
10f6d992 LE |
1652 | /* A single page must always be possible! |
1653 | * But in case it fails anyways, | |
1654 | * we deal with it, and complain (below). */ | |
1655 | if (bio->bi_vcnt == 0) { | |
d0180171 | 1656 | drbd_err(device, |
10f6d992 LE |
1657 | "bio_add_page failed for len=%u, " |
1658 | "bi_vcnt=0 (bi_sector=%llu)\n", | |
4f024f37 | 1659 | len, (uint64_t)bio->bi_iter.bi_sector); |
10f6d992 LE |
1660 | err = -ENOSPC; |
1661 | goto fail; | |
1662 | } | |
45bb912b LE |
1663 | goto next_bio; |
1664 | } | |
11f8b2b6 | 1665 | data_size -= len; |
45bb912b LE |
1666 | sector += len >> 9; |
1667 | --nr_pages; | |
1668 | } | |
11f8b2b6 | 1669 | D_ASSERT(device, data_size == 0); |
a0fb3c47 | 1670 | D_ASSERT(device, page == NULL); |
45bb912b | 1671 | |
db830c46 | 1672 | atomic_set(&peer_req->pending_bios, n_bios); |
21ae5d7f LE |
1673 | /* for debugfs: update timestamp, mark as submitted */ |
1674 | peer_req->submit_jif = jiffies; | |
1675 | peer_req->flags |= EE_SUBMITTED; | |
45bb912b LE |
1676 | do { |
1677 | bio = bios; | |
1678 | bios = bios->bi_next; | |
1679 | bio->bi_next = NULL; | |
1680 | ||
b30ab791 | 1681 | drbd_generic_make_request(device, fault_type, bio); |
45bb912b | 1682 | } while (bios); |
45bb912b LE |
1683 | return 0; |
1684 | ||
1685 | fail: | |
1686 | while (bios) { | |
1687 | bio = bios; | |
1688 | bios = bios->bi_next; | |
1689 | bio_put(bio); | |
1690 | } | |
10f6d992 | 1691 | return err; |
45bb912b LE |
1692 | } |
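The splitting loop above only ever starts a new bio when bio_add_page() refuses a page, so the number of bios is determined by the per-bio limit; a toy userspace sketch of that counting (hypothetical limit, not the real queue limits):

#include <stdio.h>

int main(void)
{
	int nr_pages = 5;      /* pages in the peer request's page chain */
	int per_bio_limit = 2; /* toy stand-in for bio_add_page() refusing */
	int n_bios = 0, in_bio = per_bio_limit;
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (in_bio == per_bio_limit) { /* "goto next_bio" */
			n_bios++;
			in_bio = 0;
		}
		in_bio++;                      /* bio_add_page() succeeded */
	}
	printf("%d pages submitted in %d bios\n", nr_pages, n_bios); /* 3 bios */
	return 0;
}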
1693 | ||
b30ab791 | 1694 | static void drbd_remove_epoch_entry_interval(struct drbd_device *device, |
db830c46 | 1695 | struct drbd_peer_request *peer_req) |
53840641 | 1696 | { |
db830c46 | 1697 | struct drbd_interval *i = &peer_req->i; |
53840641 | 1698 | |
b30ab791 | 1699 | drbd_remove_interval(&device->write_requests, i); |
53840641 AG |
1700 | drbd_clear_interval(i); |
1701 | ||
6c852bec | 1702 | /* Wake up any processes waiting for this peer request to complete. */ |
53840641 | 1703 | if (i->waiting) |
b30ab791 | 1704 | wake_up(&device->misc_wait); |
53840641 AG |
1705 | } |
1706 | ||
bde89a9e | 1707 | static void conn_wait_active_ee_empty(struct drbd_connection *connection) |
77fede51 | 1708 | { |
c06ece6b | 1709 | struct drbd_peer_device *peer_device; |
77fede51 PR |
1710 | int vnr; |
1711 | ||
1712 | rcu_read_lock(); | |
c06ece6b AG |
1713 | idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { |
1714 | struct drbd_device *device = peer_device->device; | |
1715 | ||
b30ab791 | 1716 | kref_get(&device->kref); |
77fede51 | 1717 | rcu_read_unlock(); |
b30ab791 | 1718 | drbd_wait_ee_list_empty(device, &device->active_ee); |
05a10ec7 | 1719 | kref_put(&device->kref, drbd_destroy_device); |
77fede51 PR |
1720 | rcu_read_lock(); |
1721 | } | |
1722 | rcu_read_unlock(); | |
1723 | } | |
1724 | ||
bde89a9e | 1725 | static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 1726 | { |
2451fc3b | 1727 | int rv; |
e658983a | 1728 | struct p_barrier *p = pi->data; |
b411b363 PR |
1729 | struct drbd_epoch *epoch; |
1730 | ||
9ed57dcb LE |
1731 | /* FIXME these are unacked on connection, |
1732 | * not a specific (peer)device. | |
1733 | */ | |
bde89a9e AG |
1734 | connection->current_epoch->barrier_nr = p->barrier; |
1735 | connection->current_epoch->connection = connection; | |
1736 | rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR); | |
b411b363 PR |
1737 | |
1738 | /* P_BARRIER_ACK may imply that the corresponding extent is dropped from | |
1739 | * the activity log, which means it would not be resynced in case the | |
1740 | * R_PRIMARY crashes now. | |
1741 | * Therefore we must send the barrier_ack after the barrier request was | |
1742 | * completed. */ | |
e9526580 | 1743 | switch (connection->resource->write_ordering) { |
f6ba8636 | 1744 | case WO_NONE: |
b411b363 | 1745 | if (rv == FE_RECYCLED) |
82bc0194 | 1746 | return 0; |
2451fc3b PR |
1747 | |
1748 | /* receiver context, in the writeout path of the other node. | |
1749 | * avoid potential distributed deadlock */ | |
1750 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); | |
1751 | if (epoch) | |
1752 | break; | |
1753 | else | |
1ec861eb | 1754 | drbd_warn(connection, "Allocation of an epoch failed, slowing down\n"); |
2451fc3b | 1755 | /* Fall through */ |
b411b363 | 1756 | |
f6ba8636 AG |
1757 | case WO_BDEV_FLUSH: |
1758 | case WO_DRAIN_IO: | |
bde89a9e AG |
1759 | conn_wait_active_ee_empty(connection); |
1760 | drbd_flush(connection); | |
2451fc3b | 1761 | |
bde89a9e | 1762 | if (atomic_read(&connection->current_epoch->epoch_size)) { |
2451fc3b PR |
1763 | epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); |
1764 | if (epoch) | |
1765 | break; | |
b411b363 PR |
1766 | } |
1767 | ||
82bc0194 | 1768 | return 0; |
2451fc3b | 1769 | default: |
e9526580 PR |
1770 | drbd_err(connection, "Strangeness in resource->write_ordering %d\n", | |
1771 | connection->resource->write_ordering); | |
82bc0194 | 1772 | return -EIO; |
b411b363 PR |
1773 | } |
1774 | ||
1775 | epoch->flags = 0; | |
1776 | atomic_set(&epoch->epoch_size, 0); | |
1777 | atomic_set(&epoch->active, 0); | |
1778 | ||
bde89a9e AG |
1779 | spin_lock(&connection->epoch_lock); |
1780 | if (atomic_read(&connection->current_epoch->epoch_size)) { | |
1781 | list_add(&epoch->list, &connection->current_epoch->list); | |
1782 | connection->current_epoch = epoch; | |
1783 | connection->epochs++; | |
b411b363 PR |
1784 | } else { |
1785 | /* The current_epoch got recycled while we allocated this one... */ | |
1786 | kfree(epoch); | |
1787 | } | |
bde89a9e | 1788 | spin_unlock(&connection->epoch_lock); |
b411b363 | 1789 | |
82bc0194 | 1790 | return 0; |
b411b363 PR |
1791 | } |
1792 | ||
9104d31a LE |
1793 | /* quick wrapper in case payload size != request_size (write same) */ |
1794 | static void drbd_csum_ee_size(struct crypto_ahash *h, | |
1795 | struct drbd_peer_request *r, void *d, | |
1796 | unsigned int payload_size) | |
1797 | { | |
1798 | unsigned int tmp = r->i.size; | |
1799 | r->i.size = payload_size; | |
1800 | drbd_csum_ee(h, r, d); | |
1801 | r->i.size = tmp; | |
1802 | } | |
1803 | ||
b411b363 | 1804 | /* used from receive_RSDataReply (recv_resync_read) |
9104d31a LE |
1805 | * and from receive_Data. |
1806 | * data_size: actual payload ("data in") | |
1807 | * for normal writes that is bi_size. | |
1808 | * for discards, that is zero. | |
1809 | * for write same, it is logical_block_size. | |
1810 | * both trim and write same have the bi_size ("data len to be affected") | |
1811 | * as extra argument in the packet header. | |
1812 | */ | |
f6ffca9f | 1813 | static struct drbd_peer_request * |
69a22773 | 1814 | read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, |
a0fb3c47 | 1815 | struct packet_info *pi) __must_hold(local) |
b411b363 | 1816 | { |
69a22773 | 1817 | struct drbd_device *device = peer_device->device; |
b30ab791 | 1818 | const sector_t capacity = drbd_get_capacity(device->this_bdev); |
db830c46 | 1819 | struct drbd_peer_request *peer_req; |
b411b363 | 1820 | struct page *page; |
11f8b2b6 AG |
1821 | int digest_size, err; |
1822 | unsigned int data_size = pi->size, ds; | |
69a22773 AG |
1823 | void *dig_in = peer_device->connection->int_dig_in; |
1824 | void *dig_vv = peer_device->connection->int_dig_vv; | |
6b4388ac | 1825 | unsigned long *data; |
a0fb3c47 | 1826 | struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL; |
9104d31a | 1827 | struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL; |
b411b363 | 1828 | |
11f8b2b6 | 1829 | digest_size = 0; |
a0fb3c47 | 1830 | if (!trim && peer_device->connection->peer_integrity_tfm) { |
9534d671 | 1831 | digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm); |
9f5bdc33 AG |
1832 | /* |
1833 | * FIXME: Receive the incoming digest into the receive buffer | |
1834 | * here, together with its struct p_data? | |
1835 | */ | |
11f8b2b6 | 1836 | err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); |
a5c31904 | 1837 | if (err) |
b411b363 | 1838 | return NULL; |
11f8b2b6 | 1839 | data_size -= digest_size; |
b411b363 PR |
1840 | } |
1841 | ||
9104d31a LE |
1842 | /* assume request_size == data_size, but special case trim and wsame. */ |
1843 | ds = data_size; | |
a0fb3c47 | 1844 | if (trim) { |
9104d31a LE |
1845 | if (!expect(data_size == 0)) |
1846 | return NULL; | |
1847 | ds = be32_to_cpu(trim->size); | |
1848 | } else if (wsame) { | |
1849 | if (data_size != queue_logical_block_size(device->rq_queue)) { | |
1850 | drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n", | |
1851 | data_size, queue_logical_block_size(device->rq_queue)); | |
1852 | return NULL; | |
1853 | } | |
1854 | if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) { | |
1855 | drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n", | |
1856 | data_size, bdev_logical_block_size(device->ldev->backing_bdev)); | |
1857 | return NULL; | |
1858 | } | |
1859 | ds = be32_to_cpu(wsame->size); | |
a0fb3c47 LE |
1860 | } |
1861 | ||
9104d31a | 1862 | if (!expect(IS_ALIGNED(ds, 512))) |
841ce241 | 1863 | return NULL; |
9104d31a LE |
1864 | if (trim || wsame) { |
1865 | if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9))) | |
1866 | return NULL; | |
1867 | } else if (!expect(ds <= DRBD_MAX_BIO_SIZE)) | |
841ce241 | 1868 | return NULL; |
b411b363 | 1869 | |
6666032a LE |
1870 | /* even though we trust our peer, | |
1871 | * we sometimes have to double check. */ | |
9104d31a | 1872 | if (sector + (ds>>9) > capacity) { |
d0180171 | 1873 | drbd_err(device, "request from peer beyond end of local disk: " |
fdda6544 | 1874 | "capacity: %llus < sector: %llus + size: %u\n", |
6666032a | 1875 | (unsigned long long)capacity, |
9104d31a | 1876 | (unsigned long long)sector, ds); |
6666032a LE |
1877 | return NULL; |
1878 | } | |
1879 | ||
b411b363 PR |
1880 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD |
1881 | * "criss-cross" setup, that might cause write-out on some other DRBD, | |
1882 | * which in turn might block on the other node at this very place. */ | |
9104d31a | 1883 | peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); |
db830c46 | 1884 | if (!peer_req) |
b411b363 | 1885 | return NULL; |
45bb912b | 1886 | |
21ae5d7f | 1887 | peer_req->flags |= EE_WRITE; |
9104d31a LE |
1888 | if (trim) { |
1889 | peer_req->flags |= EE_IS_TRIM; | |
81a3537a | 1890 | return peer_req; |
9104d31a LE |
1891 | } |
1892 | if (wsame) | |
1893 | peer_req->flags |= EE_WRITE_SAME; | |
a73ff323 | 1894 | |
9104d31a | 1895 | /* receive payload size bytes into page chain */ |
b411b363 | 1896 | ds = data_size; |
db830c46 | 1897 | page = peer_req->pages; |
45bb912b LE |
1898 | page_chain_for_each(page) { |
1899 | unsigned len = min_t(int, ds, PAGE_SIZE); | |
6b4388ac | 1900 | data = kmap(page); |
69a22773 | 1901 | err = drbd_recv_all_warn(peer_device->connection, data, len); |
b30ab791 | 1902 | if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) { |
d0180171 | 1903 | drbd_err(device, "Fault injection: Corrupting data on receive\n"); |
6b4388ac PR |
1904 | data[0] = data[0] ^ (unsigned long)-1; |
1905 | } | |
b411b363 | 1906 | kunmap(page); |
a5c31904 | 1907 | if (err) { |
b30ab791 | 1908 | drbd_free_peer_req(device, peer_req); |
b411b363 PR |
1909 | return NULL; |
1910 | } | |
a5c31904 | 1911 | ds -= len; |
b411b363 PR |
1912 | } |
1913 | ||
11f8b2b6 | 1914 | if (digest_size) { |
9104d31a | 1915 | drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size); |
11f8b2b6 | 1916 | if (memcmp(dig_in, dig_vv, digest_size)) { |
d0180171 | 1917 | drbd_err(device, "Digest integrity check FAILED: %llus +%u\n", |
470be44a | 1918 | (unsigned long long)sector, data_size); |
b30ab791 | 1919 | drbd_free_peer_req(device, peer_req); |
b411b363 PR |
1920 | return NULL; |
1921 | } | |
1922 | } | |
11f8b2b6 | 1923 | device->recv_cnt += data_size >> 9; |
db830c46 | 1924 | return peer_req; |
b411b363 PR |
1925 | } |
1926 | ||
1927 | /* drbd_drain_block() just takes a data block | |
1928 | * out of the socket input buffer, and discards it. | |
1929 | */ | |
69a22773 | 1930 | static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size) |
b411b363 PR |
1931 | { |
1932 | struct page *page; | |
a5c31904 | 1933 | int err = 0; |
b411b363 PR |
1934 | void *data; |
1935 | ||
c3470cde | 1936 | if (!data_size) |
fc5be839 | 1937 | return 0; |
c3470cde | 1938 | |
69a22773 | 1939 | page = drbd_alloc_pages(peer_device, 1, 1); |
b411b363 PR |
1940 | |
1941 | data = kmap(page); | |
1942 | while (data_size) { | |
fc5be839 AG |
1943 | unsigned int len = min_t(int, data_size, PAGE_SIZE); |
1944 | ||
69a22773 | 1945 | err = drbd_recv_all_warn(peer_device->connection, data, len); |
a5c31904 | 1946 | if (err) |
b411b363 | 1947 | break; |
a5c31904 | 1948 | data_size -= len; |
b411b363 PR |
1949 | } |
1950 | kunmap(page); | |
69a22773 | 1951 | drbd_free_pages(peer_device->device, page, 0); |
fc5be839 | 1952 | return err; |
b411b363 PR |
1953 | } |
1954 | ||
69a22773 | 1955 | static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_request *req, |
b411b363 PR |
1956 | sector_t sector, int data_size) |
1957 | { | |
7988613b KO |
1958 | struct bio_vec bvec; |
1959 | struct bvec_iter iter; | |
b411b363 | 1960 | struct bio *bio; |
11f8b2b6 | 1961 | int digest_size, err, expect; |
69a22773 AG |
1962 | void *dig_in = peer_device->connection->int_dig_in; |
1963 | void *dig_vv = peer_device->connection->int_dig_vv; | |
b411b363 | 1964 | |
11f8b2b6 | 1965 | digest_size = 0; |
69a22773 | 1966 | if (peer_device->connection->peer_integrity_tfm) { |
9534d671 | 1967 | digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm); |
11f8b2b6 | 1968 | err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size); |
a5c31904 AG |
1969 | if (err) |
1970 | return err; | |
11f8b2b6 | 1971 | data_size -= digest_size; |
b411b363 PR |
1972 | } |
1973 | ||
b411b363 PR |
1974 | /* optimistically update recv_cnt. if receiving fails below, |
1975 | * we disconnect anyways, and counters will be reset. */ | |
69a22773 | 1976 | peer_device->device->recv_cnt += data_size>>9; |
b411b363 PR |
1977 | |
1978 | bio = req->master_bio; | |
69a22773 | 1979 | D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); |
b411b363 | 1980 | |
7988613b KO |
1981 | bio_for_each_segment(bvec, bio, iter) { |
1982 | void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; | |
1983 | expect = min_t(int, data_size, bvec.bv_len); | |
69a22773 | 1984 | err = drbd_recv_all_warn(peer_device->connection, mapped, expect); |
7988613b | 1985 | kunmap(bvec.bv_page); |
a5c31904 AG |
1986 | if (err) |
1987 | return err; | |
1988 | data_size -= expect; | |
b411b363 PR |
1989 | } |
1990 | ||
11f8b2b6 | 1991 | if (digest_size) { |
69a22773 | 1992 | drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv); |
11f8b2b6 | 1993 | if (memcmp(dig_in, dig_vv, digest_size)) { |
69a22773 | 1994 | drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n"); |
28284cef | 1995 | return -EINVAL; |
b411b363 PR |
1996 | } |
1997 | } | |
1998 | ||
69a22773 | 1999 | D_ASSERT(peer_device->device, data_size == 0); |
28284cef | 2000 | return 0; |
b411b363 PR |
2001 | } |
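The digest handling above (and in read_in_block()) follows one pattern: the digest travels ahead of the payload, and the receiver recomputes over what it actually received and compares. A minimal userspace sketch of that shape, with a toy rotate-XOR checksum standing in for crypto_ahash (all names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t toy_csum(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;
	while (len--)
		sum = (sum << 1 | sum >> 31) ^ *buf++;
	return sum;
}

int main(void)
{
	unsigned char payload[] = "replicated block payload";
	uint32_t dig_in = toy_csum(payload, sizeof(payload)); /* as sent */
	uint32_t dig_vv = toy_csum(payload, sizeof(payload)); /* as received */

	if (memcmp(&dig_in, &dig_vv, sizeof(dig_in)))
		printf("Digest integrity check FAILED\n");
	else
		printf("digest ok\n");
	return 0;
}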
2002 | ||
a990be46 | 2003 | /* |
668700b4 | 2004 | * e_end_resync_block() is called in ack_sender context via |
a990be46 AG |
2005 | * drbd_finish_peer_reqs(). |
2006 | */ | |
99920dc5 | 2007 | static int e_end_resync_block(struct drbd_work *w, int unused) |
b411b363 | 2008 | { |
8050e6d0 | 2009 | struct drbd_peer_request *peer_req = |
a8cd15ba AG |
2010 | container_of(w, struct drbd_peer_request, w); |
2011 | struct drbd_peer_device *peer_device = peer_req->peer_device; | |
2012 | struct drbd_device *device = peer_device->device; | |
db830c46 | 2013 | sector_t sector = peer_req->i.sector; |
99920dc5 | 2014 | int err; |
b411b363 | 2015 | |
0b0ba1ef | 2016 | D_ASSERT(device, drbd_interval_empty(&peer_req->i)); |
b411b363 | 2017 | |
db830c46 | 2018 | if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
b30ab791 | 2019 | drbd_set_in_sync(device, sector, peer_req->i.size); |
a8cd15ba | 2020 | err = drbd_send_ack(peer_device, P_RS_WRITE_ACK, peer_req); |
b411b363 PR |
2021 | } else { |
2022 | /* Record failure to sync */ | |
b30ab791 | 2023 | drbd_rs_failed_io(device, sector, peer_req->i.size); |
b411b363 | 2024 | |
a8cd15ba | 2025 | err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); |
b411b363 | 2026 | } |
b30ab791 | 2027 | dec_unacked(device); |
b411b363 | 2028 | |
99920dc5 | 2029 | return err; |
b411b363 PR |
2030 | } |
2031 | ||
69a22773 | 2032 | static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, |
a0fb3c47 | 2033 | struct packet_info *pi) __releases(local) |
b411b363 | 2034 | { |
69a22773 | 2035 | struct drbd_device *device = peer_device->device; |
db830c46 | 2036 | struct drbd_peer_request *peer_req; |
b411b363 | 2037 | |
a0fb3c47 | 2038 | peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); |
db830c46 | 2039 | if (!peer_req) |
45bb912b | 2040 | goto fail; |
b411b363 | 2041 | |
b30ab791 | 2042 | dec_rs_pending(device); |
b411b363 | 2043 | |
b30ab791 | 2044 | inc_unacked(device); |
b411b363 PR |
2045 | /* corresponding dec_unacked() in e_end_resync_block() |
2046 | * respective _drbd_clear_done_ee */ | |
2047 | ||
a8cd15ba | 2048 | peer_req->w.cb = e_end_resync_block; |
21ae5d7f | 2049 | peer_req->submit_jif = jiffies; |
45bb912b | 2050 | |
0500813f | 2051 | spin_lock_irq(&device->resource->req_lock); |
b9ed7080 | 2052 | list_add_tail(&peer_req->w.list, &device->sync_ee); |
0500813f | 2053 | spin_unlock_irq(&device->resource->req_lock); |
b411b363 | 2054 | |
a0fb3c47 | 2055 | atomic_add(pi->size >> 9, &device->rs_sect_ev); |
bb3cc85e MC |
2056 | if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0, |
2057 | DRBD_FAULT_RS_WR) == 0) | |
e1c1b0fc | 2058 | return 0; |
b411b363 | 2059 | |
10f6d992 | 2060 | /* don't care for the reason here */ |
d0180171 | 2061 | drbd_err(device, "submit failed, triggering re-connect\n"); |
0500813f | 2062 | spin_lock_irq(&device->resource->req_lock); |
a8cd15ba | 2063 | list_del(&peer_req->w.list); |
0500813f | 2064 | spin_unlock_irq(&device->resource->req_lock); |
22cc37a9 | 2065 | |
b30ab791 | 2066 | drbd_free_peer_req(device, peer_req); |
45bb912b | 2067 | fail: |
b30ab791 | 2068 | put_ldev(device); |
e1c1b0fc | 2069 | return -EIO; |
b411b363 PR |
2070 | } |
2071 | ||
668eebc6 | 2072 | static struct drbd_request * |
b30ab791 | 2073 | find_request(struct drbd_device *device, struct rb_root *root, u64 id, |
bc9c5c41 | 2074 | sector_t sector, bool missing_ok, const char *func) |
51624585 | 2075 | { |
51624585 AG |
2076 | struct drbd_request *req; |
2077 | ||
bc9c5c41 AG |
2078 | /* Request object according to our peer */ |
2079 | req = (struct drbd_request *)(unsigned long)id; | |
5e472264 | 2080 | if (drbd_contains_interval(root, sector, &req->i) && req->i.local) |
668eebc6 | 2081 | return req; |
c3afd8f5 | 2082 | if (!missing_ok) { |
d0180171 | 2083 | drbd_err(device, "%s: failed to find request 0x%lx, sector %llus\n", func, |
c3afd8f5 AG |
2084 | (unsigned long)id, (unsigned long long)sector); |
2085 | } | |
51624585 | 2086 | return NULL; |
b411b363 PR |
2087 | } |
2088 | ||
bde89a9e | 2089 | static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 2090 | { |
9f4fe9ad | 2091 | struct drbd_peer_device *peer_device; |
b30ab791 | 2092 | struct drbd_device *device; |
b411b363 PR |
2093 | struct drbd_request *req; |
2094 | sector_t sector; | |
82bc0194 | 2095 | int err; |
e658983a | 2096 | struct p_data *p = pi->data; |
4a76b161 | 2097 | |
9f4fe9ad AG |
2098 | peer_device = conn_peer_device(connection, pi->vnr); |
2099 | if (!peer_device) | |
4a76b161 | 2100 | return -EIO; |
9f4fe9ad | 2101 | device = peer_device->device; |
b411b363 PR |
2102 | |
2103 | sector = be64_to_cpu(p->sector); | |
2104 | ||
0500813f | 2105 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 | 2106 | req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); |
0500813f | 2107 | spin_unlock_irq(&device->resource->req_lock); |
c3afd8f5 | 2108 | if (unlikely(!req)) |
82bc0194 | 2109 | return -EIO; |
b411b363 | 2110 | |
24c4830c | 2111 | /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid |
b411b363 PR |
2112 | * special casing it there for the various failure cases. |
2113 | * still no race with drbd_fail_pending_reads */ | |
69a22773 | 2114 | err = recv_dless_read(peer_device, req, sector, pi->size); |
82bc0194 | 2115 | if (!err) |
8554df1c | 2116 | req_mod(req, DATA_RECEIVED); |
b411b363 PR |
2117 | /* else: nothing. handled from drbd_disconnect... |
2118 | * I don't think we may complete this just yet | |
2119 | * in case we are "on-disconnect: freeze" */ | |
2120 | ||
82bc0194 | 2121 | return err; |
b411b363 PR |
2122 | } |
2123 | ||
bde89a9e | 2124 | static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 2125 | { |
9f4fe9ad | 2126 | struct drbd_peer_device *peer_device; |
b30ab791 | 2127 | struct drbd_device *device; |
b411b363 | 2128 | sector_t sector; |
82bc0194 | 2129 | int err; |
e658983a | 2130 | struct p_data *p = pi->data; |
4a76b161 | 2131 | |
9f4fe9ad AG |
2132 | peer_device = conn_peer_device(connection, pi->vnr); |
2133 | if (!peer_device) | |
4a76b161 | 2134 | return -EIO; |
9f4fe9ad | 2135 | device = peer_device->device; |
b411b363 PR |
2136 | |
2137 | sector = be64_to_cpu(p->sector); | |
0b0ba1ef | 2138 | D_ASSERT(device, p->block_id == ID_SYNCER); |
b411b363 | 2139 | |
b30ab791 | 2140 | if (get_ldev(device)) { |
b411b363 PR |
2141 | /* data is submitted to disk within recv_resync_read. |
2142 | * corresponding put_ldev done below on error, | |
fcefa62e | 2143 | * or in drbd_peer_request_endio. */ |
a0fb3c47 | 2144 | err = recv_resync_read(peer_device, sector, pi); |
b411b363 PR |
2145 | } else { |
2146 | if (__ratelimit(&drbd_ratelimit_state)) | |
d0180171 | 2147 | drbd_err(device, "Can not write resync data to local disk.\n"); |
b411b363 | 2148 | |
69a22773 | 2149 | err = drbd_drain_block(peer_device, pi->size); |
b411b363 | 2150 | |
69a22773 | 2151 | drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); |
b411b363 PR |
2152 | } |
2153 | ||
b30ab791 | 2154 | atomic_add(pi->size >> 9, &device->rs_sect_in); |
778f271d | 2155 | |
82bc0194 | 2156 | return err; |
b411b363 PR |
2157 | } |
2158 | ||
b30ab791 | 2159 | static void restart_conflicting_writes(struct drbd_device *device, |
7be8da07 | 2160 | sector_t sector, int size) |
b411b363 | 2161 | { |
7be8da07 AG |
2162 | struct drbd_interval *i; |
2163 | struct drbd_request *req; | |
2164 | ||
b30ab791 | 2165 | drbd_for_each_overlap(i, &device->write_requests, sector, size) { |
7be8da07 AG |
2166 | if (!i->local) |
2167 | continue; | |
2168 | req = container_of(i, struct drbd_request, i); | |
2169 | if (req->rq_state & RQ_LOCAL_PENDING || | |
2170 | !(req->rq_state & RQ_POSTPONED)) | |
2171 | continue; | |
2312f0b3 LE |
2172 | /* as it is RQ_POSTPONED, this will cause it to |
2173 | * be queued on the retry workqueue. */ | |
d4dabbe2 | 2174 | __req_mod(req, CONFLICT_RESOLVED, NULL); |
7be8da07 AG |
2175 | } |
2176 | } | |
b411b363 | 2177 | |
a990be46 | 2178 | /* |
668700b4 | 2179 | * e_end_block() is called in ack_sender context via drbd_finish_peer_reqs(). |
b411b363 | 2180 | */ |
99920dc5 | 2181 | static int e_end_block(struct drbd_work *w, int cancel) |
b411b363 | 2182 | { |
8050e6d0 | 2183 | struct drbd_peer_request *peer_req = |
a8cd15ba AG |
2184 | container_of(w, struct drbd_peer_request, w); |
2185 | struct drbd_peer_device *peer_device = peer_req->peer_device; | |
2186 | struct drbd_device *device = peer_device->device; | |
db830c46 | 2187 | sector_t sector = peer_req->i.sector; |
99920dc5 | 2188 | int err = 0, pcmd; |
b411b363 | 2189 | |
303d1448 | 2190 | if (peer_req->flags & EE_SEND_WRITE_ACK) { |
db830c46 | 2191 | if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { |
b30ab791 AG |
2192 | pcmd = (device->state.conn >= C_SYNC_SOURCE && |
2193 | device->state.conn <= C_PAUSED_SYNC_T && | |
db830c46 | 2194 | peer_req->flags & EE_MAY_SET_IN_SYNC) ? |
b411b363 | 2195 | P_RS_WRITE_ACK : P_WRITE_ACK; |
a8cd15ba | 2196 | err = drbd_send_ack(peer_device, pcmd, peer_req); |
b411b363 | 2197 | if (pcmd == P_RS_WRITE_ACK) |
b30ab791 | 2198 | drbd_set_in_sync(device, sector, peer_req->i.size); |
b411b363 | 2199 | } else { |
a8cd15ba | 2200 | err = drbd_send_ack(peer_device, P_NEG_ACK, peer_req); |
b411b363 PR |
2201 | /* we expect it to be marked out of sync anyways... |
2202 | * maybe assert this? */ | |
2203 | } | |
b30ab791 | 2204 | dec_unacked(device); |
b411b363 | 2205 | } |
08d0dabf | 2206 | |
b411b363 PR |
2207 | /* we delete from the conflict detection hash _after_ we sent out the |
2208 | * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ | |
302bdeae | 2209 | if (peer_req->flags & EE_IN_INTERVAL_TREE) { |
0500813f | 2210 | spin_lock_irq(&device->resource->req_lock); |
0b0ba1ef | 2211 | D_ASSERT(device, !drbd_interval_empty(&peer_req->i)); |
b30ab791 | 2212 | drbd_remove_epoch_entry_interval(device, peer_req); |
7be8da07 | 2213 | if (peer_req->flags & EE_RESTART_REQUESTS) |
b30ab791 | 2214 | restart_conflicting_writes(device, sector, peer_req->i.size); |
0500813f | 2215 | spin_unlock_irq(&device->resource->req_lock); |
bb3bfe96 | 2216 | } else |
0b0ba1ef | 2217 | D_ASSERT(device, drbd_interval_empty(&peer_req->i)); |
b411b363 | 2218 | |
5dd2ca19 | 2219 | drbd_may_finish_epoch(peer_device->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); |
b411b363 | 2220 | |
99920dc5 | 2221 | return err; |
b411b363 PR |
2222 | } |
2223 | ||
a8cd15ba | 2224 | static int e_send_ack(struct drbd_work *w, enum drbd_packet ack) |
b411b363 | 2225 | { |
8050e6d0 | 2226 | struct drbd_peer_request *peer_req = |
a8cd15ba AG |
2227 | container_of(w, struct drbd_peer_request, w); |
2228 | struct drbd_peer_device *peer_device = peer_req->peer_device; | |
99920dc5 | 2229 | int err; |
b411b363 | 2230 | |
a8cd15ba AG |
2231 | err = drbd_send_ack(peer_device, ack, peer_req); |
2232 | dec_unacked(peer_device->device); | |
b411b363 | 2233 | |
99920dc5 | 2234 | return err; |
b411b363 PR |
2235 | } |
2236 | ||
d4dabbe2 | 2237 | static int e_send_superseded(struct drbd_work *w, int unused) |
7be8da07 | 2238 | { |
a8cd15ba | 2239 | return e_send_ack(w, P_SUPERSEDED); |
7be8da07 AG |
2240 | } |
2241 | ||
99920dc5 | 2242 | static int e_send_retry_write(struct drbd_work *w, int unused) |
7be8da07 | 2243 | { |
a8cd15ba AG |
2244 | struct drbd_peer_request *peer_req = |
2245 | container_of(w, struct drbd_peer_request, w); | |
2246 | struct drbd_connection *connection = peer_req->peer_device->connection; | |
7be8da07 | 2247 | |
a8cd15ba | 2248 | return e_send_ack(w, connection->agreed_pro_version >= 100 ? |
d4dabbe2 | 2249 | P_RETRY_WRITE : P_SUPERSEDED); |
7be8da07 | 2250 | } |
b411b363 | 2251 | |
3e394da1 AG |
2252 | static bool seq_greater(u32 a, u32 b) |
2253 | { | |
2254 | /* | |
2255 | * We assume 32-bit wrap-around here. | |
2256 | * For 24-bit wrap-around, we would have to shift: | |
2257 | * a <<= 8; b <<= 8; | |
2258 | */ | |
2259 | return (s32)a - (s32)b > 0; | |
2260 | } | |
b411b363 | 2261 | |
3e394da1 AG |
2262 | static u32 seq_max(u32 a, u32 b) |
2263 | { | |
2264 | return seq_greater(a, b) ? a : b; | |
b411b363 PR |
2265 | } |
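The signed-difference trick in seq_greater() is worth a concrete check; the sketch below is a userspace illustration (toy names) using the equivalent (s32)(a - b) form, which on two's-complement targets orders sequence numbers correctly across the 32-bit wrap:

#include <stdint.h>
#include <stdio.h>

static int toy_seq_greater(uint32_t a, uint32_t b)
{
	/* two's-complement interpretation of the 32-bit difference */
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t just_wrapped = 5, before_wrap = 0xfffffff0u;

	printf("%d\n", toy_seq_greater(just_wrapped, before_wrap)); /* 1: 5 is "after" */
	printf("%d\n", toy_seq_greater(before_wrap, just_wrapped)); /* 0 */
	return 0;
}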
2266 | ||
69a22773 | 2267 | static void update_peer_seq(struct drbd_peer_device *peer_device, unsigned int peer_seq) |
3e394da1 | 2268 | { |
69a22773 | 2269 | struct drbd_device *device = peer_device->device; |
3c13b680 | 2270 | unsigned int newest_peer_seq; |
3e394da1 | 2271 | |
69a22773 | 2272 | if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) { |
b30ab791 AG |
2273 | spin_lock(&device->peer_seq_lock); |
2274 | newest_peer_seq = seq_max(device->peer_seq, peer_seq); | |
2275 | device->peer_seq = newest_peer_seq; | |
2276 | spin_unlock(&device->peer_seq_lock); | |
2277 | /* wake up only if we actually changed device->peer_seq */ | |
3c13b680 | 2278 | if (peer_seq == newest_peer_seq) |
b30ab791 | 2279 | wake_up(&device->seq_wait); |
7be8da07 | 2280 | } |
b411b363 PR |
2281 | } |
2282 | ||
d93f6302 | 2283 | static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2) |
b6a370ba | 2284 | { |
d93f6302 LE |
2285 | return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9))); |
2286 | } | |
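Note the mixed units in overlaps() above: s1/s2 are sector offsets, l1/l2 are byte lengths, hence the >>9 conversions. A userspace check with hypothetical toy names:

#include <stdio.h>

typedef unsigned long long sector_t;

static int toy_overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1 >> 9) <= s2) || (s1 >= s2 + (l2 >> 9)));
}

int main(void)
{
	/* 4 KiB write at sector 0 vs 4 KiB resync write at sector 4: overlap */
	printf("%d\n", toy_overlaps(0, 4096, 4, 4096)); /* 1 */
	/* same write vs one starting right behind it at sector 8: disjoint */
	printf("%d\n", toy_overlaps(0, 4096, 8, 4096)); /* 0 */
	return 0;
}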
b6a370ba | 2287 | |
d93f6302 | 2288 | /* maybe change sync_ee into interval trees as well? */ |
b30ab791 | 2289 | static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req) |
d93f6302 LE |
2290 | { |
2291 | struct drbd_peer_request *rs_req; | |
b6a370ba PR |
2292 | bool rv = false; | |
2293 | ||
0500813f | 2294 | spin_lock_irq(&device->resource->req_lock); |
a8cd15ba | 2295 | list_for_each_entry(rs_req, &device->sync_ee, w.list) { |
d93f6302 LE |
2296 | if (overlaps(peer_req->i.sector, peer_req->i.size, |
2297 | rs_req->i.sector, rs_req->i.size)) { | |
b6a370ba PR |
2298 | rv = true; | |
2299 | break; | |
2300 | } | |
2301 | } | |
0500813f | 2302 | spin_unlock_irq(&device->resource->req_lock); |
b6a370ba PR |
2303 | |
2304 | return rv; | |
2305 | } | |
2306 | ||
b411b363 PR |
2307 | /* Called from receive_Data. |
2308 | * Synchronize packets on sock with packets on msock. | |
2309 | * | |
2310 | * This is here so even when a P_DATA packet traveling via sock overtook an Ack | |
2311 | * packet traveling on msock, they are still processed in the order they have | |
2312 | * been sent. | |
2313 | * | |
2314 | * Note: we don't care for Ack packets overtaking P_DATA packets. | |
2315 | * | |
b30ab791 | 2316 | * In case packet_seq is larger than device->peer_seq number, there are |
b411b363 | 2317 | * outstanding packets on the msock. We wait for them to arrive. |
b30ab791 | 2318 | * In case we are the logically next packet, we update device->peer_seq |
b411b363 PR |
2319 | * ourselves. Correctly handles 32bit wrap around. |
2320 | * | |
2321 | * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, | |
2322 | * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds | |
2323 | * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have | |
2324 | * 1<<9 == 512 seconds aka ages for the 32bit wrap around... | |
2325 | * | |
2326 | * returns 0 if we may process the packet, | |
2327 | * -ERESTARTSYS if we were interrupted (by disconnect signal). */ | |
69a22773 | 2328 | static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, const u32 peer_seq) |
b411b363 | 2329 | { |
69a22773 | 2330 | struct drbd_device *device = peer_device->device; |
b411b363 | 2331 | DEFINE_WAIT(wait); |
b411b363 | 2332 | long timeout; |
b874d231 | 2333 | int ret = 0, tp; |
7be8da07 | 2334 | |
69a22773 | 2335 | if (!test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)) |
7be8da07 AG |
2336 | return 0; |
2337 | ||
b30ab791 | 2338 | spin_lock(&device->peer_seq_lock); |
b411b363 | 2339 | for (;;) { |
b30ab791 AG |
2340 | if (!seq_greater(peer_seq - 1, device->peer_seq)) { |
2341 | device->peer_seq = seq_max(device->peer_seq, peer_seq); | |
b411b363 | 2342 | break; |
7be8da07 | 2343 | } |
b874d231 | 2344 | |
b411b363 PR |
2345 | if (signal_pending(current)) { |
2346 | ret = -ERESTARTSYS; | |
2347 | break; | |
2348 | } | |
b874d231 PR |
2349 | |
2350 | rcu_read_lock(); | |
5dd2ca19 | 2351 | tp = rcu_dereference(peer_device->connection->net_conf)->two_primaries; |
b874d231 PR |
2352 | rcu_read_unlock(); |
2353 | ||
2354 | if (!tp) | |
2355 | break; | |
2356 | ||
2357 | /* Only need to wait if two_primaries is enabled */ | |
b30ab791 AG |
2358 | prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE); |
2359 | spin_unlock(&device->peer_seq_lock); | |
44ed167d | 2360 | rcu_read_lock(); |
69a22773 | 2361 | timeout = rcu_dereference(peer_device->connection->net_conf)->ping_timeo*HZ/10; |
44ed167d | 2362 | rcu_read_unlock(); |
71b1c1eb | 2363 | timeout = schedule_timeout(timeout); |
b30ab791 | 2364 | spin_lock(&device->peer_seq_lock); |
7be8da07 | 2365 | if (!timeout) { |
b411b363 | 2366 | ret = -ETIMEDOUT; |
d0180171 | 2367 | drbd_err(device, "Timed out waiting for missing ack packets; disconnecting\n"); |
b411b363 PR |
2368 | break; |
2369 | } | |
2370 | } | |
b30ab791 AG |
2371 | spin_unlock(&device->peer_seq_lock); |
2372 | finish_wait(&device->seq_wait, &wait); | |
b411b363 PR |
2373 | return ret; |
2374 | } | |
2375 | ||
688593c5 LE |
2376 | /* see also bio_flags_to_wire() |
2377 | * DRBD_REQ_* names, because we need to semantically map the flags to data packet | |
2378 | * flags and back. We may replicate to other kernel versions. */ | |
bb3cc85e | 2379 | static unsigned long wire_flags_to_bio_flags(u32 dpf) |
76d2e7ec | 2380 | { |
688593c5 LE |
2381 | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | |
2382 | (dpf & DP_FUA ? REQ_FUA : 0) | | |
28a8f0d3 | 2383 | (dpf & DP_FLUSH ? REQ_PREFLUSH : 0); |
bb3cc85e MC |
2384 | } |
2385 | ||
2386 | static unsigned long wire_flags_to_bio_op(u32 dpf) | |
2387 | { | |
2388 | if (dpf & DP_DISCARD) | |
2389 | return REQ_OP_DISCARD; | |
2390 | else | |
2391 | return REQ_OP_WRITE; | |
76d2e7ec PR |
2392 | } |
2393 | ||
b30ab791 | 2394 | static void fail_postponed_requests(struct drbd_device *device, sector_t sector, |
7be8da07 AG |
2395 | unsigned int size) |
2396 | { | |
2397 | struct drbd_interval *i; | |
2398 | ||
2399 | repeat: | |
b30ab791 | 2400 | drbd_for_each_overlap(i, &device->write_requests, sector, size) { |
7be8da07 AG |
2401 | struct drbd_request *req; |
2402 | struct bio_and_error m; | |
2403 | ||
2404 | if (!i->local) | |
2405 | continue; | |
2406 | req = container_of(i, struct drbd_request, i); | |
2407 | if (!(req->rq_state & RQ_POSTPONED)) | |
2408 | continue; | |
2409 | req->rq_state &= ~RQ_POSTPONED; | |
2410 | __req_mod(req, NEG_ACKED, &m); | |
0500813f | 2411 | spin_unlock_irq(&device->resource->req_lock); |
7be8da07 | 2412 | if (m.bio) |
b30ab791 | 2413 | complete_master_bio(device, &m); |
0500813f | 2414 | spin_lock_irq(&device->resource->req_lock); |
7be8da07 AG |
2415 | goto repeat; |
2416 | } | |
2417 | } | |
2418 | ||
b30ab791 | 2419 | static int handle_write_conflicts(struct drbd_device *device, |
7be8da07 AG |
2420 | struct drbd_peer_request *peer_req) |
2421 | { | |
e33b32de | 2422 | struct drbd_connection *connection = peer_req->peer_device->connection; |
bde89a9e | 2423 | bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags); |
7be8da07 AG |
2424 | sector_t sector = peer_req->i.sector; |
2425 | const unsigned int size = peer_req->i.size; | |
2426 | struct drbd_interval *i; | |
2427 | bool equal; | |
2428 | int err; | |
2429 | ||
2430 | /* | |
2431 | * Inserting the peer request into the write_requests tree will prevent | |
2432 | * new conflicting local requests from being added. | |
2433 | */ | |
b30ab791 | 2434 | drbd_insert_interval(&device->write_requests, &peer_req->i); |
7be8da07 AG |
2435 | |
2436 | repeat: | |
b30ab791 | 2437 | drbd_for_each_overlap(i, &device->write_requests, sector, size) { |
7be8da07 AG |
2438 | if (i == &peer_req->i) |
2439 | continue; | |
08d0dabf LE |
2440 | if (i->completed) |
2441 | continue; | |
7be8da07 AG |
2442 | |
2443 | if (!i->local) { | |
2444 | /* | |
2445 | * Our peer has sent a conflicting remote request; this | |
2446 | * should not happen in a two-node setup. Wait for the | |
2447 | * earlier peer request to complete. | |
2448 | */ | |
b30ab791 | 2449 | err = drbd_wait_misc(device, i); |
7be8da07 AG |
2450 | if (err) |
2451 | goto out; | |
2452 | goto repeat; | |
2453 | } | |
2454 | ||
2455 | equal = i->sector == sector && i->size == size; | |
2456 | if (resolve_conflicts) { | |
2457 | /* | |
2458 | * If the peer request is fully contained within the | |
d4dabbe2 LE |
2459 | * overlapping request, it can be considered overwritten |
2460 | * and thus superseded; otherwise, it will be retried | |
2461 | * once all overlapping requests have completed. | |
7be8da07 | 2462 | */ |
d4dabbe2 | 2463 | bool superseded = i->sector <= sector && i->sector + |
7be8da07 AG |
2464 | (i->size >> 9) >= sector + (size >> 9); |
2465 | ||
2466 | if (!equal) | |
d0180171 | 2467 | drbd_alert(device, "Concurrent writes detected: " |
7be8da07 AG |
2468 | "local=%llus +%u, remote=%llus +%u, " |
2469 | "assuming %s came first\n", | |
2470 | (unsigned long long)i->sector, i->size, | |
2471 | (unsigned long long)sector, size, | |
d4dabbe2 | 2472 | superseded ? "local" : "remote"); |
7be8da07 | 2473 | |
a8cd15ba | 2474 | peer_req->w.cb = superseded ? e_send_superseded : |
7be8da07 | 2475 | e_send_retry_write; |
a8cd15ba | 2476 | list_add_tail(&peer_req->w.list, &device->done_ee); |
668700b4 | 2477 | queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work); |
7be8da07 AG |
2478 | |
2479 | err = -ENOENT; | |
2480 | goto out; | |
2481 | } else { | |
2482 | struct drbd_request *req = | |
2483 | container_of(i, struct drbd_request, i); | |
2484 | ||
2485 | if (!equal) | |
d0180171 | 2486 | drbd_alert(device, "Concurrent writes detected: " |
7be8da07 AG |
2487 | "local=%llus +%u, remote=%llus +%u\n", |
2488 | (unsigned long long)i->sector, i->size, | |
2489 | (unsigned long long)sector, size); | |
2490 | ||
2491 | if (req->rq_state & RQ_LOCAL_PENDING || | |
2492 | !(req->rq_state & RQ_POSTPONED)) { | |
2493 | /* | |
2494 | * Wait for the node with the discard flag to | |
d4dabbe2 LE |
2495 | * decide if this request has been superseded |
2496 | * or needs to be retried. | |
2497 | * Requests that have been superseded will | |
7be8da07 AG |
2498 | * disappear from the write_requests tree. |
2499 | * | |
2500 | * In addition, wait for the conflicting | |
2501 | * request to finish locally before submitting | |
2502 | * the conflicting peer request. | |
2503 | */ | |
b30ab791 | 2504 | err = drbd_wait_misc(device, &req->i); |
7be8da07 | 2505 | if (err) { |
e33b32de | 2506 | _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD); |
b30ab791 | 2507 | fail_postponed_requests(device, sector, size); |
7be8da07 AG |
2508 | goto out; |
2509 | } | |
2510 | goto repeat; | |
2511 | } | |
2512 | /* | |
2513 | * Remember to restart the conflicting requests after | |
2514 | * the new peer request has completed. | |
2515 | */ | |
2516 | peer_req->flags |= EE_RESTART_REQUESTS; | |
2517 | } | |
2518 | } | |
2519 | err = 0; | |
2520 | ||
2521 | out: | |
2522 | if (err) | |
b30ab791 | 2523 | drbd_remove_epoch_entry_interval(device, peer_req); |
7be8da07 AG |
2524 | return err; |
2525 | } | |
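The "superseded" test above (the existing overlapping request fully covers the incoming peer request, so the peer write can be considered overwritten) can be checked in isolation; a userspace sketch with hypothetical toy names, sectors for offsets and bytes for sizes as in the interval tree:

#include <stdio.h>

typedef unsigned long long sector_t;

static int toy_superseded(sector_t i_sector, unsigned i_size,
			  sector_t sector, unsigned size)
{
	return i_sector <= sector &&
	       i_sector + (i_size >> 9) >= sector + (size >> 9);
}

int main(void)
{
	/* local 8 KiB write at sector 0 fully covers a peer 4 KiB write at 8 */
	printf("%d\n", toy_superseded(0, 8192, 8, 4096)); /* 1 */
	/* ...but not one that sticks out past its end */
	printf("%d\n", toy_superseded(0, 8192, 8, 8192)); /* 0 */
	return 0;
}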
2526 | ||
b411b363 | 2527 | /* mirrored write */ |
bde89a9e | 2528 | static int receive_Data(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 2529 | { |
9f4fe9ad | 2530 | struct drbd_peer_device *peer_device; |
b30ab791 | 2531 | struct drbd_device *device; |
21ae5d7f | 2532 | struct net_conf *nc; |
b411b363 | 2533 | sector_t sector; |
db830c46 | 2534 | struct drbd_peer_request *peer_req; |
e658983a | 2535 | struct p_data *p = pi->data; |
7be8da07 | 2536 | u32 peer_seq = be32_to_cpu(p->seq_num); |
bb3cc85e | 2537 | int op, op_flags; |
b411b363 | 2538 | u32 dp_flags; |
302bdeae | 2539 | int err, tp; |
b411b363 | 2540 | |
9f4fe9ad AG |
2541 | peer_device = conn_peer_device(connection, pi->vnr); |
2542 | if (!peer_device) | |
4a76b161 | 2543 | return -EIO; |
9f4fe9ad | 2544 | device = peer_device->device; |
b411b363 | 2545 | |
b30ab791 | 2546 | if (!get_ldev(device)) { |
82bc0194 AG |
2547 | int err2; |
2548 | ||
69a22773 AG |
2549 | err = wait_for_and_update_peer_seq(peer_device, peer_seq); |
2550 | drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size); | |
bde89a9e | 2551 | atomic_inc(&connection->current_epoch->epoch_size); |
69a22773 | 2552 | err2 = drbd_drain_block(peer_device, pi->size); |
82bc0194 AG |
2553 | if (!err) |
2554 | err = err2; | |
2555 | return err; | |
b411b363 PR |
2556 | } |
2557 | ||
fcefa62e AG |
2558 | /* |
2559 | * Corresponding put_ldev done either below (on various errors), or in | |
2560 | * drbd_peer_request_endio, if we successfully submit the data at the | |
2561 | * end of this function. | |
2562 | */ | |
b411b363 PR |
2563 | |
2564 | sector = be64_to_cpu(p->sector); | |
a0fb3c47 | 2565 | peer_req = read_in_block(peer_device, p->block_id, sector, pi); |
db830c46 | 2566 | if (!peer_req) { |
b30ab791 | 2567 | put_ldev(device); |
82bc0194 | 2568 | return -EIO; |
b411b363 PR |
2569 | } |
2570 | ||
a8cd15ba | 2571 | peer_req->w.cb = e_end_block; |
21ae5d7f LE |
2572 | peer_req->submit_jif = jiffies; |
2573 | peer_req->flags |= EE_APPLICATION; | |
b411b363 | 2574 | |
688593c5 | 2575 | dp_flags = be32_to_cpu(p->dp_flags); |
bb3cc85e MC |
2576 | op = wire_flags_to_bio_op(dp_flags); |
2577 | op_flags = wire_flags_to_bio_flags(dp_flags); | |
a0fb3c47 | 2578 | if (pi->cmd == P_TRIM) { |
a0fb3c47 | 2579 | D_ASSERT(peer_device, peer_req->i.size > 0); |
bb3cc85e | 2580 | D_ASSERT(peer_device, op == REQ_OP_DISCARD); |
a0fb3c47 LE |
2581 | D_ASSERT(peer_device, peer_req->pages == NULL); |
2582 | } else if (peer_req->pages == NULL) { | |
0b0ba1ef AG |
2583 | D_ASSERT(device, peer_req->i.size == 0); |
2584 | D_ASSERT(device, dp_flags & DP_FLUSH); | |
a73ff323 | 2585 | } |
688593c5 LE |
2586 | |
2587 | if (dp_flags & DP_MAY_SET_IN_SYNC) | |
db830c46 | 2588 | peer_req->flags |= EE_MAY_SET_IN_SYNC; |
688593c5 | 2589 | |
bde89a9e AG |
2590 | spin_lock(&connection->epoch_lock); |
2591 | peer_req->epoch = connection->current_epoch; | |
db830c46 AG |
2592 | atomic_inc(&peer_req->epoch->epoch_size); |
2593 | atomic_inc(&peer_req->epoch->active); | |
bde89a9e | 2594 | spin_unlock(&connection->epoch_lock); |
b411b363 | 2595 | |
302bdeae | 2596 | rcu_read_lock(); |
21ae5d7f LE |
2597 | nc = rcu_dereference(peer_device->connection->net_conf); |
2598 | tp = nc->two_primaries; | |
2599 | if (peer_device->connection->agreed_pro_version < 100) { | |
2600 | switch (nc->wire_protocol) { | |
2601 | case DRBD_PROT_C: | |
2602 | dp_flags |= DP_SEND_WRITE_ACK; | |
2603 | break; | |
2604 | case DRBD_PROT_B: | |
2605 | dp_flags |= DP_SEND_RECEIVE_ACK; | |
2606 | break; | |
2607 | } | |
2608 | } | |
302bdeae | 2609 | rcu_read_unlock(); |
21ae5d7f LE |
2610 | |
2611 | if (dp_flags & DP_SEND_WRITE_ACK) { | |
2612 | peer_req->flags |= EE_SEND_WRITE_ACK; | |
2613 | inc_unacked(device); | |
2614 | /* corresponding dec_unacked() in e_end_block() | |
2615 | * respective _drbd_clear_done_ee */ | |
2616 | } | |
2617 | ||
2618 | if (dp_flags & DP_SEND_RECEIVE_ACK) { | |
2619 | /* I really don't like it that the receiver thread | |
2620 | * sends on the msock, but anyways */ | |
5dd2ca19 | 2621 | drbd_send_ack(peer_device, P_RECV_ACK, peer_req); |
21ae5d7f LE |
2622 | } |
2623 | ||
302bdeae | 2624 | if (tp) { |
21ae5d7f LE |
2625 | /* two primaries implies protocol C */ |
2626 | D_ASSERT(device, dp_flags & DP_SEND_WRITE_ACK); | |
302bdeae | 2627 | peer_req->flags |= EE_IN_INTERVAL_TREE; |
69a22773 | 2628 | err = wait_for_and_update_peer_seq(peer_device, peer_seq); |
7be8da07 | 2629 | if (err) |
b411b363 | 2630 | goto out_interrupted; |
0500813f | 2631 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 | 2632 | err = handle_write_conflicts(device, peer_req); |
7be8da07 | 2633 | if (err) { |
0500813f | 2634 | spin_unlock_irq(&device->resource->req_lock); |
7be8da07 | 2635 | if (err == -ENOENT) { |
b30ab791 | 2636 | put_ldev(device); |
82bc0194 | 2637 | return 0; |
b411b363 | 2638 | } |
7be8da07 | 2639 | goto out_interrupted; |
b411b363 | 2640 | } |
b874d231 | 2641 | } else { |
69a22773 | 2642 | update_peer_seq(peer_device, peer_seq); |
0500813f | 2643 | spin_lock_irq(&device->resource->req_lock); |
b874d231 | 2644 | } |
9104d31a LE |
2645 | /* TRIM and WRITE_SAME are processed synchronously, |
2646 | * we wait for all pending requests, respectively wait for | |
a0fb3c47 LE |
2647 | * active_ee to become empty in drbd_submit_peer_request(); |
2648 | * better not add ourselves here. */ | |
9104d31a | 2649 | if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0) |
b9ed7080 | 2650 | list_add_tail(&peer_req->w.list, &device->active_ee); |
0500813f | 2651 | spin_unlock_irq(&device->resource->req_lock); |
b411b363 | 2652 | |
b30ab791 AG |
2653 | if (device->state.conn == C_SYNC_TARGET) |
2654 | wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req)); | |
b411b363 | 2655 | |
b30ab791 | 2656 | if (device->state.pdsk < D_INCONSISTENT) { |
b411b363 | 2657 | /* In case we have the only disk of the cluster, */ |
b30ab791 | 2658 | drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); |
db830c46 | 2659 | peer_req->flags &= ~EE_MAY_SET_IN_SYNC; |
4dd726f0 | 2660 | drbd_al_begin_io(device, &peer_req->i); |
21ae5d7f | 2661 | peer_req->flags |= EE_CALL_AL_COMPLETE_IO; |
b411b363 PR |
2662 | } |
2663 | ||
bb3cc85e MC |
2664 | err = drbd_submit_peer_request(device, peer_req, op, op_flags, |
2665 | DRBD_FAULT_DT_WR); | |
82bc0194 AG |
2666 | if (!err) |
2667 | return 0; | |
b411b363 | 2668 | |
10f6d992 | 2669 | /* don't care for the reason here */ |
d0180171 | 2670 | drbd_err(device, "submit failed, triggering re-connect\n"); |
0500813f | 2671 | spin_lock_irq(&device->resource->req_lock); |
a8cd15ba | 2672 | list_del(&peer_req->w.list); |
b30ab791 | 2673 | drbd_remove_epoch_entry_interval(device, peer_req); |
0500813f | 2674 | spin_unlock_irq(&device->resource->req_lock); |
21ae5d7f LE |
2675 | if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) { |
2676 | peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; | |
b30ab791 | 2677 | drbd_al_complete_io(device, &peer_req->i); |
21ae5d7f | 2678 | } |
22cc37a9 | 2679 | |
b411b363 | 2680 | out_interrupted: |
bde89a9e | 2681 | drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP); |
b30ab791 AG |
2682 | put_ldev(device); |
2683 | drbd_free_peer_req(device, peer_req); | |
82bc0194 | 2684 | return err; |
b411b363 PR |
2685 | } |
2686 | ||
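/* A sketch (not driver code) of which ack the receiver owes the peer for
 * an incoming write, keyed off dp_flags as in receive_Data() above:
 * DP_SEND_WRITE_ACK corresponds to protocol C, DP_SEND_RECEIVE_ACK to
 * protocol B, and protocol A expects no ack at all. The helper and the
 * enum are hypothetical, for illustration only. */
enum wire_ack { ACK_NONE, ACK_RECEIVE, ACK_WRITE };

static enum wire_ack ack_owed_for_write(unsigned int dp_flags)
{
	if (dp_flags & DP_SEND_WRITE_ACK)	/* protocol C */
		return ACK_WRITE;		/* P_WRITE_ACK after the disk write */
	if (dp_flags & DP_SEND_RECEIVE_ACK)	/* protocol B */
		return ACK_RECEIVE;		/* P_RECV_ACK on reception */
	return ACK_NONE;			/* protocol A */
}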
0f0601f4 LE |
2687 | /* We may throttle resync if the lower device seems to be busy | |
2688 | * and the current sync rate is above c_min_rate. | |
2689 | * | |
2690 | * To decide whether or not the lower device is busy, we use a scheme similar | |
2691 | * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant" | |
2692 | * amount (more than 64 sectors) of activity that we cannot account for with | |
2693 | * our own resync activity, the device obviously is "busy". | |
2694 | * | |
2695 | * The sync rate used here is computed from only the two most recent step | |
2696 | * marks, giving a short-time average so we can react faster. | |
2697 | */ | |
ad3fee79 LE |
2698 | bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, |
2699 | bool throttle_if_app_is_waiting) | |
0f0601f4 | 2700 | { |
e3555d85 | 2701 | struct lc_element *tmp; |
ad3fee79 | 2702 | bool throttle = drbd_rs_c_min_rate_throttle(device); |
daeda1cc | 2703 | |
ad3fee79 LE |
2704 | if (!throttle || throttle_if_app_is_waiting) |
2705 | return throttle; | |
0f0601f4 | 2706 | |
b30ab791 AG |
2707 | spin_lock_irq(&device->al_lock); |
2708 | tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); | |
e3555d85 PR |
2709 | if (tmp) { |
2710 | struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); | |
e8299874 LE |
2711 | if (test_bit(BME_PRIORITY, &bm_ext->flags)) |
2712 | throttle = false; | |
ad3fee79 LE |
2713 | /* Do not slow down if app IO is already waiting for this extent, |
2714 | * and our progress is necessary for application IO to complete. */ | |
e3555d85 | 2715 | } |
b30ab791 | 2716 | spin_unlock_irq(&device->al_lock); |
e3555d85 | 2717 | |
e8299874 LE |
2718 | return throttle; |
2719 | } | |
2720 | ||
2721 | bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) | |
2722 | { | |
2723 | struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; | |
2724 | unsigned long db, dt, dbdt; | |
2725 | unsigned int c_min_rate; | |
2726 | int curr_events; | |
2727 | ||
2728 | rcu_read_lock(); | |
2729 | c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; | |
2730 | rcu_read_unlock(); | |
2731 | ||
2732 | /* feature disabled? */ | |
2733 | if (c_min_rate == 0) | |
2734 | return false; | |
2735 | ||
0f0601f4 LE |
2736 | curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + |
2737 | (int)part_stat_read(&disk->part0, sectors[1]) - | |
b30ab791 | 2738 | atomic_read(&device->rs_sect_ev); |
ad3fee79 LE |
2739 | |
2740 | if (atomic_read(&device->ap_actlog_cnt) | |
ff8bd88b | 2741 | || curr_events - device->rs_last_events > 64) { |
0f0601f4 LE |
2742 | unsigned long rs_left; |
2743 | int i; | |
2744 | ||
b30ab791 | 2745 | device->rs_last_events = curr_events; |
0f0601f4 LE |
2746 | |
2747 | /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, | |
2748 | * approx. */ | |
b30ab791 | 2749 | i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; |
2649f080 | 2750 | |
b30ab791 AG |
2751 | if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T) |
2752 | rs_left = device->ov_left; | |
2649f080 | 2753 | else |
b30ab791 | 2754 | rs_left = drbd_bm_total_weight(device) - device->rs_failed; |
0f0601f4 | 2755 | |
b30ab791 | 2756 | dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ; |
0f0601f4 LE |
2757 | if (!dt) |
2758 | dt++; | |
b30ab791 | 2759 | db = device->rs_mark_left[i] - rs_left; |
0f0601f4 LE |
2760 | dbdt = Bit2KB(db/dt); |
2761 | ||
daeda1cc | 2762 | if (dbdt > c_min_rate) |
e8299874 | 2763 | return true; |
0f0601f4 | 2764 | } |
e8299874 | 2765 | return false; |
0f0601f4 LE |
2766 | } |
2767 | ||
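/* A standalone sketch of the rate check in drbd_rs_c_min_rate_throttle()
 * above, assuming DRBD's 4-KiB-per-bitmap-bit granularity (Bit2KB(x) is
 * x << 2 in this tree); all names are illustrative, not driver code. */
static bool sync_rate_above_min(unsigned long bits_resynced,
				unsigned long seconds,
				unsigned int c_min_rate /* KiB/s */)
{
	unsigned long dbdt;

	if (!seconds)
		seconds = 1;			/* same guard as the dt++ above */
	dbdt = (bits_resynced / seconds) << 2;	/* bits/s -> KiB/s */
	return dbdt > c_min_rate;		/* true: resync may be throttled */
}
/* Example: 25600 bits over 4 s is 6400 bits/s, i.e. 25600 KiB/s; with a
 * c-min-rate of, say, 4096 KiB/s the guaranteed minimum is already met,
 * so the resync may be slowed down in favor of application IO. */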
bde89a9e | 2768 | static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 2769 | { |
9f4fe9ad | 2770 | struct drbd_peer_device *peer_device; |
b30ab791 | 2771 | struct drbd_device *device; |
b411b363 | 2772 | sector_t sector; |
4a76b161 | 2773 | sector_t capacity; |
db830c46 | 2774 | struct drbd_peer_request *peer_req; |
b411b363 | 2775 | struct digest_info *di = NULL; |
b18b37be | 2776 | int size, verb; |
b411b363 | 2777 | unsigned int fault_type; |
e658983a | 2778 | struct p_block_req *p = pi->data; |
4a76b161 | 2779 | |
9f4fe9ad AG |
2780 | peer_device = conn_peer_device(connection, pi->vnr); |
2781 | if (!peer_device) | |
4a76b161 | 2782 | return -EIO; |
9f4fe9ad | 2783 | device = peer_device->device; |
b30ab791 | 2784 | capacity = drbd_get_capacity(device->this_bdev); |
b411b363 PR |
2785 | |
2786 | sector = be64_to_cpu(p->sector); | |
2787 | size = be32_to_cpu(p->blksize); | |
2788 | ||
c670a398 | 2789 | if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { |
d0180171 | 2790 | drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, |
b411b363 | 2791 | (unsigned long long)sector, size); |
82bc0194 | 2792 | return -EINVAL; |
b411b363 PR |
2793 | } |
2794 | if (sector + (size>>9) > capacity) { | |
d0180171 | 2795 | drbd_err(device, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, |
b411b363 | 2796 | (unsigned long long)sector, size); |
82bc0194 | 2797 | return -EINVAL; |
b411b363 PR |
2798 | } |
2799 | ||
b30ab791 | 2800 | if (!get_ldev_if_state(device, D_UP_TO_DATE)) { |
b18b37be | 2801 | verb = 1; |
e2857216 | 2802 | switch (pi->cmd) { |
b18b37be | 2803 | case P_DATA_REQUEST: |
69a22773 | 2804 | drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p); |
b18b37be | 2805 | break; |
700ca8c0 | 2806 | case P_RS_THIN_REQ: |
b18b37be PR |
2807 | case P_RS_DATA_REQUEST: |
2808 | case P_CSUM_RS_REQUEST: | |
2809 | case P_OV_REQUEST: | |
69a22773 | 2810 | drbd_send_ack_rp(peer_device, P_NEG_RS_DREPLY , p); |
b18b37be PR |
2811 | break; |
2812 | case P_OV_REPLY: | |
2813 | verb = 0; | |
b30ab791 | 2814 | dec_rs_pending(device); |
69a22773 | 2815 | drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC); |
b18b37be PR |
2816 | break; |
2817 | default: | |
49ba9b1b | 2818 | BUG(); |
b18b37be PR |
2819 | } |
2820 | if (verb && __ratelimit(&drbd_ratelimit_state)) | |
d0180171 | 2821 | drbd_err(device, "Can not satisfy peer's read request, " |
b411b363 | 2822 | "no local data.\n"); |
b18b37be | 2823 | |
a821cc4a | 2824 | /* drain possible payload */ | |
69a22773 | 2825 | return drbd_drain_block(peer_device, pi->size); |
b411b363 PR |
2826 | } |
2827 | ||
2828 | /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD | |
2829 | * "criss-cross" setup, that might cause write-out on some other DRBD, | |
2830 | * which in turn might block on the other node at this very place. */ | |
a0fb3c47 | 2831 | peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, |
9104d31a | 2832 | size, GFP_NOIO); |
db830c46 | 2833 | if (!peer_req) { |
b30ab791 | 2834 | put_ldev(device); |
82bc0194 | 2835 | return -ENOMEM; |
b411b363 PR |
2836 | } |
2837 | ||
e2857216 | 2838 | switch (pi->cmd) { |
b411b363 | 2839 | case P_DATA_REQUEST: |
a8cd15ba | 2840 | peer_req->w.cb = w_e_end_data_req; |
b411b363 | 2841 | fault_type = DRBD_FAULT_DT_RD; |
80a40e43 | 2842 | /* application IO, don't drbd_rs_begin_io */ |
21ae5d7f | 2843 | peer_req->flags |= EE_APPLICATION; |
80a40e43 LE |
2844 | goto submit; |
2845 | ||
700ca8c0 PR |
2846 | case P_RS_THIN_REQ: |
2847 | /* If at some point in the future we have a smart way to | |
2848 | find out if this data block is completely deallocated, | |
2849 | then we would do something smarter here than reading | |
2850 | the block... */ | |
2851 | peer_req->flags |= EE_RS_THIN_REQ; /* fall through */ | |
b411b363 | 2852 | case P_RS_DATA_REQUEST: |
a8cd15ba | 2853 | peer_req->w.cb = w_e_end_rsdata_req; |
b411b363 | 2854 | fault_type = DRBD_FAULT_RS_RD; |
5f9915bb | 2855 | /* used in the sector offset progress display */ |
b30ab791 | 2856 | device->bm_resync_fo = BM_SECT_TO_BIT(sector); |
b411b363 PR |
2857 | break; |
2858 | ||
2859 | case P_OV_REPLY: | |
2860 | case P_CSUM_RS_REQUEST: | |
2861 | fault_type = DRBD_FAULT_RS_RD; | |
e2857216 | 2862 | di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO); |
b411b363 PR |
2863 | if (!di) |
2864 | goto out_free_e; | |
2865 | ||
e2857216 | 2866 | di->digest_size = pi->size; |
b411b363 PR |
2867 | di->digest = (((char *)di)+sizeof(struct digest_info)); |
2868 | ||
db830c46 AG |
2869 | peer_req->digest = di; |
2870 | peer_req->flags |= EE_HAS_DIGEST; | |
c36c3ced | 2871 | |
9f4fe9ad | 2872 | if (drbd_recv_all(peer_device->connection, di->digest, pi->size)) |
b411b363 PR |
2873 | goto out_free_e; |
2874 | ||
e2857216 | 2875 | if (pi->cmd == P_CSUM_RS_REQUEST) { |
9f4fe9ad | 2876 | D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89); |
a8cd15ba | 2877 | peer_req->w.cb = w_e_end_csum_rs_req; |
5f9915bb | 2878 | /* used in the sector offset progress display */ |
b30ab791 | 2879 | device->bm_resync_fo = BM_SECT_TO_BIT(sector); |
aaaba345 LE |
2880 | /* remember to report stats in drbd_resync_finished */ |
2881 | device->use_csums = true; | |
e2857216 | 2882 | } else if (pi->cmd == P_OV_REPLY) { |
2649f080 | 2883 | /* track progress, we may need to throttle */ |
b30ab791 | 2884 | atomic_add(size >> 9, &device->rs_sect_in); |
a8cd15ba | 2885 | peer_req->w.cb = w_e_end_ov_reply; |
b30ab791 | 2886 | dec_rs_pending(device); |
0f0601f4 LE |
2887 | /* drbd_rs_begin_io done when we sent this request, |
2888 | * but accounting still needs to be done. */ | |
2889 | goto submit_for_resync; | |
b411b363 PR |
2890 | } |
2891 | break; | |
2892 | ||
2893 | case P_OV_REQUEST: | |
b30ab791 | 2894 | if (device->ov_start_sector == ~(sector_t)0 && |
9f4fe9ad | 2895 | peer_device->connection->agreed_pro_version >= 90) { |
de228bba LE |
2896 | unsigned long now = jiffies; |
2897 | int i; | |
b30ab791 AG |
2898 | device->ov_start_sector = sector; |
2899 | device->ov_position = sector; | |
2900 | device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector); | |
2901 | device->rs_total = device->ov_left; | |
de228bba | 2902 | for (i = 0; i < DRBD_SYNC_MARKS; i++) { |
b30ab791 AG |
2903 | device->rs_mark_left[i] = device->ov_left; |
2904 | device->rs_mark_time[i] = now; | |
de228bba | 2905 | } |
d0180171 | 2906 | drbd_info(device, "Online Verify start sector: %llu\n", |
b411b363 PR |
2907 | (unsigned long long)sector); |
2908 | } | |
a8cd15ba | 2909 | peer_req->w.cb = w_e_end_ov_req; |
b411b363 | 2910 | fault_type = DRBD_FAULT_RS_RD; |
b411b363 PR |
2911 | break; |
2912 | ||
b411b363 | 2913 | default: |
49ba9b1b | 2914 | BUG(); |
b411b363 PR |
2915 | } |
2916 | ||
0f0601f4 LE |
2917 | /* Throttle, drbd_rs_begin_io and submit should become asynchronous |
2918 | * wrt the receiver, but it is not as straightforward as it may seem. | |
2919 | * Various places in the resync start and stop logic assume resync | |
2920 | * requests are processed in order, requeuing this on the worker thread | |
2921 | * introduces a bunch of new code for synchronization between threads. | |
2922 | * | |
2923 | * Unlimited throttling before drbd_rs_begin_io may stall the resync | |
2924 | * "forever", throttling after drbd_rs_begin_io will lock that extent | |
2925 | * for application writes for the same time. For now, just throttle | |
2926 | * here, where the rest of the code expects the receiver to sleep for | |
2927 | * a while, anyway. | |
2928 | */ | |
2929 | ||
2930 | /* Throttle before drbd_rs_begin_io, as that locks out application IO; | |
2931 | * this defers syncer requests for some time, before letting at least | |
2932 | * one request through. The resync controller on the receiving side | |
2933 | * will adapt to the incoming rate accordingly. | |
2934 | * | |
2935 | * We cannot throttle here if remote is Primary/SyncTarget: | |
2936 | * we would also throttle its application reads. | |
2937 | * In that case, throttling is done on the SyncTarget only. | |
2938 | */ | |
c5a2c150 LE |
2939 | |
2940 | /* Even though this may be a resync request, we do add to "read_ee"; | |
2941 | * "sync_ee" is only used for resync WRITEs. | |
2942 | * Add to list early, so debugfs can find this request | |
2943 | * even if we have to sleep below. */ | |
2944 | spin_lock_irq(&device->resource->req_lock); | |
2945 | list_add_tail(&peer_req->w.list, &device->read_ee); | |
2946 | spin_unlock_irq(&device->resource->req_lock); | |
2947 | ||
944410e9 | 2948 | update_receiver_timing_details(connection, drbd_rs_should_slow_down); |
ad3fee79 LE |
2949 | if (device->state.peer != R_PRIMARY |
2950 | && drbd_rs_should_slow_down(device, sector, false)) | |
e3555d85 | 2951 | schedule_timeout_uninterruptible(HZ/10); |
944410e9 | 2952 | update_receiver_timing_details(connection, drbd_rs_begin_io); |
b30ab791 | 2953 | if (drbd_rs_begin_io(device, sector)) |
80a40e43 | 2954 | goto out_free_e; |
b411b363 | 2955 | |
0f0601f4 | 2956 | submit_for_resync: |
b30ab791 | 2957 | atomic_add(size >> 9, &device->rs_sect_ev); |
0f0601f4 | 2958 | |
80a40e43 | 2959 | submit: |
944410e9 | 2960 | update_receiver_timing_details(connection, drbd_submit_peer_request); |
b30ab791 | 2961 | inc_unacked(device); |
bb3cc85e MC |
2962 | if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0, |
2963 | fault_type) == 0) | |
82bc0194 | 2964 | return 0; |
b411b363 | 2965 | |
10f6d992 | 2966 | /* don't care for the reason here */ |
d0180171 | 2967 | drbd_err(device, "submit failed, triggering re-connect\n"); |
c5a2c150 LE |
2968 | |
2969 | out_free_e: | |
0500813f | 2970 | spin_lock_irq(&device->resource->req_lock); |
a8cd15ba | 2971 | list_del(&peer_req->w.list); |
0500813f | 2972 | spin_unlock_irq(&device->resource->req_lock); |
22cc37a9 LE |
2973 | /* no drbd_rs_complete_io(), we are dropping the connection anyways */ |
2974 | ||
b30ab791 AG |
2975 | put_ldev(device); |
2976 | drbd_free_peer_req(device, peer_req); | |
82bc0194 | 2977 | return -EIO; |
b411b363 PR |
2978 | } |
2979 | ||
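/* A minimal sketch of the bounds checks receive_DataRequest() performs
 * above; the helper is hypothetical, and DRBD_MAX_BIO_SIZE is assumed
 * to be 1 MiB as in this tree. */
static bool block_req_in_bounds(unsigned long long sector, int size,
				unsigned long long capacity /* sectors */)
{
	if (size <= 0 || (size & 511) || size > 1024 * 1024)
		return false;		/* zero, unaligned, or oversized */
	return sector + (size >> 9) <= capacity;	/* fits on the device */
}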
69a22773 AG |
2980 | /** |
2981 | * drbd_asb_recover_0p - Recover after split-brain with no remaining primaries | |
2982 | */ | |
2983 | static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold(local) | |
b411b363 | 2984 | { |
69a22773 | 2985 | struct drbd_device *device = peer_device->device; |
b411b363 PR |
2986 | int self, peer, rv = -100; |
2987 | unsigned long ch_self, ch_peer; | |
44ed167d | 2988 | enum drbd_after_sb_p after_sb_0p; |
b411b363 | 2989 | |
b30ab791 AG |
2990 | self = device->ldev->md.uuid[UI_BITMAP] & 1; |
2991 | peer = device->p_uuid[UI_BITMAP] & 1; | |
b411b363 | 2992 | |
b30ab791 AG |
2993 | ch_peer = device->p_uuid[UI_SIZE]; |
2994 | ch_self = device->comm_bm_set; | |
b411b363 | 2995 | |
44ed167d | 2996 | rcu_read_lock(); |
69a22773 | 2997 | after_sb_0p = rcu_dereference(peer_device->connection->net_conf)->after_sb_0p; |
44ed167d PR |
2998 | rcu_read_unlock(); |
2999 | switch (after_sb_0p) { | |
b411b363 PR |
3000 | case ASB_CONSENSUS: |
3001 | case ASB_DISCARD_SECONDARY: | |
3002 | case ASB_CALL_HELPER: | |
44ed167d | 3003 | case ASB_VIOLENTLY: |
d0180171 | 3004 | drbd_err(device, "Configuration error.\n"); |
b411b363 PR |
3005 | break; |
3006 | case ASB_DISCONNECT: | |
3007 | break; | |
3008 | case ASB_DISCARD_YOUNGER_PRI: | |
3009 | if (self == 0 && peer == 1) { | |
3010 | rv = -1; | |
3011 | break; | |
3012 | } | |
3013 | if (self == 1 && peer == 0) { | |
3014 | rv = 1; | |
3015 | break; | |
3016 | } | |
3017 | /* Else fall through to one of the other strategies... */ | |
3018 | case ASB_DISCARD_OLDER_PRI: | |
3019 | if (self == 0 && peer == 1) { | |
3020 | rv = 1; | |
3021 | break; | |
3022 | } | |
3023 | if (self == 1 && peer == 0) { | |
3024 | rv = -1; | |
3025 | break; | |
3026 | } | |
3027 | /* Else fall through to one of the other strategies... */ | |
d0180171 | 3028 | drbd_warn(device, "Discard younger/older primary did not find a decision\n" |
b411b363 PR |
3029 | "Using discard-least-changes instead\n"); |
3030 | case ASB_DISCARD_ZERO_CHG: | |
3031 | if (ch_peer == 0 && ch_self == 0) { | |
69a22773 | 3032 | rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) |
b411b363 PR |
3033 | ? -1 : 1; |
3034 | break; | |
3035 | } else { | |
3036 | if (ch_peer == 0) { rv = 1; break; } | |
3037 | if (ch_self == 0) { rv = -1; break; } | |
3038 | } | |
44ed167d | 3039 | if (after_sb_0p == ASB_DISCARD_ZERO_CHG) |
b411b363 PR |
3040 | break; |
3041 | case ASB_DISCARD_LEAST_CHG: | |
3042 | if (ch_self < ch_peer) | |
3043 | rv = -1; | |
3044 | else if (ch_self > ch_peer) | |
3045 | rv = 1; | |
3046 | else /* ( ch_self == ch_peer ) */ | |
3047 | /* Well, then use something else. */ | |
69a22773 | 3048 | rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) |
b411b363 PR |
3049 | ? -1 : 1; |
3050 | break; | |
3051 | case ASB_DISCARD_LOCAL: | |
3052 | rv = -1; | |
3053 | break; | |
3054 | case ASB_DISCARD_REMOTE: | |
3055 | rv = 1; | |
3056 | } | |
3057 | ||
3058 | return rv; | |
3059 | } | |
3060 | ||
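/* A sketch of the verdict convention shared by the drbd_asb_recover_*p()
 * helpers: 1 discards the peer's changes, -1 discards ours, -100 means no
 * automatic decision. The discard-least-changes strategy above, for
 * instance, reduces to this (hypothetical helper, same semantics): */
static int discard_least_changes(unsigned long ch_self, unsigned long ch_peer,
				 int resolve_conflicts)
{
	if (ch_self < ch_peer)
		return -1;	/* we changed less: adopt the peer's data */
	if (ch_self > ch_peer)
		return 1;	/* peer changed less: it adopts ours */
	return resolve_conflicts ? -1 : 1;	/* deterministic tie-break */
}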
69a22773 AG |
3061 | /** |
3062 | * drbd_asb_recover_1p - Recover after split-brain with one remaining primary | |
3063 | */ | |
3064 | static int drbd_asb_recover_1p(struct drbd_peer_device *peer_device) __must_hold(local) | |
b411b363 | 3065 | { |
69a22773 | 3066 | struct drbd_device *device = peer_device->device; |
6184ea21 | 3067 | int hg, rv = -100; |
44ed167d | 3068 | enum drbd_after_sb_p after_sb_1p; |
b411b363 | 3069 | |
44ed167d | 3070 | rcu_read_lock(); |
69a22773 | 3071 | after_sb_1p = rcu_dereference(peer_device->connection->net_conf)->after_sb_1p; |
44ed167d PR |
3072 | rcu_read_unlock(); |
3073 | switch (after_sb_1p) { | |
b411b363 PR |
3074 | case ASB_DISCARD_YOUNGER_PRI: |
3075 | case ASB_DISCARD_OLDER_PRI: | |
3076 | case ASB_DISCARD_LEAST_CHG: | |
3077 | case ASB_DISCARD_LOCAL: | |
3078 | case ASB_DISCARD_REMOTE: | |
44ed167d | 3079 | case ASB_DISCARD_ZERO_CHG: |
d0180171 | 3080 | drbd_err(device, "Configuration error.\n"); |
b411b363 PR |
3081 | break; |
3082 | case ASB_DISCONNECT: | |
3083 | break; | |
3084 | case ASB_CONSENSUS: | |
69a22773 | 3085 | hg = drbd_asb_recover_0p(peer_device); |
b30ab791 | 3086 | if (hg == -1 && device->state.role == R_SECONDARY) |
b411b363 | 3087 | rv = hg; |
b30ab791 | 3088 | if (hg == 1 && device->state.role == R_PRIMARY) |
b411b363 PR |
3089 | rv = hg; |
3090 | break; | |
3091 | case ASB_VIOLENTLY: | |
69a22773 | 3092 | rv = drbd_asb_recover_0p(peer_device); |
b411b363 PR |
3093 | break; |
3094 | case ASB_DISCARD_SECONDARY: | |
b30ab791 | 3095 | return device->state.role == R_PRIMARY ? 1 : -1; |
b411b363 | 3096 | case ASB_CALL_HELPER: |
69a22773 | 3097 | hg = drbd_asb_recover_0p(peer_device); |
b30ab791 | 3098 | if (hg == -1 && device->state.role == R_PRIMARY) { |
bb437946 AG |
3099 | enum drbd_state_rv rv2; |
3100 | ||
b411b363 PR |
3101 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, |
3102 | * we might be here in C_WF_REPORT_PARAMS which is transient. | |
3103 | * we do not need to wait for the after state change work either. */ | |
b30ab791 | 3104 | rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); |
bb437946 | 3105 | if (rv2 != SS_SUCCESS) { |
b30ab791 | 3106 | drbd_khelper(device, "pri-lost-after-sb"); |
b411b363 | 3107 | } else { |
d0180171 | 3108 | drbd_warn(device, "Successfully gave up primary role.\n"); |
b411b363 PR |
3109 | rv = hg; |
3110 | } | |
3111 | } else | |
3112 | rv = hg; | |
3113 | } | |
3114 | ||
3115 | return rv; | |
3116 | } | |
3117 | ||
69a22773 AG |
3118 | /** |
3119 | * drbd_asb_recover_2p - Recover after split-brain with two remaining primaries | |
3120 | */ | |
3121 | static int drbd_asb_recover_2p(struct drbd_peer_device *peer_device) __must_hold(local) | |
b411b363 | 3122 | { |
69a22773 | 3123 | struct drbd_device *device = peer_device->device; |
6184ea21 | 3124 | int hg, rv = -100; |
44ed167d | 3125 | enum drbd_after_sb_p after_sb_2p; |
b411b363 | 3126 | |
44ed167d | 3127 | rcu_read_lock(); |
69a22773 | 3128 | after_sb_2p = rcu_dereference(peer_device->connection->net_conf)->after_sb_2p; |
44ed167d PR |
3129 | rcu_read_unlock(); |
3130 | switch (after_sb_2p) { | |
b411b363 PR |
3131 | case ASB_DISCARD_YOUNGER_PRI: |
3132 | case ASB_DISCARD_OLDER_PRI: | |
3133 | case ASB_DISCARD_LEAST_CHG: | |
3134 | case ASB_DISCARD_LOCAL: | |
3135 | case ASB_DISCARD_REMOTE: | |
3136 | case ASB_CONSENSUS: | |
3137 | case ASB_DISCARD_SECONDARY: | |
44ed167d | 3138 | case ASB_DISCARD_ZERO_CHG: |
d0180171 | 3139 | drbd_err(device, "Configuration error.\n"); |
b411b363 PR |
3140 | break; |
3141 | case ASB_VIOLENTLY: | |
69a22773 | 3142 | rv = drbd_asb_recover_0p(peer_device); |
b411b363 PR |
3143 | break; |
3144 | case ASB_DISCONNECT: | |
3145 | break; | |
3146 | case ASB_CALL_HELPER: | |
69a22773 | 3147 | hg = drbd_asb_recover_0p(peer_device); |
b411b363 | 3148 | if (hg == -1) { |
bb437946 AG |
3149 | enum drbd_state_rv rv2; |
3150 | ||
b411b363 PR |
3151 | /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, |
3152 | * we might be here in C_WF_REPORT_PARAMS which is transient. | |
3153 | * we do not need to wait for the after state change work either. */ | |
b30ab791 | 3154 | rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY)); |
bb437946 | 3155 | if (rv2 != SS_SUCCESS) { |
b30ab791 | 3156 | drbd_khelper(device, "pri-lost-after-sb"); |
b411b363 | 3157 | } else { |
d0180171 | 3158 | drbd_warn(device, "Successfully gave up primary role.\n"); |
b411b363 PR |
3159 | rv = hg; |
3160 | } | |
3161 | } else | |
3162 | rv = hg; | |
3163 | } | |
3164 | ||
3165 | return rv; | |
3166 | } | |
3167 | ||
b30ab791 | 3168 | static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid, |
b411b363 PR |
3169 | u64 bits, u64 flags) |
3170 | { | |
3171 | if (!uuid) { | |
d0180171 | 3172 | drbd_info(device, "%s uuid info vanished while I was looking!\n", text); |
b411b363 PR |
3173 | return; |
3174 | } | |
d0180171 | 3175 | drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", |
b411b363 PR |
3176 | text, |
3177 | (unsigned long long)uuid[UI_CURRENT], | |
3178 | (unsigned long long)uuid[UI_BITMAP], | |
3179 | (unsigned long long)uuid[UI_HISTORY_START], | |
3180 | (unsigned long long)uuid[UI_HISTORY_END], | |
3181 | (unsigned long long)bits, | |
3182 | (unsigned long long)flags); | |
3183 | } | |
3184 | ||
3185 | /* | |
3186 | 100 after split brain try auto recover | |
3187 | 2 C_SYNC_SOURCE set BitMap | |
3188 | 1 C_SYNC_SOURCE use BitMap | |
3189 | 0 no Sync | |
3190 | -1 C_SYNC_TARGET use BitMap | |
3191 | -2 C_SYNC_TARGET set BitMap | |
3192 | -100 after split brain, disconnect | |
3193 | -1000 unrelated data | |
4a23f264 PR |
3194 | -1091 requires proto 91 |
3195 | -1096 requires proto 96 | |
b411b363 | 3196 | */ |
f2d3d75b LE |
3197 | |
3198 | static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local) | |
b411b363 | 3199 | { |
44a4d551 LE |
3200 | struct drbd_peer_device *const peer_device = first_peer_device(device); |
3201 | struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; | |
b411b363 PR |
3202 | u64 self, peer; |
3203 | int i, j; | |
3204 | ||
b30ab791 AG |
3205 | self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); |
3206 | peer = device->p_uuid[UI_CURRENT] & ~((u64)1); | |
b411b363 PR |
3207 | |
3208 | *rule_nr = 10; | |
3209 | if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) | |
3210 | return 0; | |
3211 | ||
3212 | *rule_nr = 20; | |
3213 | if ((self == UUID_JUST_CREATED || self == (u64)0) && | |
3214 | peer != UUID_JUST_CREATED) | |
3215 | return -2; | |
3216 | ||
3217 | *rule_nr = 30; | |
3218 | if (self != UUID_JUST_CREATED && | |
3219 | (peer == UUID_JUST_CREATED || peer == (u64)0)) | |
3220 | return 2; | |
3221 | ||
3222 | if (self == peer) { | |
3223 | int rct, dc; /* roles at crash time */ | |
3224 | ||
b30ab791 | 3225 | if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { |
b411b363 | 3226 | |
44a4d551 | 3227 | if (connection->agreed_pro_version < 91) |
4a23f264 | 3228 | return -1091; |
b411b363 | 3229 | |
b30ab791 AG |
3230 | if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && |
3231 | (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { | |
d0180171 | 3232 | drbd_info(device, "was SyncSource, missed the resync finished event, corrected myself:\n"); |
b30ab791 AG |
3233 | drbd_uuid_move_history(device); |
3234 | device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; | |
3235 | device->ldev->md.uuid[UI_BITMAP] = 0; | |
b411b363 | 3236 | |
b30ab791 AG |
3237 | drbd_uuid_dump(device, "self", device->ldev->md.uuid, |
3238 | device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0); | |
b411b363 PR |
3239 | *rule_nr = 34; |
3240 | } else { | |
d0180171 | 3241 | drbd_info(device, "was SyncSource (peer failed to write sync_uuid)\n"); |
b411b363 PR |
3242 | *rule_nr = 36; |
3243 | } | |
3244 | ||
3245 | return 1; | |
3246 | } | |
3247 | ||
b30ab791 | 3248 | if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { |
b411b363 | 3249 | |
44a4d551 | 3250 | if (connection->agreed_pro_version < 91) |
4a23f264 | 3251 | return -1091; |
b411b363 | 3252 | |
b30ab791 AG |
3253 | if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && |
3254 | (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) { | |
d0180171 | 3255 | drbd_info(device, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); |
b411b363 | 3256 | |
b30ab791 AG |
3257 | device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START]; |
3258 | device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP]; | |
3259 | device->p_uuid[UI_BITMAP] = 0UL; | |
b411b363 | 3260 | |
b30ab791 | 3261 | drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); |
b411b363 PR |
3262 | *rule_nr = 35; |
3263 | } else { | |
d0180171 | 3264 | drbd_info(device, "was SyncTarget (failed to write sync_uuid)\n"); |
b411b363 PR |
3265 | *rule_nr = 37; |
3266 | } | |
3267 | ||
3268 | return -1; | |
3269 | } | |
3270 | ||
3271 | /* Common power [off|failure] */ | |
b30ab791 AG |
3272 | rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) + |
3273 | (device->p_uuid[UI_FLAGS] & 2); | |
b411b363 PR |
3274 | /* lowest bit is set when we were primary, |
3275 | * next bit (weight 2) is set when peer was primary */ | |
3276 | *rule_nr = 40; | |
3277 | ||
f2d3d75b LE |
3278 | /* Neither has the "crashed primary" flag set, |
3279 | * only a replication link hiccup. */ | |
3280 | if (rct == 0) | |
3281 | return 0; | |
3282 | ||
3283 | /* Current UUID equal and no bitmap uuid; does not necessarily | |
3284 | * mean this was a "simultaneous hard crash", maybe IO was | |
3285 | * frozen, so no UUID-bump happened. | |
3286 | * This is a protocol change; we overload DRBD_FF_WSAME as a flag | |
3287 | * for a "new-enough" peer DRBD version. */ | |
3288 | if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) { | |
3289 | *rule_nr = 41; | |
3290 | if (!(connection->agreed_features & DRBD_FF_WSAME)) { | |
3291 | drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n"); | |
3292 | return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8)); | |
3293 | } | |
3294 | if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) { | |
3295 | /* At least one has the "crashed primary" bit set, | |
3296 | * both are primary now, but neither has rotated its UUIDs? | |
3297 | * "Can not happen." */ | |
3298 | drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n"); | |
3299 | return -100; | |
3300 | } | |
3301 | if (device->state.role == R_PRIMARY) | |
3302 | return 1; | |
3303 | return -1; | |
3304 | } | |
3305 | ||
3306 | /* Both are secondary. | |
3307 | * Really looks like recovery from simultaneous hard crash. | |
3308 | * Check which had been primary before, and arbitrate. */ | |
b411b363 | 3309 | switch (rct) { |
f2d3d75b | 3310 | case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */ |
b411b363 PR |
3311 | case 1: /* self_pri && !peer_pri */ return 1; |
3312 | case 2: /* !self_pri && peer_pri */ return -1; | |
3313 | case 3: /* self_pri && peer_pri */ | |
44a4d551 | 3314 | dc = test_bit(RESOLVE_CONFLICTS, &connection->flags); |
b411b363 PR |
3315 | return dc ? -1 : 1; |
3316 | } | |
3317 | } | |
3318 | ||
3319 | *rule_nr = 50; | |
b30ab791 | 3320 | peer = device->p_uuid[UI_BITMAP] & ~((u64)1); |
b411b363 PR |
3321 | if (self == peer) |
3322 | return -1; | |
3323 | ||
3324 | *rule_nr = 51; | |
b30ab791 | 3325 | peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1); |
b411b363 | 3326 | if (self == peer) { |
44a4d551 | 3327 | if (connection->agreed_pro_version < 96 ? |
b30ab791 AG |
3328 | (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == |
3329 | (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : | |
3330 | peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) { | |
b411b363 PR |
3331 | /* The last P_SYNC_UUID did not get through. Undo the UUID modifications
3332 | the peer made when it last started a resync as sync source. */ | |
3333 | ||
44a4d551 | 3334 | if (connection->agreed_pro_version < 91) |
4a23f264 | 3335 | return -1091; |
b411b363 | 3336 | |
b30ab791 AG |
3337 | device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START]; |
3338 | device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1]; | |
4a23f264 | 3339 | |
d0180171 | 3340 | drbd_info(device, "Lost last syncUUID packet, corrected:\n"); |
b30ab791 | 3341 | drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); |
4a23f264 | 3342 | |
b411b363 PR |
3343 | return -1; |
3344 | } | |
3345 | } | |
3346 | ||
3347 | *rule_nr = 60; | |
b30ab791 | 3348 | self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); |
b411b363 | 3349 | for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { |
b30ab791 | 3350 | peer = device->p_uuid[i] & ~((u64)1); |
b411b363 PR |
3351 | if (self == peer) |
3352 | return -2; | |
3353 | } | |
3354 | ||
3355 | *rule_nr = 70; | |
b30ab791 AG |
3356 | self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); |
3357 | peer = device->p_uuid[UI_CURRENT] & ~((u64)1); | |
b411b363 PR |
3358 | if (self == peer) |
3359 | return 1; | |
3360 | ||
3361 | *rule_nr = 71; | |
b30ab791 | 3362 | self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); |
b411b363 | 3363 | if (self == peer) { |
44a4d551 | 3364 | if (connection->agreed_pro_version < 96 ? |
b30ab791 AG |
3365 | (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == |
3366 | (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) : | |
3367 | self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { | |
b411b363 PR |
3368 | /* The last P_SYNC_UUID did not get through. Undo the UUID modifications
3369 | we made when we last started a resync as sync source. */ | |
3370 | ||
44a4d551 | 3371 | if (connection->agreed_pro_version < 91) |
4a23f264 | 3372 | return -1091; |
b411b363 | 3373 | |
b30ab791 AG |
3374 | __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); |
3375 | __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]); | |
b411b363 | 3376 | |
d0180171 | 3377 | drbd_info(device, "Last syncUUID did not get through, corrected:\n"); |
b30ab791 AG |
3378 | drbd_uuid_dump(device, "self", device->ldev->md.uuid, |
3379 | device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0); | |
b411b363 PR |
3380 | |
3381 | return 1; | |
3382 | } | |
3383 | } | |
3384 | ||
3385 | ||
3386 | *rule_nr = 80; | |
b30ab791 | 3387 | peer = device->p_uuid[UI_CURRENT] & ~((u64)1); |
b411b363 | 3388 | for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { |
b30ab791 | 3389 | self = device->ldev->md.uuid[i] & ~((u64)1); |
b411b363 PR |
3390 | if (self == peer) |
3391 | return 2; | |
3392 | } | |
3393 | ||
3394 | *rule_nr = 90; | |
b30ab791 AG |
3395 | self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); |
3396 | peer = device->p_uuid[UI_BITMAP] & ~((u64)1); | |
b411b363 PR |
3397 | if (self == peer && self != ((u64)0)) |
3398 | return 100; | |
3399 | ||
3400 | *rule_nr = 100; | |
3401 | for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { | |
b30ab791 | 3402 | self = device->ldev->md.uuid[i] & ~((u64)1); |
b411b363 | 3403 | for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { |
b30ab791 | 3404 | peer = device->p_uuid[j] & ~((u64)1); |
b411b363 PR |
3405 | if (self == peer) |
3406 | return -100; | |
3407 | } | |
3408 | } | |
3409 | ||
3410 | return -1000; | |
3411 | } | |
3412 | ||
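/* A sketch of the encoding behind rule 41 above: verdicts below -0x10000
 * pack the minimum required protocol version and feature flags, and
 * drbd_sync_handshake() below unpacks them exactly like this: */
static void decode_compat_verdict(int hg, int *proto, int *fflags)
{
	hg = -hg;			/* these verdicts are negative */
	*proto = hg & 0xff;		/* e.g. PRO_VERSION_MAX */
	*fflags = (hg >> 8) & 0xff;	/* e.g. DRBD_FF_WSAME */
}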
3413 | /* drbd_sync_handshake() returns the new conn state on success, or | |
3414 | CONN_MASK (-1) on failure. | |
3415 | */ | |
69a22773 AG |
3416 | static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device, |
3417 | enum drbd_role peer_role, | |
b411b363 PR |
3418 | enum drbd_disk_state peer_disk) __must_hold(local) |
3419 | { | |
69a22773 | 3420 | struct drbd_device *device = peer_device->device; |
b411b363 PR |
3421 | enum drbd_conns rv = C_MASK; |
3422 | enum drbd_disk_state mydisk; | |
44ed167d | 3423 | struct net_conf *nc; |
6dff2902 | 3424 | int hg, rule_nr, rr_conflict, tentative; |
b411b363 | 3425 | |
b30ab791 | 3426 | mydisk = device->state.disk; |
b411b363 | 3427 | if (mydisk == D_NEGOTIATING) |
b30ab791 | 3428 | mydisk = device->new_state_tmp.disk; |
b411b363 | 3429 | |
d0180171 | 3430 | drbd_info(device, "drbd_sync_handshake:\n"); |
9f2247bb | 3431 | |
b30ab791 AG |
3432 | spin_lock_irq(&device->ldev->md.uuid_lock); |
3433 | drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0); | |
3434 | drbd_uuid_dump(device, "peer", device->p_uuid, | |
3435 | device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]); | |
b411b363 | 3436 | |
f2d3d75b | 3437 | hg = drbd_uuid_compare(device, peer_role, &rule_nr); |
b30ab791 | 3438 | spin_unlock_irq(&device->ldev->md.uuid_lock); |
b411b363 | 3439 | |
d0180171 | 3440 | drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr); |
b411b363 PR |
3441 | |
3442 | if (hg == -1000) { | |
d0180171 | 3443 | drbd_alert(device, "Unrelated data, aborting!\n"); |
b411b363 PR |
3444 | return C_MASK; |
3445 | } | |
f2d3d75b LE |
3446 | if (hg < -0x10000) { |
3447 | int proto, fflags; | |
3448 | hg = -hg; | |
3449 | proto = hg & 0xff; | |
3450 | fflags = (hg >> 8) & 0xff; | |
3451 | drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n", | |
3452 | proto, fflags); | |
3453 | return C_MASK; | |
3454 | } | |
4a23f264 | 3455 | if (hg < -1000) { |
d0180171 | 3456 | drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); |
b411b363 PR |
3457 | return C_MASK; |
3458 | } | |
3459 | ||
3460 | if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || | |
3461 | (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { | |
3462 | int f = (hg == -100) || abs(hg) == 2; | |
3463 | hg = mydisk > D_INCONSISTENT ? 1 : -1; | |
3464 | if (f) | |
3465 | hg = hg*2; | |
d0180171 | 3466 | drbd_info(device, "Becoming sync %s due to disk states.\n", |
b411b363 PR |
3467 | hg > 0 ? "source" : "target"); |
3468 | } | |
3469 | ||
3a11a487 | 3470 | if (abs(hg) == 100) |
b30ab791 | 3471 | drbd_khelper(device, "initial-split-brain"); |
3a11a487 | 3472 | |
44ed167d | 3473 | rcu_read_lock(); |
69a22773 | 3474 | nc = rcu_dereference(peer_device->connection->net_conf); |
44ed167d PR |
3475 | |
3476 | if (hg == 100 || (hg == -100 && nc->always_asbp)) { | |
b30ab791 | 3477 | int pcount = (device->state.role == R_PRIMARY) |
b411b363 PR |
3478 | + (peer_role == R_PRIMARY); |
3479 | int forced = (hg == -100); | |
3480 | ||
3481 | switch (pcount) { | |
3482 | case 0: | |
69a22773 | 3483 | hg = drbd_asb_recover_0p(peer_device); |
b411b363 PR |
3484 | break; |
3485 | case 1: | |
69a22773 | 3486 | hg = drbd_asb_recover_1p(peer_device); |
b411b363 PR |
3487 | break; |
3488 | case 2: | |
69a22773 | 3489 | hg = drbd_asb_recover_2p(peer_device); |
b411b363 PR |
3490 | break; |
3491 | } | |
3492 | if (abs(hg) < 100) { | |
d0180171 | 3493 | drbd_warn(device, "Split-Brain detected, %d primaries, " |
b411b363 PR |
3494 | "automatically solved. Sync from %s node\n", |
3495 | pcount, (hg < 0) ? "peer" : "this"); | |
3496 | if (forced) { | |
d0180171 | 3497 | drbd_warn(device, "Doing a full sync, since" |
b411b363 PR |
3498 | " UUIDs were ambiguous.\n"); | |
3499 | hg = hg*2; | |
3500 | } | |
3501 | } | |
3502 | } | |
3503 | ||
3504 | if (hg == -100) { | |
b30ab791 | 3505 | if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1)) |
b411b363 | 3506 | hg = -1; |
b30ab791 | 3507 | if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1)) |
b411b363 PR |
3508 | hg = 1; |
3509 | ||
3510 | if (abs(hg) < 100) | |
d0180171 | 3511 | drbd_warn(device, "Split-Brain detected, manually solved. " |
b411b363 PR |
3512 | "Sync from %s node\n", |
3513 | (hg < 0) ? "peer" : "this"); | |
3514 | } | |
44ed167d | 3515 | rr_conflict = nc->rr_conflict; |
6dff2902 | 3516 | tentative = nc->tentative; |
44ed167d | 3517 | rcu_read_unlock(); |
b411b363 PR |
3518 | |
3519 | if (hg == -100) { | |
580b9767 LE |
3520 | /* FIXME this log message is not correct if we end up here |
3521 | * after an attempted attach on a diskless node. | |
3522 | * We just refuse to attach -- well, we drop the "connection" | |
3523 | * to that disk, in a way... */ | |
d0180171 | 3524 | drbd_alert(device, "Split-Brain detected but unresolved, dropping connection!\n"); |
b30ab791 | 3525 | drbd_khelper(device, "split-brain"); |
b411b363 PR |
3526 | return C_MASK; |
3527 | } | |
3528 | ||
3529 | if (hg > 0 && mydisk <= D_INCONSISTENT) { | |
d0180171 | 3530 | drbd_err(device, "I shall become SyncSource, but I am inconsistent!\n"); |
b411b363 PR |
3531 | return C_MASK; |
3532 | } | |
3533 | ||
3534 | if (hg < 0 && /* by intention we do not use mydisk here. */ | |
b30ab791 | 3535 | device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) { |
44ed167d | 3536 | switch (rr_conflict) { |
b411b363 | 3537 | case ASB_CALL_HELPER: |
b30ab791 | 3538 | drbd_khelper(device, "pri-lost"); |
b411b363 PR |
3539 | /* fall through */ |
3540 | case ASB_DISCONNECT: | |
d0180171 | 3541 | drbd_err(device, "I shall become SyncTarget, but I am primary!\n"); |
b411b363 PR |
3542 | return C_MASK; |
3543 | case ASB_VIOLENTLY: | |
d0180171 | 3544 | drbd_warn(device, "Becoming SyncTarget, violating the stable-data" |
b411b363 PR |
3545 | " assumption\n"); | |
3546 | } | |
3547 | } | |
3548 | ||
69a22773 | 3549 | if (tentative || test_bit(CONN_DRY_RUN, &peer_device->connection->flags)) { |
cf14c2e9 | 3550 | if (hg == 0) |
d0180171 | 3551 | drbd_info(device, "dry-run connect: No resync, would become Connected immediately.\n"); |
cf14c2e9 | 3552 | else |
d0180171 | 3553 | drbd_info(device, "dry-run connect: Would become %s, doing a %s resync.", |
cf14c2e9 PR |
3554 | drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), |
3555 | abs(hg) >= 2 ? "full" : "bit-map based"); | |
3556 | return C_MASK; | |
3557 | } | |
3558 | ||
b411b363 | 3559 | if (abs(hg) >= 2) { |
d0180171 | 3560 | drbd_info(device, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); |
b30ab791 | 3561 | if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", |
20ceb2b2 | 3562 | BM_LOCKED_SET_ALLOWED)) |
b411b363 PR |
3563 | return C_MASK; |
3564 | } | |
3565 | ||
3566 | if (hg > 0) { /* become sync source. */ | |
3567 | rv = C_WF_BITMAP_S; | |
3568 | } else if (hg < 0) { /* become sync target */ | |
3569 | rv = C_WF_BITMAP_T; | |
3570 | } else { | |
3571 | rv = C_CONNECTED; | |
b30ab791 | 3572 | if (drbd_bm_total_weight(device)) { |
d0180171 | 3573 | drbd_info(device, "No resync, but %lu bits in bitmap!\n", |
b30ab791 | 3574 | drbd_bm_total_weight(device)); |
b411b363 PR |
3575 | } |
3576 | } | |
3577 | ||
3578 | return rv; | |
3579 | } | |
3580 | ||
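/* A sketch (not driver code) of the final verdict-to-state mapping in
 * drbd_sync_handshake() above, assuming the driver's enum drbd_conns: */
static enum drbd_conns handshake_verdict_to_state(int hg)
{
	if (hg > 0)
		return C_WF_BITMAP_S;	/* we become sync source */
	if (hg < 0)
		return C_WF_BITMAP_T;	/* we become sync target */
	return C_CONNECTED;		/* UUIDs agree, no resync needed */
}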
f179d76d | 3581 | static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer) |
b411b363 PR |
3582 | { |
3583 | /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ | |
f179d76d PR |
3584 | if (peer == ASB_DISCARD_REMOTE) |
3585 | return ASB_DISCARD_LOCAL; | |
b411b363 PR |
3586 | |
3587 | /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ | |
f179d76d PR |
3588 | if (peer == ASB_DISCARD_LOCAL) |
3589 | return ASB_DISCARD_REMOTE; | |
b411b363 PR |
3590 | |
3591 | /* everything else is valid if they are equal on both sides. */ | |
f179d76d | 3592 | return peer; |
b411b363 PR |
3593 | } |
3594 | ||
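/* Worked example of the symmetry above: a local after-sb policy of
 * ASB_DISCARD_LOCAL is compatible with a peer that sends
 * ASB_DISCARD_REMOTE, since convert_after_sb(ASB_DISCARD_REMOTE) yields
 * ASB_DISCARD_LOCAL, which is what receive_protocol() below compares
 * against; any other pairing of the two values is rejected. */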
bde89a9e | 3595 | static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 3596 | { |
e658983a | 3597 | struct p_protocol *p = pi->data; |
036b17ea PR |
3598 | enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; |
3599 | int p_proto, p_discard_my_data, p_two_primaries, cf; | |
3600 | struct net_conf *nc, *old_net_conf, *new_net_conf = NULL; | |
3601 | char integrity_alg[SHARED_SECRET_MAX] = ""; | |
9534d671 | 3602 | struct crypto_ahash *peer_integrity_tfm = NULL; |
7aca6c75 | 3603 | void *int_dig_in = NULL, *int_dig_vv = NULL; |
b411b363 | 3604 | |
b411b363 PR |
3605 | p_proto = be32_to_cpu(p->protocol); |
3606 | p_after_sb_0p = be32_to_cpu(p->after_sb_0p); | |
3607 | p_after_sb_1p = be32_to_cpu(p->after_sb_1p); | |
3608 | p_after_sb_2p = be32_to_cpu(p->after_sb_2p); | |
b411b363 | 3609 | p_two_primaries = be32_to_cpu(p->two_primaries); |
cf14c2e9 | 3610 | cf = be32_to_cpu(p->conn_flags); |
6139f60d | 3611 | p_discard_my_data = cf & CF_DISCARD_MY_DATA; |
cf14c2e9 | 3612 | |
bde89a9e | 3613 | if (connection->agreed_pro_version >= 87) { |
86db0618 | 3614 | int err; |
cf14c2e9 | 3615 | |
88104ca4 | 3616 | if (pi->size > sizeof(integrity_alg)) |
86db0618 | 3617 | return -EIO; |
bde89a9e | 3618 | err = drbd_recv_all(connection, integrity_alg, pi->size); |
86db0618 AG |
3619 | if (err) |
3620 | return err; | |
036b17ea | 3621 | integrity_alg[SHARED_SECRET_MAX - 1] = 0; |
b411b363 PR |
3622 | } |
3623 | ||
7d4c782c | 3624 | if (pi->cmd != P_PROTOCOL_UPDATE) { |
bde89a9e | 3625 | clear_bit(CONN_DRY_RUN, &connection->flags); |
b411b363 | 3626 | |
fbc12f45 | 3627 | if (cf & CF_DRY_RUN) |
bde89a9e | 3628 | set_bit(CONN_DRY_RUN, &connection->flags); |
b411b363 | 3629 | |
fbc12f45 | 3630 | rcu_read_lock(); |
bde89a9e | 3631 | nc = rcu_dereference(connection->net_conf); |
b411b363 | 3632 | |
fbc12f45 | 3633 | if (p_proto != nc->wire_protocol) { |
1ec861eb | 3634 | drbd_err(connection, "incompatible %s settings\n", "protocol"); |
fbc12f45 AG |
3635 | goto disconnect_rcu_unlock; |
3636 | } | |
b411b363 | 3637 | |
fbc12f45 | 3638 | if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) { |
1ec861eb | 3639 | drbd_err(connection, "incompatible %s settings\n", "after-sb-0pri"); |
fbc12f45 AG |
3640 | goto disconnect_rcu_unlock; |
3641 | } | |
b411b363 | 3642 | |
fbc12f45 | 3643 | if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) { |
1ec861eb | 3644 | drbd_err(connection, "incompatible %s settings\n", "after-sb-1pri"); |
fbc12f45 AG |
3645 | goto disconnect_rcu_unlock; |
3646 | } | |
b411b363 | 3647 | |
fbc12f45 | 3648 | if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) { |
1ec861eb | 3649 | drbd_err(connection, "incompatible %s settings\n", "after-sb-2pri"); |
fbc12f45 AG |
3650 | goto disconnect_rcu_unlock; |
3651 | } | |
b411b363 | 3652 | |
fbc12f45 | 3653 | if (p_discard_my_data && nc->discard_my_data) { |
1ec861eb | 3654 | drbd_err(connection, "incompatible %s settings\n", "discard-my-data"); |
fbc12f45 AG |
3655 | goto disconnect_rcu_unlock; |
3656 | } | |
b411b363 | 3657 | |
fbc12f45 | 3658 | if (p_two_primaries != nc->two_primaries) { |
1ec861eb | 3659 | drbd_err(connection, "incompatible %s settings\n", "allow-two-primaries"); |
fbc12f45 AG |
3660 | goto disconnect_rcu_unlock; |
3661 | } | |
b411b363 | 3662 | |
fbc12f45 | 3663 | if (strcmp(integrity_alg, nc->integrity_alg)) { |
1ec861eb | 3664 | drbd_err(connection, "incompatible %s settings\n", "data-integrity-alg"); |
fbc12f45 AG |
3665 | goto disconnect_rcu_unlock; |
3666 | } | |
b411b363 | 3667 | |
fbc12f45 | 3668 | rcu_read_unlock(); |
b411b363 PR |
3669 | } |
3670 | ||
7d4c782c AG |
3671 | if (integrity_alg[0]) { |
3672 | int hash_size; | |
3673 | ||
3674 | /* | |
3675 | * We can only change the peer data integrity algorithm | |
3676 | * here. Changing our own data integrity algorithm | |
3677 | * requires that we send a P_PROTOCOL_UPDATE packet at | |
3678 | * the same time; otherwise, the peer has no way to | |
3679 | * tell at which packet boundary the algorithm should | |
3680 | * change. | |
3681 | */ | |
b411b363 | 3682 | |
9534d671 | 3683 | peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC); |
7d4c782c | 3684 | if (!peer_integrity_tfm) { |
1ec861eb | 3685 | drbd_err(connection, "peer data-integrity-alg %s not supported\n", |
7d4c782c AG |
3686 | integrity_alg); |
3687 | goto disconnect; | |
3688 | } | |
b411b363 | 3689 | |
9534d671 | 3690 | hash_size = crypto_ahash_digestsize(peer_integrity_tfm); |
7d4c782c AG |
3691 | int_dig_in = kmalloc(hash_size, GFP_KERNEL); |
3692 | int_dig_vv = kmalloc(hash_size, GFP_KERNEL); | |
3693 | if (!(int_dig_in && int_dig_vv)) { | |
1ec861eb | 3694 | drbd_err(connection, "Allocation of buffers for data integrity checking failed\n"); |
b411b363 PR |
3695 | goto disconnect; |
3696 | } | |
b411b363 PR |
3697 | } |
3698 | ||
7d4c782c AG |
3699 | new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); |
3700 | if (!new_net_conf) { | |
1ec861eb | 3701 | drbd_err(connection, "Allocation of new net_conf failed\n"); |
7d4c782c AG |
3702 | goto disconnect; |
3703 | } | |
3704 | ||
bde89a9e | 3705 | mutex_lock(&connection->data.mutex); |
0500813f | 3706 | mutex_lock(&connection->resource->conf_update); |
bde89a9e | 3707 | old_net_conf = connection->net_conf; |
7d4c782c AG |
3708 | *new_net_conf = *old_net_conf; |
3709 | ||
3710 | new_net_conf->wire_protocol = p_proto; | |
3711 | new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p); | |
3712 | new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p); | |
3713 | new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p); | |
3714 | new_net_conf->two_primaries = p_two_primaries; | |
3715 | ||
bde89a9e | 3716 | rcu_assign_pointer(connection->net_conf, new_net_conf); |
0500813f | 3717 | mutex_unlock(&connection->resource->conf_update); |
bde89a9e | 3718 | mutex_unlock(&connection->data.mutex); |
7d4c782c | 3719 | |
9534d671 | 3720 | crypto_free_ahash(connection->peer_integrity_tfm); |
bde89a9e AG |
3721 | kfree(connection->int_dig_in); |
3722 | kfree(connection->int_dig_vv); | |
3723 | connection->peer_integrity_tfm = peer_integrity_tfm; | |
3724 | connection->int_dig_in = int_dig_in; | |
3725 | connection->int_dig_vv = int_dig_vv; | |
7d4c782c AG |
3726 | |
3727 | if (strcmp(old_net_conf->integrity_alg, integrity_alg)) | |
1ec861eb | 3728 | drbd_info(connection, "peer data-integrity-alg: %s\n", |
7d4c782c AG |
3729 | integrity_alg[0] ? integrity_alg : "(none)"); |
3730 | ||
3731 | synchronize_rcu(); | |
3732 | kfree(old_net_conf); | |
82bc0194 | 3733 | return 0; |
b411b363 | 3734 | |
44ed167d PR |
3735 | disconnect_rcu_unlock: |
3736 | rcu_read_unlock(); | |
b411b363 | 3737 | disconnect: |
9534d671 | 3738 | crypto_free_ahash(peer_integrity_tfm); |
036b17ea PR |
3739 | kfree(int_dig_in); |
3740 | kfree(int_dig_vv); | |
bde89a9e | 3741 | conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); |
82bc0194 | 3742 | return -EIO; |
b411b363 PR |
3743 | } |
3744 | ||
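/* The net_conf swap above follows the standard RCU publish/retire
 * pattern; distilled (generic names, not driver code):
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	*new = *old;			// copy, then modify the copy
 *	rcu_assign_pointer(conf, new);	// publish to readers
 *	synchronize_rcu();		// wait out pre-existing readers
 *	kfree(old);			// now safe to free
 *
 * Readers only ever dereference conf under rcu_read_lock(), as the
 * rcu_dereference() calls elsewhere in this file do. */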
3745 | /* helper function | |
3746 | * input: alg name, feature name | |
3747 | * return: NULL (alg name was "") | |
3748 | * ERR_PTR(error) if something goes wrong | |
3749 | * or the crypto hash ptr, if it worked out ok. */ | |
9534d671 | 3750 | static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device, |
b411b363 PR |
3751 | const char *alg, const char *name) |
3752 | { | |
9534d671 | 3753 | struct crypto_ahash *tfm; |
b411b363 PR |
3754 | |
3755 | if (!alg[0]) | |
3756 | return NULL; | |
3757 | ||
9534d671 | 3758 | tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC); |
b411b363 | 3759 | if (IS_ERR(tfm)) { |
d0180171 | 3760 | drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n", |
b411b363 PR |
3761 | alg, name, PTR_ERR(tfm)); |
3762 | return tfm; | |
3763 | } | |
b411b363 PR |
3764 | return tfm; |
3765 | } | |
3766 | ||
bde89a9e | 3767 | static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi) |
4a76b161 | 3768 | { |
bde89a9e | 3769 | void *buffer = connection->data.rbuf; |
4a76b161 AG |
3770 | int size = pi->size; |
3771 | ||
3772 | while (size) { | |
3773 | int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE); | |
bde89a9e | 3774 | s = drbd_recv(connection, buffer, s); |
4a76b161 AG |
3775 | if (s <= 0) { |
3776 | if (s < 0) | |
3777 | return s; | |
3778 | break; | |
3779 | } | |
3780 | size -= s; | |
3781 | } | |
3782 | if (size) | |
3783 | return -EIO; | |
3784 | return 0; | |
3785 | } | |
3786 | ||
3787 | /* | |
3788 | * config_unknown_volume - device configuration command for unknown volume | |
3789 | * | |
3790 | * When a device is added to an existing connection, the node on which the | |
3791 | * device is added first will send configuration commands to its peer but the | |
3792 | * peer will not know about the device yet. It will warn and ignore these | |
3793 | * commands. Once the device is added on the second node, the second node will | |
3794 | * send the same device configuration commands, but in the other direction. | |
3795 | * | |
3796 | * (We can also end up here if drbd is misconfigured.) | |
3797 | */ | |
bde89a9e | 3798 | static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi) |
4a76b161 | 3799 | { |
1ec861eb | 3800 | drbd_warn(connection, "%s packet received for volume %u, which is not configured locally\n", |
2fcb8f30 | 3801 | cmdname(pi->cmd), pi->vnr); |
bde89a9e | 3802 | return ignore_remaining_packet(connection, pi); |
4a76b161 AG |
3803 | } |
3804 | ||
bde89a9e | 3805 | static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 3806 | { |
9f4fe9ad | 3807 | struct drbd_peer_device *peer_device; |
b30ab791 | 3808 | struct drbd_device *device; |
e658983a | 3809 | struct p_rs_param_95 *p; |
b411b363 | 3810 | unsigned int header_size, data_size, exp_max_sz; |
9534d671 HX |
3811 | struct crypto_ahash *verify_tfm = NULL; |
3812 | struct crypto_ahash *csums_tfm = NULL; | |
2ec91e0e | 3813 | struct net_conf *old_net_conf, *new_net_conf = NULL; |
813472ce | 3814 | struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; |
bde89a9e | 3815 | const int apv = connection->agreed_pro_version; |
813472ce | 3816 | struct fifo_buffer *old_plan = NULL, *new_plan = NULL; |
778f271d | 3817 | int fifo_size = 0; |
82bc0194 | 3818 | int err; |
b411b363 | 3819 | |
9f4fe9ad AG |
3820 | peer_device = conn_peer_device(connection, pi->vnr); |
3821 | if (!peer_device) | |
bde89a9e | 3822 | return config_unknown_volume(connection, pi); |
9f4fe9ad | 3823 | device = peer_device->device; |
b411b363 PR |
3824 | |
3825 | exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) | |
3826 | : apv == 88 ? sizeof(struct p_rs_param) | |
3827 | + SHARED_SECRET_MAX | |
8e26f9cc PR |
3828 | : apv <= 94 ? sizeof(struct p_rs_param_89) |
3829 | : /* apv >= 95 */ sizeof(struct p_rs_param_95); | |
b411b363 | 3830 | |
e2857216 | 3831 | if (pi->size > exp_max_sz) { |
d0180171 | 3832 | drbd_err(device, "SyncParam packet too long: received %u, expected <= %u bytes\n", |
e2857216 | 3833 | pi->size, exp_max_sz); |
82bc0194 | 3834 | return -EIO; |
b411b363 PR |
3835 | } |
3836 | ||
3837 | if (apv <= 88) { | |
e658983a | 3838 | header_size = sizeof(struct p_rs_param); |
e2857216 | 3839 | data_size = pi->size - header_size; |
8e26f9cc | 3840 | } else if (apv <= 94) { |
e658983a | 3841 | header_size = sizeof(struct p_rs_param_89); |
e2857216 | 3842 | data_size = pi->size - header_size; |
0b0ba1ef | 3843 | D_ASSERT(device, data_size == 0); |
8e26f9cc | 3844 | } else { |
e658983a | 3845 | header_size = sizeof(struct p_rs_param_95); |
e2857216 | 3846 | data_size = pi->size - header_size; |
0b0ba1ef | 3847 | D_ASSERT(device, data_size == 0); |
b411b363 PR |
3848 | } |
3849 | ||
3850 | /* initialize verify_alg and csums_alg */ | |
e658983a | 3851 | p = pi->data; |
b411b363 PR |
3852 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); |
3853 | ||
9f4fe9ad | 3854 | err = drbd_recv_all(peer_device->connection, p, header_size); |
82bc0194 AG |
3855 | if (err) |
3856 | return err; | |
b411b363 | 3857 | |
0500813f | 3858 | mutex_lock(&connection->resource->conf_update); |
9f4fe9ad | 3859 | old_net_conf = peer_device->connection->net_conf; |
b30ab791 | 3860 | if (get_ldev(device)) { |
813472ce PR |
3861 | new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); |
3862 | if (!new_disk_conf) { | |
b30ab791 | 3863 | put_ldev(device); |
0500813f | 3864 | mutex_unlock(&connection->resource->conf_update); |
d0180171 | 3865 | drbd_err(device, "Allocation of new disk_conf failed\n"); |
813472ce PR |
3866 | return -ENOMEM; |
3867 | } | |
daeda1cc | 3868 | |
b30ab791 | 3869 | old_disk_conf = device->ldev->disk_conf; |
813472ce | 3870 | *new_disk_conf = *old_disk_conf; |
b411b363 | 3871 | |
6394b935 | 3872 | new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate); |
813472ce | 3873 | } |
b411b363 PR |
3874 | |
3875 | if (apv >= 88) { | |
3876 | if (apv == 88) { | |
5de73827 | 3877 | if (data_size > SHARED_SECRET_MAX || data_size == 0) { |
d0180171 | 3878 | drbd_err(device, "verify-alg of wrong size, " |
5de73827 PR |
3879 | "peer wants %u, accepting only up to %u bytes\n", | |
3880 | data_size, SHARED_SECRET_MAX); | |
813472ce PR |
3881 | err = -EIO; |
3882 | goto reconnect; | |
b411b363 PR |
3883 | } |
3884 | ||
9f4fe9ad | 3885 | err = drbd_recv_all(peer_device->connection, p->verify_alg, data_size); |
813472ce PR |
3886 | if (err) |
3887 | goto reconnect; | |
b411b363 PR |
3888 | /* we expect NUL terminated string */ |
3889 | /* but just in case someone tries to be evil */ | |
0b0ba1ef | 3890 | D_ASSERT(device, p->verify_alg[data_size-1] == 0); |
b411b363 PR |
3891 | p->verify_alg[data_size-1] = 0; |
3892 | ||
3893 | } else /* apv >= 89 */ { | |
3894 | /* we still expect NUL terminated strings */ | |
3895 | /* but just in case someone tries to be evil */ | |
0b0ba1ef AG |
3896 | D_ASSERT(device, p->verify_alg[SHARED_SECRET_MAX-1] == 0); |
3897 | D_ASSERT(device, p->csums_alg[SHARED_SECRET_MAX-1] == 0); | |
b411b363 PR |
3898 | p->verify_alg[SHARED_SECRET_MAX-1] = 0; |
3899 | p->csums_alg[SHARED_SECRET_MAX-1] = 0; | |
3900 | } | |
3901 | ||
2ec91e0e | 3902 | if (strcmp(old_net_conf->verify_alg, p->verify_alg)) { |
b30ab791 | 3903 | if (device->state.conn == C_WF_REPORT_PARAMS) { |
d0180171 | 3904 | drbd_err(device, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", |
2ec91e0e | 3905 | old_net_conf->verify_alg, p->verify_alg); |
b411b363 PR |
3906 | goto disconnect; |
3907 | } | |
b30ab791 | 3908 | verify_tfm = drbd_crypto_alloc_digest_safe(device, |
b411b363 PR |
3909 | p->verify_alg, "verify-alg"); |
3910 | if (IS_ERR(verify_tfm)) { | |
3911 | verify_tfm = NULL; | |
3912 | goto disconnect; | |
3913 | } | |
3914 | } | |
3915 | ||
2ec91e0e | 3916 | if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) { |
b30ab791 | 3917 | if (device->state.conn == C_WF_REPORT_PARAMS) { |
d0180171 | 3918 | drbd_err(device, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", |
2ec91e0e | 3919 | old_net_conf->csums_alg, p->csums_alg); |
b411b363 PR |
3920 | goto disconnect; |
3921 | } | |
b30ab791 | 3922 | csums_tfm = drbd_crypto_alloc_digest_safe(device, |
b411b363 PR |
3923 | p->csums_alg, "csums-alg"); |
3924 | if (IS_ERR(csums_tfm)) { | |
3925 | csums_tfm = NULL; | |
3926 | goto disconnect; | |
3927 | } | |
3928 | } | |
3929 | ||
813472ce | 3930 | if (apv > 94 && new_disk_conf) { |
daeda1cc PR |
3931 | new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead); |
3932 | new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target); | |
3933 | new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target); | |
3934 | new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate); | |
778f271d | 3935 | |
daeda1cc | 3936 | fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ; |
b30ab791 | 3937 | if (fifo_size != device->rs_plan_s->size) { |
813472ce PR |
3938 | new_plan = fifo_alloc(fifo_size); |
3939 | if (!new_plan) { | |
d0180171 | 3940 | drbd_err(device, "kmalloc of fifo_buffer failed"); |
b30ab791 | 3941 | put_ldev(device); |
778f271d PR |
3942 | goto disconnect; |
3943 | } | |
3944 | } | |
8e26f9cc | 3945 | } |
b411b363 | 3946 | |
91fd4dad | 3947 | if (verify_tfm || csums_tfm) { |
2ec91e0e PR |
3948 | new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); |
3949 | if (!new_net_conf) { | |
d0180171 | 3950 | drbd_err(device, "Allocation of new net_conf failed\n"); |
91fd4dad PR |
3951 | goto disconnect; |
3952 | } | |
3953 | ||
2ec91e0e | 3954 | *new_net_conf = *old_net_conf; |
91fd4dad PR |
3955 | |
3956 | if (verify_tfm) { | |
2ec91e0e PR |
3957 | strcpy(new_net_conf->verify_alg, p->verify_alg); |
3958 | new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; | |
9534d671 | 3959 | crypto_free_ahash(peer_device->connection->verify_tfm); |
9f4fe9ad | 3960 | peer_device->connection->verify_tfm = verify_tfm; |
d0180171 | 3961 | drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); |
91fd4dad PR |
3962 | } |
3963 | if (csums_tfm) { | |
2ec91e0e PR |
3964 | strcpy(new_net_conf->csums_alg, p->csums_alg); |
3965 | new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; | |
9534d671 | 3966 | crypto_free_ahash(peer_device->connection->csums_tfm); |
9f4fe9ad | 3967 | peer_device->connection->csums_tfm = csums_tfm; |
d0180171 | 3968 | drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg); |
91fd4dad | 3969 | } |
bde89a9e | 3970 | rcu_assign_pointer(connection->net_conf, new_net_conf); |
778f271d | 3971 | } |
b411b363 PR |
3972 | } |
3973 | ||
813472ce | 3974 | if (new_disk_conf) { |
b30ab791 AG |
3975 | rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); |
3976 | put_ldev(device); | |
813472ce PR |
3977 | } |
3978 | ||
3979 | if (new_plan) { | |
b30ab791 AG |
3980 | old_plan = device->rs_plan_s; |
3981 | rcu_assign_pointer(device->rs_plan_s, new_plan); | |
b411b363 | 3982 | } |
daeda1cc | 3983 | |
0500813f | 3984 | mutex_unlock(&connection->resource->conf_update); |
daeda1cc PR |
3985 | synchronize_rcu(); |
3986 | if (new_net_conf) | |
3987 | kfree(old_net_conf); | |
3988 | kfree(old_disk_conf); | |
813472ce | 3989 | kfree(old_plan); |
daeda1cc | 3990 | |
82bc0194 | 3991 | return 0; |
b411b363 | 3992 | |
813472ce PR |
3993 | reconnect: |
3994 | if (new_disk_conf) { | |
b30ab791 | 3995 | put_ldev(device); |
813472ce PR |
3996 | kfree(new_disk_conf); |
3997 | } | |
0500813f | 3998 | mutex_unlock(&connection->resource->conf_update); |
813472ce PR |
3999 | return -EIO; |
4000 | ||
b411b363 | 4001 | disconnect: |
813472ce PR |
4002 | kfree(new_plan); |
4003 | if (new_disk_conf) { | |
b30ab791 | 4004 | put_ldev(device); |
813472ce PR |
4005 | kfree(new_disk_conf); |
4006 | } | |
0500813f | 4007 | mutex_unlock(&connection->resource->conf_update); |
b411b363 PR |
4008 | /* just for completeness: actually not needed, |
4009 | * as this is not reached if csums_tfm was ok. */ | |
9534d671 | 4010 | crypto_free_ahash(csums_tfm); |
b411b363 | 4011 | /* but free the verify_tfm again, if csums_tfm did not work out */ |
9534d671 | 4012 | crypto_free_ahash(verify_tfm); |
9f4fe9ad | 4013 | conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); |
82bc0194 | 4014 | return -EIO; |
b411b363 PR |
4015 | } |
4016 | ||
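/*
 * Illustration of the config-update pattern used above (a sketch, not part
 * of the driver; the helper name is made up, the call sequence mirrors
 * receive_SyncParam()): writers serialize on conf_update, publish the new
 * struct with rcu_assign_pointer(), and may only kfree() the old one after
 * synchronize_rcu() has waited out every reader that could still see it.
 */
static void example_swap_disk_conf(struct drbd_device *device,
				   struct disk_conf *new_disk_conf)
{
	struct disk_conf *old_disk_conf;

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;	/* inherit unchanged settings */
	rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
	mutex_unlock(&device->resource->conf_update);

	synchronize_rcu();	/* all rcu_read_lock() readers are done */
	kfree(old_disk_conf);
}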
b411b363 | 4017 | /* warn if the arguments differ by more than 12.5% */ |
b30ab791 | 4018 | static void warn_if_differ_considerably(struct drbd_device *device, |
b411b363 PR |
4019 | const char *s, sector_t a, sector_t b) |
4020 | { | |
4021 | sector_t d; | |
4022 | if (a == 0 || b == 0) | |
4023 | return; | |
4024 | d = (a > b) ? (a - b) : (b - a); | |
4025 | if (d > (a>>3) || d > (b>>3)) | |
d0180171 | 4026 | drbd_warn(device, "Considerable difference in %s: %llus vs. %llus\n", s, |
b411b363 PR |
4027 | (unsigned long long)a, (unsigned long long)b); |
4028 | } | |
4029 | ||
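/*
 * Worked example for the check above: with a = 1000 and b = 1150 sectors,
 * d = 150 exceeds a>>3 = 125, so the warning fires; with b = 1100, d = 100
 * is neither above a>>3 = 125 nor above b>>3 = 137, and nothing is logged.
 */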
bde89a9e | 4030 | static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4031 | { |
9f4fe9ad | 4032 | struct drbd_peer_device *peer_device; |
b30ab791 | 4033 | struct drbd_device *device; |
e658983a | 4034 | struct p_sizes *p = pi->data; |
9104d31a | 4035 | struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL; |
e96c9633 | 4036 | enum determine_dev_size dd = DS_UNCHANGED; |
6a8d68b1 | 4037 | sector_t p_size, p_usize, p_csize, my_usize; |
b411b363 | 4038 | int ldsc = 0; /* local disk size changed */ |
e89b591c | 4039 | enum dds_flags ddsf; |
b411b363 | 4040 | |
9f4fe9ad AG |
4041 | peer_device = conn_peer_device(connection, pi->vnr); |
4042 | if (!peer_device) | |
bde89a9e | 4043 | return config_unknown_volume(connection, pi); |
9f4fe9ad | 4044 | device = peer_device->device; |
4a76b161 | 4045 | |
b411b363 PR |
4046 | p_size = be64_to_cpu(p->d_size); |
4047 | p_usize = be64_to_cpu(p->u_size); | |
6a8d68b1 | 4048 | p_csize = be64_to_cpu(p->c_size); |
b411b363 | 4049 | |
b411b363 PR |
4050 | /* just store the peer's disk size for now. |
4051 | * we still need to figure out whether we accept that. */ | |
b30ab791 | 4052 | device->p_size = p_size; |
b411b363 | 4053 | |
b30ab791 | 4054 | if (get_ldev(device)) { |
60bac040 | 4055 | sector_t new_size, cur_size; |
daeda1cc | 4056 | rcu_read_lock(); |
b30ab791 | 4057 | my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size; |
daeda1cc PR |
4058 | rcu_read_unlock(); |
4059 | ||
b30ab791 AG |
4060 | warn_if_differ_considerably(device, "lower level device sizes", |
4061 | p_size, drbd_get_max_capacity(device->ldev)); | |
4062 | warn_if_differ_considerably(device, "user requested size", | |
daeda1cc | 4063 | p_usize, my_usize); |
b411b363 PR |
4064 | |
4065 | /* if this is the first connect, or an otherwise expected | |
4066 | * param exchange, choose the minimum */ | |
b30ab791 | 4067 | if (device->state.conn == C_WF_REPORT_PARAMS) |
daeda1cc | 4068 | p_usize = min_not_zero(my_usize, p_usize); |
b411b363 PR |
4069 | |
4070 | /* Never shrink a device with usable data during connect. | |
4071 | But allow online shrinking if we are connected. */ | |
60bac040 LE |
4072 | new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0); |
4073 | cur_size = drbd_get_capacity(device->this_bdev); | |
4074 | if (new_size < cur_size && | |
b30ab791 AG |
4075 | device->state.disk >= D_OUTDATED && |
4076 | device->state.conn < C_CONNECTED) { | |
60bac040 LE |
4077 | drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n", |
4078 | (unsigned long long)new_size, (unsigned long long)cur_size); | |
9f4fe9ad | 4079 | conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); |
b30ab791 | 4080 | put_ldev(device); |
82bc0194 | 4081 | return -EIO; |
b411b363 | 4082 | } |
daeda1cc PR |
4083 | |
4084 | if (my_usize != p_usize) { | |
4085 | struct disk_conf *old_disk_conf, *new_disk_conf = NULL; | |
4086 | ||
4087 | new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); | |
4088 | if (!new_disk_conf) { | |
d0180171 | 4089 | drbd_err(device, "Allocation of new disk_conf failed\n"); |
b30ab791 | 4090 | put_ldev(device); |
daeda1cc PR |
4091 | return -ENOMEM; |
4092 | } | |
4093 | ||
0500813f | 4094 | mutex_lock(&connection->resource->conf_update); |
b30ab791 | 4095 | old_disk_conf = device->ldev->disk_conf; |
daeda1cc PR |
4096 | *new_disk_conf = *old_disk_conf; |
4097 | new_disk_conf->disk_size = p_usize; | |
4098 | ||
b30ab791 | 4099 | rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); |
0500813f | 4100 | mutex_unlock(&connection->resource->conf_update); |
daeda1cc PR |
4101 | synchronize_rcu(); |
4102 | kfree(old_disk_conf); | |
4103 | ||
d0180171 | 4104 | drbd_info(device, "Peer sets u_size to %lu sectors\n", |
daeda1cc | 4105 | (unsigned long)p_usize);
b411b363 | 4106 | } |
daeda1cc | 4107 | |
b30ab791 | 4108 | put_ldev(device); |
b411b363 | 4109 | } |
b411b363 | 4110 | |
20c68fde | 4111 | device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); |
dd4f699d | 4112 | /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size(). |
20c68fde | 4113 | In case we cleared the QUEUE_FLAG_DISCARD from our queue in |
dd4f699d | 4114 | drbd_reconsider_queue_parameters(), we can be sure that after |
20c68fde LE |
4115 | drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */ |
4116 | ||
e89b591c | 4117 | ddsf = be16_to_cpu(p->dds_flags); |
b30ab791 | 4118 | if (get_ldev(device)) { |
9104d31a | 4119 | drbd_reconsider_queue_parameters(device, device->ldev, o); |
b30ab791 AG |
4120 | dd = drbd_determine_dev_size(device, ddsf, NULL); |
4121 | put_ldev(device); | |
e96c9633 | 4122 | if (dd == DS_ERROR) |
82bc0194 | 4123 | return -EIO; |
b30ab791 | 4124 | drbd_md_sync(device); |
b411b363 | 4125 | } else { |
6a8d68b1 LE |
4126 | /* |
4127 | * I am diskless, need to accept the peer's *current* size. | |
4128 | * I must NOT accept the peer's backing disk size,
4129 | * it may have been larger than mine all along... | |
4130 | * | |
4131 | * At this point, the peer knows more about my disk, or at | |
4132 | * least about what we last agreed upon, than myself. | |
4133 | * So if his c_size is less than his d_size, the most likely | |
4134 | * reason is that *my* d_size was smaller last time we checked. | |
4135 | * | |
4136 | * However, if he sends a zero current size, | |
4137 | * take his (user-capped or) backing disk size anyway.
4138 | */ | |
9104d31a | 4139 | drbd_reconsider_queue_parameters(device, NULL, o); |
6a8d68b1 | 4140 | drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size); |
b411b363 PR |
4141 | } |
4142 | ||
b30ab791 AG |
4143 | if (get_ldev(device)) { |
4144 | if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) { | |
4145 | device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); | |
b411b363 PR |
4146 | ldsc = 1; |
4147 | } | |
4148 | ||
b30ab791 | 4149 | put_ldev(device); |
b411b363 PR |
4150 | } |
4151 | ||
b30ab791 | 4152 | if (device->state.conn > C_WF_REPORT_PARAMS) { |
b411b363 | 4153 | if (be64_to_cpu(p->c_size) != |
b30ab791 | 4154 | drbd_get_capacity(device->this_bdev) || ldsc) { |
b411b363 PR |
4155 | /* we have different sizes, probably peer |
4156 | * needs to know my new size... */ | |
69a22773 | 4157 | drbd_send_sizes(peer_device, 0, ddsf); |
b411b363 | 4158 | } |
b30ab791 AG |
4159 | if (test_and_clear_bit(RESIZE_PENDING, &device->flags) || |
4160 | (dd == DS_GREW && device->state.conn == C_CONNECTED)) { | |
4161 | if (device->state.pdsk >= D_INCONSISTENT && | |
4162 | device->state.disk >= D_INCONSISTENT) { | |
e89b591c | 4163 | if (ddsf & DDSF_NO_RESYNC) |
d0180171 | 4164 | drbd_info(device, "Resync of new storage suppressed with --assume-clean\n"); |
e89b591c | 4165 | else |
b30ab791 | 4166 | resync_after_online_grow(device); |
e89b591c | 4167 | } else |
b30ab791 | 4168 | set_bit(RESYNC_AFTER_NEG, &device->flags); |
b411b363 PR |
4169 | } |
4170 | } | |
4171 | ||
82bc0194 | 4172 | return 0; |
b411b363 PR |
4173 | } |
4174 | ||
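/*
 * The connect-time u_size negotiation above boils down to this sketch
 * (illustrative helper, not in the driver): both peers feed the local and
 * the remote user-requested size through the same minimum, treating 0 as
 * "no limit", so they arrive at the same effective value.
 */
static sector_t example_effective_u_size(sector_t my_usize, sector_t p_usize)
{
	if (my_usize == 0)
		return p_usize;		/* no local limit configured */
	if (p_usize == 0)
		return my_usize;	/* peer imposes no limit */
	return my_usize < p_usize ? my_usize : p_usize;
}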
bde89a9e | 4175 | static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4176 | { |
9f4fe9ad | 4177 | struct drbd_peer_device *peer_device; |
b30ab791 | 4178 | struct drbd_device *device; |
e658983a | 4179 | struct p_uuids *p = pi->data; |
b411b363 | 4180 | u64 *p_uuid; |
62b0da3a | 4181 | int i, updated_uuids = 0; |
b411b363 | 4182 | |
9f4fe9ad AG |
4183 | peer_device = conn_peer_device(connection, pi->vnr); |
4184 | if (!peer_device) | |
bde89a9e | 4185 | return config_unknown_volume(connection, pi); |
9f4fe9ad | 4186 | device = peer_device->device; |
4a76b161 | 4187 | |
b411b363 | 4188 | p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); |
063eacf8 | 4189 | if (!p_uuid) { |
d0180171 | 4190 | drbd_err(device, "kmalloc of p_uuid failed\n"); |
063eacf8 JW |
4191 | return -ENOMEM;
4192 | } | |
b411b363 PR |
4193 | |
4194 | for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) | |
4195 | p_uuid[i] = be64_to_cpu(p->uuid[i]); | |
4196 | ||
b30ab791 AG |
4197 | kfree(device->p_uuid); |
4198 | device->p_uuid = p_uuid; | |
b411b363 | 4199 | |
b30ab791 AG |
4200 | if (device->state.conn < C_CONNECTED && |
4201 | device->state.disk < D_INCONSISTENT && | |
4202 | device->state.role == R_PRIMARY && | |
4203 | (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { | |
d0180171 | 4204 | drbd_err(device, "Can only connect to data with current UUID=%016llX\n", |
b30ab791 | 4205 | (unsigned long long)device->ed_uuid); |
9f4fe9ad | 4206 | conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); |
82bc0194 | 4207 | return -EIO; |
b411b363 PR |
4208 | } |
4209 | ||
b30ab791 | 4210 | if (get_ldev(device)) { |
b411b363 | 4211 | int skip_initial_sync = |
b30ab791 | 4212 | device->state.conn == C_CONNECTED && |
9f4fe9ad | 4213 | peer_device->connection->agreed_pro_version >= 90 && |
b30ab791 | 4214 | device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && |
b411b363 PR |
4215 | (p_uuid[UI_FLAGS] & 8); |
4216 | if (skip_initial_sync) { | |
d0180171 | 4217 | drbd_info(device, "Accepted new current UUID, preparing to skip initial sync\n"); |
b30ab791 | 4218 | drbd_bitmap_io(device, &drbd_bmio_clear_n_write, |
20ceb2b2 LE |
4219 | "clear_n_write from receive_uuids", |
4220 | BM_LOCKED_TEST_ALLOWED); | |
b30ab791 AG |
4221 | _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]); |
4222 | _drbd_uuid_set(device, UI_BITMAP, 0); | |
4223 | _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | |
b411b363 | 4224 | CS_VERBOSE, NULL); |
b30ab791 | 4225 | drbd_md_sync(device); |
62b0da3a | 4226 | updated_uuids = 1; |
b411b363 | 4227 | } |
b30ab791 AG |
4228 | put_ldev(device); |
4229 | } else if (device->state.disk < D_INCONSISTENT && | |
4230 | device->state.role == R_PRIMARY) { | |
18a50fa2 PR |
4231 | /* I am a diskless primary, the peer just created a new current UUID |
4232 | for me. */ | |
b30ab791 | 4233 | updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); |
b411b363 PR |
4234 | } |
4235 | ||
4236 | /* Before we test for the disk state, we should wait until a possibly
4237 | ongoing cluster wide state change is finished. That is important if | |
4238 | we are primary and are detaching from our disk. We need to see the | |
4239 | new disk state... */ | |
b30ab791 AG |
4240 | mutex_lock(device->state_mutex); |
4241 | mutex_unlock(device->state_mutex); | |
4242 | if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT) | |
4243 | updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]); | |
62b0da3a LE |
4244 | |
4245 | if (updated_uuids) | |
b30ab791 | 4246 | drbd_print_uuids(device, "receiver updated UUIDs to"); |
b411b363 | 4247 | |
82bc0194 | 4248 | return 0; |
b411b363 PR |
4249 | } |
4250 | ||
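/*
 * Note on the ed_uuid comparison above: bit 0 of a DRBD UUID is used as a
 * flag and is not part of the data generation identifier, hence both sides
 * are masked with ~((u64)1) first. E.g. current UUIDs ending in ...54 and
 * ...55 name the same generation and do not refuse the connect.
 */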
4251 | /** | |
4252 | * convert_state() - Converts the peer's view of the cluster state to our point of view | |
4253 | * @ps: The state as seen by the peer. | |
4254 | */ | |
4255 | static union drbd_state convert_state(union drbd_state ps) | |
4256 | { | |
4257 | union drbd_state ms; | |
4258 | ||
4259 | static enum drbd_conns c_tab[] = { | |
369bea63 | 4260 | [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS, |
b411b363 PR |
4261 | [C_CONNECTED] = C_CONNECTED, |
4262 | ||
4263 | [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, | |
4264 | [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, | |
4265 | [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ | |
4266 | [C_VERIFY_S] = C_VERIFY_T, | |
4267 | [C_MASK] = C_MASK, | |
4268 | }; | |
4269 | ||
4270 | ms.i = ps.i; | |
4271 | ||
4272 | ms.conn = c_tab[ps.conn]; | |
4273 | ms.peer = ps.role; | |
4274 | ms.role = ps.peer; | |
4275 | ms.pdsk = ps.disk; | |
4276 | ms.disk = ps.pdsk; | |
4277 | ms.peer_isp = (ps.aftr_isp | ps.user_isp); | |
4278 | ||
4279 | return ms; | |
4280 | } | |
4281 | ||
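/*
 * Concrete example for convert_state() (illustration only): if the peer
 * reports "I am Primary with an UpToDate disk, you are Secondary and your
 * disk is Consistent", the mirrored view swaps both pairs:
 *
 *	peer's ps: role=R_PRIMARY   peer=R_SECONDARY disk=D_UP_TO_DATE pdsk=D_CONSISTENT
 *	our ms:    role=R_SECONDARY peer=R_PRIMARY   disk=D_CONSISTENT pdsk=D_UP_TO_DATE
 *
 * while the connection state is mapped through c_tab[], e.g. the peer's
 * C_STARTING_SYNC_S becomes our C_STARTING_SYNC_T.
 */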
bde89a9e | 4282 | static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4283 | { |
9f4fe9ad | 4284 | struct drbd_peer_device *peer_device; |
b30ab791 | 4285 | struct drbd_device *device; |
e658983a | 4286 | struct p_req_state *p = pi->data; |
b411b363 | 4287 | union drbd_state mask, val; |
bf885f8a | 4288 | enum drbd_state_rv rv; |
b411b363 | 4289 | |
9f4fe9ad AG |
4290 | peer_device = conn_peer_device(connection, pi->vnr); |
4291 | if (!peer_device) | |
4a76b161 | 4292 | return -EIO; |
9f4fe9ad | 4293 | device = peer_device->device; |
4a76b161 | 4294 | |
b411b363 PR |
4295 | mask.i = be32_to_cpu(p->mask); |
4296 | val.i = be32_to_cpu(p->val); | |
4297 | ||
9f4fe9ad | 4298 | if (test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) && |
b30ab791 | 4299 | mutex_is_locked(device->state_mutex)) { |
69a22773 | 4300 | drbd_send_sr_reply(peer_device, SS_CONCURRENT_ST_CHG); |
82bc0194 | 4301 | return 0; |
b411b363 PR |
4302 | } |
4303 | ||
4304 | mask = convert_state(mask); | |
4305 | val = convert_state(val); | |
4306 | ||
b30ab791 | 4307 | rv = drbd_change_state(device, CS_VERBOSE, mask, val); |
69a22773 | 4308 | drbd_send_sr_reply(peer_device, rv); |
b411b363 | 4309 | |
b30ab791 | 4310 | drbd_md_sync(device); |
b411b363 | 4311 | |
82bc0194 | 4312 | return 0; |
b411b363 PR |
4313 | } |
4314 | ||
bde89a9e | 4315 | static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4316 | { |
e658983a | 4317 | struct p_req_state *p = pi->data; |
b411b363 | 4318 | union drbd_state mask, val; |
bf885f8a | 4319 | enum drbd_state_rv rv; |
b411b363 | 4320 | |
b411b363 PR |
4321 | mask.i = be32_to_cpu(p->mask); |
4322 | val.i = be32_to_cpu(p->val); | |
4323 | ||
bde89a9e AG |
4324 | if (test_bit(RESOLVE_CONFLICTS, &connection->flags) && |
4325 | mutex_is_locked(&connection->cstate_mutex)) { | |
4326 | conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG); | |
82bc0194 | 4327 | return 0; |
b411b363 PR |
4328 | } |
4329 | ||
4330 | mask = convert_state(mask); | |
4331 | val = convert_state(val); | |
4332 | ||
bde89a9e AG |
4333 | rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL); |
4334 | conn_send_sr_reply(connection, rv); | |
b411b363 | 4335 | |
82bc0194 | 4336 | return 0; |
b411b363 PR |
4337 | } |
4338 | ||
bde89a9e | 4339 | static int receive_state(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4340 | { |
9f4fe9ad | 4341 | struct drbd_peer_device *peer_device; |
b30ab791 | 4342 | struct drbd_device *device; |
e658983a | 4343 | struct p_state *p = pi->data; |
4ac4aada | 4344 | union drbd_state os, ns, peer_state; |
b411b363 | 4345 | enum drbd_disk_state real_peer_disk; |
65d922c3 | 4346 | enum chg_state_flags cs_flags; |
b411b363 PR |
4347 | int rv; |
4348 | ||
9f4fe9ad AG |
4349 | peer_device = conn_peer_device(connection, pi->vnr); |
4350 | if (!peer_device) | |
bde89a9e | 4351 | return config_unknown_volume(connection, pi); |
9f4fe9ad | 4352 | device = peer_device->device; |
4a76b161 | 4353 | |
b411b363 PR |
4354 | peer_state.i = be32_to_cpu(p->state); |
4355 | ||
4356 | real_peer_disk = peer_state.disk; | |
4357 | if (peer_state.disk == D_NEGOTIATING) { | |
b30ab791 | 4358 | real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; |
d0180171 | 4359 | drbd_info(device, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); |
b411b363 PR |
4360 | } |
4361 | ||
0500813f | 4362 | spin_lock_irq(&device->resource->req_lock); |
b411b363 | 4363 | retry: |
b30ab791 | 4364 | os = ns = drbd_read_state(device); |
0500813f | 4365 | spin_unlock_irq(&device->resource->req_lock); |
b411b363 | 4366 | |
668700b4 | 4367 | /* If some other part of the code (ack_receiver thread, timeout) |
545752d5 LE |
4368 | * already decided to close the connection again, |
4369 | * we must not "re-establish" it here. */ | |
4370 | if (os.conn <= C_TEAR_DOWN) | |
58ffa580 | 4371 | return -ECONNRESET; |
545752d5 | 4372 | |
40424e4a LE |
4373 | /* If this is the "end of sync" confirmation, usually the peer disk |
4374 | * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits | |
4375 | * set) resync started in PausedSyncT, or if the timing of pause-/ | |
4376 | * unpause-sync events has been "just right", the peer disk may | |
4377 | * transition from D_CONSISTENT to D_UP_TO_DATE as well. | |
4378 | */ | |
4379 | if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) && | |
4380 | real_peer_disk == D_UP_TO_DATE && | |
e9ef7bb6 LE |
4381 | os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { |
4382 | /* If we are (becoming) SyncSource, but peer is still in sync | |
4383 | * preparation, ignore its uptodate-ness to avoid flapping, it | |
4384 | * will change to inconsistent once the peer reaches active | |
4385 | * syncing states. | |
4386 | * It may have changed syncer-paused flags, however, so we | |
4387 | * cannot ignore this completely. */ | |
4388 | if (peer_state.conn > C_CONNECTED && | |
4389 | peer_state.conn < C_SYNC_SOURCE) | |
4390 | real_peer_disk = D_INCONSISTENT; | |
4391 | ||
4392 | /* if peer_state changes to connected at the same time, | |
4393 | * it explicitly notifies us that it finished resync. | |
4394 | * Maybe we should finish it up, too? */ | |
4395 | else if (os.conn >= C_SYNC_SOURCE && | |
4396 | peer_state.conn == C_CONNECTED) { | |
b30ab791 AG |
4397 | if (drbd_bm_total_weight(device) <= device->rs_failed) |
4398 | drbd_resync_finished(device); | |
82bc0194 | 4399 | return 0; |
e9ef7bb6 LE |
4400 | } |
4401 | } | |
4402 | ||
02b91b55 LE |
4403 | /* explicit verify finished notification, stop sector reached. */ |
4404 | if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE && | |
4405 | peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) { | |
b30ab791 AG |
4406 | ov_out_of_sync_print(device); |
4407 | drbd_resync_finished(device); | |
58ffa580 | 4408 | return 0; |
02b91b55 LE |
4409 | } |
4410 | ||
e9ef7bb6 LE |
4411 | /* peer says his disk is inconsistent, while we think it is uptodate, |
4412 | * and this happens while the peer still thinks we have a sync going on, | |
4413 | * but we think we are already done with the sync. | |
4414 | * We ignore this to avoid flapping pdsk. | |
4415 | * This should not happen, if the peer is a recent version of drbd. */ | |
4416 | if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && | |
4417 | os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) | |
4418 | real_peer_disk = D_UP_TO_DATE; | |
4419 | ||
4ac4aada LE |
4420 | if (ns.conn == C_WF_REPORT_PARAMS) |
4421 | ns.conn = C_CONNECTED; | |
b411b363 | 4422 | |
67531718 PR |
4423 | if (peer_state.conn == C_AHEAD) |
4424 | ns.conn = C_BEHIND; | |
4425 | ||
b30ab791 AG |
4426 | if (device->p_uuid && peer_state.disk >= D_NEGOTIATING && |
4427 | get_ldev_if_state(device, D_NEGOTIATING)) { | |
b411b363 PR |
4428 | int cr; /* consider resync */ |
4429 | ||
4430 | /* if we established a new connection */ | |
4ac4aada | 4431 | cr = (os.conn < C_CONNECTED); |
b411b363 PR |
4432 | /* if we had an established connection |
4433 | * and one of the nodes newly attaches a disk */ | |
4ac4aada | 4434 | cr |= (os.conn == C_CONNECTED && |
b411b363 | 4435 | (peer_state.disk == D_NEGOTIATING || |
4ac4aada | 4436 | os.disk == D_NEGOTIATING)); |
b411b363 PR |
4437 | /* if we have both been inconsistent, and the peer has been |
4438 | * forced to be UpToDate with --overwrite-data */ | |
b30ab791 | 4439 | cr |= test_bit(CONSIDER_RESYNC, &device->flags); |
b411b363 PR |
4440 | /* if we had been plain connected, and the admin requested to |
4441 | * start a sync by "invalidate" or "invalidate-remote" */ | |
4ac4aada | 4442 | cr |= (os.conn == C_CONNECTED && |
b411b363 PR |
4443 | (peer_state.conn >= C_STARTING_SYNC_S && |
4444 | peer_state.conn <= C_WF_BITMAP_T)); | |
4445 | ||
4446 | if (cr) | |
69a22773 | 4447 | ns.conn = drbd_sync_handshake(peer_device, peer_state.role, real_peer_disk); |
b411b363 | 4448 | |
b30ab791 | 4449 | put_ldev(device); |
4ac4aada LE |
4450 | if (ns.conn == C_MASK) { |
4451 | ns.conn = C_CONNECTED; | |
b30ab791 AG |
4452 | if (device->state.disk == D_NEGOTIATING) { |
4453 | drbd_force_state(device, NS(disk, D_FAILED)); | |
b411b363 | 4454 | } else if (peer_state.disk == D_NEGOTIATING) { |
d0180171 | 4455 | drbd_err(device, "Disk attach process on the peer node was aborted.\n"); |
b411b363 | 4456 | peer_state.disk = D_DISKLESS; |
580b9767 | 4457 | real_peer_disk = D_DISKLESS; |
b411b363 | 4458 | } else { |
9f4fe9ad | 4459 | if (test_and_clear_bit(CONN_DRY_RUN, &peer_device->connection->flags)) |
82bc0194 | 4460 | return -EIO; |
0b0ba1ef | 4461 | D_ASSERT(device, os.conn == C_WF_REPORT_PARAMS); |
9f4fe9ad | 4462 | conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); |
82bc0194 | 4463 | return -EIO; |
b411b363 PR |
4464 | } |
4465 | } | |
4466 | } | |
4467 | ||
0500813f | 4468 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 | 4469 | if (os.i != drbd_read_state(device).i) |
b411b363 | 4470 | goto retry; |
b30ab791 | 4471 | clear_bit(CONSIDER_RESYNC, &device->flags); |
b411b363 PR |
4472 | ns.peer = peer_state.role; |
4473 | ns.pdsk = real_peer_disk; | |
4474 | ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); | |
4ac4aada | 4475 | if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) |
b30ab791 | 4476 | ns.disk = device->new_state_tmp.disk; |
4ac4aada | 4477 | cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD); |
b30ab791 AG |
4478 | if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && |
4479 | test_bit(NEW_CUR_UUID, &device->flags)) { | |
8554df1c | 4480 | /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this |
481c6f50 | 4481 | for temporary network outages! */
0500813f | 4482 | spin_unlock_irq(&device->resource->req_lock); |
d0180171 | 4483 | drbd_err(device, "Aborting Connect, cannot thaw IO with a peer that is only Consistent\n");
9f4fe9ad | 4484 | tl_clear(peer_device->connection); |
b30ab791 AG |
4485 | drbd_uuid_new_current(device); |
4486 | clear_bit(NEW_CUR_UUID, &device->flags); | |
9f4fe9ad | 4487 | conn_request_state(peer_device->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); |
82bc0194 | 4488 | return -EIO; |
481c6f50 | 4489 | } |
b30ab791 AG |
4490 | rv = _drbd_set_state(device, ns, cs_flags, NULL); |
4491 | ns = drbd_read_state(device); | |
0500813f | 4492 | spin_unlock_irq(&device->resource->req_lock); |
b411b363 PR |
4493 | |
4494 | if (rv < SS_SUCCESS) { | |
9f4fe9ad | 4495 | conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD); |
82bc0194 | 4496 | return -EIO; |
b411b363 PR |
4497 | } |
4498 | ||
4ac4aada LE |
4499 | if (os.conn > C_WF_REPORT_PARAMS) { |
4500 | if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED && | |
b411b363 PR |
4501 | peer_state.disk != D_NEGOTIATING ) { |
4502 | /* we want resync, peer has not yet decided to sync... */ | |
4503 | /* Nowadays only used when forcing a node into primary role and | |
4504 | setting its disk to UpToDate with that */ | |
69a22773 AG |
4505 | drbd_send_uuids(peer_device); |
4506 | drbd_send_current_state(peer_device); | |
b411b363 PR |
4507 | } |
4508 | } | |
4509 | ||
b30ab791 | 4510 | clear_bit(DISCARD_MY_DATA, &device->flags); |
b411b363 | 4511 | |
b30ab791 | 4512 | drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */ |
b411b363 | 4513 | |
82bc0194 | 4514 | return 0; |
b411b363 PR |
4515 | } |
4516 | ||
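/*
 * Note on the retry: label above: receive_state() computes ns without
 * holding req_lock, since drbd_sync_handshake() may sleep. Before
 * committing through _drbd_set_state() it re-takes the lock and verifies
 * that os.i still equals drbd_read_state(device).i; if another context
 * changed the state in the meantime, the computation is simply redone.
 */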
bde89a9e | 4517 | static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4518 | { |
9f4fe9ad | 4519 | struct drbd_peer_device *peer_device; |
b30ab791 | 4520 | struct drbd_device *device; |
e658983a | 4521 | struct p_rs_uuid *p = pi->data; |
4a76b161 | 4522 | |
9f4fe9ad AG |
4523 | peer_device = conn_peer_device(connection, pi->vnr); |
4524 | if (!peer_device) | |
4a76b161 | 4525 | return -EIO; |
9f4fe9ad | 4526 | device = peer_device->device; |
b411b363 | 4527 | |
b30ab791 AG |
4528 | wait_event(device->misc_wait, |
4529 | device->state.conn == C_WF_SYNC_UUID || | |
4530 | device->state.conn == C_BEHIND || | |
4531 | device->state.conn < C_CONNECTED || | |
4532 | device->state.disk < D_NEGOTIATING); | |
b411b363 | 4533 | |
0b0ba1ef | 4534 | /* D_ASSERT(device, device->state.conn == C_WF_SYNC_UUID ); */ |
b411b363 | 4535 | |
b411b363 PR |
4536 | /* Here the _drbd_uuid_ functions are right, current should |
4537 | _not_ be rotated into the history */ | |
b30ab791 AG |
4538 | if (get_ldev_if_state(device, D_NEGOTIATING)) { |
4539 | _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid)); | |
4540 | _drbd_uuid_set(device, UI_BITMAP, 0UL); | |
b411b363 | 4541 | |
b30ab791 AG |
4542 | drbd_print_uuids(device, "updated sync uuid"); |
4543 | drbd_start_resync(device, C_SYNC_TARGET); | |
b411b363 | 4544 | |
b30ab791 | 4545 | put_ldev(device); |
b411b363 | 4546 | } else |
d0180171 | 4547 | drbd_err(device, "Ignoring SyncUUID packet!\n"); |
b411b363 | 4548 | |
82bc0194 | 4549 | return 0; |
b411b363 PR |
4550 | } |
4551 | ||
2c46407d AG |
4552 | /** |
4553 | * receive_bitmap_plain | |
4554 | * | |
4555 | * Return 0 when done, 1 when another iteration is needed, and a negative error | |
4556 | * code upon failure. | |
4557 | */ | |
4558 | static int | |
69a22773 | 4559 | receive_bitmap_plain(struct drbd_peer_device *peer_device, unsigned int size, |
e658983a | 4560 | unsigned long *p, struct bm_xfer_ctx *c) |
b411b363 | 4561 | { |
50d0b1ad | 4562 | unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - |
69a22773 | 4563 | drbd_header_size(peer_device->connection); |
e658983a | 4564 | unsigned int num_words = min_t(size_t, data_size / sizeof(*p), |
50d0b1ad | 4565 | c->bm_words - c->word_offset); |
e658983a | 4566 | unsigned int want = num_words * sizeof(*p); |
2c46407d | 4567 | int err; |
b411b363 | 4568 | |
50d0b1ad | 4569 | if (want != size) { |
69a22773 | 4570 | drbd_err(peer_device, "%s:want (%u) != size (%u)\n", __func__, want, size); |
2c46407d | 4571 | return -EIO; |
b411b363 PR |
4572 | } |
4573 | if (want == 0) | |
2c46407d | 4574 | return 0; |
69a22773 | 4575 | err = drbd_recv_all(peer_device->connection, p, want); |
82bc0194 | 4576 | if (err) |
2c46407d | 4577 | return err; |
b411b363 | 4578 | |
69a22773 | 4579 | drbd_bm_merge_lel(peer_device->device, c->word_offset, num_words, p); |
b411b363 PR |
4580 | |
4581 | c->word_offset += num_words; | |
4582 | c->bit_offset = c->word_offset * BITS_PER_LONG; | |
4583 | if (c->bit_offset > c->bm_bits) | |
4584 | c->bit_offset = c->bm_bits; | |
4585 | ||
2c46407d | 4586 | return 1; |
b411b363 PR |
4587 | } |
4588 | ||
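/*
 * Worked example for the sizing above (header size illustrative): with the
 * 4096 byte socket buffer, a 16 byte header and 8 byte longs, data_size is
 * 4080 and num_words is at most 510 per packet; the final packet of a
 * transfer carries only the c->bm_words - c->word_offset words still
 * missing, so "want" shrinks accordingly.
 */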
a02d1240 AG |
4589 | static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p) |
4590 | { | |
4591 | return (enum drbd_bitmap_code)(p->encoding & 0x0f); | |
4592 | } | |
4593 | ||
4594 | static int dcbp_get_start(struct p_compressed_bm *p) | |
4595 | { | |
4596 | return (p->encoding & 0x80) != 0; | |
4597 | } | |
4598 | ||
4599 | static int dcbp_get_pad_bits(struct p_compressed_bm *p) | |
4600 | { | |
4601 | return (p->encoding >> 4) & 0x7; | |
4602 | } | |
4603 | ||
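/*
 * The three helpers above unpack the p_compressed_bm encoding byte, laid
 * out as: bit 7 = start toggle (does the stream begin with a run of set
 * bits?), bits 6-4 = number of pad bits at the end of the bit stream,
 * bits 3-0 = the drbd_bitmap_code. E.g. 0x95 = 1001 0101b decodes to
 * start = 1, pad bits = 1, code = 5.
 */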
2c46407d AG |
4604 | /** |
4605 | * recv_bm_rle_bits | |
4606 | * | |
4607 | * Return 0 when done, 1 when another iteration is needed, and a negative error | |
4608 | * code upon failure. | |
4609 | */ | |
4610 | static int | |
69a22773 | 4611 | recv_bm_rle_bits(struct drbd_peer_device *peer_device, |
b411b363 | 4612 | struct p_compressed_bm *p, |
c6d25cfe PR |
4613 | struct bm_xfer_ctx *c, |
4614 | unsigned int len) | |
b411b363 PR |
4615 | { |
4616 | struct bitstream bs; | |
4617 | u64 look_ahead; | |
4618 | u64 rl; | |
4619 | u64 tmp; | |
4620 | unsigned long s = c->bit_offset; | |
4621 | unsigned long e; | |
a02d1240 | 4622 | int toggle = dcbp_get_start(p); |
b411b363 PR |
4623 | int have; |
4624 | int bits; | |
4625 | ||
a02d1240 | 4626 | bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p)); |
b411b363 PR |
4627 | |
4628 | bits = bitstream_get_bits(&bs, &look_ahead, 64); | |
4629 | if (bits < 0) | |
2c46407d | 4630 | return -EIO; |
b411b363 PR |
4631 | |
4632 | for (have = bits; have > 0; s += rl, toggle = !toggle) { | |
4633 | bits = vli_decode_bits(&rl, look_ahead); | |
4634 | if (bits <= 0) | |
2c46407d | 4635 | return -EIO; |
b411b363 PR |
4636 | |
4637 | if (toggle) { | |
4638 | e = s + rl -1; | |
4639 | if (e >= c->bm_bits) { | |
69a22773 | 4640 | drbd_err(peer_device, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); |
2c46407d | 4641 | return -EIO; |
b411b363 | 4642 | } |
69a22773 | 4643 | _drbd_bm_set_bits(peer_device->device, s, e); |
b411b363 PR |
4644 | } |
4645 | ||
4646 | if (have < bits) { | |
69a22773 | 4647 | drbd_err(peer_device, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", |
b411b363 PR |
4648 | have, bits, look_ahead, |
4649 | (unsigned int)(bs.cur.b - p->code), | |
4650 | (unsigned int)bs.buf_len); | |
2c46407d | 4651 | return -EIO; |
b411b363 | 4652 | } |
d2da5b0c LE |
4653 | /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */ |
4654 | if (likely(bits < 64)) | |
4655 | look_ahead >>= bits; | |
4656 | else | |
4657 | look_ahead = 0; | |
b411b363 PR |
4658 | have -= bits; |
4659 | ||
4660 | bits = bitstream_get_bits(&bs, &tmp, 64 - have); | |
4661 | if (bits < 0) | |
2c46407d | 4662 | return -EIO; |
b411b363 PR |
4663 | look_ahead |= tmp << have; |
4664 | have += bits; | |
4665 | } | |
4666 | ||
4667 | c->bit_offset = s; | |
4668 | bm_xfer_ctx_bit_to_word_offset(c); | |
4669 | ||
2c46407d | 4670 | return (s != c->bm_bits); |
b411b363 PR |
4671 | } |
4672 | ||
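/*
 * Worked example for the decode loop above: runs alternate between cleared
 * and set bits, beginning as indicated by the start toggle. With toggle
 * initially 0, bit_offset 0 and decoded run lengths 5, 3 and 7, the stream
 * means "5 clear, 3 set, 7 clear": only bits 5..7 (s = 5, e = 5 + 3 - 1)
 * are merged into the bitmap via _drbd_bm_set_bits().
 */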
2c46407d AG |
4673 | /** |
4674 | * decode_bitmap_c | |
4675 | * | |
4676 | * Return 0 when done, 1 when another iteration is needed, and a negative error | |
4677 | * code upon failure. | |
4678 | */ | |
4679 | static int | |
69a22773 | 4680 | decode_bitmap_c(struct drbd_peer_device *peer_device, |
b411b363 | 4681 | struct p_compressed_bm *p, |
c6d25cfe PR |
4682 | struct bm_xfer_ctx *c, |
4683 | unsigned int len) | |
b411b363 | 4684 | { |
a02d1240 | 4685 | if (dcbp_get_code(p) == RLE_VLI_Bits) |
69a22773 | 4686 | return recv_bm_rle_bits(peer_device, p, c, len - sizeof(*p)); |
b411b363 PR |
4687 | |
4688 | /* other variants had been implemented for evaluation, | |
4689 | * but have been dropped as this one turned out to be "best" | |
4690 | * during all our tests. */ | |
4691 | ||
69a22773 AG |
4692 | drbd_err(peer_device, "receive_bitmap_c: unknown encoding %u\n", p->encoding); |
4693 | conn_request_state(peer_device->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); | |
2c46407d | 4694 | return -EIO; |
b411b363 PR |
4695 | } |
4696 | ||
b30ab791 | 4697 | void INFO_bm_xfer_stats(struct drbd_device *device, |
b411b363 PR |
4698 | const char *direction, struct bm_xfer_ctx *c) |
4699 | { | |
4700 | /* what would it take to transfer it "plaintext" */ | |
a6b32bc3 | 4701 | unsigned int header_size = drbd_header_size(first_peer_device(device)->connection); |
50d0b1ad AG |
4702 | unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; |
4703 | unsigned int plain = | |
4704 | header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + | |
4705 | c->bm_words * sizeof(unsigned long); | |
4706 | unsigned int total = c->bytes[0] + c->bytes[1]; | |
4707 | unsigned int r; | |
b411b363 PR |
4708 | |
4709 | /* total cannot be zero, but just in case: */
4710 | if (total == 0) | |
4711 | return; | |
4712 | ||
4713 | /* don't report if not compressed */ | |
4714 | if (total >= plain) | |
4715 | return; | |
4716 | ||
4717 | /* total < plain. check for overflow, still */ | |
4718 | r = (total > UINT_MAX/1000) ? (total / (plain/1000)) | |
4719 | : (1000 * total / plain); | |
4720 | ||
4721 | if (r > 1000) | |
4722 | r = 1000; | |
4723 | ||
4724 | r = 1000 - r; | |
d0180171 | 4725 | drbd_info(device, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " |
b411b363 PR |
4726 | "total %u; compression: %u.%u%%\n", |
4727 | direction, | |
4728 | c->bytes[1], c->packets[1], | |
4729 | c->bytes[0], c->packets[0], | |
4730 | total, r/10, r % 10); | |
4731 | } | |
4732 | ||
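/*
 * Worked example for the per-mille math above: plain = 200000 bytes and
 * total = 15000 give r = 1000 * 15000 / 200000 = 75, then r = 1000 - 75,
 * printed as "compression: 92.5%".
 */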
4733 | /* Since we are processing the bitfield from lower addresses to higher, | |
4734 | it does not matter if we process it in 32 bit chunks or 64 bit
4735 | chunks as long as it is little endian. (Understand it as byte stream, | |
4736 | beginning with the lowest byte...) If we would use big endian | |
4737 | we would need to process it from the highest address to the lowest, | |
4738 | in order to be agnostic to the 32 vs 64 bits issue. | |
4739 | ||
4740 | returns 0 on success, a negative error code otherwise. */
bde89a9e | 4741 | static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4742 | { |
9f4fe9ad | 4743 | struct drbd_peer_device *peer_device; |
b30ab791 | 4744 | struct drbd_device *device; |
b411b363 | 4745 | struct bm_xfer_ctx c; |
2c46407d | 4746 | int err; |
4a76b161 | 4747 | |
9f4fe9ad AG |
4748 | peer_device = conn_peer_device(connection, pi->vnr); |
4749 | if (!peer_device) | |
4a76b161 | 4750 | return -EIO; |
9f4fe9ad | 4751 | device = peer_device->device; |
b411b363 | 4752 | |
b30ab791 | 4753 | drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED); |
20ceb2b2 LE |
4754 | /* you are supposed to send additional out-of-sync information |
4755 | * if you actually set bits during this phase */ | |
b411b363 | 4756 | |
b411b363 | 4757 | c = (struct bm_xfer_ctx) { |
b30ab791 AG |
4758 | .bm_bits = drbd_bm_bits(device), |
4759 | .bm_words = drbd_bm_words(device), | |
b411b363 PR |
4760 | }; |
4761 | ||
2c46407d | 4762 | for(;;) { |
e658983a | 4763 | if (pi->cmd == P_BITMAP) |
69a22773 | 4764 | err = receive_bitmap_plain(peer_device, pi->size, pi->data, &c); |
e658983a | 4765 | else if (pi->cmd == P_COMPRESSED_BITMAP) { |
b411b363 PR |
4766 | /* MAYBE: sanity check that we speak proto >= 90, |
4767 | * and the feature is enabled! */ | |
e658983a | 4768 | struct p_compressed_bm *p = pi->data; |
b411b363 | 4769 | |
bde89a9e | 4770 | if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) { |
d0180171 | 4771 | drbd_err(device, "ReportCBitmap packet too large\n"); |
82bc0194 | 4772 | err = -EIO; |
b411b363 PR |
4773 | goto out; |
4774 | } | |
e658983a | 4775 | if (pi->size <= sizeof(*p)) { |
d0180171 | 4776 | drbd_err(device, "ReportCBitmap packet too small (l:%u)\n", pi->size); |
82bc0194 | 4777 | err = -EIO; |
78fcbdae | 4778 | goto out; |
b411b363 | 4779 | } |
9f4fe9ad | 4780 | err = drbd_recv_all(peer_device->connection, p, pi->size); |
e658983a AG |
4781 | if (err) |
4782 | goto out; | |
69a22773 | 4783 | err = decode_bitmap_c(peer_device, p, &c, pi->size); |
b411b363 | 4784 | } else { |
d0180171 | 4785 | drbd_warn(device, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
82bc0194 | 4786 | err = -EIO; |
b411b363 PR |
4787 | goto out; |
4788 | } | |
4789 | ||
e2857216 | 4790 | c.packets[pi->cmd == P_BITMAP]++; |
bde89a9e | 4791 | c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size; |
b411b363 | 4792 | |
2c46407d AG |
4793 | if (err <= 0) { |
4794 | if (err < 0) | |
4795 | goto out; | |
b411b363 | 4796 | break; |
2c46407d | 4797 | } |
9f4fe9ad | 4798 | err = drbd_recv_header(peer_device->connection, pi); |
82bc0194 | 4799 | if (err) |
b411b363 | 4800 | goto out; |
2c46407d | 4801 | } |
b411b363 | 4802 | |
b30ab791 | 4803 | INFO_bm_xfer_stats(device, "receive", &c); |
b411b363 | 4804 | |
b30ab791 | 4805 | if (device->state.conn == C_WF_BITMAP_T) { |
de1f8e4a AG |
4806 | enum drbd_state_rv rv; |
4807 | ||
b30ab791 | 4808 | err = drbd_send_bitmap(device); |
82bc0194 | 4809 | if (err) |
b411b363 PR |
4810 | goto out; |
4811 | /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ | |
b30ab791 | 4812 | rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); |
0b0ba1ef | 4813 | D_ASSERT(device, rv == SS_SUCCESS); |
b30ab791 | 4814 | } else if (device->state.conn != C_WF_BITMAP_S) { |
b411b363 PR |
4815 | /* admin may have requested C_DISCONNECTING, |
4816 | * other threads may have noticed network errors */ | |
d0180171 | 4817 | drbd_info(device, "unexpected cstate (%s) in receive_bitmap\n", |
b30ab791 | 4818 | drbd_conn_str(device->state.conn)); |
b411b363 | 4819 | } |
82bc0194 | 4820 | err = 0; |
b411b363 | 4821 | |
b411b363 | 4822 | out: |
b30ab791 AG |
4823 | drbd_bm_unlock(device); |
4824 | if (!err && device->state.conn == C_WF_BITMAP_S) | |
4825 | drbd_start_resync(device, C_SYNC_SOURCE); | |
82bc0194 | 4826 | return err; |
b411b363 PR |
4827 | } |
4828 | ||
bde89a9e | 4829 | static int receive_skip(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 4830 | { |
1ec861eb | 4831 | drbd_warn(connection, "skipping unknown optional packet type %d, l: %d!\n", |
e2857216 | 4832 | pi->cmd, pi->size); |
b411b363 | 4833 | |
bde89a9e | 4834 | return ignore_remaining_packet(connection, pi); |
b411b363 PR |
4835 | } |
4836 | ||
bde89a9e | 4837 | static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi) |
0ced55a3 | 4838 | { |
e7f52dfb LE |
4839 | /* Make sure we've acked all the TCP data associated |
4840 | * with the data requests being unplugged */ | |
bde89a9e | 4841 | drbd_tcp_quickack(connection->data.socket); |
0ced55a3 | 4842 | |
82bc0194 | 4843 | return 0; |
0ced55a3 PR |
4844 | } |
4845 | ||
bde89a9e | 4846 | static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi) |
73a01a18 | 4847 | { |
9f4fe9ad | 4848 | struct drbd_peer_device *peer_device; |
b30ab791 | 4849 | struct drbd_device *device; |
e658983a | 4850 | struct p_block_desc *p = pi->data; |
4a76b161 | 4851 | |
9f4fe9ad AG |
4852 | peer_device = conn_peer_device(connection, pi->vnr); |
4853 | if (!peer_device) | |
4a76b161 | 4854 | return -EIO; |
9f4fe9ad | 4855 | device = peer_device->device; |
73a01a18 | 4856 | |
b30ab791 | 4857 | switch (device->state.conn) { |
f735e363 LE |
4858 | case C_WF_SYNC_UUID: |
4859 | case C_WF_BITMAP_T: | |
4860 | case C_BEHIND: | |
4861 | break; | |
4862 | default: | |
d0180171 | 4863 | drbd_err(device, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", |
b30ab791 | 4864 | drbd_conn_str(device->state.conn)); |
f735e363 LE |
4865 | } |
4866 | ||
b30ab791 | 4867 | drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); |
73a01a18 | 4868 | |
82bc0194 | 4869 | return 0; |
73a01a18 PR |
4870 | } |
4871 | ||
700ca8c0 PR |
4872 | static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi) |
4873 | { | |
4874 | struct drbd_peer_device *peer_device; | |
4875 | struct p_block_desc *p = pi->data; | |
4876 | struct drbd_device *device; | |
4877 | sector_t sector; | |
4878 | int size, err = 0; | |
4879 | ||
4880 | peer_device = conn_peer_device(connection, pi->vnr); | |
4881 | if (!peer_device) | |
4882 | return -EIO; | |
4883 | device = peer_device->device; | |
4884 | ||
4885 | sector = be64_to_cpu(p->sector); | |
4886 | size = be32_to_cpu(p->blksize); | |
4887 | ||
4888 | dec_rs_pending(device); | |
4889 | ||
4890 | if (get_ldev(device)) { | |
4891 | struct drbd_peer_request *peer_req; | |
4892 | const int op = REQ_OP_DISCARD; | |
4893 | ||
4894 | peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, | |
9104d31a | 4895 | size, 0, GFP_NOIO); |
700ca8c0 PR |
4896 | if (!peer_req) { |
4897 | put_ldev(device); | |
4898 | return -ENOMEM; | |
4899 | } | |
4900 | ||
4901 | peer_req->w.cb = e_end_resync_block; | |
4902 | peer_req->submit_jif = jiffies; | |
4903 | peer_req->flags |= EE_IS_TRIM; | |
4904 | ||
4905 | spin_lock_irq(&device->resource->req_lock); | |
4906 | list_add_tail(&peer_req->w.list, &device->sync_ee); | |
4907 | spin_unlock_irq(&device->resource->req_lock); | |
4908 | ||
4909 | atomic_add(pi->size >> 9, &device->rs_sect_ev); | |
4910 | err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR); | |
4911 | ||
4912 | if (err) { | |
4913 | spin_lock_irq(&device->resource->req_lock); | |
4914 | list_del(&peer_req->w.list); | |
4915 | spin_unlock_irq(&device->resource->req_lock); | |
4916 | ||
4917 | drbd_free_peer_req(device, peer_req); | |
4918 | put_ldev(device); | |
4919 | err = 0; | |
4920 | goto fail; | |
4921 | } | |
4922 | ||
4923 | inc_unacked(device); | |
4924 | ||
4925 | /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(), | |
4926 | as well as drbd_rs_complete_io() */ | |
4927 | } else { | |
4928 | fail: | |
4929 | drbd_rs_complete_io(device, sector); | |
4930 | drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); | |
4931 | } | |
4932 | ||
4933 | atomic_add(size >> 9, &device->rs_sect_in); | |
4934 | ||
4935 | return err; | |
4936 | } | |
4937 | ||
02918be2 PR |
4938 | struct data_cmd { |
4939 | int expect_payload; | |
9104d31a | 4940 | unsigned int pkt_size; |
bde89a9e | 4941 | int (*fn)(struct drbd_connection *, struct packet_info *); |
02918be2 PR |
4942 | }; |
4943 | ||
4944 | static struct data_cmd drbd_cmd_handler[] = { | |
4945 | [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, | |
4946 | [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, | |
4947 | [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , | |
4948 | [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , | |
e658983a AG |
4949 | [P_BITMAP] = { 1, 0, receive_bitmap } , |
4950 | [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } , | |
4951 | [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote }, | |
02918be2 PR |
4952 | [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, |
4953 | [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, | |
e658983a AG |
4954 | [P_SYNC_PARAM] = { 1, 0, receive_SyncParam }, |
4955 | [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam }, | |
02918be2 PR |
4956 | [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, |
4957 | [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, | |
4958 | [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, | |
4959 | [P_STATE] = { 0, sizeof(struct p_state), receive_state }, | |
4960 | [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, | |
4961 | [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, | |
4962 | [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, | |
4963 | [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | |
4964 | [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, | |
700ca8c0 | 4965 | [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest }, |
02918be2 | 4966 | [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, |
73a01a18 | 4967 | [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, |
4a76b161 | 4968 | [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, |
036b17ea | 4969 | [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, |
a0fb3c47 | 4970 | [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data }, |
700ca8c0 | 4971 | [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated }, |
9104d31a | 4972 | [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data }, |
b411b363 PR |
4973 | }; |
4974 | ||
bde89a9e | 4975 | static void drbdd(struct drbd_connection *connection) |
b411b363 | 4976 | { |
77351055 | 4977 | struct packet_info pi; |
02918be2 | 4978 | size_t shs; /* sub header size */ |
82bc0194 | 4979 | int err; |
b411b363 | 4980 | |
bde89a9e | 4981 | while (get_t_state(&connection->receiver) == RUNNING) { |
9104d31a | 4982 | struct data_cmd const *cmd; |
b411b363 | 4983 | |
bde89a9e | 4984 | drbd_thread_current_set_cpu(&connection->receiver); |
944410e9 | 4985 | update_receiver_timing_details(connection, drbd_recv_header); |
bde89a9e | 4986 | if (drbd_recv_header(connection, &pi)) |
02918be2 | 4987 | goto err_out; |
b411b363 | 4988 | |
deebe195 | 4989 | cmd = &drbd_cmd_handler[pi.cmd]; |
4a76b161 | 4990 | if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) { |
1ec861eb | 4991 | drbd_err(connection, "Unexpected data packet %s (0x%04x)", |
2fcb8f30 | 4992 | cmdname(pi.cmd), pi.cmd); |
02918be2 | 4993 | goto err_out; |
0b33a916 | 4994 | } |
b411b363 | 4995 | |
e658983a | 4996 | shs = cmd->pkt_size; |
9104d31a LE |
4997 | if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME) |
4998 | shs += sizeof(struct o_qlim); | |
e658983a | 4999 | if (pi.size > shs && !cmd->expect_payload) { |
1ec861eb | 5000 | drbd_err(connection, "No payload expected %s l:%d\n", |
2fcb8f30 | 5001 | cmdname(pi.cmd), pi.size); |
02918be2 | 5002 | goto err_out; |
b411b363 | 5003 | } |
9104d31a LE |
5004 | if (pi.size < shs) { |
5005 | drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n", | |
5006 | cmdname(pi.cmd), (int)shs, pi.size); | |
5007 | goto err_out; | |
5008 | } | |
b411b363 | 5009 | |
c13f7e1a | 5010 | if (shs) { |
944410e9 | 5011 | update_receiver_timing_details(connection, drbd_recv_all_warn); |
bde89a9e | 5012 | err = drbd_recv_all_warn(connection, pi.data, shs); |
a5c31904 | 5013 | if (err) |
c13f7e1a | 5014 | goto err_out; |
e2857216 | 5015 | pi.size -= shs; |
c13f7e1a LE |
5016 | } |
5017 | ||
944410e9 | 5018 | update_receiver_timing_details(connection, cmd->fn); |
bde89a9e | 5019 | err = cmd->fn(connection, &pi); |
4a76b161 | 5020 | if (err) { |
1ec861eb | 5021 | drbd_err(connection, "error receiving %s, e: %d l: %d!\n", |
9f5bdc33 | 5022 | cmdname(pi.cmd), err, pi.size); |
02918be2 | 5023 | goto err_out; |
b411b363 PR |
5024 | } |
5025 | } | |
82bc0194 | 5026 | return; |
b411b363 | 5027 | |
82bc0194 | 5028 | err_out: |
bde89a9e | 5029 | conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD); |
b411b363 PR |
5030 | } |
5031 | ||
bde89a9e | 5032 | static void conn_disconnect(struct drbd_connection *connection) |
b411b363 | 5033 | { |
c06ece6b | 5034 | struct drbd_peer_device *peer_device; |
bbeb641c | 5035 | enum drbd_conns oc; |
376694a0 | 5036 | int vnr; |
b411b363 | 5037 | |
bde89a9e | 5038 | if (connection->cstate == C_STANDALONE) |
b411b363 | 5039 | return; |
b411b363 | 5040 | |
545752d5 LE |
5041 | /* We are about to start the cleanup after connection loss. |
5042 | * Make sure drbd_make_request knows about that. | |
5043 | * Usually we should be in some network failure state already, | |
5044 | * but just in case we are not, we fix it up here. | |
5045 | */ | |
bde89a9e | 5046 | conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); |
545752d5 | 5047 | |
668700b4 | 5048 | /* ack_receiver does not clean up anything. it must not interfere, either */ |
1c03e520 | 5049 | drbd_thread_stop(&connection->ack_receiver); |
668700b4 PR |
5050 | if (connection->ack_sender) { |
5051 | destroy_workqueue(connection->ack_sender); | |
5052 | connection->ack_sender = NULL; | |
5053 | } | |
bde89a9e | 5054 | drbd_free_sock(connection); |
360cc740 | 5055 | |
c141ebda | 5056 | rcu_read_lock(); |
c06ece6b AG |
5057 | idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { |
5058 | struct drbd_device *device = peer_device->device; | |
b30ab791 | 5059 | kref_get(&device->kref); |
c141ebda | 5060 | rcu_read_unlock(); |
69a22773 | 5061 | drbd_disconnected(peer_device); |
c06ece6b | 5062 | kref_put(&device->kref, drbd_destroy_device); |
c141ebda PR |
5063 | rcu_read_lock(); |
5064 | } | |
5065 | rcu_read_unlock(); | |
5066 | ||
bde89a9e | 5067 | if (!list_empty(&connection->current_epoch->list)) |
1ec861eb | 5068 | drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n"); |
12038a3a | 5069 | /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ |
bde89a9e AG |
5070 | atomic_set(&connection->current_epoch->epoch_size, 0); |
5071 | connection->send.seen_any_write_yet = false; | |
12038a3a | 5072 | |
1ec861eb | 5073 | drbd_info(connection, "Connection closed\n"); |
360cc740 | 5074 | |
bde89a9e AG |
5075 | if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN) |
5076 | conn_try_outdate_peer_async(connection); | |
cb703454 | 5077 | |
0500813f | 5078 | spin_lock_irq(&connection->resource->req_lock); |
bde89a9e | 5079 | oc = connection->cstate; |
bbeb641c | 5080 | if (oc >= C_UNCONNECTED) |
bde89a9e | 5081 | _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); |
bbeb641c | 5082 | |
0500813f | 5083 | spin_unlock_irq(&connection->resource->req_lock); |
360cc740 | 5084 | |
f3dfa40a | 5085 | if (oc == C_DISCONNECTING) |
bde89a9e | 5086 | conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); |
360cc740 PR |
5087 | } |
5088 | ||
69a22773 | 5089 | static int drbd_disconnected(struct drbd_peer_device *peer_device) |
360cc740 | 5090 | { |
69a22773 | 5091 | struct drbd_device *device = peer_device->device; |
360cc740 | 5092 | unsigned int i; |
b411b363 | 5093 | |
85719573 | 5094 | /* wait for current activity to cease. */ |
0500813f | 5095 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 AG |
5096 | _drbd_wait_ee_list_empty(device, &device->active_ee); |
5097 | _drbd_wait_ee_list_empty(device, &device->sync_ee); | |
5098 | _drbd_wait_ee_list_empty(device, &device->read_ee); | |
0500813f | 5099 | spin_unlock_irq(&device->resource->req_lock); |
b411b363 PR |
5100 | |
5101 | /* We do not have data structures that would allow us to | |
5102 | * get the rs_pending_cnt down to 0 again. | |
5103 | * * On C_SYNC_TARGET we do not have any data structures describing | |
5104 | * the pending RSDataRequests we have sent.
5105 | * * On C_SYNC_SOURCE there is no data structure that tracks | |
5106 | * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. | |
5107 | * And no, it is not the sum of the reference counts in the | |
5108 | * resync_LRU. The resync_LRU tracks the whole operation including | |
5109 | * the disk-IO, while the rs_pending_cnt only tracks the blocks | |
5110 | * on the fly. */ | |
b30ab791 AG |
5111 | drbd_rs_cancel_all(device); |
5112 | device->rs_total = 0; | |
5113 | device->rs_failed = 0; | |
5114 | atomic_set(&device->rs_pending_cnt, 0); | |
5115 | wake_up(&device->misc_wait); | |
b411b363 | 5116 | |
b30ab791 AG |
5117 | del_timer_sync(&device->resync_timer); |
5118 | resync_timer_fn((unsigned long)device); | |
b411b363 | 5119 | |
b411b363 PR |
5120 | /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, |
5121 | * w_make_resync_request etc. which may still be on the worker queue | |
5122 | * to be "canceled" */ | |
b5043c5e | 5123 | drbd_flush_workqueue(&peer_device->connection->sender_work); |
b411b363 | 5124 | |
b30ab791 | 5125 | drbd_finish_peer_reqs(device); |
b411b363 | 5126 | |
d10b4ea3 PR |
5127 | /* This second workqueue flush is necessary, since drbd_finish_peer_reqs() |
5128 | might have queued new work. The one before drbd_finish_peer_reqs() is
5129 | necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
b5043c5e | 5130 | drbd_flush_workqueue(&peer_device->connection->sender_work); |
d10b4ea3 | 5131 | |
08332d73 LE |
5132 | /* need to do it again, drbd_finish_peer_reqs() may have populated it |
5133 | * again via drbd_try_clear_on_disk_bm(). */ | |
b30ab791 | 5134 | drbd_rs_cancel_all(device); |
b411b363 | 5135 | |
b30ab791 AG |
5136 | kfree(device->p_uuid); |
5137 | device->p_uuid = NULL; | |
b411b363 | 5138 | |
b30ab791 | 5139 | if (!drbd_suspended(device)) |
69a22773 | 5140 | tl_clear(peer_device->connection); |
b411b363 | 5141 | |
b30ab791 | 5142 | drbd_md_sync(device); |
b411b363 | 5143 | |
be115b69 LE |
5144 | if (get_ldev(device)) { |
5145 | drbd_bitmap_io(device, &drbd_bm_write_copy_pages, | |
5146 | "write from disconnected", BM_LOCKED_CHANGE_ALLOWED); | |
5147 | put_ldev(device); | |
5148 | } | |
20ceb2b2 | 5149 | |
b411b363 PR |
5150 | /* tcp_close and release of sendpage pages can be deferred. I don't |
5151 | * want to use SO_LINGER, because apparently it can be deferred for | |
5152 | * more than 20 seconds (longest time I checked). | |
5153 | * | |
5154 | * Actually we don't care for exactly when the network stack does its | |
5155 | * put_page(), but release our reference on these pages right here. | |
5156 | */ | |
b30ab791 | 5157 | i = drbd_free_peer_reqs(device, &device->net_ee); |
b411b363 | 5158 | if (i) |
d0180171 | 5159 | drbd_info(device, "net_ee not empty, killed %u entries\n", i); |
b30ab791 | 5160 | i = atomic_read(&device->pp_in_use_by_net); |
435f0740 | 5161 | if (i) |
d0180171 | 5162 | drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i); |
b30ab791 | 5163 | i = atomic_read(&device->pp_in_use); |
b411b363 | 5164 | if (i) |
d0180171 | 5165 | drbd_info(device, "pp_in_use = %d, expected 0\n", i); |
b411b363 | 5166 | |
0b0ba1ef AG |
5167 | D_ASSERT(device, list_empty(&device->read_ee)); |
5168 | D_ASSERT(device, list_empty(&device->active_ee)); | |
5169 | D_ASSERT(device, list_empty(&device->sync_ee)); | |
5170 | D_ASSERT(device, list_empty(&device->done_ee)); | |
b411b363 | 5171 | |
360cc740 | 5172 | return 0; |
b411b363 PR |
5173 | } |
5174 | ||
5175 | /* | |
5176 | * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version | |
5177 | * we can agree on is stored in agreed_pro_version. | |
5178 | * | |
5179 | * feature flags and the reserved array should be enough room for future | |
5180 | * enhancements of the handshake protocol, and possible plugins... | |
5181 | * | |
5182 | * for now, they are expected to be zero, but ignored. | |
5183 | */ | |
bde89a9e | 5184 | static int drbd_send_features(struct drbd_connection *connection) |
b411b363 | 5185 | { |
9f5bdc33 AG |
5186 | struct drbd_socket *sock; |
5187 | struct p_connection_features *p; | |
b411b363 | 5188 | |
bde89a9e AG |
5189 | sock = &connection->data; |
5190 | p = conn_prepare_command(connection, sock); | |
9f5bdc33 | 5191 | if (!p) |
e8d17b01 | 5192 | return -EIO; |
b411b363 PR |
5193 | memset(p, 0, sizeof(*p)); |
5194 | p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); | |
5195 | p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); | |
20c68fde | 5196 | p->feature_flags = cpu_to_be32(PRO_FEATURES); |
bde89a9e | 5197 | return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); |
b411b363 PR |
5198 | } |
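/*
 * Editor's sketch (illustrative, not part of the driver): the features
 * packet above is a fixed-layout structure sent in big-endian byte
 * order, with the reserved space zeroed by the memset() before any
 * field is filled in. A minimal user-space analogue, using htonl()
 * where the kernel uses cpu_to_be32(); the struct and function names
 * below are hypothetical.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct hs_packet {				/* fixed on-wire layout */
	uint32_t protocol_min;
	uint32_t protocol_max;
	uint32_t feature_flags;
	uint32_t reserved[7];			/* room for future growth */
};

static void hs_packet_init(struct hs_packet *p,
			   uint32_t ver_min, uint32_t ver_max, uint32_t features)
{
	memset(p, 0, sizeof(*p));		/* reserved fields must be zero */
	p->protocol_min = htonl(ver_min);	/* htonl() plays cpu_to_be32() */
	p->protocol_max = htonl(ver_max);
	p->feature_flags = htonl(features);
}
#endif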
5199 | ||
5200 | /* | |
5201 | * return values: | |
5202 | * 1 yes, we have a valid connection | |
5203 | * 0 oops, did not work out, please try again | |
5204 | * -1 peer talks different language, | |
5205 | * no point in trying again, please go standalone. | |
5206 | */ | |
bde89a9e | 5207 | static int drbd_do_features(struct drbd_connection *connection) |
b411b363 | 5208 | { |
bde89a9e | 5209 | /* ASSERT current == connection->receiver ... */ |
e658983a AG |
5210 | struct p_connection_features *p; |
5211 | const int expect = sizeof(struct p_connection_features); | |
77351055 | 5212 | struct packet_info pi; |
a5c31904 | 5213 | int err; |
b411b363 | 5214 | |
bde89a9e | 5215 | err = drbd_send_features(connection); |
e8d17b01 | 5216 | if (err) |
b411b363 PR |
5217 | return 0; |
5218 | ||
bde89a9e | 5219 | err = drbd_recv_header(connection, &pi); |
69bc7bc3 | 5220 | if (err) |
b411b363 PR |
5221 | return 0; |
5222 | ||
6038178e | 5223 | if (pi.cmd != P_CONNECTION_FEATURES) { |
1ec861eb | 5224 | drbd_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n", |
2fcb8f30 | 5225 | cmdname(pi.cmd), pi.cmd); |
b411b363 PR |
5226 | return -1; |
5227 | } | |
5228 | ||
77351055 | 5229 | if (pi.size != expect) { |
1ec861eb | 5230 | drbd_err(connection, "expected ConnectionFeatures length: %u, received: %u\n", |
77351055 | 5231 | expect, pi.size); |
b411b363 PR |
5232 | return -1; |
5233 | } | |
5234 | ||
e658983a | 5235 | p = pi.data; |
bde89a9e | 5236 | err = drbd_recv_all_warn(connection, p, expect); |
a5c31904 | 5237 | if (err) |
b411b363 | 5238 | return 0; |
b411b363 | 5239 | |
b411b363 PR |
5240 | p->protocol_min = be32_to_cpu(p->protocol_min); |
5241 | p->protocol_max = be32_to_cpu(p->protocol_max); | |
5242 | if (p->protocol_max == 0) | |
5243 | p->protocol_max = p->protocol_min; | |
5244 | ||
5245 | if (PRO_VERSION_MAX < p->protocol_min || | |
5246 | PRO_VERSION_MIN > p->protocol_max) | |
5247 | goto incompat; | |
5248 | ||
bde89a9e | 5249 | connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); |
20c68fde | 5250 | connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags); |
b411b363 | 5251 | |
1ec861eb | 5252 | drbd_info(connection, "Handshake successful: " |
bde89a9e | 5253 | "Agreed network protocol version %d\n", connection->agreed_pro_version); |
b411b363 | 5254 | |
9104d31a LE |
5255 | drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n", |
5256 | connection->agreed_features, | |
5257 | connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "", | |
5258 | connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "", | |
5259 | connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" : | |
5260 | connection->agreed_features ? "" : " none"); | |
92d94ae6 | 5261 | |
b411b363 PR |
5262 | return 1; |
5263 | ||
5264 | incompat: | |
1ec861eb | 5265 | drbd_err(connection, "incompatible DRBD dialects: " |
b411b363 PR |
5266 | "I support %d-%d, peer supports %d-%d\n", |
5267 | PRO_VERSION_MIN, PRO_VERSION_MAX, | |
5268 | p->protocol_min, p->protocol_max); | |
5269 | return -1; | |
5270 | } | |
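/*
 * Editor's sketch (illustrative, not part of the driver): the
 * negotiation above first checks that the two advertised version
 * ranges overlap, then agrees on the peer's maximum clamped to our own
 * maximum, and on the bitwise AND of both feature masks. The numbers
 * below are hypothetical.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>
#include <stdint.h>

#define MY_VERSION_MIN 86
#define MY_VERSION_MAX 101

/* Returns the agreed version, or -1 if the ranges do not overlap. */
static int agree_version(int peer_min, int peer_max)
{
	if (MY_VERSION_MAX < peer_min || MY_VERSION_MIN > peer_max)
		return -1;
	return peer_max < MY_VERSION_MAX ? peer_max : MY_VERSION_MAX;
}

int main(void)
{
	uint32_t my_features = 0x7, peer_features = 0x5;

	/* Peer supports 86..96: agree on 96, and on feature bits 0x5. */
	printf("version %d, features 0x%x\n",
	       agree_version(86, 96), my_features & peer_features);
	return 0;
}
#endif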
5271 | ||
5272 | #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE) | |
bde89a9e | 5273 | static int drbd_do_auth(struct drbd_connection *connection) |
b411b363 | 5274 | { |
1ec861eb AG |
5275 | drbd_err(connection, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
5276 | drbd_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); | |
b10d96cb | 5277 | return -1; |
b411b363 PR |
5278 | } |
5279 | #else | |
5280 | #define CHALLENGE_LEN 64 | |
b10d96cb JT |
5281 | |
5282 | /* Return value: | |
5283 | 1 - auth succeeded, | |
5284 | 0 - failed, try again (network error), | |
5285 | -1 - auth failed, don't try again. | |
5286 | */ | |
5287 | ||
bde89a9e | 5288 | static int drbd_do_auth(struct drbd_connection *connection) |
b411b363 | 5289 | { |
9f5bdc33 | 5290 | struct drbd_socket *sock; |
b411b363 | 5291 | char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ |
b411b363 PR |
5292 | char *response = NULL; |
5293 | char *right_response = NULL; | |
5294 | char *peers_ch = NULL; | |
44ed167d PR |
5295 | unsigned int key_len; |
5296 | char secret[SHARED_SECRET_MAX]; /* 64 bytes */
b411b363 | 5297 | unsigned int resp_size; |
9534d671 | 5298 | SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm); |
77351055 | 5299 | struct packet_info pi; |
44ed167d | 5300 | struct net_conf *nc; |
69bc7bc3 | 5301 | int err, rv; |
b411b363 | 5302 | |
9f5bdc33 | 5303 | /* FIXME: Put the challenge/response into the preallocated socket buffer. */ |
b411b363 | 5304 | |
44ed167d | 5305 | rcu_read_lock(); |
bde89a9e | 5306 | nc = rcu_dereference(connection->net_conf); |
44ed167d PR |
5307 | key_len = strlen(nc->shared_secret); |
5308 | memcpy(secret, nc->shared_secret, key_len); | |
5309 | rcu_read_unlock(); | |
5310 | ||
9534d671 HX |
5311 | desc->tfm = connection->cram_hmac_tfm; |
5312 | desc->flags = 0; | |
b411b363 | 5313 | |
9534d671 | 5314 | rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); |
b411b363 | 5315 | if (rv) { |
9534d671 | 5316 | drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv); |
b10d96cb | 5317 | rv = -1; |
b411b363 PR |
5318 | goto fail; |
5319 | } | |
5320 | ||
5321 | get_random_bytes(my_challenge, CHALLENGE_LEN); | |
5322 | ||
bde89a9e AG |
5323 | sock = &connection->data; |
5324 | if (!conn_prepare_command(connection, sock)) { | |
9f5bdc33 AG |
5325 | rv = 0; |
5326 | goto fail; | |
5327 | } | |
bde89a9e | 5328 | rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0, |
9f5bdc33 | 5329 | my_challenge, CHALLENGE_LEN); |
b411b363 PR |
5330 | if (!rv) |
5331 | goto fail; | |
5332 | ||
bde89a9e | 5333 | err = drbd_recv_header(connection, &pi); |
69bc7bc3 AG |
5334 | if (err) { |
5335 | rv = 0; | |
b411b363 | 5336 | goto fail; |
69bc7bc3 | 5337 | } |
b411b363 | 5338 | |
77351055 | 5339 | if (pi.cmd != P_AUTH_CHALLENGE) { |
1ec861eb | 5340 | drbd_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n", |
2fcb8f30 | 5341 | cmdname(pi.cmd), pi.cmd); |
b411b363 PR |
5342 | rv = 0; |
5343 | goto fail; | |
5344 | } | |
5345 | ||
77351055 | 5346 | if (pi.size > CHALLENGE_LEN * 2) { |
1ec861eb | 5347 | drbd_err(connection, "AuthChallenge payload too big.\n");
b10d96cb | 5348 | rv = -1; |
b411b363 PR |
5349 | goto fail; |
5350 | } | |
5351 | ||
67cca286 PR |
5352 | if (pi.size < CHALLENGE_LEN) { |
5353 | drbd_err(connection, "AuthChallenge payload too small.\n"); | |
5354 | rv = -1; | |
5355 | goto fail; | |
5356 | } | |
5357 | ||
77351055 | 5358 | peers_ch = kmalloc(pi.size, GFP_NOIO); |
b411b363 | 5359 | if (peers_ch == NULL) { |
1ec861eb | 5360 | drbd_err(connection, "kmalloc of peers_ch failed\n"); |
b10d96cb | 5361 | rv = -1; |
b411b363 PR |
5362 | goto fail; |
5363 | } | |
5364 | ||
bde89a9e | 5365 | err = drbd_recv_all_warn(connection, peers_ch, pi.size); |
a5c31904 | 5366 | if (err) { |
b411b363 PR |
5367 | rv = 0; |
5368 | goto fail; | |
5369 | } | |
5370 | ||
67cca286 PR |
5371 | if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) { |
5372 | drbd_err(connection, "Peer presented the same challenge!\n"); | |
5373 | rv = -1; | |
5374 | goto fail; | |
5375 | } | |
5376 | ||
9534d671 | 5377 | resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm); |
b411b363 PR |
5378 | response = kmalloc(resp_size, GFP_NOIO); |
5379 | if (response == NULL) { | |
1ec861eb | 5380 | drbd_err(connection, "kmalloc of response failed\n"); |
b10d96cb | 5381 | rv = -1; |
b411b363 PR |
5382 | goto fail; |
5383 | } | |
5384 | ||
9534d671 | 5385 | rv = crypto_shash_digest(desc, peers_ch, pi.size, response); |
b411b363 | 5386 | if (rv) { |
1ec861eb | 5387 | drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv); |
b10d96cb | 5388 | rv = -1; |
b411b363 PR |
5389 | goto fail; |
5390 | } | |
5391 | ||
bde89a9e | 5392 | if (!conn_prepare_command(connection, sock)) { |
9f5bdc33 | 5393 | rv = 0; |
b411b363 | 5394 | goto fail; |
9f5bdc33 | 5395 | } |
bde89a9e | 5396 | rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0, |
9f5bdc33 | 5397 | response, resp_size); |
b411b363 PR |
5398 | if (!rv) |
5399 | goto fail; | |
5400 | ||
bde89a9e | 5401 | err = drbd_recv_header(connection, &pi); |
69bc7bc3 | 5402 | if (err) { |
b411b363 PR |
5403 | rv = 0; |
5404 | goto fail; | |
5405 | } | |
5406 | ||
77351055 | 5407 | if (pi.cmd != P_AUTH_RESPONSE) { |
1ec861eb | 5408 | drbd_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n", |
2fcb8f30 | 5409 | cmdname(pi.cmd), pi.cmd); |
b411b363 PR |
5410 | rv = 0; |
5411 | goto fail; | |
5412 | } | |
5413 | ||
77351055 | 5414 | if (pi.size != resp_size) { |
1ec861eb | 5415 | drbd_err(connection, "AuthResponse payload of wrong size\n");
b411b363 PR |
5416 | rv = 0; |
5417 | goto fail; | |
5418 | } | |
b411b363 | 5419 | |
bde89a9e | 5420 | err = drbd_recv_all_warn(connection, response, resp_size);
a5c31904 | 5421 | if (err) { |
b411b363 PR |
5422 | rv = 0; |
5423 | goto fail; | |
5424 | } | |
5425 | ||
5426 | right_response = kmalloc(resp_size, GFP_NOIO); | |
2d1ee87d | 5427 | if (right_response == NULL) { |
1ec861eb | 5428 | drbd_err(connection, "kmalloc of right_response failed\n"); |
b10d96cb | 5429 | rv = -1; |
b411b363 PR |
5430 | goto fail; |
5431 | } | |
5432 | ||
9534d671 HX |
5433 | rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN, |
5434 | right_response); | |
b411b363 | 5435 | if (rv) { |
1ec861eb | 5436 | drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv); |
b10d96cb | 5437 | rv = -1; |
b411b363 PR |
5438 | goto fail; |
5439 | } | |
5440 | ||
5441 | rv = !memcmp(response, right_response, resp_size); | |
5442 | ||
5443 | if (rv) | |
1ec861eb | 5444 | drbd_info(connection, "Peer authenticated using a %d-byte HMAC\n",
44ed167d | 5445 | resp_size); |
b10d96cb JT |
5446 | else |
5447 | rv = -1; | |
b411b363 PR |
5448 | |
5449 | fail: | |
5450 | kfree(peers_ch); | |
5451 | kfree(response); | |
5452 | kfree(right_response); | |
9534d671 | 5453 | shash_desc_zero(desc); |
b411b363 PR |
5454 | |
5455 | return rv; | |
5456 | } | |
5457 | #endif | |
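/*
 * Editor's sketch (illustrative, not part of the driver): the exchange
 * above is a CRAM-style challenge/response. Each side proves knowledge
 * of the shared secret by returning HMAC(secret, peer's challenge),
 * which the peer verifies by recomputing the digest locally. A
 * user-space sketch of the verification step, assuming OpenSSL's
 * HMAC() with SHA-256 in place of the configurable cram-hmac-alg; a
 * hardened version would also use a constant-time comparison.
 */
#if 0	/* example only, never compiled */
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

static int verify_response(const unsigned char *secret, int secret_len,
			   const unsigned char *my_challenge, size_t chal_len,
			   const unsigned char *response, unsigned int resp_len)
{
	unsigned char expected[EVP_MAX_MD_SIZE];
	unsigned int expected_len = 0;

	/* The peer must have computed HMAC(secret, my_challenge). */
	if (!HMAC(EVP_sha256(), secret, secret_len,
		  my_challenge, chal_len, expected, &expected_len))
		return 0;
	return resp_len == expected_len &&
	       memcmp(response, expected, expected_len) == 0;
}
#endif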
5458 | ||
8fe60551 | 5459 | int drbd_receiver(struct drbd_thread *thi) |
b411b363 | 5460 | { |
bde89a9e | 5461 | struct drbd_connection *connection = thi->connection; |
b411b363 PR |
5462 | int h; |
5463 | ||
1ec861eb | 5464 | drbd_info(connection, "receiver (re)started\n"); |
b411b363 PR |
5465 | |
5466 | do { | |
bde89a9e | 5467 | h = conn_connect(connection); |
b411b363 | 5468 | if (h == 0) { |
bde89a9e | 5469 | conn_disconnect(connection); |
20ee6390 | 5470 | schedule_timeout_interruptible(HZ); |
b411b363 PR |
5471 | } |
5472 | if (h == -1) { | |
1ec861eb | 5473 | drbd_warn(connection, "Discarding network configuration.\n"); |
bde89a9e | 5474 | conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); |
b411b363 PR |
5475 | } |
5476 | } while (h == 0); | |
5477 | ||
91fd4dad | 5478 | if (h > 0) |
bde89a9e | 5479 | drbdd(connection); |
b411b363 | 5480 | |
bde89a9e | 5481 | conn_disconnect(connection); |
b411b363 | 5482 | |
1ec861eb | 5483 | drbd_info(connection, "receiver terminated\n"); |
b411b363 PR |
5484 | return 0; |
5485 | } | |
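/*
 * Editor's sketch (illustrative, not part of the driver): the loop
 * above is driven by conn_connect()'s tri-state result — retry a
 * transient failure (0) after a short sleep, give up for good on a
 * protocol mismatch (-1), and enter the receive loop on success (> 0).
 * The callback names below are hypothetical.
 */
#if 0	/* example only, never compiled */
#include <unistd.h>

static int connect_loop(int (*try_connect)(void), void (*receive)(void))
{
	int h;

	do {
		h = try_connect();
		if (h == 0)
			sleep(1);	/* transient failure: retry */
		else if (h == -1)
			return -1;	/* incompatible peer: go standalone */
	} while (h == 0);

	receive();			/* h > 0: valid connection */
	return 0;
}
#endif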
5486 | ||
5487 | /* ********* acknowledge sender ******** */ | |
5488 | ||
bde89a9e | 5489 | static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5490 | { |
e658983a | 5491 | struct p_req_state_reply *p = pi->data; |
e4f78ede PR |
5492 | int retcode = be32_to_cpu(p->retcode); |
5493 | ||
5494 | if (retcode >= SS_SUCCESS) { | |
bde89a9e | 5495 | set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags); |
e4f78ede | 5496 | } else { |
bde89a9e | 5497 | set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags); |
1ec861eb | 5498 | drbd_err(connection, "Requested state change failed by peer: %s (%d)\n", |
e4f78ede PR |
5499 | drbd_set_st_err_str(retcode), retcode); |
5500 | } | |
bde89a9e | 5501 | wake_up(&connection->ping_wait); |
e4f78ede | 5502 | |
2735a594 | 5503 | return 0; |
e4f78ede | 5504 | } |
b411b363 | 5505 | |
bde89a9e | 5506 | static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5507 | { |
9f4fe9ad | 5508 | struct drbd_peer_device *peer_device; |
b30ab791 | 5509 | struct drbd_device *device; |
e658983a | 5510 | struct p_req_state_reply *p = pi->data; |
b411b363 PR |
5511 | int retcode = be32_to_cpu(p->retcode); |
5512 | ||
9f4fe9ad AG |
5513 | peer_device = conn_peer_device(connection, pi->vnr); |
5514 | if (!peer_device) | |
2735a594 | 5515 | return -EIO; |
9f4fe9ad | 5516 | device = peer_device->device; |
1952e916 | 5517 | |
bde89a9e | 5518 | if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) { |
0b0ba1ef | 5519 | D_ASSERT(device, connection->agreed_pro_version < 100); |
bde89a9e | 5520 | return got_conn_RqSReply(connection, pi); |
4d0fc3fd PR |
5521 | } |
5522 | ||
b411b363 | 5523 | if (retcode >= SS_SUCCESS) { |
b30ab791 | 5524 | set_bit(CL_ST_CHG_SUCCESS, &device->flags); |
b411b363 | 5525 | } else { |
b30ab791 | 5526 | set_bit(CL_ST_CHG_FAIL, &device->flags); |
d0180171 | 5527 | drbd_err(device, "Requested state change failed by peer: %s (%d)\n", |
e4f78ede | 5528 | drbd_set_st_err_str(retcode), retcode); |
b411b363 | 5529 | } |
b30ab791 | 5530 | wake_up(&device->state_wait); |
b411b363 | 5531 | |
2735a594 | 5532 | return 0; |
b411b363 PR |
5533 | } |
5534 | ||
bde89a9e | 5535 | static int got_Ping(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5536 | { |
bde89a9e | 5537 | return drbd_send_ping_ack(connection); |
5539 | } | |
5540 | ||
bde89a9e | 5541 | static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 PR |
5542 | { |
5543 | /* restore idle timeout */ | |
bde89a9e AG |
5544 | connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ; |
5545 | if (!test_and_set_bit(GOT_PING_ACK, &connection->flags)) | |
5546 | wake_up(&connection->ping_wait); | |
b411b363 | 5547 | |
2735a594 | 5548 | return 0; |
b411b363 PR |
5549 | } |
5550 | ||
bde89a9e | 5551 | static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5552 | { |
9f4fe9ad | 5553 | struct drbd_peer_device *peer_device; |
b30ab791 | 5554 | struct drbd_device *device; |
e658983a | 5555 | struct p_block_ack *p = pi->data; |
b411b363 PR |
5556 | sector_t sector = be64_to_cpu(p->sector); |
5557 | int blksize = be32_to_cpu(p->blksize); | |
5558 | ||
9f4fe9ad AG |
5559 | peer_device = conn_peer_device(connection, pi->vnr); |
5560 | if (!peer_device) | |
2735a594 | 5561 | return -EIO; |
9f4fe9ad | 5562 | device = peer_device->device; |
1952e916 | 5563 | |
9f4fe9ad | 5564 | D_ASSERT(device, peer_device->connection->agreed_pro_version >= 89); |
b411b363 | 5565 | |
69a22773 | 5566 | update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); |
b411b363 | 5567 | |
b30ab791 AG |
5568 | if (get_ldev(device)) { |
5569 | drbd_rs_complete_io(device, sector); | |
5570 | drbd_set_in_sync(device, sector, blksize); | |
1d53f09e | 5571 | /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ |
b30ab791 AG |
5572 | device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); |
5573 | put_ldev(device); | |
1d53f09e | 5574 | } |
b30ab791 AG |
5575 | dec_rs_pending(device); |
5576 | atomic_add(blksize >> 9, &device->rs_sect_in); | |
b411b363 | 5577 | |
2735a594 | 5578 | return 0; |
b411b363 PR |
5579 | } |
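/*
 * Editor's note (hedged sketch, not part of the driver): blksize is a
 * byte count, so ">> 9" above converts it to 512-byte sectors for
 * rs_sect_in, while ">> BM_BLOCK_SHIFT" converts it to bitmap bits,
 * one bit per BM_BLOCK_SIZE (4 KiB) of data. A worked example:
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors */
#define BM_BLOCK_SHIFT	12	/* 4 KiB of data per bitmap bit */

int main(void)
{
	int blksize = 32768;	/* a 32 KiB resync block, in bytes */

	printf("%d sectors, %d bitmap bits\n",
	       blksize >> SECTOR_SHIFT,		/* 64 */
	       blksize >> BM_BLOCK_SHIFT);	/* 8 */
	return 0;
}
#endif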
5580 | ||
bc9c5c41 | 5581 | static int |
b30ab791 | 5582 | validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector, |
bc9c5c41 AG |
5583 | struct rb_root *root, const char *func, |
5584 | enum drbd_req_event what, bool missing_ok) | |
b411b363 PR |
5585 | { |
5586 | struct drbd_request *req; | |
5587 | struct bio_and_error m; | |
5588 | ||
0500813f | 5589 | spin_lock_irq(&device->resource->req_lock); |
b30ab791 | 5590 | req = find_request(device, root, id, sector, missing_ok, func); |
b411b363 | 5591 | if (unlikely(!req)) { |
0500813f | 5592 | spin_unlock_irq(&device->resource->req_lock); |
85997675 | 5593 | return -EIO; |
b411b363 PR |
5594 | } |
5595 | __req_mod(req, what, &m); | |
0500813f | 5596 | spin_unlock_irq(&device->resource->req_lock); |
b411b363 PR |
5597 | |
5598 | if (m.bio) | |
b30ab791 | 5599 | complete_master_bio(device, &m); |
85997675 | 5600 | return 0; |
b411b363 PR |
5601 | } |
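/*
 * Editor's sketch (illustrative, not part of the driver): the helper
 * above advances the request state machine under the resource
 * req_lock, but completes the master bio only after the lock is
 * dropped, since completion may sleep or take other locks. A generic
 * user-space sketch of that lookup/transition/complete-outside-the-lock
 * pattern, with hypothetical types:
 */
#if 0	/* example only, never compiled */
#include <pthread.h>
#include <stddef.h>

struct req;
struct completion_work {
	void (*fn)(struct req *);	/* armed by the transition, or NULL */
	struct req *req;
};

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

static int change_state(struct req *(*find)(void),
			void (*advance)(struct req *, struct completion_work *))
{
	struct completion_work w = { NULL, NULL };
	struct req *req;

	pthread_mutex_lock(&req_lock);
	req = find();
	if (!req) {
		pthread_mutex_unlock(&req_lock);
		return -1;		/* like the -EIO path above */
	}
	advance(req, &w);		/* may arm w.fn */
	pthread_mutex_unlock(&req_lock);

	if (w.fn)
		w.fn(w.req);		/* safe: no lock held anymore */
	return 0;
}
#endif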
5602 | ||
bde89a9e | 5603 | static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5604 | { |
9f4fe9ad | 5605 | struct drbd_peer_device *peer_device; |
b30ab791 | 5606 | struct drbd_device *device; |
e658983a | 5607 | struct p_block_ack *p = pi->data; |
b411b363 PR |
5608 | sector_t sector = be64_to_cpu(p->sector); |
5609 | int blksize = be32_to_cpu(p->blksize); | |
5610 | enum drbd_req_event what; | |
5611 | ||
9f4fe9ad AG |
5612 | peer_device = conn_peer_device(connection, pi->vnr); |
5613 | if (!peer_device) | |
2735a594 | 5614 | return -EIO; |
9f4fe9ad | 5615 | device = peer_device->device; |
1952e916 | 5616 | |
69a22773 | 5617 | update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); |
b411b363 | 5618 | |
579b57ed | 5619 | if (p->block_id == ID_SYNCER) { |
b30ab791 AG |
5620 | drbd_set_in_sync(device, sector, blksize); |
5621 | dec_rs_pending(device); | |
2735a594 | 5622 | return 0; |
b411b363 | 5623 | } |
e05e1e59 | 5624 | switch (pi->cmd) { |
b411b363 | 5625 | case P_RS_WRITE_ACK: |
8554df1c | 5626 | what = WRITE_ACKED_BY_PEER_AND_SIS; |
b411b363 PR |
5627 | break; |
5628 | case P_WRITE_ACK: | |
8554df1c | 5629 | what = WRITE_ACKED_BY_PEER; |
b411b363 PR |
5630 | break; |
5631 | case P_RECV_ACK: | |
8554df1c | 5632 | what = RECV_ACKED_BY_PEER; |
b411b363 | 5633 | break; |
d4dabbe2 LE |
5634 | case P_SUPERSEDED: |
5635 | what = CONFLICT_RESOLVED; | |
b411b363 | 5636 | break; |
7be8da07 | 5637 | case P_RETRY_WRITE: |
7be8da07 | 5638 | what = POSTPONE_WRITE; |
b411b363 PR |
5639 | break; |
5640 | default: | |
2735a594 | 5641 | BUG(); |
b411b363 PR |
5642 | } |
5643 | ||
b30ab791 AG |
5644 | return validate_req_change_req_state(device, p->block_id, sector, |
5645 | &device->write_requests, __func__, | |
2735a594 | 5646 | what, false); |
b411b363 PR |
5647 | } |
5648 | ||
bde89a9e | 5649 | static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5650 | { |
9f4fe9ad | 5651 | struct drbd_peer_device *peer_device; |
b30ab791 | 5652 | struct drbd_device *device; |
e658983a | 5653 | struct p_block_ack *p = pi->data; |
b411b363 | 5654 | sector_t sector = be64_to_cpu(p->sector); |
2deb8336 | 5655 | int size = be32_to_cpu(p->blksize); |
85997675 | 5656 | int err; |
b411b363 | 5657 | |
9f4fe9ad AG |
5658 | peer_device = conn_peer_device(connection, pi->vnr); |
5659 | if (!peer_device) | |
2735a594 | 5660 | return -EIO; |
9f4fe9ad | 5661 | device = peer_device->device; |
b411b363 | 5662 | |
69a22773 | 5663 | update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); |
b411b363 | 5664 | |
579b57ed | 5665 | if (p->block_id == ID_SYNCER) { |
b30ab791 AG |
5666 | dec_rs_pending(device); |
5667 | drbd_rs_failed_io(device, sector, size); | |
2735a594 | 5668 | return 0; |
b411b363 | 5669 | } |
2deb8336 | 5670 | |
b30ab791 AG |
5671 | err = validate_req_change_req_state(device, p->block_id, sector, |
5672 | &device->write_requests, __func__, | |
303d1448 | 5673 | NEG_ACKED, true); |
85997675 | 5674 | if (err) { |
c3afd8f5 AG |
5675 | /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. |
5676 | The master bio might already be completed, therefore the | |
5677 | request is no longer in the collision hash. */ | |
5678 | /* In Protocol B we might already have got a P_RECV_ACK | |
5679 | but then get a P_NEG_ACK afterwards. */ | |
b30ab791 | 5680 | drbd_set_out_of_sync(device, sector, size); |
2deb8336 | 5681 | } |
2735a594 | 5682 | return 0; |
b411b363 PR |
5683 | } |
5684 | ||
bde89a9e | 5685 | static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5686 | { |
9f4fe9ad | 5687 | struct drbd_peer_device *peer_device; |
b30ab791 | 5688 | struct drbd_device *device; |
e658983a | 5689 | struct p_block_ack *p = pi->data; |
b411b363 PR |
5690 | sector_t sector = be64_to_cpu(p->sector); |
5691 | ||
9f4fe9ad AG |
5692 | peer_device = conn_peer_device(connection, pi->vnr); |
5693 | if (!peer_device) | |
2735a594 | 5694 | return -EIO; |
9f4fe9ad | 5695 | device = peer_device->device; |
1952e916 | 5696 | |
69a22773 | 5697 | update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); |
7be8da07 | 5698 | |
d0180171 | 5699 | drbd_err(device, "Got NegDReply; Sector %llus, len %u.\n", |
b411b363 PR |
5700 | (unsigned long long)sector, be32_to_cpu(p->blksize)); |
5701 | ||
b30ab791 AG |
5702 | return validate_req_change_req_state(device, p->block_id, sector, |
5703 | &device->read_requests, __func__, | |
2735a594 | 5704 | NEG_ACKED, false); |
b411b363 PR |
5705 | } |
5706 | ||
bde89a9e | 5707 | static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5708 | { |
9f4fe9ad | 5709 | struct drbd_peer_device *peer_device; |
b30ab791 | 5710 | struct drbd_device *device; |
b411b363 PR |
5711 | sector_t sector; |
5712 | int size; | |
e658983a | 5713 | struct p_block_ack *p = pi->data; |
1952e916 | 5714 | |
9f4fe9ad AG |
5715 | peer_device = conn_peer_device(connection, pi->vnr); |
5716 | if (!peer_device) | |
2735a594 | 5717 | return -EIO; |
9f4fe9ad | 5718 | device = peer_device->device; |
b411b363 PR |
5719 | |
5720 | sector = be64_to_cpu(p->sector); | |
5721 | size = be32_to_cpu(p->blksize); | |
b411b363 | 5722 | |
69a22773 | 5723 | update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); |
b411b363 | 5724 | |
b30ab791 | 5725 | dec_rs_pending(device); |
b411b363 | 5726 | |
b30ab791 AG |
5727 | if (get_ldev_if_state(device, D_FAILED)) { |
5728 | drbd_rs_complete_io(device, sector); | |
e05e1e59 | 5729 | switch (pi->cmd) { |
d612d309 | 5730 | case P_NEG_RS_DREPLY: |
b30ab791 | 5731 | drbd_rs_failed_io(device, sector, size); |
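/* fall through */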
d612d309 PR |
5732 | case P_RS_CANCEL: |
5733 | break; | |
5734 | default: | |
2735a594 | 5735 | BUG(); |
d612d309 | 5736 | } |
b30ab791 | 5737 | put_ldev(device); |
b411b363 PR |
5738 | } |
5739 | ||
2735a594 | 5740 | return 0; |
b411b363 PR |
5741 | } |
5742 | ||
bde89a9e | 5743 | static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5744 | { |
e658983a | 5745 | struct p_barrier_ack *p = pi->data; |
c06ece6b | 5746 | struct drbd_peer_device *peer_device; |
9ed57dcb | 5747 | int vnr; |
1952e916 | 5748 | |
bde89a9e | 5749 | tl_release(connection, p->barrier, be32_to_cpu(p->set_size)); |
b411b363 | 5750 | |
9ed57dcb | 5751 | rcu_read_lock(); |
c06ece6b AG |
5752 | idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { |
5753 | struct drbd_device *device = peer_device->device; | |
5754 | ||
b30ab791 AG |
5755 | if (device->state.conn == C_AHEAD && |
5756 | atomic_read(&device->ap_in_flight) == 0 && | |
5757 | !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) { | |
5758 | device->start_resync_timer.expires = jiffies + HZ; | |
5759 | add_timer(&device->start_resync_timer); | |
9ed57dcb | 5760 | } |
c4752ef1 | 5761 | } |
9ed57dcb | 5762 | rcu_read_unlock(); |
c4752ef1 | 5763 | |
2735a594 | 5764 | return 0; |
b411b363 PR |
5765 | } |
5766 | ||
bde89a9e | 5767 | static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi) |
b411b363 | 5768 | { |
9f4fe9ad | 5769 | struct drbd_peer_device *peer_device; |
b30ab791 | 5770 | struct drbd_device *device; |
e658983a | 5771 | struct p_block_ack *p = pi->data; |
84b8c06b | 5772 | struct drbd_device_work *dw; |
b411b363 PR |
5773 | sector_t sector; |
5774 | int size; | |
5775 | ||
9f4fe9ad AG |
5776 | peer_device = conn_peer_device(connection, pi->vnr); |
5777 | if (!peer_device) | |
2735a594 | 5778 | return -EIO; |
9f4fe9ad | 5779 | device = peer_device->device; |
1952e916 | 5780 | |
b411b363 PR |
5781 | sector = be64_to_cpu(p->sector); |
5782 | size = be32_to_cpu(p->blksize); | |
5783 | ||
69a22773 | 5784 | update_peer_seq(peer_device, be32_to_cpu(p->seq_num)); |
b411b363 PR |
5785 | |
5786 | if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) | |
b30ab791 | 5787 | drbd_ov_out_of_sync_found(device, sector, size); |
b411b363 | 5788 | else |
b30ab791 | 5789 | ov_out_of_sync_print(device); |
b411b363 | 5790 | |
b30ab791 | 5791 | if (!get_ldev(device)) |
2735a594 | 5792 | return 0; |
1d53f09e | 5793 | |
b30ab791 AG |
5794 | drbd_rs_complete_io(device, sector); |
5795 | dec_rs_pending(device); | |
b411b363 | 5796 | |
b30ab791 | 5797 | --device->ov_left; |
ea5442af LE |
5798 | |
5799 | /* let's advance progress step marks only for every other megabyte */ | |
b30ab791 AG |
5800 | if ((device->ov_left & 0x200) == 0x200) |
5801 | drbd_advance_rs_marks(device, device->ov_left); | |
ea5442af | 5802 | |
b30ab791 | 5803 | if (device->ov_left == 0) { |
84b8c06b AG |
5804 | dw = kmalloc(sizeof(*dw), GFP_NOIO); |
5805 | if (dw) { | |
5806 | dw->w.cb = w_ov_finished; | |
5807 | dw->device = device; | |
5808 | drbd_queue_work(&peer_device->connection->sender_work, &dw->w); | |
b411b363 | 5809 | } else { |
84b8c06b | 5810 | drbd_err(device, "kmalloc(dw) failed."); |
b30ab791 AG |
5811 | ov_out_of_sync_print(device); |
5812 | drbd_resync_finished(device); | |
b411b363 PR |
5813 | } |
5814 | } | |
b30ab791 | 5815 | put_ldev(device); |
2735a594 | 5816 | return 0; |
b411b363 PR |
5817 | } |
5818 | ||
bde89a9e | 5819 | static int got_skip(struct drbd_connection *connection, struct packet_info *pi) |
0ced55a3 | 5820 | { |
2735a594 | 5821 | return 0; |
b411b363 PR |
5822 | } |
5823 | ||
668700b4 PR |
5824 | struct meta_sock_cmd { |
5825 | size_t pkt_size; | |
5826 | int (*fn)(struct drbd_connection *connection, struct packet_info *); | |
5827 | }; | |
5828 | ||
5829 | static void set_rcvtimeo(struct drbd_connection *connection, bool ping_timeout) | |
0ced55a3 | 5830 | { |
668700b4 PR |
5831 | long t; |
5832 | struct net_conf *nc; | |
32862ec7 | 5833 | |
668700b4 PR |
5834 | rcu_read_lock(); |
5835 | nc = rcu_dereference(connection->net_conf); | |
5836 | t = ping_timeout ? nc->ping_timeo : nc->ping_int; | |
5837 | rcu_read_unlock(); | |
c141ebda | 5838 | |
668700b4 PR |
5839 | t *= HZ; |
5840 | if (ping_timeout) | |
5841 | t /= 10; | |
082a3439 | 5842 | |
668700b4 PR |
5843 | connection->meta.socket->sk->sk_rcvtimeo = t; |
5844 | } | |
32862ec7 | 5845 | |
668700b4 PR |
5846 | static void set_ping_timeout(struct drbd_connection *connection) |
5847 | { | |
5848 | set_rcvtimeo(connection, 1); | |
0ced55a3 PR |
5849 | } |
5850 | ||
668700b4 PR |
5851 | static void set_idle_timeout(struct drbd_connection *connection) |
5852 | { | |
5853 | set_rcvtimeo(connection, 0); | |
5854 | } | |
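/*
 * Editor's note (hedged sketch, not part of the driver): ping_int is
 * configured in seconds while ping_timeo is in tenths of a second,
 * which is why only the ping-timeout path divides by 10 after scaling
 * to jiffies. A worked example with a hypothetical HZ:
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define HZ 250	/* hypothetical tick rate */

static long rcvtimeo_jiffies(long value, int is_ping_timeout)
{
	long t = value * HZ;

	return is_ping_timeout ? t / 10 : t;	/* ping_timeo is in 0.1s units */
}

int main(void)
{
	printf("idle: %ld jiffies, ping: %ld jiffies\n",
	       rcvtimeo_jiffies(10, 0),		/* ping_int = 10 s   -> 2500 */
	       rcvtimeo_jiffies(5, 1));		/* ping_timeo = 0.5 s ->  125 */
	return 0;
}
#endif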
b411b363 | 5855 | |
668700b4 | 5856 | static struct meta_sock_cmd ack_receiver_tbl[] = { |
e658983a AG |
5857 | [P_PING] = { 0, got_Ping }, |
5858 | [P_PING_ACK] = { 0, got_PingAck }, | |
b411b363 PR |
5859 | [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, |
5860 | [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, | |
5861 | [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, | |
d4dabbe2 | 5862 | [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck }, |
b411b363 PR |
5863 | [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck }, |
5864 | [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply }, | |
1952e916 | 5865 | [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply }, |
b411b363 PR |
5866 | [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult }, |
5867 | [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, | |
5868 | [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, | |
5869 | [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, | |
02918be2 | 5870 | [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, |
1952e916 AG |
5871 | [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply }, |
5872 | [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply }, | |
5873 | [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck }, | |
7201b972 | 5874 | }; |
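/*
 * Editor's sketch (illustrative, not part of the driver): the table
 * above maps each meta-socket packet type to its expected payload size
 * and handler, and the receive loop below rejects unknown commands and
 * size mismatches before dispatching. A minimal self-contained version
 * of the same table-driven pattern, with hypothetical commands:
 */
#if 0	/* example only, never compiled */
#include <stddef.h>
#include <stdint.h>

enum cmd { CMD_PING, CMD_ACK, CMD_MAX };

struct handler {
	size_t pkt_size;		/* expected payload size */
	int (*fn)(const void *payload);
};

static int do_ping(const void *p) { (void)p; return 0; }
static int do_ack(const void *p)  { (void)p; return 0; }

static const struct handler tbl[] = {
	[CMD_PING] = { 0,                do_ping },
	[CMD_ACK]  = { sizeof(uint64_t), do_ack  },
};

static int dispatch(unsigned int cmd, size_t size, const void *payload)
{
	if (cmd >= CMD_MAX || !tbl[cmd].fn)
		return -1;		/* unexpected packet */
	if (size != tbl[cmd].pkt_size)
		return -1;		/* wrong packet size */
	return tbl[cmd].fn(payload);
}
#endif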
b411b363 | 5875 | |
1c03e520 | 5876 | int drbd_ack_receiver(struct drbd_thread *thi) |
b411b363 | 5877 | { |
bde89a9e | 5878 | struct drbd_connection *connection = thi->connection; |
668700b4 | 5879 | struct meta_sock_cmd *cmd = NULL; |
77351055 | 5880 | struct packet_info pi; |
668700b4 | 5881 | unsigned long pre_recv_jif; |
257d0af6 | 5882 | int rv; |
bde89a9e | 5883 | void *buf = connection->meta.rbuf; |
b411b363 | 5884 | int received = 0; |
bde89a9e | 5885 | unsigned int header_size = drbd_header_size(connection); |
52b061a4 | 5886 | int expect = header_size; |
44ed167d | 5887 | bool ping_timeout_active = false; |
3990e04d | 5888 | struct sched_param param = { .sched_priority = 2 }; |
b411b363 | 5889 | |
3990e04d PR |
5890 | rv = sched_setscheduler(current, SCHED_RR, ¶m); |
5891 | if (rv < 0) | |
668700b4 | 5892 | drbd_err(connection, "drbd_ack_receiver: ERROR set priority, ret=%d\n", rv); |
b411b363 | 5893 | |
e77a0a5c | 5894 | while (get_t_state(thi) == RUNNING) { |
80822284 | 5895 | drbd_thread_current_set_cpu(thi); |
b411b363 | 5896 | |
668700b4 | 5897 | conn_reclaim_net_peer_reqs(connection); |
44ed167d | 5898 | |
bde89a9e AG |
5899 | if (test_and_clear_bit(SEND_PING, &connection->flags)) { |
5900 | if (drbd_send_ping(connection)) { | |
1ec861eb | 5901 | drbd_err(connection, "drbd_send_ping has failed\n"); |
b411b363 | 5902 | goto reconnect; |
841ce241 | 5903 | } |
668700b4 | 5904 | set_ping_timeout(connection); |
44ed167d | 5905 | ping_timeout_active = true; |
b411b363 PR |
5906 | } |
5907 | ||
668700b4 | 5908 | pre_recv_jif = jiffies; |
bde89a9e | 5909 | rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0); |
b411b363 PR |
5910 | |
5911 | /* Note: | |
5912 | * -EINTR (on meta) we got a signal | |
5913 | * -EAGAIN (on meta) rcvtimeo expired | |
5914 | * -ECONNRESET other side closed the connection | |
5915 | * -ERESTARTSYS (on data) we got a signal | |
5916 | * rv < 0 other than above: unexpected error! | |
5917 | * rv == expected: full header or command | |
5918 | * rv < expected: "woken" by signal during receive | |
5919 | * rv == 0 : "connection shut down by peer" | |
5920 | */ | |
5921 | if (likely(rv > 0)) { | |
5922 | received += rv; | |
5923 | buf += rv; | |
5924 | } else if (rv == 0) { | |
bde89a9e | 5925 | if (test_bit(DISCONNECT_SENT, &connection->flags)) { |
b66623e3 PR |
5926 | long t; |
5927 | rcu_read_lock(); | |
bde89a9e | 5928 | t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10; |
b66623e3 PR |
5929 | rcu_read_unlock(); |
5930 | ||
bde89a9e AG |
5931 | t = wait_event_timeout(connection->ping_wait, |
5932 | connection->cstate < C_WF_REPORT_PARAMS, | |
b66623e3 | 5933 | t); |
599377ac PR |
5934 | if (t) |
5935 | break; | |
5936 | } | |
1ec861eb | 5937 | drbd_err(connection, "meta connection shut down by peer.\n"); |
b411b363 PR |
5938 | goto reconnect; |
5939 | } else if (rv == -EAGAIN) { | |
cb6518cb LE |
5940 | /* If the data socket received something meanwhile, |
5941 | * that is good enough: peer is still alive. */ | |
668700b4 | 5942 | if (time_after(connection->last_received, pre_recv_jif)) |
cb6518cb | 5943 | continue; |
f36af18c | 5944 | if (ping_timeout_active) { |
1ec861eb | 5945 | drbd_err(connection, "PingAck did not arrive in time.\n"); |
b411b363 PR |
5946 | goto reconnect; |
5947 | } | |
bde89a9e | 5948 | set_bit(SEND_PING, &connection->flags); |
b411b363 PR |
5949 | continue; |
5950 | } else if (rv == -EINTR) { | |
668700b4 PR |
5951 | /* maybe drbd_thread_stop(): the while condition will notice. |
5952 | * maybe woken for send_ping: we'll send a ping above, | |
5953 | * and change the rcvtimeo */ | |
5954 | flush_signals(current); | |
b411b363 PR |
5955 | continue; |
5956 | } else { | |
1ec861eb | 5957 | drbd_err(connection, "sock_recvmsg returned %d\n", rv); |
b411b363 PR |
5958 | goto reconnect; |
5959 | } | |
5960 | ||
5961 | if (received == expect && cmd == NULL) { | |
bde89a9e | 5962 | if (decode_header(connection, connection->meta.rbuf, &pi)) |
b411b363 | 5963 | goto reconnect; |
668700b4 PR |
5964 | cmd = &ack_receiver_tbl[pi.cmd]; |
5965 | if (pi.cmd >= ARRAY_SIZE(ack_receiver_tbl) || !cmd->fn) { | |
1ec861eb | 5966 | drbd_err(connection, "Unexpected meta packet %s (0x%04x)\n", |
2fcb8f30 | 5967 | cmdname(pi.cmd), pi.cmd); |
b411b363 PR |
5968 | goto disconnect; |
5969 | } | |
e658983a | 5970 | expect = header_size + cmd->pkt_size; |
52b061a4 | 5971 | if (pi.size != expect - header_size) { |
1ec861eb | 5972 | drbd_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n", |
77351055 | 5973 | pi.cmd, pi.size); |
b411b363 | 5974 | goto reconnect; |
257d0af6 | 5975 | } |
b411b363 PR |
5976 | } |
5977 | if (received == expect) { | |
2735a594 | 5978 | bool err; |
a4fbda8e | 5979 | |
bde89a9e | 5980 | err = cmd->fn(connection, &pi); |
2735a594 | 5981 | if (err) { |
1ec861eb | 5982 | drbd_err(connection, "%pf failed\n", cmd->fn); |
b411b363 | 5983 | goto reconnect; |
1952e916 | 5984 | } |
b411b363 | 5985 | |
bde89a9e | 5986 | connection->last_received = jiffies; |
f36af18c | 5987 | |
668700b4 PR |
5988 | if (cmd == &ack_receiver_tbl[P_PING_ACK]) { |
5989 | set_idle_timeout(connection); | |
44ed167d PR |
5990 | ping_timeout_active = false; |
5991 | } | |
f36af18c | 5992 | |
bde89a9e | 5993 | buf = connection->meta.rbuf; |
b411b363 | 5994 | received = 0; |
52b061a4 | 5995 | expect = header_size; |
b411b363 PR |
5996 | cmd = NULL; |
5997 | } | |
5998 | } | |
5999 | ||
6000 | if (0) { | |
6001 | reconnect: | |
bde89a9e AG |
6002 | conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); |
6003 | conn_md_sync(connection); | |
b411b363 PR |
6004 | } |
6005 | if (0) { | |
6006 | disconnect: | |
bde89a9e | 6007 | conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); |
b411b363 | 6008 | } |
b411b363 | 6009 | |
668700b4 | 6010 | drbd_info(connection, "ack_receiver terminated\n"); |
b411b363 PR |
6011 | |
6012 | return 0; | |
6013 | } | |
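/*
 * Editor's sketch (illustrative, not part of the driver): recv() on a
 * stream socket may return fewer bytes than requested, so the loop
 * above accumulates into received/expect — first the fixed-size
 * header, then the payload the header announces. A user-space sketch
 * of the same exact-read invariant:
 */
#if 0	/* example only, never compiled */
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

static int recv_exact(int fd, void *buf, size_t len)
{
	size_t received = 0;

	while (received < len) {
		ssize_t rv = recv(fd, (char *)buf + received,
				  len - received, 0);
		if (rv > 0)
			received += rv;		/* short read: keep going */
		else if (rv == 0)
			return -1;		/* peer closed the connection */
		else if (errno == EINTR)
			continue;		/* interrupted: retry */
		else
			return -1;		/* real error, e.g. ECONNRESET */
	}
	return 0;
}
#endif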
668700b4 PR |
6014 | |
6015 | void drbd_send_acks_wf(struct work_struct *ws) | |
6016 | { | |
6017 | struct drbd_peer_device *peer_device = | |
6018 | container_of(ws, struct drbd_peer_device, send_acks_work); | |
6019 | struct drbd_connection *connection = peer_device->connection; | |
6020 | struct drbd_device *device = peer_device->device; | |
6021 | struct net_conf *nc; | |
6022 | int tcp_cork, err; | |
6023 | ||
6024 | rcu_read_lock(); | |
6025 | nc = rcu_dereference(connection->net_conf); | |
6026 | tcp_cork = nc->tcp_cork; | |
6027 | rcu_read_unlock(); | |
6028 | ||
6029 | if (tcp_cork) | |
6030 | drbd_tcp_cork(connection->meta.socket); | |
6031 | ||
6032 | err = drbd_finish_peer_reqs(device); | |
6033 | kref_put(&device->kref, drbd_destroy_device); | |
6034 | /* the matching kref_get() is in drbd_endio_write_sec_final(); it is necessary
6035 | to keep the struct work_struct send_acks_work, a member of the peer_device
object, alive */
6036 | ||
6037 | if (err) { | |
6038 | conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); | |
6039 | return; | |
6040 | } | |
6041 | ||
6042 | if (tcp_cork) | |
6043 | drbd_tcp_uncork(connection->meta.socket); | |
6044 | ||
6045 | return; | |
6046 | } |
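/*
 * Editor's sketch (illustrative, not part of the driver): corking the
 * meta socket batches the many small ack packets into fewer TCP
 * segments; uncorking flushes whatever is still queued. A user-space
 * analogue using the Linux-only TCP_CORK socket option, with a
 * hypothetical callback for the ack flush:
 */
#if 0	/* example only, never compiled */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void send_batched(int fd, void (*send_all_acks)(int))
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	send_all_acks(fd);	/* many small sends, coalesced by the cork */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}
#endif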