/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"

/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */

/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 * It will be created.
 * It will be marked with the intention to be
 *   submitted to local disk and/or
 *   sent via the network.
 *
 * It has to be placed on the transfer log and other housekeeping lists,
 * in case we have a network connection.
 *
 * It may be identified as a concurrent (write) request
 * and be handled accordingly.
 *
 * It may be handed over to the local disk subsystem.
 * It may be completed by the local disk subsystem,
 * either successfully or with io-error.
 * In case it is a READ request, and it failed locally,
 * it may be retried remotely.
 *
 * It may be queued for sending.
 * It may be handed over to the network stack,
 * which may fail.
 * It may be acknowledged by the "peer" according to the wire_protocol in use.
 * This may be a negative ack.
 * It may receive a faked ack when the network connection is lost and the
 * transfer log is cleaned up.
 * Sending may be canceled due to network connection loss.
 * When it finally has outlived its time,
 * corresponding dirty bits in the resync-bitmap may be cleared or set,
 * it will be destroyed,
 * and completion will be signalled to the originator,
 * with or without "success".
 */
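
/* Rough illustration (not exhaustive): a protocol C write that succeeds
 * everywhere walks through the events of the enum below roughly as
 *
 *	created
 *	to_be_send, to_be_submitted
 *	queue_for_net_write
 *	handed_over_to_network	(handed to the network stack)
 *	completed_ok		(local disk is done)
 *	write_acked_by_peer	(peer has it; request may be freed)
 *
 * in protocols A and B the freeing event is barrier_acked instead. */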

enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these two are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,

	send_canceled,
	send_failed,
	handed_over_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	completed_ok,
	resend,
	nothing, /* for tracing only */
};

/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 210
	 * 000: no local possible
	 * 001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 110: completed ok
	 * 010: completed with error
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,

	/* 76543
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */

	/* if "SENT" is not set yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transfer log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,

	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log.  Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it from
	 * the transfer log.  We should restructure the code so that this
	 * conflict no longer occurs. */
	__RQ_NET_QUEUED,

	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same.  before we drop it
	 * we must ensure that we can tell a request with a network part
	 * from a request without one, regardless of what happens to it. */
	__RQ_NET_SENT,

	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,

	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer.
	 */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,

	/* Set when this is a write, clear for a read */
	__RQ_WRITE,
};

#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_OK << 1)-1) /* 0x07 */

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

/* 0x1f8 */
#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)

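/* Worked example of the mask arithmetic above (illustrative):
 * __RQ_LOCAL_OK is bit 2, so RQ_LOCAL_OK == 0x04 and
 * RQ_LOCAL_MASK == (0x04 << 1) - 1 == 0x07, covering bits 0..2.
 * __RQ_NET_MAX is bit 9, so (1UL << 9) - 1 == 0x1ff; clearing the
 * local bits leaves RQ_NET_MASK == 0x1f8, covering bits 3..8. */
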
/* To wake up the frozen transfer log, req_mod() has to return whether the
 * request should be counted in the epoch object. */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)

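/* A minimal sketch (an assumption, not a declaration from this header) of
 * how a caller might map a request to its MR_* flag, using the RQ_WRITE
 * state bit that drbd_req_new() sets below:
 *
 *	int m = (req->rq_state & RQ_WRITE) ? MR_WRITE : MR_READ;
 */
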
/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}

/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}

/* application reads (drbd_request objects) */
static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}

241 | ||
242 | /* when we receive the answer for a read request, | |
243 | * verify that we actually know about it */ | |
244 | static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev, | |
245 | u64 id, sector_t sector) | |
246 | { | |
247 | struct hlist_head *slot = ar_hash_slot(mdev, sector); | |
248 | struct hlist_node *n; | |
249 | struct drbd_request *req; | |
250 | ||
251 | hlist_for_each_entry(req, n, slot, colision) { | |
252 | if ((unsigned long)req == (unsigned long)id) { | |
253 | D_ASSERT(req->sector == sector); | |
254 | return req; | |
255 | } | |
256 | } | |
257 | return NULL; | |
258 | } | |
259 | ||
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private = req;
	bio->bi_end_io  = drbd_endio_pri;
	bio->bi_next    = NULL;
}

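/* Note: bio_clone() duplicates only the bio and its bvec; the data pages
 * are shared, so the private bio submitted to the local disk and the
 * master bio from the layer above describe the same memory. */
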
static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		drbd_req_make_private_bio(req, bio_src);

		req->rq_state   = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev       = mdev;
		req->master_bio = bio_src;
		req->epoch      = 0;
		req->sector     = bio_src->bi_sector;
		req->size       = bio_src->bi_size;
		req->start_time = jiffies;
		INIT_HLIST_NODE(&req->colision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);
	}
	return req;
}

static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}

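/* Worked example for overlaps(): lengths are in bytes and sectors are
 * 512 bytes, so l>>9 converts bytes to sectors.  overlaps(0, 4096, 7, 512)
 * is true, since sectors [0,8) and [7,8) intersect; overlaps(0, 4096, 8, 512)
 * is false, because the first range ends at sector 8 (exclusive). */
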
/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar.  But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};

extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);

/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}

/* completion of master bio is outside of spinlock.
 * If you need it irqsave, do it yourself! */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irq(&mdev->req_lock);
	rv = __req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
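
/* Usage sketch (illustrative; the event name is picked from the enum above):
 *
 *	req_mod(req, connection_lost_while_pending);
 *
 * req_mod() takes mdev->req_lock itself and completes the master bio after
 * dropping it.  With the lock already held, either use _req_mod(), or call
 * __req_mod() directly and hand any resulting bio_and_error to
 * complete_master_bio() once the lock is released. */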
#endif