/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this too low reduces the memory used by each backend, but can
 * incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */

static int max_buffer_pages = 1024;
module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
        "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int max_pgrants = 1056;
module_param_named(max_persistent_grants, max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
        "Maximum number of grants to map persistently");

/*
 * How long a persistent grant is allowed to remain allocated without being in
 * use. The time is in seconds, 0 means indefinitely long.
 */

static unsigned int pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
                   uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
        "Time in seconds an unused persistent grant is allowed to "
        "remain allocated. Default is 60, 0 means unlimited.");

/*
 * Maximum number of rings/queues blkback supports; allow as many queues as
 * there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
        "Maximum number of hardware queues per virtual disk. "
        "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

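/*
 * True once an unused persistent grant has been idle for longer than
 * persistent_grant_unused_seconds (a timeout of 0 disables expiry).
 */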
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
        return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
                                  HZ * pgrant_timeout);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_grant, that can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;
        struct xen_blkif *blkif = ring->blkif;

        if (ring->persistent_gnt_c >= max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &ring->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        persistent_gnt->active = true;
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
        ring->persistent_gnt_c++;
        atomic_inc(&ring->persistent_gnt_in_use);
        return 0;
}

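/*
 * Look up gref in the ring's tree of persistent grants. On a hit the
 * grant is marked active and returned; a grant that is already in use
 * (or not present) yields NULL.
 */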
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = ring->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (data->active) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
                        data->active = true;
                        atomic_inc(&ring->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

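/*
 * Release a grant obtained through get_persistent_gnt: stamp the
 * last-used time and drop it from the in-use count. May be called from
 * interrupt context, hence the atomic counter.
 */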
static void put_persistent_gnt(struct xen_blkif_ring *ring,
                               struct persistent_gnt *persistent_gnt)
{
        if (!persistent_gnt->active)
                pr_alert_ratelimited("freeing a grant already unused\n");
        persistent_gnt->last_used = jiffies;
        persistent_gnt->active = false;
        atomic_dec(&ring->persistent_gnt_in_use);
}

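/*
 * Unmap and free every persistent grant in the tree, issuing the unmap
 * hypercalls in batches of BLKIF_MAX_SEGMENTS_PER_REQUEST pages.
 */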
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int segs_to_unmap = 0;
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    (unsigned long) pfn_to_kaddr(page_to_pfn(
                                            persistent_gnt->page)),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                    !rb_next(&persistent_gnt->node)) {

                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

                        gnttab_page_cache_put(&ring->free_pages, pages,
                                              segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

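/*
 * Work handler that unmaps the grants previously queued on
 * persistent_purge_list by purge_persistent_gnt and frees their
 * tracking structures.
 */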
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int segs_to_unmap = 0;
        struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        while (!list_empty(&ring->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&ring->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    vaddr(persistent_gnt->page),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                        gnttab_page_cache_put(&ring->free_pages, pages,
                                              segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                unmap_data.count = segs_to_unmap;
                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
        }
}

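/*
 * Trim the tree of persistent grants: move timed-out entries (and, if
 * the tree overflowed, enough recently used ones) onto
 * persistent_purge_list and schedule xen_blkbk_unmap_purged_grants to
 * unmap them.
 */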
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false;
        struct rb_root *root;

        if (work_busy(&ring->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                goto out;
        }

        if (ring->persistent_gnt_c < max_pgrants ||
            (ring->persistent_gnt_c == max_pgrants &&
             !ring->blkif->vbd.overflow_max_grants)) {
                num_clean = 0;
        } else {
                num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
                num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
                num_clean = min(ring->persistent_gnt_c, num_clean);
                pr_debug("Going to purge at least %u persistent grants\n",
                         num_clean);
        }

        /*
         * At this point, we can assure that there will be no calls
         * to get_persistent_grant (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = 0;

        BUG_ON(!list_empty(&ring->persistent_purge_list));
        root = &ring->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);

                if (persistent_gnt->active)
                        continue;
                if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
                        continue;
                if (scan_used && total >= num_clean)
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &ring->persistent_purge_list);
                total++;
        }
        /*
         * Check whether we also need to start cleaning
         * grants that were used since the last purge in order to cope
         * with the requested number.
         */
        if (!scan_used && total < num_clean) {
                pr_debug("Still missing %u purged frames\n", num_clean - total);
                scan_used = true;
                goto purge_list;
        }

        if (total) {
                ring->persistent_gnt_c -= total;
                ring->blkif->vbd.overflow_max_grants = 0;

                /* We can defer this work */
                schedule_work(&ring->persistent_purge_work);
                pr_debug("Purged %u/%u\n", num_clean, total);
        }

out:
        return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        if (!list_empty(&ring->pending_free)) {
                req = list_entry(ring->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        was_empty = list_empty(&ring->pending_free);
        list_add(&req->free_list, &ring->pending_free);
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        if (was_empty)
                wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != REQ_OP_READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info("VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn("Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn("Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn("Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn("Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
        ring->waiting_reqs = 1;
        wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
        pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, ring->st_oo_req,
                ring->st_rd_req, ring->st_wr_req,
                ring->st_f_req, ring->st_ds_req,
                ring->persistent_gnt_c, max_pgrants);
        ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        ring->st_rd_req = 0;
        ring->st_wr_req = 0;
        ring->st_oo_req = 0;
        ring->st_ds_req = 0;
}

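/*
 * Main loop of the per-ring kernel thread: wait for requests (or a stop
 * signal), dispatch them via do_block_io_op, signal a lateeoi once the
 * ring goes idle, and periodically purge persistent grants, shrink the
 * free pages pool and print statistics.
 */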
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif_ring *ring = arg;
        struct xen_blkif *blkif = ring->blkif;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;
        bool do_eoi;
        unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

        set_freezable();
        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        ring->wq,
                        ring->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        ring->pending_free_wq,
                        !list_empty(&ring->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                do_eoi = ring->waiting_reqs;

                ring->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(ring, &eoi_flags);
                if (ret > 0)
                        ring->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(ring->shutdown_wq,
                                                 kthread_should_stop());

                if (do_eoi && !ring->waiting_reqs) {
                        xen_irq_lateeoi(ring->irq, eoi_flags);
                        eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
                }

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, ring->next_lru)) {
                        purge_persistent_gnt(ring);
                        ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink the free pages pool if it is too large. */
                if (time_before(jiffies, blkif->buffer_squeeze_end))
                        gnttab_page_cache_shrink(&ring->free_pages, 0);
                else
                        gnttab_page_cache_shrink(&ring->free_pages,
                                                 max_buffer_pages);

                if (log_stats && time_after(jiffies, ring->st_print))
                        print_stats(ring);
        }

        /* Drain pending purge work */
        flush_work(&ring->persistent_purge_work);

        if (log_stats)
                print_stats(ring);

        ring->xenblkd = NULL;

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
                free_persistent_gnts(ring, &ring->persistent_gnts,
                                     ring->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
        ring->persistent_gnt_c = 0;

        /* Since we are shutting down remove all pages from the buffer */
        gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
}

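/*
 * Build the unmap operations for a batch of grant pages: persistent
 * grants are simply released, anything else is queued for a real unmap.
 * Returns the number of entries filled in unmap_ops/unmap_pages.
 */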
static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif_ring *ring,
        struct grant_page **pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
{
        unsigned int i, invcount = 0;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(ring, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
        struct pending_req *pending_req = (struct pending_req *)(data->data);
        struct xen_blkif_ring *ring = pending_req->ring;
        struct xen_blkif *blkif = ring->blkif;

        /* BUG_ON used to reproduce existing behaviour,
           but is this the best way to deal with this? */
        BUG_ON(result);

        gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
        make_response(ring, pending_req->id,
                      pending_req->operation, pending_req->status);
        free_req(ring, pending_req);
        /*
         * Make sure the request is freed before releasing blkif,
         * or there could be a race between free_req and the
         * cleanup done in xen_blkif_free during shutdown.
         *
         * NB: The fact that we might try to wake up pending_free_wq
         * before drain_complete (in case there's a drain going on)
         * is not a problem with our current implementation
         * because we can assure there's no thread waiting on
         * pending_free_wq if there's a drain going on, but it has
         * to be taken into account if the current model is changed.
         */
        if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
                complete(&blkif->drain_complete);
        }
        xen_blkif_put(blkif);
}

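/*
 * Asynchronously unmap a finished request's grants; the response to the
 * frontend is sent from xen_blkbk_unmap_and_respond_callback once the
 * unmap has completed.
 */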
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
        struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
        struct xen_blkif_ring *ring = req->ring;
        struct grant_page **pages = req->segments;
        unsigned int invcount;

        invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);

        work->data = req;
        work->done = xen_blkbk_unmap_and_respond_callback;
        work->unmap_ops = req->unmap;
        work->kunmap_ops = NULL;
        work->pages = req->unmap_pages;
        work->count = invcount;

        gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int invcount = 0;
        int ret;

        while (num) {
                unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

                invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
                                                   unmap, unmap_pages);
                if (invcount) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        gnttab_page_cache_put(&ring->free_pages, unmap_pages,
                                              invcount);
                }
                pages += batch;
                num -= batch;
        }
}

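/*
 * Map the grant references of a request into this domain, reusing
 * already-mapped persistent grants where possible and mapping the rest
 * in batches of BLKIF_MAX_SEGMENTS_PER_REQUEST. Newly mapped grants are
 * added to the persistent tree while there is room for them.
 */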
static int xen_blkbk_map(struct xen_blkif_ring *ring,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;
        struct xen_blkif *blkif = ring->blkif;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with proper amount of sectors, and setup
         * assign map[..] with the PFN of the page in our domain with the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts) {
                        persistent_gnt = get_persistent_gnt(
                                ring,
                                pages[i]->gref);
                }

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (gnttab_page_cache_get(&ring->free_pages,
                                                  &pages[i]->page)) {
                                gnttab_page_cache_put(&ring->free_pages,
                                                      pages_to_gnt,
                                                      segs_to_map);
                                ret = -ENOMEM;
                                goto out;
                        }
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map)
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
                                gnttab_page_cache_put(&ring->free_pages,
                                                      &pages[seg_idx]->page, 1);
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= !ret;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    ring->persistent_gnt_c < max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(ring,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, ring->persistent_gnt_c,
                                 max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (!ret && map_until != num)
                goto again;

out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
                if (i >= last_map + segs_to_map)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }

        return ret;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
                           pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

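/*
 * For a BLKIF_OP_INDIRECT request: map the indirect descriptor pages,
 * copy out each segment's gref and sector range (validating it), and
 * accumulate the total sector count in preq.
 */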
static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif_ring *ring = pending_req->ring;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;

        nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;

                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;

                pending_req->segments[n]->gref = segments[i].gref;

                first_sect = READ_ONCE(segments[i].first_sect);
                last_sect = READ_ONCE(segments[i].last_sect);
                if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
                        rc = -EINVAL;
                        goto unmap;
                }

                seg[n].nsec = last_sect - first_sect + 1;
                seg[n].offset = first_sect << 9;
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(ring, pages, indirect_grefs);
        return rc;
}

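/*
 * Handle a BLKIF_OP_DISCARD request: validate the sector range against
 * the vbd and issue the discard, secure if both sides support it.
 */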
static int dispatch_discard_io(struct xen_blkif_ring *ring,
                               struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct xen_blkif *blkif = ring->blkif;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;
        struct phys_req preq;

        xen_blkif_get(blkif);

        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects = req->u.discard.nr_sectors;

        err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
        if (err) {
                pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
                goto fail_response;
        }
        ring->st_ds_req++;

        secure = (blkif->vbd.discard_secure &&
                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug("discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(ring, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(ring, pending_req);
        make_response(ring, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}

1018 | ||
59795700 | 1019 | static void xen_blk_drain_io(struct xen_blkif_ring *ring) |
29bde093 | 1020 | { |
59795700 BL |
1021 | struct xen_blkif *blkif = ring->blkif; |
1022 | ||
29bde093 KRW |
1023 | atomic_set(&blkif->drain, 1); |
1024 | do { | |
59795700 | 1025 | if (atomic_read(&ring->inflight) == 0) |
6927d920 | 1026 | break; |
29bde093 KRW |
1027 | wait_for_completion_interruptible_timeout( |
1028 | &blkif->drain_complete, HZ); | |
1029 | ||
1030 | if (!atomic_read(&blkif->drain)) | |
1031 | break; | |
29bde093 KRW |
1032 | } while (!kthread_should_stop()); |
1033 | atomic_set(&blkif->drain, 0); | |
1034 | } | |
1035 | ||
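/*
 * Completion handling shared by all bios of a request: record any
 * error (translating an unsupported flush/barrier into EOPNOTSUPP and
 * withdrawing the feature in xenstore) and, when the last bio finishes,
 * unmap the grants and send the response.
 */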
static void __end_block_io_op(struct pending_req *pending_req,
                              blk_status_t error)
{
        /* An error fails the entire request. */
        if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
            error == BLK_STS_NOTSUPP) {
                pr_debug("flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
                   error == BLK_STS_NOTSUPP) {
                pr_debug("write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug("Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bio's have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt))
                xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
        __end_block_io_op(bio->bi_private, bio->bi_status);
        bio_put(bio);
}

/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
                rc = blk_rings->common.rsp_prod_pvt;
                pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
                        rp, rc, rp - rc, ring->blkif->vbd.pdevice);
                return -EACCES;
        }
        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                /* We've seen a request, so clear spurious eoi flag. */
                *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(ring);
                if (NULL == pending_req) {
                        ring->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (ring->blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(ring, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(ring, pending_req);
                        if (dispatch_discard_io(ring, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(ring, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(ring, eoi_flags);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf *seg = pending_req->seg;
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
        int operation_flags = 0;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
        unsigned short req_operation;

        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;

        if ((req->operation == BLKIF_OP_INDIRECT) &&
            (req_operation != BLKIF_OP_READ) &&
            (req_operation != BLKIF_OP_WRITE)) {
                pr_debug("Invalid indirect operation (%u)\n", req_operation);
                goto fail_response;
        }

        switch (req_operation) {
        case BLKIF_OP_READ:
                ring->st_rd_req++;
                operation = REQ_OP_READ;
                break;
        case BLKIF_OP_WRITE:
                ring->st_wr_req++;
                operation = REQ_OP_WRITE;
                operation_flags = REQ_SYNC | REQ_IDLE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
                fallthrough;
        case BLKIF_OP_FLUSH_DISKCACHE:
                ring->st_f_req++;
                operation = REQ_OP_WRITE;
                operation_flags = REQ_PREFLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
                     (nseg > MAX_INDIRECT_SEGMENTS))) {
                pr_debug("Bad number of segments in request (%d)\n", nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.nr_sects = 0;

        pending_req->ring = ring;
        pending_req->id = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status = BLKIF_RSP_OKAY;
        pending_req->nr_segs = nseg;

        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev = req->u.rw.handle;
                preq.sector_number = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
                        pages[i]->gref = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                      req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
                        if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
                            (req->u.rw.seg[i].last_sect <
                             req->u.rw.seg[i].first_sect))
                                goto fail_response;
                        preq.nr_sects += seg[i].nsec;
                }
        } else {
                preq.dev = req->u.indirect.handle;
                preq.sector_number = req->u.indirect.sector_number;
                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
                        goto fail_response;
        }

        if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
                pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == REQ_OP_READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         ring->blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug("Misaligned I/O request from domain %d\n",
                                 ring->blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/O's and once that has been completed
         * issue the flush.
         */
        if (drain)
                xen_blk_drain_io(pending_req->ring);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(pending_req))
                goto fail_flush;

        /*
         * This corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(ring->blkif);
        atomic_inc(&ring->inflight);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i]->page,
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
                        bio = bio_alloc(GFP_KERNEL, nr_iovecs);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio_set_dev(bio, preq.bdev);
                        bio->bi_private = pending_req;
                        bio->bi_end_io = end_block_io_op;
                        bio->bi_iter.bi_sector = preq.sector_number;
                        bio_set_op_attrs(bio, operation, operation_flags);
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
                BUG_ON(operation_flags != REQ_PREFLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio_set_dev(bio, preq.bdev);
                bio->bi_private = pending_req;
                bio->bi_end_io = end_block_io_op;
                bio_set_op_attrs(bio, operation, operation_flags);
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(biolist[i]);

        /* Let the I/Os go.. */
        blk_finish_plug(&plug);

        if (operation == REQ_OP_READ)
                ring->st_rd_sect += preq.nr_sects;
        else if (operation == REQ_OP_WRITE)
                ring->st_wr_sect += preq.nr_sects;

        return 0;

fail_flush:
        xen_blkbk_unmap(ring, pending_req->segments,
                        pending_req->nr_segs);
fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
        free_req(ring, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, BLK_STS_RESOURCE);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response *resp;
        unsigned long flags;
        union blkif_back_rings *blk_rings;
        int notify;

        spin_lock_irqsave(&ring->blk_ring_lock, flags);
        blk_rings = &ring->blk_rings;
        /* Place on the response ring for the relevant domain. */
        switch (ring->blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                resp = RING_GET_RESPONSE(&blk_rings->native,
                                         blk_rings->native.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_32:
                resp = RING_GET_RESPONSE(&blk_rings->x86_32,
                                         blk_rings->x86_32.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_64:
                resp = RING_GET_RESPONSE(&blk_rings->x86_64,
                                         blk_rings->x86_64.rsp_prod_pvt);
                break;
        default:
                BUG();
        }

        resp->id        = id;
        resp->operation = op;
        resp->status    = st;

        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(ring->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
                xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
        }

        if (xenblk_max_queues == 0)
                xenblk_max_queues = num_online_cpus();

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

failed_init:
        return rc;
}

module_init(xen_blkif_init);

static void __exit xen_blkif_fini(void)
{
        xen_blkif_xenbus_fini();
        xen_blkif_interface_fini();
}

module_exit(xen_blkif_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");