/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value reduces the memory used by each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set lower, at the cost of degraded performance on IO-intensive
 * workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
                 "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum number of rings/queues blkback supports. If the user has not
 * specified a value, allow as many queues as there are CPUs.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of hardware queues per virtual disk. " \
                 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to remove on each
 * LRU pass.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

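/*
 * Take one page from the ring's pool of unused free pages; when the pool
 * is empty, fall back to allocating a fresh page via gnttab_alloc_pages().
 */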
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        if (list_empty(&ring->free_pages)) {
                BUG_ON(ring->free_pages_num != 0);
                spin_unlock_irqrestore(&ring->free_pages_lock, flags);
                return gnttab_alloc_pages(1, page);
        }
        BUG_ON(ring->free_pages_num == 0);
        page[0] = list_first_entry(&ring->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        ring->free_pages_num--;
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &ring->free_pages);
        ring->free_pages_num += num;
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        while (ring->free_pages_num > num) {
                BUG_ON(list_empty(&ring->free_pages));
                page[num_pages] = list_first_entry(&ring->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                ring->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
                        gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&ring->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
        if (num_pages != 0)
                gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st);

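/*
 * Iterate over an rb-tree of grants while it is safe to erase the current
 * node: the successor is looked up before the loop body runs, so 'pos' may
 * be removed from the tree and freed inside the loop.
 */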
#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_grant, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
                              struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;
        struct xen_blkif *blkif = ring->blkif;

        if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &ring->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
        ring->persistent_gnt_c++;
        atomic_inc(&ring->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = ring->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&ring->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited("freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&ring->persistent_gnt_in_use);
}

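/*
 * Unmap and free every grant in @root, returning the underlying pages to
 * the ring's free pool in batches; @num must match the number of nodes in
 * the tree.
 */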
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int segs_to_unmap = 0;
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    (unsigned long) pfn_to_kaddr(page_to_pfn(
                                            persistent_gnt->page)),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                    !rb_next(&persistent_gnt->node)) {

                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

                        put_free_pages(ring, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

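/*
 * Work item (ring->persistent_purge_work): unmap the grants that
 * purge_persistent_gnt() queued on persistent_purge_list and hand their
 * pages back to the free pool.
 */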
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int segs_to_unmap = 0;
        struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        while (!list_empty(&ring->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&ring->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                                    vaddr(persistent_gnt->page),
                                    GNTMAP_host_map,
                                    persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                        put_free_pages(ring, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                unmap_data.count = segs_to_unmap;
                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                put_free_pages(ring, pages, segs_to_unmap);
        }
}

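/*
 * LRU pass over the persistent grants: first evict grants that have not
 * been used since the previous pass, then (if that is not enough) any
 * grant that is not currently in use, and finally clear the WAS_ACTIVE
 * flags for the next pass. The evicted grants are unmapped asynchronously
 * by xen_blkbk_unmap_purged_grants().
 */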
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false, clean_used = false;
        struct rb_root *root;

        if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
            (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
             !ring->blkif->vbd.overflow_max_grants)) {
                goto out;
        }

        if (work_busy(&ring->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                goto out;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(ring->persistent_gnt_c, num_clean);
        if ((num_clean == 0) ||
            (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
                goto out;

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_grant (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug("Going to purge %u persistent grants\n", num_clean);

        BUG_ON(!list_empty(&ring->persistent_purge_list));
        root = &ring->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                       BLKBACK_INVALID_HANDLE);

                if (clean_used) {
                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
                        continue;
                }

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &ring->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge in order to reach
         * the requested number.
         */
        if (!scan_used && !clean_used) {
                pr_debug("Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        if (!clean_used) {
                pr_debug("Finished scanning for grants to clean, removing used flag\n");
                clean_used = true;
                goto purge_list;
        }

        ring->persistent_gnt_c -= (total - num_clean);
        ring->blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        schedule_work(&ring->persistent_purge_work);
        pr_debug("Purged %u/%u\n", (total - num_clean), total);

out:
        return;
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        if (!list_empty(&ring->pending_free)) {
                req = list_entry(ring->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        was_empty = list_empty(&ring->pending_free);
        list_add(&req->free_list, &ring->pending_free);
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        if (was_empty)
                wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info("VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn("Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn("Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn("Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn("Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
        ring->waiting_reqs = 1;
        wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

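/*
 * Dump the ring's request counters and schedule the next print in ten
 * seconds (only called when the 'log_stats' module parameter is set).
 */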
static void print_stats(struct xen_blkif_ring *ring)
{
        pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
                " | ds %4llu | pg: %4u/%4d\n",
                current->comm, ring->st_oo_req,
                ring->st_rd_req, ring->st_wr_req,
                ring->st_f_req, ring->st_ds_req,
                ring->persistent_gnt_c,
                xen_blkif_max_pgrants);
        ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        ring->st_rd_req = 0;
        ring->st_wr_req = 0;
        ring->st_oo_req = 0;
        ring->st_ds_req = 0;
}

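/*
 * Main loop of the per-ring kernel thread: wait for the frontend to post
 * requests, dispatch them, and periodically purge unused persistent grants
 * and shrink the free page pool.
 */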
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif_ring *ring = arg;
        struct xen_blkif *blkif = ring->blkif;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;

        xen_blkif_get(blkif);

        set_freezable();
        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        ring->wq,
                        ring->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        ring->pending_free_wq,
                        !list_empty(&ring->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                ring->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(ring);
                if (ret > 0)
                        ring->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(ring->shutdown_wq,
                                                 kthread_should_stop());

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, ring->next_lru)) {
                        purge_persistent_gnt(ring);
                        ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, ring->st_print))
                        print_stats(ring);
        }

        /* Drain pending purge work */
        flush_work(&ring->persistent_purge_work);

        if (log_stats)
                print_stats(ring);

        ring->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
                free_persistent_gnts(ring, &ring->persistent_gnts,
                                     ring->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
        ring->persistent_gnt_c = 0;

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(ring, 0 /* All */);
}

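/*
 * Release the grants of a request: persistent grants are simply put back,
 * while non-persistent ones are queued in unmap_ops/unmap_pages. Returns
 * the number of unmap operations queued.
 */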
static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif_ring *ring,
        struct grant_page **pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
{
        unsigned int i, invcount = 0;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(ring, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
        struct pending_req *pending_req = (struct pending_req *)(data->data);
        struct xen_blkif_ring *ring = pending_req->ring;
        struct xen_blkif *blkif = ring->blkif;

        /*
         * BUG_ON used to reproduce existing behaviour,
         * but is this the best way to deal with this?
         */
        BUG_ON(result);

        put_free_pages(ring, data->pages, data->count);
        make_response(ring, pending_req->id,
                      pending_req->operation, pending_req->status);
        free_req(ring, pending_req);
        /*
         * Make sure the request is freed before releasing blkif,
         * or there could be a race between free_req and the
         * cleanup done in xen_blkif_free during shutdown.
         *
         * NB: The fact that we might try to wake up pending_free_wq
         * before drain_complete (in case there's a drain going on)
         * is not a problem with our current implementation, because
         * we can be sure there's no thread waiting on pending_free_wq
         * if there's a drain going on, but it has to be taken into
         * account if the current model is changed.
         */
        if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
                complete(&blkif->drain_complete);
        }
        xen_blkif_put(blkif);
}

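/*
 * Unmap the grants of a finished request asynchronously; the response is
 * pushed onto the ring from xen_blkbk_unmap_and_respond_callback() once
 * the unmap has completed.
 */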
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
        struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
        struct xen_blkif_ring *ring = req->ring;
        struct grant_page **pages = req->segments;
        unsigned int invcount;

        invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);

        work->data = req;
        work->done = xen_blkbk_unmap_and_respond_callback;
        work->unmap_ops = req->unmap;
        work->kunmap_ops = NULL;
        work->pages = req->unmap_pages;
        work->count = invcount;

        gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int invcount = 0;
        int ret;

        while (num) {
                unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

                invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
                                                   unmap, unmap_pages);
                if (invcount) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(ring, unmap_pages, invcount);
                }
                pages += batch;
                num -= batch;
        }
}

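/*
 * Map the frontend's grant references so the backend can access the I/O
 * pages, reusing already-mapped persistent grants where possible and
 * batching the map hypercalls in groups of up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */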
static int xen_blkbk_map(struct xen_blkif_ring *ring,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;
        struct xen_blkif *blkif = ring->blkif;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts) {
                        persistent_gnt = get_persistent_gnt(
                                ring,
                                pages[i]->gref);
                }

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(ring, &pages[i]->page))
                                goto out_of_memory;
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
                                put_free_pages(ring, &pages[seg_idx]->page, 1);
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct,
                                 * map this grant non-persistently.
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(ring,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, ring->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert("%s: out of memory\n", __func__);
        put_free_pages(ring, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
                           pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

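/*
 * Map the indirect descriptor pages of a BLKIF_OP_INDIRECT request and
 * copy the per-segment information into 'seg', validating each segment's
 * sector range before use.
 */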
static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif_ring *ring = pending_req->ring;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;

        nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;

                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;

                pending_req->segments[n]->gref = segments[i].gref;

                first_sect = READ_ONCE(segments[i].first_sect);
                last_sect = READ_ONCE(segments[i].last_sect);
                if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
                        rc = -EINVAL;
                        goto unmap;
                }

                seg[n].nsec = last_sect - first_sect + 1;
                seg[n].offset = first_sect << 9;
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(ring, pages, indirect_grefs);
        return rc;
}

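/*
 * Handle a BLKIF_OP_DISCARD request: validate it against the vbd and
 * forward it to blkdev_issue_discard(), optionally as a secure discard.
 */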
static int dispatch_discard_io(struct xen_blkif_ring *ring,
                               struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct xen_blkif *blkif = ring->blkif;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;
        struct phys_req preq;

        xen_blkif_get(blkif);

        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects = req->u.discard.nr_sectors;

        err = xen_vbd_translate(&preq, blkif, WRITE);
        if (err) {
                pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
                goto fail_response;
        }
        ring->st_ds_req++;

        secure = (blkif->vbd.discard_secure &&
                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug("discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(ring, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(ring, pending_req);
        make_response(ring, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}

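/*
 * Wait until all I/O in flight on this ring has completed; used to order
 * a barrier write behind the outstanding requests.
 */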
static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
        struct xen_blkif *blkif = ring->blkif;

        atomic_set(&blkif->drain, 1);
        do {
                if (atomic_read(&ring->inflight) == 0)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback for the bios of a request. Called via bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug("flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                   (error == -EOPNOTSUPP)) {
                pr_debug("write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug("Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bios have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt))
                xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
        __end_block_io_op(bio->bi_private, bio->bi_error);
        bio_put(bio);
}

/*
 * Copy the 'struct blkif_request' from the ring buffer (it carries the
 * sectors we want, how many of them, grant references, etc) and transmute
 * it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring)
{
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
                rc = blk_rings->common.rsp_prod_pvt;
                pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
                        rp, rc, rp - rc, ring->blkif->vbd.pdevice);
                return -EACCES;
        }
        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(ring);
                if (NULL == pending_req) {
                        ring->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (ring->blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(ring, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(ring, pending_req);
                        if (dispatch_discard_io(ring, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(ring, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif_ring *ring)
{
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(ring);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf *seg = pending_req->seg;
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
        unsigned short req_operation;

        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;

        if ((req->operation == BLKIF_OP_INDIRECT) &&
            (req_operation != BLKIF_OP_READ) &&
            (req_operation != BLKIF_OP_WRITE)) {
                pr_debug("Invalid indirect operation (%u)\n", req_operation);
                goto fail_response;
        }

        switch (req_operation) {
        case BLKIF_OP_READ:
                ring->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                ring->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
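                /* fall through - a barrier drains in-flight I/O, then flushes */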
        case BLKIF_OP_FLUSH_DISKCACHE:
                ring->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
                     (nseg > MAX_INDIRECT_SEGMENTS))) {
                pr_debug("Bad number of segments in request (%d)\n", nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.nr_sects = 0;

        pending_req->ring = ring;
        pending_req->id = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status = BLKIF_RSP_OKAY;
        pending_req->nr_segs = nseg;

        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev = req->u.rw.handle;
                preq.sector_number = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
                        pages[i]->gref = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                      req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
                        if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
                            (req->u.rw.seg[i].last_sect <
                             req->u.rw.seg[i].first_sect))
                                goto fail_response;
                        preq.nr_sects += seg[i].nsec;
                }
        } else {
                preq.dev = req->u.indirect.handle;
                preq.sector_number = req->u.indirect.sector_number;
                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
                        goto fail_response;
        }

        if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
                pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         ring->blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug("Misaligned I/O request from domain %d\n",
                                 ring->blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/O and, once that has been completed,
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->ring);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(pending_req))
                goto fail_flush;

        /*
         * This corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(ring->blkif);
        atomic_inc(&ring->inflight);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i]->page,
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
                        bio = bio_alloc(GFP_KERNEL, nr_iovecs);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io = end_block_io_op;
                        bio->bi_iter.bi_sector = preq.sector_number;
                        bio->bi_rw = operation;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio->bi_bdev = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io = end_block_io_op;
                bio->bi_rw = operation;
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(biolist[i]);

        /* Let the I/Os go.. */
        blk_finish_plug(&plug);

        if (operation == READ)
                ring->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                ring->st_wr_sect += preq.nr_sects;

        return 0;

fail_flush:
        xen_blkbk_unmap(ring, pending_req->segments,
                        pending_req->nr_segs);
fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
        free_req(ring, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring indicating how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response resp;
        unsigned long flags;
        union blkif_back_rings *blk_rings;
        int notify;

        resp.id = id;
        resp.operation = op;
        resp.status = st;

        spin_lock_irqsave(&ring->blk_ring_lock, flags);
        blk_rings = &ring->blk_rings;
        /* Place on the response ring for the relevant domain. */
        switch (ring->blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(ring->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
                xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
        }

        if (xenblk_max_queues == 0)
                xenblk_max_queues = num_online_cpus();

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");