drivers/block/xen-blkback/blkback.c
1/******************************************************************************
2 *
3 * Back-end of the driver for virtual block devices. This portion of the
4 * driver exports a 'unified' block-device interface that can be accessed
5 * by any operating system that implements a compatible front end. A
6 * reference front-end implementation can be found in:
7 * drivers/block/xen-blkfront.c
8 *
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10 * Copyright (c) 2005, Christopher Clark
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
36
37#define pr_fmt(fmt) "xen-blkback: " fmt
38
39#include <linux/spinlock.h>
40#include <linux/kthread.h>
41#include <linux/list.h>
42#include <linux/delay.h>
43#include <linux/freezer.h>
44#include <linux/bitmap.h>
45
46#include <xen/events.h>
47#include <xen/page.h>
48#include <xen/xen.h>
49#include <asm/xen/hypervisor.h>
50#include <asm/xen/hypercall.h>
51#include <xen/balloon.h>
52#include <xen/grant_table.h>
53#include "common.h"
54
55/*
56 * Maximum number of unused free pages to keep in the internal buffer.
57 * Setting this to a low value reduces the memory used in each backend,
58 * but can incur a performance penalty.
59 *
60 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
61 * can be set to a lower value, which might degrade performance on some
62 * I/O-intensive workloads.
63 */
64
65static int max_buffer_pages = 1024;
66module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
67MODULE_PARM_DESC(max_buffer_pages,
68"Maximum number of free pages to keep in each block backend buffer");
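/*
 * Illustrative example only (assuming the module is loaded as xen_blkback):
 *   echo 2048 > /sys/module/xen_blkback/parameters/max_buffer_pages
 */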
69
70/*
71 * Maximum number of grants to map persistently in blkback. For maximum
72 * performance this should be the total number of grants that can be used
73 * to fill the ring, but since this might become too high, especially with
74 * the use of indirect descriptors, we set it to a value that provides good
75 * performance without using too much memory.
76 *
77 * When the list of persistent grants is full we clean it up using an LRU
78 * algorithm.
79 */
80
81static int max_pgrants = 1056;
82module_param_named(max_persistent_grants, max_pgrants, int, 0644);
83MODULE_PARM_DESC(max_persistent_grants,
84 "Maximum number of grants to map persistently");
85
86/*
87 * How long a persistent grant is allowed to remain allocated without being in
88 * use. The time is in seconds; 0 means unlimited.
89 */
90
91static unsigned int pgrant_timeout = 60;
92module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
93 uint, 0644);
94MODULE_PARM_DESC(persistent_grant_unused_seconds,
95 "Time in seconds an unused persistent grant is allowed to "
96		 "remain allocated. Default is 60; 0 means unlimited.");
97
98/*
99 * Maximum number of rings/queues blkback supports; allow as many queues as
100 * there are CPUs if the user has not specified a value.
101 */
102unsigned int xenblk_max_queues;
103module_param_named(max_queues, xenblk_max_queues, uint, 0644);
104MODULE_PARM_DESC(max_queues,
105 "Maximum number of hardware queues per virtual disk. " \
106 "By default it is the number of online CPUs.");
107
108/*
109 * Maximum order of pages to be used for the shared ring between front and
110 * backend; 4KB page granularity is used.
111 */
112unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
113module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
114MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
115/*
116 * The LRU mechanism to clean the lists of persistent grants needs to
117 * be executed periodically. The time interval between consecutive executions
118 * of the purge mechanism is set in ms.
119 */
120#define LRU_INTERVAL 100
121
122/*
123 * When the persistent grants list is full we will remove unused grants
124 * from the list. This is the percentage of grants to be removed at each
125 * LRU execution.
126 */
127#define LRU_PERCENT_CLEAN 5
128
129/* Run-time switchable: /sys/module/blkback/parameters/ */
130static unsigned int log_stats;
131module_param(log_stats, int, 0644);
132
133#define BLKBACK_INVALID_HANDLE (~0)
134
135static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
136{
137 return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
138 HZ * pgrant_timeout);
139}
140
141#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
142
143static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
144static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
145 struct blkif_request *req,
146 struct pending_req *pending_req);
147static void make_response(struct xen_blkif_ring *ring, u64 id,
148 unsigned short op, int st);
149
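/*
 * Walk every node of a persistent-grant rbtree while allowing the current
 * entry to be erased and freed: the next node is cached in 'n' before the
 * loop body runs.  The walk terminates once rb_first()/rb_next() return
 * NULL, because container_of() then yields a pointer whose 'node' member
 * lies at address NULL.
 */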
150#define foreach_grant_safe(pos, n, rbtree, node) \
151 for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
152 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
153 &(pos)->node != NULL; \
154 (pos) = container_of(n, typeof(*(pos)), node), \
155 (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
156
157
158/*
159 * We don't need locking around the persistent grant helpers
160 * because blkback uses a single thread for each backend, so we
161 * can be sure that these functions will never be called concurrently.
162 *
163 * The only exception to that is put_persistent_gnt, which can be called
164 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
165 * bit operations to modify the flags of a persistent grant and to count
166 * the number of used grants.
167 */
168static int add_persistent_gnt(struct xen_blkif_ring *ring,
169 struct persistent_gnt *persistent_gnt)
170{
171 struct rb_node **new = NULL, *parent = NULL;
172 struct persistent_gnt *this;
173 struct xen_blkif *blkif = ring->blkif;
174
175 if (ring->persistent_gnt_c >= max_pgrants) {
176 if (!blkif->vbd.overflow_max_grants)
177 blkif->vbd.overflow_max_grants = 1;
178 return -EBUSY;
179 }
180 /* Figure out where to put new node */
181 new = &ring->persistent_gnts.rb_node;
182 while (*new) {
183 this = container_of(*new, struct persistent_gnt, node);
184
185 parent = *new;
186 if (persistent_gnt->gnt < this->gnt)
187 new = &((*new)->rb_left);
188 else if (persistent_gnt->gnt > this->gnt)
189 new = &((*new)->rb_right);
190 else {
191 pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
192 return -EINVAL;
193 }
194 }
195
196 persistent_gnt->active = true;
197 /* Add new node and rebalance tree. */
198 rb_link_node(&(persistent_gnt->node), parent, new);
199 rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
200 ring->persistent_gnt_c++;
201 atomic_inc(&ring->persistent_gnt_in_use);
202 return 0;
203}
204
205static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
206 grant_ref_t gref)
207{
208 struct persistent_gnt *data;
209 struct rb_node *node = NULL;
210
211 node = ring->persistent_gnts.rb_node;
212 while (node) {
213 data = container_of(node, struct persistent_gnt, node);
214
215 if (gref < data->gnt)
216 node = node->rb_left;
217 else if (gref > data->gnt)
218 node = node->rb_right;
219 else {
220 if (data->active) {
221 pr_alert_ratelimited("requesting a grant already in use\n");
222 return NULL;
223 }
224 data->active = true;
225 atomic_inc(&ring->persistent_gnt_in_use);
226 return data;
227 }
228 }
229 return NULL;
230}
231
232static void put_persistent_gnt(struct xen_blkif_ring *ring,
233 struct persistent_gnt *persistent_gnt)
234{
235 if (!persistent_gnt->active)
236 pr_alert_ratelimited("freeing a grant already unused\n");
237 persistent_gnt->last_used = jiffies;
238 persistent_gnt->active = false;
239 atomic_dec(&ring->persistent_gnt_in_use);
240}
241
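/*
 * Unmap and free every persistent grant in the given rbtree, issuing the
 * unmap operations in batches of up to BLKIF_MAX_SEGMENTS_PER_REQUEST and
 * returning the pages to the ring's free-page cache.
 */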
242static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
243 unsigned int num)
244{
245 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
246 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
247 struct persistent_gnt *persistent_gnt;
248 struct rb_node *n;
249 int segs_to_unmap = 0;
250 struct gntab_unmap_queue_data unmap_data;
251
252 unmap_data.pages = pages;
253 unmap_data.unmap_ops = unmap;
254 unmap_data.kunmap_ops = NULL;
255
256 foreach_grant_safe(persistent_gnt, n, root, node) {
257 BUG_ON(persistent_gnt->handle ==
258 BLKBACK_INVALID_HANDLE);
259 gnttab_set_unmap_op(&unmap[segs_to_unmap],
260 (unsigned long) pfn_to_kaddr(page_to_pfn(
261 persistent_gnt->page)),
262 GNTMAP_host_map,
263 persistent_gnt->handle);
264
265 pages[segs_to_unmap] = persistent_gnt->page;
266
267 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
268 !rb_next(&persistent_gnt->node)) {
269
270 unmap_data.count = segs_to_unmap;
271 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
272
273 gnttab_page_cache_put(&ring->free_pages, pages,
274 segs_to_unmap);
275 segs_to_unmap = 0;
276 }
277
278 rb_erase(&persistent_gnt->node, root);
279 kfree(persistent_gnt);
280 num--;
281 }
282 BUG_ON(num != 0);
283}
284
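/*
 * Deferred work: unmap and free the grants that purge_persistent_gnt()
 * moved onto persistent_purge_list, again batching the unmap operations.
 */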
285void xen_blkbk_unmap_purged_grants(struct work_struct *work)
286{
287 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
288 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
289 struct persistent_gnt *persistent_gnt;
290 int segs_to_unmap = 0;
291 struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
292 struct gntab_unmap_queue_data unmap_data;
293
294 unmap_data.pages = pages;
295 unmap_data.unmap_ops = unmap;
296 unmap_data.kunmap_ops = NULL;
297
298	while (!list_empty(&ring->persistent_purge_list)) {
299 persistent_gnt = list_first_entry(&ring->persistent_purge_list,
300 struct persistent_gnt,
301 remove_node);
302 list_del(&persistent_gnt->remove_node);
303
304 gnttab_set_unmap_op(&unmap[segs_to_unmap],
305 vaddr(persistent_gnt->page),
306 GNTMAP_host_map,
307 persistent_gnt->handle);
308
309 pages[segs_to_unmap] = persistent_gnt->page;
310
311 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
312 unmap_data.count = segs_to_unmap;
313 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
314 gnttab_page_cache_put(&ring->free_pages, pages,
315 segs_to_unmap);
316 segs_to_unmap = 0;
317 }
318 kfree(persistent_gnt);
319 }
320 if (segs_to_unmap > 0) {
321 unmap_data.count = segs_to_unmap;
322 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
323 gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
324 }
325}
326
327static void purge_persistent_gnt(struct xen_blkif_ring *ring)
328{
329 struct persistent_gnt *persistent_gnt;
330 struct rb_node *n;
331 unsigned int num_clean, total;
332 bool scan_used = false;
333 struct rb_root *root;
334
335 if (work_busy(&ring->persistent_purge_work)) {
336 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
337 goto out;
338 }
339
340 if (ring->persistent_gnt_c < max_pgrants ||
341 (ring->persistent_gnt_c == max_pgrants &&
342 !ring->blkif->vbd.overflow_max_grants)) {
343 num_clean = 0;
344 } else {
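		/*
		 * Target LRU_PERCENT_CLEAN percent of max_pgrants plus the
		 * overflow above max_pgrants, but never more than the number
		 * of grants currently tracked.
		 */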
345 num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
346 num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
347 num_clean = min(ring->persistent_gnt_c, num_clean);
348 pr_debug("Going to purge at least %u persistent grants\n",
349 num_clean);
350 }
351
352 /*
353 * At this point, we can be sure that there will be no calls
354 * to get_persistent_gnt (because we are executing this code from
355 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
356 * which means that the number of currently used grants will go down,
357 * but never up, so we will always be able to remove the requested
358 * number of grants.
359 */
360
361 total = 0;
362
363 BUG_ON(!list_empty(&ring->persistent_purge_list));
364 root = &ring->persistent_gnts;
365purge_list:
366 foreach_grant_safe(persistent_gnt, n, root, node) {
367 BUG_ON(persistent_gnt->handle ==
368 BLKBACK_INVALID_HANDLE);
369
370 if (persistent_gnt->active)
371 continue;
372 if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
373 continue;
374 if (scan_used && total >= num_clean)
375 continue;
376
377 rb_erase(&persistent_gnt->node, root);
378 list_add(&persistent_gnt->remove_node,
379 &ring->persistent_purge_list);
380 total++;
381 }
382 /*
383 * Check whether we also need to start cleaning
384 * grants that were used since the last purge in order to cope
385 * with the requested number.
386 */
387 if (!scan_used && total < num_clean) {
388 pr_debug("Still missing %u purged frames\n", num_clean - total);
389 scan_used = true;
390 goto purge_list;
391 }
392
393 if (total) {
394 ring->persistent_gnt_c -= total;
395 ring->blkif->vbd.overflow_max_grants = 0;
396
397 /* We can defer this work */
398 schedule_work(&ring->persistent_purge_work);
399		pr_debug("Purged %u/%u\n", total, num_clean);
400 }
401
402out:
403 return;
404}
405
406/*
407 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
408 */
409static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
410{
411 struct pending_req *req = NULL;
412 unsigned long flags;
413
414 spin_lock_irqsave(&ring->pending_free_lock, flags);
415 if (!list_empty(&ring->pending_free)) {
416 req = list_entry(ring->pending_free.next, struct pending_req,
417 free_list);
418 list_del(&req->free_list);
419 }
420 spin_unlock_irqrestore(&ring->pending_free_lock, flags);
421 return req;
422}
423
424/*
425 * Return the 'pending_req' structure back to the free pool. We also
426 * wake up the thread if it was waiting for a free request.
427 */
428static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
429{
430 unsigned long flags;
431 int was_empty;
432
433 spin_lock_irqsave(&ring->pending_free_lock, flags);
434 was_empty = list_empty(&ring->pending_free);
435 list_add(&req->free_list, &ring->pending_free);
436 spin_unlock_irqrestore(&ring->pending_free_lock, flags);
437 if (was_empty)
438 wake_up(&ring->pending_free_wq);
439}
440
441/*
442 * Routines for managing virtual block devices (vbds).
443 */
444static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
445 int operation)
446{
447 struct xen_vbd *vbd = &blkif->vbd;
448 int rc = -EACCES;
449
450 if ((operation != REQ_OP_READ) && vbd->readonly)
451 goto out;
452
453 if (likely(req->nr_sects)) {
454 blkif_sector_t end = req->sector_number + req->nr_sects;
455
456 if (unlikely(end < req->sector_number))
457 goto out;
458 if (unlikely(end > vbd_sz(vbd)))
459 goto out;
460 }
461
462 req->dev = vbd->pdevice;
463 req->bdev = vbd->bdev;
464 rc = 0;
465
466 out:
467 return rc;
468}
469
470static void xen_vbd_resize(struct xen_blkif *blkif)
471{
472 struct xen_vbd *vbd = &blkif->vbd;
473 struct xenbus_transaction xbt;
474 int err;
475 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
476 unsigned long long new_size = vbd_sz(vbd);
477
478 pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
479 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
480 pr_info("VBD Resize: new size %llu\n", new_size);
481 vbd->size = new_size;
482again:
483 err = xenbus_transaction_start(&xbt);
484 if (err) {
485 pr_warn("Error starting transaction\n");
486 return;
487 }
488 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
489 (unsigned long long)vbd_sz(vbd));
490 if (err) {
491 pr_warn("Error writing new size\n");
492 goto abort;
493 }
494 /*
495 * Write the current state; we will use this to synchronize
496 * the front-end. If the current state is "connected" the
497 * front-end will get the new size information online.
498 */
499 err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
500 if (err) {
501 pr_warn("Error writing the state\n");
502 goto abort;
503 }
504
505 err = xenbus_transaction_end(xbt, 0);
506 if (err == -EAGAIN)
507 goto again;
508 if (err)
509 pr_warn("Error ending transaction\n");
510 return;
511abort:
512 xenbus_transaction_end(xbt, 1);
513}
514
515/*
516 * Notification from the guest OS.
517 */
518static void blkif_notify_work(struct xen_blkif_ring *ring)
519{
520 ring->waiting_reqs = 1;
521 wake_up(&ring->wq);
522}
523
524irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
525{
526 blkif_notify_work(dev_id);
527 return IRQ_HANDLED;
528}
529
530/*
531 * SCHEDULER FUNCTIONS
532 */
533
534static void print_stats(struct xen_blkif_ring *ring)
535{
536 pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
537 " | ds %4llu | pg: %4u/%4d\n",
538 current->comm, ring->st_oo_req,
539 ring->st_rd_req, ring->st_wr_req,
540 ring->st_f_req, ring->st_ds_req,
541 ring->persistent_gnt_c, max_pgrants);
542 ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
543 ring->st_rd_req = 0;
544 ring->st_wr_req = 0;
545 ring->st_oo_req = 0;
546 ring->st_ds_req = 0;
547}
548
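/*
 * Main loop of the per-ring backend thread: wait for frontend requests (or
 * for the LRU interval to expire), process the shared ring, issue the
 * event-channel lateeoi once no further work is pending, and periodically
 * purge persistent grants and shrink the free-page cache.
 */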
549int xen_blkif_schedule(void *arg)
550{
551 struct xen_blkif_ring *ring = arg;
552 struct xen_blkif *blkif = ring->blkif;
553 struct xen_vbd *vbd = &blkif->vbd;
554 unsigned long timeout;
555 int ret;
556 bool do_eoi;
557 unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
558
559 set_freezable();
560 while (!kthread_should_stop()) {
561 if (try_to_freeze())
562 continue;
563 if (unlikely(vbd->size != vbd_sz(vbd)))
564 xen_vbd_resize(blkif);
565
566 timeout = msecs_to_jiffies(LRU_INTERVAL);
567
568 timeout = wait_event_interruptible_timeout(
569 ring->wq,
570 ring->waiting_reqs || kthread_should_stop(),
571 timeout);
572 if (timeout == 0)
573 goto purge_gnt_list;
574 timeout = wait_event_interruptible_timeout(
575 ring->pending_free_wq,
576 !list_empty(&ring->pending_free) ||
577 kthread_should_stop(),
578 timeout);
579 if (timeout == 0)
580 goto purge_gnt_list;
581
582 do_eoi = ring->waiting_reqs;
583
584 ring->waiting_reqs = 0;
585 smp_mb(); /* clear flag *before* checking for work */
586
587 ret = do_block_io_op(ring, &eoi_flags);
588 if (ret > 0)
589 ring->waiting_reqs = 1;
590 if (ret == -EACCES)
591 wait_event_interruptible(ring->shutdown_wq,
592 kthread_should_stop());
593
594 if (do_eoi && !ring->waiting_reqs) {
595 xen_irq_lateeoi(ring->irq, eoi_flags);
596 eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
597 }
598
599purge_gnt_list:
600 if (blkif->vbd.feature_gnt_persistent &&
601 time_after(jiffies, ring->next_lru)) {
602 purge_persistent_gnt(ring);
603 ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
604 }
605
606 /* Shrink the free pages pool if it is too large. */
607 if (time_before(jiffies, blkif->buffer_squeeze_end))
608 gnttab_page_cache_shrink(&ring->free_pages, 0);
609 else
610 gnttab_page_cache_shrink(&ring->free_pages,
611 max_buffer_pages);
612
613 if (log_stats && time_after(jiffies, ring->st_print))
614 print_stats(ring);
615 }
616
617 /* Drain pending purge work */
618 flush_work(&ring->persistent_purge_work);
619
620 if (log_stats)
621 print_stats(ring);
622
623 ring->xenblkd = NULL;
624
625 return 0;
626}
627
628/*
629 * Remove persistent grants and empty the pool of free pages
630 */
631void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
632{
633 /* Free all persistent grant pages */
634 if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
635 free_persistent_gnts(ring, &ring->persistent_gnts,
636 ring->persistent_gnt_c);
637
638 BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
639 ring->persistent_gnt_c = 0;
640
641 /* Since we are shutting down remove all pages from the buffer */
642 gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
643}
644
645static unsigned int xen_blkbk_unmap_prepare(
646 struct xen_blkif_ring *ring,
647 struct grant_page **pages,
648 unsigned int num,
649 struct gnttab_unmap_grant_ref *unmap_ops,
650 struct page **unmap_pages)
651{
652 unsigned int i, invcount = 0;
653
654 for (i = 0; i < num; i++) {
655 if (pages[i]->persistent_gnt != NULL) {
656 put_persistent_gnt(ring, pages[i]->persistent_gnt);
657 continue;
658 }
659 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
660 continue;
661 unmap_pages[invcount] = pages[i]->page;
662 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
663 GNTMAP_host_map, pages[i]->handle);
664 pages[i]->handle = BLKBACK_INVALID_HANDLE;
665 invcount++;
666 }
667
668 return invcount;
669}
670
671static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
672{
673 struct pending_req *pending_req = (struct pending_req *)(data->data);
674 struct xen_blkif_ring *ring = pending_req->ring;
675 struct xen_blkif *blkif = ring->blkif;
676
677 /* BUG_ON used to reproduce existing behaviour,
678 but is this the best way to deal with this? */
679 BUG_ON(result);
680
681 gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
682 make_response(ring, pending_req->id,
683 pending_req->operation, pending_req->status);
684 free_req(ring, pending_req);
685 /*
686 * Make sure the request is freed before releasing blkif,
687 * or there could be a race between free_req and the
688 * cleanup done in xen_blkif_free during shutdown.
689 *
690 * NB: The fact that we might try to wake up pending_free_wq
691 * before drain_complete (in case there's a drain going on)
692 * is not a problem with our current implementation
693 * because we can be sure there's no thread waiting on
694 * pending_free_wq if there's a drain going on, but it has
695 * to be taken into account if the current model is changed.
696 */
697 if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
698 complete(&blkif->drain_complete);
699 }
700 xen_blkif_put(blkif);
701}
702
703static void xen_blkbk_unmap_and_respond(struct pending_req *req)
704{
705 struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
706 struct xen_blkif_ring *ring = req->ring;
707 struct grant_page **pages = req->segments;
708 unsigned int invcount;
709
710 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
711 req->unmap, req->unmap_pages);
712
713 work->data = req;
714 work->done = xen_blkbk_unmap_and_respond_callback;
715 work->unmap_ops = req->unmap;
716 work->kunmap_ops = NULL;
717 work->pages = req->unmap_pages;
718 work->count = invcount;
719
720 gnttab_unmap_refs_async(&req->gnttab_unmap_data);
721}
722
723
724/*
725 * Unmap the grant references.
726 *
727 * This could accumulate ops up to the batch size to reduce the number
728 * of hypercalls, but since this is only used in error paths there's
729 * no real need.
730 */
731static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
732 struct grant_page *pages[],
733 int num)
734{
735 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
736 struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
737 unsigned int invcount = 0;
738 int ret;
739
740 while (num) {
741 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
742
743 invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
744 unmap, unmap_pages);
745 if (invcount) {
746 ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
747 BUG_ON(ret);
748 gnttab_page_cache_put(&ring->free_pages, unmap_pages,
749 invcount);
750 }
751 pages += batch;
752 num -= batch;
753 }
754}
755
756static int xen_blkbk_map(struct xen_blkif_ring *ring,
757 struct grant_page *pages[],
758 int num, bool ro)
759{
760 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
761 struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
762 struct persistent_gnt *persistent_gnt = NULL;
763 phys_addr_t addr = 0;
764 int i, seg_idx, new_map_idx;
765 int segs_to_map = 0;
766 int ret = 0;
767 int last_map = 0, map_until = 0;
768 int use_persistent_gnts;
769 struct xen_blkif *blkif = ring->blkif;
770
771 use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
772
773 /*
774	 * Fill out preq.nr_sects with the proper number of sectors, and set up
775	 * map[..] with the PFN of the page in our domain along with the
776	 * corresponding grant reference for each page.
777 */
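	/*
	 * Grants are mapped in batches of at most
	 * BLKIF_MAX_SEGMENTS_PER_REQUEST; the loop below restarts from
	 * 'map_until' until all 'num' pages have been handled.
	 */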
778again:
779 for (i = map_until; i < num; i++) {
780 uint32_t flags;
781
782 if (use_persistent_gnts) {
783 persistent_gnt = get_persistent_gnt(
784 ring,
785 pages[i]->gref);
786 }
787
788 if (persistent_gnt) {
789 /*
790 * We are using persistent grants and
791 * the grant is already mapped
792 */
793 pages[i]->page = persistent_gnt->page;
794 pages[i]->persistent_gnt = persistent_gnt;
795 } else {
796 if (gnttab_page_cache_get(&ring->free_pages,
797 &pages[i]->page))
798 goto out_of_memory;
799 addr = vaddr(pages[i]->page);
800 pages_to_gnt[segs_to_map] = pages[i]->page;
801 pages[i]->persistent_gnt = NULL;
802 flags = GNTMAP_host_map;
803 if (!use_persistent_gnts && ro)
804 flags |= GNTMAP_readonly;
805 gnttab_set_map_op(&map[segs_to_map++], addr,
806 flags, pages[i]->gref,
807 blkif->domid);
808 }
809 map_until = i + 1;
810 if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
811 break;
812 }
813
814 if (segs_to_map)
815 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
816
817 /*
818 * Now swizzle the MFN in our domain with the MFN from the other domain
819 * so that when we access vaddr(pending_req,i) it has the contents of
820 * the page from the other domain.
821 */
822 for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
823 if (!pages[seg_idx]->persistent_gnt) {
824 /* This is a newly mapped grant */
825 BUG_ON(new_map_idx >= segs_to_map);
826 if (unlikely(map[new_map_idx].status != 0)) {
827 pr_debug("invalid buffer -- could not remap it\n");
828 gnttab_page_cache_put(&ring->free_pages,
829 &pages[seg_idx]->page, 1);
830 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
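				/* Record the failure (force a non-zero return) and carry on. */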
831 ret |= !ret;
832 goto next;
833 }
834 pages[seg_idx]->handle = map[new_map_idx].handle;
835 } else {
836 continue;
837 }
838 if (use_persistent_gnts &&
839 ring->persistent_gnt_c < max_pgrants) {
840 /*
841 * We are using persistent grants, the grant is
842 * not mapped but we might have room for it.
843 */
844 persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
845 GFP_KERNEL);
846 if (!persistent_gnt) {
847 /*
848 * If we don't have enough memory to
849				 * allocate the persistent_gnt struct,
850				 * map this grant non-persistently
851 */
852 goto next;
853 }
854 persistent_gnt->gnt = map[new_map_idx].ref;
855 persistent_gnt->handle = map[new_map_idx].handle;
856 persistent_gnt->page = pages[seg_idx]->page;
857 if (add_persistent_gnt(ring,
858 persistent_gnt)) {
859 kfree(persistent_gnt);
860 persistent_gnt = NULL;
861 goto next;
862 }
863 pages[seg_idx]->persistent_gnt = persistent_gnt;
864 pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
865 persistent_gnt->gnt, ring->persistent_gnt_c,
866 max_pgrants);
867 goto next;
868 }
869 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
870 blkif->vbd.overflow_max_grants = 1;
871 pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
872 blkif->domid, blkif->vbd.handle);
873 }
874 /*
875 * We could not map this grant persistently, so use it as
876 * a non-persistent grant.
877 */
878next:
879 new_map_idx++;
880 }
881 segs_to_map = 0;
882 last_map = map_until;
883 if (map_until != num)
884 goto again;
885
886 return ret;
887
888out_of_memory:
889 pr_alert("%s: out of memory\n", __func__);
890 gnttab_page_cache_put(&ring->free_pages, pages_to_gnt, segs_to_map);
891 for (i = last_map; i < num; i++)
892 pages[i]->handle = BLKBACK_INVALID_HANDLE;
893 return -ENOMEM;
894}
895
896static int xen_blkbk_map_seg(struct pending_req *pending_req)
897{
898 int rc;
899
900 rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
901 pending_req->nr_segs,
902 (pending_req->operation != BLKIF_OP_READ));
903
904 return rc;
905}
906
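/*
 * Map the indirect descriptor pages of a BLKIF_OP_INDIRECT request and copy
 * each segment's grant reference and sector range into the pending request,
 * validating the sector bounds as we go.
 */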
907static int xen_blkbk_parse_indirect(struct blkif_request *req,
908 struct pending_req *pending_req,
909 struct seg_buf seg[],
910 struct phys_req *preq)
911{
912 struct grant_page **pages = pending_req->indirect_pages;
913 struct xen_blkif_ring *ring = pending_req->ring;
914 int indirect_grefs, rc, n, nseg, i;
915 struct blkif_request_segment *segments = NULL;
916
917 nseg = pending_req->nr_segs;
918 indirect_grefs = INDIRECT_PAGES(nseg);
919 BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
920
921 for (i = 0; i < indirect_grefs; i++)
922 pages[i]->gref = req->u.indirect.indirect_grefs[i];
923
924 rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
925 if (rc)
926 goto unmap;
927
928 for (n = 0, i = 0; n < nseg; n++) {
929 uint8_t first_sect, last_sect;
930
931 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
932 /* Map indirect segments */
933 if (segments)
934 kunmap_atomic(segments);
935 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
936 }
937 i = n % SEGS_PER_INDIRECT_FRAME;
938
939 pending_req->segments[n]->gref = segments[i].gref;
940
941 first_sect = READ_ONCE(segments[i].first_sect);
942 last_sect = READ_ONCE(segments[i].last_sect);
943 if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
944 rc = -EINVAL;
945 goto unmap;
946 }
947
948 seg[n].nsec = last_sect - first_sect + 1;
949 seg[n].offset = first_sect << 9;
950 preq->nr_sects += seg[n].nsec;
951 }
952
953unmap:
954 if (segments)
955 kunmap_atomic(segments);
956 xen_blkbk_unmap(ring, pages, indirect_grefs);
957 return rc;
958}
959
960static int dispatch_discard_io(struct xen_blkif_ring *ring,
961 struct blkif_request *req)
962{
963 int err = 0;
964 int status = BLKIF_RSP_OKAY;
965 struct xen_blkif *blkif = ring->blkif;
966 struct block_device *bdev = blkif->vbd.bdev;
967 unsigned long secure;
968 struct phys_req preq;
969
970 xen_blkif_get(blkif);
971
972 preq.sector_number = req->u.discard.sector_number;
973 preq.nr_sects = req->u.discard.nr_sectors;
974
975 err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
976 if (err) {
977 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
978 preq.sector_number,
979 preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
980 goto fail_response;
981 }
982 ring->st_ds_req++;
983
984 secure = (blkif->vbd.discard_secure &&
985 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
986 BLKDEV_DISCARD_SECURE : 0;
987
988 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
989 req->u.discard.nr_sectors,
990 GFP_KERNEL, secure);
991fail_response:
992 if (err == -EOPNOTSUPP) {
993 pr_debug("discard op failed, not supported\n");
994 status = BLKIF_RSP_EOPNOTSUPP;
995 } else if (err)
996 status = BLKIF_RSP_ERROR;
997
998 make_response(ring, req->u.discard.id, req->operation, status);
999 xen_blkif_put(blkif);
1000 return err;
1001}
1002
1003static int dispatch_other_io(struct xen_blkif_ring *ring,
1004 struct blkif_request *req,
1005 struct pending_req *pending_req)
1006{
1007 free_req(ring, pending_req);
1008 make_response(ring, req->u.other.id, req->operation,
1009 BLKIF_RSP_EOPNOTSUPP);
1010 return -EIO;
1011}
1012
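/*
 * Wait until all I/O in flight on this ring has completed.  Used to
 * implement BLKIF_OP_WRITE_BARRIER: outstanding requests are drained before
 * the barrier's flush is issued.
 */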
1013static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1014{
1015 struct xen_blkif *blkif = ring->blkif;
1016
1017 atomic_set(&blkif->drain, 1);
1018 do {
1019 if (atomic_read(&ring->inflight) == 0)
1020 break;
1021 wait_for_completion_interruptible_timeout(
1022 &blkif->drain_complete, HZ);
1023
1024 if (!atomic_read(&blkif->drain))
1025 break;
1026 } while (!kthread_should_stop());
1027 atomic_set(&blkif->drain, 0);
1028}
1029
1030static void __end_block_io_op(struct pending_req *pending_req,
1031 blk_status_t error)
1032{
1033 /* An error fails the entire request. */
1034 if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
1035 error == BLK_STS_NOTSUPP) {
1036 pr_debug("flush diskcache op failed, not supported\n");
1037 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1038 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1039 } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
1040 error == BLK_STS_NOTSUPP) {
1041 pr_debug("write barrier op failed, not supported\n");
1042 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1043 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1044 } else if (error) {
1045 pr_debug("Buffer not up-to-date at end of operation,"
1046 " error=%d\n", error);
1047 pending_req->status = BLKIF_RSP_ERROR;
1048 }
1049
1050 /*
1051 * If all of the bio's have completed it is time to unmap
1052 * the grant references associated with 'request' and provide
1053 * the proper response on the ring.
1054 */
1055 if (atomic_dec_and_test(&pending_req->pendcnt))
1056 xen_blkbk_unmap_and_respond(pending_req);
1057}
1058
1059/*
1060 * bio callback.
1061 */
1062static void end_block_io_op(struct bio *bio)
1063{
1064 __end_block_io_op(bio->bi_private, bio->bi_status);
1065 bio_put(bio);
1066}
1067
1068
1069
1070/*
1071 * Function to copy a 'struct blkif_request' from the ring buffer
1072 * (which has the sectors we want, the number of them, grant references, etc.)
1073 * and transmute it to the block API to hand it over to the proper block disk.
1074 */
1075static int
1076__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1077{
1078 union blkif_back_rings *blk_rings = &ring->blk_rings;
1079 struct blkif_request req;
1080 struct pending_req *pending_req;
1081 RING_IDX rc, rp;
1082 int more_to_do = 0;
1083
1084 rc = blk_rings->common.req_cons;
1085 rp = blk_rings->common.sring->req_prod;
1086 rmb(); /* Ensure we see queued requests up to 'rp'. */
1087
1088 if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1089 rc = blk_rings->common.rsp_prod_pvt;
1090 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1091 rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1092 return -EACCES;
1093 }
1094 while (rc != rp) {
1095
1096 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1097 break;
1098
1099 /* We've seen a request, so clear spurious eoi flag. */
1100 *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
1101
1102 if (kthread_should_stop()) {
1103 more_to_do = 1;
1104 break;
1105 }
1106
1107 pending_req = alloc_req(ring);
1108 if (NULL == pending_req) {
1109 ring->st_oo_req++;
1110 more_to_do = 1;
1111 break;
1112 }
1113
1114 switch (ring->blkif->blk_protocol) {
1115 case BLKIF_PROTOCOL_NATIVE:
1116 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1117 break;
1118 case BLKIF_PROTOCOL_X86_32:
1119 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1120 break;
1121 case BLKIF_PROTOCOL_X86_64:
1122 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1123 break;
1124 default:
1125 BUG();
1126 }
1127 blk_rings->common.req_cons = ++rc; /* before make_response() */
1128
1129 /* Apply all sanity checks to /private copy/ of request. */
1130 barrier();
1131
1132 switch (req.operation) {
1133 case BLKIF_OP_READ:
1134 case BLKIF_OP_WRITE:
1135 case BLKIF_OP_WRITE_BARRIER:
1136 case BLKIF_OP_FLUSH_DISKCACHE:
1137 case BLKIF_OP_INDIRECT:
1138 if (dispatch_rw_block_io(ring, &req, pending_req))
1139 goto done;
1140 break;
1141 case BLKIF_OP_DISCARD:
1142 free_req(ring, pending_req);
1143 if (dispatch_discard_io(ring, &req))
1144 goto done;
1145 break;
1146 default:
1147 if (dispatch_other_io(ring, &req, pending_req))
1148 goto done;
1149 break;
1150 }
1151
1152 /* Yield point for this unbounded loop. */
1153 cond_resched();
1154 }
1155done:
1156 return more_to_do;
1157}
1158
1159static int
1160do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1161{
1162 union blkif_back_rings *blk_rings = &ring->blk_rings;
1163 int more_to_do;
1164
1165 do {
1166 more_to_do = __do_block_io_op(ring, eoi_flags);
1167 if (more_to_do)
1168 break;
1169
1170 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1171 } while (more_to_do);
1172
1173 return more_to_do;
1174}
1175/*
1176 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
1177 * and call the 'submit_bio' to pass it to the underlying storage.
1178 */
1179static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1180 struct blkif_request *req,
1181 struct pending_req *pending_req)
1182{
1183 struct phys_req preq;
1184 struct seg_buf *seg = pending_req->seg;
1185 unsigned int nseg;
1186 struct bio *bio = NULL;
1187 struct bio **biolist = pending_req->biolist;
1188 int i, nbio = 0;
1189 int operation;
1190 int operation_flags = 0;
1191 struct blk_plug plug;
1192 bool drain = false;
1193 struct grant_page **pages = pending_req->segments;
1194 unsigned short req_operation;
1195
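	/* For indirect requests the real operation is carried in indirect_op. */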
1196 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1197 req->u.indirect.indirect_op : req->operation;
1198
1199 if ((req->operation == BLKIF_OP_INDIRECT) &&
1200 (req_operation != BLKIF_OP_READ) &&
1201 (req_operation != BLKIF_OP_WRITE)) {
1202 pr_debug("Invalid indirect operation (%u)\n", req_operation);
1203 goto fail_response;
1204 }
1205
1206 switch (req_operation) {
1207 case BLKIF_OP_READ:
1208 ring->st_rd_req++;
1209 operation = REQ_OP_READ;
1210 break;
1211 case BLKIF_OP_WRITE:
1212 ring->st_wr_req++;
1213 operation = REQ_OP_WRITE;
1214 operation_flags = REQ_SYNC | REQ_IDLE;
1215 break;
1216 case BLKIF_OP_WRITE_BARRIER:
1217 drain = true;
1218 fallthrough;
1219 case BLKIF_OP_FLUSH_DISKCACHE:
1220 ring->st_f_req++;
1221 operation = REQ_OP_WRITE;
1222 operation_flags = REQ_PREFLUSH;
1223 break;
1224 default:
1225 operation = 0; /* make gcc happy */
1226 goto fail_response;
1227 break;
1228 }
1229
1230 /* Check that the number of segments is sane. */
1231 nseg = req->operation == BLKIF_OP_INDIRECT ?
1232 req->u.indirect.nr_segments : req->u.rw.nr_segments;
1233
1234 if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
1235 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1236 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1237 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1238 (nseg > MAX_INDIRECT_SEGMENTS))) {
1239 pr_debug("Bad number of segments in request (%d)\n", nseg);
1240 /* Haven't submitted any bio's yet. */
1241 goto fail_response;
1242 }
1243
1244 preq.nr_sects = 0;
1245
1246 pending_req->ring = ring;
1247 pending_req->id = req->u.rw.id;
1248 pending_req->operation = req_operation;
1249 pending_req->status = BLKIF_RSP_OKAY;
1250 pending_req->nr_segs = nseg;
1251
1252 if (req->operation != BLKIF_OP_INDIRECT) {
1253 preq.dev = req->u.rw.handle;
1254 preq.sector_number = req->u.rw.sector_number;
1255 for (i = 0; i < nseg; i++) {
1256 pages[i]->gref = req->u.rw.seg[i].gref;
1257 seg[i].nsec = req->u.rw.seg[i].last_sect -
1258 req->u.rw.seg[i].first_sect + 1;
1259 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1260 if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
1261 (req->u.rw.seg[i].last_sect <
1262 req->u.rw.seg[i].first_sect))
1263 goto fail_response;
1264 preq.nr_sects += seg[i].nsec;
1265 }
1266 } else {
1267 preq.dev = req->u.indirect.handle;
1268 preq.sector_number = req->u.indirect.sector_number;
1269 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1270 goto fail_response;
1271 }
1272
1273 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1274 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1275 operation == REQ_OP_READ ? "read" : "write",
1276 preq.sector_number,
1277 preq.sector_number + preq.nr_sects,
1278 ring->blkif->vbd.pdevice);
1279 goto fail_response;
1280 }
1281
1282 /*
1283 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1284 * is set there.
1285 */
1286 for (i = 0; i < nseg; i++) {
1287 if (((int)preq.sector_number|(int)seg[i].nsec) &
1288 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1289 pr_debug("Misaligned I/O request from domain %d\n",
1290 ring->blkif->domid);
1291 goto fail_response;
1292 }
1293 }
1294
1295	/* Wait on all outstanding I/Os and, once they have completed,
1296 * issue the flush.
1297 */
1298 if (drain)
1299 xen_blk_drain_io(pending_req->ring);
1300
1301 /*
1302 * If we have failed at this point, we need to undo the M2P override,
1303 * set gnttab_set_unmap_op on all of the grant references and perform
1304 * the hypercall to unmap the grants - that is all done in
1305 * xen_blkbk_unmap.
1306 */
1307 if (xen_blkbk_map_seg(pending_req))
1308 goto fail_flush;
1309
1310 /*
1311 * This corresponding xen_blkif_put is done in __end_block_io_op, or
1312 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
1313 */
1314 xen_blkif_get(ring->blkif);
1315 atomic_inc(&ring->inflight);
1316
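	/*
	 * Build as few bios as possible: keep adding segments to the current
	 * bio until bio_add_page() fails (or there is no bio yet), then
	 * allocate a new one sized for the remaining segments.
	 */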
1317 for (i = 0; i < nseg; i++) {
1318 while ((bio == NULL) ||
1319 (bio_add_page(bio,
1320 pages[i]->page,
1321 seg[i].nsec << 9,
1322 seg[i].offset) == 0)) {
1323
1324 int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1325 bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1326 if (unlikely(bio == NULL))
1327 goto fail_put_bio;
1328
1329 biolist[nbio++] = bio;
1330 bio_set_dev(bio, preq.bdev);
1331 bio->bi_private = pending_req;
1332 bio->bi_end_io = end_block_io_op;
1333 bio->bi_iter.bi_sector = preq.sector_number;
1334 bio_set_op_attrs(bio, operation, operation_flags);
1335 }
1336
1337 preq.sector_number += seg[i].nsec;
1338 }
1339
1340	/* This will be hit if the operation was a flush or write barrier. */
1341 if (!bio) {
1342 BUG_ON(operation_flags != REQ_PREFLUSH);
1343
1344 bio = bio_alloc(GFP_KERNEL, 0);
1345 if (unlikely(bio == NULL))
1346 goto fail_put_bio;
1347
1348 biolist[nbio++] = bio;
1349 bio_set_dev(bio, preq.bdev);
1350 bio->bi_private = pending_req;
1351 bio->bi_end_io = end_block_io_op;
1352 bio_set_op_attrs(bio, operation, operation_flags);
1353 }
1354
1355 atomic_set(&pending_req->pendcnt, nbio);
1356 blk_start_plug(&plug);
1357
1358 for (i = 0; i < nbio; i++)
1359 submit_bio(biolist[i]);
1360
1361 /* Let the I/Os go.. */
1362 blk_finish_plug(&plug);
1363
1364 if (operation == REQ_OP_READ)
1365 ring->st_rd_sect += preq.nr_sects;
1366 else if (operation == REQ_OP_WRITE)
1367 ring->st_wr_sect += preq.nr_sects;
1368
1369 return 0;
1370
1371 fail_flush:
1372 xen_blkbk_unmap(ring, pending_req->segments,
1373 pending_req->nr_segs);
1374 fail_response:
1375 /* Haven't submitted any bio's yet. */
1376 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1377 free_req(ring, pending_req);
1378 msleep(1); /* back off a bit */
1379 return -EIO;
1380
1381 fail_put_bio:
1382 for (i = 0; i < nbio; i++)
1383 bio_put(biolist[i]);
1384 atomic_set(&pending_req->pendcnt, 1);
1385 __end_block_io_op(pending_req, BLK_STS_RESOURCE);
1386 msleep(1); /* back off a bit */
1387 return -EIO;
1388}
1389
1390
1391
1392/*
1393 * Put a response on the ring on how the operation fared.
1394 */
1395static void make_response(struct xen_blkif_ring *ring, u64 id,
1396 unsigned short op, int st)
1397{
1398 struct blkif_response *resp;
1399 unsigned long flags;
1400 union blkif_back_rings *blk_rings;
1401 int notify;
1402
1403 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1404 blk_rings = &ring->blk_rings;
1405 /* Place on the response ring for the relevant domain. */
1406 switch (ring->blkif->blk_protocol) {
1407 case BLKIF_PROTOCOL_NATIVE:
1408 resp = RING_GET_RESPONSE(&blk_rings->native,
1409 blk_rings->native.rsp_prod_pvt);
1410 break;
1411 case BLKIF_PROTOCOL_X86_32:
1412 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1413 blk_rings->x86_32.rsp_prod_pvt);
1414 break;
1415 case BLKIF_PROTOCOL_X86_64:
1416 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1417 blk_rings->x86_64.rsp_prod_pvt);
1418 break;
1419 default:
1420 BUG();
1421 }
1422
1423 resp->id = id;
1424 resp->operation = op;
1425 resp->status = st;
1426
1427 blk_rings->common.rsp_prod_pvt++;
1428 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1429 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
1430 if (notify)
1431 notify_remote_via_irq(ring->irq);
1432}
1433
1434static int __init xen_blkif_init(void)
1435{
1436 int rc = 0;
1437
1438 if (!xen_domain())
1439 return -ENODEV;
1440
1441 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
1442 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
1443 xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
1444 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
1445 }
1446
1447 if (xenblk_max_queues == 0)
1448 xenblk_max_queues = num_online_cpus();
1449
1450 rc = xen_blkif_interface_init();
1451 if (rc)
1452 goto failed_init;
1453
1454 rc = xen_blkif_xenbus_init();
1455 if (rc)
1456 goto failed_init;
1457
1458 failed_init:
1459 return rc;
1460}
1461
1462module_init(xen_blkif_init);
1463
1464static void __exit xen_blkif_fini(void)
1465{
1466 xen_blkif_xenbus_fini();
1467 xen_blkif_interface_fini();
1468}
1469
1470module_exit(xen_blkif_fini);
1471
1472MODULE_LICENSE("Dual BSD/GPL");
1473MODULE_ALIAS("xen-backend:vbd");