/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/scatterlist.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static const struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure.  We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	int feature_barrier;
	int is_ready;

	/**
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int users;
};

static DEFINE_SPINLOCK(blkif_io_lock);

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))

#define DEV_NAME	"xvd"	/* name in /dev */

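/*
 * Each request occupies a slot in the shadow[] array.  Free slots are
 * chained through req.id, with shadow_free pointing at the head of the
 * list; the two helpers below pop and push slots on that free list.
 */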
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

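/*
 * Minor numbers are handed out from a global bitmap so several vbds can
 * share the xvd major.  xlbd_reserve_minors() grows the bitmap on demand
 * and marks [minor, minor + nr) as in use; xlbd_release_minors() clears
 * those bits again.
 */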
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		for (; minor < end; ++minor)
			__set_bit(minor, minors);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	for (; minor < end; ++minor)
		__clear_bit(minor, minors);
	spin_unlock(&minor_lock);
}

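/*
 * Grant-table free callback: references have become available again, so
 * defer a queue restart to the workqueue (blkif_restart_queue).
 */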
static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

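/*
 * Minimal ioctl support: CDROMMULTISESSION is answered with an all-zero
 * structure, CDROM_GET_CAPABILITY just reports whether the disk was
 * registered as a CD-ROM, and everything else gets -EINVAL as on native
 * Linux.
 */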
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref;
	grant_ref_t gref_head;
	struct scatterlist *sg;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
		buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
		fsect = sg->offset >> 9;
		lsect = fsect + (sg->length >> 9) - 1;
		/* install a grant reference. */
		ref = gnttab_claim_grant_reference(&gref_head);
		BUG_ON(ref == -ENOSPC);

		gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req));

		info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
		ring_req->seg[i] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}

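/*
 * Push queued requests to the backend and notify it over the event
 * channel if it is waiting for work.
 */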
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

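/*
 * Set up the request queue for a new vbd: single-page segments, at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments per request (so a merged request
 * always fits in one ring slot), sector-aligned buffers and no bounce
 * buffering.
 */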
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_max_hw_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

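/*
 * Tell the block layer whether barrier requests are usable, based on the
 * backend's advertised feature-barrier flag, and log the result.
 */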
static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;

	err = blk_queue_ordered(info->rq,
		info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE);

	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	return 0;
}

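/*
 * Allocate and name the gendisk for a vbd.  Classic device numbers allow
 * 16 partitions per disk; extended numbers (bit 28 set) allow 256.  Disks
 * are named xvda, xvdb, ..., xvdz, xvdaa, ...; a whole-disk node claims
 * all nr_parts minors, a single-partition node claims just one.
 */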
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;
	unsigned int offset;
	int minor;
	int nr_parts;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		minor = BLKIF_MINOR(info->vdevice);
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	offset = minor / nr_parts;

	if (nr_minors > 1) {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
				'a' + ((offset / 26)-1), 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
				'a' + offset,
				minor & (nr_parts - 1));
		else
			sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
				'a' + ((offset / 26) - 1),
				'a' + (offset % 26),
				minor & (nr_parts - 1));
	}

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	if (info->feature_barrier)
		xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

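/*
 * Restart the request queue once the shared ring has room again.  The
 * work-struct variant runs from the workqueue after the grant-table free
 * callback fires.
 */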
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

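/*
 * Tear down the connection to the backend: stop the queue, cancel any
 * pending grant-table callback and release the shared ring, event channel
 * and irq.  With suspend != 0 the device is marked SUSPENDED so that
 * blkif_recover() can later replay outstanding requests.
 */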
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

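/* Revoke the grant references that a completed request handed to the backend. */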
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}

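/*
 * Event-channel interrupt handler: walk the response ring, complete the
 * corresponding block-layer requests, recycle their shadow slots and
 * restart the queue.  A barrier op rejected with EOPNOTSUPP disables
 * barriers for the rest of the session.
 */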
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				error = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}

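/*
 * Allocate the shared ring page, grant it to the backend, and bind an
 * event channel plus irq handler for completion notifications.
 */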
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn,
					blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}

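/*
 * Replay the shadow copies of requests that were in flight when the domain
 * was suspended: rebuild the free list, re-establish the grant references
 * for the data pages (they are invalidated across suspend/resume) and push
 * the requests back onto the new ring before reconnecting.
 */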
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir(
					(struct request *)
					info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		/* fall through */
	case BLKIF_STATE_SUSPENDED:
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		goto out;

	spin_lock_irqsave(&blkif_io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	del_gendisk(info->gd);
	xlbd_release_minors(minor, nr_minors);

 out:
	if (info->xbdev)
		xenbus_frontend_closed(info->xbdev);
}

/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	struct block_device *bd;

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		if (info->gd == NULL) {
			xenbus_frontend_closed(dev);
			break;
		}
		bd = bdget_disk(info->gd, 0);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		mutex_lock(&bd->bd_mutex);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(info);
		mutex_unlock(&bd->bd_mutex);
		bdput(bd);
		break;
	}
}

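/*
 * The info structure is shared with blkif_release(): free it here only if
 * the device is no longer open, otherwise just detach the xenbus device
 * and let the final blkif_release() clean up.
 */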
static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	if (info->users == 0)
		kfree(info);
	else
		info->xbdev = NULL;

	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

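/*
 * Open/release keep a usage count so that a backend-initiated close is
 * deferred while the device is still in use; the deferred close (and, if
 * the device has already been removed, the final kfree of info) happens
 * when the last opener goes away.
 */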
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;

	if (!info->xbdev)
		return -ENODEV;

	lock_kernel();
	info->users++;
	unlock_kernel();

	return 0;
}

static int blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;

	lock_kernel();
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device *dev = info->xbdev;

		if (!dev) {
			blkfront_closing(info);
			kfree(info);
		} else if (xenbus_read_driver_state(dev->otherend)
			   == XenbusStateClosing && info->is_ready)
			blkfront_closing(info);
	}
	unlock_kernel();
	return 0;
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

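/*
 * Module init: only useful in a Xen domain; claim the xvd block major and
 * register the xenbus frontend driver.
 */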
static int __init xlblk_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");