/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
extern unsigned int xen_blkif_max_ring_order;
/*
 * Maximum number of segments allowed in indirect requests. This value is
 * also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
        (XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME \
        (XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
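
/*
 * Worked example (editor's note, assuming XEN_PAGE_SIZE == 4096 and
 * sizeof(struct blkif_request_segment) == 8): one indirect frame holds
 * 4096 / 8 = 512 segment entries. With 4K guest pages
 * (XEN_PAGES_PER_SEGMENT == 1), SEGS_PER_INDIRECT_FRAME is 512 and the
 * 256 maximum segments fit in MAX_INDIRECT_PAGES = (256 + 511) / 512 = 1
 * frame. With 64K guest pages (XEN_PAGES_PER_SEGMENT == 16),
 * SEGS_PER_INDIRECT_FRAME drops to 32 and MAX_INDIRECT_PAGES grows to
 * (256 + 31) / 32 = 8 frames.
 */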

/* Not a real protocol. Used to generate ring structs which contain
 * the elements common to all protocols only. This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places. */
struct blkif_common_request {
        char dummy;
};
struct blkif_common_response {
        char dummy;
};

struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad1;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS, this value
         * is also exported to the guest (via xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
                struct blkif_x86_32_request_other other;
                struct blkif_x86_32_request_indirect indirect;
        } u;
} __attribute__((__packed__));

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint32_t       _pad3;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS, this value
         * is also exported to the guest (via xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
                struct blkif_x86_64_request_other other;
                struct blkif_x86_64_request_indirect indirect;
        } u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_x86_64_response);

union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif

struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* phys device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        unsigned int            flush_support:1;
        unsigned int            discard_secure:1;
        unsigned int            feature_gnt_persistent:1;
        unsigned int            overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE       2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE           0
/*
 * This persistent grant has been used; the flag is set when we clear
 * PERSISTENT_GNT_ACTIVE, to record that the grant was used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE       1

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE         32
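
/*
 * Editor's example: with nr_ring_pages shared ring pages the ring holds
 * nr_ring_pages * XEN_BLKIF_REQS_PER_PAGE requests; e.g. a ring of order 4
 * (16 pages, as permitted by xen_blkif_max_ring_order) can carry
 * 16 * 32 = 512 in-flight requests.
 */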

struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
        DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
        struct rb_node node;
        struct list_head remove_node;
};
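
/*
 * Editor's sketch (not part of this header): how a persistent grant might be
 * inserted into the rb-tree keyed by grant reference and marked in use. The
 * real implementation lives in blkback.c (add_persistent_gnt()); this
 * simplified version ignores the per-blkif grant limit and error reporting,
 * and assumes the usual kernel headers pulled in above.
 */
static inline int example_add_persistent_gnt(struct rb_root *root,
                                             struct persistent_gnt *new)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;
        struct persistent_gnt *this;

        /* Walk down to the leaf where 'new' belongs, ordered by ->gnt. */
        while (*p) {
                parent = *p;
                this = container_of(parent, struct persistent_gnt, node);
                if (new->gnt < this->gnt)
                        p = &(*p)->rb_left;
                else if (new->gnt > this->gnt)
                        p = &(*p)->rb_right;
                else
                        return -EINVAL; /* duplicate grant reference */
        }

        /* Fresh grant: mark it in use, not yet "recently used". */
        bitmap_zero(new->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, new->flags);
        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        return 0;
}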

struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        union blkif_back_rings  blk_rings;
        void                    *blk_ring;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        /* Private fields. */
        spinlock_t              blk_ring_lock;
        atomic_t                refcnt;

        wait_queue_head_t       wq;
        /* for barrier (drain) requests */
        struct completion       drain_complete;
        atomic_t                drain;
        atomic_t                inflight;
        /* One thread per one blkif. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* tree to store persistent grants */
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
        unsigned long           next_lru;
        /* used by the kworker that offloads work from the persistent purge */
        struct list_head        persistent_purge_list;
        struct work_struct      persistent_purge_work;

        /* buffer of free pages to map grant refs */
        spinlock_t              free_pages_lock;
        int                     free_pages_num;
        struct list_head        free_pages;

        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;

        /* statistics */
        unsigned long           st_print;
        unsigned long long      st_rd_req;
        unsigned long long      st_wr_req;
        unsigned long long      st_oo_req;
        unsigned long long      st_f_req;
        unsigned long long      st_ds_req;
        unsigned long long      st_rd_sect;
        unsigned long long      st_wr_sect;

        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
        unsigned int            nr_ring_pages;
};

struct seg_buf {
        unsigned long offset;
        unsigned int nsec;
};

struct grant_page {
        struct page             *page;
        struct persistent_gnt   *persistent_gnt;
        grant_handle_t          handle;
        grant_ref_t             gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
        int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
        struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
        /* Indirect descriptors */
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
        struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
        struct page             *unmap_pages[MAX_INDIRECT_SEGMENTS];
        struct gntab_unmap_queue_data gnttab_unmap_data;
};
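
/*
 * Editor's sketch of the pendcnt protocol described above (illustrative
 * only): each completing bio calls something like this and the caller queues
 * the response when it returns true. The real logic lives in blkback.c
 * (__end_block_io_op()), which also unmaps grants and calls make_response().
 */
static inline bool example_bio_complete(struct pending_req *req, int error)
{
        if (error)
                req->status = BLKIF_RSP_ERROR;

        /* Only the last completing bio sends the response to the frontend. */
        return atomic_dec_and_test(&req->pendcnt);
}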

#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                         get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                               \
        do {                                            \
                if (atomic_dec_and_test(&(_b)->refcnt)) \
                        schedule_work(&(_b)->free_work);\
        } while (0)
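
/*
 * Editor's example (hedged): typical get/put pairing. A reference is held
 * while the interface is in use, so the final xen_blkif_put() schedules
 * free_work only after the last user is gone.
 */
static inline void example_ref_usage(struct xen_blkif *blkif)
{
        xen_blkif_get(blkif);   /* pin the interface while we use it */
        /* ... submit I/O, touch blkif->vbd, etc. ... */
        xen_blkif_put(blkif);   /* may schedule blkif->free_work */
}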

struct phys_req {
        unsigned short          dev;
        blkif_sector_t          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif *blkif);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
        dst->operation = src->operation;
        switch (src->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
        dst->operation = src->operation;
        switch (src->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}
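
/*
 * Editor's sketch (hedged): how the translators above are typically selected
 * once per consumed request, based on the protocol negotiated via xenstore.
 * The real dispatch is in blkback.c (__do_block_io_op()); the ring-copy
 * details (consumer indexes, RING_GET_REQUEST) are elided here.
 */
static inline void example_translate_req(struct xen_blkif *blkif,
                                         struct blkif_request *dst,
                                         void *src)
{
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                /* Same layout as the backend: plain copy. */
                memcpy(dst, src, sizeof(*dst));
                break;
        case BLKIF_PROTOCOL_X86_32:
                blkif_get_x86_32_req(dst, src);
                break;
        case BLKIF_PROTOCOL_X86_64:
                blkif_get_x86_64_req(dst, src);
                break;
        default:
                BUG();
        }
}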

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */