/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

22 | struct dm_kobject_holder { | |
23 | struct kobject kobj; | |
24 | struct completion completion; | |
25 | }; | |
26 | ||
27 | /* | |
28 | * DM core internal structure that used directly by dm.c and dm-rq.c | |
29 | * DM targets must _not_ deference a mapped_device to directly access its members! | |
30 | */ | |
31 | struct mapped_device { | |
4cc96131 MS |
32 | struct mutex suspend_lock; |
33 | ||
34 | /* | |
35 | * The current mapping (struct dm_table *). | |
36 | * Use dm_get_live_table{_fast} or take suspend_lock for | |
37 | * dereference. | |
38 | */ | |
39 | void __rcu *map; | |
40 | ||
41 | struct list_head table_devices; | |
42 | struct mutex table_devices_lock; | |
43 | ||
44 | unsigned long flags; | |
45 | ||
46 | struct request_queue *queue; | |
47 | int numa_node_id; | |
48 | ||
7e0d574f | 49 | enum dm_queue_mode type; |
4cc96131 MS |
50 | /* Protect queue and type against concurrent access. */ |
51 | struct mutex type_lock; | |
52 | ||
53 | atomic_t holders; | |
54 | atomic_t open_count; | |
55 | ||
56 | struct dm_target *immutable_target; | |
57 | struct target_type *immutable_target_type; | |
58 | ||
59 | struct gendisk *disk; | |
f26c5719 | 60 | struct dax_device *dax_dev; |
4cc96131 MS |
61 | char name[16]; |
62 | ||
63 | void *interface_ptr; | |
64 | ||
65 | /* | |
66 | * A list of ios that arrived while we were suspended. | |
67 | */ | |
68 | atomic_t pending[2]; | |
69 | wait_queue_head_t wait; | |
70 | struct work_struct work; | |
71 | spinlock_t deferred_lock; | |
72 | struct bio_list deferred; | |
73 | ||
74 | /* | |
75 | * Event handling. | |
76 | */ | |
77 | wait_queue_head_t eventq; | |
78 | atomic_t event_nr; | |
79 | atomic_t uevent_seq; | |
80 | struct list_head uevent_list; | |
81 | spinlock_t uevent_lock; /* Protect access to uevent_list */ | |
82 | ||
83 | /* the number of internal suspends */ | |
84 | unsigned internal_suspend_count; | |
85 | ||
86 | /* | |
87 | * Processing queue (flush) | |
88 | */ | |
89 | struct workqueue_struct *wq; | |
90 | ||
91 | /* | |
92 | * io objects are allocated from here. | |
93 | */ | |
94 | mempool_t *io_pool; | |
4cc96131 MS |
95 | |
96 | struct bio_set *bs; | |
97 | ||
98 | /* | |
99 | * freeze/thaw support require holding onto a super block | |
100 | */ | |
101 | struct super_block *frozen_sb; | |
102 | ||
103 | /* forced geometry settings */ | |
104 | struct hd_geometry geometry; | |
105 | ||
106 | struct block_device *bdev; | |
107 | ||
108 | /* kobject and completion */ | |
109 | struct dm_kobject_holder kobj_holder; | |
110 | ||
111 | /* zero-length flush that will be cloned and submitted to targets */ | |
112 | struct bio flush_bio; | |
113 | ||
114 | struct dm_stats stats; | |
115 | ||
116 | struct kthread_worker kworker; | |
117 | struct task_struct *kworker_task; | |
118 | ||
119 | /* for request-based merge heuristic in dm_request_fn() */ | |
120 | unsigned seq_rq_merge_deadline_usecs; | |
121 | int last_rq_rw; | |
122 | sector_t last_rq_pos; | |
123 | ktime_t last_rq_start_time; | |
124 | ||
125 | /* for blk-mq request-based DM support */ | |
126 | struct blk_mq_tag_set *tag_set; | |
127 | bool use_blk_mq:1; | |
128 | bool init_tio_pdu:1; | |
856eb091 MP |
129 | |
130 | struct srcu_struct io_barrier; | |
4cc96131 MS |
131 | }; |
132 | ||
/* Queue setup and request-based helpers implemented in dm.c / dm-rq.c. */
void dm_init_md_queue(struct mapped_device *md);
void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

139 | static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) | |
140 | { | |
141 | return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; | |
142 | } | |
143 | ||
144 | unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max); | |
145 | ||
/*
 * Report whether a message result buffer has overflowed: true when
 * maxlen is 0, or when the string in @result plus its terminating NUL
 * leaves no spare byte in a buffer of @maxlen bytes.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

93e6442c MP |
151 | extern atomic_t dm_global_event_nr; |
152 | extern wait_queue_head_t dm_global_eventq; | |
62e08243 | 153 | void dm_issue_global_event(void); |
93e6442c | 154 | |
4cc96131 | 155 | #endif |