#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_IOPRIO_CHANGED,
	ICQ_CGROUP_CHANGED,
};

/*
 * An io_cq (icq) is the association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc-q pair.
 *
 * An elevator can request use of icq's by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be no smaller than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is defining a struct
 * which contains io_cq as the first member followed by the private
 * members and using its size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops		= { ... },
 *		.icq_size	= sizeof(struct snail_io_cq),
 *		.icq_align	= __alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, block core will manage icq's.  All requests will
 * have their ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called and will be holding a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator is allowed to look up an icq using ioc_lookup_icq() while
 * holding the queue lock, but the returned icq is valid only until the
 * queue lock is released.  Elevators cannot and should not try to create
 * or destroy icq's.
 *
 * As icq's are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookup can be performed holding only
 *   the q lock.
 *
 * - icq's are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to ioc to ensure it stays until the request is completed.
 *
 * - Linking and unlinking icq's are performed while holding both ioc and
 *   q locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance.
 */
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through the icq_lists of q and
	 * ioc respectively.  Both fields are unused once ioc_exit_icq()
	 * is called and are then shared with __rcu_icq_cache and
	 * __rcu_head, which are used for RCU free of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned long		changed;
};
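
/*
 * With icq_size/icq_align set up as in the snail example above, an
 * elevator can recover its private icq from a struct io_cq pointer
 * with container_of().  A sketch; snail_io_cq and icq_to_sic() are
 * hypothetical:
 *
 *	static inline struct snail_io_cq *icq_to_sic(struct io_cq *icq)
 *	{
 *		return container_of(icq, struct snail_io_cq, icq);
 *	}
 */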

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed.  It may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;	/* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the ref count is zero, don't allow sharing (ioc is going
	 * away, it's a race).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
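
/*
 * A sketch of how ioc_task_link() is meant to be used, roughly what
 * the fork path does when a task is cloned with CLONE_IO (error
 * handling abbreviated; tsk is the hypothetical new task):
 *
 *	tsk->io_context = ioc_task_link(current->io_context);
 *	if (unlikely(!tsk->io_context))
 *		return -ENOMEM;		/* the ioc was exiting */
 */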

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc,
				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
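
/*
 * Typical usage of the interface above (a sketch modeled on the ioprio
 * code): grab a reference on a task's ioc, creating it if necessary,
 * and drop the reference when done.  A NULL locked_q tells
 * put_io_context() that no queue lock is currently held.
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc_ioprio_changed(ioc, ioprio);
 *		put_io_context(ioc, NULL);
 *	}
 */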

#endif