/*
 * iocontext.h -- per-process I/O context state used by the block-layer
 * I/O schedulers (anticipatory and CFQ).
 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic);	/* destructor */
	void (*exit)(struct as_io_context *aic);	/* called on task exit */

	unsigned long state;
	atomic_t nr_queued;		/* queued reads & sync writes */
	atomic_t nr_dispatched;		/* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime: gap between a request completing and the next arriving */
	unsigned long last_end_request;	/* when the last request completed */
	unsigned long ttime_total;	/* accumulated think time */
	unsigned long ttime_samples;	/* number of think-time samples taken */
	unsigned long ttime_mean;	/* running mean think time */
	/* Layout pattern: how sequential this process's IO is */
	unsigned int seek_samples;	/* number of seek-distance samples taken */
	sector_t last_request_pos;	/* sector of the most recent request */
	u64 seek_total;			/* accumulated seek distance */
	sector_t seek_mean;		/* running mean seek distance */
};
29 | ||
struct cfq_queue;
/*
 * Per-process, per-device CFQ scheduler state.  Hangs off an io_context;
 * NOTE(review): rb_node/key look like the linkage into io_context::cic_root,
 * keyed by @key -- confirm against the CFQ lookup code.
 */
struct cfq_io_context {
	struct rb_node rb_node;		/* node in the owning ioc's cic_root tree */
	void *key;			/* lookup key for cic_root */

	struct cfq_queue *cfqq[2];	/* presumably [async, sync] queues -- verify index convention */

	struct io_context *ioc;		/* back-pointer to the owning io_context */

	/* Thinktime tracking (mirrors the fields in as_io_context) */
	unsigned long last_end_request;	/* when the last request completed */
	sector_t last_request_pos;	/* sector of the most recent request */

	unsigned long ttime_total;	/* accumulated think time */
	unsigned long ttime_samples;	/* number of think-time samples taken */
	unsigned long ttime_mean;	/* running mean think time */

	/* Seek-pattern tracking (mirrors the fields in as_io_context) */
	unsigned int seek_samples;	/* number of seek-distance samples taken */
	u64 seek_total;			/* accumulated seek distance */
	sector_t seek_mean;		/* running mean seek distance */

	struct list_head queue_list;

	void (*dtor)(struct io_context *);	/* destructor */
	void (*exit)(struct io_context *);	/* called on task exit */
};
55 | ||
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_t refcount;	/* once this hits zero the ioc is going away
				 * and must not be shared (see ioc_task_link) */
	atomic_t nr_tasks;	/* number of tasks attached to this ioc */

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;		/* I/O priority of the owning task(s) */
	unsigned short ioprio_changed;	/* flag: ioprio was updated -- consumers re-read it */

	/*
	 * For request batching
	 */
	unsigned long last_waited;	/* Time last woken after wait for request */
	int nr_batch_requests;		/* Number of requests left in the batch */

	struct as_io_context *aic;	/* anticipatory-scheduler state, if any */
	struct rb_root cic_root;	/* tree of cfq_io_context, keyed by cic->key */
	void *ioc_data;			/* scheduler-private cached data */
};
80 | ||
d38ecf93 JA |
81 | static inline struct io_context *ioc_task_link(struct io_context *ioc) |
82 | { | |
83 | /* | |
84 | * if ref count is zero, don't allow sharing (ioc is going away, it's | |
85 | * a race). | |
86 | */ | |
87 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) | |
88 | return ioc; | |
89 | ||
90 | return NULL; | |
91 | } | |
92 | ||
#endif /* IOCONTEXT_H */