]>
Commit | Line | Data |
---|---|---|
e8db0be1 JP |
1 | #ifndef _LINUX_PM_QOS_H |
2 | #define _LINUX_PM_QOS_H | |
d82b3518 MG |
3 | /* interface for the pm_qos_power infrastructure of the linux kernel. |
4 | * | |
bf1db69f | 5 | * Mark Gross <mgross@linux.intel.com> |
d82b3518 | 6 | */ |
82f68251 | 7 | #include <linux/plist.h> |
d82b3518 | 8 | #include <linux/notifier.h> |
1a9a9152 | 9 | #include <linux/device.h> |
c4772d19 | 10 | #include <linux/workqueue.h> |
d82b3518 | 11 | |
/*
 * Global PM QoS class identifiers; each class has its own constraint list
 * and default value.  PM_QOS_RESERVED (0) is intentionally unused.
 */
enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,
	PM_QOS_MEMORY_BANDWIDTH,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};
/* Result of a device PM QoS flags query (dev_pm_qos_flags()). */
enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,	/* query not answerable (no constraints) */
	PM_QOS_FLAGS_NONE,		/* no requested flag set */
	PM_QOS_FLAGS_SOME,		/* some, but not all, requested flags set */
	PM_QOS_FLAGS_ALL,		/* all requested flags set */
};
#define PM_QOS_DEFAULT_VALUE	-1

/* Per-class default values used when no request is active. */
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE	0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
/* Largest positive s32: "any latency is acceptable". */
#define PM_QOS_LATENCY_ANY	((s32)(~(__u32)0 >> 1))

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
cc749986 JP |
43 | struct pm_qos_request { |
44 | struct plist_node node; | |
82f68251 | 45 | int pm_qos_class; |
c4772d19 | 46 | struct delayed_work work; /* for pm_qos_update_request_timeout */ |
82f68251 | 47 | }; |
d82b3518 | 48 | |
5efbe427 RW |
49 | struct pm_qos_flags_request { |
50 | struct list_head node; | |
51 | s32 flags; /* Do not change to 64 bit */ | |
52 | }; | |
53 | ||
/* Kind of constraint carried by a struct dev_pm_qos_request. */
enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_FLAGS,
};
91ff4cb8 | 60 | struct dev_pm_qos_request { |
ae0fb4b7 | 61 | enum dev_pm_qos_req_type type; |
021c870b RW |
62 | union { |
63 | struct plist_node pnode; | |
ae0fb4b7 | 64 | struct pm_qos_flags_request flr; |
021c870b | 65 | } data; |
91ff4cb8 JP |
66 | struct device *dev; |
67 | }; | |
68 | ||
/*
 * Aggregation rule for a constraint list.
 * NOTE: PM_QOS_UNITIALIZED is a historical misspelling, but it is the
 * public identifier — do not rename.
 */
enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
	PM_QOS_SUM		/* return the sum */
};
76 | /* | |
5efbe427 RW |
77 | * Note: The lockless read path depends on the CPU accessing target_value |
78 | * or effective_flags atomically. Atomic access is only guaranteed on all CPU | |
4e1779ba JP |
79 | * types linux supports for 32 bit quantites |
80 | */ | |
81 | struct pm_qos_constraints { | |
82 | struct plist_head list; | |
83 | s32 target_value; /* Do not change to 64 bit */ | |
84 | s32 default_value; | |
327adaed | 85 | s32 no_constraint_value; |
4e1779ba JP |
86 | enum pm_qos_type type; |
87 | struct blocking_notifier_head *notifiers; | |
88 | }; | |
89 | ||
5efbe427 RW |
90 | struct pm_qos_flags { |
91 | struct list_head list; | |
92 | s32 effective_flags; /* Do not change to 64 bit */ | |
93 | }; | |
94 | ||
5f986c59 | 95 | struct dev_pm_qos { |
b02f6695 | 96 | struct pm_qos_constraints resume_latency; |
2d984ad1 | 97 | struct pm_qos_constraints latency_tolerance; |
ae0fb4b7 | 98 | struct pm_qos_flags flags; |
b02f6695 | 99 | struct dev_pm_qos_request *resume_latency_req; |
2d984ad1 | 100 | struct dev_pm_qos_request *latency_tolerance_req; |
e39473d0 | 101 | struct dev_pm_qos_request *flags_req; |
5f986c59 RW |
102 | }; |
103 | ||
/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};
91ff4cb8 JP |
111 | static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req) |
112 | { | |
83618092 | 113 | return req->dev != NULL; |
91ff4cb8 JP |
114 | } |
115 | ||
abe98ec2 JP |
116 | int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, |
117 | enum pm_qos_req_action action, int value); | |
5efbe427 RW |
118 | bool pm_qos_update_flags(struct pm_qos_flags *pqf, |
119 | struct pm_qos_flags_request *req, | |
120 | enum pm_qos_req_action action, s32 val); | |
cc749986 JP |
121 | void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, |
122 | s32 value); | |
123 | void pm_qos_update_request(struct pm_qos_request *req, | |
e8db0be1 | 124 | s32 new_value); |
c4772d19 MH |
125 | void pm_qos_update_request_timeout(struct pm_qos_request *req, |
126 | s32 new_value, unsigned long timeout_us); | |
cc749986 | 127 | void pm_qos_remove_request(struct pm_qos_request *req); |
d82b3518 | 128 | |
ed77134b MG |
129 | int pm_qos_request(int pm_qos_class); |
130 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); | |
131 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); | |
cc749986 | 132 | int pm_qos_request_active(struct pm_qos_request *req); |
b66213cd | 133 | s32 pm_qos_read_value(struct pm_qos_constraints *c); |
91ff4cb8 | 134 | |
a9b542ee | 135 | #ifdef CONFIG_PM |
ae0fb4b7 RW |
136 | enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); |
137 | enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); | |
00dc9ad1 | 138 | s32 __dev_pm_qos_read_value(struct device *dev); |
1a9a9152 | 139 | s32 dev_pm_qos_read_value(struct device *dev); |
91ff4cb8 | 140 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, |
ae0fb4b7 | 141 | enum dev_pm_qos_req_type type, s32 value); |
91ff4cb8 JP |
142 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); |
143 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); | |
144 | int dev_pm_qos_add_notifier(struct device *dev, | |
145 | struct notifier_block *notifier); | |
146 | int dev_pm_qos_remove_notifier(struct device *dev, | |
147 | struct notifier_block *notifier); | |
148 | void dev_pm_qos_constraints_init(struct device *dev); | |
149 | void dev_pm_qos_constraints_destroy(struct device *dev); | |
40a5f8be | 150 | int dev_pm_qos_add_ancestor_request(struct device *dev, |
71d821fd RW |
151 | struct dev_pm_qos_request *req, |
152 | enum dev_pm_qos_req_type type, s32 value); | |
d30d819d RW |
153 | int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); |
154 | void dev_pm_qos_hide_latency_limit(struct device *dev); | |
155 | int dev_pm_qos_expose_flags(struct device *dev, s32 value); | |
156 | void dev_pm_qos_hide_flags(struct device *dev); | |
157 | int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); | |
158 | s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); | |
159 | int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); | |
13b2c4a0 MW |
160 | int dev_pm_qos_expose_latency_tolerance(struct device *dev); |
161 | void dev_pm_qos_hide_latency_tolerance(struct device *dev); | |
d30d819d RW |
162 | |
163 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) | |
164 | { | |
165 | return dev->power.qos->resume_latency_req->data.pnode.prio; | |
166 | } | |
167 | ||
168 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) | |
169 | { | |
170 | return dev->power.qos->flags_req->data.flr.flags; | |
171 | } | |
6dbf5cea RW |
172 | |
173 | static inline s32 dev_pm_qos_raw_read_value(struct device *dev) | |
174 | { | |
175 | return IS_ERR_OR_NULL(dev->power.qos) ? | |
176 | 0 : pm_qos_read_value(&dev->power.qos->resume_latency); | |
177 | } | |
e8db0be1 | 178 | #else |
ae0fb4b7 RW |
179 | static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, |
180 | s32 mask) | |
181 | { return PM_QOS_FLAGS_UNDEFINED; } | |
182 | static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, | |
183 | s32 mask) | |
184 | { return PM_QOS_FLAGS_UNDEFINED; } | |
00dc9ad1 RW |
185 | static inline s32 __dev_pm_qos_read_value(struct device *dev) |
186 | { return 0; } | |
1a9a9152 RW |
187 | static inline s32 dev_pm_qos_read_value(struct device *dev) |
188 | { return 0; } | |
91ff4cb8 JP |
189 | static inline int dev_pm_qos_add_request(struct device *dev, |
190 | struct dev_pm_qos_request *req, | |
ae0fb4b7 | 191 | enum dev_pm_qos_req_type type, |
91ff4cb8 JP |
192 | s32 value) |
193 | { return 0; } | |
194 | static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | |
195 | s32 new_value) | |
196 | { return 0; } | |
197 | static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | |
198 | { return 0; } | |
199 | static inline int dev_pm_qos_add_notifier(struct device *dev, | |
200 | struct notifier_block *notifier) | |
201 | { return 0; } | |
202 | static inline int dev_pm_qos_remove_notifier(struct device *dev, | |
203 | struct notifier_block *notifier) | |
204 | { return 0; } | |
205 | static inline void dev_pm_qos_constraints_init(struct device *dev) | |
1a9a9152 RW |
206 | { |
207 | dev->power.power_state = PMSG_ON; | |
208 | } | |
91ff4cb8 | 209 | static inline void dev_pm_qos_constraints_destroy(struct device *dev) |
1a9a9152 RW |
210 | { |
211 | dev->power.power_state = PMSG_INVALID; | |
212 | } | |
40a5f8be | 213 | static inline int dev_pm_qos_add_ancestor_request(struct device *dev, |
71d821fd RW |
214 | struct dev_pm_qos_request *req, |
215 | enum dev_pm_qos_req_type type, | |
216 | s32 value) | |
40a5f8be | 217 | { return 0; } |
85dc0b8a RW |
218 | static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) |
219 | { return 0; } | |
220 | static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} | |
e39473d0 RW |
221 | static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) |
222 | { return 0; } | |
223 | static inline void dev_pm_qos_hide_flags(struct device *dev) {} | |
224 | static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) | |
225 | { return 0; } | |
2d984ad1 RW |
226 | static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) |
227 | { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } | |
228 | static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) | |
229 | { return 0; } | |
13b2c4a0 MW |
230 | static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) |
231 | { return 0; } | |
232 | static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} | |
e39473d0 | 233 | |
b02f6695 | 234 | static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } |
e39473d0 | 235 | static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } |
6dbf5cea | 236 | static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; } |
85dc0b8a RW |
237 | #endif |
238 | ||
82f68251 | 239 | #endif |