kernel/power/qos.c
1 /*
2 * This module exposes the interface to kernel space for specifying
3 * QoS dependencies. It provides infrastructure for registration of:
4 *
5 * Dependents on a QoS value : register requests
6 * Watchers of QoS value : get notified when target QoS value changes
7 *
8 * This QoS design is best effort based. Dependents register their QoS needs.
9 * Watchers register to keep track of the current QoS needs of the system.
10 *
11 * There are 3 basic classes of QoS parameter: latency, timeout, throughput;
12 * each has defined units:
13 * latency: usec
14 * timeout: usec <-- currently not used.
15 * throughput: kbs (kilo byte / sec)
16 *
17 * There are lists of pm_qos_objects, each one wrapping requests and notifiers.
18 *
19 * User mode requests on a QOS parameter register themselves to the
20 * subsystem by opening the device node /dev/... and writing their request to
21 * the node. As long as the process holds a file handle open to the node the
22 * client continues to be accounted for. Upon file release the usermode
23 * request is removed and a new qos target is computed. This way, when the
24 * application closes the file descriptor or exits, its request is cleaned
25 * up and the pm_qos_object gets an opportunity to recompute the target.
26 *
27 * Mark Gross <mgross@linux.intel.com>
28 */
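
/*
 * Example (editorial sketch, not part of the original file): a driver would
 * typically bound wakeup latency around a burst of latency-sensitive work
 * like this; the request variable and the 20 usec value are illustrative
 * only.
 *
 *	static struct pm_qos_request example_req;
 *
 *	pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	... do the latency-sensitive work ...
 *	pm_qos_remove_request(&example_req);
 *
 * While the request is registered, pm_qos_request(PM_QOS_CPU_DMA_LATENCY)
 * returns the minimum over all registered latency requests.
 */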
29
30 /*#define DEBUG*/
31
32 #include <linux/pm_qos.h>
33 #include <linux/sched.h>
34 #include <linux/spinlock.h>
35 #include <linux/slab.h>
36 #include <linux/time.h>
37 #include <linux/fs.h>
38 #include <linux/device.h>
39 #include <linux/miscdevice.h>
40 #include <linux/string.h>
41 #include <linux/platform_device.h>
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46
47 #include <linux/uaccess.h>
48 #include <linux/export.h>
49 #include <trace/events/power.h>
50
51 /*
52 * locking rule: all changes to constraints or notifiers lists
53 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
54 * held, taken with _irqsave. One lock to rule them all
55 */
56 struct pm_qos_object {
57 struct pm_qos_constraints *constraints;
58 struct miscdevice pm_qos_power_miscdev;
59 char *name;
60 };
61
62 static DEFINE_SPINLOCK(pm_qos_lock);
63
64 static struct pm_qos_object null_pm_qos;
65
66 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
67 static struct pm_qos_constraints cpu_dma_constraints = {
68 .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
69 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
70 .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
71 .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
72 .type = PM_QOS_MIN,
73 .notifiers = &cpu_dma_lat_notifier,
74 };
75 static struct pm_qos_object cpu_dma_pm_qos = {
76 .constraints = &cpu_dma_constraints,
77 .name = "cpu_dma_latency",
78 };
79
80 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
81 static struct pm_qos_constraints network_lat_constraints = {
82 .list = PLIST_HEAD_INIT(network_lat_constraints.list),
83 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
84 .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
85 .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
86 .type = PM_QOS_MIN,
87 .notifiers = &network_lat_notifier,
88 };
89 static struct pm_qos_object network_lat_pm_qos = {
90 .constraints = &network_lat_constraints,
91 .name = "network_latency",
92 };
93
94
95 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
96 static struct pm_qos_constraints network_tput_constraints = {
97 .list = PLIST_HEAD_INIT(network_tput_constraints.list),
98 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
99 .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
100 .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
101 .type = PM_QOS_MAX,
102 .notifiers = &network_throughput_notifier,
103 };
104 static struct pm_qos_object network_throughput_pm_qos = {
105 .constraints = &network_tput_constraints,
106 .name = "network_throughput",
107 };
108
109
110 static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
111 static struct pm_qos_constraints memory_bw_constraints = {
112 .list = PLIST_HEAD_INIT(memory_bw_constraints.list),
113 .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
114 .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
115 .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
116 .type = PM_QOS_SUM,
117 .notifiers = &memory_bandwidth_notifier,
118 };
119 static struct pm_qos_object memory_bandwidth_pm_qos = {
120 .constraints = &memory_bw_constraints,
121 .name = "memory_bandwidth",
122 };
123
124
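/*
 * Indexed by pm_qos_class; the null_pm_qos placeholder in slot 0 keeps the
 * real classes aligned with their enum values, which start at
 * PM_QOS_CPU_DMA_LATENCY.
 */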
125 static struct pm_qos_object *pm_qos_array[] = {
126 &null_pm_qos,
127 &cpu_dma_pm_qos,
128 &network_lat_pm_qos,
129 &network_throughput_pm_qos,
130 &memory_bandwidth_pm_qos,
131 };
132
133 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
134 size_t count, loff_t *f_pos);
135 static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
136 size_t count, loff_t *f_pos);
137 static int pm_qos_power_open(struct inode *inode, struct file *filp);
138 static int pm_qos_power_release(struct inode *inode, struct file *filp);
139
140 static const struct file_operations pm_qos_power_fops = {
141 .write = pm_qos_power_write,
142 .read = pm_qos_power_read,
143 .open = pm_qos_power_open,
144 .release = pm_qos_power_release,
145 .llseek = noop_llseek,
146 };
147
148 /* unlocked internal variant */
149 static inline int pm_qos_get_value(struct pm_qos_constraints *c)
150 {
151 struct plist_node *node;
152 int total_value = 0;
153
154 if (plist_head_empty(&c->list))
155 return c->no_constraint_value;
156
157 switch (c->type) {
158 case PM_QOS_MIN:
159 return plist_first(&c->list)->prio;
160
161 case PM_QOS_MAX:
162 return plist_last(&c->list)->prio;
163
164 case PM_QOS_SUM:
165 plist_for_each(node, &c->list)
166 total_value += node->prio;
167
168 return total_value;
169
170 default:
171 /* runtime check for not using enum */
172 BUG();
173 return PM_QOS_DEFAULT_VALUE;
174 }
175 }
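
/*
 * Worked example (illustrative values): with requests of 20, 50 and 100 on
 * one class, pm_qos_get_value() returns 20 for PM_QOS_MIN, 100 for
 * PM_QOS_MAX and 170 for PM_QOS_SUM.
 */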
176
177 s32 pm_qos_read_value(struct pm_qos_constraints *c)
178 {
179 return c->target_value;
180 }
181
182 static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
183 {
184 c->target_value = value;
185 }
186
188 static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
189 {
190 struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
191 struct pm_qos_constraints *c;
192 struct pm_qos_request *req;
193 char *type;
194 unsigned long flags;
195 int tot_reqs = 0;
196 int active_reqs = 0;
197
198 if (IS_ERR_OR_NULL(qos)) {
199 pr_err("%s: bad qos param!\n", __func__);
200 return -EINVAL;
201 }
202 c = qos->constraints;
203 if (IS_ERR_OR_NULL(c)) {
204 pr_err("%s: Bad constraints on qos?\n", __func__);
205 return -EINVAL;
206 }
207
208 /* Lock to ensure we have a snapshot */
209 spin_lock_irqsave(&pm_qos_lock, flags);
210 if (plist_head_empty(&c->list)) {
211 seq_puts(s, "Empty!\n");
212 goto out;
213 }
214
215 switch (c->type) {
216 case PM_QOS_MIN:
217 type = "Minimum";
218 break;
219 case PM_QOS_MAX:
220 type = "Maximum";
221 break;
222 case PM_QOS_SUM:
223 type = "Sum";
224 break;
225 default:
226 type = "Unknown";
227 }
228
229 plist_for_each_entry(req, &c->list, node) {
230 char *state = "Default";
231
232 if ((req->node).prio != c->default_value) {
233 active_reqs++;
234 state = "Active";
235 }
236 tot_reqs++;
237 seq_printf(s, "%d: %d: %s\n", tot_reqs,
238 (req->node).prio, state);
239 }
240
241 seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
242 type, pm_qos_get_value(c), active_reqs, tot_reqs);
243
244 out:
245 spin_unlock_irqrestore(&pm_qos_lock, flags);
246 return 0;
247 }
248
249 static int pm_qos_dbg_open(struct inode *inode, struct file *file)
250 {
251 return single_open(file, pm_qos_dbg_show_requests,
252 inode->i_private);
253 }
254
255 static const struct file_operations pm_qos_debug_fops = {
256 .open = pm_qos_dbg_open,
257 .read = seq_read,
258 .llseek = seq_lseek,
259 .release = single_release,
260 };
261
262 /**
263 * pm_qos_update_target - manages the constraints list and calls the notifiers
264 * if needed
265 * @c: constraints data struct
266 * @node: request to add to the list, to update or to remove
267 * @action: action to take on the constraints list
268 * @value: value of the request to add or update
269 *
270 * This function returns 1 if the aggregated constraint value has changed, 0
271 * otherwise.
272 */
273 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
274 enum pm_qos_req_action action, int value)
275 {
276 unsigned long flags;
277 int prev_value, curr_value, new_value;
278 int ret;
279
280 spin_lock_irqsave(&pm_qos_lock, flags);
281 prev_value = pm_qos_get_value(c);
282 if (value == PM_QOS_DEFAULT_VALUE)
283 new_value = c->default_value;
284 else
285 new_value = value;
286
287 switch (action) {
288 case PM_QOS_REMOVE_REQ:
289 plist_del(node, &c->list);
290 break;
291 case PM_QOS_UPDATE_REQ:
292 /*
293 * to change the list, we atomically remove, reinit
294 * with new value and add, then see if the extremal
295 * changed
296 */
297 plist_del(node, &c->list);
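		/* fall through: the node is re-initialized and re-added below */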
298 case PM_QOS_ADD_REQ:
299 plist_node_init(node, new_value);
300 plist_add(node, &c->list);
301 break;
302 default:
303 /* no action */
304 ;
305 }
306
307 curr_value = pm_qos_get_value(c);
308 pm_qos_set_value(c, curr_value);
309
310 spin_unlock_irqrestore(&pm_qos_lock, flags);
311
312 trace_pm_qos_update_target(action, prev_value, curr_value);
313 if (prev_value != curr_value) {
314 ret = 1;
315 if (c->notifiers)
316 blocking_notifier_call_chain(c->notifiers,
317 (unsigned long)curr_value,
318 NULL);
319 } else {
320 ret = 0;
321 }
322 return ret;
323 }
324
325 /**
326 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
327 * @pqf: Device PM QoS flags set to remove the request from.
328 * @req: Request to remove from the set.
329 */
330 static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
331 struct pm_qos_flags_request *req)
332 {
333 s32 val = 0;
334
335 list_del(&req->node);
336 list_for_each_entry(req, &pqf->list, node)
337 val |= req->flags;
338
339 pqf->effective_flags = val;
340 }
341
342 /**
343 * pm_qos_update_flags - Update a set of PM QoS flags.
344 * @pqf: Set of flags to update.
345 * @req: Request to add to the set, to modify, or to remove from the set.
346 * @action: Action to take on the set.
347 * @val: Value of the request to add or modify.
348 *
349 * Update the given set of PM QoS flags and call notifiers if the aggregate
350 * value has changed. Returns true if the aggregate constraint value has
351 * changed, false otherwise.
352 */
353 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
354 struct pm_qos_flags_request *req,
355 enum pm_qos_req_action action, s32 val)
356 {
357 unsigned long irqflags;
358 s32 prev_value, curr_value;
359
360 spin_lock_irqsave(&pm_qos_lock, irqflags);
361
362 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
363
364 switch (action) {
365 case PM_QOS_REMOVE_REQ:
366 pm_qos_flags_remove_req(pqf, req);
367 break;
368 case PM_QOS_UPDATE_REQ:
369 pm_qos_flags_remove_req(pqf, req);
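		/* fall through: the request is re-added below with the new value */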
370 case PM_QOS_ADD_REQ:
371 req->flags = val;
372 INIT_LIST_HEAD(&req->node);
373 list_add_tail(&req->node, &pqf->list);
374 pqf->effective_flags |= val;
375 break;
376 default:
377 /* no action */
378 ;
379 }
380
381 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
382
383 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
384
385 trace_pm_qos_update_flags(action, prev_value, curr_value);
386 return prev_value != curr_value;
387 }
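
/*
 * Worked example (illustrative values): with two requests carrying flags
 * 0x1 and 0x2 in the set, effective_flags is 0x3 (the bitwise OR); removing
 * the 0x2 request makes pm_qos_flags_remove_req() recompute it to 0x1.
 */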
388
389 /**
390 * pm_qos_request - returns current system wide qos expectation
391 * @pm_qos_class: identification of which qos value is requested
392 *
393 * This function returns the current target value.
394 */
395 int pm_qos_request(int pm_qos_class)
396 {
397 return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
398 }
399 EXPORT_SYMBOL_GPL(pm_qos_request);
400
401 int pm_qos_request_active(struct pm_qos_request *req)
402 {
403 return req->pm_qos_class != 0;
404 }
405 EXPORT_SYMBOL_GPL(pm_qos_request_active);
406
407 static void __pm_qos_update_request(struct pm_qos_request *req,
408 s32 new_value)
409 {
410 trace_pm_qos_update_request(req->pm_qos_class, new_value);
411
412 if (new_value != req->node.prio)
413 pm_qos_update_target(
414 pm_qos_array[req->pm_qos_class]->constraints,
415 &req->node, PM_QOS_UPDATE_REQ, new_value);
416 }
417
418 /**
419 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
420 * @work: work struct for the delayed work (timeout)
421 *
422 * This cancels the timeout request by falling back to the default at timeout.
423 */
424 static void pm_qos_work_fn(struct work_struct *work)
425 {
426 struct pm_qos_request *req = container_of(to_delayed_work(work),
427 struct pm_qos_request,
428 work);
429
430 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
431 }
432
433 /**
434 * pm_qos_add_request - inserts new qos request into the list
435 * @req: pointer to a preallocated handle
436 * @pm_qos_class: identifies which list of qos request to use
437 * @value: defines the qos request
438 *
439 * This function inserts a new entry in the pm_qos_class list of requested qos
440 * performance characteristics. It recomputes the aggregate QoS expectations
441 * for the pm_qos_class of parameters and initializes the pm_qos_request
442 * handle. Caller needs to save this handle for later use in updates and
443 * removal.
444 */
445
446 void pm_qos_add_request(struct pm_qos_request *req,
447 int pm_qos_class, s32 value)
448 {
449 if (!req) /*guard against callers passing in null */
450 return;
451
452 if (pm_qos_request_active(req)) {
453 WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
454 return;
455 }
456 req->pm_qos_class = pm_qos_class;
457 INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
458 trace_pm_qos_add_request(pm_qos_class, value);
459 pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
460 &req->node, PM_QOS_ADD_REQ, value);
461 }
462 EXPORT_SYMBOL_GPL(pm_qos_add_request);
463
464 /**
465 * pm_qos_update_request - modifies an existing qos request
466 * @req : handle to list element holding a pm_qos request to use
467 * @value: defines the qos request
468 *
469 * Updates an existing qos request for the pm_qos_class of parameters along
470 * with updating the target pm_qos_class value.
471 *
472 * Attempts are made to make this code callable on hot code paths.
473 */
474 void pm_qos_update_request(struct pm_qos_request *req,
475 s32 new_value)
476 {
477 if (!req) /*guard against callers passing in null */
478 return;
479
480 if (!pm_qos_request_active(req)) {
481 WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
482 return;
483 }
484
485 cancel_delayed_work_sync(&req->work);
486 __pm_qos_update_request(req, new_value);
487 }
488 EXPORT_SYMBOL_GPL(pm_qos_update_request);
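
/*
 * Example (editorial sketch, handle and values illustrative):
 *
 *	pm_qos_update_request(&example_req, 100);
 *
 * relaxes an earlier 20 usec request to 100 usec and recomputes the class
 * target.
 */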
489
490 /**
491 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
492 * @req : handle to list element holding a pm_qos request to use
493 * @new_value: defines the temporary qos request value
494 * @timeout_us: the effective duration of this qos request in usecs.
495 *
496 * After timeout_us, this qos request is cancelled automatically.
497 */
498 void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
499 unsigned long timeout_us)
500 {
501 if (!req)
502 return;
503 if (WARN(!pm_qos_request_active(req),
504 "%s called for unknown object.", __func__))
505 return;
506
507 cancel_delayed_work_sync(&req->work);
508
509 trace_pm_qos_update_request_timeout(req->pm_qos_class,
510 new_value, timeout_us);
511 if (new_value != req->node.prio)
512 pm_qos_update_target(
513 pm_qos_array[req->pm_qos_class]->constraints,
514 &req->node, PM_QOS_UPDATE_REQ, new_value);
515
516 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
517 }
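
/*
 * Example (editorial sketch): a driver expecting a burst of interrupts to
 * last no more than 2 ms could request low latency just for that window;
 * the handle and the values are illustrative.
 *
 *	pm_qos_update_request_timeout(&example_req, 20, 2000);
 *
 * After 2000 usec the delayed work runs pm_qos_work_fn(), which drops the
 * request back to PM_QOS_DEFAULT_VALUE.
 */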
518
519 /**
520 * pm_qos_remove_request - removes an existing qos request
521 * @req: handle to request list element
522 *
523 * Will remove pm qos request from the list of constraints and
524 * recompute the current target value for the pm_qos_class. Call this
525 * on slow code paths.
526 */
527 void pm_qos_remove_request(struct pm_qos_request *req)
528 {
529 if (!req) /*guard against callers passing in null */
530 return;
531 /* silent return to keep pcm code cleaner */
532
533 if (!pm_qos_request_active(req)) {
534 WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
535 return;
536 }
537
538 cancel_delayed_work_sync(&req->work);
539
540 trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
541 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
542 &req->node, PM_QOS_REMOVE_REQ,
543 PM_QOS_DEFAULT_VALUE);
544 memset(req, 0, sizeof(*req));
545 }
546 EXPORT_SYMBOL_GPL(pm_qos_remove_request);
547
548 /**
549 * pm_qos_add_notifier - sets notification entry for changes to target value
550 * @pm_qos_class: identifies which qos target changes should be notified.
551 * @notifier: notifier block managed by caller.
552 *
553 * will register the notifier into a notification chain that gets called
554 * upon changes to the pm_qos_class target value.
555 */
556 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
557 {
558 int retval;
559
560 retval = blocking_notifier_chain_register(
561 pm_qos_array[pm_qos_class]->constraints->notifiers,
562 notifier);
563
564 return retval;
565 }
566 EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
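
/*
 * Example (editorial sketch): a subsystem that wants to react when the
 * aggregate latency target changes can register a notifier block; the
 * callback and block names are illustrative.
 *
 *	static int example_lat_notify(struct notifier_block *nb,
 *				      unsigned long value, void *data)
 *	{
 *		pr_info("new latency target: %lu usec\n", value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_lat_notify,
 *	};
 *
 *	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &example_nb);
 *
 * The chain is called from pm_qos_update_target() with the new aggregate
 * value whenever it changes.
 */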
567
568 /**
569 * pm_qos_remove_notifier - deletes notification entry from chain.
570 * @pm_qos_class: identifies which qos target changes are notified.
571 * @notifier: notifier block to be removed.
572 *
573 * will remove the notifier from the notification chain that gets called
574 * upon changes to the pm_qos_class target value.
575 */
576 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
577 {
578 int retval;
579
580 retval = blocking_notifier_chain_unregister(
581 pm_qos_array[pm_qos_class]->constraints->notifiers,
582 notifier);
583
584 return retval;
585 }
586 EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
587
588 /* User space interface to PM QoS classes via misc devices */
589 static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
590 {
591 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
592 qos->pm_qos_power_miscdev.name = qos->name;
593 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
594
595 if (d) {
596 (void)debugfs_create_file(qos->name, S_IRUGO, d,
597 (void *)qos, &pm_qos_debug_fops);
598 }
599
600 return misc_register(&qos->pm_qos_power_miscdev);
601 }
602
603 static int find_pm_qos_object_by_minor(int minor)
604 {
605 int pm_qos_class;
606
607 for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
608 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
609 if (minor ==
610 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
611 return pm_qos_class;
612 }
613 return -1;
614 }
615
616 static int pm_qos_power_open(struct inode *inode, struct file *filp)
617 {
618 long pm_qos_class;
619
620 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
621 if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
622 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
623 if (!req)
624 return -ENOMEM;
625
626 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
627 filp->private_data = req;
628
629 return 0;
630 }
631 return -EPERM;
632 }
633
634 static int pm_qos_power_release(struct inode *inode, struct file *filp)
635 {
636 struct pm_qos_request *req;
637
638 req = filp->private_data;
639 pm_qos_remove_request(req);
640 kfree(req);
641
642 return 0;
643 }
644
645
646 static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
647 size_t count, loff_t *f_pos)
648 {
649 s32 value;
650 unsigned long flags;
651 struct pm_qos_request *req = filp->private_data;
652
653 if (!req)
654 return -EINVAL;
655 if (!pm_qos_request_active(req))
656 return -EINVAL;
657
658 spin_lock_irqsave(&pm_qos_lock, flags);
659 value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
660 spin_unlock_irqrestore(&pm_qos_lock, flags);
661
662 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
663 }
664
665 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
666 size_t count, loff_t *f_pos)
667 {
668 s32 value;
669 struct pm_qos_request *req;
670
671 if (count == sizeof(s32)) {
672 if (copy_from_user(&value, buf, sizeof(s32)))
673 return -EFAULT;
674 } else {
675 int ret;
676
677 ret = kstrtos32_from_user(buf, count, 16, &value);
678 if (ret)
679 return ret;
680 }
681
682 req = filp->private_data;
683 pm_qos_update_request(req, value);
684
685 return count;
686 }
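
/*
 * Example (editorial sketch): user space pins a QoS target by keeping the
 * misc device node open; the request is dropped on close (see
 * pm_qos_power_release()). A write of exactly sizeof(s32) bytes is taken
 * as a raw binary value, any other length is parsed as hex text.
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *	int32_t lat = 20;
 *
 *	write(fd, &lat, sizeof(lat));
 *	... keep fd open while low latency is needed ...
 *	close(fd);
 */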
687
688
689 static int __init pm_qos_power_init(void)
690 {
691 int ret = 0;
692 int i;
693 struct dentry *d;
694
695 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
696
697 d = debugfs_create_dir("pm_qos", NULL);
698 if (IS_ERR_OR_NULL(d))
699 d = NULL;
700
701 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
702 ret = register_pm_qos_misc(pm_qos_array[i], d);
703 if (ret < 0) {
704 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
705 pm_qos_array[i]->name);
706 return ret;
707 }
708 }
709
710 return ret;
711 }
712
713 late_initcall(pm_qos_power_init);