1 /*
2 * linux/include/linux/clk.h
3 *
4 * Copyright (C) 2004 ARM Limited.
5 * Written by Deep Blue Solutions Limited.
6 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #ifndef __LINUX_CLK_H
13 #define __LINUX_CLK_H
14
15 #include <linux/err.h>
16 #include <linux/kernel.h>
17 #include <linux/notifier.h>
18
19 struct device;
20 struct clk;
21 struct device_node;
22 struct of_phandle_args;
23
24 /**
25 * DOC: clk notifier callback types
26 *
27 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
28 * to indicate that the rate change will proceed. Drivers must
29 * immediately terminate any operations that will be affected by the
30 * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
31 * NOTIFY_STOP or NOTIFY_BAD.
32 *
33 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
34 * after PRE_RATE_CHANGE. In this case, all registered notifiers on
35 * the clk will be called with ABORT_RATE_CHANGE. Callbacks must
36 * always return NOTIFY_DONE or NOTIFY_OK.
37 *
38 * POST_RATE_CHANGE - called after the clk rate change has successfully
39 * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
40 *
41 */
42 #define PRE_RATE_CHANGE BIT(0)
43 #define POST_RATE_CHANGE BIT(1)
44 #define ABORT_RATE_CHANGE BIT(2)
45
46 /**
47 * struct clk_notifier - associate a clk with a notifier
48 * @clk: struct clk * to associate the notifier with
49 * @notifier_head: an srcu_notifier_head for this clk
50 * @node: linked list pointers
51 *
52 * A list of struct clk_notifier is maintained by the notifier code.
53 * An entry is created whenever code registers the first notifier on a
54 * particular @clk. Future notifiers on that @clk are added to the
55 * @notifier_head.
56 */
57 struct clk_notifier {
58 struct clk *clk;
59 struct srcu_notifier_head notifier_head;
60 struct list_head node;
61 };
62
63 /**
64 * struct clk_notifier_data - rate data to pass to the notifier callback
65 * @clk: struct clk * being changed
66 * @old_rate: previous rate of this clk
67 * @new_rate: new rate of this clk
68 *
69 * For a pre-notifier, old_rate is the clk's rate before this rate
70 * change, and new_rate is what the rate will be in the future. For a
71 * post-notifier, old_rate and new_rate are both set to the clk's
72 * current rate (this was done to optimize the implementation).
73 */
74 struct clk_notifier_data {
75 struct clk *clk;
76 unsigned long old_rate;
77 unsigned long new_rate;
78 };
79
80 /**
81 * struct clk_bulk_data - Data used for bulk clk operations.
82 *
83 * @id: clock consumer ID
84 * @clk: struct clk * to store the associated clock
85 *
86 * The CLK APIs provide a series of clk_bulk_*() API calls as
87 * a convenience to consumers which require multiple clks. This
88 * structure is used to manage data for these calls.
89 */
90 struct clk_bulk_data {
91 const char *id;
92 struct clk *clk;
93 };
94
95 #ifdef CONFIG_COMMON_CLK
96
97 /**
98 * clk_notifier_register: register a clock rate-change notifier callback
99 * @clk: clock whose rate we are interested in
100 * @nb: notifier block with callback function pointer
101 *
102 * ProTip: debugging across notifier chains can be frustrating. Make sure that
103 * your notifier callback function prints a nice big warning in case of
104 * failure.
105 */
106 int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
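
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * driver registers a rate-change notifier. The callback, the notifier_block
 * and the clk handle are assumptions made for the example.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_warn("foo: rate changing from %lu to %lu\n",
 *				ndata->old_rate, ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_clk_notify };
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */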
107
108 /**
109 * clk_notifier_unregister: unregister a clock rate-change notifier callback
110 * @clk: clock whose rate we are no longer interested in
111 * @nb: notifier block which will be unregistered
112 */
113 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
114
115 /**
116 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
117 * for a clock source.
118 * @clk: clock source
119 *
120 * This gets the clock source accuracy expressed in ppb.
121 * A perfect clock returns 0.
122 */
123 long clk_get_accuracy(struct clk *clk);
124
125 /**
126 * clk_set_phase - adjust the phase shift of a clock signal
127 * @clk: clock signal source
128 * @degrees: number of degrees the signal is shifted
129 *
130 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
131 * success, a negative errno otherwise.
132 */
133 int clk_set_phase(struct clk *clk, int degrees);
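
/*
 * Illustrative sketch (not part of the original header): shift a hypothetical
 * sampling clock by 90 degrees; "priv->sample_clk" is an assumed handle.
 *
 *	ret = clk_set_phase(priv->sample_clk, 90);
 *	if (ret)
 *		dev_err(dev, "failed to set phase: %d\n", ret);
 */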
134
135 /**
136 * clk_get_phase - return the phase shift of a clock signal
137 * @clk: clock signal source
138 *
139 * Returns the phase shift of a clock node in degrees, otherwise returns
140 * a negative errno.
141 */
142 int clk_get_phase(struct clk *clk);
143
144 /**
145 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
146 * @clk: clock signal source
147 * @num: numerator of the duty cycle ratio to be applied
148 * @den: denominator of the duty cycle ratio to be applied
149 *
150 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
151 * success, a negative errno otherwise.
152 */
153 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
154
155 /**
156 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
157 * @clk: clock signal source
158 * @scale: scaling factor to be applied to represent the ratio as an integer
159 *
160 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
161 * returns a negative errno.
162 */
163 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
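
/*
 * Illustrative sketch (not part of the original header): request a 1/2 (50%)
 * duty cycle and read it back as a percentage by passing a scale of 100.
 * "priv->pwm_clk" is an assumed handle.
 *
 *	ret = clk_set_duty_cycle(priv->pwm_clk, 1, 2);
 *	if (!ret)
 *		dev_dbg(dev, "duty cycle: %d%%\n",
 *			clk_get_scaled_duty_cycle(priv->pwm_clk, 100));
 */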
164
165 /**
166 * clk_is_match - check if two clks point to the same hardware clock
167 * @p: clk compared against q
168 * @q: clk compared against p
169 *
170 * Returns true if the two struct clk pointers both point to the same hardware
171 * clock node. Put differently, returns true if @p and @q
172 * share the same &struct clk_core object.
173 *
174 * Returns false otherwise. Note that two NULL clks are treated as matching.
175 */
176 bool clk_is_match(const struct clk *p, const struct clk *q);
177
178 #else
179
180 static inline int clk_notifier_register(struct clk *clk,
181 struct notifier_block *nb)
182 {
183 return -ENOTSUPP;
184 }
185
186 static inline int clk_notifier_unregister(struct clk *clk,
187 struct notifier_block *nb)
188 {
189 return -ENOTSUPP;
190 }
191
192 static inline long clk_get_accuracy(struct clk *clk)
193 {
194 return -ENOTSUPP;
195 }
196
197 static inline int clk_set_phase(struct clk *clk, int degrees)
198 {
199 return -ENOTSUPP;
200 }
201
202 static inline long clk_get_phase(struct clk *clk)
203 {
204 return -ENOTSUPP;
205 }
206
207 static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
208 unsigned int den)
209 {
210 return -ENOTSUPP;
211 }
212
213 static inline int clk_get_scaled_duty_cycle(struct clk *clk,
214 unsigned int scale)
215 {
216 return 0;
217 }
218
219 static inline bool clk_is_match(const struct clk *p, const struct clk *q)
220 {
221 return p == q;
222 }
223
224 #endif
225
226 /**
227 * clk_prepare - prepare a clock source
228 * @clk: clock source
229 *
230 * This prepares the clock source for use.
231 *
232 * Must not be called from within atomic context.
233 */
234 #ifdef CONFIG_HAVE_CLK_PREPARE
235 int clk_prepare(struct clk *clk);
236 int __must_check clk_bulk_prepare(int num_clks,
237 const struct clk_bulk_data *clks);
238 #else
239 static inline int clk_prepare(struct clk *clk)
240 {
241 might_sleep();
242 return 0;
243 }
244
245 static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
246 {
247 might_sleep();
248 return 0;
249 }
250 #endif
251
252 /**
253 * clk_unprepare - undo preparation of a clock source
254 * @clk: clock source
255 *
256 * This undoes a previously prepared clock. The caller must balance
257 * the number of prepare and unprepare calls.
258 *
259 * Must not be called from within atomic context.
260 */
261 #ifdef CONFIG_HAVE_CLK_PREPARE
262 void clk_unprepare(struct clk *clk);
263 void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
264 #else
265 static inline void clk_unprepare(struct clk *clk)
266 {
267 might_sleep();
268 }
269 static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
270 {
271 might_sleep();
272 }
273 #endif
274
275 #ifdef CONFIG_HAVE_CLK
276 /**
277 * clk_get - lookup and obtain a reference to a clock producer.
278 * @dev: device for clock "consumer"
279 * @id: clock consumer ID
280 *
281 * Returns a struct clk corresponding to the clock producer, or
282 * valid IS_ERR() condition containing errno. The implementation
283 * uses @dev and @id to determine the clock consumer, and thereby
284 * the clock producer. (IOW, @id may be identical for two devices, but
285 * clk_get may return different clock producers depending on @dev.)
286 *
287 * Drivers must assume that the clock source is not enabled.
288 *
289 * clk_get should not be called from within interrupt context.
290 */
291 struct clk *clk_get(struct device *dev, const char *id);
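
/*
 * Illustrative sketch (not part of the original header): the usual consumer
 * lifecycle for a non-managed lookup. The "uart" consumer ID is hypothetical.
 *
 *	clk = clk_get(dev, "uart");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable_unprepare(clk);
 *	clk_put(clk);
 */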
292
293 /**
294 * clk_bulk_get - lookup and obtain a number of references to clock producers.
295 * @dev: device for clock "consumer"
296 * @num_clks: the number of clk_bulk_data
297 * @clks: the clk_bulk_data table of consumer
298 *
299 * This helper function allows drivers to get several clk consumers in one
300 * operation. If any of the clocks cannot be acquired, then any clocks
301 * that were obtained will be freed before returning to the caller.
302 *
303 * Returns 0 if all clocks specified in clk_bulk_data table are obtained
304 * successfully, or valid IS_ERR() condition containing errno.
305 * The implementation uses @dev and @clk_bulk_data.id to determine the
306 * clock consumer, and thereby the clock producer.
307 * The clock returned is stored in each @clk_bulk_data.clk field.
308 *
309 * Drivers must assume that the clock source is not enabled.
310 *
311 * clk_bulk_get should not be called from within interrupt context.
312 */
313 int __must_check clk_bulk_get(struct device *dev, int num_clks,
314 struct clk_bulk_data *clks);
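
/*
 * Illustrative sketch (not part of the original header): acquire and enable
 * two clocks in one operation. The "bus" and "core" consumer IDs are
 * hypothetical.
 *
 *	struct clk_bulk_data clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
 *	if (ret)
 *		clk_bulk_put(ARRAY_SIZE(clks), clks);
 */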
315 /**
316 * clk_bulk_get_all - lookup and obtain all available references to clock
317 * producer.
318 * @dev: device for clock "consumer"
319 * @clks: pointer to the clk_bulk_data table of consumer
320 *
321 * This helper function allows drivers to get all clk consumers in one
322 * operation. If any of the clocks cannot be acquired, then any clocks
323 * that were obtained will be freed before returning to the caller.
324 *
325 * Returns a positive value for the number of clocks obtained while the
326 * clock references are stored in the clk_bulk_data table in @clks field.
327 * Returns 0 if there are none and a negative value if something failed.
328 *
329 * Drivers must assume that the clock source is not enabled.
330 *
331 * clk_bulk_get_all should not be called from within interrupt context.
332 */
333 int __must_check clk_bulk_get_all(struct device *dev,
334 struct clk_bulk_data **clks);
335 /**
336 * devm_clk_bulk_get - managed get multiple clk consumers
337 * @dev: device for clock "consumer"
338 * @num_clks: the number of clk_bulk_data
339 * @clks: the clk_bulk_data table of consumer
340 *
341 * Returns 0 on success, or a negative errno on failure.
342 *
343 * This helper function allows drivers to get several clk
344 * consumers in one operation with management; the clks will
345 * automatically be freed when the device is unbound.
346 */
347 int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
348 struct clk_bulk_data *clks);
349 /**
350 * devm_clk_bulk_get_all - managed get multiple clk consumers
351 * @dev: device for clock "consumer"
352 * @clks: pointer to the clk_bulk_data table of consumer
353 *
354 * Returns a positive value for the number of clocks obtained while the
355 * clock references are stored in the clk_bulk_data table in @clks field.
356 * Returns 0 if there are none and a negative value if something failed.
357 *
358 * This helper function allows drivers to get several clk
359 * consumers in one operation with management; the clks will
360 * automatically be freed when the device is unbound.
361 */
362
363 int __must_check devm_clk_bulk_get_all(struct device *dev,
364 struct clk_bulk_data **clks);
365
366 /**
367 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
368 * @dev: device for clock "consumer"
369 * @id: clock consumer ID
370 *
371 * Returns a struct clk corresponding to the clock producer, or
372 * valid IS_ERR() condition containing errno. The implementation
373 * uses @dev and @id to determine the clock consumer, and thereby
374 * the clock producer. (IOW, @id may be identical for two devices, but
375 * clk_get may return different clock producers depending on @dev.)
376 *
377 * Drivers must assume that the clock source is not enabled.
378 *
379 * devm_clk_get should not be called from within interrupt context.
380 *
381 * The clock will automatically be freed when the device is unbound
382 * from the bus.
383 */
384 struct clk *devm_clk_get(struct device *dev, const char *id);
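
/*
 * Illustrative sketch (not part of the original header): typical use from a
 * hypothetical probe() routine; no clk_put() is needed because the reference
 * is managed. The "pclk" consumer ID is an assumption.
 *
 *	priv->clk = devm_clk_get(&pdev->dev, "pclk");
 *	if (IS_ERR(priv->clk))
 *		return PTR_ERR(priv->clk);
 *
 *	ret = clk_prepare_enable(priv->clk);
 */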
385
386 /**
387 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
388 * clock producer.
389 * @dev: device for clock "consumer"
390 * @id: clock consumer ID
391 *
392 * Behaves the same as devm_clk_get() except where there is no clock producer.
393 * In this case, instead of returning -ENOENT, the function returns NULL.
394 */
395 struct clk *devm_clk_get_optional(struct device *dev, const char *id);
396
397 /**
398 * devm_get_clk_from_child - lookup and obtain a managed reference to a
399 * clock producer from child node.
400 * @dev: device for clock "consumer"
401 * @np: pointer to clock consumer node
402 * @con_id: clock consumer ID
403 *
404 * This function parses the clocks and uses them to look up the
405 * struct clk from the registered list of clock providers by using
406 * @np and @con_id.
407 *
408 * The clock will automatically be freed when the device is unbound
409 * from the bus.
410 */
411 struct clk *devm_get_clk_from_child(struct device *dev,
412 struct device_node *np, const char *con_id);
413 /**
414 * clk_rate_exclusive_get - get exclusivity over the rate control of a
415 * producer
416 * @clk: clock source
417 *
418 * This function allows drivers to get exclusive control over the rate of a
419 * provider. It prevents any other consumer from performing, even indirectly,
420 * any operation which could alter the rate of the provider or cause glitches.
421 *
422 * If exclusivity is claimed more than once on a clock, even by the same driver,
423 * the rate is effectively locked, as exclusivity cannot be preempted.
424 *
425 * Must not be called from within atomic context.
426 *
427 * Returns success (0) or negative errno.
428 */
429 int clk_rate_exclusive_get(struct clk *clk);
430
431 /**
432 * clk_rate_exclusive_put - release exclusivity over the rate control of a
433 * producer
434 * @clk: clock source
435 *
436 * This function allows drivers to release the exclusivity they previously
437 * obtained from clk_rate_exclusive_get().
438 *
439 * The caller must balance the number of clk_rate_exclusive_get() and
440 * clk_rate_exclusive_put() calls.
441 *
442 * Must not be called from within atomic context.
443 */
444 void clk_rate_exclusive_put(struct clk *clk);
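
/*
 * Illustrative sketch (not part of the original header): pin the rate of a
 * hypothetical PLL ("priv->pll") around a rate-sensitive operation, then
 * release the claim.
 *
 *	ret = clk_rate_exclusive_get(priv->pll);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_rate_exclusive_put(priv->pll);
 */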
445
446 /**
447 * clk_enable - inform the system when the clock source should be running.
448 * @clk: clock source
449 *
450 * If the clock cannot be enabled/disabled, this should return success.
451 *
452 * May be called from atomic contexts.
453 *
454 * Returns success (0) or negative errno.
455 */
456 int clk_enable(struct clk *clk);
457
458 /**
459 * clk_bulk_enable - inform the system when the set of clks should be running.
460 * @num_clks: the number of clk_bulk_data
461 * @clks: the clk_bulk_data table of consumer
462 *
463 * May be called from atomic contexts.
464 *
465 * Returns success (0) or negative errno.
466 */
467 int __must_check clk_bulk_enable(int num_clks,
468 const struct clk_bulk_data *clks);
469
470 /**
471 * clk_disable - inform the system when the clock source is no longer required.
472 * @clk: clock source
473 *
474 * Inform the system that a clock source is no longer required by
475 * a driver and may be shut down.
476 *
477 * May be called from atomic contexts.
478 *
479 * Implementation detail: if the clock source is shared between
480 * multiple drivers, clk_enable() calls must be balanced by the
481 * same number of clk_disable() calls for the clock source to be
482 * disabled.
483 */
484 void clk_disable(struct clk *clk);
485
486 /**
487 * clk_bulk_disable - inform the system when the set of clks is no
488 * longer required.
489 * @num_clks: the number of clk_bulk_data
490 * @clks: the clk_bulk_data table of consumer
491 *
492 * Inform the system that a set of clks is no longer required by
493 * a driver and may be shut down.
494 *
495 * May be called from atomic contexts.
496 *
497 * Implementation detail: if the set of clks is shared between
498 * multiple drivers, clk_bulk_enable() calls must be balanced by the
499 * same number of clk_bulk_disable() calls for the clock source to be
500 * disabled.
501 */
502 void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);
503
504 /**
505 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
506 * This is only valid once the clock source has been enabled.
507 * @clk: clock source
508 */
509 unsigned long clk_get_rate(struct clk *clk);
510
511 /**
512 * clk_put - "free" the clock source
513 * @clk: clock source
514 *
515 * Note: drivers must ensure that all clk_enable calls made on this
516 * clock source are balanced by clk_disable calls prior to calling
517 * this function.
518 *
519 * clk_put should not be called from within interrupt context.
520 */
521 void clk_put(struct clk *clk);
522
523 /**
524 * clk_bulk_put - "free" the clock source
525 * @num_clks: the number of clk_bulk_data
526 * @clks: the clk_bulk_data table of consumer
527 *
528 * Note: drivers must ensure that all clk_bulk_enable calls made on this
529 * clock source are balanced by clk_bulk_disable calls prior to calling
530 * this function.
531 *
532 * clk_bulk_put should not be called from within interrupt context.
533 */
534 void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
535
536 /**
537 * clk_bulk_put_all - "free" all the clock sources
538 * @num_clks: the number of clk_bulk_data
539 * @clks: the clk_bulk_data table of consumer
540 *
541 * Note: drivers must ensure that all clk_bulk_enable calls made on this
542 * clock source are balanced by clk_bulk_disable calls prior to calling
543 * this function.
544 *
545 * clk_bulk_put_all should not be called from within interrupt context.
546 */
547 void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
548
549 /**
550 * devm_clk_put - "free" a managed clock source
551 * @dev: device used to acquire the clock
552 * @clk: clock source acquired with devm_clk_get()
553 *
554 * Note: drivers must ensure that all clk_enable calls made on this
555 * clock source are balanced by clk_disable calls prior to calling
556 * this function.
557 *
558 * devm_clk_put should not be called from within interrupt context.
559 */
560 void devm_clk_put(struct device *dev, struct clk *clk);
561
562 /*
563 * The remaining APIs are optional for machine class support.
564 */
565
566
567 /**
568 * clk_round_rate - adjust a rate to the exact rate a clock can provide
569 * @clk: clock source
570 * @rate: desired clock rate in Hz
571 *
572 * This answers the question "if I were to pass @rate to clk_set_rate(),
573 * what clock rate would I end up with?" without changing the hardware
574 * in any way. In other words:
575 *
576 * rate = clk_round_rate(clk, r);
577 *
578 * and:
579 *
580 * clk_set_rate(clk, r);
581 * rate = clk_get_rate(clk);
582 *
583 * are equivalent except the former does not modify the clock hardware
584 * in any way.
585 *
586 * Returns rounded clock rate in Hz, or negative errno.
587 */
588 long clk_round_rate(struct clk *clk, unsigned long rate);
589
590 /**
591 * clk_set_rate - set the clock rate for a clock source
592 * @clk: clock source
593 * @rate: desired clock rate in Hz
594 *
595 * Returns success (0) or negative errno.
596 */
597 int clk_set_rate(struct clk *clk, unsigned long rate);
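
/*
 * Illustrative sketch (not part of the original header): check what a
 * hypothetical 48 MHz request would round to before committing it to the
 * hardware.
 *
 *	long rounded = clk_round_rate(priv->clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(priv->clk, rounded);
 */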
598
599 /**
600 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
601 * clock source
602 * @clk: clock source
603 * @rate: desired clock rate in Hz
604 *
605 * This helper function allows drivers to atomically set the rate of a producer
606 * and claim exclusivity over the rate control of the producer.
607 *
608 * It is essentially a combination of clk_set_rate() and
609 * clk_rate_exclusive_get(). The caller must balance this call with a call to
610 * clk_rate_exclusive_put().
611 *
612 * Returns success (0) or negative errno.
613 */
614 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
615
616 /**
617 * clk_has_parent - check if a clock is a possible parent for another
618 * @clk: clock source
619 * @parent: parent clock source
620 *
621 * This function can be used in drivers that need to check that a clock can be
622 * the parent of another without actually changing the parent.
623 *
624 * Returns true if @parent is a possible parent for @clk, false otherwise.
625 */
626 bool clk_has_parent(struct clk *clk, struct clk *parent);
627
628 /**
629 * clk_set_rate_range - set a rate range for a clock source
630 * @clk: clock source
631 * @min: desired minimum clock rate in Hz, inclusive
632 * @max: desired maximum clock rate in Hz, inclusive
633 *
634 * Returns success (0) or negative errno.
635 */
636 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
637
638 /**
639 * clk_set_min_rate - set a minimum clock rate for a clock source
640 * @clk: clock source
641 * @rate: desired minimum clock rate in Hz, inclusive
642 *
643 * Returns success (0) or negative errno.
644 */
645 int clk_set_min_rate(struct clk *clk, unsigned long rate);
646
647 /**
648 * clk_set_max_rate - set a maximum clock rate for a clock source
649 * @clk: clock source
650 * @rate: desired maximum clock rate in Hz, inclusive
651 *
652 * Returns success (0) or negative errno.
653 */
654 int clk_set_max_rate(struct clk *clk, unsigned long rate);
655
656 /**
657 * clk_set_parent - set the parent clock source for this clock
658 * @clk: clock source
659 * @parent: parent clock source
660 *
661 * Returns success (0) or negative errno.
662 */
663 int clk_set_parent(struct clk *clk, struct clk *parent);
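
/*
 * Illustrative sketch (not part of the original header): reparent a
 * hypothetical mux clock to a faster source only if the topology allows it.
 *
 *	if (clk_has_parent(priv->mux, priv->pll_fast))
 *		ret = clk_set_parent(priv->mux, priv->pll_fast);
 */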
664
665 /**
666 * clk_get_parent - get the parent clock source for this clock
667 * @clk: clock source
668 *
669 * Returns struct clk corresponding to parent clock source, or
670 * valid IS_ERR() condition containing errno.
671 */
672 struct clk *clk_get_parent(struct clk *clk);
673
674 /**
675 * clk_get_sys - get a clock based upon the device name
676 * @dev_id: device name
677 * @con_id: connection ID
678 *
679 * Returns a struct clk corresponding to the clock producer, or
680 * valid IS_ERR() condition containing errno. The implementation
681 * uses @dev_id and @con_id to determine the clock consumer, and
682 * thereby the clock producer. In contrast to clk_get() this function
683 * takes the device name instead of the device itself for identification.
684 *
685 * Drivers must assume that the clock source is not enabled.
686 *
687 * clk_get_sys should not be called from within interrupt context.
688 */
689 struct clk *clk_get_sys(const char *dev_id, const char *con_id);
690
691 /**
692 * clk_save_context - save clock context for poweroff
693 *
694 * Saves the context of the clock registers for power states in which the
695 * contents of the registers will be lost. Occurs deep within the suspend
696 * code so locking is not necessary.
697 */
698 int clk_save_context(void);
699
700 /**
701 * clk_restore_context - restore clock context after poweroff
702 *
703 * This occurs with all clocks enabled. Occurs deep within the resume code
704 * so locking is not necessary.
705 */
706 void clk_restore_context(void);
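
/*
 * Illustrative sketch (not part of the original header): how a hypothetical
 * platform might pair the two calls across a deep power-off.
 *
 *	Late in the suspend path:
 *		ret = clk_save_context();
 *
 *	Early in the resume path:
 *		clk_restore_context();
 */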
707
708 #else /* !CONFIG_HAVE_CLK */
709
710 static inline struct clk *clk_get(struct device *dev, const char *id)
711 {
712 return NULL;
713 }
714
715 static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
716 struct clk_bulk_data *clks)
717 {
718 return 0;
719 }
720
721 static inline int __must_check clk_bulk_get_all(struct device *dev,
722 struct clk_bulk_data **clks)
723 {
724 return 0;
725 }
726
727 static inline struct clk *devm_clk_get(struct device *dev, const char *id)
728 {
729 return NULL;
730 }
731
732 static inline struct clk *devm_clk_get_optional(struct device *dev,
733 const char *id)
734 {
735 return NULL;
736 }
737
738 static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
739 struct clk_bulk_data *clks)
740 {
741 return 0;
742 }
743
744 static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
745 struct clk_bulk_data **clks)
746 {
747
748 return 0;
749 }
750
751 static inline struct clk *devm_get_clk_from_child(struct device *dev,
752 struct device_node *np, const char *con_id)
753 {
754 return NULL;
755 }
756
757 static inline void clk_put(struct clk *clk) {}
758
759 static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
760
761 static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
762
763 static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
764
765
766 static inline int clk_rate_exclusive_get(struct clk *clk)
767 {
768 return 0;
769 }
770
771 static inline void clk_rate_exclusive_put(struct clk *clk) {}
772
773 static inline int clk_enable(struct clk *clk)
774 {
775 return 0;
776 }
777
778 static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
779 {
780 return 0;
781 }
782
783 static inline void clk_disable(struct clk *clk) {}
784
785
786 static inline void clk_bulk_disable(int num_clks,
787 struct clk_bulk_data *clks) {}
788
789 static inline unsigned long clk_get_rate(struct clk *clk)
790 {
791 return 0;
792 }
793
794 static inline int clk_set_rate(struct clk *clk, unsigned long rate)
795 {
796 return 0;
797 }
798
799 static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
800 {
801 return 0;
802 }
803
804 static inline long clk_round_rate(struct clk *clk, unsigned long rate)
805 {
806 return 0;
807 }
808
809 static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
810 {
811 return true;
812 }
813
814 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
815 {
816 return 0;
817 }
818
819 static inline struct clk *clk_get_parent(struct clk *clk)
820 {
821 return NULL;
822 }
823
824 static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
825 {
826 return NULL;
827 }
828
829 static inline int clk_save_context(void)
830 {
831 return 0;
832 }
833
834 static inline void clk_restore_context(void) {}
835
836 #endif
837
838 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
839 static inline int clk_prepare_enable(struct clk *clk)
840 {
841 int ret;
842
843 ret = clk_prepare(clk);
844 if (ret)
845 return ret;
846 ret = clk_enable(clk);
847 if (ret)
848 clk_unprepare(clk);
849
850 return ret;
851 }
852
853 /* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
854 static inline void clk_disable_unprepare(struct clk *clk)
855 {
856 clk_disable(clk);
857 clk_unprepare(clk);
858 }
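
/*
 * Illustrative sketch (not part of the original header): the usual pairing of
 * these helpers in a hypothetical driver's probe() and remove() paths.
 *
 *	probe:
 *		ret = clk_prepare_enable(priv->clk);
 *		if (ret)
 *			return ret;
 *
 *	remove:
 *		clk_disable_unprepare(priv->clk);
 */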
859
860 static inline int __must_check clk_bulk_prepare_enable(int num_clks,
861 struct clk_bulk_data *clks)
862 {
863 int ret;
864
865 ret = clk_bulk_prepare(num_clks, clks);
866 if (ret)
867 return ret;
868 ret = clk_bulk_enable(num_clks, clks);
869 if (ret)
870 clk_bulk_unprepare(num_clks, clks);
871
872 return ret;
873 }
874
875 static inline void clk_bulk_disable_unprepare(int num_clks,
876 struct clk_bulk_data *clks)
877 {
878 clk_bulk_disable(num_clks, clks);
879 clk_bulk_unprepare(num_clks, clks);
880 }
881
882 /**
883 * clk_get_optional - lookup and obtain a reference to an optional clock
884 * producer.
885 * @dev: device for clock "consumer"
886 * @id: clock consumer ID
887 *
888 * Behaves the same as clk_get() except where there is no clock producer. In
889 * this case, instead of returning -ENOENT, the function returns NULL.
890 */
891 static inline struct clk *clk_get_optional(struct device *dev, const char *id)
892 {
893 struct clk *clk = clk_get(dev, id);
894
895 if (clk == ERR_PTR(-ENOENT))
896 return NULL;
897
898 return clk;
899 }
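
/*
 * Illustrative sketch (not part of the original header): handle a clock that
 * a board may or may not provide. A NULL clk is a valid "no clock" handle and
 * is accepted by clk_prepare_enable() and friends. The "aux" ID is
 * hypothetical.
 *
 *	priv->opt_clk = clk_get_optional(dev, "aux");
 *	if (IS_ERR(priv->opt_clk))
 *		return PTR_ERR(priv->opt_clk);
 *
 *	clk_prepare_enable(priv->opt_clk);
 */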
900
901 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
902 struct clk *of_clk_get(struct device_node *np, int index);
903 struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
904 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
905 #else
906 static inline struct clk *of_clk_get(struct device_node *np, int index)
907 {
908 return ERR_PTR(-ENOENT);
909 }
910 static inline struct clk *of_clk_get_by_name(struct device_node *np,
911 const char *name)
912 {
913 return ERR_PTR(-ENOENT);
914 }
915 static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
916 {
917 return ERR_PTR(-ENOENT);
918 }
919 #endif
920
921 #endif