/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE		BIT(0)
#define POST_RATE_CHANGE	BIT(1)
#define ABORT_RATE_CHANGE	BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk *clk;
	struct srcu_notifier_head notifier_head;
	struct list_head node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk *clk;
	unsigned long old_rate;
	unsigned long new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char *id;
	struct clk *clk;
};
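
/*
 * Example usage (sketch): a consumer describing its clocks with a
 * clk_bulk_data table.  The table name "foo_clks" and the clock names
 * "bus" and "core" are hypothetical.
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "bus" },	// .clk is filled in by clk_bulk_get()
 *		{ .id = "core" },
 *	};
 */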

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
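
/*
 * Example usage (sketch): a rate-change notifier callback and its
 * registration.  The names foo_clk_cb, foo_nb and foo->clk are hypothetical.
 *
 *	static int foo_clk_cb(struct notifier_block *nb, unsigned long event,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_info("rate %lu -> %lu\n", cnd->old_rate,
 *				cnd->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_clk_cb };
 *
 *	ret = clk_notifier_register(foo->clk, &foo_nb);	// 0 on success
 */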

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, or a negative errno on failure.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, or a negative errno on failure.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
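
/*
 * Example usage (sketch): applying a 90 degree phase shift and a 1/2 duty
 * cycle to a consumer clock.  The clk pointer "foo->sample_clk" is
 * hypothetical.
 *
 *	ret = clk_set_phase(foo->sample_clk, 90);
 *	if (!ret)
 *		ret = clk_set_duty_cycle(foo->sample_clk, 1, 2);
 */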

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node.  Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_phase(struct clk *clk, int degrees)
{
	return -ENOTSUPP;
}

static inline int clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline int clk_get_scaled_duty_cycle(struct clk *clk,
					    unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check clk_bulk_prepare(int num_clks,
						const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock. The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);
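
/*
 * Example usage (sketch): the classic get/prepare/enable pattern in a
 * consumer's probe path.  The clock name "bus" is hypothetical.
 *
 *	struct clk *clk;
 *
 *	clk = clk_get(dev, "bus");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 */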

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation.  If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or a negative errno on failure.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
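
/*
 * Example usage (sketch): acquiring and enabling the clocks from the
 * clk_bulk_data example above.  "foo_clks" is that hypothetical table.
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		clk_bulk_put(ARRAY_SIZE(foo_clks), foo_clks);
 */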

/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation.  If any of the clks cannot be acquired then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Returns 0 on success, or a negative errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);

/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);
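
/*
 * Example usage (sketch): the managed variant of the clk_get() pattern above.
 * No explicit clk_put() is needed; the reference is dropped when the device
 * is unbound.  The device name "foo" and clock name "bus" are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct clk *clk = devm_clk_get(dev, "bus");
 *
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		return clk_prepare_enable(clk);	// balanced by
 *						// clk_disable_unprepare() on remove
 *	}
 */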

/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *			   clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns NULL.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * operations which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * claimed with clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);
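
/*
 * Example usage (sketch): pinning a rate-critical clock while a transfer
 * runs.  The clk pointer "foo->pll" is hypothetical.
 *
 *	ret = clk_rate_exclusive_get(foo->pll);
 *	if (ret)
 *		return ret;
 *	// ... rate-sensitive work; nobody else can retune foo->pll here ...
 *	clk_rate_exclusive_put(foo->pll);
 */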

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */

/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);
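
/*
 * Example usage (sketch): checking what rate the clock would actually run at
 * before committing to it.  The 48 MHz target is arbitrary.
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */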

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *			    clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get().  Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);
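
/*
 * Example usage (sketch): constraining a clock to run between 100 MHz and
 * 200 MHz; the clock framework keeps the rate inside this window.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 */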

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost.  Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled.  Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
						     int num_clks,
						     struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev,
						 int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       const struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    const struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}
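
/*
 * Example usage (sketch): the usual pairing of the two combined helpers
 * across probe/remove or suspend/resume paths.  The clk pointer "foo->clk"
 * is hypothetical.
 *
 *	ret = clk_prepare_enable(foo->clk);	// probe / resume path
 *	if (ret)
 *		return ret;
 *	// ...
 *	clk_disable_unprepare(foo->clk);	// remove / suspend path
 */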

static inline int __must_check clk_bulk_prepare_enable(int num_clks,
							struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer.  In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (clk == ERR_PTR(-ENOENT))
		return NULL;

	return clk;
}
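
/*
 * Example usage (sketch): consuming an optional clock.  A missing clock comes
 * back as NULL, and NULL clks are accepted by clk_prepare_enable() and
 * friends, so no special-casing is needed.  The clock name "debounce" is
 * hypothetical.
 *
 *	struct clk *clk = clk_get_optional(dev, "debounce");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);	// a real error, not just "absent"
 *	ret = clk_prepare_enable(clk);	// NULL clk: returns 0, does nothing
 */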
137f8a72 936#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
766e6a4e
GL
937struct clk *of_clk_get(struct device_node *np, int index);
938struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
939struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
940#else
941static inline struct clk *of_clk_get(struct device_node *np, int index)
942{
9f1612d3 943 return ERR_PTR(-ENOENT);
766e6a4e
GL
944}
945static inline struct clk *of_clk_get_by_name(struct device_node *np,
946 const char *name)
947{
9f1612d3 948 return ERR_PTR(-ENOENT);
766e6a4e 949}
428c9de5
GU
950static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
951{
952 return ERR_PTR(-ENOENT);
953}
766e6a4e
GL
954#endif
955
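
/*
 * Example usage (sketch): looking up a clock directly from a device_node,
 * e.g. when there is no struct device to hand to clk_get().  The node
 * pointer "np" and the clock name "ref" are assumptions.
 *
 *	struct clk *clk = of_clk_get_by_name(np, "ref");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */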
1da177e4 956#endif