| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * linux/include/linux/clk.h |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Copyright (C) 2004 ARM Limited. |
|---|
| 5 | 6 | * Written by Deep Blue Solutions Limited. |
|---|
| 6 | 7 | * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> |
|---|
| 7 | | - * |
|---|
| 8 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 9 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 10 | | - * published by the Free Software Foundation. |
|---|
| 11 | 8 | */ |
|---|
| 12 | 9 | #ifndef __LINUX_CLK_H |
|---|
| 13 | 10 | #define __LINUX_CLK_H |
|---|
| .. | .. |
|---|
| 113 | 110 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); |
|---|
| 114 | 111 | |
|---|
| 115 | 112 | /** |
|---|
| 113 | + * devm_clk_notifier_register - register a managed rate-change notifier callback |
|---|
| 114 | + * @dev: device for clock "consumer" |
|---|
| 115 | + * @clk: clock whose rate we are interested in |
|---|
| 116 | + * @nb: notifier block with callback function pointer |
|---|
| 117 | + * |
|---|
| 118 | + * Returns 0 on success, -EERROR otherwise |
|---|
| 119 | + */ |
|---|
| 120 | +int devm_clk_notifier_register(struct device *dev, struct clk *clk, |
|---|
| 121 | + struct notifier_block *nb); |
|---|
| 122 | + |
|---|
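
The managed variant above removes the need to call clk_notifier_unregister() on every error path and in remove(). A minimal consumer sketch, assuming a hypothetical platform driver (the foo_* names and the unnamed clock lookup are illustrative, not part of this patch):

```c
#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == POST_RATE_CHANGE)
		pr_debug("rate changed: %lu -> %lu\n",
			 ndata->old_rate, ndata->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_clk_notify,
};

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Unregistered automatically when the device is unbound. */
	return devm_clk_notifier_register(&pdev->dev, clk, &foo_nb);
}
```
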
| 123 | +/** |
|---|
| 116 | 124 | * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) |
|---|
| 117 | 125 | * for a clock source. |
|---|
| 118 | 126 | * @clk: clock source |
|---|
| .. | .. |
|---|
| 175 | 183 | */ |
|---|
| 176 | 184 | bool clk_is_match(const struct clk *p, const struct clk *q); |
|---|
| 177 | 185 | |
|---|
| 186 | +/** |
|---|
| 187 | + * clk_rate_exclusive_get - get exclusivity over the rate control of a |
|---|
| 188 | + * producer |
|---|
| 189 | + * @clk: clock source |
|---|
| 190 | + * |
|---|
| 191 | + * This function allows drivers to get exclusive control over the rate of a |
|---|
| 192 | + * provider. It prevents any other consumer from executing, even indirectly, |
|---|
| 193 | + * an operation which could alter the rate of the provider or cause glitches. |
|---|
| 194 | + * |
|---|
| 195 | + * If exclusivity is claimed more than once on a clock, even by the same driver, |
|---|
| 196 | + * the rate effectively gets locked as exclusivity can't be preempted. |
|---|
| 197 | + * |
|---|
| 198 | + * Must not be called from within atomic context. |
|---|
| 199 | + * |
|---|
| 200 | + * Returns success (0) or negative errno. |
|---|
| 201 | + */ |
|---|
| 202 | +int clk_rate_exclusive_get(struct clk *clk); |
|---|
| 203 | + |
|---|
| 204 | +/** |
|---|
| 205 | + * clk_rate_exclusive_put - release exclusivity over the rate control of a |
|---|
| 206 | + * producer |
|---|
| 207 | + * @clk: clock source |
|---|
| 208 | + * |
|---|
| 209 | + * This function allows a driver to release the exclusivity it previously got |
|---|
| 210 | + * from clk_rate_exclusive_get(). |
|---|
| 211 | + * |
|---|
| 212 | + * The caller must balance the number of clk_rate_exclusive_get() and |
|---|
| 213 | + * clk_rate_exclusive_put() calls. |
|---|
| 214 | + * |
|---|
| 215 | + * Must not be called from within atomic context. |
|---|
| 216 | + */ |
|---|
| 217 | +void clk_rate_exclusive_put(struct clk *clk); |
|---|
| 218 | + |
|---|
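
A sketch of the get/put pairing the kernel-doc above describes: the consumer pins the rate before programming it and keeps exclusivity until it no longer cares. The helper name and rate value are illustrative:

```c
#include <linux/clk.h>

/* Illustrative helper; not part of this patch. */
static int foo_pin_pixel_clock(struct clk *clk, unsigned long rate)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* may sleep */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, rate);
	if (ret)
		clk_rate_exclusive_put(clk);	/* keep get/put balanced */

	/*
	 * On success the caller holds exclusivity until it calls
	 * clk_rate_exclusive_put().
	 */
	return ret;
}
```

For the common get-then-set pattern the header also provides clk_set_rate_exclusive(), which combines the two steps.
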
| 178 | 219 | #else |
|---|
| 179 | 220 | |
|---|
| 180 | 221 | static inline int clk_notifier_register(struct clk *clk, |
|---|
| .. | .. |
|---|
| 185 | 226 | |
|---|
| 186 | 227 | static inline int clk_notifier_unregister(struct clk *clk, |
|---|
| 187 | 228 | struct notifier_block *nb) |
|---|
| 229 | +{ |
|---|
| 230 | + return -ENOTSUPP; |
|---|
| 231 | +} |
|---|
| 232 | + |
|---|
| 233 | +static inline int devm_clk_notifier_register(struct device *dev, |
|---|
| 234 | + struct clk *clk, |
|---|
| 235 | + struct notifier_block *nb) |
|---|
| 188 | 236 | { |
|---|
| 189 | 237 | return -ENOTSUPP; |
|---|
| 190 | 238 | } |
|---|
| .. | .. |
|---|
| 221 | 269 | return p == q; |
|---|
| 222 | 270 | } |
|---|
| 223 | 271 | |
|---|
| 272 | +static inline int clk_rate_exclusive_get(struct clk *clk) |
|---|
| 273 | +{ |
|---|
| 274 | + return 0; |
|---|
| 275 | +} |
|---|
| 276 | + |
|---|
| 277 | +static inline void clk_rate_exclusive_put(struct clk *clk) {} |
|---|
| 278 | + |
|---|
| 224 | 279 | #endif |
|---|
| 225 | 280 | |
|---|
| 226 | 281 | /** |
|---|
| .. | .. |
|---|
| 242 | 297 | return 0; |
|---|
| 243 | 298 | } |
|---|
| 244 | 299 | |
|---|
| 245 | | -static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks) |
|---|
| 300 | +static inline int __must_check |
|---|
| 301 | +clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks) |
|---|
| 246 | 302 | { |
|---|
| 247 | 303 | might_sleep(); |
|---|
| 248 | 304 | return 0; |
|---|
| .. | .. |
|---|
| 266 | 322 | { |
|---|
| 267 | 323 | might_sleep(); |
|---|
| 268 | 324 | } |
|---|
| 269 | | -static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks) |
|---|
| 325 | +static inline void clk_bulk_unprepare(int num_clks, |
|---|
| 326 | + const struct clk_bulk_data *clks) |
|---|
| 270 | 327 | { |
|---|
| 271 | 328 | might_sleep(); |
|---|
| 272 | 329 | } |
|---|
| .. | .. |
|---|
| 362 | 419 | /** |
|---|
| 363 | 420 | * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks |
|---|
| 364 | 421 | * @dev: device for clock "consumer" |
|---|
| 422 | + * @num_clks: the number of clk_bulk_data |
|---|
| 365 | 423 | * @clks: pointer to the clk_bulk_data table of consumer |
|---|
| 366 | 424 | * |
|---|
| 367 | 425 | * Behaves the same as devm_clk_bulk_get() except where there is no clock |
|---|
| .. | .. |
|---|
| 419 | 477 | struct clk *devm_clk_get(struct device *dev, const char *id); |
|---|
| 420 | 478 | |
|---|
| 421 | 479 | /** |
|---|
| 480 | + * devm_clk_get_prepared - devm_clk_get() + clk_prepare() |
|---|
| 481 | + * @dev: device for clock "consumer" |
|---|
| 482 | + * @id: clock consumer ID |
|---|
| 483 | + * |
|---|
| 484 | + * Context: May sleep. |
|---|
| 485 | + * |
|---|
| 486 | + * Return: a struct clk corresponding to the clock producer, or |
|---|
| 487 | + * valid IS_ERR() condition containing errno. The implementation |
|---|
| 488 | + * uses @dev and @id to determine the clock consumer, and thereby |
|---|
| 489 | + * the clock producer. (IOW, @id may be identical strings, but |
|---|
| 490 | + * clk_get may return different clock producers depending on @dev.) |
|---|
| 491 | + * |
|---|
| 492 | + * The returned clk (if valid) is prepared. Drivers must however assume |
|---|
| 493 | + * that the clock is not enabled. |
|---|
| 494 | + * |
|---|
| 495 | + * The clock will automatically be unprepared and freed when the device |
|---|
| 496 | + * is unbound from the bus. |
|---|
| 497 | + */ |
|---|
| 498 | +struct clk *devm_clk_get_prepared(struct device *dev, const char *id); |
|---|
| 499 | + |
|---|
| 500 | +/** |
|---|
| 501 | + * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() |
|---|
| 502 | + * @dev: device for clock "consumer" |
|---|
| 503 | + * @id: clock consumer ID |
|---|
| 504 | + * |
|---|
| 505 | + * Context: May sleep. |
|---|
| 506 | + * |
|---|
| 507 | + * Return: a struct clk corresponding to the clock producer, or |
|---|
| 508 | + * valid IS_ERR() condition containing errno. The implementation |
|---|
| 509 | + * uses @dev and @id to determine the clock consumer, and thereby |
|---|
| 510 | + * the clock producer. (IOW, @id may be identical strings, but |
|---|
| 511 | + * clk_get may return different clock producers depending on @dev.) |
|---|
| 512 | + * |
|---|
| 513 | + * The returned clk (if valid) is prepared and enabled. |
|---|
| 514 | + * |
|---|
| 515 | + * The clock will automatically be disabled, unprepared and freed |
|---|
| 516 | + * when the device is unbound from the bus. |
|---|
| 517 | + */ |
|---|
| 518 | +struct clk *devm_clk_get_enabled(struct device *dev, const char *id); |
|---|
| 519 | + |
|---|
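
A probe sketch using the enabled variant: with it, neither an error path nor remove() needs clk_disable_unprepare(). The driver and clock names are illustrative:

```c
#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get bus clock\n");

	/*
	 * Nothing to undo here or in remove(): the clock is disabled,
	 * unprepared and put when the device is unbound.
	 */
	return 0;
}
```
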
| 520 | +/** |
|---|
| 422 | 521 | * devm_clk_get_optional - lookup and obtain a managed reference to an optional |
|---|
| 423 | 522 | * clock producer. |
|---|
| 424 | 523 | * @dev: device for clock "consumer" |
|---|
| .. | .. |
|---|
| 428 | 527 | * In this case, instead of returning -ENOENT, the function returns NULL. |
|---|
| 429 | 528 | */ |
|---|
| 430 | 529 | struct clk *devm_clk_get_optional(struct device *dev, const char *id); |
|---|
| 530 | + |
|---|
| 531 | +/** |
|---|
| 532 | + * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() |
|---|
| 533 | + * @dev: device for clock "consumer" |
|---|
| 534 | + * @id: clock consumer ID |
|---|
| 535 | + * |
|---|
| 536 | + * Context: May sleep. |
|---|
| 537 | + * |
|---|
| 538 | + * Return: a struct clk corresponding to the clock producer, or |
|---|
| 539 | + * valid IS_ERR() condition containing errno. The implementation |
|---|
| 540 | + * uses @dev and @id to determine the clock consumer, and thereby |
|---|
| 541 | + * the clock producer. If no such clk is found, it returns NULL |
|---|
| 542 | + * which serves as a dummy clk. That's the only difference compared |
|---|
| 543 | + * to devm_clk_get_prepared(). |
|---|
| 544 | + * |
|---|
| 545 | + * The returned clk (if valid) is prepared. Drivers must however |
|---|
| 546 | + * assume that the clock is not enabled. |
|---|
| 547 | + * |
|---|
| 548 | + * The clock will automatically be unprepared and freed when the |
|---|
| 549 | + * device is unbound from the bus. |
|---|
| 550 | + */ |
|---|
| 551 | +struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); |
|---|
| 552 | + |
|---|
| 553 | +/** |
|---|
| 554 | + * devm_clk_get_optional_enabled - devm_clk_get_optional() + |
|---|
| 555 | + * clk_prepare_enable() |
|---|
| 556 | + * @dev: device for clock "consumer" |
|---|
| 557 | + * @id: clock consumer ID |
|---|
| 558 | + * |
|---|
| 559 | + * Context: May sleep. |
|---|
| 560 | + * |
|---|
| 561 | + * Return: a struct clk corresponding to the clock producer, or |
|---|
| 562 | + * valid IS_ERR() condition containing errno. The implementation |
|---|
| 563 | + * uses @dev and @id to determine the clock consumer, and thereby |
|---|
| 564 | + * the clock producer. If no such clk is found, it returns NULL |
|---|
| 565 | + * which serves as a dummy clk. That's the only difference compared |
|---|
| 566 | + * to devm_clk_get_enabled(). |
|---|
| 567 | + * |
|---|
| 568 | + * The returned clk (if valid) is prepared and enabled. |
|---|
| 569 | + * |
|---|
| 570 | + * The clock will automatically be disabled, unprepared and freed |
|---|
| 571 | + * when the device is unbound from the bus. |
|---|
| 572 | + */ |
|---|
| 573 | +struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); |
|---|
| 431 | 574 | |
|---|
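
And the optional flavour, for a clock that some boards simply do not wire up: a NULL return is a valid dummy clk, so the rest of the driver needs no special-casing. Names are illustrative:

```c
#include <linux/clk.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
	struct clk *ref;

	ref = devm_clk_get_optional_enabled(&pdev->dev, "ref");
	if (IS_ERR(ref))
		return PTR_ERR(ref);	/* real error, e.g. -EPROBE_DEFER */

	/* ref may be NULL here; clk_get_rate(NULL) simply returns 0. */
	dev_info(&pdev->dev, "ref clock at %lu Hz\n", clk_get_rate(ref));

	return 0;
}
```
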
| 432 | 575 | /** |
|---|
| 433 | 576 | * devm_get_clk_from_child - lookup and obtain a managed reference to a |
|---|
| .. | .. |
|---|
| 445 | 588 | */ |
|---|
| 446 | 589 | struct clk *devm_get_clk_from_child(struct device *dev, |
|---|
| 447 | 590 | struct device_node *np, const char *con_id); |
|---|
| 448 | | -/** |
|---|
| 449 | | - * clk_rate_exclusive_get - get exclusivity over the rate control of a |
|---|
| 450 | | - * producer |
|---|
| 451 | | - * @clk: clock source |
|---|
| 452 | | - * |
|---|
| 453 | | - * This function allows drivers to get exclusive control over the rate of a |
|---|
| 454 | | - * provider. It prevents any other consumer to execute, even indirectly, |
|---|
| 455 | | - * opereation which could alter the rate of the provider or cause glitches |
|---|
| 456 | | - * |
|---|
| 457 | | - * If exlusivity is claimed more than once on clock, even by the same driver, |
|---|
| 458 | | - * the rate effectively gets locked as exclusivity can't be preempted. |
|---|
| 459 | | - * |
|---|
| 460 | | - * Must not be called from within atomic context. |
|---|
| 461 | | - * |
|---|
| 462 | | - * Returns success (0) or negative errno. |
|---|
| 463 | | - */ |
|---|
| 464 | | -int clk_rate_exclusive_get(struct clk *clk); |
|---|
| 465 | | - |
|---|
| 466 | | -/** |
|---|
| 467 | | - * clk_rate_exclusive_put - release exclusivity over the rate control of a |
|---|
| 468 | | - * producer |
|---|
| 469 | | - * @clk: clock source |
|---|
| 470 | | - * |
|---|
| 471 | | - * This function allows drivers to release the exclusivity it previously got |
|---|
| 472 | | - * from clk_rate_exclusive_get() |
|---|
| 473 | | - * |
|---|
| 474 | | - * The caller must balance the number of clk_rate_exclusive_get() and |
|---|
| 475 | | - * clk_rate_exclusive_put() calls. |
|---|
| 476 | | - * |
|---|
| 477 | | - * Must not be called from within atomic context. |
|---|
| 478 | | - */ |
|---|
| 479 | | -void clk_rate_exclusive_put(struct clk *clk); |
|---|
| 480 | 591 | |
|---|
| 481 | 592 | /** |
|---|
| 482 | 593 | * clk_enable - inform the system when the clock source should be running. |
|---|
| .. | .. |
|---|
| 627 | 738 | * @clk: clock source |
|---|
| 628 | 739 | * @rate: desired clock rate in Hz |
|---|
| 629 | 740 | * |
|---|
| 741 | + * Updating the rate starts at the top-most affected clock and then |
|---|
| 742 | + * walks the tree down to the bottom-most clock that needs updating. |
|---|
| 743 | + * |
|---|
| 630 | 744 | * Returns success (0) or negative errno. |
|---|
| 631 | 745 | */ |
|---|
| 632 | 746 | int clk_set_rate(struct clk *clk, unsigned long rate); |
|---|
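
Because a clk_set_rate() call can reprogram parent clocks up the tree, consumers often ask what rate is actually achievable before committing. A minimal sketch; the 48 MHz target and helper name are illustrative:

```c
#include <linux/clk.h>

static int foo_set_baud_base(struct clk *clk)
{
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded <= 0)
		return -EINVAL;

	/* May update parent clocks up the tree, as noted above. */
	return clk_set_rate(clk, rounded);
}
```
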
| .. | .. |
|---|
| 723 | 837 | */ |
|---|
| 724 | 838 | struct clk *clk_get_sys(const char *dev_id, const char *con_id); |
|---|
| 725 | 839 | |
|---|
| 840 | +/** |
|---|
| 841 | + * clk_save_context - save clock context for poweroff |
|---|
| 842 | + * |
|---|
| 843 | + * Saves the context of the clock registers for power states in which the |
|---|
| 844 | + * contents of the registers will be lost. Occurs deep within the suspend |
|---|
| 845 | + * code so locking is not necessary. |
|---|
| 846 | + */ |
|---|
| 847 | +int clk_save_context(void); |
|---|
| 848 | + |
|---|
| 849 | +/** |
|---|
| 850 | + * clk_restore_context - restore clock context after poweroff |
|---|
| 851 | + * |
|---|
| 852 | + * This occurs with all clocks enabled. Occurs deep within the resume code |
|---|
| 853 | + * so locking is not necessary. |
|---|
| 854 | + */ |
|---|
| 855 | +void clk_restore_context(void); |
|---|
| 856 | + |
|---|
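
A sketch of where these fit: a platform's context-losing suspend path, run late in suspend, which is why the kernel-doc notes that no locking is needed. The noirq hook names are illustrative, not mandated by this API:

```c
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
	/* Register contents will be lost in the target power state. */
	return clk_save_context();
}

static int foo_resume_noirq(struct device *dev)
{
	clk_restore_context();
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq = foo_resume_noirq,
};
```
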
| 726 | 857 | #else /* !CONFIG_HAVE_CLK */ |
|---|
| 727 | 858 | |
|---|
| 728 | 859 | static inline struct clk *clk_get(struct device *dev, const char *id) |
|---|
| .. | .. |
|---|
| 753 | 884 | return NULL; |
|---|
| 754 | 885 | } |
|---|
| 755 | 886 | |
|---|
| 887 | +static inline struct clk *devm_clk_get_prepared(struct device *dev, |
|---|
| 888 | + const char *id) |
|---|
| 889 | +{ |
|---|
| 890 | + return NULL; |
|---|
| 891 | +} |
|---|
| 892 | + |
|---|
| 893 | +static inline struct clk *devm_clk_get_enabled(struct device *dev, |
|---|
| 894 | + const char *id) |
|---|
| 895 | +{ |
|---|
| 896 | + return NULL; |
|---|
| 897 | +} |
|---|
| 898 | + |
|---|
| 756 | 899 | static inline struct clk *devm_clk_get_optional(struct device *dev, |
|---|
| 757 | 900 | const char *id) |
|---|
| 901 | +{ |
|---|
| 902 | + return NULL; |
|---|
| 903 | +} |
|---|
| 904 | + |
|---|
| 905 | +static inline struct clk *devm_clk_get_optional_prepared(struct device *dev, |
|---|
| 906 | + const char *id) |
|---|
| 907 | +{ |
|---|
| 908 | + return NULL; |
|---|
| 909 | +} |
|---|
| 910 | + |
|---|
| 911 | +static inline struct clk *devm_clk_get_optional_enabled(struct device *dev, |
|---|
| 912 | + const char *id) |
|---|
| 758 | 913 | { |
|---|
| 759 | 914 | return NULL; |
|---|
| 760 | 915 | } |
|---|
| .. | .. |
|---|
| 792 | 947 | |
|---|
| 793 | 948 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} |
|---|
| 794 | 949 | |
|---|
| 795 | | - |
|---|
| 796 | | -static inline int clk_rate_exclusive_get(struct clk *clk) |
|---|
| 797 | | -{ |
|---|
| 798 | | - return 0; |
|---|
| 799 | | -} |
|---|
| 800 | | - |
|---|
| 801 | | -static inline void clk_rate_exclusive_put(struct clk *clk) {} |
|---|
| 802 | | - |
|---|
| 803 | 950 | static inline int clk_enable(struct clk *clk) |
|---|
| 804 | 951 | { |
|---|
| 805 | 952 | return 0; |
|---|
| 806 | 953 | } |
|---|
| 807 | 954 | |
|---|
| 808 | | -static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks) |
|---|
| 955 | +static inline int __must_check clk_bulk_enable(int num_clks, |
|---|
| 956 | + const struct clk_bulk_data *clks) |
|---|
| 809 | 957 | { |
|---|
| 810 | 958 | return 0; |
|---|
| 811 | 959 | } |
|---|
| .. | .. |
|---|
| 814 | 962 | |
|---|
| 815 | 963 | |
|---|
| 816 | 964 | static inline void clk_bulk_disable(int num_clks, |
|---|
| 817 | | - struct clk_bulk_data *clks) {} |
|---|
| 965 | + const struct clk_bulk_data *clks) {} |
|---|
| 818 | 966 | |
|---|
| 819 | 967 | static inline unsigned long clk_get_rate(struct clk *clk) |
|---|
| 820 | 968 | { |
|---|
| .. | .. |
|---|
| 841 | 989 | return true; |
|---|
| 842 | 990 | } |
|---|
| 843 | 991 | |
|---|
| 992 | +static inline int clk_set_rate_range(struct clk *clk, unsigned long min, |
|---|
| 993 | + unsigned long max) |
|---|
| 994 | +{ |
|---|
| 995 | + return 0; |
|---|
| 996 | +} |
|---|
| 997 | + |
|---|
| 998 | +static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) |
|---|
| 999 | +{ |
|---|
| 1000 | + return 0; |
|---|
| 1001 | +} |
|---|
| 1002 | + |
|---|
| 1003 | +static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) |
|---|
| 1004 | +{ |
|---|
| 1005 | + return 0; |
|---|
| 1006 | +} |
|---|
| 1007 | + |
|---|
| 844 | 1008 | static inline int clk_set_parent(struct clk *clk, struct clk *parent) |
|---|
| 845 | 1009 | { |
|---|
| 846 | 1010 | return 0; |
|---|
| .. | .. |
|---|
| 855 | 1019 | { |
|---|
| 856 | 1020 | return NULL; |
|---|
| 857 | 1021 | } |
|---|
| 1022 | + |
|---|
| 1023 | +static inline int clk_save_context(void) |
|---|
| 1024 | +{ |
|---|
| 1025 | + return 0; |
|---|
| 1026 | +} |
|---|
| 1027 | + |
|---|
| 1028 | +static inline void clk_restore_context(void) {} |
|---|
| 1029 | + |
|---|
| 858 | 1030 | #endif |
|---|
| 859 | 1031 | |
|---|
| 860 | 1032 | /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ |
|---|
| .. | .. |
|---|
| 879 | 1051 | clk_unprepare(clk); |
|---|
| 880 | 1052 | } |
|---|
| 881 | 1053 | |
|---|
| 882 | | -static inline int __must_check clk_bulk_prepare_enable(int num_clks, |
|---|
| 883 | | - struct clk_bulk_data *clks) |
|---|
| 1054 | +static inline int __must_check |
|---|
| 1055 | +clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) |
|---|
| 884 | 1056 | { |
|---|
| 885 | 1057 | int ret; |
|---|
| 886 | 1058 | |
|---|
| .. | .. |
|---|
| 895 | 1067 | } |
|---|
| 896 | 1068 | |
|---|
| 897 | 1069 | static inline void clk_bulk_disable_unprepare(int num_clks, |
|---|
| 898 | | - struct clk_bulk_data *clks) |
|---|
| 1070 | + const struct clk_bulk_data *clks) |
|---|
| 899 | 1071 | { |
|---|
| 900 | 1072 | clk_bulk_disable(num_clks, clks); |
|---|
| 901 | 1073 | clk_bulk_unprepare(num_clks, clks); |
|---|
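
Finally, a sketch of the bulk API these const qualifiers apply to: the get step still writes the .clk members, while prepare/enable/disable/unprepare only read the table. The driver structure, clock names and foo_* identifiers are illustrative:

```c
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Illustrative driver-private data. */
struct foo_priv {
	struct clk_bulk_data clks[2];
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->clks[0].id = "bus";
	priv->clks[1].id = "core";

	/* Fills in the .clk members, so the table is not const here. */
	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(priv->clks),
				priv->clks);
	if (ret)
		return ret;

	/* Only reads the table, matching the new const signature. */
	ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clks), priv->clks);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clks), priv->clks);
	return 0;
}
```
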