.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Generic OPP Interface |
---|
3 | 4 | * |
---|
.. | .. |
---|
5 | 6 | * Nishanth Menon |
---|
6 | 7 | * Romit Dasgupta |
---|
7 | 8 | * Kevin Hilman |
---|
8 | | - * |
---|
9 | | - * This program is free software; you can redistribute it and/or modify |
---|
10 | | - * it under the terms of the GNU General Public License version 2 as |
---|
11 | | - * published by the Free Software Foundation. |
---|
12 | 9 | */ |
---|
13 | 10 | |
---|
14 | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
---|
.. | .. |
---|
48 | 45 | static struct opp_table *_find_opp_table_unlocked(struct device *dev) |
---|
49 | 46 | { |
---|
50 | 47 | struct opp_table *opp_table; |
---|
| 48 | + bool found; |
---|
51 | 49 | |
---|
52 | 50 | list_for_each_entry(opp_table, &opp_tables, node) { |
---|
53 | | - if (_find_opp_dev(dev, opp_table)) { |
---|
| 51 | + mutex_lock(&opp_table->lock); |
---|
| 52 | + found = !!_find_opp_dev(dev, opp_table); |
---|
| 53 | + mutex_unlock(&opp_table->lock); |
---|
| 54 | + |
---|
| 55 | + if (found) { |
---|
54 | 56 | _get_opp_table_kref(opp_table); |
---|
55 | 57 | |
---|
56 | 58 | return opp_table; |
---|
.. | .. |
---|
116 | 118 | */ |
---|
117 | 119 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) |
---|
118 | 120 | { |
---|
119 | | - if (IS_ERR_OR_NULL(opp) || !opp->available) { |
---|
| 121 | + if (IS_ERR_OR_NULL(opp)) { |
---|
120 | 122 | pr_err("%s: Invalid parameters\n", __func__); |
---|
121 | 123 | return 0; |
---|
122 | 124 | } |
---|
.. | .. |
---|
124 | 126 | return opp->rate; |
---|
125 | 127 | } |
---|
126 | 128 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); |
---|
| 129 | + |
---|
| 130 | +/** |
---|
| 131 | + * dev_pm_opp_get_level() - Gets the level corresponding to an available opp |
---|
| 132 | + * @opp: opp for which the level value has to be returned |
---|
| 133 | + * |
---|
| 134 | + * Return: level read from device tree corresponding to the opp, else |
---|
| 135 | + * return 0. |
---|
| 136 | + */ |
---|
| 137 | +unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) |
---|
| 138 | +{ |
---|
| 139 | + if (IS_ERR_OR_NULL(opp) || !opp->available) { |
---|
| 140 | + pr_err("%s: Invalid parameters\n", __func__); |
---|
| 141 | + return 0; |
---|
| 142 | + } |
---|
| 143 | + |
---|
| 144 | + return opp->level; |
---|
| 145 | +} |
---|
| 146 | +EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); |
---|
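
A minimal sketch of how a consumer might use the new `dev_pm_opp_get_level()` helper; the driver, device pointer and the idea of forwarding the level straight to the genpd are illustrative assumptions, not part of this patch:

```c
#include <linux/pm_opp.h>
#include <linux/pm_domain.h>

/* Hypothetical helper: look up the OPP for a frequency and forward its level. */
static int example_set_perf_for_freq(struct device *dev, unsigned long freq)
{
        struct dev_pm_opp *opp;
        unsigned int level;

        opp = dev_pm_opp_find_freq_ceil(dev, &freq);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        /* 0 when the OPP carries no opp-level or the pointer is invalid. */
        level = dev_pm_opp_get_level(opp);
        dev_pm_opp_put(opp);

        /* Forward the level as a genpd performance state, if one is attached. */
        return dev_pm_genpd_set_performance_state(dev, level);
}
```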
127 | 147 | |
---|
128 | 148 | /** |
---|
129 | 149 | * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not |
---|
.. | .. |
---|
381 | 401 | } |
---|
382 | 402 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); |
---|
383 | 403 | |
---|
| 404 | +/** |
---|
| 405 | + * dev_pm_opp_find_level_exact() - search for an exact level |
---|
| 406 | + * @dev: device for which we do this operation |
---|
| 407 | + * @level: level to search for |
---|
| 408 | + * |
---|
| 409 | + * Return: Searches for an exact match in the opp table and returns a pointer |
---|
| 410 | + * to the matching opp if found, else returns ERR_PTR in case of error which |
---|
| 411 | + * should be handled using IS_ERR. Error return values can be: |
---|
| 412 | + * EINVAL: for bad pointer |
---|
| 413 | + * ERANGE: no match found for search |
---|
| 414 | + * ENODEV: if device not found in list of registered devices |
---|
| 415 | + * |
---|
| 416 | + * The callers are required to call dev_pm_opp_put() for the returned OPP after |
---|
| 417 | + * use. |
---|
| 418 | + */ |
---|
| 419 | +struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, |
---|
| 420 | + unsigned int level) |
---|
| 421 | +{ |
---|
| 422 | + struct opp_table *opp_table; |
---|
| 423 | + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
---|
| 424 | + |
---|
| 425 | + opp_table = _find_opp_table(dev); |
---|
| 426 | + if (IS_ERR(opp_table)) { |
---|
| 427 | + int r = PTR_ERR(opp_table); |
---|
| 428 | + |
---|
| 429 | + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); |
---|
| 430 | + return ERR_PTR(r); |
---|
| 431 | + } |
---|
| 432 | + |
---|
| 433 | + mutex_lock(&opp_table->lock); |
---|
| 434 | + |
---|
| 435 | + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { |
---|
| 436 | + if (temp_opp->level == level) { |
---|
| 437 | + opp = temp_opp; |
---|
| 438 | + |
---|
| 439 | + /* Increment the reference count of OPP */ |
---|
| 440 | + dev_pm_opp_get(opp); |
---|
| 441 | + break; |
---|
| 442 | + } |
---|
| 443 | + } |
---|
| 444 | + |
---|
| 445 | + mutex_unlock(&opp_table->lock); |
---|
| 446 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 447 | + |
---|
| 448 | + return opp; |
---|
| 449 | +} |
---|
| 450 | +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact); |
---|
| 451 | + |
---|
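
A sketch of a typical caller of `dev_pm_opp_find_level_exact()`, e.g. a power-domain controller translating a corner/level number into its voltage; the function name `foo_level_to_uV` and its callers are hypothetical:

```c
#include <linux/pm_opp.h>

/* Illustrative only: map a DT opp-level value to the voltage of that OPP. */
static int foo_level_to_uV(struct device *dev, unsigned int level)
{
        struct dev_pm_opp *opp;
        int uV;

        opp = dev_pm_opp_find_level_exact(dev, level);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        uV = dev_pm_opp_get_voltage(opp);
        dev_pm_opp_put(opp);            /* drop the reference taken above */

        return uV;
}
```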
384 | 452 | static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, |
---|
385 | 453 | unsigned long *freq) |
---|
386 | 454 | { |
---|
.. | .. |
---|
503 | 571 | } |
---|
504 | 572 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
---|
505 | 573 | |
---|
| 574 | +/** |
---|
| 575 | + * dev_pm_opp_find_freq_ceil_by_volt() - Find OPP with highest frequency for |
---|
| 576 | + * target voltage. |
---|
| 577 | + * @dev: Device for which we do this operation. |
---|
| 578 | + * @u_volt: Target voltage. |
---|
| 579 | + * |
---|
| 580 | + * Search for the OPP with the highest (ceil) frequency that has a voltage <= u_volt. |
---|
| 581 | + * |
---|
| 582 | + * Return: matching *opp, else returns ERR_PTR in case of error which should be |
---|
| 583 | + * handled using IS_ERR. |
---|
| 584 | + * |
---|
| 585 | + * Error return values can be: |
---|
| 586 | + * EINVAL: bad parameters |
---|
| 587 | + * |
---|
| 588 | + * The callers are required to call dev_pm_opp_put() for the returned OPP after |
---|
| 589 | + * use. |
---|
| 590 | + */ |
---|
| 591 | +struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, |
---|
| 592 | + unsigned long u_volt) |
---|
| 593 | +{ |
---|
| 594 | + struct opp_table *opp_table; |
---|
| 595 | + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
---|
| 596 | + |
---|
| 597 | + if (!dev || !u_volt) { |
---|
| 598 | + dev_err(dev, "%s: Invalid argument volt=%lu\n", __func__, |
---|
| 599 | + u_volt); |
---|
| 600 | + return ERR_PTR(-EINVAL); |
---|
| 601 | + } |
---|
| 602 | + |
---|
| 603 | + opp_table = _find_opp_table(dev); |
---|
| 604 | + if (IS_ERR(opp_table)) |
---|
| 605 | + return ERR_CAST(opp_table); |
---|
| 606 | + |
---|
| 607 | + mutex_lock(&opp_table->lock); |
---|
| 608 | + |
---|
| 609 | + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { |
---|
| 610 | + if (temp_opp->available) { |
---|
| 611 | + if (temp_opp->supplies[0].u_volt > u_volt) |
---|
| 612 | + break; |
---|
| 613 | + opp = temp_opp; |
---|
| 614 | + } |
---|
| 615 | + } |
---|
| 616 | + |
---|
| 617 | + /* Increment the reference count of OPP */ |
---|
| 618 | + if (!IS_ERR(opp)) |
---|
| 619 | + dev_pm_opp_get(opp); |
---|
| 620 | + |
---|
| 621 | + mutex_unlock(&opp_table->lock); |
---|
| 622 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 623 | + |
---|
| 624 | + return opp; |
---|
| 625 | +} |
---|
| 626 | +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt); |
---|
| 627 | + |
---|
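
A hedged example of using `dev_pm_opp_find_freq_ceil_by_volt()` to find the fastest frequency sustainable at a given supply voltage; the wrapper name and the "return 0 on failure" policy are assumptions for illustration:

```c
#include <linux/pm_opp.h>

/* Sketch: highest frequency whose OPP voltage does not exceed u_volt. */
static unsigned long example_max_freq_at(struct device *dev, unsigned long u_volt)
{
        struct dev_pm_opp *opp;
        unsigned long freq;

        opp = dev_pm_opp_find_freq_ceil_by_volt(dev, u_volt);
        if (IS_ERR(opp))
                return 0;

        freq = dev_pm_opp_get_freq(opp);
        dev_pm_opp_put(opp);            /* callers must put the returned OPP */

        return freq;
}
```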
506 | 628 | static int _set_opp_voltage(struct device *dev, struct regulator *reg, |
---|
507 | 629 | struct dev_pm_opp_supply *supply) |
---|
508 | 630 | { |
---|
.. | .. |
---|
528 | 650 | return ret; |
---|
529 | 651 | } |
---|
530 | 652 | |
---|
531 | | -static inline int |
---|
532 | | -_generic_set_opp_clk_only(struct device *dev, struct clk *clk, |
---|
533 | | - unsigned long old_freq, unsigned long freq) |
---|
| 653 | +static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, |
---|
| 654 | + unsigned long freq) |
---|
534 | 655 | { |
---|
535 | 656 | int ret; |
---|
536 | 657 | |
---|
.. | .. |
---|
543 | 664 | return ret; |
---|
544 | 665 | } |
---|
545 | 666 | |
---|
546 | | -static inline int |
---|
547 | | -_generic_set_opp_domain(struct device *dev, struct clk *clk, |
---|
548 | | - unsigned long old_freq, unsigned long freq, |
---|
549 | | - unsigned int old_pstate, unsigned int new_pstate) |
---|
550 | | -{ |
---|
551 | | - int ret; |
---|
552 | | - |
---|
553 | | - /* Scaling up? Scale domain performance state before frequency */ |
---|
554 | | - if (freq > old_freq) { |
---|
555 | | - ret = dev_pm_genpd_set_performance_state(dev, new_pstate); |
---|
556 | | - if (ret) |
---|
557 | | - return ret; |
---|
558 | | - } |
---|
559 | | - |
---|
560 | | - ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); |
---|
561 | | - if (ret) |
---|
562 | | - goto restore_domain_state; |
---|
563 | | - |
---|
564 | | - /* Scaling down? Scale domain performance state after frequency */ |
---|
565 | | - if (freq < old_freq) { |
---|
566 | | - ret = dev_pm_genpd_set_performance_state(dev, new_pstate); |
---|
567 | | - if (ret) |
---|
568 | | - goto restore_freq; |
---|
569 | | - } |
---|
570 | | - |
---|
571 | | - return 0; |
---|
572 | | - |
---|
573 | | -restore_freq: |
---|
574 | | - if (_generic_set_opp_clk_only(dev, clk, freq, old_freq)) |
---|
575 | | - dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", |
---|
576 | | - __func__, old_freq); |
---|
577 | | -restore_domain_state: |
---|
578 | | - if (freq > old_freq) |
---|
579 | | - dev_pm_genpd_set_performance_state(dev, old_pstate); |
---|
580 | | - |
---|
581 | | - return ret; |
---|
582 | | -} |
---|
583 | | - |
---|
584 | | -static int _generic_set_opp_regulator(const struct opp_table *opp_table, |
---|
| 667 | +static int _generic_set_opp_regulator(struct opp_table *opp_table, |
---|
585 | 668 | struct device *dev, |
---|
586 | 669 | unsigned long old_freq, |
---|
587 | 670 | unsigned long freq, |
---|
.. | .. |
---|
605 | 688 | } |
---|
606 | 689 | |
---|
607 | 690 | /* Change frequency */ |
---|
608 | | - ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq); |
---|
| 691 | + ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq); |
---|
609 | 692 | if (ret) |
---|
610 | 693 | goto restore_voltage; |
---|
611 | 694 | |
---|
.. | .. |
---|
616 | 699 | goto restore_freq; |
---|
617 | 700 | } |
---|
618 | 701 | |
---|
| 702 | + /* |
---|
| 703 | + * Enable the regulator after setting its voltages, otherwise it breaks |
---|
| 704 | + * some boot-enabled regulators. |
---|
| 705 | + */ |
---|
| 706 | + if (unlikely(!opp_table->enabled)) { |
---|
| 707 | + ret = regulator_enable(reg); |
---|
| 708 | + if (ret < 0) |
---|
| 709 | + dev_warn(dev, "Failed to enable regulator: %d", ret); |
---|
| 710 | + } |
---|
| 711 | + |
---|
619 | 712 | return 0; |
---|
620 | 713 | |
---|
621 | 714 | restore_freq: |
---|
622 | | - if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq)) |
---|
| 715 | + if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq)) |
---|
623 | 716 | dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", |
---|
624 | 717 | __func__, old_freq); |
---|
625 | 718 | restore_voltage: |
---|
.. | .. |
---|
630 | 723 | return ret; |
---|
631 | 724 | } |
---|
632 | 725 | |
---|
| 726 | +static int _set_opp_bw(const struct opp_table *opp_table, |
---|
| 727 | + struct dev_pm_opp *opp, struct device *dev, bool remove) |
---|
| 728 | +{ |
---|
| 729 | + u32 avg, peak; |
---|
| 730 | + int i, ret; |
---|
| 731 | + |
---|
| 732 | + if (!opp_table->paths) |
---|
| 733 | + return 0; |
---|
| 734 | + |
---|
| 735 | + for (i = 0; i < opp_table->path_count; i++) { |
---|
| 736 | + if (remove) { |
---|
| 737 | + avg = 0; |
---|
| 738 | + peak = 0; |
---|
| 739 | + } else { |
---|
| 740 | + avg = opp->bandwidth[i].avg; |
---|
| 741 | + peak = opp->bandwidth[i].peak; |
---|
| 742 | + } |
---|
| 743 | + ret = icc_set_bw(opp_table->paths[i], avg, peak); |
---|
| 744 | + if (ret) { |
---|
| 745 | + dev_err(dev, "Failed to %s bandwidth[%d]: %d\n", |
---|
| 746 | + remove ? "remove" : "set", i, ret); |
---|
| 747 | + return ret; |
---|
| 748 | + } |
---|
| 749 | + } |
---|
| 750 | + |
---|
| 751 | + return 0; |
---|
| 752 | +} |
---|
| 753 | + |
---|
| 754 | +static int _set_opp_custom(const struct opp_table *opp_table, |
---|
| 755 | + struct device *dev, unsigned long old_freq, |
---|
| 756 | + unsigned long freq, |
---|
| 757 | + struct dev_pm_opp_supply *old_supply, |
---|
| 758 | + struct dev_pm_opp_supply *new_supply) |
---|
| 759 | +{ |
---|
| 760 | + struct dev_pm_set_opp_data *data; |
---|
| 761 | + int size; |
---|
| 762 | + |
---|
| 763 | + data = opp_table->set_opp_data; |
---|
| 764 | + data->regulators = opp_table->regulators; |
---|
| 765 | + data->regulator_count = opp_table->regulator_count; |
---|
| 766 | + data->clk = opp_table->clk; |
---|
| 767 | + data->dev = dev; |
---|
| 768 | + |
---|
| 769 | + data->old_opp.rate = old_freq; |
---|
| 770 | + size = sizeof(*old_supply) * opp_table->regulator_count; |
---|
| 771 | + if (!old_supply) |
---|
| 772 | + memset(data->old_opp.supplies, 0, size); |
---|
| 773 | + else |
---|
| 774 | + memcpy(data->old_opp.supplies, old_supply, size); |
---|
| 775 | + |
---|
| 776 | + data->new_opp.rate = freq; |
---|
| 777 | + memcpy(data->new_opp.supplies, new_supply, size); |
---|
| 778 | + |
---|
| 779 | + return opp_table->set_opp(data); |
---|
| 780 | +} |
---|
| 781 | + |
---|
| 782 | +static int _set_required_opp(struct device *dev, struct device *pd_dev, |
---|
| 783 | + struct dev_pm_opp *opp, int i) |
---|
| 784 | +{ |
---|
| 785 | + unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0; |
---|
| 786 | + int ret; |
---|
| 787 | + |
---|
| 788 | + if (!pd_dev) |
---|
| 789 | + return 0; |
---|
| 790 | + |
---|
| 791 | + ret = dev_pm_genpd_set_performance_state(pd_dev, pstate); |
---|
| 792 | + if (ret) { |
---|
| 793 | + dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n", |
---|
| 794 | + dev_name(pd_dev), pstate, ret); |
---|
| 795 | + } |
---|
| 796 | + |
---|
| 797 | + return ret; |
---|
| 798 | +} |
---|
| 799 | + |
---|
| 800 | +/* This is only called for PM domain for now */ |
---|
| 801 | +static int _set_required_opps(struct device *dev, |
---|
| 802 | + struct opp_table *opp_table, |
---|
| 803 | + struct dev_pm_opp *opp, bool up) |
---|
| 804 | +{ |
---|
| 805 | + struct opp_table **required_opp_tables = opp_table->required_opp_tables; |
---|
| 806 | + struct device **genpd_virt_devs = opp_table->genpd_virt_devs; |
---|
| 807 | + int i, ret = 0; |
---|
| 808 | + |
---|
| 809 | + if (!required_opp_tables) |
---|
| 810 | + return 0; |
---|
| 811 | + |
---|
| 812 | + /* Single genpd case */ |
---|
| 813 | + if (!genpd_virt_devs) |
---|
| 814 | + return _set_required_opp(dev, dev, opp, 0); |
---|
| 815 | + |
---|
| 816 | + /* Multiple genpd case */ |
---|
| 817 | + |
---|
| 818 | + /* |
---|
| 819 | + * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev |
---|
| 820 | + * after it is freed from another thread. |
---|
| 821 | + */ |
---|
| 822 | + mutex_lock(&opp_table->genpd_virt_dev_lock); |
---|
| 823 | + |
---|
| 824 | + /* Scaling up? Set required OPPs in normal order, else reverse */ |
---|
| 825 | + if (up) { |
---|
| 826 | + for (i = 0; i < opp_table->required_opp_count; i++) { |
---|
| 827 | + ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i); |
---|
| 828 | + if (ret) |
---|
| 829 | + break; |
---|
| 830 | + } |
---|
| 831 | + } else { |
---|
| 832 | + for (i = opp_table->required_opp_count - 1; i >= 0; i--) { |
---|
| 833 | + ret = _set_required_opp(dev, genpd_virt_devs[i], opp, i); |
---|
| 834 | + if (ret) |
---|
| 835 | + break; |
---|
| 836 | + } |
---|
| 837 | + } |
---|
| 838 | + |
---|
| 839 | + mutex_unlock(&opp_table->genpd_virt_dev_lock); |
---|
| 840 | + |
---|
| 841 | + return ret; |
---|
| 842 | +} |
---|
| 843 | + |
---|
| 844 | +/** |
---|
| 845 | + * dev_pm_opp_set_bw() - sets bandwidth levels corresponding to an opp |
---|
| 846 | + * @dev: device for which we do this operation |
---|
| 847 | + * @opp: opp based on which the bandwidth levels are to be configured |
---|
| 848 | + * |
---|
| 849 | + * This configures the bandwidth to the levels specified by the OPP. However, |
---|
| 850 | + * if the OPP specified is NULL, the bandwidth levels are cleared out. |
---|
| 851 | + * |
---|
| 852 | + * Return: 0 on success or a negative error value. |
---|
| 853 | + */ |
---|
| 854 | +int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) |
---|
| 855 | +{ |
---|
| 856 | + struct opp_table *opp_table; |
---|
| 857 | + int ret; |
---|
| 858 | + |
---|
| 859 | + opp_table = _find_opp_table(dev); |
---|
| 860 | + if (IS_ERR(opp_table)) { |
---|
| 861 | + dev_err(dev, "%s: device opp table doesn't exist\n", __func__); |
---|
| 862 | + return PTR_ERR(opp_table); |
---|
| 863 | + } |
---|
| 864 | + |
---|
| 865 | + if (opp) |
---|
| 866 | + ret = _set_opp_bw(opp_table, opp, dev, false); |
---|
| 867 | + else |
---|
| 868 | + ret = _set_opp_bw(opp_table, NULL, dev, true); |
---|
| 869 | + |
---|
| 870 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 871 | + return ret; |
---|
| 872 | +} |
---|
| 873 | +EXPORT_SYMBOL_GPL(dev_pm_opp_set_bw); |
---|
| 874 | + |
---|
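
A sketch, not from this patch, of how a driver could pair `dev_pm_opp_set_bw()` votes with a suspend path; the helper names and the ceil-based OPP selection are illustrative:

```c
#include <linux/pm_opp.h>

/* Vote interconnect bandwidth for the OPP about to be used. */
static int example_vote_bw(struct device *dev, unsigned long *freq)
{
        struct dev_pm_opp *opp;
        int ret;

        opp = dev_pm_opp_find_freq_ceil(dev, freq);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        ret = dev_pm_opp_set_bw(dev, opp);      /* programs avg/peak per path */
        dev_pm_opp_put(opp);
        return ret;
}

/* Passing a NULL OPP clears the bandwidth votes again, e.g. on suspend. */
static void example_drop_bw(struct device *dev)
{
        dev_pm_opp_set_bw(dev, NULL);
}
```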
| 875 | +static int _opp_set_rate_zero(struct device *dev, struct opp_table *opp_table) |
---|
| 876 | +{ |
---|
| 877 | + int ret; |
---|
| 878 | + |
---|
| 879 | + if (!opp_table->enabled) |
---|
| 880 | + return 0; |
---|
| 881 | + |
---|
| 882 | + /* |
---|
| 883 | + * Some drivers need to support cases where some platforms may |
---|
| 884 | + * have OPP table for the device, while others don't and |
---|
| 885 | + * opp_set_rate() just needs to behave like clk_set_rate(). |
---|
| 886 | + */ |
---|
| 887 | + if (!_get_opp_count(opp_table)) |
---|
| 888 | + return 0; |
---|
| 889 | + |
---|
| 890 | + ret = _set_opp_bw(opp_table, NULL, dev, true); |
---|
| 891 | + if (ret) |
---|
| 892 | + return ret; |
---|
| 893 | + |
---|
| 894 | + if (opp_table->regulators) |
---|
| 895 | + regulator_disable(opp_table->regulators[0]); |
---|
| 896 | + |
---|
| 897 | + ret = _set_required_opps(dev, opp_table, NULL, false); |
---|
| 898 | + |
---|
| 899 | + opp_table->enabled = false; |
---|
| 900 | + return ret; |
---|
| 901 | +} |
---|
| 902 | + |
---|
633 | 903 | /** |
---|
634 | 904 | * dev_pm_opp_set_rate() - Configure new OPP based on frequency |
---|
635 | 905 | * @dev: device for which we do this operation |
---|
636 | 906 | * @target_freq: frequency to achieve |
---|
637 | 907 | * |
---|
638 | | - * This configures the power-supplies and clock source to the levels specified |
---|
639 | | - * by the OPP corresponding to the target_freq. |
---|
| 908 | + * This configures the power-supplies to the levels specified by the OPP |
---|
| 909 | + * corresponding to the target_freq, and programs the clock to a value <= |
---|
| 910 | + * target_freq, as rounded by clk_round_rate(). A device wanting to run at the |
---|
| 911 | + * fmax provided by the opp should have target_freq already rounded to the |
---|
| 912 | + * target OPP's frequency. |
---|
640 | 913 | */ |
---|
641 | 914 | int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) |
---|
642 | 915 | { |
---|
643 | 916 | struct opp_table *opp_table; |
---|
644 | | - unsigned long freq, old_freq; |
---|
| 917 | + unsigned long freq, old_freq, temp_freq; |
---|
645 | 918 | struct dev_pm_opp *old_opp, *opp; |
---|
646 | 919 | struct clk *clk; |
---|
647 | | - int ret, size; |
---|
648 | | - |
---|
649 | | - if (unlikely(!target_freq)) { |
---|
650 | | - dev_err(dev, "%s: Invalid target frequency %lu\n", __func__, |
---|
651 | | - target_freq); |
---|
652 | | - return -EINVAL; |
---|
653 | | - } |
---|
| 920 | + int ret; |
---|
654 | 921 | |
---|
655 | 922 | opp_table = _find_opp_table(dev); |
---|
656 | 923 | if (IS_ERR(opp_table)) { |
---|
657 | 924 | dev_err(dev, "%s: device opp doesn't exist\n", __func__); |
---|
658 | 925 | return PTR_ERR(opp_table); |
---|
| 926 | + } |
---|
| 927 | + |
---|
| 928 | + if (unlikely(!target_freq)) { |
---|
| 929 | + ret = _opp_set_rate_zero(dev, opp_table); |
---|
| 930 | + goto put_opp_table; |
---|
659 | 931 | } |
---|
660 | 932 | |
---|
661 | 933 | clk = opp_table->clk; |
---|
.. | .. |
---|
673 | 945 | old_freq = clk_get_rate(clk); |
---|
674 | 946 | |
---|
675 | 947 | /* Return early if nothing to do */ |
---|
676 | | - if (old_freq == freq) { |
---|
| 948 | + if (opp_table->enabled && old_freq == freq) { |
---|
677 | 949 | dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", |
---|
678 | 950 | __func__, freq); |
---|
679 | 951 | ret = 0; |
---|
680 | 952 | goto put_opp_table; |
---|
681 | 953 | } |
---|
682 | 954 | |
---|
683 | | - old_opp = _find_freq_ceil(opp_table, &old_freq); |
---|
| 955 | + /* |
---|
| 956 | + * For IO devices which require an OPP on some platforms/SoCs, |
---|
| 957 | + * while just needing to scale the clock on some others, |
---|
| 958 | + * we look for empty OPP tables with just a clock handle and |
---|
| 959 | + * scale only the clk. This makes dev_pm_opp_set_rate() |
---|
| 960 | + * equivalent to a clk_set_rate(). |
---|
| 961 | + */ |
---|
| 962 | + if (!_get_opp_count(opp_table)) { |
---|
| 963 | + ret = _generic_set_opp_clk_only(dev, clk, freq); |
---|
| 964 | + goto put_opp_table; |
---|
| 965 | + } |
---|
| 966 | + |
---|
| 967 | + temp_freq = old_freq; |
---|
| 968 | + old_opp = _find_freq_ceil(opp_table, &temp_freq); |
---|
684 | 969 | if (IS_ERR(old_opp)) { |
---|
685 | 970 | dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n", |
---|
686 | 971 | __func__, old_freq, PTR_ERR(old_opp)); |
---|
687 | 972 | } |
---|
688 | 973 | |
---|
689 | | - opp = _find_freq_ceil(opp_table, &freq); |
---|
| 974 | + temp_freq = freq; |
---|
| 975 | + opp = _find_freq_ceil(opp_table, &temp_freq); |
---|
690 | 976 | if (IS_ERR(opp)) { |
---|
691 | 977 | ret = PTR_ERR(opp); |
---|
692 | 978 | dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", |
---|
.. | .. |
---|
697 | 983 | dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, |
---|
698 | 984 | old_freq, freq); |
---|
699 | 985 | |
---|
700 | | - /* Only frequency scaling */ |
---|
701 | | - if (!opp_table->regulators) { |
---|
702 | | - /* |
---|
703 | | - * We don't support devices with both regulator and |
---|
704 | | - * domain performance-state for now. |
---|
705 | | - */ |
---|
706 | | - if (opp_table->genpd_performance_state) |
---|
707 | | - ret = _generic_set_opp_domain(dev, clk, old_freq, freq, |
---|
708 | | - IS_ERR(old_opp) ? 0 : old_opp->pstate, |
---|
709 | | - opp->pstate); |
---|
710 | | - else |
---|
711 | | - ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); |
---|
712 | | - } else if (!opp_table->set_opp) { |
---|
| 986 | + /* Scaling up? Configure required OPPs before frequency */ |
---|
| 987 | + if (freq >= old_freq) { |
---|
| 988 | + ret = _set_required_opps(dev, opp_table, opp, true); |
---|
| 989 | + if (ret) |
---|
| 990 | + goto put_opp; |
---|
| 991 | + } |
---|
| 992 | + |
---|
| 993 | + if (opp_table->set_opp) { |
---|
| 994 | + ret = _set_opp_custom(opp_table, dev, old_freq, freq, |
---|
| 995 | + IS_ERR(old_opp) ? NULL : old_opp->supplies, |
---|
| 996 | + opp->supplies); |
---|
| 997 | + } else if (opp_table->regulators) { |
---|
713 | 998 | ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq, |
---|
714 | 999 | IS_ERR(old_opp) ? NULL : old_opp->supplies, |
---|
715 | 1000 | opp->supplies); |
---|
716 | 1001 | } else { |
---|
717 | | - struct dev_pm_set_opp_data *data; |
---|
718 | | - |
---|
719 | | - data = opp_table->set_opp_data; |
---|
720 | | - data->regulators = opp_table->regulators; |
---|
721 | | - data->regulator_count = opp_table->regulator_count; |
---|
722 | | - data->clk = clk; |
---|
723 | | - data->dev = dev; |
---|
724 | | - |
---|
725 | | - data->old_opp.rate = old_freq; |
---|
726 | | - size = sizeof(*opp->supplies) * opp_table->regulator_count; |
---|
727 | | - if (IS_ERR(old_opp)) |
---|
728 | | - memset(data->old_opp.supplies, 0, size); |
---|
729 | | - else |
---|
730 | | - memcpy(data->old_opp.supplies, old_opp->supplies, size); |
---|
731 | | - |
---|
732 | | - data->new_opp.rate = freq; |
---|
733 | | - memcpy(data->new_opp.supplies, opp->supplies, size); |
---|
734 | | - |
---|
735 | | - ret = opp_table->set_opp(data); |
---|
| 1002 | + /* Only frequency scaling */ |
---|
| 1003 | + ret = _generic_set_opp_clk_only(dev, clk, freq); |
---|
736 | 1004 | } |
---|
737 | 1005 | |
---|
| 1006 | + /* Scaling down? Configure required OPPs after frequency */ |
---|
| 1007 | + if (!ret && freq < old_freq) { |
---|
| 1008 | + ret = _set_required_opps(dev, opp_table, opp, false); |
---|
| 1009 | + if (ret) |
---|
| 1010 | + dev_err(dev, "Failed to set required opps: %d\n", ret); |
---|
| 1011 | + } |
---|
| 1012 | + |
---|
| 1013 | + if (!ret) { |
---|
| 1014 | + ret = _set_opp_bw(opp_table, opp, dev, false); |
---|
| 1015 | + if (!ret) |
---|
| 1016 | + opp_table->enabled = true; |
---|
| 1017 | + } |
---|
| 1018 | + |
---|
| 1019 | +put_opp: |
---|
738 | 1020 | dev_pm_opp_put(opp); |
---|
739 | 1021 | put_old_opp: |
---|
740 | 1022 | if (!IS_ERR(old_opp)) |
---|
.. | .. |
---|
745 | 1027 | } |
---|
746 | 1028 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); |
---|
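
An illustrative resume/suspend pairing enabled by the new `target_freq == 0` handling in `dev_pm_opp_set_rate()`: passing 0 now drops the bandwidth, regulator and required-OPP votes instead of returning -EINVAL. The function names below are hypothetical:

```c
#include <linux/pm_opp.h>

/* Configure supplies/bandwidth/required OPPs and set the clock <= freq. */
static int example_runtime_resume(struct device *dev, unsigned long freq)
{
        return dev_pm_opp_set_rate(dev, freq);
}

/* Drop all votes taken on behalf of this device's OPP table. */
static int example_runtime_suspend(struct device *dev)
{
        return dev_pm_opp_set_rate(dev, 0);
}
```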
747 | 1029 | |
---|
748 | | -/** |
---|
749 | | - * dev_pm_opp_check_rate_volt() - Configure new OPP based on current rate |
---|
750 | | - * @dev: device for which we do this operation |
---|
751 | | - * @force: when true force to set voltage |
---|
752 | | - * |
---|
753 | | - * This configures the power-supplies and clock source to the levels specified |
---|
754 | | - * by the OPP corresponding to current rate. |
---|
755 | | - * |
---|
756 | | - */ |
---|
757 | | -int dev_pm_opp_check_rate_volt(struct device *dev, bool force) |
---|
758 | | -{ |
---|
759 | | - struct opp_table *opp_table; |
---|
760 | | - struct dev_pm_opp *opp; |
---|
761 | | - struct regulator *reg; |
---|
762 | | - struct clk *clk; |
---|
763 | | - unsigned long old_freq, target_freq, target_volt; |
---|
764 | | - int old_volt; |
---|
765 | | - int ret = 0; |
---|
766 | | - |
---|
767 | | - opp_table = _find_opp_table(dev); |
---|
768 | | - if (IS_ERR(opp_table)) { |
---|
769 | | - dev_err(dev, "%s: device opp doesn't exist\n", __func__); |
---|
770 | | - return PTR_ERR(opp_table); |
---|
771 | | - } |
---|
772 | | - |
---|
773 | | - clk = opp_table->clk; |
---|
774 | | - if (!opp_table->regulators) { |
---|
775 | | - dev_err(dev, "opp_table regulators is null\n"); |
---|
776 | | - goto put_opp_table; |
---|
777 | | - } |
---|
778 | | - reg = opp_table->regulators[0]; |
---|
779 | | - if (IS_ERR_OR_NULL(clk) || IS_ERR_OR_NULL(reg)) { |
---|
780 | | - dev_err(dev, "clk or regulater is unavailable\n"); |
---|
781 | | - ret = -EINVAL; |
---|
782 | | - goto put_opp_table; |
---|
783 | | - } |
---|
784 | | - old_freq = clk_get_rate(clk); |
---|
785 | | - old_volt = regulator_get_voltage(reg); |
---|
786 | | - if (old_volt <= 0) { |
---|
787 | | - dev_err(dev, "failed to get volt %d\n", old_volt); |
---|
788 | | - ret = -EINVAL; |
---|
789 | | - goto put_opp_table; |
---|
790 | | - } |
---|
791 | | - |
---|
792 | | - target_freq = old_freq; |
---|
793 | | - /* If not available, use the closest opp */ |
---|
794 | | - opp = dev_pm_opp_find_freq_ceil(dev, &target_freq); |
---|
795 | | - if (IS_ERR(opp)) { |
---|
796 | | - /* The freq is an upper bound. opp should be lower */ |
---|
797 | | - opp = dev_pm_opp_find_freq_floor(dev, &target_freq); |
---|
798 | | - if (IS_ERR(opp)) { |
---|
799 | | - dev_err(dev, "failed to find OPP for freq %lu\n", |
---|
800 | | - target_freq); |
---|
801 | | - ret = PTR_ERR(opp); |
---|
802 | | - goto put_opp_table; |
---|
803 | | - } |
---|
804 | | - } |
---|
805 | | - target_volt = opp->supplies->u_volt; |
---|
806 | | - target_freq = clk_round_rate(clk, target_freq); |
---|
807 | | - |
---|
808 | | - dev_dbg(dev, "%lu Hz %d uV --> %lu Hz %lu uV\n", old_freq, old_volt, |
---|
809 | | - target_freq, target_volt); |
---|
810 | | - |
---|
811 | | - if (old_freq == target_freq) { |
---|
812 | | - if (old_volt != target_volt || force) { |
---|
813 | | - ret = _set_opp_voltage(dev, reg, opp->supplies); |
---|
814 | | - if (ret) { |
---|
815 | | - dev_err(dev, "failed to set volt %lu\n", |
---|
816 | | - target_volt); |
---|
817 | | - goto put_opp; |
---|
818 | | - } |
---|
819 | | - } |
---|
820 | | - goto put_opp; |
---|
821 | | - } |
---|
822 | | - |
---|
823 | | - ret = _generic_set_opp_regulator(opp_table, dev, old_freq, target_freq, |
---|
824 | | - NULL, opp->supplies); |
---|
825 | | -put_opp: |
---|
826 | | - dev_pm_opp_put(opp); |
---|
827 | | -put_opp_table: |
---|
828 | | - dev_pm_opp_put_opp_table(opp_table); |
---|
829 | | - return ret; |
---|
830 | | -} |
---|
831 | | -EXPORT_SYMBOL_GPL(dev_pm_opp_check_rate_volt); |
---|
832 | | - |
---|
833 | 1030 | /* OPP-dev Helpers */ |
---|
834 | 1031 | static void _remove_opp_dev(struct opp_device *opp_dev, |
---|
835 | 1032 | struct opp_table *opp_table) |
---|
.. | .. |
---|
839 | 1036 | kfree(opp_dev); |
---|
840 | 1037 | } |
---|
841 | 1038 | |
---|
842 | | -struct opp_device *_add_opp_dev(const struct device *dev, |
---|
843 | | - struct opp_table *opp_table) |
---|
| 1039 | +static struct opp_device *_add_opp_dev_unlocked(const struct device *dev, |
---|
| 1040 | + struct opp_table *opp_table) |
---|
844 | 1041 | { |
---|
845 | 1042 | struct opp_device *opp_dev; |
---|
846 | | - int ret; |
---|
847 | 1043 | |
---|
848 | 1044 | opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); |
---|
849 | 1045 | if (!opp_dev) |
---|
.. | .. |
---|
851 | 1047 | |
---|
852 | 1048 | /* Initialize opp-dev */ |
---|
853 | 1049 | opp_dev->dev = dev; |
---|
| 1050 | + |
---|
854 | 1051 | list_add(&opp_dev->node, &opp_table->dev_list); |
---|
855 | 1052 | |
---|
856 | 1053 | /* Create debugfs entries for the opp_table */ |
---|
857 | | - ret = opp_debug_register(opp_dev, opp_table); |
---|
858 | | - if (ret) |
---|
859 | | - dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", |
---|
860 | | - __func__, ret); |
---|
| 1054 | + opp_debug_register(opp_dev, opp_table); |
---|
861 | 1055 | |
---|
862 | 1056 | return opp_dev; |
---|
863 | 1057 | } |
---|
864 | 1058 | |
---|
865 | | -static struct opp_table *_allocate_opp_table(struct device *dev) |
---|
| 1059 | +struct opp_device *_add_opp_dev(const struct device *dev, |
---|
| 1060 | + struct opp_table *opp_table) |
---|
| 1061 | +{ |
---|
| 1062 | + struct opp_device *opp_dev; |
---|
| 1063 | + |
---|
| 1064 | + mutex_lock(&opp_table->lock); |
---|
| 1065 | + opp_dev = _add_opp_dev_unlocked(dev, opp_table); |
---|
| 1066 | + mutex_unlock(&opp_table->lock); |
---|
| 1067 | + |
---|
| 1068 | + return opp_dev; |
---|
| 1069 | +} |
---|
| 1070 | + |
---|
| 1071 | +static struct opp_table *_allocate_opp_table(struct device *dev, int index) |
---|
866 | 1072 | { |
---|
867 | 1073 | struct opp_table *opp_table; |
---|
868 | 1074 | struct opp_device *opp_dev; |
---|
.. | .. |
---|
874 | 1080 | */ |
---|
875 | 1081 | opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL); |
---|
876 | 1082 | if (!opp_table) |
---|
877 | | - return NULL; |
---|
| 1083 | + return ERR_PTR(-ENOMEM); |
---|
878 | 1084 | |
---|
| 1085 | + mutex_init(&opp_table->lock); |
---|
| 1086 | + mutex_init(&opp_table->genpd_virt_dev_lock); |
---|
879 | 1087 | INIT_LIST_HEAD(&opp_table->dev_list); |
---|
880 | 1088 | |
---|
881 | 1089 | /* Mark regulator count uninitialized */ |
---|
.. | .. |
---|
883 | 1091 | |
---|
884 | 1092 | opp_dev = _add_opp_dev(dev, opp_table); |
---|
885 | 1093 | if (!opp_dev) { |
---|
886 | | - kfree(opp_table); |
---|
887 | | - return NULL; |
---|
| 1094 | + ret = -ENOMEM; |
---|
| 1095 | + goto err; |
---|
888 | 1096 | } |
---|
889 | 1097 | |
---|
890 | | - _of_init_opp_table(opp_table, dev); |
---|
| 1098 | + _of_init_opp_table(opp_table, dev, index); |
---|
891 | 1099 | |
---|
892 | 1100 | /* Find clk for the device */ |
---|
893 | 1101 | opp_table->clk = clk_get(dev, NULL); |
---|
894 | 1102 | if (IS_ERR(opp_table->clk)) { |
---|
895 | 1103 | ret = PTR_ERR(opp_table->clk); |
---|
896 | | - if (ret != -EPROBE_DEFER) |
---|
897 | | - dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, |
---|
898 | | - ret); |
---|
| 1104 | + if (ret == -EPROBE_DEFER) |
---|
| 1105 | + goto remove_opp_dev; |
---|
| 1106 | + |
---|
| 1107 | + dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret); |
---|
| 1108 | + } |
---|
| 1109 | + |
---|
| 1110 | + /* Find interconnect path(s) for the device */ |
---|
| 1111 | + ret = dev_pm_opp_of_find_icc_paths(dev, opp_table); |
---|
| 1112 | + if (ret) { |
---|
| 1113 | + if (ret == -EPROBE_DEFER) |
---|
| 1114 | + goto put_clk; |
---|
| 1115 | + |
---|
| 1116 | + dev_warn(dev, "%s: Error finding interconnect paths: %d\n", |
---|
| 1117 | + __func__, ret); |
---|
899 | 1118 | } |
---|
900 | 1119 | |
---|
901 | 1120 | BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); |
---|
902 | 1121 | INIT_LIST_HEAD(&opp_table->opp_list); |
---|
903 | | - mutex_init(&opp_table->lock); |
---|
904 | 1122 | kref_init(&opp_table->kref); |
---|
905 | 1123 | |
---|
906 | 1124 | /* Secure the device table modification */ |
---|
907 | 1125 | list_add(&opp_table->node, &opp_tables); |
---|
908 | 1126 | return opp_table; |
---|
| 1127 | + |
---|
| 1128 | +put_clk: |
---|
| 1129 | + if (!IS_ERR(opp_table->clk)) |
---|
| 1130 | + clk_put(opp_table->clk); |
---|
| 1131 | +remove_opp_dev: |
---|
| 1132 | + _remove_opp_dev(opp_dev, opp_table); |
---|
| 1133 | +err: |
---|
| 1134 | + kfree(opp_table); |
---|
| 1135 | + return ERR_PTR(ret); |
---|
909 | 1136 | } |
---|
910 | 1137 | |
---|
911 | 1138 | void _get_opp_table_kref(struct opp_table *opp_table) |
---|
.. | .. |
---|
913 | 1140 | kref_get(&opp_table->kref); |
---|
914 | 1141 | } |
---|
915 | 1142 | |
---|
916 | | -struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) |
---|
| 1143 | +static struct opp_table *_opp_get_opp_table(struct device *dev, int index) |
---|
917 | 1144 | { |
---|
918 | 1145 | struct opp_table *opp_table; |
---|
919 | 1146 | |
---|
.. | .. |
---|
924 | 1151 | if (!IS_ERR(opp_table)) |
---|
925 | 1152 | goto unlock; |
---|
926 | 1153 | |
---|
927 | | - opp_table = _allocate_opp_table(dev); |
---|
| 1154 | + opp_table = _managed_opp(dev, index); |
---|
| 1155 | + if (opp_table) { |
---|
| 1156 | + if (!_add_opp_dev_unlocked(dev, opp_table)) { |
---|
| 1157 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 1158 | + opp_table = ERR_PTR(-ENOMEM); |
---|
| 1159 | + } |
---|
| 1160 | + goto unlock; |
---|
| 1161 | + } |
---|
| 1162 | + |
---|
| 1163 | + opp_table = _allocate_opp_table(dev, index); |
---|
928 | 1164 | |
---|
929 | 1165 | unlock: |
---|
930 | 1166 | mutex_unlock(&opp_table_lock); |
---|
931 | 1167 | |
---|
932 | 1168 | return opp_table; |
---|
933 | 1169 | } |
---|
| 1170 | + |
---|
| 1171 | +struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) |
---|
| 1172 | +{ |
---|
| 1173 | + return _opp_get_opp_table(dev, 0); |
---|
| 1174 | +} |
---|
934 | 1175 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); |
---|
| 1176 | + |
---|
| 1177 | +struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, |
---|
| 1178 | + int index) |
---|
| 1179 | +{ |
---|
| 1180 | + return _opp_get_opp_table(dev, index); |
---|
| 1181 | +} |
---|
935 | 1182 | |
---|
936 | 1183 | static void _opp_table_kref_release(struct kref *kref) |
---|
937 | 1184 | { |
---|
938 | 1185 | struct opp_table *opp_table = container_of(kref, struct opp_table, kref); |
---|
939 | | - struct opp_device *opp_dev; |
---|
| 1186 | + struct opp_device *opp_dev, *temp; |
---|
| 1187 | + int i; |
---|
| 1188 | + |
---|
| 1189 | + /* Drop the lock as soon as we can */ |
---|
| 1190 | + list_del(&opp_table->node); |
---|
| 1191 | + mutex_unlock(&opp_table_lock); |
---|
| 1192 | + |
---|
| 1193 | + _of_clear_opp_table(opp_table); |
---|
940 | 1194 | |
---|
941 | 1195 | /* Release clk */ |
---|
942 | 1196 | if (!IS_ERR(opp_table->clk)) |
---|
943 | 1197 | clk_put(opp_table->clk); |
---|
944 | 1198 | |
---|
945 | | - opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device, |
---|
946 | | - node); |
---|
| 1199 | + if (opp_table->paths) { |
---|
| 1200 | + for (i = 0; i < opp_table->path_count; i++) |
---|
| 1201 | + icc_put(opp_table->paths[i]); |
---|
| 1202 | + kfree(opp_table->paths); |
---|
| 1203 | + } |
---|
947 | 1204 | |
---|
948 | | - _remove_opp_dev(opp_dev, opp_table); |
---|
| 1205 | + WARN_ON(!list_empty(&opp_table->opp_list)); |
---|
949 | 1206 | |
---|
950 | | - /* dev_list must be empty now */ |
---|
951 | | - WARN_ON(!list_empty(&opp_table->dev_list)); |
---|
| 1207 | + list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) { |
---|
| 1208 | + /* |
---|
| 1209 | + * The OPP table is getting removed, drop the performance state |
---|
| 1210 | + * constraints. |
---|
| 1211 | + */ |
---|
| 1212 | + if (opp_table->genpd_performance_state) |
---|
| 1213 | + dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0); |
---|
952 | 1214 | |
---|
| 1215 | + _remove_opp_dev(opp_dev, opp_table); |
---|
| 1216 | + } |
---|
| 1217 | + |
---|
| 1218 | + mutex_destroy(&opp_table->genpd_virt_dev_lock); |
---|
953 | 1219 | mutex_destroy(&opp_table->lock); |
---|
954 | | - list_del(&opp_table->node); |
---|
955 | 1220 | kfree(opp_table); |
---|
956 | | - |
---|
957 | | - mutex_unlock(&opp_table_lock); |
---|
958 | 1221 | } |
---|
959 | 1222 | |
---|
960 | 1223 | void dev_pm_opp_put_opp_table(struct opp_table *opp_table) |
---|
.. | .. |
---|
969 | 1232 | kfree(opp); |
---|
970 | 1233 | } |
---|
971 | 1234 | |
---|
972 | | -static void _opp_kref_release(struct kref *kref) |
---|
| 1235 | +static void _opp_kref_release(struct dev_pm_opp *opp, |
---|
| 1236 | + struct opp_table *opp_table) |
---|
973 | 1237 | { |
---|
974 | | - struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); |
---|
975 | | - struct opp_table *opp_table = opp->opp_table; |
---|
976 | | - |
---|
977 | 1238 | /* |
---|
978 | 1239 | * Notify the changes in the availability of the operable |
---|
979 | 1240 | * frequency/voltage list. |
---|
980 | 1241 | */ |
---|
981 | 1242 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); |
---|
| 1243 | + _of_opp_free_required_opps(opp_table, opp); |
---|
982 | 1244 | opp_debug_remove_one(opp); |
---|
983 | 1245 | list_del(&opp->node); |
---|
984 | 1246 | kfree(opp); |
---|
| 1247 | +} |
---|
985 | 1248 | |
---|
| 1249 | +static void _opp_kref_release_unlocked(struct kref *kref) |
---|
| 1250 | +{ |
---|
| 1251 | + struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); |
---|
| 1252 | + struct opp_table *opp_table = opp->opp_table; |
---|
| 1253 | + |
---|
| 1254 | + _opp_kref_release(opp, opp_table); |
---|
| 1255 | +} |
---|
| 1256 | + |
---|
| 1257 | +static void _opp_kref_release_locked(struct kref *kref) |
---|
| 1258 | +{ |
---|
| 1259 | + struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); |
---|
| 1260 | + struct opp_table *opp_table = opp->opp_table; |
---|
| 1261 | + |
---|
| 1262 | + _opp_kref_release(opp, opp_table); |
---|
986 | 1263 | mutex_unlock(&opp_table->lock); |
---|
987 | | - dev_pm_opp_put_opp_table(opp_table); |
---|
988 | 1264 | } |
---|
989 | 1265 | |
---|
990 | 1266 | void dev_pm_opp_get(struct dev_pm_opp *opp) |
---|
.. | .. |
---|
994 | 1270 | |
---|
995 | 1271 | void dev_pm_opp_put(struct dev_pm_opp *opp) |
---|
996 | 1272 | { |
---|
997 | | - kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); |
---|
| 1273 | + kref_put_mutex(&opp->kref, _opp_kref_release_locked, |
---|
| 1274 | + &opp->opp_table->lock); |
---|
998 | 1275 | } |
---|
999 | 1276 | EXPORT_SYMBOL_GPL(dev_pm_opp_put); |
---|
| 1277 | + |
---|
| 1278 | +static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp) |
---|
| 1279 | +{ |
---|
| 1280 | + kref_put(&opp->kref, _opp_kref_release_unlocked); |
---|
| 1281 | +} |
---|
1000 | 1282 | |
---|
1001 | 1283 | /** |
---|
1002 | 1284 | * dev_pm_opp_remove() - Remove an OPP from OPP table |
---|
.. | .. |
---|
1028 | 1310 | |
---|
1029 | 1311 | if (found) { |
---|
1030 | 1312 | dev_pm_opp_put(opp); |
---|
| 1313 | + |
---|
| 1314 | + /* Drop the reference taken by dev_pm_opp_add() */ |
---|
| 1315 | + dev_pm_opp_put_opp_table(opp_table); |
---|
1031 | 1316 | } else { |
---|
1032 | 1317 | dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", |
---|
1033 | 1318 | __func__, freq); |
---|
1034 | 1319 | } |
---|
1035 | 1320 | |
---|
| 1321 | + /* Drop the reference taken by _find_opp_table() */ |
---|
1036 | 1322 | dev_pm_opp_put_opp_table(opp_table); |
---|
1037 | 1323 | } |
---|
1038 | 1324 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); |
---|
1039 | 1325 | |
---|
| 1326 | +bool _opp_remove_all_static(struct opp_table *opp_table) |
---|
| 1327 | +{ |
---|
| 1328 | + struct dev_pm_opp *opp, *tmp; |
---|
| 1329 | + bool ret = true; |
---|
| 1330 | + |
---|
| 1331 | + mutex_lock(&opp_table->lock); |
---|
| 1332 | + |
---|
| 1333 | + if (!opp_table->parsed_static_opps) { |
---|
| 1334 | + ret = false; |
---|
| 1335 | + goto unlock; |
---|
| 1336 | + } |
---|
| 1337 | + |
---|
| 1338 | + if (--opp_table->parsed_static_opps) |
---|
| 1339 | + goto unlock; |
---|
| 1340 | + |
---|
| 1341 | + list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { |
---|
| 1342 | + if (!opp->dynamic) |
---|
| 1343 | + dev_pm_opp_put_unlocked(opp); |
---|
| 1344 | + } |
---|
| 1345 | + |
---|
| 1346 | +unlock: |
---|
| 1347 | + mutex_unlock(&opp_table->lock); |
---|
| 1348 | + |
---|
| 1349 | + return ret; |
---|
| 1350 | +} |
---|
| 1351 | + |
---|
| 1352 | +/** |
---|
| 1353 | + * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs |
---|
| 1354 | + * @dev: device for which we do this operation |
---|
| 1355 | + * |
---|
| 1356 | + * This function removes all dynamically created OPPs from the opp table. |
---|
| 1357 | + */ |
---|
| 1358 | +void dev_pm_opp_remove_all_dynamic(struct device *dev) |
---|
| 1359 | +{ |
---|
| 1360 | + struct opp_table *opp_table; |
---|
| 1361 | + struct dev_pm_opp *opp, *temp; |
---|
| 1362 | + int count = 0; |
---|
| 1363 | + |
---|
| 1364 | + opp_table = _find_opp_table(dev); |
---|
| 1365 | + if (IS_ERR(opp_table)) |
---|
| 1366 | + return; |
---|
| 1367 | + |
---|
| 1368 | + mutex_lock(&opp_table->lock); |
---|
| 1369 | + list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) { |
---|
| 1370 | + if (opp->dynamic) { |
---|
| 1371 | + dev_pm_opp_put_unlocked(opp); |
---|
| 1372 | + count++; |
---|
| 1373 | + } |
---|
| 1374 | + } |
---|
| 1375 | + mutex_unlock(&opp_table->lock); |
---|
| 1376 | + |
---|
| 1377 | + /* Drop the references taken by dev_pm_opp_add() */ |
---|
| 1378 | + while (count--) |
---|
| 1379 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 1380 | + |
---|
| 1381 | + /* Drop the reference taken by _find_opp_table() */ |
---|
| 1382 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 1383 | +} |
---|
| 1384 | +EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); |
---|
| 1385 | + |
---|
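
A sketch of a hypothetical driver that adds dynamic OPPs in probe and relies on `dev_pm_opp_remove_all_dynamic()` at removal to drop them, releasing the per-OPP table references taken by `dev_pm_opp_add()`; the frequencies and voltages are made up:

```c
#include <linux/pm_opp.h>

static int example_probe_opps(struct device *dev)
{
        int ret;

        ret = dev_pm_opp_add(dev, 266000000, 1050000);  /* 266 MHz @ 1.05 V */
        if (ret)
                return ret;

        return dev_pm_opp_add(dev, 533000000, 1150000); /* 533 MHz @ 1.15 V */
}

static void example_remove_opps(struct device *dev)
{
        /* Removes every dynamically created OPP in one call. */
        dev_pm_opp_remove_all_dynamic(dev);
}
```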
1040 | 1386 | struct dev_pm_opp *_opp_allocate(struct opp_table *table) |
---|
1041 | 1387 | { |
---|
1042 | 1388 | struct dev_pm_opp *opp; |
---|
1043 | | - int count, supply_size; |
---|
| 1389 | + int supply_count, supply_size, icc_size; |
---|
1044 | 1390 | |
---|
1045 | 1391 | /* Allocate space for at least one supply */ |
---|
1046 | | - count = table->regulator_count > 0 ? table->regulator_count : 1; |
---|
1047 | | - supply_size = sizeof(*opp->supplies) * count; |
---|
| 1392 | + supply_count = table->regulator_count > 0 ? table->regulator_count : 1; |
---|
| 1393 | + supply_size = sizeof(*opp->supplies) * supply_count; |
---|
| 1394 | + icc_size = sizeof(*opp->bandwidth) * table->path_count; |
---|
1048 | 1395 | |
---|
1049 | 1396 | /* allocate new OPP node and supplies structures */ |
---|
1050 | | - opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL); |
---|
| 1397 | + opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL); |
---|
| 1398 | + |
---|
1051 | 1399 | if (!opp) |
---|
1052 | 1400 | return NULL; |
---|
1053 | 1401 | |
---|
1054 | 1402 | /* Put the supplies at the end of the OPP structure as an empty array */ |
---|
1055 | 1403 | opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); |
---|
| 1404 | + if (icc_size) |
---|
| 1405 | + opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count); |
---|
1056 | 1406 | INIT_LIST_HEAD(&opp->node); |
---|
1057 | 1407 | |
---|
1058 | 1408 | return opp; |
---|
.. | .. |
---|
1083 | 1433 | return true; |
---|
1084 | 1434 | } |
---|
1085 | 1435 | |
---|
| 1436 | +int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2) |
---|
| 1437 | +{ |
---|
| 1438 | + if (opp1->rate != opp2->rate) |
---|
| 1439 | + return opp1->rate < opp2->rate ? -1 : 1; |
---|
| 1440 | + if (opp1->bandwidth && opp2->bandwidth && |
---|
| 1441 | + opp1->bandwidth[0].peak != opp2->bandwidth[0].peak) |
---|
| 1442 | + return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1; |
---|
| 1443 | + if (opp1->level != opp2->level) |
---|
| 1444 | + return opp1->level < opp2->level ? -1 : 1; |
---|
| 1445 | + return 0; |
---|
| 1446 | +} |
---|
| 1447 | + |
---|
1086 | 1448 | static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, |
---|
1087 | 1449 | struct opp_table *opp_table, |
---|
1088 | 1450 | struct list_head **head) |
---|
1089 | 1451 | { |
---|
1090 | 1452 | struct dev_pm_opp *opp; |
---|
| 1453 | + int opp_cmp; |
---|
1091 | 1454 | |
---|
1092 | 1455 | /* |
---|
1093 | 1456 | * Insert new OPP in order of increasing frequency and discard if |
---|
.. | .. |
---|
1098 | 1461 | * loop. |
---|
1099 | 1462 | */ |
---|
1100 | 1463 | list_for_each_entry(opp, &opp_table->opp_list, node) { |
---|
1101 | | - if (new_opp->rate > opp->rate) { |
---|
| 1464 | + opp_cmp = _opp_compare_key(new_opp, opp); |
---|
| 1465 | + if (opp_cmp > 0) { |
---|
1102 | 1466 | *head = &opp->node; |
---|
1103 | 1467 | continue; |
---|
1104 | 1468 | } |
---|
1105 | 1469 | |
---|
1106 | | - if (new_opp->rate < opp->rate) |
---|
| 1470 | + if (opp_cmp < 0) |
---|
1107 | 1471 | return 0; |
---|
1108 | 1472 | |
---|
1109 | 1473 | /* Duplicate OPPs */ |
---|
.. | .. |
---|
1153 | 1517 | new_opp->opp_table = opp_table; |
---|
1154 | 1518 | kref_init(&new_opp->kref); |
---|
1155 | 1519 | |
---|
1156 | | - /* Get a reference to the OPP table */ |
---|
1157 | | - _get_opp_table_kref(opp_table); |
---|
1158 | | - |
---|
1159 | | - ret = opp_debug_create_one(new_opp, opp_table); |
---|
1160 | | - if (ret) |
---|
1161 | | - dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", |
---|
1162 | | - __func__, ret); |
---|
| 1520 | + opp_debug_create_one(new_opp, opp_table); |
---|
1163 | 1521 | |
---|
1164 | 1522 | if (!_opp_supported_by_regulators(new_opp, opp_table)) { |
---|
1165 | 1523 | new_opp->available = false; |
---|
.. | .. |
---|
1250 | 1608 | struct opp_table *opp_table; |
---|
1251 | 1609 | |
---|
1252 | 1610 | opp_table = dev_pm_opp_get_opp_table(dev); |
---|
1253 | | - if (!opp_table) |
---|
1254 | | - return ERR_PTR(-ENOMEM); |
---|
| 1611 | + if (IS_ERR(opp_table)) |
---|
| 1612 | + return opp_table; |
---|
1255 | 1613 | |
---|
1256 | 1614 | /* Make sure there are no concurrent readers while updating opp_table */ |
---|
1257 | 1615 | WARN_ON(!list_empty(&opp_table->opp_list)); |
---|
.. | .. |
---|
1309 | 1667 | struct opp_table *opp_table; |
---|
1310 | 1668 | |
---|
1311 | 1669 | opp_table = dev_pm_opp_get_opp_table(dev); |
---|
1312 | | - if (!opp_table) |
---|
1313 | | - return ERR_PTR(-ENOMEM); |
---|
| 1670 | + if (IS_ERR(opp_table)) |
---|
| 1671 | + return opp_table; |
---|
1314 | 1672 | |
---|
1315 | 1673 | /* Make sure there are no concurrent readers while updating opp_table */ |
---|
1316 | 1674 | WARN_ON(!list_empty(&opp_table->opp_list)); |
---|
.. | .. |
---|
1402 | 1760 | int ret, i; |
---|
1403 | 1761 | |
---|
1404 | 1762 | opp_table = dev_pm_opp_get_opp_table(dev); |
---|
1405 | | - if (!opp_table) |
---|
1406 | | - return ERR_PTR(-ENOMEM); |
---|
| 1763 | + if (IS_ERR(opp_table)) |
---|
| 1764 | + return opp_table; |
---|
1407 | 1765 | |
---|
1408 | 1766 | /* This should be called before OPPs are initialized */ |
---|
1409 | 1767 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
---|
.. | .. |
---|
1473 | 1831 | /* Make sure there are no concurrent readers while updating opp_table */ |
---|
1474 | 1832 | WARN_ON(!list_empty(&opp_table->opp_list)); |
---|
1475 | 1833 | |
---|
| 1834 | + if (opp_table->enabled) { |
---|
| 1835 | + for (i = opp_table->regulator_count - 1; i >= 0; i--) |
---|
| 1836 | + regulator_disable(opp_table->regulators[i]); |
---|
| 1837 | + } |
---|
| 1838 | + |
---|
1476 | 1839 | for (i = opp_table->regulator_count - 1; i >= 0; i--) |
---|
1477 | 1840 | regulator_put(opp_table->regulators[i]); |
---|
1478 | 1841 | |
---|
.. | .. |
---|
1505 | 1868 | int ret; |
---|
1506 | 1869 | |
---|
1507 | 1870 | opp_table = dev_pm_opp_get_opp_table(dev); |
---|
1508 | | - if (!opp_table) |
---|
1509 | | - return ERR_PTR(-ENOMEM); |
---|
| 1871 | + if (IS_ERR(opp_table)) |
---|
| 1872 | + return opp_table; |
---|
1510 | 1873 | |
---|
1511 | 1874 | /* This should be called before OPPs are initialized */ |
---|
1512 | 1875 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
---|
.. | .. |
---|
1573 | 1936 | return ERR_PTR(-EINVAL); |
---|
1574 | 1937 | |
---|
1575 | 1938 | opp_table = dev_pm_opp_get_opp_table(dev); |
---|
1576 | | - if (!opp_table) |
---|
1577 | | - return ERR_PTR(-ENOMEM); |
---|
| 1939 | + if (IS_ERR(opp_table)) |
---|
| 1940 | + return opp_table; |
---|
1578 | 1941 | |
---|
1579 | 1942 | /* This should be called before OPPs are initialized */ |
---|
1580 | 1943 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
---|
.. | .. |
---|
1607 | 1970 | } |
---|
1608 | 1971 | EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper); |
---|
1609 | 1972 | |
---|
| 1973 | +static void _opp_detach_genpd(struct opp_table *opp_table) |
---|
| 1974 | +{ |
---|
| 1975 | + int index; |
---|
| 1976 | + |
---|
| 1977 | + if (!opp_table->genpd_virt_devs) |
---|
| 1978 | + return; |
---|
| 1979 | + |
---|
| 1980 | + for (index = 0; index < opp_table->required_opp_count; index++) { |
---|
| 1981 | + if (!opp_table->genpd_virt_devs[index]) |
---|
| 1982 | + continue; |
---|
| 1983 | + |
---|
| 1984 | + dev_pm_domain_detach(opp_table->genpd_virt_devs[index], false); |
---|
| 1985 | + opp_table->genpd_virt_devs[index] = NULL; |
---|
| 1986 | + } |
---|
| 1987 | + |
---|
| 1988 | + kfree(opp_table->genpd_virt_devs); |
---|
| 1989 | + opp_table->genpd_virt_devs = NULL; |
---|
| 1990 | +} |
---|
| 1991 | + |
---|
| 1992 | +/** |
---|
| 1993 | + * dev_pm_opp_attach_genpd - Attach genpd(s) for the device and save virtual device pointer |
---|
| 1994 | + * @dev: Consumer device for which the genpd is getting attached. |
---|
| 1995 | + * @names: NULL-terminated array of pointers containing names of genpds to attach. |
---|
| 1996 | + * @virt_devs: Pointer to return the array of virtual devices. |
---|
| 1997 | + * |
---|
| 1998 | + * Multiple generic power domains for a device are supported with the help of |
---|
| 1999 | + * virtual genpd devices, which are created for each consumer device - genpd |
---|
| 2000 | + * pair. These are the device structures which are attached to the power domain |
---|
| 2001 | + * and are required by the OPP core to set the performance state of the genpd. |
---|
| 2002 | + * The same API also works for the case where single genpd is available and so |
---|
| 2003 | + * we don't need to support that separately. |
---|
| 2004 | + * |
---|
| 2005 | + * This helper will normally be called by the consumer driver of the device |
---|
| 2006 | + * "dev", as only that has details of the genpd names. |
---|
| 2007 | + * |
---|
| 2008 | + * This helper needs to be called once with a list of all genpd to attach. |
---|
| 2009 | + * Otherwise the original device structure will be used instead by the OPP core. |
---|
| 2010 | + * |
---|
| 2011 | + * The order of entries in the names array must match the order in which |
---|
| 2012 | + * "required-opps" are added in DT. |
---|
| 2013 | + */ |
---|
| 2014 | +struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, |
---|
| 2015 | + const char **names, struct device ***virt_devs) |
---|
| 2016 | +{ |
---|
| 2017 | + struct opp_table *opp_table; |
---|
| 2018 | + struct device *virt_dev; |
---|
| 2019 | + int index = 0, ret = -EINVAL; |
---|
| 2020 | + const char **name = names; |
---|
| 2021 | + |
---|
| 2022 | + opp_table = dev_pm_opp_get_opp_table(dev); |
---|
| 2023 | + if (IS_ERR(opp_table)) |
---|
| 2024 | + return opp_table; |
---|
| 2025 | + |
---|
| 2026 | + if (opp_table->genpd_virt_devs) |
---|
| 2027 | + return opp_table; |
---|
| 2028 | + |
---|
| 2029 | + /* |
---|
| 2030 | + * If the genpd's OPP table isn't already initialized, parsing of the |
---|
| 2031 | + * required-opps fails for dev. We should retry this after genpd's OPP |
---|
| 2032 | + * table is added. |
---|
| 2033 | + */ |
---|
| 2034 | + if (!opp_table->required_opp_count) { |
---|
| 2035 | + ret = -EPROBE_DEFER; |
---|
| 2036 | + goto put_table; |
---|
| 2037 | + } |
---|
| 2038 | + |
---|
| 2039 | + mutex_lock(&opp_table->genpd_virt_dev_lock); |
---|
| 2040 | + |
---|
| 2041 | + opp_table->genpd_virt_devs = kcalloc(opp_table->required_opp_count, |
---|
| 2042 | + sizeof(*opp_table->genpd_virt_devs), |
---|
| 2043 | + GFP_KERNEL); |
---|
| 2044 | + if (!opp_table->genpd_virt_devs) |
---|
| 2045 | + goto unlock; |
---|
| 2046 | + |
---|
| 2047 | + while (*name) { |
---|
| 2048 | + if (index >= opp_table->required_opp_count) { |
---|
| 2049 | + dev_err(dev, "Index can't be greater than required-opp-count - 1, %s (%d : %d)\n", |
---|
| 2050 | + *name, opp_table->required_opp_count, index); |
---|
| 2051 | + goto err; |
---|
| 2052 | + } |
---|
| 2053 | + |
---|
| 2054 | + virt_dev = dev_pm_domain_attach_by_name(dev, *name); |
---|
| 2055 | + if (IS_ERR_OR_NULL(virt_dev)) { |
---|
| 2056 | + ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV; |
---|
| 2057 | + dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret); |
---|
| 2058 | + goto err; |
---|
| 2059 | + } |
---|
| 2060 | + |
---|
| 2061 | + opp_table->genpd_virt_devs[index] = virt_dev; |
---|
| 2062 | + index++; |
---|
| 2063 | + name++; |
---|
| 2064 | + } |
---|
| 2065 | + |
---|
| 2066 | + if (virt_devs) |
---|
| 2067 | + *virt_devs = opp_table->genpd_virt_devs; |
---|
| 2068 | + mutex_unlock(&opp_table->genpd_virt_dev_lock); |
---|
| 2069 | + |
---|
| 2070 | + return opp_table; |
---|
| 2071 | + |
---|
| 2072 | +err: |
---|
| 2073 | + _opp_detach_genpd(opp_table); |
---|
| 2074 | +unlock: |
---|
| 2075 | + mutex_unlock(&opp_table->genpd_virt_dev_lock); |
---|
| 2076 | + |
---|
| 2077 | +put_table: |
---|
| 2078 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 2079 | + |
---|
| 2080 | + return ERR_PTR(ret); |
---|
| 2081 | +} |
---|
| 2082 | +EXPORT_SYMBOL_GPL(dev_pm_opp_attach_genpd); |
---|
| 2083 | + |
---|
| 2084 | +/** |
---|
| 2085 | + * dev_pm_opp_detach_genpd() - Detach genpd(s) from the device. |
---|
| 2086 | + * @opp_table: OPP table returned by dev_pm_opp_attach_genpd(). |
---|
| 2087 | + * |
---|
| 2088 | + * This detaches the genpd(s), resets the virtual device pointers, and puts the |
---|
| 2089 | + * OPP table. |
---|
| 2090 | + */ |
---|
| 2091 | +void dev_pm_opp_detach_genpd(struct opp_table *opp_table) |
---|
| 2092 | +{ |
---|
| 2093 | + /* |
---|
| 2094 | + * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting |
---|
| 2095 | + * used in parallel. |
---|
| 2096 | + */ |
---|
| 2097 | + mutex_lock(&opp_table->genpd_virt_dev_lock); |
---|
| 2098 | + _opp_detach_genpd(opp_table); |
---|
| 2099 | + mutex_unlock(&opp_table->genpd_virt_dev_lock); |
---|
| 2100 | + |
---|
| 2101 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 2102 | +} |
---|
| 2103 | +EXPORT_SYMBOL_GPL(dev_pm_opp_detach_genpd); |
---|
| 2104 | + |
---|
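
A sketch of the attach/detach pairing for a consumer device with two power domains; the genpd names ("mx", "cx") and wrapper functions are purely illustrative assumptions:

```c
#include <linux/pm_opp.h>

static const char *example_genpd_names[] = { "mx", "cx", NULL };

static struct opp_table *example_attach(struct device *dev,
                                        struct device ***virt_devs)
{
        /* Order must match the "required-opps" entries in the device's DT node. */
        return dev_pm_opp_attach_genpd(dev, example_genpd_names, virt_devs);
}

static void example_detach(struct opp_table *opp_table)
{
        /* Detaches the genpds and also puts the OPP table reference. */
        dev_pm_opp_detach_genpd(opp_table);
}
```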
| 2105 | +/** |
---|
| 2106 | + * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table. |
---|
| 2107 | + * @src_table: OPP table which has dst_table as one of its required OPP table. |
---|
| 2108 | + * @dst_table: Required OPP table of the src_table. |
---|
| 2109 | + * @pstate: Current performance state of the src_table. |
---|
| 2110 | + * |
---|
| 2111 | + * This Returns pstate of the OPP (present in @dst_table) pointed out by the |
---|
| 2112 | + * "required-opps" property of the OPP (present in @src_table) which has |
---|
| 2113 | + * performance state set to @pstate. |
---|
| 2114 | + * |
---|
| 2115 | + * Return: Zero or positive performance state on success, otherwise negative |
---|
| 2116 | + * value on errors. |
---|
| 2117 | + */ |
---|
| 2118 | +int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, |
---|
| 2119 | + struct opp_table *dst_table, |
---|
| 2120 | + unsigned int pstate) |
---|
| 2121 | +{ |
---|
| 2122 | + struct dev_pm_opp *opp; |
---|
| 2123 | + int dest_pstate = -EINVAL; |
---|
| 2124 | + int i; |
---|
| 2125 | + |
---|
| 2126 | + /* |
---|
| 2127 | + * Normally the src_table will have the "required-opps" property set to |
---|
| 2128 | + * point to one of the OPPs in the dst_table, but in some cases the |
---|
| 2129 | + * genpd and its master have a one-to-one mapping of performance states |
---|
| 2130 | + * and so none of them have the "required-opps" property set. Return the |
---|
| 2131 | + * pstate of the src_table as it is in such cases. |
---|
| 2132 | + */ |
---|
| 2133 | + if (!src_table->required_opp_count) |
---|
| 2134 | + return pstate; |
---|
| 2135 | + |
---|
| 2136 | + for (i = 0; i < src_table->required_opp_count; i++) { |
---|
| 2137 | + if (src_table->required_opp_tables[i]->np == dst_table->np) |
---|
| 2138 | + break; |
---|
| 2139 | + } |
---|
| 2140 | + |
---|
| 2141 | + if (unlikely(i == src_table->required_opp_count)) { |
---|
| 2142 | + pr_err("%s: Couldn't find matching OPP table (%p: %p)\n", |
---|
| 2143 | + __func__, src_table, dst_table); |
---|
| 2144 | + return -EINVAL; |
---|
| 2145 | + } |
---|
| 2146 | + |
---|
| 2147 | + mutex_lock(&src_table->lock); |
---|
| 2148 | + |
---|
| 2149 | + list_for_each_entry(opp, &src_table->opp_list, node) { |
---|
| 2150 | + if (opp->pstate == pstate) { |
---|
| 2151 | + dest_pstate = opp->required_opps[i]->pstate; |
---|
| 2152 | + goto unlock; |
---|
| 2153 | + } |
---|
| 2154 | + } |
---|
| 2155 | + |
---|
| 2156 | + pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table, |
---|
| 2157 | + dst_table); |
---|
| 2158 | + |
---|
| 2159 | +unlock: |
---|
| 2160 | + mutex_unlock(&src_table->lock); |
---|
| 2161 | + |
---|
| 2162 | + return dest_pstate; |
---|
| 2163 | +} |
---|
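
As a usage illustration (the genpd core is the real caller), a hedged sketch of translating a performance state between two linked OPP tables. The my_parent_pstate() helper and the child/parent device pair are hypothetical; it only assumes the two tables are linked through "required-opps" in DT.

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Hypothetical helper: map a performance state of @child's OPP table
 * onto the state @parent's table must run at. */
static int my_parent_pstate(struct device *child, struct device *parent,
                            unsigned int pstate)
{
        struct opp_table *src, *dst;
        int ret;

        src = dev_pm_opp_get_opp_table(child);
        if (IS_ERR(src))
                return PTR_ERR(src);

        dst = dev_pm_opp_get_opp_table(parent);
        if (IS_ERR(dst)) {
                dev_pm_opp_put_opp_table(src);
                return PTR_ERR(dst);
        }

        /* Returns @pstate unchanged when @src has no required-opps, the
         * translated pstate on success, or -EINVAL when no matching
         * required OPP table or OPP is found. */
        ret = dev_pm_opp_xlate_performance_state(src, dst, pstate);

        dev_pm_opp_put_opp_table(dst);
        dev_pm_opp_put_opp_table(src);
        return ret;
}
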
| 2164 | + |
---|
1610 | 2165 | /** |
---|
1611 | 2166 | * dev_pm_opp_add() - Add an OPP table from a table definitions |
---|
1612 | 2167 | * @dev: device for which we do this operation |
---|
.. | .. |
---|
1630 | 2185 | int ret; |
---|
1631 | 2186 | |
---|
1632 | 2187 | opp_table = dev_pm_opp_get_opp_table(dev); |
---|
1633 | | - if (!opp_table) |
---|
1634 | | - return -ENOMEM; |
---|
| 2188 | + if (IS_ERR(opp_table)) |
---|
| 2189 | + return PTR_ERR(opp_table); |
---|
1635 | 2190 | |
---|
1636 | 2191 | /* Fix regulator count for dynamic OPPs */ |
---|
1637 | 2192 | opp_table->regulator_count = 1; |
---|
1638 | 2193 | |
---|
1639 | 2194 | ret = _opp_add_v1(opp_table, dev, freq, u_volt, true); |
---|
| 2195 | + if (ret) |
---|
| 2196 | + dev_pm_opp_put_opp_table(opp_table); |
---|
1640 | 2197 | |
---|
1641 | | - dev_pm_opp_put_opp_table(opp_table); |
---|
1642 | 2198 | return ret; |
---|
1643 | 2199 | } |
---|
1644 | 2200 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); |
---|
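
For illustration (not part of the diff), a hedged sketch of a driver building a small dynamic OPP table at probe time with dev_pm_opp_add(). The frequencies, voltages and my_register_opps() are invented for the example; note the fixed error handling above now drops the table reference on failure inside dev_pm_opp_add() itself.

#include <linux/pm_opp.h>

static int my_register_opps(struct device *dev)
{
        int ret;

        /* 400 MHz at 0.95 V */
        ret = dev_pm_opp_add(dev, 400000000, 950000);
        if (ret)
                return ret;

        /* 800 MHz at 1.10 V */
        ret = dev_pm_opp_add(dev, 800000000, 1100000);
        if (ret)
                dev_pm_opp_remove(dev, 400000000); /* roll back first OPP */

        return ret;
}
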
.. | .. |
---|
1712 | 2268 | dev_pm_opp_put_opp_table(opp_table); |
---|
1713 | 2269 | return r; |
---|
1714 | 2270 | } |
---|
| 2271 | + |
---|
| 2272 | +/** |
---|
| 2273 | + * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP |
---|
| 2274 | + * @dev: device for which we do this operation |
---|
| 2275 | + * @freq: OPP frequency to adjust voltage of |
---|
| 2276 | + * @u_volt: new OPP target voltage |
---|
| 2277 | + * @u_volt_min: new OPP min voltage |
---|
| 2278 | + * @u_volt_max: new OPP max voltage |
---|
| 2279 | + * |
---|
| 2280 | + * Return: -EINVAL for bad pointers, -ENOMEM if no memory is available for the |
---|
| 2281 | + * copy operation, and 0 if no modification was done or the modification was |
---|
| 2282 | + * successful. |
---|
| 2283 | + */ |
---|
| 2284 | +int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, |
---|
| 2285 | + unsigned long u_volt, unsigned long u_volt_min, |
---|
| 2286 | + unsigned long u_volt_max) |
---|
| 2287 | + |
---|
| 2288 | +{ |
---|
| 2289 | + struct opp_table *opp_table; |
---|
| 2290 | + struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); |
---|
| 2291 | + int r = 0; |
---|
| 2292 | + |
---|
| 2293 | + /* Find the opp_table */ |
---|
| 2294 | + opp_table = _find_opp_table(dev); |
---|
| 2295 | + if (IS_ERR(opp_table)) { |
---|
| 2296 | + r = PTR_ERR(opp_table); |
---|
| 2297 | + dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); |
---|
| 2298 | + return r; |
---|
| 2299 | + } |
---|
| 2300 | + |
---|
| 2301 | + mutex_lock(&opp_table->lock); |
---|
| 2302 | + |
---|
| 2303 | + /* Do we have the frequency? */ |
---|
| 2304 | + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { |
---|
| 2305 | + if (tmp_opp->rate == freq) { |
---|
| 2306 | + opp = tmp_opp; |
---|
| 2307 | + break; |
---|
| 2308 | + } |
---|
| 2309 | + } |
---|
| 2310 | + |
---|
| 2311 | + if (IS_ERR(opp)) { |
---|
| 2312 | + r = PTR_ERR(opp); |
---|
| 2313 | + goto adjust_unlock; |
---|
| 2314 | + } |
---|
| 2315 | + |
---|
| 2316 | + /* Is update really needed? */ |
---|
| 2317 | + if (opp->supplies->u_volt == u_volt) |
---|
| 2318 | + goto adjust_unlock; |
---|
| 2319 | + |
---|
| 2320 | + opp->supplies->u_volt = u_volt; |
---|
| 2321 | + opp->supplies->u_volt_min = u_volt_min; |
---|
| 2322 | + opp->supplies->u_volt_max = u_volt_max; |
---|
| 2323 | + |
---|
| 2324 | + dev_pm_opp_get(opp); |
---|
| 2325 | + mutex_unlock(&opp_table->lock); |
---|
| 2326 | + |
---|
| 2327 | + /* Notify the voltage change of the OPP */ |
---|
| 2328 | + blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE, |
---|
| 2329 | + opp); |
---|
| 2330 | + |
---|
| 2331 | + dev_pm_opp_put(opp); |
---|
| 2332 | + goto adjust_put_table; |
---|
| 2333 | + |
---|
| 2334 | +adjust_unlock: |
---|
| 2335 | + mutex_unlock(&opp_table->lock); |
---|
| 2336 | +adjust_put_table: |
---|
| 2337 | + dev_pm_opp_put_opp_table(opp_table); |
---|
| 2338 | + return r; |
---|
| 2339 | +} |
---|
| 2340 | +EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); |
---|
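
A hedged caller-side sketch of dev_pm_opp_adjust_voltage(); the speed-bin scenario and all numbers are hypothetical. On a successful change, listeners registered with dev_pm_opp_register_notifier() receive OPP_EVENT_ADJUST_VOLTAGE, as implemented above.

#include <linux/pm_opp.h>

/* Hypothetical: after reading a characterisation fuse, tighten the
 * voltage of the 800 MHz OPP registered earlier. */
static int my_apply_speed_bin(struct device *dev)
{
        /* Returns 0 both when the voltage was updated and when the
         * target voltage was already set. */
        return dev_pm_opp_adjust_voltage(dev, 800000000,
                                         1050000,  /* target uV */
                                         1000000,  /* min uV */
                                         1150000); /* max uV */
}
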
1715 | 2341 | |
---|
1716 | 2342 | /** |
---|
1717 | 2343 | * dev_pm_opp_enable() - Enable a specific OPP |
---|
.. | .. |
---|
1801 | 2427 | } |
---|
1802 | 2428 | EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); |
---|
1803 | 2429 | |
---|
1804 | | -/* |
---|
1805 | | - * Free OPPs either created using static entries present in DT or even the |
---|
1806 | | - * dynamically added entries based on remove_all param. |
---|
| 2430 | +/** |
---|
| 2431 | + * dev_pm_opp_remove_table() - Free all OPPs associated with the device |
---|
| 2432 | + * @dev: device pointer used to look up the OPP table. |
---|
| 2433 | + * |
---|
| 2434 | + * Free both the OPPs created from static entries present in DT and the |
---|
| 2435 | + * dynamically added entries. |
---|
1807 | 2436 | */ |
---|
1808 | | -void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, |
---|
1809 | | - bool remove_all) |
---|
1810 | | -{ |
---|
1811 | | - struct dev_pm_opp *opp, *tmp; |
---|
1812 | | - |
---|
1813 | | - /* Find if opp_table manages a single device */ |
---|
1814 | | - if (list_is_singular(&opp_table->dev_list)) { |
---|
1815 | | - /* Free static OPPs */ |
---|
1816 | | - list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { |
---|
1817 | | - if (remove_all || !opp->dynamic) |
---|
1818 | | - dev_pm_opp_put(opp); |
---|
1819 | | - } |
---|
1820 | | - |
---|
1821 | | - /* |
---|
1822 | | - * The OPP table is getting removed, drop the performance state |
---|
1823 | | - * constraints. |
---|
1824 | | - */ |
---|
1825 | | - if (opp_table->genpd_performance_state) |
---|
1826 | | - dev_pm_genpd_set_performance_state(dev, 0); |
---|
1827 | | - } else { |
---|
1828 | | - _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table); |
---|
1829 | | - } |
---|
1830 | | -} |
---|
1831 | | - |
---|
1832 | | -void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all) |
---|
| 2437 | +void dev_pm_opp_remove_table(struct device *dev) |
---|
1833 | 2438 | { |
---|
1834 | 2439 | struct opp_table *opp_table; |
---|
1835 | 2440 | |
---|
.. | .. |
---|
1846 | 2451 | return; |
---|
1847 | 2452 | } |
---|
1848 | 2453 | |
---|
1849 | | - _dev_pm_opp_remove_table(opp_table, dev, remove_all); |
---|
| 2454 | + /* |
---|
| 2455 | + * Drop the extra reference only if the OPP table was successfully added |
---|
| 2456 | + * with dev_pm_opp_of_add_table() earlier. |
---|
| 2457 | + */ |
---|
| 2458 | + if (_opp_remove_all_static(opp_table)) |
---|
| 2459 | + dev_pm_opp_put_opp_table(opp_table); |
---|
1850 | 2460 | |
---|
| 2461 | + /* Drop reference taken by _find_opp_table() */ |
---|
1851 | 2462 | dev_pm_opp_put_opp_table(opp_table); |
---|
1852 | 2463 | } |
---|
1853 | | - |
---|
1854 | | -/** |
---|
1855 | | - * dev_pm_opp_remove_table() - Free all OPPs associated with the device |
---|
1856 | | - * @dev: device pointer used to lookup OPP table. |
---|
1857 | | - * |
---|
1858 | | - * Free both OPPs created using static entries present in DT and the |
---|
1859 | | - * dynamically added entries. |
---|
1860 | | - */ |
---|
1861 | | -void dev_pm_opp_remove_table(struct device *dev) |
---|
1862 | | -{ |
---|
1863 | | - _dev_pm_opp_find_and_remove_table(dev, true); |
---|
1864 | | -} |
---|
1865 | 2464 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); |
---|
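
Finally, a hedged sketch of the matching registration/teardown pair in a driver; my_probe()/my_remove() are hypothetical wrappers. dev_pm_opp_remove_table() also cleans up OPPs added dynamically with dev_pm_opp_add(), dropping the references as described in the comments above.

#include <linux/pm_opp.h>

static int my_probe(struct device *dev)
{
        /* Registers the static OPPs described in the device's DT node. */
        return dev_pm_opp_of_add_table(dev);
}

static void my_remove(struct device *dev)
{
        /* Frees both the static and any dynamically added OPPs and drops
         * the table references taken when they were added. */
        dev_pm_opp_remove_table(dev);
}
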
1866 | | - |
---|
1867 | | -#ifdef CONFIG_DEBUG_FS |
---|
1868 | | -#include <linux/debugfs.h> |
---|
1869 | | - |
---|
1870 | | -static int opp_summary_show(struct seq_file *s, void *data) |
---|
1871 | | -{ |
---|
1872 | | - struct list_head *lists = (struct list_head *)s->private; |
---|
1873 | | - struct opp_table *opp_table; |
---|
1874 | | - struct dev_pm_opp *opp; |
---|
1875 | | - |
---|
1876 | | - mutex_lock(&opp_table_lock); |
---|
1877 | | - |
---|
1878 | | - seq_puts(s, " device rate(Hz) target(uV) min(uV) max(uV)\n"); |
---|
1879 | | - seq_puts(s, "-------------------------------------------------------------------\n"); |
---|
1880 | | - |
---|
1881 | | - list_for_each_entry(opp_table, lists, node) { |
---|
1882 | | - seq_printf(s, " %s\n", opp_table->dentry_name); |
---|
1883 | | - mutex_lock(&opp_table->lock); |
---|
1884 | | - list_for_each_entry(opp, &opp_table->opp_list, node) { |
---|
1885 | | - seq_printf(s, "%31lu %12lu %11lu %11lu\n", |
---|
1886 | | - opp->rate, |
---|
1887 | | - opp->supplies[0].u_volt, |
---|
1888 | | - opp->supplies[0].u_volt_min, |
---|
1889 | | - opp->supplies[0].u_volt_max); |
---|
1890 | | - } |
---|
1891 | | - mutex_unlock(&opp_table->lock); |
---|
1892 | | - } |
---|
1893 | | - |
---|
1894 | | - mutex_unlock(&opp_table_lock); |
---|
1895 | | - |
---|
1896 | | - return 0; |
---|
1897 | | -} |
---|
1898 | | - |
---|
1899 | | -static int opp_summary_open(struct inode *inode, struct file *file) |
---|
1900 | | -{ |
---|
1901 | | - return single_open(file, opp_summary_show, inode->i_private); |
---|
1902 | | -} |
---|
1903 | | - |
---|
1904 | | -static const struct file_operations opp_summary_fops = { |
---|
1905 | | - .open = opp_summary_open, |
---|
1906 | | - .read = seq_read, |
---|
1907 | | - .llseek = seq_lseek, |
---|
1908 | | - .release = single_release, |
---|
1909 | | -}; |
---|
1910 | | - |
---|
1911 | | -static int __init opp_debug_init(void) |
---|
1912 | | -{ |
---|
1913 | | - struct dentry *parent, *d; |
---|
1914 | | - |
---|
1915 | | - parent = debugfs_lookup("opp", NULL); |
---|
1916 | | - if (!parent) |
---|
1917 | | - return -ENOMEM; |
---|
1918 | | - |
---|
1919 | | - d = debugfs_create_file("opp_summary", 0444, parent, &opp_tables, |
---|
1920 | | - &opp_summary_fops); |
---|
1921 | | - if (!d) |
---|
1922 | | - return -ENOMEM; |
---|
1923 | | - |
---|
1924 | | - return 0; |
---|
1925 | | -} |
---|
1926 | | -late_initcall(opp_debug_init); |
---|
1927 | | -#endif |
---|