.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
1 | 2 | /* |
2 | 3 | * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. |
3 | | - * |
4 | | - * This program is free software; you can redistribute it and/or modify |
5 | | - * it under the terms of the GNU General Public License version 2 and |
6 | | - * only version 2 as published by the Free Software Foundation. |
7 | | - * |
8 | | - * This program is distributed in the hope that it will be useful, |
9 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | | - * GNU General Public License for more details. |
12 | | - * |
13 | 4 | */ |
14 | 5 | |
| 6 | +#include <linux/acpi.h> |
15 | 7 | #include <linux/time.h> |
16 | 8 | #include <linux/of.h> |
17 | 9 | #include <linux/platform_device.h> |
18 | 10 | #include <linux/phy/phy.h> |
19 | | -#include <linux/phy/phy-qcom-ufs.h> |
| 11 | +#include <linux/gpio/consumer.h> |
| 12 | +#include <linux/reset-controller.h> |
| 13 | +#include <linux/devfreq.h> |
20 | 14 | |
21 | 15 | #include "ufshcd.h" |
22 | 16 | #include "ufshcd-pltfrm.h" |
.. | .. |
45 | 39 | |
46 | 40 | static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; |
47 | 41 | |
48 | | -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote); |
49 | 42 | static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); |
50 | 43 | static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, |
51 | 44 | u32 clk_cycles); |
| 45 | + |
| 46 | +static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) |
| 47 | +{ |
| 48 | + return container_of(rcd, struct ufs_qcom_host, rcdev); |
| 49 | +} |
52 | 50 | |
53 | 51 | static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, |
54 | 52 | const char *prefix, void *priv) |
.. | .. |
70 | 68 | } |
71 | 69 | |
72 | 70 | static int ufs_qcom_host_clk_get(struct device *dev, |
73 | | - const char *name, struct clk **clk_out) |
| 71 | + const char *name, struct clk **clk_out, bool optional) |
74 | 72 | { |
75 | 73 | struct clk *clk; |
76 | 74 | int err = 0; |
77 | 75 | |
78 | 76 | clk = devm_clk_get(dev, name); |
79 | | - if (IS_ERR(clk)) { |
80 | | - err = PTR_ERR(clk); |
81 | | - dev_err(dev, "%s: failed to get %s err %d", |
82 | | - __func__, name, err); |
83 | | - } else { |
| 77 | + if (!IS_ERR(clk)) { |
84 | 78 | *clk_out = clk; |
| 79 | + return 0; |
85 | 80 | } |
| 81 | + |
| 82 | + err = PTR_ERR(clk); |
| 83 | + |
| 84 | + if (optional && err == -ENOENT) { |
| 85 | + *clk_out = NULL; |
| 86 | + return 0; |
| 87 | + } |
| 88 | + |
| 89 | + if (err != -EPROBE_DEFER) |
| 90 | + dev_err(dev, "failed to get %s err %d\n", name, err); |
86 | 91 | |
87 | 92 | return err; |
88 | 93 | } |
.. | .. |
104 | 109 | if (!host->is_lane_clks_enabled) |
105 | 110 | return; |
106 | 111 | |
107 | | - if (host->hba->lanes_per_direction > 1) |
108 | | - clk_disable_unprepare(host->tx_l1_sync_clk); |
| 112 | + clk_disable_unprepare(host->tx_l1_sync_clk); |
109 | 113 | clk_disable_unprepare(host->tx_l0_sync_clk); |
110 | | - if (host->hba->lanes_per_direction > 1) |
111 | | - clk_disable_unprepare(host->rx_l1_sync_clk); |
| 114 | + clk_disable_unprepare(host->rx_l1_sync_clk); |
112 | 115 | clk_disable_unprepare(host->rx_l0_sync_clk); |
113 | 116 | |
114 | 117 | host->is_lane_clks_enabled = false; |
.. | .. |
132 | 135 | if (err) |
133 | 136 | goto disable_rx_l0; |
134 | 137 | |
135 | | - if (host->hba->lanes_per_direction > 1) { |
136 | | - err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", |
| 138 | + err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", |
137 | 139 | host->rx_l1_sync_clk); |
138 | | - if (err) |
139 | | - goto disable_tx_l0; |
| 140 | + if (err) |
| 141 | + goto disable_tx_l0; |
140 | 142 | |
141 | | - err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", |
| 143 | + err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", |
142 | 144 | host->tx_l1_sync_clk); |
143 | | - if (err) |
144 | | - goto disable_rx_l1; |
145 | | - } |
| 145 | + if (err) |
| 146 | + goto disable_rx_l1; |
146 | 147 | |
147 | 148 | host->is_lane_clks_enabled = true; |
148 | 149 | goto out; |
149 | 150 | |
150 | 151 | disable_rx_l1: |
151 | | - if (host->hba->lanes_per_direction > 1) |
152 | | - clk_disable_unprepare(host->rx_l1_sync_clk); |
| 152 | + clk_disable_unprepare(host->rx_l1_sync_clk); |
153 | 153 | disable_tx_l0: |
154 | 154 | clk_disable_unprepare(host->tx_l0_sync_clk); |
155 | 155 | disable_rx_l0: |
.. | .. |
163 | 163 | int err = 0; |
164 | 164 | struct device *dev = host->hba->dev; |
165 | 165 | |
166 | | - err = ufs_qcom_host_clk_get(dev, |
167 | | - "rx_lane0_sync_clk", &host->rx_l0_sync_clk); |
| 166 | + if (has_acpi_companion(dev)) |
| 167 | + return 0; |
| 168 | + |
| 169 | + err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk", |
| 170 | + &host->rx_l0_sync_clk, false); |
168 | 171 | if (err) |
169 | 172 | goto out; |
170 | 173 | |
171 | | - err = ufs_qcom_host_clk_get(dev, |
172 | | - "tx_lane0_sync_clk", &host->tx_l0_sync_clk); |
| 174 | + err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk", |
| 175 | + &host->tx_l0_sync_clk, false); |
173 | 176 | if (err) |
174 | 177 | goto out; |
175 | 178 | |
176 | 179 | /* In case of single lane per direction, don't read lane1 clocks */ |
177 | 180 | if (host->hba->lanes_per_direction > 1) { |
178 | 181 | err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", |
179 | | - &host->rx_l1_sync_clk); |
| 182 | + &host->rx_l1_sync_clk, false); |
180 | 183 | if (err) |
181 | 184 | goto out; |
182 | 185 | |
183 | 186 | err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", |
184 | | - &host->tx_l1_sync_clk); |
| 187 | + &host->tx_l1_sync_clk, true); |
185 | 188 | } |
186 | 189 | out: |
187 | 190 | return err; |
.. | .. |
189 | 192 | |
190 | 193 | static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) |
191 | 194 | { |
192 | | - struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
193 | | - struct phy *phy = host->generic_phy; |
194 | 195 | u32 tx_lanes; |
195 | | - int err = 0; |
196 | 196 | |
197 | | - err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); |
198 | | - if (err) |
199 | | - goto out; |
200 | | - |
201 | | - err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes); |
202 | | - if (err) |
203 | | - dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n", |
204 | | - __func__); |
205 | | - |
206 | | -out: |
207 | | - return err; |
| 197 | + return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); |
208 | 198 | } |
209 | 199 | |
210 | 200 | static int ufs_qcom_check_hibern8(struct ufs_hba *hba) |
.. | .. |
256 | 246 | mb(); |
257 | 247 | } |
258 | 248 | |
| 249 | +/* |
| 250 | + * ufs_qcom_host_reset - reset host controller and PHY |
| 251 | + */ |
| 252 | +static int ufs_qcom_host_reset(struct ufs_hba *hba) |
| 253 | +{ |
| 254 | + int ret = 0; |
| 255 | + struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
| 256 | + bool reenable_intr = false; |
| 257 | + |
| 258 | + if (!host->core_reset) { |
| 259 | + dev_warn(hba->dev, "%s: reset control not set\n", __func__); |
| 260 | + goto out; |
| 261 | + } |
| 262 | + |
| 263 | + reenable_intr = hba->is_irq_enabled; |
| 264 | + disable_irq(hba->irq); |
| 265 | + hba->is_irq_enabled = false; |
| 266 | + |
| 267 | + ret = reset_control_assert(host->core_reset); |
| 268 | + if (ret) { |
| 269 | + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", |
| 270 | + __func__, ret); |
| 271 | + goto out; |
| 272 | + } |
| 273 | + |
| 274 | + /* |
| 275 | + * The hardware requirement for delay between assert/deassert |
| 276 | + * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to |
| 277 | + * ~125us (4/32768). To be on the safe side add 200us delay. |
| 278 | + */ |
| 279 | + usleep_range(200, 210); |
| 280 | + |
| 281 | + ret = reset_control_deassert(host->core_reset); |
| 282 | + if (ret) |
| 283 | + dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", |
| 284 | + __func__, ret); |
| 285 | + |
| 286 | + usleep_range(1000, 1100); |
| 287 | + |
| 288 | + if (reenable_intr) { |
| 289 | + enable_irq(hba->irq); |
| 290 | + hba->is_irq_enabled = true; |
| 291 | + } |
| 292 | + |
| 293 | +out: |
| 294 | + return ret; |
| 295 | +} |
| 296 | + |
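Note (not part of the patch): the usleep_range(200, 210) bound above follows from the comment's own numbers; 4 sleep-clock cycles at 32.768 kHz are 4 / 32768 s ≈ 122 µs (the ~125 µs the comment mentions), so a 200 µs sleep covers the 3-4 cycle requirement with margin, and the extra 1 ms wait after deassert simply gives the controller time to settle before it is programmed again.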
259 | 297 | static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) |
260 | 298 | { |
261 | 299 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
.. | .. |
264 | 302 | bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) |
265 | 303 | ? true : false; |
266 | 304 | |
| 305 | + /* Reset UFS Host Controller and PHY */ |
| 306 | + ret = ufs_qcom_host_reset(hba); |
| 307 | + if (ret) |
| 308 | + dev_warn(hba->dev, "%s: host reset returned %d\n", |
| 309 | + __func__, ret); |
| 310 | + |
267 | 311 | if (is_rate_B) |
268 | 312 | phy_set_mode(phy, PHY_MODE_UFS_HS_B); |
269 | | - |
270 | | - /* Assert PHY reset and apply PHY calibration values */ |
271 | | - ufs_qcom_assert_reset(hba); |
272 | | - /* provide 1ms delay to let the reset pulse propagate */ |
273 | | - usleep_range(1000, 1100); |
274 | 313 | |
275 | 314 | /* phy initialization - calibrate the phy */ |
276 | 315 | ret = phy_init(phy); |
.. | .. |
279 | 318 | __func__, ret); |
280 | 319 | goto out; |
281 | 320 | } |
282 | | - |
283 | | - /* De-assert PHY reset and start serdes */ |
284 | | - ufs_qcom_deassert_reset(hba); |
285 | | - |
286 | | - /* |
287 | | - * after reset deassertion, phy will need all ref clocks, |
288 | | - * voltage, current to settle down before starting serdes. |
289 | | - */ |
290 | | - usleep_range(1000, 1100); |
291 | 321 | |
292 | 322 | /* power on phy - start serdes and phy's power and clocks */ |
293 | 323 | ret = phy_power_on(phy); |
.. | .. |
302 | 332 | return 0; |
303 | 333 | |
304 | 334 | out_disable_phy: |
305 | | - ufs_qcom_assert_reset(hba); |
306 | 335 | phy_exit(phy); |
307 | 336 | out: |
308 | 337 | return ret; |
.. | .. |
346 | 375 | /* check if UFS PHY moved from DISABLED to HIBERN8 */ |
347 | 376 | err = ufs_qcom_check_hibern8(hba); |
348 | 377 | ufs_qcom_enable_hw_clk_gating(hba); |
349 | | - |
| 378 | + ufs_qcom_ice_enable(host); |
350 | 379 | break; |
351 | 380 | default: |
352 | 381 | dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); |
.. | .. |
356 | 385 | return err; |
357 | 386 | } |
358 | 387 | |
359 | | -/** |
| 388 | +/* |
360 | 389 | * Returns zero for success and non-zero in case of a failure |
361 | 390 | */ |
362 | 391 | static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, |
.. | .. |
535 | 564 | * completed. |
536 | 565 | */ |
537 | 566 | if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) |
538 | | - err = ufshcd_dme_set(hba, |
539 | | - UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), |
540 | | - 0); |
| 567 | + err = ufshcd_disable_host_tx_lcc(hba); |
541 | 568 | |
542 | 569 | break; |
543 | 570 | case POST_CHANGE: |
.. | .. |
551 | 578 | return err; |
552 | 579 | } |
553 | 580 | |
| 581 | +static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) |
| 582 | +{ |
| 583 | + struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
| 584 | + |
| 585 | + /* reset gpio is optional */ |
| 586 | + if (!host->device_reset) |
| 587 | + return; |
| 588 | + |
| 589 | + gpiod_set_value_cansleep(host->device_reset, asserted); |
| 590 | +} |
| 591 | + |
554 | 592 | static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
555 | 593 | { |
556 | 594 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
557 | 595 | struct phy *phy = host->generic_phy; |
558 | | - int ret = 0; |
559 | 596 | |
560 | 597 | if (ufs_qcom_is_link_off(hba)) { |
561 | 598 | /* |
.. | .. |
566 | 603 | ufs_qcom_disable_lane_clks(host); |
567 | 604 | phy_power_off(phy); |
568 | 605 | |
569 | | - /* Assert PHY soft reset */ |
570 | | - ufs_qcom_assert_reset(hba); |
571 | | - goto out; |
572 | | - } |
| 606 | + /* reset the connected UFS device during power down */ |
| 607 | + ufs_qcom_device_reset_ctrl(hba, true); |
573 | 608 | |
574 | | - /* |
575 | | - * If UniPro link is not active, PHY ref_clk, main PHY analog power |
576 | | - * rail and low noise analog power rail for PLL can be switched off. |
577 | | - */ |
578 | | - if (!ufs_qcom_is_link_active(hba)) { |
| 609 | + } else if (!ufs_qcom_is_link_active(hba)) { |
579 | 610 | ufs_qcom_disable_lane_clks(host); |
580 | | - phy_power_off(phy); |
581 | 611 | } |
582 | 612 | |
583 | | -out: |
584 | | - return ret; |
| 613 | + return 0; |
585 | 614 | } |
586 | 615 | |
587 | 616 | static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
.. | .. |
590 | 619 | struct phy *phy = host->generic_phy; |
591 | 620 | int err; |
592 | 621 | |
593 | | - err = phy_power_on(phy); |
594 | | - if (err) { |
595 | | - dev_err(hba->dev, "%s: failed enabling regs, err = %d\n", |
596 | | - __func__, err); |
597 | | - goto out; |
598 | | - } |
599 | | - |
600 | | - err = ufs_qcom_enable_lane_clks(host); |
601 | | - if (err) |
602 | | - goto out; |
603 | | - |
604 | | - hba->is_sys_suspended = false; |
605 | | - |
606 | | -out: |
607 | | - return err; |
608 | | -} |
609 | | - |
610 | | -struct ufs_qcom_dev_params { |
611 | | - u32 pwm_rx_gear; /* pwm rx gear to work in */ |
612 | | - u32 pwm_tx_gear; /* pwm tx gear to work in */ |
613 | | - u32 hs_rx_gear; /* hs rx gear to work in */ |
614 | | - u32 hs_tx_gear; /* hs tx gear to work in */ |
615 | | - u32 rx_lanes; /* number of rx lanes */ |
616 | | - u32 tx_lanes; /* number of tx lanes */ |
617 | | - u32 rx_pwr_pwm; /* rx pwm working pwr */ |
618 | | - u32 tx_pwr_pwm; /* tx pwm working pwr */ |
619 | | - u32 rx_pwr_hs; /* rx hs working pwr */ |
620 | | - u32 tx_pwr_hs; /* tx hs working pwr */ |
621 | | - u32 hs_rate; /* rate A/B to work in HS */ |
622 | | - u32 desired_working_mode; |
623 | | -}; |
624 | | - |
625 | | -static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param, |
626 | | - struct ufs_pa_layer_attr *dev_max, |
627 | | - struct ufs_pa_layer_attr *agreed_pwr) |
628 | | -{ |
629 | | - int min_qcom_gear; |
630 | | - int min_dev_gear; |
631 | | - bool is_dev_sup_hs = false; |
632 | | - bool is_qcom_max_hs = false; |
633 | | - |
634 | | - if (dev_max->pwr_rx == FAST_MODE) |
635 | | - is_dev_sup_hs = true; |
636 | | - |
637 | | - if (qcom_param->desired_working_mode == FAST) { |
638 | | - is_qcom_max_hs = true; |
639 | | - min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear, |
640 | | - qcom_param->hs_tx_gear); |
641 | | - } else { |
642 | | - min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear, |
643 | | - qcom_param->pwm_tx_gear); |
644 | | - } |
645 | | - |
646 | | - /* |
647 | | - * device doesn't support HS but qcom_param->desired_working_mode is |
648 | | - * HS, thus device and qcom_param don't agree |
649 | | - */ |
650 | | - if (!is_dev_sup_hs && is_qcom_max_hs) { |
651 | | - pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n", |
652 | | - __func__); |
653 | | - return -ENOTSUPP; |
654 | | - } else if (is_dev_sup_hs && is_qcom_max_hs) { |
655 | | - /* |
656 | | - * since device supports HS, it supports FAST_MODE. |
657 | | - * since qcom_param->desired_working_mode is also HS |
658 | | - * then final decision (FAST/FASTAUTO) is done according |
659 | | - * to qcom_params as it is the restricting factor |
660 | | - */ |
661 | | - agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = |
662 | | - qcom_param->rx_pwr_hs; |
663 | | - } else { |
664 | | - /* |
665 | | - * here qcom_param->desired_working_mode is PWM. |
666 | | - * it doesn't matter whether device supports HS or PWM, |
667 | | - * in both cases qcom_param->desired_working_mode will |
668 | | - * determine the mode |
669 | | - */ |
670 | | - agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = |
671 | | - qcom_param->rx_pwr_pwm; |
672 | | - } |
673 | | - |
674 | | - /* |
675 | | - * we would like tx to work in the minimum number of lanes |
676 | | - * between device capability and vendor preferences. |
677 | | - * the same decision will be made for rx |
678 | | - */ |
679 | | - agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx, |
680 | | - qcom_param->tx_lanes); |
681 | | - agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx, |
682 | | - qcom_param->rx_lanes); |
683 | | - |
684 | | - /* device maximum gear is the minimum between device rx and tx gears */ |
685 | | - min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx); |
686 | | - |
687 | | - /* |
688 | | - * if both device capabilities and vendor pre-defined preferences are |
689 | | - * both HS or both PWM then set the minimum gear to be the chosen |
690 | | - * working gear. |
691 | | - * if one is PWM and one is HS then the one that is PWM get to decide |
692 | | - * what is the gear, as it is the one that also decided previously what |
693 | | - * pwr the device will be configured to. |
694 | | - */ |
695 | | - if ((is_dev_sup_hs && is_qcom_max_hs) || |
696 | | - (!is_dev_sup_hs && !is_qcom_max_hs)) |
697 | | - agreed_pwr->gear_rx = agreed_pwr->gear_tx = |
698 | | - min_t(u32, min_dev_gear, min_qcom_gear); |
699 | | - else if (!is_dev_sup_hs) |
700 | | - agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear; |
701 | | - else |
702 | | - agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear; |
703 | | - |
704 | | - agreed_pwr->hs_rate = qcom_param->hs_rate; |
705 | | - return 0; |
706 | | -} |
707 | | - |
708 | | -#ifdef CONFIG_MSM_BUS_SCALING |
709 | | -static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, |
710 | | - const char *speed_mode) |
711 | | -{ |
712 | | - struct device *dev = host->hba->dev; |
713 | | - struct device_node *np = dev->of_node; |
714 | | - int err; |
715 | | - const char *key = "qcom,bus-vector-names"; |
716 | | - |
717 | | - if (!speed_mode) { |
718 | | - err = -EINVAL; |
719 | | - goto out; |
720 | | - } |
721 | | - |
722 | | - if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN")) |
723 | | - err = of_property_match_string(np, key, "MAX"); |
724 | | - else |
725 | | - err = of_property_match_string(np, key, speed_mode); |
726 | | - |
727 | | -out: |
728 | | - if (err < 0) |
729 | | - dev_err(dev, "%s: Invalid %s mode %d\n", |
730 | | - __func__, speed_mode, err); |
731 | | - return err; |
732 | | -} |
733 | | - |
734 | | -static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) |
735 | | -{ |
736 | | - int gear = max_t(u32, p->gear_rx, p->gear_tx); |
737 | | - int lanes = max_t(u32, p->lane_rx, p->lane_tx); |
738 | | - int pwr; |
739 | | - |
740 | | - /* default to PWM Gear 1, Lane 1 if power mode is not initialized */ |
741 | | - if (!gear) |
742 | | - gear = 1; |
743 | | - |
744 | | - if (!lanes) |
745 | | - lanes = 1; |
746 | | - |
747 | | - if (!p->pwr_rx && !p->pwr_tx) { |
748 | | - pwr = SLOWAUTO_MODE; |
749 | | - snprintf(result, BUS_VECTOR_NAME_LEN, "MIN"); |
750 | | - } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE || |
751 | | - p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) { |
752 | | - pwr = FAST_MODE; |
753 | | - snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS", |
754 | | - p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes); |
755 | | - } else { |
756 | | - pwr = SLOW_MODE; |
757 | | - snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", |
758 | | - "PWM", gear, lanes); |
759 | | - } |
760 | | -} |
761 | | - |
762 | | -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) |
763 | | -{ |
764 | | - int err = 0; |
765 | | - |
766 | | - if (vote != host->bus_vote.curr_vote) { |
767 | | - err = msm_bus_scale_client_update_request( |
768 | | - host->bus_vote.client_handle, vote); |
| 622 | + if (ufs_qcom_is_link_off(hba)) { |
| 623 | + err = phy_power_on(phy); |
769 | 624 | if (err) { |
770 | | - dev_err(host->hba->dev, |
771 | | - "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n", |
772 | | - __func__, host->bus_vote.client_handle, |
773 | | - vote, err); |
774 | | - goto out; |
| 625 | + dev_err(hba->dev, "%s: failed PHY power on: %d\n", |
| 626 | + __func__, err); |
| 627 | + return err; |
775 | 628 | } |
776 | 629 | |
777 | | - host->bus_vote.curr_vote = vote; |
778 | | - } |
779 | | -out: |
780 | | - return err; |
781 | | -} |
| 630 | + err = ufs_qcom_enable_lane_clks(host); |
| 631 | + if (err) |
| 632 | + return err; |
782 | 633 | |
783 | | -static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) |
784 | | -{ |
785 | | - int vote; |
786 | | - int err = 0; |
787 | | - char mode[BUS_VECTOR_NAME_LEN]; |
788 | | - |
789 | | - ufs_qcom_get_speed_mode(&host->dev_req_params, mode); |
790 | | - |
791 | | - vote = ufs_qcom_get_bus_vote(host, mode); |
792 | | - if (vote >= 0) |
793 | | - err = ufs_qcom_set_bus_vote(host, vote); |
794 | | - else |
795 | | - err = vote; |
796 | | - |
797 | | - if (err) |
798 | | - dev_err(host->hba->dev, "%s: failed %d\n", __func__, err); |
799 | | - else |
800 | | - host->bus_vote.saved_vote = vote; |
801 | | - return err; |
802 | | -} |
803 | | - |
804 | | -static ssize_t |
805 | | -show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, |
806 | | - char *buf) |
807 | | -{ |
808 | | - struct ufs_hba *hba = dev_get_drvdata(dev); |
809 | | - struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
810 | | - |
811 | | - return snprintf(buf, PAGE_SIZE, "%u\n", |
812 | | - host->bus_vote.is_max_bw_needed); |
813 | | -} |
814 | | - |
815 | | -static ssize_t |
816 | | -store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, |
817 | | - const char *buf, size_t count) |
818 | | -{ |
819 | | - struct ufs_hba *hba = dev_get_drvdata(dev); |
820 | | - struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
821 | | - uint32_t value; |
822 | | - |
823 | | - if (!kstrtou32(buf, 0, &value)) { |
824 | | - host->bus_vote.is_max_bw_needed = !!value; |
825 | | - ufs_qcom_update_bus_bw_vote(host); |
| 634 | + } else if (!ufs_qcom_is_link_active(hba)) { |
| 635 | + err = ufs_qcom_enable_lane_clks(host); |
| 636 | + if (err) |
| 637 | + return err; |
826 | 638 | } |
827 | 639 | |
828 | | - return count; |
| 640 | + return ufs_qcom_ice_resume(host); |
829 | 641 | } |
830 | | - |
831 | | -static int ufs_qcom_bus_register(struct ufs_qcom_host *host) |
832 | | -{ |
833 | | - int err; |
834 | | - struct msm_bus_scale_pdata *bus_pdata; |
835 | | - struct device *dev = host->hba->dev; |
836 | | - struct platform_device *pdev = to_platform_device(dev); |
837 | | - struct device_node *np = dev->of_node; |
838 | | - |
839 | | - bus_pdata = msm_bus_cl_get_pdata(pdev); |
840 | | - if (!bus_pdata) { |
841 | | - dev_err(dev, "%s: failed to get bus vectors\n", __func__); |
842 | | - err = -ENODATA; |
843 | | - goto out; |
844 | | - } |
845 | | - |
846 | | - err = of_property_count_strings(np, "qcom,bus-vector-names"); |
847 | | - if (err < 0 || err != bus_pdata->num_usecases) { |
848 | | - dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n", |
849 | | - __func__, err); |
850 | | - goto out; |
851 | | - } |
852 | | - |
853 | | - host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata); |
854 | | - if (!host->bus_vote.client_handle) { |
855 | | - dev_err(dev, "%s: msm_bus_scale_register_client failed\n", |
856 | | - __func__); |
857 | | - err = -EFAULT; |
858 | | - goto out; |
859 | | - } |
860 | | - |
861 | | - /* cache the vote index for minimum and maximum bandwidth */ |
862 | | - host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); |
863 | | - host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); |
864 | | - |
865 | | - host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; |
866 | | - host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; |
867 | | - sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); |
868 | | - host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; |
869 | | - host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; |
870 | | - err = device_create_file(dev, &host->bus_vote.max_bus_bw); |
871 | | -out: |
872 | | - return err; |
873 | | -} |
874 | | -#else /* CONFIG_MSM_BUS_SCALING */ |
875 | | -static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) |
876 | | -{ |
877 | | - return 0; |
878 | | -} |
879 | | - |
880 | | -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) |
881 | | -{ |
882 | | - return 0; |
883 | | -} |
884 | | - |
885 | | -static int ufs_qcom_bus_register(struct ufs_qcom_host *host) |
886 | | -{ |
887 | | - return 0; |
888 | | -} |
889 | | -#endif /* CONFIG_MSM_BUS_SCALING */ |
890 | 642 | |
891 | 643 | static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) |
892 | 644 | { |
.. | .. |
902 | 654 | /* |
903 | 655 | * If we are here to disable this clock it might be immediately |
904 | 656 | * after entering into hibern8 in which case we need to make |
905 | | - * sure that device ref_clk is active at least 1us after the |
| 657 | + * sure that device ref_clk is active for specific time after |
906 | 658 | * hibern8 enter. |
907 | 659 | */ |
908 | | - if (!enable) |
909 | | - udelay(1); |
| 660 | + if (!enable) { |
| 661 | + unsigned long gating_wait; |
| 662 | + |
| 663 | + gating_wait = host->hba->dev_info.clk_gating_wait_us; |
| 664 | + if (!gating_wait) { |
| 665 | + udelay(1); |
| 666 | + } else { |
| 667 | + /* |
| 668 | + * bRefClkGatingWaitTime defines the minimum |
| 669 | + * time for which the reference clock is |
| 670 | + * required by device during transition from |
| 671 | + * HS-MODE to LS-MODE or HIBERN8 state. Give it |
| 672 | + * more delay to be on the safe side. |
| 673 | + */ |
| 674 | + gating_wait += 10; |
| 675 | + usleep_range(gating_wait, gating_wait + 10); |
| 676 | + } |
| 677 | + } |
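Note (not part of the patch): as a worked example of the new wait, if ufshcd has populated hba->dev_info.clk_gating_wait_us with 100 (derived from the device's bRefClkGatingWaitTime), the code above pads it to 110 and sleeps for 110-120 µs before gating ref_clk; a value of 0 keeps the previous 1 µs delay.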
910 | 678 | |
911 | 679 | writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); |
912 | 680 | |
913 | | - /* ensure that ref_clk is enabled/disabled before we return */ |
914 | | - wmb(); |
| 681 | + /* |
| 682 | + * Make sure the write to ref_clk reaches the destination and |
| 683 | + * not stored in a Write Buffer (WB). |
| 684 | + */ |
| 685 | + readl(host->dev_ref_clk_ctrl_mmio); |
915 | 686 | |
916 | 687 | /* |
917 | 688 | * If we call hibern8 exit after this, we need to make sure that |
.. | .. |
930 | 701 | struct ufs_pa_layer_attr *dev_max_params, |
931 | 702 | struct ufs_pa_layer_attr *dev_req_params) |
932 | 703 | { |
933 | | - u32 val; |
934 | 704 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
935 | | - struct phy *phy = host->generic_phy; |
936 | | - struct ufs_qcom_dev_params ufs_qcom_cap; |
| 705 | + struct ufs_dev_params ufs_qcom_cap; |
937 | 706 | int ret = 0; |
938 | | - int res = 0; |
939 | 707 | |
940 | 708 | if (!dev_req_params) { |
941 | 709 | pr_err("%s: incoming dev_req_params is NULL\n", __func__); |
.. | .. |
973 | 741 | ufs_qcom_cap.hs_rx_gear = UFS_HS_G2; |
974 | 742 | } |
975 | 743 | |
976 | | - ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap, |
977 | | - dev_max_params, |
978 | | - dev_req_params); |
| 744 | + ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap, |
| 745 | + dev_max_params, |
| 746 | + dev_req_params); |
979 | 747 | if (ret) { |
980 | 748 | pr_err("%s: failed to determine capabilities\n", |
981 | 749 | __func__); |
.. | .. |
986 | 754 | if (!ufshcd_is_hs_mode(&hba->pwr_info) && |
987 | 755 | ufshcd_is_hs_mode(dev_req_params)) |
988 | 756 | ufs_qcom_dev_ref_clk_ctrl(host, true); |
| 757 | + |
| 758 | + if (host->hw_ver.major >= 0x4) { |
| 759 | + if (dev_req_params->gear_tx == UFS_HS_G4) { |
| 760 | + /* INITIAL ADAPT */ |
| 761 | + ufshcd_dme_set(hba, |
| 762 | + UIC_ARG_MIB(PA_TXHSADAPTTYPE), |
| 763 | + PA_INITIAL_ADAPT); |
| 764 | + } else { |
| 765 | + /* NO ADAPT */ |
| 766 | + ufshcd_dme_set(hba, |
| 767 | + UIC_ARG_MIB(PA_TXHSADAPTTYPE), |
| 768 | + PA_NO_ADAPT); |
| 769 | + } |
| 770 | + } |
989 | 771 | break; |
990 | 772 | case POST_CHANGE: |
991 | 773 | if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, |
.. | .. |
1001 | 783 | ret = -EINVAL; |
1002 | 784 | } |
1003 | 785 | |
1004 | | - val = ~(MAX_U32 << dev_req_params->lane_tx); |
1005 | | - res = ufs_qcom_phy_set_tx_lane_enable(phy, val); |
1006 | | - if (res) { |
1007 | | - dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n", |
1008 | | - __func__, res); |
1009 | | - ret = res; |
1010 | | - } |
1011 | | - |
1012 | 786 | /* cache the power mode parameters to use internally */ |
1013 | 787 | memcpy(&host->dev_req_params, |
1014 | 788 | dev_req_params, sizeof(*dev_req_params)); |
1015 | | - ufs_qcom_update_bus_bw_vote(host); |
1016 | 789 | |
1017 | 790 | /* disable the device ref clock if entered PWM mode */ |
1018 | 791 | if (ufshcd_is_hs_mode(&hba->pwr_info) && |
.. | .. |
1052 | 825 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) |
1053 | 826 | err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); |
1054 | 827 | |
| 828 | + if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) |
| 829 | + hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; |
| 830 | + |
1055 | 831 | return err; |
1056 | 832 | } |
1057 | 833 | |
.. | .. |
1060 | 836 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1061 | 837 | |
1062 | 838 | if (host->hw_ver.major == 0x1) |
1063 | | - return UFSHCI_VERSION_11; |
| 839 | + return ufshci_version(1, 1); |
1064 | 840 | else |
1065 | | - return UFSHCI_VERSION_20; |
| 841 | + return ufshci_version(2, 0); |
1066 | 842 | } |
1067 | 843 | |
1068 | 844 | /** |
.. | .. |
1098 | 874 | | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE |
1099 | 875 | | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP); |
1100 | 876 | } |
1101 | | - |
1102 | | - /* |
1103 | | - * Inline crypto is currently broken with ufs-qcom at least because the |
1104 | | - * device tree doesn't include the crypto registers. There are likely |
1105 | | - * to be other issues that will need to be addressed too. |
1106 | | - */ |
1107 | | - hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO; |
1108 | 877 | } |
1109 | 878 | |
1110 | 879 | static void ufs_qcom_set_caps(struct ufs_hba *hba) |
.. | .. |
1114 | 883 | hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; |
1115 | 884 | hba->caps |= UFSHCD_CAP_CLK_SCALING; |
1116 | 885 | hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; |
| 886 | + hba->caps |= UFSHCD_CAP_WB_EN; |
| 887 | + hba->caps |= UFSHCD_CAP_CRYPTO; |
1117 | 888 | |
1118 | 889 | if (host->hw_ver.major >= 0x2) { |
1119 | 890 | host->caps = UFS_QCOM_CAP_QUNIPRO | |
.. | .. |
1133 | 904 | enum ufs_notify_change_status status) |
1134 | 905 | { |
1135 | 906 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1136 | | - int err; |
1137 | | - int vote = 0; |
| 907 | + int err = 0; |
1138 | 908 | |
1139 | 909 | /* |
1140 | 910 | * In case ufs_qcom_init() is not yet done, simply ignore. |
.. | .. |
1144 | 914 | if (!host) |
1145 | 915 | return 0; |
1146 | 916 | |
1147 | | - if (on && (status == POST_CHANGE)) { |
1148 | | - phy_power_on(host->generic_phy); |
1149 | | - |
1150 | | - /* enable the device ref clock for HS mode*/ |
1151 | | - if (ufshcd_is_hs_mode(&hba->pwr_info)) |
1152 | | - ufs_qcom_dev_ref_clk_ctrl(host, true); |
1153 | | - vote = host->bus_vote.saved_vote; |
1154 | | - if (vote == host->bus_vote.min_bw_vote) |
1155 | | - ufs_qcom_update_bus_bw_vote(host); |
1156 | | - |
1157 | | - } else if (!on && (status == PRE_CHANGE)) { |
1158 | | - if (!ufs_qcom_is_link_active(hba)) { |
1159 | | - /* disable device ref_clk */ |
1160 | | - ufs_qcom_dev_ref_clk_ctrl(host, false); |
1161 | | - |
1162 | | - /* powering off PHY during aggressive clk gating */ |
1163 | | - phy_power_off(host->generic_phy); |
| 917 | + switch (status) { |
| 918 | + case PRE_CHANGE: |
| 919 | + if (!on) { |
| 920 | + if (!ufs_qcom_is_link_active(hba)) { |
| 921 | + /* disable device ref_clk */ |
| 922 | + ufs_qcom_dev_ref_clk_ctrl(host, false); |
| 923 | + } |
1164 | 924 | } |
1165 | | - |
1166 | | - vote = host->bus_vote.min_bw_vote; |
| 925 | + break; |
| 926 | + case POST_CHANGE: |
| 927 | + if (on) { |
| 928 | + /* enable the device ref clock for HS mode*/ |
| 929 | + if (ufshcd_is_hs_mode(&hba->pwr_info)) |
| 930 | + ufs_qcom_dev_ref_clk_ctrl(host, true); |
| 931 | + } |
| 932 | + break; |
1167 | 933 | } |
1168 | | - |
1169 | | - err = ufs_qcom_set_bus_vote(host, vote); |
1170 | | - if (err) |
1171 | | - dev_err(hba->dev, "%s: set bus vote failed %d\n", |
1172 | | - __func__, err); |
1173 | 934 | |
1174 | 935 | return err; |
1175 | 936 | } |
| 937 | + |
| 938 | +static int |
| 939 | +ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) |
| 940 | +{ |
| 941 | + struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); |
| 942 | + |
| 943 | + /* Currently this code only knows about a single reset. */ |
| 944 | + WARN_ON(id); |
| 945 | + ufs_qcom_assert_reset(host->hba); |
| 946 | + /* provide 1ms delay to let the reset pulse propagate. */ |
| 947 | + usleep_range(1000, 1100); |
| 948 | + return 0; |
| 949 | +} |
| 950 | + |
| 951 | +static int |
| 952 | +ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) |
| 953 | +{ |
| 954 | + struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev); |
| 955 | + |
| 956 | + /* Currently this code only knows about a single reset. */ |
| 957 | + WARN_ON(id); |
| 958 | + ufs_qcom_deassert_reset(host->hba); |
| 959 | + |
| 960 | + /* |
| 961 | + * after reset deassertion, phy will need all ref clocks, |
| 962 | + * voltage, current to settle down before starting serdes. |
| 963 | + */ |
| 964 | + usleep_range(1000, 1100); |
| 965 | + return 0; |
| 966 | +} |
| 967 | + |
| 968 | +static const struct reset_control_ops ufs_qcom_reset_ops = { |
| 969 | + .assert = ufs_qcom_reset_assert, |
| 970 | + .deassert = ufs_qcom_reset_deassert, |
| 971 | +}; |
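Note (not part of the patch): the ops above are registered with devm_reset_controller_register() further down in ufs_qcom_init(), which lets another driver (typically the UFS PHY) toggle the PHY reset through the generic reset API. A minimal consumer sketch follows; the "ufsphy" lookup name and the probe context are illustrative assumptions, not taken from this change.

	/* Illustrative only -- assumes <linux/reset.h> and a DT reference to the
	 * controller registered above; the "ufsphy" id is hypothetical. */
	static int example_toggle_ufs_phy_reset(struct device *dev)
	{
		struct reset_control *rst;
		int ret;

		rst = devm_reset_control_get_exclusive(dev, "ufsphy");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		ret = reset_control_assert(rst);    /* ends up in ufs_qcom_reset_assert() */
		if (ret)
			return ret;

		return reset_control_deassert(rst); /* ends up in ufs_qcom_reset_deassert() */
	}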
1176 | 972 | |
1177 | 973 | #define ANDROID_BOOT_DEV_MAX 30 |
1178 | 974 | static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; |
.. | .. |
1218 | 1014 | host->hba = hba; |
1219 | 1015 | ufshcd_set_variant(hba, host); |
1220 | 1016 | |
| 1017 | + /* Setup the reset control of HCI */ |
| 1018 | + host->core_reset = devm_reset_control_get(hba->dev, "rst"); |
| 1019 | + if (IS_ERR(host->core_reset)) { |
| 1020 | + err = PTR_ERR(host->core_reset); |
| 1021 | + dev_warn(dev, "Failed to get reset control %d\n", err); |
| 1022 | + host->core_reset = NULL; |
| 1023 | + err = 0; |
| 1024 | + } |
| 1025 | + |
| 1026 | + /* Fire up the reset controller. Failure here is non-fatal. */ |
| 1027 | + host->rcdev.of_node = dev->of_node; |
| 1028 | + host->rcdev.ops = &ufs_qcom_reset_ops; |
| 1029 | + host->rcdev.owner = dev->driver->owner; |
| 1030 | + host->rcdev.nr_resets = 1; |
| 1031 | + err = devm_reset_controller_register(dev, &host->rcdev); |
| 1032 | + if (err) { |
| 1033 | + dev_warn(dev, "Failed to register reset controller\n"); |
| 1034 | + err = 0; |
| 1035 | + } |
| 1036 | + |
1221 | 1037 | /* |
1222 | 1038 | * voting/devoting device ref_clk source is time consuming hence |
1223 | 1039 | * skip devoting it during aggressive clock gating. This clock |
.. | .. |
1235 | 1051 | __func__, err); |
1236 | 1052 | goto out_variant_clear; |
1237 | 1053 | } else if (IS_ERR(host->generic_phy)) { |
1238 | | - err = PTR_ERR(host->generic_phy); |
1239 | | - dev_err(dev, "%s: PHY get failed %d\n", __func__, err); |
1240 | | - goto out_variant_clear; |
| 1054 | + if (has_acpi_companion(dev)) { |
| 1055 | + host->generic_phy = NULL; |
| 1056 | + } else { |
| 1057 | + err = PTR_ERR(host->generic_phy); |
| 1058 | + dev_err(dev, "%s: PHY get failed %d\n", __func__, err); |
| 1059 | + goto out_variant_clear; |
| 1060 | + } |
1241 | 1061 | } |
1242 | 1062 | |
1243 | | - err = ufs_qcom_bus_register(host); |
1244 | | - if (err) |
| 1063 | + host->device_reset = devm_gpiod_get_optional(dev, "reset", |
| 1064 | + GPIOD_OUT_HIGH); |
| 1065 | + if (IS_ERR(host->device_reset)) { |
| 1066 | + err = PTR_ERR(host->device_reset); |
| 1067 | + if (err != -EPROBE_DEFER) |
| 1068 | + dev_err(dev, "failed to acquire reset gpio: %d\n", err); |
1245 | 1069 | goto out_variant_clear; |
| 1070 | + } |
1246 | 1071 | |
1247 | 1072 | ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, |
1248 | 1073 | &host->hw_ver.minor, &host->hw_ver.step); |
.. | .. |
1256 | 1081 | host->dev_ref_clk_en_mask = BIT(26); |
1257 | 1082 | } else { |
1258 | 1083 | /* "dev_ref_clk_ctrl_mem" is optional resource */ |
1259 | | - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 1084 | + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
| 1085 | + "dev_ref_clk_ctrl_mem"); |
1260 | 1086 | if (res) { |
1261 | 1087 | host->dev_ref_clk_ctrl_mmio = |
1262 | 1088 | devm_ioremap_resource(dev, res); |
.. | .. |
1271 | 1097 | } |
1272 | 1098 | } |
1273 | 1099 | |
1274 | | - /* update phy revision information before calling phy_init() */ |
1275 | | - ufs_qcom_phy_save_controller_version(host->generic_phy, |
1276 | | - host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step); |
1277 | | - |
1278 | 1100 | err = ufs_qcom_init_lane_clks(host); |
1279 | 1101 | if (err) |
1280 | 1102 | goto out_variant_clear; |
1281 | 1103 | |
1282 | 1104 | ufs_qcom_set_caps(hba); |
1283 | 1105 | ufs_qcom_advertise_quirks(hba); |
| 1106 | + |
| 1107 | + err = ufs_qcom_ice_init(host); |
| 1108 | + if (err) |
| 1109 | + goto out_variant_clear; |
1284 | 1110 | |
1285 | 1111 | ufs_qcom_setup_clocks(hba, true, POST_CHANGE); |
1286 | 1112 | |
.. | .. |
1402 | 1228 | int err = 0; |
1403 | 1229 | |
1404 | 1230 | if (status == PRE_CHANGE) { |
| 1231 | + err = ufshcd_uic_hibern8_enter(hba); |
| 1232 | + if (err) |
| 1233 | + return err; |
1405 | 1234 | if (scale_up) |
1406 | 1235 | err = ufs_qcom_clk_scale_up_pre_change(hba); |
1407 | 1236 | else |
1408 | 1237 | err = ufs_qcom_clk_scale_down_pre_change(hba); |
| 1238 | + if (err) |
| 1239 | + ufshcd_uic_hibern8_exit(hba); |
| 1240 | + |
1409 | 1241 | } else { |
1410 | 1242 | if (scale_up) |
1411 | 1243 | err = ufs_qcom_clk_scale_up_post_change(hba); |
1412 | 1244 | else |
1413 | 1245 | err = ufs_qcom_clk_scale_down_post_change(hba); |
1414 | 1246 | |
1415 | | - if (err || !dev_req_params) |
| 1247 | + |
| 1248 | + if (err || !dev_req_params) { |
| 1249 | + ufshcd_uic_hibern8_exit(hba); |
1416 | 1250 | goto out; |
| 1251 | + } |
1417 | 1252 | |
1418 | 1253 | ufs_qcom_cfg_timers(hba, |
1419 | 1254 | dev_req_params->gear_rx, |
1420 | 1255 | dev_req_params->pwr_rx, |
1421 | 1256 | dev_req_params->hs_rate, |
1422 | 1257 | false); |
1423 | | - ufs_qcom_update_bus_bw_vote(host); |
| 1258 | + ufshcd_uic_hibern8_exit(hba); |
1424 | 1259 | } |
1425 | 1260 | |
1426 | 1261 | out: |
.. | .. |
1604 | 1439 | return 0; |
1605 | 1440 | } |
1606 | 1441 | |
1607 | | -static void ufs_qcom_testbus_read(struct ufs_hba *hba) |
1608 | | -{ |
1609 | | - ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); |
1610 | | -} |
1611 | | - |
1612 | | -static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) |
1613 | | -{ |
1614 | | - struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1615 | | - u32 *testbus = NULL; |
1616 | | - int i, nminor = 256, testbus_len = nminor * sizeof(u32); |
1617 | | - |
1618 | | - testbus = kmalloc(testbus_len, GFP_KERNEL); |
1619 | | - if (!testbus) |
1620 | | - return; |
1621 | | - |
1622 | | - host->testbus.select_major = TSTBUS_UNIPRO; |
1623 | | - for (i = 0; i < nminor; i++) { |
1624 | | - host->testbus.select_minor = i; |
1625 | | - ufs_qcom_testbus_config(host); |
1626 | | - testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); |
1627 | | - } |
1628 | | - print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET, |
1629 | | - 16, 4, testbus, testbus_len, false); |
1630 | | - kfree(testbus); |
1631 | | -} |
1632 | | - |
1633 | 1442 | static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) |
1634 | 1443 | { |
1635 | 1444 | ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, |
1636 | 1445 | "HCI Vendor Specific Registers "); |
1637 | 1446 | |
1638 | | - /* sleep a bit intermittently as we are dumping too much data */ |
1639 | 1447 | ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); |
1640 | | - udelay(1000); |
1641 | | - ufs_qcom_testbus_read(hba); |
1642 | | - udelay(1000); |
1643 | | - ufs_qcom_print_unipro_testbus(hba); |
1644 | | - udelay(1000); |
1645 | 1448 | } |
1646 | 1449 | |
1647 | 1450 | /** |
| 1451 | + * ufs_qcom_device_reset() - toggle the (optional) device reset line |
| 1452 | + * @hba: per-adapter instance |
| 1453 | + * |
| 1454 | + * Toggles the (optional) reset line to reset the attached device. |
| 1455 | + */ |
| 1456 | +static int ufs_qcom_device_reset(struct ufs_hba *hba) |
| 1457 | +{ |
| 1458 | + struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
| 1459 | + |
| 1460 | + /* reset gpio is optional */ |
| 1461 | + if (!host->device_reset) |
| 1462 | + return -EOPNOTSUPP; |
| 1463 | + |
| 1464 | + /* |
| 1465 | + * The UFS device shall detect reset pulses of 1us, sleep for 10us to |
| 1466 | + * be on the safe side. |
| 1467 | + */ |
| 1468 | + ufs_qcom_device_reset_ctrl(hba, true); |
| 1469 | + usleep_range(10, 15); |
| 1470 | + |
| 1471 | + ufs_qcom_device_reset_ctrl(hba, false); |
| 1472 | + usleep_range(10, 15); |
| 1473 | + |
| 1474 | + return 0; |
| 1475 | +} |
| 1476 | + |
| 1477 | +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) |
| 1478 | +static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, |
| 1479 | + struct devfreq_dev_profile *p, |
| 1480 | + void *data) |
| 1481 | +{ |
| 1482 | + static struct devfreq_simple_ondemand_data *d; |
| 1483 | + |
| 1484 | + if (!data) |
| 1485 | + return; |
| 1486 | + |
| 1487 | + d = (struct devfreq_simple_ondemand_data *)data; |
| 1488 | + p->polling_ms = 60; |
| 1489 | + d->upthreshold = 70; |
| 1490 | + d->downdifferential = 5; |
| 1491 | +} |
| 1492 | +#else |
| 1493 | +static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, |
| 1494 | + struct devfreq_dev_profile *p, |
| 1495 | + void *data) |
| 1496 | +{ |
| 1497 | +} |
| 1498 | +#endif |
| 1499 | + |
| 1500 | +/* |
1648 | 1501 | * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations |
1649 | 1502 | * |
1650 | 1503 | * The variant operations configure the necessary controller and PHY |
1651 | 1504 | * handshake during initialization. |
1652 | 1505 | */ |
1653 | | -static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { |
| 1506 | +static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { |
1654 | 1507 | .name = "qcom", |
1655 | 1508 | .init = ufs_qcom_init, |
1656 | 1509 | .exit = ufs_qcom_exit, |
.. | .. |
1664 | 1517 | .suspend = ufs_qcom_suspend, |
1665 | 1518 | .resume = ufs_qcom_resume, |
1666 | 1519 | .dbg_register_dump = ufs_qcom_dump_dbg_regs, |
| 1520 | + .device_reset = ufs_qcom_device_reset, |
| 1521 | + .config_scaling_param = ufs_qcom_config_scaling_param, |
| 1522 | + .program_key = ufs_qcom_ice_program_key, |
1667 | 1523 | }; |
1668 | 1524 | |
1669 | 1525 | /** |
.. | .. |
1706 | 1562 | }; |
1707 | 1563 | MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); |
1708 | 1564 | |
| 1565 | +#ifdef CONFIG_ACPI |
| 1566 | +static const struct acpi_device_id ufs_qcom_acpi_match[] = { |
| 1567 | + { "QCOM24A5" }, |
| 1568 | + { }, |
| 1569 | +}; |
| 1570 | +MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match); |
| 1571 | +#endif |
| 1572 | + |
1709 | 1573 | static const struct dev_pm_ops ufs_qcom_pm_ops = { |
1710 | 1574 | .suspend = ufshcd_pltfrm_suspend, |
1711 | 1575 | .resume = ufshcd_pltfrm_resume, |
.. | .. |
1722 | 1586 | .name = "ufshcd-qcom", |
1723 | 1587 | .pm = &ufs_qcom_pm_ops, |
1724 | 1588 | .of_match_table = of_match_ptr(ufs_qcom_of_match), |
| 1589 | + .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match), |
1725 | 1590 | }, |
1726 | 1591 | }; |
1727 | 1592 | module_platform_driver(ufs_qcom_pltform); |
---|