forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/scsi/ufs/ufs-hisi.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * HiSilicon Hixxxx UFS Driver
  *
  * Copyright (c) 2016-2017 Linaro Ltd.
  * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
- *
- * Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/time.h>
@@ -66,7 +64,7 @@
 	return err;
 }
 
-static void ufs_hi3660_clk_init(struct ufs_hba *hba)
+static void ufs_hisi_clk_init(struct ufs_hba *hba)
 {
 	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
 
@@ -80,7 +78,7 @@
 	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
 }
 
-static void ufs_hi3660_soc_init(struct ufs_hba *hba)
+static void ufs_hisi_soc_init(struct ufs_hba *hba)
 {
 	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
 	u32 reg;
@@ -139,6 +137,7 @@
 
 static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
 {
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
 	int err;
 	uint32_t value;
 	uint32_t reg;
@@ -153,6 +152,14 @@
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
 	/* MPHY CBOVRCTRL3 */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* MPHY CBOVRCTRL4 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
+		/* MPHY CBOVRCTRL5 */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
+	}
+
 	/* Unipro VS_MphyCfgUpdt */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
 	/* MPHY RXOVRCTRL4 rx0 */
@@ -173,10 +180,21 @@
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
 
-	/* Tactive RX */
-	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
-	/* Tactive RX */
-	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/* RX_Hibern8Time_Capability*/
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
+		/* RX_Hibern8Time_Capability*/
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
+		/* RX_Min_ActivateTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
+		/* RX_Min_ActivateTime*/
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
+	} else {
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
+		/* Tactive RX */
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
+	}
 
 	/* Gear3 Synclength */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
@@ -208,7 +226,8 @@
 	if (err)
 		dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
 
-	ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
+	if (!(host->caps & UFS_HISI_CAP_PHY10nm))
+		ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
 
 	/* disable auto H8 */
 	reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
@@ -216,7 +235,7 @@
 	ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
 
 	/* Unipro PA_Local_TX_LCC_Enable */
-	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0);
+	ufshcd_disable_host_tx_lcc(hba);
 	/* close Unipro VS_Mk2ExtnSupport */
 	ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
 	ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
@@ -253,7 +272,7 @@
 	return 0;
 }
 
-static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba,
+static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
 					enum ufs_notify_change_status status)
 {
 	int err = 0;
@@ -272,108 +291,7 @@
 	return err;
 }
 
-struct ufs_hisi_dev_params {
-	u32 pwm_rx_gear; /* pwm rx gear to work in */
-	u32 pwm_tx_gear; /* pwm tx gear to work in */
-	u32 hs_rx_gear; /* hs rx gear to work in */
-	u32 hs_tx_gear; /* hs tx gear to work in */
-	u32 rx_lanes; /* number of rx lanes */
-	u32 tx_lanes; /* number of tx lanes */
-	u32 rx_pwr_pwm; /* rx pwm working pwr */
-	u32 tx_pwr_pwm; /* tx pwm working pwr */
-	u32 rx_pwr_hs; /* rx hs working pwr */
-	u32 tx_pwr_hs; /* tx hs working pwr */
-	u32 hs_rate; /* rate A/B to work in HS */
-	u32 desired_working_mode;
-};
-
-static int ufs_hisi_get_pwr_dev_param(
-	struct ufs_hisi_dev_params *hisi_param,
-	struct ufs_pa_layer_attr *dev_max,
-	struct ufs_pa_layer_attr *agreed_pwr)
-{
-	int min_hisi_gear;
-	int min_dev_gear;
-	bool is_dev_sup_hs = false;
-	bool is_hisi_max_hs = false;
-
-	if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE)
-		is_dev_sup_hs = true;
-
-	if (hisi_param->desired_working_mode == FAST) {
-		is_hisi_max_hs = true;
-		min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear,
-				      hisi_param->hs_tx_gear);
-	} else {
-		min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear,
-				      hisi_param->pwm_tx_gear);
-	}
-
-	/*
-	 * device doesn't support HS but
-	 * hisi_param->desired_working_mode is HS,
-	 * thus device and hisi_param don't agree
-	 */
-	if (!is_dev_sup_hs && is_hisi_max_hs) {
-		pr_err("%s: device not support HS\n", __func__);
-		return -ENOTSUPP;
-	} else if (is_dev_sup_hs && is_hisi_max_hs) {
-		/*
-		 * since device supports HS, it supports FAST_MODE.
-		 * since hisi_param->desired_working_mode is also HS
-		 * then final decision (FAST/FASTAUTO) is done according
-		 * to hisi_params as it is the restricting factor
-		 */
-		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-			hisi_param->rx_pwr_hs;
-	} else {
-		/*
-		 * here hisi_param->desired_working_mode is PWM.
-		 * it doesn't matter whether device supports HS or PWM,
-		 * in both cases hisi_param->desired_working_mode will
-		 * determine the mode
-		 */
-		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
-			hisi_param->rx_pwr_pwm;
-	}
-
-	/*
-	 * we would like tx to work in the minimum number of lanes
-	 * between device capability and vendor preferences.
-	 * the same decision will be made for rx
-	 */
-	agreed_pwr->lane_tx =
-		min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes);
-	agreed_pwr->lane_rx =
-		min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes);
-
-	/* device maximum gear is the minimum between device rx and tx gears */
-	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
-	/*
-	 * if both device capabilities and vendor pre-defined preferences are
-	 * both HS or both PWM then set the minimum gear to be the chosen
-	 * working gear.
-	 * if one is PWM and one is HS then the one that is PWM get to decide
-	 * what is the gear, as it is the one that also decided previously what
-	 * pwr the device will be configured to.
-	 */
-	if ((is_dev_sup_hs && is_hisi_max_hs) ||
-	    (!is_dev_sup_hs && !is_hisi_max_hs))
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
-			min_t(u32, min_dev_gear, min_hisi_gear);
-	else
-		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear;
-
-	agreed_pwr->hs_rate = hisi_param->hs_rate;
-
-	pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n",
-		agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx,
-		agreed_pwr->hs_rate);
-	return 0;
-}
-
-static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
+static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
 {
 	hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
 	hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
@@ -391,6 +309,28 @@
 
 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 {
+	struct ufs_hisi_host *host = ufshcd_get_variant(hba);
+
+	if (host->caps & UFS_HISI_CAP_PHY10nm) {
+		/*
+		 * Boston platform need to set SaveConfigTime to 0x13,
+		 * and change sync length to maximum value
+		 */
+		/* VS_DebugSaveConfigTime */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
+		/* g1 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
+		/* g2 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
+		/* g3 sync length */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
+		/* PA_Hibern8Time */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
+		/* PA_Tactivate */
+		ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
+		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
+	}
+
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
 		pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
 		/* VS_DebugSaveConfigTime */
@@ -429,12 +369,12 @@
 	ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
 }
 
-static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba,
+static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
 				struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
-	struct ufs_hisi_dev_params ufs_hisi_cap;
+	struct ufs_dev_params ufs_hisi_cap;
 	int ret = 0;
 
@@ -447,8 +387,8 @@
 	switch (status) {
 	case PRE_CHANGE:
 		ufs_hisi_set_dev_cap(&ufs_hisi_cap);
-		ret = ufs_hisi_get_pwr_dev_param(
-			&ufs_hisi_cap, dev_max_params, dev_req_params);
+		ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
+					       dev_max_params, dev_req_params);
 		if (ret) {
 			dev_err(hba->dev,
 				"%s: failed to determine capabilities\n", __func__);
@@ -507,17 +447,12 @@
 
 static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
 {
-	struct resource *mem_res;
 	struct device *dev = host->hba->dev;
 	struct platform_device *pdev = to_platform_device(dev);
 
 	/* get resource of ufs sys ctrl */
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
-	if (IS_ERR(host->ufs_sys_ctrl))
-		return PTR_ERR(host->ufs_sys_ctrl);
-
-	return 0;
+	host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
+	return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
 }
 
 static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
@@ -539,14 +474,6 @@
 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
 	if (!host)
 		return -ENOMEM;
-
-	/*
-	 * Inline crypto is currently broken with ufs-hisi because the keyslots
-	 * overlap with the vendor-specific SYS CTRL registers -- and even if
-	 * software uses only non-overlapping keyslots, the kernel crashes when
-	 * programming a key or a UFS error occurs on the first encrypted I/O.
-	 */
-	hba->quirks |= UFSHCD_QUIRK_BROKEN_CRYPTO;
 
 	host->hba = hba;
 	ufshcd_set_variant(hba, host);
582509 return ret;
583510 }
584511
585
- ufs_hi3660_clk_init(hba);
512
+ ufs_hisi_clk_init(hba);
586513
587
- ufs_hi3660_soc_init(hba);
514
+ ufs_hisi_soc_init(hba);
588515
589516 return 0;
590517 }
591518
592
-static struct ufs_hba_variant_ops ufs_hba_hisi_vops = {
519
+static int ufs_hi3670_init(struct ufs_hba *hba)
520
+{
521
+ int ret = 0;
522
+ struct device *dev = hba->dev;
523
+ struct ufs_hisi_host *host;
524
+
525
+ ret = ufs_hisi_init_common(hba);
526
+ if (ret) {
527
+ dev_err(dev, "%s: ufs common init fail\n", __func__);
528
+ return ret;
529
+ }
530
+
531
+ ufs_hisi_clk_init(hba);
532
+
533
+ ufs_hisi_soc_init(hba);
534
+
535
+ /* Add cap for 10nm PHY variant on HI3670 SoC */
536
+ host = ufshcd_get_variant(hba);
537
+ host->caps |= UFS_HISI_CAP_PHY10nm;
538
+
539
+ return 0;
540
+}
541
+
542
+static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
593543 .name = "hi3660",
594544 .init = ufs_hi3660_init,
595
- .link_startup_notify = ufs_hi3660_link_startup_notify,
596
- .pwr_change_notify = ufs_hi3660_pwr_change_notify,
545
+ .link_startup_notify = ufs_hisi_link_startup_notify,
546
+ .pwr_change_notify = ufs_hisi_pwr_change_notify,
597547 .suspend = ufs_hisi_suspend,
598548 .resume = ufs_hisi_resume,
599549 };
600550
551
+static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
552
+ .name = "hi3670",
553
+ .init = ufs_hi3670_init,
554
+ .link_startup_notify = ufs_hisi_link_startup_notify,
555
+ .pwr_change_notify = ufs_hisi_pwr_change_notify,
556
+ .suspend = ufs_hisi_suspend,
557
+ .resume = ufs_hisi_resume,
558
+};
559
+
560
+static const struct of_device_id ufs_hisi_of_match[] = {
561
+ { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
562
+ { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
563
+ {},
564
+};
565
+
566
+MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
567
+
601568 static int ufs_hisi_probe(struct platform_device *pdev)
602569 {
603
- return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops);
570
+ const struct of_device_id *of_id;
571
+
572
+ of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
573
+
574
+ return ufshcd_pltfrm_init(pdev, of_id->data);
604575 }
605576
606577 static int ufs_hisi_remove(struct platform_device *pdev)
@@ -610,13 +581,6 @@
 	ufshcd_remove(hba);
 	return 0;
 }
-
-static const struct of_device_id ufs_hisi_of_match[] = {
-	{ .compatible = "hisilicon,hi3660-ufs" },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
 
 static const struct dev_pm_ops ufs_hisi_pm_ops = {
 	.suspend = ufshcd_pltfrm_suspend,