forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2023-12-09
Commit: 95099d4622f8cb224d94e314c7a8e0df60b13f87
File:   kernel/drivers/scsi/ufs/ufshcd.c
....@@ -1,40 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Universal Flash Storage Host controller driver Core
- *
- * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
  *	Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
- *
- * The Linux Foundation chooses to take subject only to the GPLv2
- * license terms, and distributes only under these terms.
  */
 
 #include <linux/async.h>
....@@ -42,27 +14,36 @@
 #include <linux/nls.h>
 #include <linux/of.h>
 #include <linux/bitfield.h>
+#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
 #include "ufshcd.h"
+#include "ufshcd-add-info.h"
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
+#include "ufs-debugfs.h"
+#include "ufs_bsg.h"
 #include "ufshcd-crypto.h"
+#include "ufshpb.h"
+#include <asm/unaligned.h>
+#include <linux/blkdev.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
 
-#define UFSHCD_REQ_SENSE_SIZE	18
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/ufshcd.h>
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT	500
+#define UIC_CMD_TIMEOUT	5000
 
 /* NOP OUT retries waiting for NOP IN response */
 #define NOP_OUT_RETRIES    10
-/* Timeout after 30 msecs if NOP OUT hangs without response */
-#define NOP_OUT_TIMEOUT    30 /* msecs */
+/* Timeout after 50 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT    50 /* msecs */
 
 /* Query request retries */
 #define QUERY_REQ_RETRIES 3
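
For context on the timeout bump above: UIC_CMD_TIMEOUT is consumed later in this file by converting milliseconds to jiffies before waiting on the UIC command completion, so the larger value simply gives slow links and firmware more headroom. A minimal sketch of that consumption pattern follows; the wrapper name is invented and the headers are assumed to be the ones already included at the top of this file.

/* Sketch only; wait_uic_cmd_done() is a hypothetical wrapper. */
static int wait_uic_cmd_done(struct completion *done)
{
	if (!wait_for_completion_timeout(done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		return -ETIMEDOUT;	/* now allows up to 5000 ms */
	return 0;
}
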
....@@ -93,6 +74,15 @@
 /* default delay of autosuspend: 2000 ms */
 #define RPM_AUTOSUSPEND_DELAY_MS 2000
 
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
+/* Default value of wait time before gating device ref clock */
+#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
+
+/* Polling time to wait for fDeviceInit */
+#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+
 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
	({ \
		int _ret; \
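
FDEVICEINIT_COMPL_TIMEOUT added above bounds how long initialization polls the device's fDeviceInit flag. A hedged sketch of that polling loop, using the query helper this driver already exposes; the function wrapping the loop is invented and not the exact hunk from this change.

/* Illustrative sketch only. */
static int ufshcd_poll_fdeviceinit(struct ufs_hba *hba)
{
	unsigned long deadline = jiffies +
				 msecs_to_jiffies(FDEVICEINIT_COMPL_TIMEOUT);
	bool flag_res = true;
	int err;

	do {
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					      QUERY_FLAG_IDN_FDEVICEINIT,
					      0, &flag_res);
		if (err || !flag_res)
			break;	/* error, or the device finished init */
		usleep_range(5000, 10000);
	} while (time_before(jiffies, deadline));

	if (!err && flag_res)
		err = -EBUSY;	/* fDeviceInit still set after the timeout */
	return err;
}
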
....@@ -119,12 +109,17 @@
	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;
 
-	regs = kzalloc(len, GFP_KERNEL);
+	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;
 
-	for (pos = 0; pos < len; pos += 4)
+	for (pos = 0; pos < len; pos += 4) {
+		if (offset == 0 &&
+		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
+		    pos <= REG_UIC_ERROR_CODE_DME)
+			continue;
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
+	}
 
	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);
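
The window skipped in the loop above covers the UIC error-code registers (REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER through REG_UIC_ERROR_CODE_DME); those registers are cleared when read, so including them in a debug dump would consume error codes the interrupt and error-handling paths still need, and the switch to GFP_ATOMIC suggests the dump may now run from atomic context. The helper below only restates that range check; its name is made up.

/* Hypothetical helper restating the skip condition from the hunk above. */
static bool ufshcd_pos_is_uic_err_code(u32 dump_offset, u32 pos)
{
	/* Only a dump starting at the register file base can reach them. */
	return dump_offset == 0 &&
	       pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
	       pos <= REG_UIC_ERROR_CODE_DME;
}
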
....@@ -136,8 +131,9 @@
136131 enum {
137132 UFSHCD_MAX_CHANNEL = 0,
138133 UFSHCD_MAX_ID = 1,
139
- UFSHCD_CMD_PER_LUN = 32,
140
- UFSHCD_CAN_QUEUE = 32,
134
+ UFSHCD_NUM_RESERVED = 1,
135
+ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
136
+ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
141137 };
142138
143139 /* UFSHCD states */
....@@ -145,7 +141,8 @@
145141 UFSHCD_STATE_RESET,
146142 UFSHCD_STATE_ERROR,
147143 UFSHCD_STATE_OPERATIONAL,
148
- UFSHCD_STATE_EH_SCHEDULED,
144
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
145
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
149146 };
150147
151148 /* UFSHCD error handling flags */
....@@ -161,6 +158,7 @@
161158 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
162159 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
163160 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
161
+ UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
164162 };
165163
166164 #define ufshcd_set_eh_in_progress(h) \
....@@ -169,19 +167,6 @@
169167 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
170168 #define ufshcd_clear_eh_in_progress(h) \
171169 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
172
-
173
-#define ufshcd_set_ufs_dev_active(h) \
174
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
175
-#define ufshcd_set_ufs_dev_sleep(h) \
176
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
177
-#define ufshcd_set_ufs_dev_poweroff(h) \
178
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
179
-#define ufshcd_is_ufs_dev_active(h) \
180
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
181
-#define ufshcd_is_ufs_dev_sleep(h) \
182
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
183
-#define ufshcd_is_ufs_dev_poweroff(h) \
184
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
185170
186171 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
187172 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
....@@ -223,44 +208,33 @@
223208 static struct ufs_dev_fix ufs_fixups[] = {
224209 /* UFS cards deviations table */
225210 UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
226
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
211
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
212
+ UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
227213 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
228
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
229
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
230
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
214
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
215
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
231216 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
232
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
233
- UFS_DEVICE_NO_FASTAUTO),
234
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
235
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
217
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
218
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
219
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
220
+ UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
236221 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
237222 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
238223 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
239224 UFS_DEVICE_QUIRK_PA_TACTIVATE),
240225 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
241226 UFS_DEVICE_QUIRK_PA_TACTIVATE),
242
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
243
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
244
- UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
245
- UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
246
- UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
247
-
248227 END_FIX
249228 };
250229
251
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
230
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
252231 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
253232 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
254233 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
255234 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
256235 static void ufshcd_hba_exit(struct ufs_hba *hba);
257
-static int ufshcd_probe_hba(struct ufs_hba *hba);
258
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
259
- bool skip_ref_clk);
236
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
260237 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
261
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
262
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
263
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
264238 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
265239 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
266240 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
....@@ -270,33 +244,59 @@
270244 static irqreturn_t ufshcd_intr(int irq, void *__hba);
271245 static int ufshcd_change_power_mode(struct ufs_hba *hba,
272246 struct ufs_pa_layer_attr *pwr_mode);
247
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
248
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
249
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
250
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
251
+ struct ufs_vreg *vreg);
252
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
253
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
254
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
255
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
256
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
257
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
258
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
259
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
260
+
273261 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
274262 {
275263 return tag >= 0 && tag < hba->nutrs;
276264 }
277265
278
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
266
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
279267 {
280
- int ret = 0;
281
-
282268 if (!hba->is_irq_enabled) {
283
- ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
284
- hba);
285
- if (ret)
286
- dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
287
- __func__, ret);
269
+ enable_irq(hba->irq);
288270 hba->is_irq_enabled = true;
289271 }
290
-
291
- return ret;
292272 }
293273
294274 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
295275 {
296276 if (hba->is_irq_enabled) {
297
- free_irq(hba->irq, hba);
277
+ disable_irq(hba->irq);
298278 hba->is_irq_enabled = false;
299279 }
280
+}
281
+
282
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
283
+{
284
+ int ret;
285
+
286
+ if (!ufshcd_is_wb_allowed(hba))
287
+ return;
288
+
289
+ ret = ufshcd_wb_ctrl(hba, true);
290
+ if (ret)
291
+ dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
292
+ else
293
+ dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
294
+ ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
295
+ if (ret)
296
+ dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
297
+ __func__, ret);
298
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
299
+ ufshcd_wb_toggle_flush(hba, true);
300300 }
301301
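
The WriteBooster setup added above bails out early when WB is not allowed, enables WB, then enables flush during hibern8, and finally the manual flush toggle unless the controller quirk forbids it. A hedged sketch of the expected call site, assumed from context rather than shown in this hunk:

/* Assumption: called once at the end of device probe, before the first
 * runtime suspend, so WB state is programmed while the device is active. */
ufshcd_wb_config(hba);
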
302302 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
....@@ -309,16 +309,6 @@
309309 {
310310 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
311311 scsi_block_requests(hba->host);
312
-}
313
-
314
-/* replace non-printable or non-ASCII characters with spaces */
315
-static inline void ufshcd_remove_non_printable(char *val)
316
-{
317
- if (!val)
318
- return;
319
-
320
- if (*val < 0x20 || *val > 0x7e)
321
- *val = ' ';
322312 }
323313
324314 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
....@@ -340,21 +330,40 @@
340330 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
341331 const char *str)
342332 {
343
- struct utp_task_req_desc *descp;
344
- struct utp_upiu_task_req *task_req;
345
- int off = (int)tag - hba->nutrs;
333
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
346334
347
- descp = &hba->utmrdl_base_addr[off];
348
- task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
349
- trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
350
- &task_req->input_param1);
335
+ trace_android_vh_ufs_send_tm_command(hba, tag, str);
336
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
337
+ &descp->input_param1);
338
+}
339
+
340
+static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
341
+ struct uic_command *ucmd,
342
+ const char *str)
343
+{
344
+ u32 cmd;
345
+
346
+ trace_android_vh_ufs_send_uic_command(hba, ucmd, str);
347
+
348
+ if (!trace_ufshcd_uic_command_enabled())
349
+ return;
350
+
351
+ if (!strcmp(str, "send"))
352
+ cmd = ucmd->command;
353
+ else
354
+ cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
355
+
356
+ trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
357
+ ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
358
+ ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
359
+ ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
351360 }
352361
353362 static void ufshcd_add_command_trace(struct ufs_hba *hba,
354363 unsigned int tag, const char *str)
355364 {
356365 sector_t lba = -1;
357
- u8 opcode = 0;
366
+ u8 opcode = 0, group_id = 0;
358367 u32 intr, doorbell;
359368 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
360369 struct scsi_cmnd *cmd = lrbp->cmd;
....@@ -380,13 +389,20 @@
380389 lba = cmd->request->bio->bi_iter.bi_sector;
381390 transfer_len = be32_to_cpu(
382391 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
392
+ if (opcode == WRITE_10)
393
+ group_id = lrbp->cmd->cmnd[6];
394
+ } else if (opcode == UNMAP) {
395
+ if (cmd->request) {
396
+ lba = scsi_get_lba(cmd);
397
+ transfer_len = blk_rq_bytes(cmd->request);
398
+ }
383399 }
384400 }
385401
386402 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
387403 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
388404 trace_ufshcd_command(dev_name(hba->dev), str, tag,
389
- doorbell, transfer_len, intr, lba, opcode);
405
+ doorbell, transfer_len, intr, lba, opcode, group_id);
390406 }
391407
392408 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
....@@ -405,46 +421,54 @@
405421 }
406422 }
407423
408
-static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
409
- struct ufs_uic_err_reg_hist *err_hist, char *err_name)
424
+static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
425
+ char *err_name)
410426 {
411427 int i;
428
+ bool found = false;
429
+ struct ufs_event_hist *e;
412430
413
- for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
414
- int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
431
+ if (id >= UFS_EVT_CNT)
432
+ return;
415433
416
- if (err_hist->reg[p] == 0)
434
+ e = &hba->ufs_stats.event[id];
435
+
436
+ for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
437
+ int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
438
+
439
+ if (e->tstamp[p] == 0)
417440 continue;
418
- dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
419
- err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
441
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
442
+ e->val[p], ktime_to_us(e->tstamp[p]));
443
+ found = true;
420444 }
445
+
446
+ if (!found)
447
+ dev_err(hba->dev, "No record of %s\n", err_name);
421448 }
422449
423
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
450
+static void ufshcd_print_evt_hist(struct ufs_hba *hba)
424451 {
425452 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
426
- dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
427
- hba->ufs_version, hba->capabilities);
428
- dev_err(hba->dev,
429
- "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
430
- (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
431
- dev_err(hba->dev,
432
- "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
433
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
434
- hba->ufs_stats.hibern8_exit_cnt);
435453
436
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
437
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
438
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
439
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
440
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
454
+ ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
455
+ ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
456
+ ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
457
+ ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
458
+ ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
459
+ ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
460
+ "auto_hibern8_err");
461
+ ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
462
+ ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
463
+ "link_startup_fail");
464
+ ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
465
+ ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
466
+ "suspend_fail");
467
+ ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
468
+ ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
469
+ ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
441470
442
- ufshcd_print_clk_freqs(hba);
443
-
444
- if (hba->vops && hba->vops->dbg_register_dump)
445
- hba->vops->dbg_register_dump(hba);
446
-
447
- ufshcd_crypto_debug(hba);
471
+ ufshcd_vops_dbg_register_dump(hba);
448472 }
449473
450474 static
....@@ -476,8 +500,8 @@
476500 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
477501 sizeof(struct utp_upiu_rsp));
478502
479
- prdt_length =
480
- le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
503
+ prdt_length = le16_to_cpu(
504
+ lrbp->utr_descriptor_ptr->prd_table_length);
481505 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
482506 prdt_length /= hba->sg_entry_size;
483507
....@@ -494,30 +518,23 @@
494518
495519 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
496520 {
497
- struct utp_task_req_desc *tmrdp;
498521 int tag;
499522
500523 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
501
- tmrdp = &hba->utmrdl_base_addr[tag];
524
+ struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
525
+
502526 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
503
- ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
504
- sizeof(struct request_desc_header));
505
- dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
506
- tag);
507
- ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
508
- sizeof(struct utp_upiu_req));
509
- dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
510
- tag);
511
- ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
512
- sizeof(struct utp_task_req_desc));
527
+ ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
513528 }
514529 }
515530
516531 static void ufshcd_print_host_state(struct ufs_hba *hba)
517532 {
533
+ struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
534
+
518535 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
519
- dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
520
- hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
536
+ dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
537
+ hba->outstanding_reqs, hba->outstanding_tasks);
521538 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
522539 hba->saved_err, hba->saved_uic_err);
523540 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
....@@ -527,12 +544,24 @@
527544 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
528545 hba->auto_bkops_enabled, hba->host->host_self_blocked);
529546 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
547
+ dev_err(hba->dev,
548
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
549
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
550
+ hba->ufs_stats.hibern8_exit_cnt);
551
+ dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
552
+ ktime_to_us(hba->ufs_stats.last_intr_ts),
553
+ hba->ufs_stats.last_intr_status);
530554 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
531555 hba->eh_flags, hba->req_abort_count);
532
- dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
533
- hba->capabilities, hba->caps);
556
+ dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
557
+ hba->ufs_version, hba->capabilities, hba->caps);
534558 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
535559 hba->dev_quirks);
560
+ if (sdev_ufs)
561
+ dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
562
+ sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
563
+
564
+ ufshcd_print_clk_freqs(hba);
536565 }
537566
538567 /**
....@@ -561,21 +590,33 @@
561590 hba->pwr_info.hs_rate);
562591 }
563592
564
-/*
593
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
594
+{
595
+ if (!us)
596
+ return;
597
+
598
+ if (us < 10)
599
+ udelay(us);
600
+ else
601
+ usleep_range(us, us + tolerance);
602
+}
603
+EXPORT_SYMBOL_GPL(ufshcd_delay_us);
604
+
605
+/**
565606 * ufshcd_wait_for_register - wait for register value to change
566
- * @hba - per-adapter interface
567
- * @reg - mmio register offset
568
- * @mask - mask to apply to read register value
569
- * @val - wait condition
570
- * @interval_us - polling interval in microsecs
571
- * @timeout_ms - timeout in millisecs
572
- * @can_sleep - perform sleep or just spin
607
+ * @hba: per-adapter interface
608
+ * @reg: mmio register offset
609
+ * @mask: mask to apply to the read register value
610
+ * @val: value to wait for
611
+ * @interval_us: polling interval in microseconds
612
+ * @timeout_ms: timeout in milliseconds
573613 *
574
- * Returns -ETIMEDOUT on error, zero on success
614
+ * Return:
615
+ * -ETIMEDOUT on error, zero on success.
575616 */
576617 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
577618 u32 val, unsigned long interval_us,
578
- unsigned long timeout_ms, bool can_sleep)
619
+ unsigned long timeout_ms)
579620 {
580621 int err = 0;
581622 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
....@@ -584,10 +625,7 @@
584625 val = val & mask;
585626
586627 while ((ufshcd_readl(hba, reg) & mask) != val) {
587
- if (can_sleep)
588
- usleep_range(interval_us, interval_us + 50);
589
- else
590
- udelay(interval_us);
628
+ usleep_range(interval_us, interval_us + 50);
591629 if (time_after(jiffies, timeout)) {
592630 if ((ufshcd_readl(hba, reg) & mask) != val)
593631 err = -ETIMEDOUT;
....@@ -606,23 +644,12 @@
606644 */
607645 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
608646 {
609
- u32 intr_mask = 0;
647
+ if (hba->ufs_version == ufshci_version(1, 0))
648
+ return INTERRUPT_MASK_ALL_VER_10;
649
+ if (hba->ufs_version <= ufshci_version(2, 0))
650
+ return INTERRUPT_MASK_ALL_VER_11;
610651
611
- switch (hba->ufs_version) {
612
- case UFSHCI_VERSION_10:
613
- intr_mask = INTERRUPT_MASK_ALL_VER_10;
614
- break;
615
- case UFSHCI_VERSION_11:
616
- case UFSHCI_VERSION_20:
617
- intr_mask = INTERRUPT_MASK_ALL_VER_11;
618
- break;
619
- case UFSHCI_VERSION_21:
620
- default:
621
- intr_mask = INTERRUPT_MASK_ALL_VER_21;
622
- break;
623
- }
624
-
625
- return intr_mask;
652
+ return INTERRUPT_MASK_ALL_VER_21;
626653 }
627654
628655 /**
....@@ -633,10 +660,22 @@
633660 */
634661 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
635662 {
636
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
637
- return ufshcd_vops_get_ufs_hci_version(hba);
663
+ u32 ufshci_ver;
638664
639
- return ufshcd_readl(hba, REG_UFS_VERSION);
665
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
666
+ ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
667
+ else
668
+ ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
669
+
670
+ /*
671
+ * UFSHCI v1.x uses a different version scheme, in order
672
+ * to allow the use of comparisons with the ufshci_version
673
+ * function, we convert it to the same scheme as ufs 2.0+.
674
+ */
675
+ if (ufshci_ver & 0x00010000)
676
+ return ufshci_version(1, ufshci_ver & 0x00000100);
677
+
678
+ return ufshci_ver;
640679 }
641680
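
The conversion above lets the rest of the driver compare hba->ufs_version against ufshci_version(major, minor) with plain integer comparisons. The helper itself is not part of this hunk; it is presumably defined in ufshci.h along these lines (a sketch, shown for context only):

/* Assumed definition; e.g. ufshci_version(2, 0) == 0x200, the same value
 * a native UFSHCI 2.0 controller reports in REG_UFS_VERSION. */
static inline u32 ufshci_version(u32 major, u32 minor)
{
	return (major << 8) + minor;
}
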
642681 /**
....@@ -662,53 +701,6 @@
662701 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
663702 {
664703 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
665
-}
666
-
667
-/**
668
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
669
- * @task_req_descp: pointer to utp_task_req_desc structure
670
- *
671
- * This function is used to get the OCS field from UTMRD
672
- * Returns the OCS field in the UTMRD
673
- */
674
-static inline int
675
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
676
-{
677
- return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
678
-}
679
-
680
-/**
681
- * ufshcd_get_tm_free_slot - get a free slot for task management request
682
- * @hba: per adapter instance
683
- * @free_slot: pointer to variable with available slot value
684
- *
685
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
686
- * Returns 0 if free slot is not available, else return 1 with tag value
687
- * in @free_slot.
688
- */
689
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
690
-{
691
- int tag;
692
- bool ret = false;
693
-
694
- if (!free_slot)
695
- goto out;
696
-
697
- do {
698
- tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
699
- if (tag >= hba->nutmrs)
700
- goto out;
701
- } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
702
-
703
- *free_slot = tag;
704
- ret = true;
705
-out:
706
- return ret;
707
-}
708
-
709
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
710
-{
711
- clear_bit_unlock(slot, &hba->tm_slots_in_use);
712704 }
713705
714706 /**
....@@ -745,7 +737,7 @@
745737 */
746738 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
747739 {
748
- __clear_bit(tag, &hba->outstanding_reqs);
740
+ clear_bit(tag, &hba->outstanding_reqs);
749741 }
750742
751743 /**
....@@ -894,10 +886,8 @@
894886 {
895887 u32 val = CONTROLLER_ENABLE;
896888
897
- if (ufshcd_hba_is_crypto_supported(hba)) {
898
- ufshcd_crypto_enable(hba);
889
+ if (ufshcd_crypto_enable(hba))
899890 val |= CRYPTO_GENERAL_ENABLE;
900
- }
901891
902892 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
903893 }
....@@ -917,8 +907,7 @@
917907 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
918908 {
919909 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
920
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
921
- (hba->ufs_version == UFSHCI_VERSION_11))
910
+ if (hba->ufs_version <= ufshci_version(1, 1))
922911 return UFS_UNIPRO_VER_1_41;
923912 else
924913 return UFS_UNIPRO_VER_1_6;
....@@ -942,20 +931,22 @@
942931 return false;
943932 }
944933
945
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
934
+/**
935
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
936
+ * @hba: per adapter instance
937
+ * @scale_up: If True, set max possible frequency otherwise set low frequency
938
+ *
939
+ * Returns 0 if successful
940
+ * Returns < 0 for any other errors
941
+ */
942
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
946943 {
947944 int ret = 0;
948945 struct ufs_clk_info *clki;
949946 struct list_head *head = &hba->clk_list_head;
950
- ktime_t start = ktime_get();
951
- bool clk_state_changed = false;
952947
953948 if (list_empty(head))
954949 goto out;
955
-
956
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
957
- if (ret)
958
- return ret;
959950
960951 list_for_each_entry(clki, head, list) {
961952 if (!IS_ERR_OR_NULL(clki->clk)) {
....@@ -963,7 +954,6 @@
963954 if (clki->curr_freq == clki->max_freq)
964955 continue;
965956
966
- clk_state_changed = true;
967957 ret = clk_set_rate(clki->clk, clki->max_freq);
968958 if (ret) {
969959 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
....@@ -982,7 +972,6 @@
982972 if (clki->curr_freq == clki->min_freq)
983973 continue;
984974
985
- clk_state_changed = true;
986975 ret = clk_set_rate(clki->clk, clki->min_freq);
987976 if (ret) {
988977 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
....@@ -1001,11 +990,37 @@
1001990 clki->name, clk_get_rate(clki->clk));
1002991 }
1003992
993
+out:
994
+ return ret;
995
+}
996
+
997
+/**
998
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
999
+ * @hba: per adapter instance
1000
+ * @scale_up: True if scaling up and false if scaling down
1001
+ *
1002
+ * Returns 0 if successful
1003
+ * Returns < 0 for any other errors
1004
+ */
1005
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
1006
+{
1007
+ int ret = 0;
1008
+ ktime_t start = ktime_get();
1009
+
1010
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1011
+ if (ret)
1012
+ goto out;
1013
+
1014
+ ret = ufshcd_set_clk_freq(hba, scale_up);
1015
+ if (ret)
1016
+ goto out;
1017
+
10041018 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1019
+ if (ret)
1020
+ ufshcd_set_clk_freq(hba, !scale_up);
10051021
10061022 out:
1007
- if (clk_state_changed)
1008
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1023
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
10091024 (scale_up ? "up" : "down"),
10101025 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
10111026 return ret;
....@@ -1114,7 +1129,6 @@
11141129 */
11151130 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
11161131 {
1117
- #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
11181132 int ret = 0;
11191133 struct ufs_pa_layer_attr new_pwr_info;
11201134
....@@ -1125,22 +1139,21 @@
11251139 memcpy(&new_pwr_info, &hba->pwr_info,
11261140 sizeof(struct ufs_pa_layer_attr));
11271141
1128
- if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1129
- || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1142
+ if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1143
+ hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
11301144 /* save the current power mode */
11311145 memcpy(&hba->clk_scaling.saved_pwr_info.info,
11321146 &hba->pwr_info,
11331147 sizeof(struct ufs_pa_layer_attr));
11341148
11351149 /* scale down gear */
1136
- new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1137
- new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1150
+ new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1151
+ new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
11381152 }
11391153 }
11401154
11411155 /* check if the power mode needs to be changed or not? */
1142
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1143
-
1156
+ ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
11441157 if (ret)
11451158 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
11461159 __func__, ret,
....@@ -1160,19 +1173,30 @@
11601173 */
11611174 ufshcd_scsi_block_requests(hba);
11621175 down_write(&hba->clk_scaling_lock);
1163
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1176
+
1177
+ if (!hba->clk_scaling.is_allowed ||
1178
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
11641179 ret = -EBUSY;
11651180 up_write(&hba->clk_scaling_lock);
11661181 ufshcd_scsi_unblock_requests(hba);
1182
+ goto out;
11671183 }
11681184
1185
+ /* let's not get into low power until clock scaling is completed */
1186
+ ufshcd_hold(hba, false);
1187
+
1188
+out:
11691189 return ret;
11701190 }
11711191
1172
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1192
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
11731193 {
1174
- up_write(&hba->clk_scaling_lock);
1194
+ if (writelock)
1195
+ up_write(&hba->clk_scaling_lock);
1196
+ else
1197
+ up_read(&hba->clk_scaling_lock);
11751198 ufshcd_scsi_unblock_requests(hba);
1199
+ ufshcd_release(hba);
11761200 }
11771201
11781202 /**
....@@ -1187,9 +1211,7 @@
11871211 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
11881212 {
11891213 int ret = 0;
1190
-
1191
- /* let's not get into low power until clock scaling is completed */
1192
- ufshcd_hold(hba, false);
1214
+ bool is_writelock = true;
11931215
11941216 ret = ufshcd_clock_scaling_prepare(hba);
11951217 if (ret)
....@@ -1199,14 +1221,14 @@
11991221 if (!scale_up) {
12001222 ret = ufshcd_scale_gear(hba, false);
12011223 if (ret)
1202
- goto out;
1224
+ goto out_unprepare;
12031225 }
12041226
12051227 ret = ufshcd_scale_clks(hba, scale_up);
12061228 if (ret) {
12071229 if (!scale_up)
12081230 ufshcd_scale_gear(hba, true);
1209
- goto out;
1231
+ goto out_unprepare;
12101232 }
12111233
12121234 /* scale up the gear after scaling up clocks */
....@@ -1214,15 +1236,17 @@
12141236 ret = ufshcd_scale_gear(hba, true);
12151237 if (ret) {
12161238 ufshcd_scale_clks(hba, false);
1217
- goto out;
1239
+ goto out_unprepare;
12181240 }
12191241 }
12201242
1221
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1243
+ /* Enable Write Booster if we have scaled up else disable it */
1244
+ downgrade_write(&hba->clk_scaling_lock);
1245
+ is_writelock = false;
1246
+ ufshcd_wb_ctrl(hba, scale_up);
12221247
1223
-out:
1224
- ufshcd_clock_scaling_unprepare(hba);
1225
- ufshcd_release(hba);
1248
+out_unprepare:
1249
+ ufshcd_clock_scaling_unprepare(hba, is_writelock);
12261250 return ret;
12271251 }
12281252
....@@ -1270,10 +1294,15 @@
12701294 struct list_head *clk_list = &hba->clk_list_head;
12711295 struct ufs_clk_info *clki;
12721296 unsigned long irq_flags;
1297
+ bool force_out = false;
1298
+ bool force_scaling = false;
12731299
12741300 if (!ufshcd_is_clkscaling_supported(hba))
12751301 return -EINVAL;
12761302
1303
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1304
+ /* Override with the closest supported frequency */
1305
+ *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
12771306 spin_lock_irqsave(hba->host->host_lock, irq_flags);
12781307 if (ufshcd_eh_in_progress(hba)) {
12791308 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
....@@ -1288,24 +1317,23 @@
12881317 goto out;
12891318 }
12901319
1291
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1320
+ /* Decide based on the rounded-off frequency and update */
12921321 scale_up = (*freq == clki->max_freq) ? true : false;
1293
- if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1322
+ if (!scale_up)
1323
+ *freq = clki->min_freq;
1324
+
1325
+ trace_android_vh_ufs_clock_scaling(hba, &force_out, &force_scaling, &scale_up);
1326
+
1327
+ /* Update the frequency */
1328
+ if (force_out || (!force_scaling && !ufshcd_is_devfreq_scaling_required(hba, scale_up))) {
12941329 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
12951330 ret = 0;
12961331 goto out; /* no state change required */
12971332 }
12981333 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
12991334
1300
- pm_runtime_get_noresume(hba->dev);
1301
- if (!pm_runtime_active(hba->dev)) {
1302
- pm_runtime_put_noidle(hba->dev);
1303
- ret = -EAGAIN;
1304
- goto out;
1305
- }
13061335 start = ktime_get();
13071336 ret = ufshcd_devfreq_scale(hba, scale_up);
1308
- pm_runtime_put(hba->dev);
13091337
13101338 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
13111339 (scale_up ? "up" : "down"),
....@@ -1319,6 +1347,24 @@
13191347 return ret;
13201348 }
13211349
1350
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1351
+{
1352
+ int *busy = priv;
1353
+
1354
+ WARN_ON_ONCE(reserved);
1355
+ (*busy)++;
1356
+ return false;
1357
+}
1358
+
1359
+/* Whether or not any tag is in use by a request that is in progress. */
1360
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1361
+{
1362
+ struct request_queue *q = hba->cmd_queue;
1363
+ int busy = 0;
1364
+
1365
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1366
+ return busy;
1367
+}
13221368
13231369 static int ufshcd_devfreq_get_dev_status(struct device *dev,
13241370 struct devfreq_dev_status *stat)
....@@ -1326,6 +1372,9 @@
13261372 struct ufs_hba *hba = dev_get_drvdata(dev);
13271373 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
13281374 unsigned long flags;
1375
+ struct list_head *clk_list = &hba->clk_list_head;
1376
+ struct ufs_clk_info *clki;
1377
+ ktime_t curr_t;
13291378
13301379 if (!ufshcd_is_clkscaling_supported(hba))
13311380 return -EINVAL;
....@@ -1333,22 +1382,29 @@
13331382 memset(stat, 0, sizeof(*stat));
13341383
13351384 spin_lock_irqsave(hba->host->host_lock, flags);
1385
+ curr_t = ktime_get();
13361386 if (!scaling->window_start_t)
13371387 goto start_window;
13381388
1389
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1390
+ /*
1391
+ * If current frequency is 0, then the ondemand governor considers
1392
+ * there's no initial frequency set. And it always requests to set
1393
+ * to max. frequency.
1394
+ */
1395
+ stat->current_frequency = clki->curr_freq;
13391396 if (scaling->is_busy_started)
1340
- scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1341
- scaling->busy_start_t));
1397
+ scaling->tot_busy_t += ktime_us_delta(curr_t,
1398
+ scaling->busy_start_t);
13421399
1343
- stat->total_time = jiffies_to_usecs((long)jiffies -
1344
- (long)scaling->window_start_t);
1400
+ stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
13451401 stat->busy_time = scaling->tot_busy_t;
13461402 start_window:
1347
- scaling->window_start_t = jiffies;
1403
+ scaling->window_start_t = curr_t;
13481404 scaling->tot_busy_t = 0;
13491405
13501406 if (hba->outstanding_reqs) {
1351
- scaling->busy_start_t = ktime_get();
1407
+ scaling->busy_start_t = curr_t;
13521408 scaling->is_busy_started = true;
13531409 } else {
13541410 scaling->busy_start_t = 0;
....@@ -1357,12 +1413,6 @@
13571413 spin_unlock_irqrestore(hba->host->host_lock, flags);
13581414 return 0;
13591415 }
1360
-
1361
-static struct devfreq_dev_profile ufs_devfreq_profile = {
1362
- .polling_ms = 100,
1363
- .target = ufshcd_devfreq_target,
1364
- .get_dev_status = ufshcd_devfreq_get_dev_status,
1365
-};
13661416
13671417 static int ufshcd_devfreq_init(struct ufs_hba *hba)
13681418 {
....@@ -1379,10 +1429,12 @@
13791429 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
13801430 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
13811431
1432
+ ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1433
+ &hba->vps->ondemand_data);
13821434 devfreq = devfreq_add_device(hba->dev,
1383
- &ufs_devfreq_profile,
1435
+ &hba->vps->devfreq_profile,
13841436 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1385
- NULL);
1437
+ &hba->vps->ondemand_data);
13861438 if (IS_ERR(devfreq)) {
13871439 ret = PTR_ERR(devfreq);
13881440 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
....@@ -1428,8 +1480,8 @@
14281480 unsigned long flags;
14291481 bool suspend = false;
14301482
1431
- if (!ufshcd_is_clkscaling_supported(hba))
1432
- return;
1483
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
1484
+ cancel_work_sync(&hba->clk_scaling.resume_work);
14331485
14341486 spin_lock_irqsave(hba->host->host_lock, flags);
14351487 if (!hba->clk_scaling.is_suspended) {
....@@ -1447,9 +1499,6 @@
14471499 unsigned long flags;
14481500 bool resume = false;
14491501
1450
- if (!ufshcd_is_clkscaling_supported(hba))
1451
- return;
1452
-
14531502 spin_lock_irqsave(hba->host->host_lock, flags);
14541503 if (hba->clk_scaling.is_suspended) {
14551504 resume = true;
....@@ -1466,7 +1515,7 @@
14661515 {
14671516 struct ufs_hba *hba = dev_get_drvdata(dev);
14681517
1469
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1518
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
14701519 }
14711520
14721521 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
....@@ -1474,22 +1523,25 @@
14741523 {
14751524 struct ufs_hba *hba = dev_get_drvdata(dev);
14761525 u32 value;
1477
- int err;
1526
+ int err = 0;
14781527
14791528 if (kstrtou32(buf, 0, &value))
14801529 return -EINVAL;
14811530
1531
+ down(&hba->host_sem);
1532
+ if (!ufshcd_is_user_access_allowed(hba)) {
1533
+ err = -EBUSY;
1534
+ goto out;
1535
+ }
1536
+
14821537 value = !!value;
1483
- if (value == hba->clk_scaling.is_allowed)
1538
+ if (value == hba->clk_scaling.is_enabled)
14841539 goto out;
14851540
14861541 pm_runtime_get_sync(hba->dev);
14871542 ufshcd_hold(hba, false);
14881543
1489
- cancel_work_sync(&hba->clk_scaling.suspend_work);
1490
- cancel_work_sync(&hba->clk_scaling.resume_work);
1491
-
1492
- hba->clk_scaling.is_allowed = value;
1544
+ hba->clk_scaling.is_enabled = value;
14931545
14941546 if (value) {
14951547 ufshcd_resume_clkscaling(hba);
....@@ -1504,10 +1556,11 @@
15041556 ufshcd_release(hba);
15051557 pm_runtime_put_sync(hba->dev);
15061558 out:
1507
- return count;
1559
+ up(&hba->host_sem);
1560
+ return err ? err : count;
15081561 }
15091562
1510
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1563
+static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
15111564 {
15121565 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
15131566 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
....@@ -1516,6 +1569,45 @@
15161569 hba->clk_scaling.enable_attr.attr.mode = 0644;
15171570 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
15181571 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1572
+}
1573
+
1574
+static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1575
+{
1576
+ if (hba->clk_scaling.enable_attr.attr.name)
1577
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1578
+}
1579
+
1580
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1581
+{
1582
+ char wq_name[sizeof("ufs_clkscaling_00")];
1583
+
1584
+ if (!ufshcd_is_clkscaling_supported(hba))
1585
+ return;
1586
+
1587
+ if (!hba->clk_scaling.min_gear)
1588
+ hba->clk_scaling.min_gear = UFS_HS_G1;
1589
+
1590
+ INIT_WORK(&hba->clk_scaling.suspend_work,
1591
+ ufshcd_clk_scaling_suspend_work);
1592
+ INIT_WORK(&hba->clk_scaling.resume_work,
1593
+ ufshcd_clk_scaling_resume_work);
1594
+
1595
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1596
+ hba->host->host_no);
1597
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1598
+
1599
+ hba->clk_scaling.is_initialized = true;
1600
+}
1601
+
1602
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1603
+{
1604
+ if (!hba->clk_scaling.is_initialized)
1605
+ return;
1606
+
1607
+ ufshcd_remove_clk_scaling_sysfs(hba);
1608
+ destroy_workqueue(hba->clk_scaling.workq);
1609
+ ufshcd_devfreq_remove(hba);
1610
+ hba->clk_scaling.is_initialized = false;
15191611 }
15201612
15211613 static void ufshcd_ungate_work(struct work_struct *work)
....@@ -1534,7 +1626,10 @@
15341626 }
15351627
15361628 spin_unlock_irqrestore(hba->host->host_lock, flags);
1629
+ ufshcd_hba_vreg_set_hpm(hba);
15371630 ufshcd_setup_clocks(hba, true);
1631
+
1632
+ ufshcd_enable_irq(hba);
15381633
15391634 /* Exit from hibern8 */
15401635 if (ufshcd_can_hibern8_during_gating(hba)) {
....@@ -1570,11 +1665,6 @@
15701665 goto out;
15711666 spin_lock_irqsave(hba->host->host_lock, flags);
15721667 hba->clk_gating.active_reqs++;
1573
-
1574
- if (ufshcd_eh_in_progress(hba)) {
1575
- spin_unlock_irqrestore(hba->host->host_lock, flags);
1576
- return 0;
1577
- }
15781668
15791669 start:
15801670 switch (hba->clk_gating.state) {
....@@ -1614,6 +1704,7 @@
16141704 * currently running. Hence, fall through to cancel gating
16151705 * work and to enable clocks.
16161706 */
1707
+ fallthrough;
16171708 case CLKS_OFF:
16181709 hba->clk_gating.state = REQ_CLKS_ON;
16191710 trace_ufshcd_clk_gating(dev_name(hba->dev),
....@@ -1625,6 +1716,7 @@
16251716 * fall through to check if we should wait for this
16261717 * work to be done or not.
16271718 */
1719
+ fallthrough;
16281720 case REQ_CLKS_ON:
16291721 if (async) {
16301722 rc = -EAGAIN;
....@@ -1653,6 +1745,7 @@
16531745 struct ufs_hba *hba = container_of(work, struct ufs_hba,
16541746 clk_gating.gate_work.work);
16551747 unsigned long flags;
1748
+ int ret;
16561749
16571750 spin_lock_irqsave(hba->host->host_lock, flags);
16581751 /*
....@@ -1662,7 +1755,7 @@
16621755 * state to CLKS_ON.
16631756 */
16641757 if (hba->clk_gating.is_suspended ||
1665
- (hba->clk_gating.state == REQ_CLKS_ON)) {
1758
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
16661759 hba->clk_gating.state = CLKS_ON;
16671760 trace_ufshcd_clk_gating(dev_name(hba->dev),
16681761 hba->clk_gating.state);
....@@ -1671,7 +1764,7 @@
16711764
16721765 if (hba->clk_gating.active_reqs
16731766 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1674
- || hba->lrb_in_use || hba->outstanding_tasks
1767
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
16751768 || hba->active_uic_cmd || hba->uic_async_done)
16761769 goto rel_lock;
16771770
....@@ -1679,8 +1772,11 @@
16791772
16801773 /* put the link into hibern8 mode before turning off clocks */
16811774 if (ufshcd_can_hibern8_during_gating(hba)) {
1682
- if (ufshcd_uic_hibern8_enter(hba)) {
1775
+ ret = ufshcd_uic_hibern8_enter(hba);
1776
+ if (ret) {
16831777 hba->clk_gating.state = CLKS_ON;
1778
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1779
+ __func__, ret);
16841780 trace_ufshcd_clk_gating(dev_name(hba->dev),
16851781 hba->clk_gating.state);
16861782 goto out;
....@@ -1688,12 +1784,12 @@
16881784 ufshcd_set_link_hibern8(hba);
16891785 }
16901786
1691
- if (!ufshcd_is_link_active(hba))
1692
- ufshcd_setup_clocks(hba, false);
1693
- else
1694
- /* If link is active, device ref_clk can't be switched off */
1695
- __ufshcd_setup_clocks(hba, false, true);
1787
+ ufshcd_disable_irq(hba);
16961788
1789
+ ufshcd_setup_clocks(hba, false);
1790
+
1791
+ /* Put the host controller in low power mode if possible */
1792
+ ufshcd_hba_vreg_set_lpm(hba);
16971793 /*
16981794 * In case you are here to cancel this work the gating state
16991795 * would be marked as REQ_CLKS_ON. In this case keep the state
....@@ -1723,11 +1819,11 @@
17231819
17241820 hba->clk_gating.active_reqs--;
17251821
1726
- if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1727
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1728
- || hba->lrb_in_use || hba->outstanding_tasks
1729
- || hba->active_uic_cmd || hba->uic_async_done
1730
- || ufshcd_eh_in_progress(hba))
1822
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1823
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1824
+ hba->outstanding_tasks ||
1825
+ hba->active_uic_cmd || hba->uic_async_done ||
1826
+ hba->clk_gating.state == CLKS_OFF)
17311827 return;
17321828
17331829 hba->clk_gating.state = REQ_CLKS_OFF;
....@@ -1789,68 +1885,24 @@
17891885 return -EINVAL;
17901886
17911887 value = !!value;
1888
+
1889
+ spin_lock_irqsave(hba->host->host_lock, flags);
17921890 if (value == hba->clk_gating.is_enabled)
17931891 goto out;
17941892
1795
- if (value) {
1796
- ufshcd_release(hba);
1797
- } else {
1798
- spin_lock_irqsave(hba->host->host_lock, flags);
1893
+ if (value)
1894
+ __ufshcd_release(hba);
1895
+ else
17991896 hba->clk_gating.active_reqs++;
1800
- spin_unlock_irqrestore(hba->host->host_lock, flags);
1801
- }
18021897
18031898 hba->clk_gating.is_enabled = value;
18041899 out:
1900
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
18051901 return count;
18061902 }
18071903
1808
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1904
+static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
18091905 {
1810
- char wq_name[sizeof("ufs_clkscaling_00")];
1811
-
1812
- if (!ufshcd_is_clkscaling_supported(hba))
1813
- return;
1814
-
1815
- INIT_WORK(&hba->clk_scaling.suspend_work,
1816
- ufshcd_clk_scaling_suspend_work);
1817
- INIT_WORK(&hba->clk_scaling.resume_work,
1818
- ufshcd_clk_scaling_resume_work);
1819
-
1820
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1821
- hba->host->host_no);
1822
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1823
-
1824
- ufshcd_clkscaling_init_sysfs(hba);
1825
-}
1826
-
1827
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1828
-{
1829
- if (!ufshcd_is_clkscaling_supported(hba))
1830
- return;
1831
-
1832
- destroy_workqueue(hba->clk_scaling.workq);
1833
- ufshcd_devfreq_remove(hba);
1834
-}
1835
-
1836
-static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1837
-{
1838
- char wq_name[sizeof("ufs_clk_gating_00")];
1839
-
1840
- if (!ufshcd_is_clkgating_allowed(hba))
1841
- return;
1842
-
1843
- hba->clk_gating.delay_ms = 150;
1844
- INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1845
- INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1846
-
1847
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1848
- hba->host->host_no);
1849
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1850
- WQ_MEM_RECLAIM);
1851
-
1852
- hba->clk_gating.is_enabled = true;
1853
-
18541906 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
18551907 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
18561908 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
....@@ -1868,61 +1920,167 @@
18681920 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
18691921 }
18701922
1871
-static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1923
+static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
18721924 {
1925
+ if (hba->clk_gating.delay_attr.attr.name)
1926
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1927
+ if (hba->clk_gating.enable_attr.attr.name)
1928
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1929
+}
1930
+
1931
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1932
+{
1933
+ char wq_name[sizeof("ufs_clk_gating_00")];
1934
+
18731935 if (!ufshcd_is_clkgating_allowed(hba))
18741936 return;
1875
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1876
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1937
+
1938
+ hba->clk_gating.state = CLKS_ON;
1939
+
1940
+ hba->clk_gating.delay_ms = 150;
1941
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1942
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1943
+
1944
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1945
+ hba->host->host_no);
1946
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1947
+ WQ_MEM_RECLAIM | WQ_HIGHPRI);
1948
+
1949
+ ufshcd_init_clk_gating_sysfs(hba);
1950
+
1951
+ hba->clk_gating.is_enabled = true;
1952
+ hba->clk_gating.is_initialized = true;
1953
+}
1954
+
1955
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1956
+{
1957
+ if (!hba->clk_gating.is_initialized)
1958
+ return;
1959
+ ufshcd_remove_clk_gating_sysfs(hba);
18771960 cancel_work_sync(&hba->clk_gating.ungate_work);
18781961 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
18791962 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1963
+ hba->clk_gating.is_initialized = false;
18801964 }
18811965
18821966 /* Must be called with host lock acquired */
18831967 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
18841968 {
18851969 bool queue_resume_work = false;
1970
+ ktime_t curr_t = ktime_get();
1971
+ unsigned long flags;
18861972
18871973 if (!ufshcd_is_clkscaling_supported(hba))
18881974 return;
18891975
1976
+ spin_lock_irqsave(hba->host->host_lock, flags);
18901977 if (!hba->clk_scaling.active_reqs++)
18911978 queue_resume_work = true;
18921979
1893
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1980
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
1981
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
18941982 return;
1983
+ }
18951984
18961985 if (queue_resume_work)
18971986 queue_work(hba->clk_scaling.workq,
18981987 &hba->clk_scaling.resume_work);
18991988
19001989 if (!hba->clk_scaling.window_start_t) {
1901
- hba->clk_scaling.window_start_t = jiffies;
1990
+ hba->clk_scaling.window_start_t = curr_t;
19021991 hba->clk_scaling.tot_busy_t = 0;
19031992 hba->clk_scaling.is_busy_started = false;
19041993 }
19051994
19061995 if (!hba->clk_scaling.is_busy_started) {
1907
- hba->clk_scaling.busy_start_t = ktime_get();
1996
+ hba->clk_scaling.busy_start_t = curr_t;
19081997 hba->clk_scaling.is_busy_started = true;
19091998 }
1999
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
19102000 }
19112001
19122002 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
19132003 {
19142004 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2005
+ unsigned long flags;
19152006
19162007 if (!ufshcd_is_clkscaling_supported(hba))
19172008 return;
19182009
2010
+ spin_lock_irqsave(hba->host->host_lock, flags);
2011
+ hba->clk_scaling.active_reqs--;
19192012 if (!hba->outstanding_reqs && scaling->is_busy_started) {
19202013 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
19212014 scaling->busy_start_t));
19222015 scaling->busy_start_t = 0;
19232016 scaling->is_busy_started = false;
19242017 }
2018
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
19252019 }
2020
+
2021
+static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2022
+{
2023
+ if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2024
+ return READ;
2025
+ else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2026
+ return WRITE;
2027
+ else
2028
+ return -EINVAL;
2029
+}
2030
+
2031
+static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2032
+ struct ufshcd_lrb *lrbp)
2033
+{
2034
+ struct ufs_hba_monitor *m = &hba->monitor;
2035
+
2036
+ return (m->enabled && lrbp && lrbp->cmd &&
2037
+ (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2038
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2039
+}
2040
+
2041
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2042
+{
2043
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2044
+ unsigned long flags;
2045
+
2046
+ spin_lock_irqsave(hba->host->host_lock, flags);
2047
+ if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2048
+ hba->monitor.busy_start_ts[dir] = ktime_get();
2049
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2050
+}
2051
+
2052
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2053
+{
2054
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2055
+ unsigned long flags;
2056
+
2057
+ spin_lock_irqsave(hba->host->host_lock, flags);
2058
+ if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2059
+ struct request *req = lrbp->cmd->request;
2060
+ struct ufs_hba_monitor *m = &hba->monitor;
2061
+ ktime_t now, inc, lat;
2062
+
2063
+ now = lrbp->compl_time_stamp;
2064
+ inc = ktime_sub(now, m->busy_start_ts[dir]);
2065
+ m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2066
+ m->nr_sec_rw[dir] += blk_rq_sectors(req);
2067
+
2068
+ /* Update latencies */
2069
+ m->nr_req[dir]++;
2070
+ lat = ktime_sub(now, lrbp->issue_time_stamp);
2071
+ m->lat_sum[dir] += lat;
2072
+ if (m->lat_max[dir] < lat || !m->lat_max[dir])
2073
+ m->lat_max[dir] = lat;
2074
+ if (m->lat_min[dir] > lat || !m->lat_min[dir])
2075
+ m->lat_min[dir] = lat;
2076
+
2077
+ m->nr_queued[dir]--;
2078
+ /* Push forward the busy start of monitor */
2079
+ m->busy_start_ts[dir] = now;
2080
+ }
2081
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2082
+}
2083
+
19262084 /**
19272085 * ufshcd_send_command - Send SCSI or device management commands
19282086 * @hba: per adapter instance
....@@ -1931,12 +2089,30 @@
19312089 static inline
19322090 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
19332091 {
1934
- hba->lrb[task_tag].issue_time_stamp = ktime_get();
1935
- hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
2092
+ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2093
+
2094
+ lrbp->issue_time_stamp = ktime_get();
2095
+ lrbp->compl_time_stamp = ktime_set(0, 0);
2096
+ trace_android_vh_ufs_send_command(hba, lrbp);
19362097 ufshcd_add_command_trace(hba, task_tag, "send");
19372098 ufshcd_clk_scaling_start_busy(hba);
1938
- __set_bit(task_tag, &hba->outstanding_reqs);
1939
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2099
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2100
+ ufshcd_start_monitor(hba, lrbp);
2101
+ if (hba->vops && hba->vops->setup_xfer_req)
2102
+ hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
2103
+ if (ufshcd_has_utrlcnr(hba)) {
2104
+ set_bit(task_tag, &hba->outstanding_reqs);
2105
+ ufshcd_writel(hba, 1 << task_tag,
2106
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
2107
+ } else {
2108
+ unsigned long flags;
2109
+
2110
+ spin_lock_irqsave(hba->host->host_lock, flags);
2111
+ set_bit(task_tag, &hba->outstanding_reqs);
2112
+ ufshcd_writel(hba, 1 << task_tag,
2113
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
2114
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2115
+ }
19402116 /* Make sure that doorbell is committed immediately */
19412117 wmb();
19422118 }
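As the hunk above shows, ufshcd_send_command() marks the slot in outstanding_reqs and then writes a one-hot mask (1 << task_tag) to the doorbell register, taking the host lock for both steps only when the controller lacks the UTRL completion notification register. A tiny user-space sketch of the one-hot doorbell idea, with variables standing in for the MMIO register and the driver bitmap, is shown below as an assumption-labelled illustration, not the driver's actual code path:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical doorbell model: bit N set means slot N has work posted. */
	static uint32_t outstanding_reqs;  /* driver-side bookkeeping          */
	static uint32_t doorbell_reg;      /* stands in for the MMIO register  */

	static void send_command(unsigned int tag)
	{
		outstanding_reqs |= 1u << tag;  /* remember the slot is in use  */
		doorbell_reg |= 1u << tag;      /* ring: one-hot write per slot */
	}

	int main(void)
	{
		send_command(3);
		send_command(7);
		printf("doorbell = 0x%08x\n", (unsigned int)doorbell_reg); /* 0x00000088 */
		return 0;
	}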
....@@ -1953,11 +2129,10 @@
19532129 int len_to_copy;
19542130
19552131 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1956
- len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2132
+ len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
19572133
1958
- memcpy(lrbp->sense_buffer,
1959
- lrbp->ucd_rsp_ptr->sr.sense_data,
1960
- min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
2134
+ memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2135
+ len_to_copy);
19612136 }
19622137 }
19632138
....@@ -1991,8 +2166,8 @@
19912166 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
19922167 } else {
19932168 dev_warn(hba->dev,
1994
- "%s: Response size is bigger than buffer",
1995
- __func__);
2169
+ "%s: rsp size %d is bigger than buffer size %d",
2170
+ __func__, resp_len, buf_len);
19962171 return -EINVAL;
19972172 }
19982173 }
....@@ -2003,15 +2178,27 @@
20032178 /**
20042179 * ufshcd_hba_capabilities - Read controller capabilities
20052180 * @hba: per adapter instance
2181
+ *
2182
+ * Return: 0 on success, negative on error.
20062183 */
2007
-static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2184
+static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
20082185 {
2186
+ int err;
2187
+
20092188 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
20102189
20112190 /* nutrs and nutmrs are 0 based values */
20122191 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
20132192 hba->nutmrs =
20142193 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2194
+ ufs_hba_add_info(hba)->reserved_slot = hba->nutrs - 1;
2195
+
2196
+ /* Read crypto capabilities */
2197
+ err = ufshcd_hba_init_crypto_capabilities(hba);
2198
+ if (err)
2199
+ dev_err(hba->dev, "crypto setup failed\n");
2200
+
2201
+ return err;
20152202 }
20162203
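ufshcd_hba_capabilities() decodes nutrs and nutmrs from the capabilities register; both fields are stored zero-based, hence the "+ 1" after masking and shifting. A small sketch of that decoding follows, with illustrative mask and shift values only — the real layout is defined by the UFSHCI specification:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative masks only -- not taken from the UFSHCI register layout. */
	#define CAP_TR_SLOTS_MASK   0x0000001Fu  /* transfer request slots, 0-based */
	#define CAP_TM_SLOTS_MASK   0x00070000u  /* task management slots, 0-based  */
	#define CAP_TM_SLOTS_SHIFT  16

	int main(void)
	{
		uint32_t capabilities = 0x0007001F;  /* example register value */
		unsigned int nutrs, nutmrs;

		/* Both fields are zero-based counts, hence the "+ 1". */
		nutrs  = (capabilities & CAP_TR_SLOTS_MASK) + 1;
		nutmrs = ((capabilities & CAP_TM_SLOTS_MASK) >> CAP_TM_SLOTS_SHIFT) + 1;

		printf("transfer slots: %u, task management slots: %u\n", nutrs, nutmrs);
		return 0;
	}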
20172204 /**
....@@ -2059,6 +2246,8 @@
20592246 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
20602247 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
20612248
2249
+ ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2250
+
20622251 /* Write UIC Cmd */
20632252 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
20642253 REG_UIC_COMMAND);
....@@ -2079,10 +2268,20 @@
20792268 unsigned long flags;
20802269
20812270 if (wait_for_completion_timeout(&uic_cmd->done,
2082
- msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2271
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
20832272 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2084
- else
2273
+ } else {
20852274 ret = -ETIMEDOUT;
2275
+ dev_err(hba->dev,
2276
+ "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2277
+ uic_cmd->command, uic_cmd->argument3);
2278
+
2279
+ if (!uic_cmd->cmd_active) {
2280
+ dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2281
+ __func__);
2282
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2283
+ }
2284
+ }
20862285
20872286 spin_lock_irqsave(hba->host->host_lock, flags);
20882287 hba->active_uic_cmd = NULL;
....@@ -2114,6 +2313,7 @@
21142313 if (completion)
21152314 init_completion(&uic_cmd->done);
21162315
2316
+ uic_cmd->cmd_active = 1;
21172317 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
21182318
21192319 return 0;
....@@ -2126,11 +2326,13 @@
21262326 *
21272327 * Returns 0 only if success.
21282328 */
2129
-static int
2130
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2329
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
21312330 {
21322331 int ret;
21332332 unsigned long flags;
2333
+
2334
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2335
+ return 0;
21342336
21352337 ufshcd_hold(hba, false);
21362338 mutex_lock(&hba->uic_cmd_mutex);
....@@ -2162,6 +2364,7 @@
21622364 struct scsi_cmnd *cmd;
21632365 int sg_segments;
21642366 int i;
2367
+ int err;
21652368
21662369 cmd = lrbp->cmd;
21672370 sg_segments = scsi_dma_map(cmd);
....@@ -2169,10 +2372,10 @@
21692372 return sg_segments;
21702373
21712374 if (sg_segments) {
2375
+
21722376 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
21732377 lrbp->utr_descriptor_ptr->prd_table_length =
2174
- cpu_to_le16((u16)(sg_segments *
2175
- hba->sg_entry_size));
2378
+ cpu_to_le16(sg_segments * hba->sg_entry_size);
21762379 else
21772380 lrbp->utr_descriptor_ptr->prd_table_length =
21782381 cpu_to_le16((u16) (sg_segments));
....@@ -2193,7 +2396,9 @@
21932396 lrbp->utr_descriptor_ptr->prd_table_length = 0;
21942397 }
21952398
2196
- return ufshcd_map_sg_crypto(hba, lrbp);
2399
+ err = 0;
2400
+ trace_android_vh_ufs_fill_prdt(hba, lrbp, sg_segments, &err);
2401
+ return err;
21972402 }
21982403
21992404 /**
....@@ -2205,7 +2410,7 @@
22052410 {
22062411 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
22072412
2208
- if (hba->ufs_version == UFSHCI_VERSION_10) {
2413
+ if (hba->ufs_version == ufshci_version(1, 0)) {
22092414 u32 rw;
22102415 rw = set & INTERRUPT_MASK_RW_VER_10;
22112416 set = rw | ((set ^ intrs) & intrs);
....@@ -2225,7 +2430,7 @@
22252430 {
22262431 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
22272432
2228
- if (hba->ufs_version == UFSHCI_VERSION_10) {
2433
+ if (hba->ufs_version == ufshci_version(1, 0)) {
22292434 u32 rw;
22302435 rw = (set & INTERRUPT_MASK_RW_VER_10) &
22312436 ~(intrs & INTERRUPT_MASK_RW_VER_10);
....@@ -2246,11 +2451,13 @@
22462451 * @cmd_dir: requests data direction
22472452 */
22482453 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2249
- u32 *upiu_flags, enum dma_data_direction cmd_dir)
2454
+ u8 *upiu_flags, enum dma_data_direction cmd_dir)
22502455 {
22512456 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
22522457 u32 data_direction;
22532458 u32 dword_0;
2459
+ u32 dword_1 = 0;
2460
+ u32 dword_3 = 0;
22542461
22552462 if (cmd_dir == DMA_FROM_DEVICE) {
22562463 data_direction = UTP_DEVICE_TO_HOST;
....@@ -2268,24 +2475,12 @@
22682475 if (lrbp->intr_cmd)
22692476 dword_0 |= UTP_REQ_DESC_INT_CMD;
22702477
2478
+ /* Prepare crypto related dwords */
2479
+ ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2480
+
22712481 /* Transfer request descriptor header fields */
2272
- if (ufshcd_lrbp_crypto_enabled(lrbp)) {
2273
-#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
2274
- dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
2275
- dword_0 |= lrbp->crypto_key_slot;
2276
- req_desc->header.dword_1 =
2277
- cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
2278
- req_desc->header.dword_3 =
2279
- cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
2280
-#endif /* CONFIG_SCSI_UFS_CRYPTO */
2281
- } else {
2282
- /* dword_1 and dword_3 are reserved, hence they are set to 0 */
2283
- req_desc->header.dword_1 = 0;
2284
- req_desc->header.dword_3 = 0;
2285
- }
2286
-
22872482 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2288
-
2483
+ req_desc->header.dword_1 = cpu_to_le32(dword_1);
22892484 /*
22902485 * assigning invalid value for command status. Controller
22912486 * updates OCS on command completion, with the command
....@@ -2293,6 +2488,7 @@
22932488 */
22942489 req_desc->header.dword_2 =
22952490 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2491
+ req_desc->header.dword_3 = cpu_to_le32(dword_3);
22962492
22972493 req_desc->prd_table_length = 0;
22982494 }
....@@ -2304,8 +2500,9 @@
23042500 * @upiu_flags: flags
23052501 */
23062502 static
2307
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2503
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
23082504 {
2505
+ struct scsi_cmnd *cmd = lrbp->cmd;
23092506 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
23102507 unsigned short cdb_len;
23112508
....@@ -2319,12 +2516,11 @@
23192516 /* Total EHS length and Data segment length will be zero */
23202517 ucd_req_ptr->header.dword_2 = 0;
23212518
2322
- ucd_req_ptr->sc.exp_data_transfer_len =
2323
- cpu_to_be32(lrbp->cmd->sdb.length);
2519
+ ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
23242520
2325
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2326
- memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2327
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2521
+ cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2522
+ memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2523
+ memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
23282524
23292525 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
23302526 }
....@@ -2337,12 +2533,11 @@
23372533 * @upiu_flags: flags
23382534 */
23392535 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2340
- struct ufshcd_lrb *lrbp, u32 upiu_flags)
2536
+ struct ufshcd_lrb *lrbp, u8 upiu_flags)
23412537 {
23422538 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
23432539 struct ufs_query *query = &hba->dev_cmd.query;
23442540 u16 len = be16_to_cpu(query->request.upiu_req.length);
2345
- u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
23462541
23472542 /* Query request header */
23482543 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
....@@ -2364,7 +2559,7 @@
23642559
23652560 /* Copy the Descriptor */
23662561 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2367
- memcpy(descp, query->descriptor, len);
2562
+ memcpy(ucd_req_ptr + 1, query->descriptor, len);
23682563
23692564 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
23702565 }
....@@ -2387,18 +2582,18 @@
23872582 }
23882583
23892584 /**
2390
- * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
2585
+ * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
23912586 * for Device Management Purposes
23922587 * @hba: per adapter instance
23932588 * @lrbp: pointer to local reference block
23942589 */
2395
-static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2590
+static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2591
+ struct ufshcd_lrb *lrbp)
23962592 {
2397
- u32 upiu_flags;
2593
+ u8 upiu_flags;
23982594 int ret = 0;
23992595
2400
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2401
- (hba->ufs_version == UFSHCI_VERSION_11))
2596
+ if (hba->ufs_version <= ufshci_version(1, 1))
24022597 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
24032598 else
24042599 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
....@@ -2422,11 +2617,10 @@
24222617 */
24232618 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
24242619 {
2425
- u32 upiu_flags;
2620
+ u8 upiu_flags;
24262621 int ret = 0;
24272622
2428
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2429
- (hba->ufs_version == UFSHCI_VERSION_11))
2623
+ if (hba->ufs_version <= ufshci_version(1, 1))
24302624 lrbp->command_type = UTP_CMD_TYPE_SCSI;
24312625 else
24322626 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
....@@ -2453,6 +2647,28 @@
24532647 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
24542648 }
24552649
2650
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2651
+{
2652
+ struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2653
+ i * sizeof_utp_transfer_cmd_desc(hba);
2654
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2655
+ dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2656
+ i * sizeof_utp_transfer_cmd_desc(hba);
2657
+ u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2658
+ response_upiu);
2659
+ u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2660
+
2661
+ lrb->utr_descriptor_ptr = utrdlp + i;
2662
+ lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2663
+ i * sizeof(struct utp_transfer_req_desc);
2664
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
2665
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2666
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2667
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2668
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2669
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2670
+}
2671
+
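ufshcd_init_lrb() derives each slot's virtual and DMA addresses from the command-descriptor base plus i times the per-descriptor size, and uses offsetof() to locate the response UPIU and PRD table inside each descriptor. The sketch below shows the same base-plus-offset layout arithmetic on a simplified, hypothetical descriptor structure:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for one transfer command descriptor. */
	struct cmd_desc {
		uint8_t request[64];
		uint8_t response[64];
		uint8_t prd_table[128];
	};

	int main(void)
	{
		static struct cmd_desc pool[8];   /* contiguous per-slot descriptors */
		size_t rsp_off = offsetof(struct cmd_desc, response);
		size_t prd_off = offsetof(struct cmd_desc, prd_table);
		int i = 3;                        /* example slot index */

		/* Base + i * element size selects slot i; offsets locate sub-areas. */
		uint8_t *slot = (uint8_t *)pool + i * sizeof(struct cmd_desc);
		uint8_t *rsp  = slot + rsp_off;
		uint8_t *prd  = slot + prd_off;

		printf("slot %d: desc=%p rsp=%p prd=%p\n", i,
		       (void *)slot, (void *)rsp, (void *)prd);
		return 0;
	}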
24562672 /**
24572673 * ufshcd_queuecommand - main entry point for SCSI requests
24582674 * @host: SCSI host pointer
....@@ -2464,7 +2680,6 @@
24642680 {
24652681 struct ufshcd_lrb *lrbp;
24662682 struct ufs_hba *hba;
2467
- unsigned long flags;
24682683 int tag;
24692684 int err = 0;
24702685
....@@ -2481,93 +2696,92 @@
24812696 if (!down_read_trylock(&hba->clk_scaling_lock))
24822697 return SCSI_MLQUEUE_HOST_BUSY;
24832698
2484
- spin_lock_irqsave(hba->host->host_lock, flags);
24852699 switch (hba->ufshcd_state) {
24862700 case UFSHCD_STATE_OPERATIONAL:
2701
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
24872702 break;
2488
- case UFSHCD_STATE_EH_SCHEDULED:
2703
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2704
+ /*
2705
+ * pm_runtime_get_sync() is used at error handling preparation
2706
+ * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2707
+ * PM ops, it can never be finished if we let SCSI layer keep
2708
+ * retrying it, which gets err handler stuck forever. Neither
2709
+	 * can we let the scsi cmd pass through, because UFS is in a bad
2710
+	 * state and the scsi cmd may eventually time out, which would keep
2711
+	 * the err handler blocked for too long. So just fail the scsi cmd
2712
+	 * sent from PM ops; the err handler can recover the PM error anyway.
2713
+ */
2714
+ if (hba->pm_op_in_progress) {
2715
+ hba->force_reset = true;
2716
+ set_host_byte(cmd, DID_BAD_TARGET);
2717
+ cmd->scsi_done(cmd);
2718
+ goto out;
2719
+ }
2720
+ fallthrough;
24892721 case UFSHCD_STATE_RESET:
24902722 err = SCSI_MLQUEUE_HOST_BUSY;
2491
- goto out_unlock;
2723
+ goto out;
24922724 case UFSHCD_STATE_ERROR:
24932725 set_host_byte(cmd, DID_ERROR);
24942726 cmd->scsi_done(cmd);
2495
- goto out_unlock;
2727
+ goto out;
24962728 default:
24972729 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
24982730 __func__, hba->ufshcd_state);
24992731 set_host_byte(cmd, DID_BAD_TARGET);
25002732 cmd->scsi_done(cmd);
2501
- goto out_unlock;
2502
- }
2503
-
2504
- /* if error handling is in progress, don't issue commands */
2505
- if (ufshcd_eh_in_progress(hba)) {
2506
- set_host_byte(cmd, DID_ERROR);
2507
- cmd->scsi_done(cmd);
2508
- goto out_unlock;
2509
- }
2510
- spin_unlock_irqrestore(hba->host->host_lock, flags);
2511
-
2512
- hba->req_abort_count = 0;
2513
-
2514
- /* acquire the tag to make sure device cmds don't use it */
2515
- if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2516
- /*
2517
- * Dev manage command in progress, requeue the command.
2518
- * Requeuing the command helps in cases where the request *may*
2519
- * find different tag instead of waiting for dev manage command
2520
- * completion.
2521
- */
2522
- err = SCSI_MLQUEUE_HOST_BUSY;
25232733 goto out;
25242734 }
2735
+
2736
+ hba->req_abort_count = 0;
25252737
25262738 err = ufshcd_hold(hba, true);
25272739 if (err) {
25282740 err = SCSI_MLQUEUE_HOST_BUSY;
2529
- clear_bit_unlock(tag, &hba->lrb_in_use);
25302741 goto out;
25312742 }
2532
- WARN_ON(hba->clk_gating.state != CLKS_ON);
2743
+ WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2744
+ (hba->clk_gating.state != CLKS_ON));
25332745
25342746 lrbp = &hba->lrb[tag];
2535
-
25362747 WARN_ON(lrbp->cmd);
25372748 lrbp->cmd = cmd;
2538
- lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2749
+ lrbp->sense_bufflen = UFS_SENSE_SIZE;
25392750 lrbp->sense_buffer = cmd->sense_buffer;
25402751 lrbp->task_tag = tag;
25412752 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
25422753 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
25432754
2544
- err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp);
2755
+ ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2756
+
2757
+ trace_android_vh_ufs_prepare_command(hba, cmd->request, lrbp, &err);
25452758 if (err) {
2546
- ufshcd_release(hba);
25472759 lrbp->cmd = NULL;
2548
- clear_bit_unlock(tag, &hba->lrb_in_use);
2760
+ ufshcd_release(hba);
25492761 goto out;
25502762 }
2763
+
25512764 lrbp->req_abort_skip = false;
2765
+
2766
+ err = ufshpb_prep(hba, lrbp);
2767
+ if (err == -EAGAIN) {
2768
+ lrbp->cmd = NULL;
2769
+ ufshcd_release(hba);
2770
+ goto out;
2771
+ }
25522772
25532773 ufshcd_comp_scsi_upiu(hba, lrbp);
25542774
25552775 err = ufshcd_map_sg(hba, lrbp);
25562776 if (err) {
2557
- ufshcd_release(hba);
25582777 lrbp->cmd = NULL;
2559
- clear_bit_unlock(tag, &hba->lrb_in_use);
2778
+ ufshcd_release(hba);
25602779 goto out;
25612780 }
25622781 /* Make sure descriptors are ready before ringing the doorbell */
25632782 wmb();
25642783
2565
- /* issue command to the controller */
2566
- spin_lock_irqsave(hba->host->host_lock, flags);
2567
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
25682784 ufshcd_send_command(hba, tag);
2569
-out_unlock:
2570
- spin_unlock_irqrestore(hba->host->host_lock, flags);
25712785 out:
25722786 up_read(&hba->clk_scaling_lock);
25732787 return err;
....@@ -2582,12 +2796,10 @@
25822796 lrbp->task_tag = tag;
25832797 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
25842798 lrbp->intr_cmd = true; /* No interrupt aggregation */
2585
-#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
2586
- lrbp->crypto_enable = false; /* No crypto operations */
2587
-#endif
2799
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
25882800 hba->dev_cmd.type = cmd_type;
25892801
2590
- return ufshcd_comp_devman_upiu(hba, lrbp);
2802
+ return ufshcd_compose_devman_upiu(hba, lrbp);
25912803 }
25922804
25932805 static int
....@@ -2608,7 +2820,7 @@
26082820 */
26092821 err = ufshcd_wait_for_register(hba,
26102822 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2611
- mask, ~mask, 1000, 1000, true);
2823
+ mask, ~mask, 1000, 1000);
26122824
26132825 return err;
26142826 }
....@@ -2707,48 +2919,10 @@
27072919 }
27082920
27092921 /**
2710
- * ufshcd_get_dev_cmd_tag - Get device management command tag
2711
- * @hba: per-adapter instance
2712
- * @tag_out: pointer to variable with available slot value
2713
- *
2714
- * Get a free slot and lock it until device management command
2715
- * completes.
2716
- *
2717
- * Returns false if free slot is unavailable for locking, else
2718
- * return true with tag value in @tag.
2719
- */
2720
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2721
-{
2722
- int tag;
2723
- bool ret = false;
2724
- unsigned long tmp;
2725
-
2726
- if (!tag_out)
2727
- goto out;
2728
-
2729
- do {
2730
- tmp = ~hba->lrb_in_use;
2731
- tag = find_last_bit(&tmp, hba->nutrs);
2732
- if (tag >= hba->nutrs)
2733
- goto out;
2734
- } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2735
-
2736
- *tag_out = tag;
2737
- ret = true;
2738
-out:
2739
- return ret;
2740
-}
2741
-
2742
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2743
-{
2744
- clear_bit_unlock(tag, &hba->lrb_in_use);
2745
-}
2746
-
2747
-/**
27482922 * ufshcd_exec_dev_cmd - API for sending device management requests
27492923 * @hba: UFS hba
27502924 * @cmd_type: specifies the type (NOP, Query...)
2751
- * @timeout: time in seconds
2925
+ * @timeout: timeout in milliseconds
27522926 *
27532927 * NOTE: Since there is only one available tag for device management commands,
27542928 * it is expected you hold the hba->dev_cmd.lock mutex.
....@@ -2756,46 +2930,34 @@
27562930 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
27572931 enum dev_cmd_type cmd_type, int timeout)
27582932 {
2933
+ DECLARE_COMPLETION_ONSTACK(wait);
2934
+ const u32 tag = ufs_hba_add_info(hba)->reserved_slot;
27592935 struct ufshcd_lrb *lrbp;
27602936 int err;
2761
- int tag;
2762
- struct completion wait;
2763
- unsigned long flags;
2937
+
2938
+ /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */
2939
+ lockdep_assert_held(&hba->dev_cmd.lock);
27642940
27652941 down_read(&hba->clk_scaling_lock);
27662942
2767
- /*
2768
- * Get free slot, sleep if slots are unavailable.
2769
- * Even though we use wait_event() which sleeps indefinitely,
2770
- * the maximum wait time is bounded by SCSI request timeout.
2771
- */
2772
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2773
-
2774
- init_completion(&wait);
27752943 lrbp = &hba->lrb[tag];
27762944 WARN_ON(lrbp->cmd);
27772945 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
27782946 if (unlikely(err))
2779
- goto out_put_tag;
2947
+ goto out;
27802948
27812949 hba->dev_cmd.complete = &wait;
27822950
27832951 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
27842952 /* Make sure descriptors are ready before ringing the doorbell */
27852953 wmb();
2786
- spin_lock_irqsave(hba->host->host_lock, flags);
2787
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2954
+
27882955 ufshcd_send_command(hba, tag);
2789
- spin_unlock_irqrestore(hba->host->host_lock, flags);
2790
-
27912956 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2792
-
27932957 ufshcd_add_query_upiu_trace(hba, tag,
27942958 err ? "query_complete_err" : "query_complete");
27952959
2796
-out_put_tag:
2797
- ufshcd_put_dev_cmd_tag(hba, tag);
2798
- wake_up(&hba->dev_cmd.tag_wq);
2960
+out:
27992961 up_read(&hba->clk_scaling_lock);
28002962 return err;
28012963 }
....@@ -2824,14 +2986,14 @@
28242986 (*request)->upiu_req.selector = selector;
28252987 }
28262988
2827
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2828
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2989
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
2990
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
28292991 {
28302992 int ret;
28312993 int retries;
28322994
28332995 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2834
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2996
+ ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
28352997 if (ret)
28362998 dev_dbg(hba->dev,
28372999 "%s: failed with error %d, retries %d\n",
....@@ -2846,22 +3008,24 @@
28463008 __func__, opcode, idn, ret, retries);
28473009 return ret;
28483010 }
3011
+EXPORT_SYMBOL_GPL(ufshcd_query_flag_retry);
28493012
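ufshcd_query_flag_retry() retries a failing flag query up to QUERY_REQ_RETRIES times, logging each attempt quietly and only reporting an error once all attempts are exhausted. A generic user-space version of that bounded-retry shape, with a stubbed operation, is sketched below under those assumptions:

	#include <stdio.h>

	#define QUERY_RETRIES 3

	/* Hypothetical operation: returns 0 on success, non-zero on failure. */
	static int do_query(int attempt)
	{
		return attempt < 2 ? -1 : 0;  /* fail twice, then succeed */
	}

	static int query_with_retry(void)
	{
		int ret = -1;
		int i;

		for (i = 0; i < QUERY_RETRIES; i++) {
			ret = do_query(i);
			if (!ret)
				break;                /* success: stop retrying early */
			fprintf(stderr, "query failed (attempt %d)\n", i + 1);
		}
		if (ret)
			fprintf(stderr, "query failed after %d attempts\n", QUERY_RETRIES);
		return ret;
	}

	int main(void)
	{
		return query_with_retry();
	}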
28503013 /**
28513014 * ufshcd_query_flag() - API function for sending flag query requests
28523015 * @hba: per-adapter instance
28533016 * @opcode: flag query to perform
28543017 * @idn: flag idn to access
3018
+ * @index: flag index to access
28553019 * @flag_res: the flag value after the query request completes
28563020 *
28573021 * Returns 0 for success, non-zero in case of failure
28583022 */
28593023 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2860
- enum flag_idn idn, bool *flag_res)
3024
+ enum flag_idn idn, u8 index, bool *flag_res)
28613025 {
28623026 struct ufs_query_req *request = NULL;
28633027 struct ufs_query_res *response = NULL;
2864
- int err, index = 0, selector = 0;
3028
+ int err, selector = 0;
28653029 int timeout = QUERY_REQ_TIMEOUT;
28663030
28673031 BUG_ON(!hba);
....@@ -2913,6 +3077,7 @@
29133077 ufshcd_release(hba);
29143078 return err;
29153079 }
3080
+EXPORT_SYMBOL_GPL(ufshcd_query_flag);
29163081
29173082 /**
29183083 * ufshcd_query_attr - API function for sending attribute requests
....@@ -2934,13 +3099,13 @@
29343099
29353100 BUG_ON(!hba);
29363101
2937
- ufshcd_hold(hba, false);
29383102 if (!attr_val) {
29393103 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
29403104 __func__, opcode);
2941
- err = -EINVAL;
2942
- goto out;
3105
+ return -EINVAL;
29433106 }
3107
+
3108
+ ufshcd_hold(hba, false);
29443109
29453110 mutex_lock(&hba->dev_cmd.lock);
29463111 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
....@@ -2973,10 +3138,10 @@
29733138
29743139 out_unlock:
29753140 mutex_unlock(&hba->dev_cmd.lock);
2976
-out:
29773141 ufshcd_release(hba);
29783142 return err;
29793143 }
3144
+EXPORT_SYMBOL_GPL(ufshcd_query_attr);
29803145
29813146 /**
29823147 * ufshcd_query_attr_retry() - API function for sending query
....@@ -2991,14 +3156,14 @@
29913156 *
29923157 * Returns 0 for success, non-zero in case of failure
29933158 */
2994
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3159
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
29953160 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
29963161 u32 *attr_val)
29973162 {
29983163 int ret = 0;
29993164 u32 retries;
30003165
3001
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3166
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
30023167 ret = ufshcd_query_attr(hba, opcode, idn, index,
30033168 selector, attr_val);
30043169 if (ret)
....@@ -3014,6 +3179,7 @@
30143179 __func__, idn, ret, QUERY_REQ_RETRIES);
30153180 return ret;
30163181 }
3182
+EXPORT_SYMBOL_GPL(ufshcd_query_attr_retry);
30173183
30183184 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
30193185 enum query_opcode opcode, enum desc_idn idn, u8 index,
....@@ -3025,20 +3191,19 @@
30253191
30263192 BUG_ON(!hba);
30273193
3028
- ufshcd_hold(hba, false);
30293194 if (!desc_buf) {
30303195 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
30313196 __func__, opcode);
3032
- err = -EINVAL;
3033
- goto out;
3197
+ return -EINVAL;
30343198 }
30353199
30363200 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
30373201 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
30383202 __func__, *buf_len);
3039
- err = -EINVAL;
3040
- goto out;
3203
+ return -EINVAL;
30413204 }
3205
+
3206
+ ufshcd_hold(hba, false);
30423207
30433208 mutex_lock(&hba->dev_cmd.lock);
30443209 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
....@@ -3074,7 +3239,6 @@
30743239 out_unlock:
30753240 hba->dev_cmd.query.descriptor = NULL;
30763241 mutex_unlock(&hba->dev_cmd.lock);
3077
-out:
30783242 ufshcd_release(hba);
30793243 return err;
30803244 }
....@@ -3111,95 +3275,38 @@
31113275
31123276 return err;
31133277 }
3114
-
3115
-/**
3116
- * ufshcd_read_desc_length - read the specified descriptor length from header
3117
- * @hba: Pointer to adapter instance
3118
- * @desc_id: descriptor idn value
3119
- * @desc_index: descriptor index
3120
- * @desc_length: pointer to variable to read the length of descriptor
3121
- *
3122
- * Return 0 in case of success, non-zero otherwise
3123
- */
3124
-static int ufshcd_read_desc_length(struct ufs_hba *hba,
3125
- enum desc_idn desc_id,
3126
- int desc_index,
3127
- int *desc_length)
3128
-{
3129
- int ret;
3130
- u8 header[QUERY_DESC_HDR_SIZE];
3131
- int header_len = QUERY_DESC_HDR_SIZE;
3132
-
3133
- if (desc_id >= QUERY_DESC_IDN_MAX)
3134
- return -EINVAL;
3135
-
3136
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3137
- desc_id, desc_index, 0, header,
3138
- &header_len);
3139
-
3140
- if (ret) {
3141
- dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3142
- __func__, desc_id);
3143
- return ret;
3144
- } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3145
- dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3146
- __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3147
- desc_id);
3148
- ret = -EINVAL;
3149
- }
3150
-
3151
- *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3152
- return ret;
3153
-
3154
-}
3278
+EXPORT_SYMBOL_GPL(ufshcd_query_descriptor_retry);
31553279
31563280 /**
31573281 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
31583282 * @hba: Pointer to adapter instance
31593283 * @desc_id: descriptor idn value
31603284 * @desc_len: mapped desc length (out)
3161
- *
3162
- * Return 0 in case of success, non-zero otherwise
31633285 */
3164
-int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3165
- enum desc_idn desc_id, int *desc_len)
3286
+void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3287
+ int *desc_len)
31663288 {
3167
- switch (desc_id) {
3168
- case QUERY_DESC_IDN_DEVICE:
3169
- *desc_len = hba->desc_size.dev_desc;
3170
- break;
3171
- case QUERY_DESC_IDN_POWER:
3172
- *desc_len = hba->desc_size.pwr_desc;
3173
- break;
3174
- case QUERY_DESC_IDN_GEOMETRY:
3175
- *desc_len = hba->desc_size.geom_desc;
3176
- break;
3177
- case QUERY_DESC_IDN_CONFIGURATION:
3178
- *desc_len = hba->desc_size.conf_desc;
3179
- break;
3180
- case QUERY_DESC_IDN_UNIT:
3181
- *desc_len = hba->desc_size.unit_desc;
3182
- break;
3183
- case QUERY_DESC_IDN_INTERCONNECT:
3184
- *desc_len = hba->desc_size.interc_desc;
3185
- break;
3186
- case QUERY_DESC_IDN_STRING:
3187
- *desc_len = QUERY_DESC_MAX_SIZE;
3188
- break;
3189
- case QUERY_DESC_IDN_HEALTH:
3190
- *desc_len = hba->desc_size.hlth_desc;
3191
- break;
3192
- case QUERY_DESC_IDN_RFU_0:
3193
- case QUERY_DESC_IDN_RFU_1:
3289
+ if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3290
+ desc_id == QUERY_DESC_IDN_RFU_1)
31943291 *desc_len = 0;
3195
- break;
3196
- default:
3197
- *desc_len = 0;
3198
- return -EINVAL;
3199
- }
3200
- return 0;
3292
+ else
3293
+ *desc_len = hba->desc_size[desc_id];
32013294 }
32023295 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3296
+
3297
+static void ufshcd_update_desc_length(struct ufs_hba *hba,
3298
+ enum desc_idn desc_id, int desc_index,
3299
+ unsigned char desc_len)
3300
+{
3301
+ if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3302
+ desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3303
+ /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3304
+	 * than the RPMB unit; however, both descriptors share the same
3305
+	 * desc_idn. To cover both unit descriptors with one length, we
3306
+ * choose the normal unit descriptor length by desc_index.
3307
+ */
3308
+ hba->desc_size[desc_id] = desc_len;
3309
+}
32033310
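With this change, ufshcd_map_desc_id_to_length() simply indexes hba->desc_size[], and ufshcd_update_desc_length() overwrites the conservative default once the device reports the real header length. A simplified sketch of that lookup-with-lazy-update pattern, with hypothetical names and without the STRING/RPMB special cases:

	#include <stdio.h>

	#define DESC_IDN_MAX   8
	#define DESC_MAX_SIZE  255  /* conservative default until the device reports one */

	static unsigned int desc_size[DESC_IDN_MAX];

	/* Shrink the cached length once the device-reported length is known. */
	static void update_desc_length(int desc_id, unsigned int reported_len)
	{
		if (desc_size[desc_id] == DESC_MAX_SIZE)
			desc_size[desc_id] = reported_len;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < DESC_IDN_MAX; i++)
			desc_size[i] = DESC_MAX_SIZE;

		update_desc_length(2, 0x59);  /* example: a device-reported length */
		printf("desc 2 length: %u\n", desc_size[2]);
		return 0;
	}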
32043311 /**
32053312 * ufshcd_read_desc_param - read the specified descriptor parameter
....@@ -3228,21 +3335,22 @@
32283335 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
32293336 return -EINVAL;
32303337
3231
- /* Get the max length of descriptor from structure filled up at probe
3232
- * time.
3233
- */
3234
- ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3338
+ /* Get the length of descriptor */
3339
+ ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3340
+ if (!buff_len) {
3341
+ dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3342
+ return -EINVAL;
3343
+ }
32353344
3236
- /* Sanity checks */
3237
- if (ret || !buff_len) {
3238
- dev_err(hba->dev, "%s: Failed to get full descriptor length",
3239
- __func__);
3240
- return ret;
3345
+ if (param_offset >= buff_len) {
3346
+ dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3347
+ __func__, param_offset, desc_id, buff_len);
3348
+ return -EINVAL;
32413349 }
32423350
32433351 /* Check whether we need temp memory */
32443352 if (param_offset != 0 || param_size < buff_len) {
3245
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
3353
+ desc_buf = kzalloc(buff_len, GFP_KERNEL);
32463354 if (!desc_buf)
32473355 return -ENOMEM;
32483356 } else {
....@@ -3256,95 +3364,109 @@
32563364 desc_buf, &buff_len);
32573365
32583366 if (ret) {
3259
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3367
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
32603368 __func__, desc_id, desc_index, param_offset, ret);
32613369 goto out;
32623370 }
32633371
32643372 /* Sanity check */
32653373 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3266
- dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3374
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
32673375 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
32683376 ret = -EINVAL;
32693377 goto out;
32703378 }
32713379
3272
- /* Check wherher we will not copy more data, than available */
3273
- if (is_kmalloc && param_size > buff_len)
3274
- param_size = buff_len;
3380
+ /* Update descriptor length */
3381
+ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3382
+ ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
32753383
3276
- if (is_kmalloc)
3277
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3384
+ if (is_kmalloc) {
3385
+ /* Make sure we don't copy more data than available */
3386
+ if (param_offset >= buff_len)
3387
+ ret = -EINVAL;
3388
+ else
3389
+ memcpy(param_read_buf, &desc_buf[param_offset],
3390
+ min_t(u32, param_size, buff_len - param_offset));
3391
+ }
32783392 out:
32793393 if (is_kmalloc)
32803394 kfree(desc_buf);
32813395 return ret;
32823396 }
3397
+EXPORT_SYMBOL_GPL(ufshcd_read_desc_param);
32833398
3284
-static inline int ufshcd_read_desc(struct ufs_hba *hba,
3285
- enum desc_idn desc_id,
3286
- int desc_index,
3287
- u8 *buf,
3288
- u32 size)
3289
-{
3290
- return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3291
-}
3399
+/**
3400
+ * struct uc_string_id - unicode string
3401
+ *
3402
+ * @len: size of this descriptor inclusive
3403
+ * @type: descriptor type
3404
+ * @uc: unicode string character
3405
+ */
3406
+struct uc_string_id {
3407
+ u8 len;
3408
+ u8 type;
3409
+ wchar_t uc[];
3410
+} __packed;
32923411
3293
-static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3294
- u8 *buf,
3295
- u32 size)
3412
+/* replace non-printable or non-ASCII characters with spaces */
3413
+static inline char ufshcd_remove_non_printable(u8 ch)
32963414 {
3297
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3298
-}
3299
-
3300
-static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3301
-{
3302
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3415
+ return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
33033416 }
33043417
33053418 /**
33063419 * ufshcd_read_string_desc - read string descriptor
33073420 * @hba: pointer to adapter instance
33083421 * @desc_index: descriptor index
3309
- * @buf: pointer to buffer where descriptor would be read
3310
- * @size: size of buf
3422
+ * @buf: pointer to buffer where the descriptor will be read;
3423
+ * the caller should free the memory.
33113424 * @ascii: if true convert from unicode to ascii characters
3425
+ * null terminated string.
33123426 *
3313
- * Return 0 in case of success, non-zero otherwise
3427
+ * Return:
3428
+ * * string size on success.
3429
+ * * -ENOMEM: on allocation failure
3430
+ * * -EINVAL: on a wrong parameter
33143431 */
3315
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3316
- u8 *buf, u32 size, bool ascii)
3432
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3433
+ u8 **buf, bool ascii)
33173434 {
3318
- int err = 0;
3435
+ struct uc_string_id *uc_str;
3436
+ u8 *str;
3437
+ int ret;
33193438
3320
- err = ufshcd_read_desc(hba,
3321
- QUERY_DESC_IDN_STRING, desc_index, buf, size);
3439
+ if (!buf)
3440
+ return -EINVAL;
33223441
3323
- if (err) {
3324
- dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3325
- __func__, QUERY_REQ_RETRIES, err);
3442
+ uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3443
+ if (!uc_str)
3444
+ return -ENOMEM;
3445
+
3446
+ ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3447
+ (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3448
+ if (ret < 0) {
3449
+ dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3450
+ QUERY_REQ_RETRIES, ret);
3451
+ str = NULL;
3452
+ goto out;
3453
+ }
3454
+
3455
+ if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3456
+ dev_dbg(hba->dev, "String Desc is of zero length\n");
3457
+ str = NULL;
3458
+ ret = 0;
33263459 goto out;
33273460 }
33283461
33293462 if (ascii) {
3330
- int desc_len;
3331
- int ascii_len;
3463
+ ssize_t ascii_len;
33323464 int i;
3333
- char *buff_ascii;
3334
-
3335
- desc_len = buf[0];
33363465 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3337
- ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3338
- if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3339
- dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3340
- __func__);
3341
- err = -ENOMEM;
3342
- goto out;
3343
- }
3344
-
3345
- buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3346
- if (!buff_ascii) {
3347
- err = -ENOMEM;
3466
+ ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3467
+ str = kzalloc(ascii_len, GFP_KERNEL);
3468
+ if (!str) {
3469
+ ret = -ENOMEM;
33483470 goto out;
33493471 }
33503472
....@@ -3352,22 +3474,28 @@
33523474 * the descriptor contains string in UTF16 format
33533475 * we need to convert to utf-8 so it can be displayed
33543476 */
3355
- utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3356
- desc_len - QUERY_DESC_HDR_SIZE,
3357
- UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3477
+ ret = utf16s_to_utf8s(uc_str->uc,
3478
+ uc_str->len - QUERY_DESC_HDR_SIZE,
3479
+ UTF16_BIG_ENDIAN, str, ascii_len);
33583480
33593481 /* replace non-printable or non-ASCII characters with spaces */
3360
- for (i = 0; i < ascii_len; i++)
3361
- ufshcd_remove_non_printable(&buff_ascii[i]);
3482
+ for (i = 0; i < ret; i++)
3483
+ str[i] = ufshcd_remove_non_printable(str[i]);
33623484
3363
- memset(buf + QUERY_DESC_HDR_SIZE, 0,
3364
- size - QUERY_DESC_HDR_SIZE);
3365
- memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3366
- buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3367
- kfree(buff_ascii);
3485
+ str[ret++] = '\0';
3486
+
3487
+ } else {
3488
+ str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3489
+ if (!str) {
3490
+ ret = -ENOMEM;
3491
+ goto out;
3492
+ }
3493
+ ret = uc_str->len;
33683494 }
33693495 out:
3370
- return err;
3496
+ *buf = str;
3497
+ kfree(uc_str);
3498
+ return ret;
33713499 }
33723500
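The string descriptor stores UTF-16BE characters after a two-byte header; the ascii path above converts them with utf16s_to_utf8s(), replaces non-printable characters with spaces and null-terminates the result. The user-space sketch below collapses the conversion and the filtering into a single pass and keeps only the printable 7-bit subset, so it is an approximation of the driver's behaviour rather than a reimplementation of it:

	#include <stdint.h>
	#include <stdio.h>

	/* Convert a UTF-16BE payload to printable ASCII, one byte per code unit.
	 * Non-ASCII or non-printable code units become spaces, as in the driver. */
	static size_t utf16be_to_ascii(const uint8_t *uc, size_t uc_len,
				       char *out, size_t out_len)
	{
		size_t i, n = 0;

		for (i = 0; i + 1 < uc_len && n + 1 < out_len; i += 2) {
			uint16_t c = (uint16_t)(uc[i] << 8) | uc[i + 1];

			out[n++] = (c >= 0x20 && c <= 0x7e) ? (char)c : ' ';
		}
		out[n] = '\0';
		return n;
	}

	int main(void)
	{
		/* "UFS" encoded as UTF-16BE */
		static const uint8_t desc[] = { 0x00, 'U', 0x00, 'F', 0x00, 'S' };
		char buf[16];

		utf16be_to_ascii(desc, sizeof(desc), buf, sizeof(buf));
		printf("%s\n", buf);  /* prints "UFS" */
		return 0;
	}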
33733501 /**
....@@ -3390,11 +3518,36 @@
33903518 * Unit descriptors are only available for general purpose LUs (LUN id
33913519 * from 0 to 7) and RPMB Well known LU.
33923520 */
3393
- if (!ufs_is_valid_unit_desc_lun(lun))
3521
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
33943522 return -EOPNOTSUPP;
33953523
33963524 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
33973525 param_offset, param_read_buf, param_size);
3526
+}
3527
+
3528
+static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3529
+{
3530
+ int err = 0;
3531
+ u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3532
+
3533
+ if (hba->dev_info.wspecversion >= 0x300) {
3534
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3535
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3536
+ &gating_wait);
3537
+ if (err)
3538
+ dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3539
+ err, gating_wait);
3540
+
3541
+ if (gating_wait == 0) {
3542
+ gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3543
+ dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3544
+ gating_wait);
3545
+ }
3546
+
3547
+ hba->dev_info.clk_gating_wait_us = gating_wait;
3548
+ }
3549
+
3550
+ return err;
33983551 }
33993552
34003553 /**
....@@ -3494,7 +3647,6 @@
34943647 */
34953648 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
34963649 {
3497
- struct utp_transfer_cmd_desc *cmd_descp;
34983650 struct utp_transfer_req_desc *utrdlp;
34993651 dma_addr_t cmd_desc_dma_addr;
35003652 dma_addr_t cmd_desc_element_addr;
....@@ -3504,7 +3656,6 @@
35043656 int i;
35053657
35063658 utrdlp = hba->utrdl_base_addr;
3507
- cmd_descp = hba->ucdl_base_addr;
35083659
35093660 response_offset =
35103661 offsetof(struct utp_transfer_cmd_desc, response_upiu);
....@@ -3533,27 +3684,14 @@
35333684 cpu_to_le16(ALIGNED_UPIU_SIZE);
35343685 } else {
35353686 utrdlp[i].response_upiu_offset =
3536
- cpu_to_le16((response_offset >> 2));
3687
+ cpu_to_le16(response_offset >> 2);
35373688 utrdlp[i].prd_table_offset =
3538
- cpu_to_le16((prdt_offset >> 2));
3689
+ cpu_to_le16(prdt_offset >> 2);
35393690 utrdlp[i].response_upiu_length =
35403691 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
35413692 }
35423693
3543
- hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3544
- hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3545
- (i * sizeof(struct utp_transfer_req_desc));
3546
- hba->lrb[i].ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
3547
- hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3548
- hba->lrb[i].ucd_rsp_ptr =
3549
- (struct utp_upiu_rsp *)cmd_descp->response_upiu;
3550
- hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3551
- response_offset;
3552
- hba->lrb[i].ucd_prdt_ptr =
3553
- (struct ufshcd_sg_entry *)cmd_descp->prd_table;
3554
- hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3555
- prdt_offset;
3556
- cmd_descp = (void *)cmd_descp + cmd_desc_size;
3694
+ ufshcd_init_lrb(hba, &hba->lrb[i], i);
35573695 }
35583696 }
35593697
....@@ -3586,7 +3724,7 @@
35863724 * @hba: per adapter instance
35873725 *
35883726 * DME_RESET command is issued in order to reset UniPro stack.
3589
- * This function now deal with cold reset.
3727
+ * This function now deals with cold reset.
35903728 *
35913729 * Returns 0 on success, non-zero value on failure
35923730 */
....@@ -3796,17 +3934,20 @@
37963934 */
37973935 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
37983936 {
3799
- struct completion uic_async_done;
3937
+ DECLARE_COMPLETION_ONSTACK(uic_async_done);
38003938 unsigned long flags;
38013939 u8 status;
38023940 int ret;
38033941 bool reenable_intr = false;
38043942
38053943 mutex_lock(&hba->uic_cmd_mutex);
3806
- init_completion(&uic_async_done);
38073944 ufshcd_add_delay_before_dme_cmd(hba);
38083945
38093946 spin_lock_irqsave(hba->host->host_lock, flags);
3947
+ if (ufshcd_is_link_broken(hba)) {
3948
+ ret = -ENOLINK;
3949
+ goto out_unlock;
3950
+ }
38103951 hba->uic_async_done = &uic_async_done;
38113952 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
38123953 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
....@@ -3831,10 +3972,18 @@
38313972 dev_err(hba->dev,
38323973 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
38333974 cmd->command, cmd->argument3);
3975
+
3976
+ if (!cmd->cmd_active) {
3977
+ dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
3978
+ __func__);
3979
+ goto check_upmcrs;
3980
+ }
3981
+
38343982 ret = -ETIMEDOUT;
38353983 goto out;
38363984 }
38373985
3986
+check_upmcrs:
38383987 status = ufshcd_get_upmcrs(hba);
38393988 if (status != PWR_LOCAL) {
38403989 dev_err(hba->dev,
....@@ -3846,7 +3995,7 @@
38463995 if (ret) {
38473996 ufshcd_print_host_state(hba);
38483997 ufshcd_print_pwr_info(hba);
3849
- ufshcd_print_host_regs(hba);
3998
+ ufshcd_print_evt_hist(hba);
38503999 }
38514000
38524001 spin_lock_irqsave(hba->host->host_lock, flags);
....@@ -3854,6 +4003,14 @@
38544003 hba->uic_async_done = NULL;
38554004 if (reenable_intr)
38564005 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4006
+ if (ret) {
4007
+ dev_err(hba->dev,
4008
+ "%s: Changing link power status failed (%d). Scheduling error handler\n",
4009
+ __func__, ret);
4010
+ ufshcd_set_link_broken(hba);
4011
+ ufshcd_schedule_eh_work(hba);
4012
+ }
4013
+out_unlock:
38574014 spin_unlock_irqrestore(hba->host->host_lock, flags);
38584015 mutex_unlock(&hba->uic_cmd_mutex);
38594016
....@@ -3894,7 +4051,7 @@
38944051 return ret;
38954052 }
38964053
3897
-static int ufshcd_link_recovery(struct ufs_hba *hba)
4054
+int ufshcd_link_recovery(struct ufs_hba *hba)
38984055 {
38994056 int ret;
39004057 unsigned long flags;
....@@ -3903,6 +4060,9 @@
39034060 hba->ufshcd_state = UFSHCD_STATE_RESET;
39044061 ufshcd_set_eh_in_progress(hba);
39054062 spin_unlock_irqrestore(hba->host->host_lock, flags);
4063
+
4064
+ /* Reset the attached device */
4065
+ ufshcd_vops_device_reset(hba);
39064066
39074067 ret = ufshcd_host_reset_and_restore(hba);
39084068
....@@ -3918,8 +4078,9 @@
39184078
39194079 return ret;
39204080 }
4081
+EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
39214082
3922
-static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4083
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
39234084 {
39244085 int ret;
39254086 struct uic_command uic_cmd = {0};
....@@ -3932,46 +4093,18 @@
39324093 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
39334094 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
39344095
3935
- if (ret) {
3936
- int err;
3937
-
4096
+ if (ret)
39384097 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
39394098 __func__, ret);
3940
-
3941
- /*
3942
- * If link recovery fails then return error code returned from
3943
- * ufshcd_link_recovery().
3944
- * If link recovery succeeds then return -EAGAIN to attempt
3945
- * hibern8 enter retry again.
3946
- */
3947
- err = ufshcd_link_recovery(hba);
3948
- if (err) {
3949
- dev_err(hba->dev, "%s: link recovery failed", __func__);
3950
- ret = err;
3951
- } else {
3952
- ret = -EAGAIN;
3953
- }
3954
- } else
4099
+ else
39554100 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
39564101 POST_CHANGE);
39574102
39584103 return ret;
39594104 }
4105
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
39604106
3961
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3962
-{
3963
- int ret = 0, retries;
3964
-
3965
- for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3966
- ret = __ufshcd_uic_hibern8_enter(hba);
3967
- if (!ret)
3968
- goto out;
3969
- }
3970
-out:
3971
- return ret;
3972
-}
3973
-
3974
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4107
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
39754108 {
39764109 struct uic_command uic_cmd = {0};
39774110 int ret;
....@@ -3987,7 +4120,6 @@
39874120 if (ret) {
39884121 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
39894122 __func__, ret);
3990
- ret = ufshcd_link_recovery(hba);
39914123 } else {
39924124 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
39934125 POST_CHANGE);
....@@ -3997,12 +4129,38 @@
39974129
39984130 return ret;
39994131 }
4132
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
40004133
4001
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4134
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4135
+{
4136
+ unsigned long flags;
4137
+ bool update = false;
4138
+
4139
+ if (!ufshcd_is_auto_hibern8_supported(hba))
4140
+ return;
4141
+
4142
+ spin_lock_irqsave(hba->host->host_lock, flags);
4143
+ if (hba->ahit != ahit) {
4144
+ hba->ahit = ahit;
4145
+ update = true;
4146
+ }
4147
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
4148
+
4149
+ if (update && !pm_runtime_suspended(hba->dev)) {
4150
+ pm_runtime_get_sync(hba->dev);
4151
+ ufshcd_hold(hba, false);
4152
+ ufshcd_auto_hibern8_enable(hba);
4153
+ ufshcd_release(hba);
4154
+ pm_runtime_put(hba->dev);
4155
+ }
4156
+}
4157
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4158
+
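ufshcd_auto_hibern8_update() records the new timer value under the host lock, but only touches the hardware outside the lock, and only when the value actually changed and the device is not runtime suspended. A small pthread-based sketch of that "decide under the lock, act outside the lock" shape, with hypothetical names such as program_timer(), is given below:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int cached_timer;

	/* Pretend hardware programming; must not be called with the lock held. */
	static void program_timer(unsigned int val)
	{
		printf("programming timer to %u\n", val);
	}

	static void update_timer(unsigned int new_val)
	{
		bool update = false;

		/* Decide under the lock... */
		pthread_mutex_lock(&lock);
		if (cached_timer != new_val) {
			cached_timer = new_val;
			update = true;
		}
		pthread_mutex_unlock(&lock);

		/* ...but touch the (slow) hardware outside of it. */
		if (update)
			program_timer(new_val);
	}

	int main(void)
	{
		update_timer(150);
		update_timer(150);  /* no change: nothing is reprogrammed */
		return 0;
	}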
4159
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
40024160 {
40034161 unsigned long flags;
40044162
4005
- if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
4163
+ if (!ufshcd_is_auto_hibern8_supported(hba))
40064164 return;
40074165
40084166 spin_lock_irqsave(hba->host->host_lock, flags);
....@@ -4095,7 +4253,8 @@
40954253 int ret;
40964254
40974255 /* if already configured to the requested pwr_mode */
4098
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4256
+ if (!hba->force_pmc &&
4257
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
40994258 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
41004259 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
41014260 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
....@@ -4137,6 +4296,28 @@
41374296 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
41384297 pwr_mode->hs_rate);
41394298
4299
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4300
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4301
+ DL_FC0ProtectionTimeOutVal_Default);
4302
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4303
+ DL_TC0ReplayTimeOutVal_Default);
4304
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4305
+ DL_AFC0ReqTimeOutVal_Default);
4306
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4307
+ DL_FC1ProtectionTimeOutVal_Default);
4308
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4309
+ DL_TC1ReplayTimeOutVal_Default);
4310
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4311
+ DL_AFC1ReqTimeOutVal_Default);
4312
+
4313
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4314
+ DL_FC0ProtectionTimeOutVal_Default);
4315
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4316
+ DL_TC0ReplayTimeOutVal_Default);
4317
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4318
+ DL_AFC0ReqTimeOutVal_Default);
4319
+ }
4320
+
41404321 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
41414322 | pwr_mode->pwr_tx);
41424323
....@@ -4172,8 +4353,6 @@
41724353 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
41734354
41744355 ret = ufshcd_change_power_mode(hba, &final_params);
4175
- if (!ret)
4176
- ufshcd_print_pwr_info(hba);
41774356
41784357 return ret;
41794358 }
....@@ -4187,12 +4366,12 @@
41874366 */
41884367 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
41894368 {
4190
- int i;
41914369 int err;
4192
- bool flag_res = 1;
4370
+ bool flag_res = true;
4371
+ ktime_t timeout;
41934372
41944373 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4195
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4374
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
41964375 if (err) {
41974376 dev_err(hba->dev,
41984377 "%s setting fDeviceInit flag failed with error %d\n",
....@@ -4200,20 +4379,26 @@
42004379 goto out;
42014380 }
42024381
4203
- /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4204
- for (i = 0; i < 1000 && !err && flag_res; i++)
4205
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4206
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4382
+ /* Poll fDeviceInit flag to be cleared */
4383
+ timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4384
+ do {
4385
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4386
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4387
+ if (!flag_res)
4388
+ break;
4389
+ usleep_range(500, 1000);
4390
+ } while (ktime_before(ktime_get(), timeout));
42074391
4208
- if (err)
4392
+ if (err) {
42094393 dev_err(hba->dev,
4210
- "%s reading fDeviceInit flag failed with error %d\n",
4211
- __func__, err);
4212
- else if (flag_res)
4394
+ "%s reading fDeviceInit flag failed with error %d\n",
4395
+ __func__, err);
4396
+ } else if (flag_res) {
42134397 dev_err(hba->dev,
4214
- "%s fDeviceInit was not cleared by the device\n",
4215
- __func__);
4216
-
4398
+ "%s fDeviceInit was not cleared by the device\n",
4399
+ __func__);
4400
+ err = -EBUSY;
4401
+ }
42174402 out:
42184403 return err;
42194404 }
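The fDeviceInit flag is now polled against a ktime deadline with short sleeps instead of a fixed iteration count. The user-space sketch below mirrors that "check, sleep, re-check until the deadline" shape; the flag read is a stub and the timeout value is illustrative:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define POLL_TIMEOUT_MS 500

	/* Hypothetical flag read: pretend it clears after a few polls. */
	static bool flag_still_set(void)
	{
		static int polls;

		return ++polls < 3;
	}

	static int wait_for_flag_clear(void)
	{
		struct timespec start, now;
		long elapsed_ms;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if (!flag_still_set())
				return 0;             /* flag cleared in time */
			clock_gettime(CLOCK_MONOTONIC, &now);
			elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
				     (now.tv_nsec - start.tv_nsec) / 1000000L;
			if (elapsed_ms > POLL_TIMEOUT_MS)
				return -1;            /* deadline exceeded */
			usleep(1000);                 /* back off briefly between polls */
		}
	}

	int main(void)
	{
		printf("flag %s\n", wait_for_flag_clear() ? "timed out" : "cleared");
		return 0;
	}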
....@@ -4230,7 +4415,7 @@
42304415 *
42314416 * Returns 0 on success, non-zero value on failure
42324417 */
4233
-static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4418
+int ufshcd_make_hba_operational(struct ufs_hba *hba)
42344419 {
42354420 int err = 0;
42364421 u32 reg;
....@@ -4270,31 +4455,36 @@
42704455 dev_err(hba->dev,
42714456 "Host controller not ready to process requests");
42724457 err = -EIO;
4273
- goto out;
42744458 }
42754459
4276
-out:
42774460 return err;
42784461 }
4462
+EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
42794463
42804464 /**
42814465 * ufshcd_hba_stop - Send controller to reset state
42824466 * @hba: per adapter instance
4283
- * @can_sleep: perform sleep or just spin
42844467 */
4285
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4468
+void ufshcd_hba_stop(struct ufs_hba *hba)
42864469 {
4470
+ unsigned long flags;
42874471 int err;
42884472
4289
- ufshcd_crypto_disable(hba);
4290
-
4473
+ /*
4474
+ * Obtain the host lock to prevent that the controller is disabled
4475
+ * while the UFS interrupt handler is active on another CPU.
4476
+ */
4477
+ spin_lock_irqsave(hba->host->host_lock, flags);
42914478 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4479
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
4480
+
42924481 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
42934482 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4294
- 10, 1, can_sleep);
4483
+ 10, 1);
42954484 if (err)
42964485 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
42974486 }
4487
+EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
42984488
42994489 /**
43004490 * ufshcd_hba_execute_hce - initialize the controller
....@@ -4308,17 +4498,13 @@
43084498 */
43094499 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
43104500 {
4311
- int retry;
4501
+ int retry_outer = 3;
4502
+ int retry_inner;
43124503
4313
- /*
4314
- * msleep of 1 and 5 used in this function might result in msleep(20),
4315
- * but it was necessary to send the UFS FPGA to reset mode during
4316
- * development and testing of this driver. msleep can be changed to
4317
- * mdelay and retry count can be reduced based on the controller.
4318
- */
4504
+start:
43194505 if (!ufshcd_is_hba_active(hba))
43204506 /* change controller state to "reset state" */
4321
- ufshcd_hba_stop(hba, true);
4507
+ ufshcd_hba_stop(hba);
43224508
43234509 /* UniPro link is disabled at this point */
43244510 ufshcd_set_link_off(hba);
....@@ -4338,19 +4524,23 @@
43384524 * instruction might be read back.
43394525 * This delay can be changed based on the controller.
43404526 */
4341
- msleep(1);
4527
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
43424528
43434529 /* wait for the host controller to complete initialization */
4344
- retry = 10;
4530
+ retry_inner = 50;
43454531 while (ufshcd_is_hba_active(hba)) {
4346
- if (retry) {
4347
- retry--;
4532
+ if (retry_inner) {
4533
+ retry_inner--;
43484534 } else {
43494535 dev_err(hba->dev,
43504536 "Controller enable failed\n");
4537
+ if (retry_outer) {
4538
+ retry_outer--;
4539
+ goto start;
4540
+ }
43514541 return -EIO;
43524542 }
4353
- msleep(5);
4543
+ usleep_range(1000, 1100);
43544544 }
43554545
43564546 /* enable UIC related interrupts */
....@@ -4361,7 +4551,7 @@
43614551 return 0;
43624552 }
43634553
4364
-static int ufshcd_hba_enable(struct ufs_hba *hba)
4554
+int ufshcd_hba_enable(struct ufs_hba *hba)
43654555 {
43664556 int ret;
43674557
....@@ -4386,9 +4576,11 @@
43864576
43874577 return ret;
43884578 }
4579
+EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4580
+
43894581 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
43904582 {
4391
- int tx_lanes, i, err = 0;
4583
+ int tx_lanes = 0, i, err = 0;
43924584
43934585 if (!peer)
43944586 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
....@@ -4422,6 +4614,23 @@
44224614 return ufshcd_disable_tx_lcc(hba, true);
44234615 }
44244616
4617
+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4618
+{
4619
+ struct ufs_event_hist *e;
4620
+
4621
+ if (id >= UFS_EVT_CNT)
4622
+ return;
4623
+
4624
+ e = &hba->ufs_stats.event[id];
4625
+ e->val[e->pos] = val;
4626
+ e->tstamp[e->pos] = ktime_get();
4627
+ e->cnt += 1;
4628
+ e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4629
+
4630
+ ufshcd_vops_event_notify(hba, id, &val);
4631
+}
4632
+EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4633
+
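ufshcd_update_evt_hist() records each event value and timestamp in a fixed-length ring, advancing the position index modulo UFS_EVENT_HIST_LENGTH so the newest entries overwrite the oldest. A self-contained sketch of that circular history buffer, with CLOCK_MONOTONIC standing in for ktime_get() and without the vops notification:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define EVT_HIST_LEN 8

	/* Fixed-size ring of recent events; oldest entries are overwritten. */
	struct evt_hist {
		uint32_t val[EVT_HIST_LEN];
		int64_t  tstamp_ns[EVT_HIST_LEN];
		unsigned long cnt;  /* total events ever recorded */
		int pos;            /* next slot to write */
	};

	static void evt_hist_record(struct evt_hist *e, uint32_t val)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		e->val[e->pos] = val;
		e->tstamp_ns[e->pos] = (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
		e->cnt++;
		e->pos = (e->pos + 1) % EVT_HIST_LEN;
	}

	int main(void)
	{
		struct evt_hist hist = { 0 };
		uint32_t i;

		for (i = 0; i < 10; i++)  /* more events than slots: ring wraps */
			evt_hist_record(&hist, i);

		printf("recorded %lu events, next slot %d\n", hist.cnt, hist.pos);
		return 0;
	}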
44254634 /**
44264635 * ufshcd_link_startup - Initialize unipro link startup
44274636 * @hba: per adapter instance
....@@ -4449,6 +4658,9 @@
44494658
44504659 /* check if device is detected by inter-connect layer */
44514660 if (!ret && !ufshcd_is_device_present(hba)) {
4661
+ ufshcd_update_evt_hist(hba,
4662
+ UFS_EVT_LINK_STARTUP_FAIL,
4663
+ 0);
44524664 dev_err(hba->dev, "%s: Device not present\n", __func__);
44534665 ret = -ENXIO;
44544666 goto out;
....@@ -4459,13 +4671,21 @@
44594671 * but we can't be sure if the link is up until link startup
44604672 * succeeds. So reset the local Uni-Pro and try again.
44614673 */
4462
- if (ret && ufshcd_hba_enable(hba))
4674
+ if (ret && ufshcd_hba_enable(hba)) {
4675
+ ufshcd_update_evt_hist(hba,
4676
+ UFS_EVT_LINK_STARTUP_FAIL,
4677
+ (u32)ret);
44634678 goto out;
4679
+ }
44644680 } while (ret && retries--);
44654681
4466
- if (ret)
4682
+ if (ret) {
44674683 /* failed to get the link up... retire */
4684
+ ufshcd_update_evt_hist(hba,
4685
+ UFS_EVT_LINK_STARTUP_FAIL,
4686
+ (u32)ret);
44684687 goto out;
4688
+ }
44694689
44704690 if (link_startup_again) {
44714691 link_startup_again = false;
....@@ -4488,13 +4708,15 @@
44884708 if (ret)
44894709 goto out;
44904710
4711
+ /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4712
+ ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
44914713 ret = ufshcd_make_hba_operational(hba);
44924714 out:
44934715 if (ret) {
44944716 dev_err(hba->dev, "link startup failed %d\n", ret);
44954717 ufshcd_print_host_state(hba);
44964718 ufshcd_print_pwr_info(hba);
4497
- ufshcd_print_host_regs(hba);
4719
+ ufshcd_print_evt_hist(hba);
44984720 }
44994721 return ret;
45004722 }
....@@ -4595,7 +4817,7 @@
45954817 * protected so skip reading bLUWriteProtect parameter for
45964818 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
45974819 */
4598
- else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4820
+ else if (lun >= hba->dev_info.max_lu_supported)
45994821 ret = -ENOTSUPP;
46004822 else
46014823 ret = ufshcd_read_unit_desc_param(hba,
....@@ -4642,6 +4864,9 @@
46424864 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
46434865 sdev->use_10_for_ms = 1;
46444866
4867
+ /* DBD field should be set to 1 in mode sense(10) */
4868
+ sdev->set_dbd_for_ms = 1;
4869
+
46454870 /* allow SCSI layer to restart the device in case of errors */
46464871 sdev->allow_restart = 1;
46474872
....@@ -4667,11 +4892,27 @@
46674892 */
46684893 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
46694894 {
4670
- struct ufs_hba *hba = shost_priv(sdev->host);
4895
+ return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
4896
+}
46714897
4672
- if (depth > hba->nutrs)
4673
- depth = hba->nutrs;
4674
- return scsi_change_queue_depth(sdev, depth);
4898
+static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
4899
+{
4900
+ /* skip well-known LU */
4901
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4902
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4903
+ return;
4904
+
4905
+ ufshpb_destroy_lu(hba, sdev);
4906
+}
4907
+
4908
+static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
4909
+{
4910
+ /* skip well-known LU */
4911
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4912
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4913
+ return;
4914
+
4915
+ ufshpb_init_hpb_lu(hba, sdev);
46754916 }
46764917
46774918 /**
....@@ -4683,13 +4924,18 @@
46834924 struct ufs_hba *hba = shost_priv(sdev->host);
46844925 struct request_queue *q = sdev->request_queue;
46854926
4927
+ ufshcd_hpb_configure(hba, sdev);
4928
+
46864929 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4687
- blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4930
+ if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
4931
+ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
46884932
46894933 if (ufshcd_is_rpm_autosuspend_allowed(hba))
46904934 sdev->rpm_autosuspend = 1;
46914935
46924936 ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4937
+
4938
+ trace_android_vh_ufs_update_sdev(sdev);
46934939
46944940 return 0;
46954941 }
....@@ -4701,9 +4947,11 @@
47014947 static void ufshcd_slave_destroy(struct scsi_device *sdev)
47024948 {
47034949 struct ufs_hba *hba;
4704
- struct request_queue *q = sdev->request_queue;
47054950
47064951 hba = shost_priv(sdev->host);
4952
+
4953
+ ufshcd_hpb_destroy(hba, sdev);
4954
+
47074955 /* Drop the reference as it won't be needed anymore */
47084956 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
47094957 unsigned long flags;
....@@ -4712,48 +4960,6 @@
47124960 hba->sdev_ufs_device = NULL;
47134961 spin_unlock_irqrestore(hba->host->host_lock, flags);
47144962 }
4715
-
4716
- ufshcd_crypto_destroy_rq_keyslot_manager(hba, q);
4717
-}
4718
-
4719
-/**
4720
- * ufshcd_task_req_compl - handle task management request completion
4721
- * @hba: per adapter instance
4722
- * @index: index of the completed request
4723
- * @resp: task management service response
4724
- *
4725
- * Returns non-zero value on error, zero on success
4726
- */
4727
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4728
-{
4729
- struct utp_task_req_desc *task_req_descp;
4730
- struct utp_upiu_task_rsp *task_rsp_upiup;
4731
- unsigned long flags;
4732
- int ocs_value;
4733
- int task_result;
4734
-
4735
- spin_lock_irqsave(hba->host->host_lock, flags);
4736
-
4737
- /* Clear completed tasks from outstanding_tasks */
4738
- __clear_bit(index, &hba->outstanding_tasks);
4739
-
4740
- task_req_descp = hba->utmrdl_base_addr;
4741
- ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4742
-
4743
- if (ocs_value == OCS_SUCCESS) {
4744
- task_rsp_upiup = (struct utp_upiu_task_rsp *)
4745
- task_req_descp[index].task_rsp_upiu;
4746
- task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4747
- task_result = task_result & MASK_TM_SERVICE_RESP;
4748
- if (resp)
4749
- *resp = (u8)task_result;
4750
- } else {
4751
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4752
- __func__, ocs_value);
4753
- }
4754
- spin_unlock_irqrestore(hba->host->host_lock, flags);
4755
-
4756
- return ocs_value;
47574963 }
47584964
47594965 /**
....@@ -4771,6 +4977,7 @@
47714977 switch (scsi_status) {
47724978 case SAM_STAT_CHECK_CONDITION:
47734979 ufshcd_copy_sense_data(lrbp);
4980
+ fallthrough;
47744981 case SAM_STAT_GOOD:
47754982 result |= DID_OK << 16 |
47764983 COMMAND_COMPLETE << 8 |
....@@ -4807,6 +5014,12 @@
48075014 /* overall command status of utrd */
48085015 ocs = ufshcd_get_tr_ocs(lrbp);
48095016
5017
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5018
+ if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5019
+ MASK_RSP_UPIU_RESULT)
5020
+ ocs = OCS_SUCCESS;
5021
+ }
5022
+
48105023 switch (ocs) {
48115024 case OCS_SUCCESS:
48125025 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
....@@ -4839,8 +5052,19 @@
48395052 * UFS device needs urgent BKOPs.
48405053 */
48415054 if (!hba->pm_op_in_progress &&
4842
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4843
- schedule_work(&hba->eeh_work);
5055
+ !ufshcd_eh_in_progress(hba) &&
5056
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
5057
+ schedule_work(&hba->eeh_work)) {
5058
+ /*
5059
+ * Prevent suspend once eeh_work is scheduled
5060
+ * to avoid deadlock between ufshcd_suspend
5061
+ * and exception event handler.
5062
+ */
5063
+ pm_runtime_get_noresume(hba->dev);
5064
+ }
5065
+
5066
+ if (scsi_status == SAM_STAT_GOOD)
5067
+ ufshpb_rsp_upiu(hba, lrbp);
48445068 break;
48455069 case UPIU_TRANSACTION_REJECT_UPIU:
48465070 /* TODO: handle Reject UPIU Response */
....@@ -4849,10 +5073,10 @@
48495073 "Reject UPIU not fully implemented\n");
48505074 break;
48515075 default:
4852
- result = DID_ERROR << 16;
48535076 dev_err(hba->dev,
48545077 "Unexpected request response code = %x\n",
48555078 result);
5079
+ result = DID_ERROR << 16;
48565080 break;
48575081 }
48585082 break;
....@@ -4860,6 +5084,10 @@
48605084 result |= DID_ABORT << 16;
48615085 break;
48625086 case OCS_INVALID_COMMAND_STATUS:
5087
+ dev_err_ratelimited(hba->dev,
5088
+ "Retrying request with tag %d / cdb %#02x because of invalid command status\n",
5089
+ lrbp->task_tag, lrbp->cmd && lrbp->cmd->cmnd ?
5090
+ lrbp->cmd->cmnd[0] : 0);
48635091 result |= DID_REQUEUE << 16;
48645092 break;
48655093 case OCS_INVALID_CMD_TABLE_ATTR:
....@@ -4868,6 +5096,7 @@
48685096 case OCS_MISMATCH_RESP_UPIU_SIZE:
48695097 case OCS_PEER_COMM_FAILURE:
48705098 case OCS_FATAL_ERROR:
5099
+ case OCS_DEVICE_FATAL_ERROR:
48715100 case OCS_INVALID_CRYPTO_CONFIG:
48725101 case OCS_GENERAL_CRYPTO_ERROR:
48735102 default:
....@@ -4875,33 +5104,87 @@
48755104 dev_err(hba->dev,
48765105 "OCS error from controller = %x for tag %d\n",
48775106 ocs, lrbp->task_tag);
4878
- ufshcd_print_host_regs(hba);
5107
+ ufshcd_print_evt_hist(hba);
48795108 ufshcd_print_host_state(hba);
48805109 break;
48815110 } /* end of switch */
48825111
4883
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
5112
+ if ((host_byte(result) != DID_OK) &&
5113
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
48845114 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
48855115 return result;
5116
+}
5117
+
5118
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5119
+ u32 intr_mask)
5120
+{
5121
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
5122
+ !ufshcd_is_auto_hibern8_enabled(hba))
5123
+ return false;
5124
+
5125
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5126
+ return false;
5127
+
5128
+ if (hba->active_uic_cmd &&
5129
+ (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5130
+ hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5131
+ return false;
5132
+
5133
+ return true;
48865134 }
48875135
48885136 /**
48895137 * ufshcd_uic_cmd_compl - handle completion of uic command
48905138 * @hba: per adapter instance
48915139 * @intr_status: interrupt status generated by the controller
5140
+ *
5141
+ * Returns
5142
+ * IRQ_HANDLED - If interrupt is valid
5143
+ * IRQ_NONE - If invalid interrupt
48925144 */
4893
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5145
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
48945146 {
5147
+ irqreturn_t retval = IRQ_NONE;
5148
+
5149
+ spin_lock(hba->host->host_lock);
5150
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5151
+ hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5152
+
48955153 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
48965154 hba->active_uic_cmd->argument2 |=
48975155 ufshcd_get_uic_cmd_result(hba);
48985156 hba->active_uic_cmd->argument3 =
48995157 ufshcd_get_dme_attr_val(hba);
5158
+ if (!hba->uic_async_done)
5159
+ hba->active_uic_cmd->cmd_active = 0;
49005160 complete(&hba->active_uic_cmd->done);
5161
+ retval = IRQ_HANDLED;
49015162 }
49025163
4903
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
5164
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5165
+ hba->active_uic_cmd->cmd_active = 0;
49045166 complete(hba->uic_async_done);
5167
+ retval = IRQ_HANDLED;
5168
+ }
5169
+
5170
+ if (retval == IRQ_HANDLED)
5171
+ ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5172
+ "complete");
5173
+ spin_unlock(hba->host->host_lock);
5174
+ return retval;
5175
+}
5176
+
5177
+/* Release the resources allocated for processing a SCSI command. */
5178
+static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5179
+ struct ufshcd_lrb *lrbp)
5180
+{
5181
+ struct scsi_cmnd *cmd = lrbp->cmd;
5182
+
5183
+ scsi_dma_unmap(cmd);
5184
+ ufshcd_crypto_clear_prdt(hba, lrbp);
5185
+ lrbp->cmd = NULL; /* Mark the command as completed. */
5186
+ ufshcd_release(hba);
5187
+ ufshcd_clk_scaling_update_busy(hba);
49055188 }
49065189
49075190 /**
....@@ -4914,55 +5197,48 @@
49145197 {
49155198 struct ufshcd_lrb *lrbp;
49165199 struct scsi_cmnd *cmd;
4917
- int result;
49185200 int index;
49195201
49205202 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5203
+ if (!test_and_clear_bit(index, &hba->outstanding_reqs))
5204
+ continue;
49215205 lrbp = &hba->lrb[index];
5206
+ lrbp->compl_time_stamp = ktime_get();
49225207 cmd = lrbp->cmd;
49235208 if (cmd) {
5209
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5210
+ ufshcd_update_monitor(hba, lrbp);
5211
+ trace_android_vh_ufs_compl_command(hba, lrbp);
49245212 ufshcd_add_command_trace(hba, index, "complete");
4925
- result = ufshcd_transfer_rsp_status(hba, lrbp);
4926
- scsi_dma_unmap(cmd);
4927
- cmd->result = result;
4928
- ufshcd_complete_lrbp_crypto(hba, cmd, lrbp);
4929
- /* Mark completed command as NULL in LRB */
4930
- lrbp->cmd = NULL;
4931
- clear_bit_unlock(index, &hba->lrb_in_use);
5213
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
5214
+ ufshcd_release_scsi_cmd(hba, lrbp);
49325215 /* Do not touch lrbp after scsi done */
49335216 cmd->scsi_done(cmd);
4934
- __ufshcd_release(hba);
49355217 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
49365218 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
49375219 if (hba->dev_cmd.complete) {
5220
+ trace_android_vh_ufs_compl_command(hba, lrbp);
49385221 ufshcd_add_command_trace(hba, index,
49395222 "dev_complete");
49405223 complete(hba->dev_cmd.complete);
5224
+ ufshcd_clk_scaling_update_busy(hba);
49415225 }
49425226 }
4943
- if (ufshcd_is_clkscaling_supported(hba))
4944
- hba->clk_scaling.active_reqs--;
4945
-
4946
- lrbp->compl_time_stamp = ktime_get();
49475227 }
4948
-
4949
- /* clear corresponding bits of completed commands */
4950
- hba->outstanding_reqs ^= completed_reqs;
4951
-
4952
- ufshcd_clk_scaling_update_busy(hba);
4953
-
4954
- /* we might have free'd some tags above */
4955
- wake_up(&hba->dev_cmd.tag_wq);
49565228 }
49575229
49585230 /**
4959
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
5231
+ * ufshcd_trc_handler - handle transfer requests completion
49605232 * @hba: per adapter instance
5233
+ * @use_utrlcnr: get completed requests from UTRLCNR
5234
+ *
5235
+ * Returns
5236
+ * IRQ_HANDLED - If interrupt is valid
5237
+ * IRQ_NONE - If invalid interrupt
49615238 */
4962
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
5239
+static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
49635240 {
4964
- unsigned long completed_reqs;
4965
- u32 tr_doorbell;
5241
+ unsigned long completed_reqs = 0;
49665242
49675243 /* Resetting interrupt aggregation counters first and reading the
49685244 * DOOR_BELL afterward allows us to handle all the completed requests.
....@@ -4975,10 +5251,31 @@
49755251 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
49765252 ufshcd_reset_intr_aggr(hba);
49775253
4978
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4979
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5254
+ if (use_utrlcnr) {
5255
+ u32 utrlcnr;
49805256
4981
- __ufshcd_transfer_req_compl(hba, completed_reqs);
5257
+ utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
5258
+ if (utrlcnr) {
5259
+ ufshcd_writel(hba, utrlcnr,
5260
+ REG_UTP_TRANSFER_REQ_LIST_COMPL);
5261
+ completed_reqs = utrlcnr;
5262
+ }
5263
+ } else {
5264
+ unsigned long flags;
5265
+ u32 tr_doorbell;
5266
+
5267
+ spin_lock_irqsave(hba->host->host_lock, flags);
5268
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5269
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5270
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
5271
+ }
5272
+
5273
+ if (completed_reqs) {
5274
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
5275
+ return IRQ_HANDLED;
5276
+ } else {
5277
+ return IRQ_NONE;
5278
+ }
49825279 }
49835280
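ufshcd_trc_handler() now has two ways to learn which transfer requests finished: controllers with the UTRLCNR register report the completed set directly, while the fallback path XORs the doorbell with the driver's outstanding-request bitmap. The XOR works because the doorbell bits remain a subset of the outstanding bits until the driver itself clears them. A small stand-alone illustration of that fallback arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long outstanding = 0x2d;	/* tags 0, 2, 3, 5 in flight */
		unsigned long doorbell    = 0x09;	/* controller still owns tags 0 and 3 */
		unsigned long completed   = doorbell ^ outstanding;

		printf("completed tag bitmap: 0x%lx\n", completed);	/* prints 0x24: tags 2 and 5 */
		return 0;
	}
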
49845281 /**
....@@ -5056,7 +5353,7 @@
50565353 goto out;
50575354
50585355 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5059
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
5356
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
50605357 if (err) {
50615358 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
50625359 __func__, err);
....@@ -5106,7 +5403,7 @@
51065403 }
51075404
51085405 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5109
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
5406
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
51105407 if (err) {
51115408 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
51125409 __func__, err);
....@@ -5141,6 +5438,7 @@
51415438 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
51425439 ufshcd_disable_auto_bkops(hba);
51435440 }
5441
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
51445442 hba->is_urgent_bkops_lvl_checked = false;
51455443 }
51465444
....@@ -5166,7 +5464,7 @@
51665464 * to know whether auto bkops is enabled or disabled after this function
51675465 * returns control to it.
51685466 */
5169
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5467
+int ufshcd_bkops_ctrl(struct ufs_hba *hba,
51705468 enum bkops_status status)
51715469 {
51725470 int err;
....@@ -5191,6 +5489,7 @@
51915489 out:
51925490 return err;
51935491 }
5492
+EXPORT_SYMBOL_GPL(ufshcd_bkops_ctrl);
51945493
51955494 /**
51965495 * ufshcd_urgent_bkops - handle urgent bkops exception event
....@@ -5250,6 +5549,190 @@
52505549 __func__, err);
52515550 }
52525551
5552
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
5553
+{
5554
+ int ret;
5555
+ u8 index;
5556
+ enum query_opcode opcode;
5557
+
5558
+ if (!ufshcd_is_wb_allowed(hba))
5559
+ return 0;
5560
+
5561
+ if (!(enable ^ hba->wb_enabled))
5562
+ return 0;
5563
+ if (enable)
5564
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
5565
+ else
5566
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5567
+
5568
+ index = ufshcd_wb_get_query_index(hba);
5569
+ ret = ufshcd_query_flag_retry(hba, opcode,
5570
+ QUERY_FLAG_IDN_WB_EN, index, NULL);
5571
+ if (ret) {
5572
+ dev_err(hba->dev, "%s write booster %s failed %d\n",
5573
+ __func__, enable ? "enable" : "disable", ret);
5574
+ return ret;
5575
+ }
5576
+
5577
+ hba->wb_enabled = enable;
5578
+ dev_dbg(hba->dev, "%s write booster %s %d\n",
5579
+ __func__, enable ? "enable" : "disable", ret);
5580
+
5581
+ return ret;
5582
+}
5583
+
5584
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5585
+{
5586
+ int val;
5587
+ u8 index;
5588
+
5589
+ if (set)
5590
+ val = UPIU_QUERY_OPCODE_SET_FLAG;
5591
+ else
5592
+ val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5593
+
5594
+ index = ufshcd_wb_get_query_index(hba);
5595
+ return ufshcd_query_flag_retry(hba, val,
5596
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5597
+ index, NULL);
5598
+}
5599
+
5600
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5601
+{
5602
+ if (enable)
5603
+ ufshcd_wb_buf_flush_enable(hba);
5604
+ else
5605
+ ufshcd_wb_buf_flush_disable(hba);
5606
+
5607
+}
5608
+
5609
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5610
+{
5611
+ int ret;
5612
+ u8 index;
5613
+
5614
+ if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5615
+ return 0;
5616
+
5617
+ index = ufshcd_wb_get_query_index(hba);
5618
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5619
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5620
+ index, NULL);
5621
+ if (ret)
5622
+ dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5623
+ __func__, ret);
5624
+ else
5625
+ hba->wb_buf_flush_enabled = true;
5626
+
5627
+ dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5628
+ return ret;
5629
+}
5630
+
5631
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5632
+{
5633
+ int ret;
5634
+ u8 index;
5635
+
5636
+ if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5637
+ return 0;
5638
+
5639
+ index = ufshcd_wb_get_query_index(hba);
5640
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5641
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5642
+ index, NULL);
5643
+ if (ret) {
5644
+ dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5645
+ __func__, ret);
5646
+ } else {
5647
+ hba->wb_buf_flush_enabled = false;
5648
+ dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5649
+ }
5650
+
5651
+ return ret;
5652
+}
5653
+
5654
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5655
+ u32 avail_buf)
5656
+{
5657
+ u32 cur_buf;
5658
+ int ret;
5659
+ u8 index;
5660
+
5661
+ index = ufshcd_wb_get_query_index(hba);
5662
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5663
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5664
+ index, 0, &cur_buf);
5665
+ if (ret) {
5666
+ dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5667
+ __func__, ret);
5668
+ return false;
5669
+ }
5670
+
5671
+ if (!cur_buf) {
5672
+ dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5673
+ cur_buf);
5674
+ return false;
5675
+ }
5676
+ /* Let it continue to flush when available buffer exceeds threshold */
5677
+ if (avail_buf < hba->vps->wb_flush_threshold)
5678
+ return true;
5679
+
5680
+ return false;
5681
+}
5682
+
5683
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5684
+{
5685
+ int ret;
5686
+ u32 avail_buf;
5687
+ u8 index;
5688
+
5689
+ if (!ufshcd_is_wb_allowed(hba))
5690
+ return false;
5691
+ /*
5692
+ * The ufs device needs the vcc to be ON to flush.
5693
+ * With user-space reduction enabled, it's enough to enable flush
5694
+ * by checking only the available buffer. The threshold
5695
+ * defined here is > 90% full.
5696
+ * With user-space preserved enabled, the current-buffer
5697
+ * should be checked too because the wb buffer size can reduce
5698
+ * when disk tends to be full. This info is provided by current
5699
+ * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5700
+ * keeping vcc on when current buffer is empty.
5701
+ */
5702
+ index = ufshcd_wb_get_query_index(hba);
5703
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5704
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5705
+ index, 0, &avail_buf);
5706
+ if (ret) {
5707
+ dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5708
+ __func__, ret);
5709
+ return false;
5710
+ }
5711
+
5712
+ if (!hba->dev_info.b_presrv_uspc_en) {
5713
+ if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5714
+ return true;
5715
+ return false;
5716
+ }
5717
+
5718
+ return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5719
+}
5720
+
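The decision made by ufshcd_wb_need_flush() above can be restated compactly: with user-space reduction, only the available-buffer attribute matters and flushing starts once 10% or less of the WriteBooster buffer remains; with preserved user space, the current-buffer attribute must also be non-zero and the available space must fall below the host's threshold. A hedged sketch of that decision, with illustrative parameter names standing in for the queried attributes and the driver's constants:

	/* Illustrative restatement of the flush decision, not driver code.
	 * avail_buf and cur_buf stand for dAvailableWriteBoosterBufferSize and
	 * dCurrentWriteBoosterBufferSize as read from the device. */
	static _Bool wb_need_flush(_Bool preserve_user_space,
				   unsigned int avail_buf, unsigned int cur_buf,
				   unsigned int ten_percent_mark,
				   unsigned int flush_threshold)
	{
		if (!preserve_user_space)
			/* user-space reduction: flush once <= 10% of the buffer remains */
			return avail_buf <= ten_percent_mark;

		/* preserved user space: an empty current buffer means nothing to flush */
		if (!cur_buf)
			return 0;

		/* keep flushing while available space is below the host threshold */
		return avail_buf < flush_threshold;
	}
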
5721
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5722
+{
5723
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
5724
+ struct ufs_hba,
5725
+ rpm_dev_flush_recheck_work);
5726
+ /*
5727
+	 * To prevent unnecessary VCC power drain after the device finishes a
5728
+	 * WriteBooster buffer flush or Auto BKOPs, force a runtime resume
5729
+	 * after a certain delay so that the threshold is rechecked at the
5730
+	 * next runtime suspend.
5731
+ */
5732
+ pm_runtime_get_sync(hba->dev);
5733
+ pm_runtime_put_sync(hba->dev);
5734
+}
5735
+
52535736 /**
52545737 * ufshcd_exception_event_handler - handle exceptions raised by device
52555738 * @work: pointer to work data
....@@ -5265,7 +5748,7 @@
52655748 hba = container_of(work, struct ufs_hba, eeh_work);
52665749
52675750 pm_runtime_get_sync(hba->dev);
5268
- scsi_block_requests(hba->host);
5751
+ ufshcd_scsi_block_requests(hba);
52695752 err = ufshcd_get_ee_status(hba, &status);
52705753 if (err) {
52715754 dev_err(hba->dev, "%s: failed to get exception status %d\n",
....@@ -5279,15 +5762,22 @@
52795762 ufshcd_bkops_exception_event_handler(hba);
52805763
52815764 out:
5282
- scsi_unblock_requests(hba->host);
5283
- pm_runtime_put_sync(hba->dev);
5765
+ ufshcd_scsi_unblock_requests(hba);
5766
+ /*
5767
+ * pm_runtime_get_noresume is called while scheduling
5768
+ * eeh_work to avoid suspend racing with exception work.
5769
+ * Hence decrement usage counter using pm_runtime_put_noidle
5770
+ * to allow suspend on completion of exception event handler.
5771
+ */
5772
+ pm_runtime_put_noidle(hba->dev);
5773
+ pm_runtime_put(hba->dev);
52845774 return;
52855775 }
52865776
52875777 /* Complete requests that have door-bell cleared */
52885778 static void ufshcd_complete_requests(struct ufs_hba *hba)
52895779 {
5290
- ufshcd_transfer_req_compl(hba);
5780
+ ufshcd_trc_handler(hba, false);
52915781 ufshcd_tmc_handler(hba);
52925782 }
52935783
....@@ -5354,14 +5844,157 @@
53545844 hba->saved_err &= ~UIC_ERROR;
53555845 /* clear NAC error */
53565846 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5357
- if (!hba->saved_uic_err) {
5847
+ if (!hba->saved_uic_err)
53585848 err_handling = false;
5359
- goto out;
5360
- }
53615849 }
53625850 out:
53635851 spin_unlock_irqrestore(hba->host->host_lock, flags);
53645852 return err_handling;
5853
+}
5854
+
5855
+/* host lock must be held before calling this func */
5856
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5857
+{
5858
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5859
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5860
+}
5861
+
5862
+/* host lock must be held before calling this func */
5863
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5864
+{
5865
+ /* handle fatal errors only when link is not in error state */
5866
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5867
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5868
+ ufshcd_is_saved_err_fatal(hba))
5869
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5870
+ else
5871
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5872
+ queue_work(hba->eh_wq, &hba->eh_work);
5873
+ }
5874
+}
5875
+
5876
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
5877
+{
5878
+ down_write(&hba->clk_scaling_lock);
5879
+ hba->clk_scaling.is_allowed = allow;
5880
+ up_write(&hba->clk_scaling_lock);
5881
+}
5882
+
5883
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
5884
+{
5885
+ if (suspend) {
5886
+ if (hba->clk_scaling.is_enabled)
5887
+ ufshcd_suspend_clkscaling(hba);
5888
+ ufshcd_clk_scaling_allow(hba, false);
5889
+ } else {
5890
+ ufshcd_clk_scaling_allow(hba, true);
5891
+ if (hba->clk_scaling.is_enabled)
5892
+ ufshcd_resume_clkscaling(hba);
5893
+ }
5894
+}
5895
+
5896
+static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5897
+{
5898
+ pm_runtime_get_sync(hba->dev);
5899
+ if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
5900
+ enum ufs_pm_op pm_op;
5901
+
5902
+ /*
5903
+ * Don't assume anything of pm_runtime_get_sync(), if
5904
+ * resume fails, irq and clocks can be OFF, and powers
5905
+ * can be OFF or in LPM.
5906
+ */
5907
+ ufshcd_setup_hba_vreg(hba, true);
5908
+ ufshcd_enable_irq(hba);
5909
+ ufshcd_setup_vreg(hba, true);
5910
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5911
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5912
+ ufshcd_hold(hba, false);
5913
+ if (!ufshcd_is_clkgating_allowed(hba))
5914
+ ufshcd_setup_clocks(hba, true);
5915
+ ufshcd_release(hba);
5916
+ pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5917
+ ufshcd_vops_resume(hba, pm_op);
5918
+ } else {
5919
+ ufshcd_hold(hba, false);
5920
+ if (ufshcd_is_clkscaling_supported(hba) &&
5921
+ hba->clk_scaling.is_enabled)
5922
+ ufshcd_suspend_clkscaling(hba);
5923
+ ufshcd_clk_scaling_allow(hba, false);
5924
+ }
5925
+ ufshcd_scsi_block_requests(hba);
5926
+ /* Drain ufshcd_queuecommand() */
5927
+ down_write(&hba->clk_scaling_lock);
5928
+ up_write(&hba->clk_scaling_lock);
5929
+ cancel_work_sync(&hba->eeh_work);
5930
+}
5931
+
5932
+static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5933
+{
5934
+ ufshcd_scsi_unblock_requests(hba);
5935
+ ufshcd_release(hba);
5936
+ if (ufshcd_is_clkscaling_supported(hba))
5937
+ ufshcd_clk_scaling_suspend(hba, false);
5938
+ pm_runtime_put(hba->dev);
5939
+}
5940
+
5941
+static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5942
+{
5943
+ return (!hba->is_powered || hba->shutting_down ||
5944
+ hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5945
+ (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5946
+ ufshcd_is_link_broken(hba))));
5947
+}
5948
+
5949
+#ifdef CONFIG_PM
5950
+static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5951
+{
5952
+ struct Scsi_Host *shost = hba->host;
5953
+ struct scsi_device *sdev;
5954
+ struct request_queue *q;
5955
+ int ret;
5956
+
5957
+ hba->is_sys_suspended = false;
5958
+ /*
5959
+ * Set RPM status of hba device to RPM_ACTIVE,
5960
+ * this also clears its runtime error.
5961
+ */
5962
+ ret = pm_runtime_set_active(hba->dev);
5963
+ /*
5964
+ * If hba device had runtime error, we also need to resume those
5965
+ * scsi devices under hba in case any of them has failed to be
5966
+ * resumed due to hba runtime resume failure. This is to unblock
5967
+ * blk_queue_enter in case there are bios waiting inside it.
5968
+ */
5969
+ if (!ret) {
5970
+ shost_for_each_device(sdev, shost) {
5971
+ q = sdev->request_queue;
5972
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
5973
+ q->rpm_status == RPM_SUSPENDING))
5974
+ pm_request_resume(q->dev);
5975
+ }
5976
+ }
5977
+}
5978
+#else
5979
+static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
5980
+{
5981
+}
5982
+#endif
5983
+
5984
+static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
5985
+{
5986
+ struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
5987
+ u32 mode;
5988
+
5989
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
5990
+
5991
+ if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
5992
+ return true;
5993
+
5994
+ if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
5995
+ return true;
5996
+
5997
+ return false;
53655998 }
53665999
53676000 /**
....@@ -5372,26 +6005,37 @@
53726005 {
53736006 struct ufs_hba *hba;
53746007 unsigned long flags;
5375
- u32 err_xfer = 0;
5376
- u32 err_tm = 0;
5377
- int err = 0;
6008
+ bool err_xfer = false;
6009
+ bool err_tm = false;
6010
+ int err = 0, pmc_err;
53786011 int tag;
5379
- bool needs_reset = false;
6012
+ bool needs_reset = false, needs_restore = false;
53806013
53816014 hba = container_of(work, struct ufs_hba, eh_work);
53826015
5383
- pm_runtime_get_sync(hba->dev);
5384
- ufshcd_hold(hba, false);
5385
-
6016
+ down(&hba->host_sem);
53866017 spin_lock_irqsave(hba->host->host_lock, flags);
5387
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5388
- goto out;
5389
-
5390
- hba->ufshcd_state = UFSHCD_STATE_RESET;
6018
+ if (ufshcd_err_handling_should_stop(hba)) {
6019
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6020
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6021
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6022
+ up(&hba->host_sem);
6023
+ return;
6024
+ }
53916025 ufshcd_set_eh_in_progress(hba);
5392
-
6026
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6027
+ ufshcd_err_handling_prepare(hba);
53936028 /* Complete requests that have door-bell cleared by h/w */
53946029 ufshcd_complete_requests(hba);
6030
+ spin_lock_irqsave(hba->host->host_lock, flags);
6031
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6032
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
6033
+ /*
6034
+ * A full reset and restore might have happened after preparation
6035
+ * is finished, double check whether we should stop.
6036
+ */
6037
+ if (ufshcd_err_handling_should_stop(hba))
6038
+ goto skip_err_handling;
53956039
53966040 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
53976041 bool ret;
....@@ -5400,29 +6044,60 @@
54006044 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
54016045 ret = ufshcd_quirk_dl_nac_errors(hba);
54026046 spin_lock_irqsave(hba->host->host_lock, flags);
5403
- if (!ret)
6047
+ if (!ret && ufshcd_err_handling_should_stop(hba))
54046048 goto skip_err_handling;
54056049 }
5406
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
5407
- ((hba->saved_err & UIC_ERROR) &&
5408
- (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5409
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5410
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5411
- needs_reset = true;
6050
+
6051
+ if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6052
+ (hba->saved_uic_err &&
6053
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6054
+ bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6055
+
6056
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6057
+ ufshcd_print_host_state(hba);
6058
+ ufshcd_print_pwr_info(hba);
6059
+ ufshcd_print_evt_hist(hba);
6060
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6061
+ ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6062
+ spin_lock_irqsave(hba->host->host_lock, flags);
6063
+ }
54126064
54136065 /*
54146066 * if host reset is required then skip clearing the pending
54156067 * transfers forcefully because they will get cleared during
54166068 * host reset and restore
54176069 */
5418
- if (needs_reset)
5419
- goto skip_pending_xfer_clear;
6070
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6071
+ ufshcd_is_saved_err_fatal(hba) ||
6072
+ ((hba->saved_err & UIC_ERROR) &&
6073
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6074
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6075
+ needs_reset = true;
6076
+ goto do_reset;
6077
+ }
54206078
6079
+ /*
6080
+ * If LINERESET was caught, UFS might have been put to PWM mode,
6081
+ * check if power mode restore is needed.
6082
+ */
6083
+ if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6084
+ hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6085
+ if (!hba->saved_uic_err)
6086
+ hba->saved_err &= ~UIC_ERROR;
6087
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6088
+ if (ufshcd_is_pwr_mode_restore_needed(hba))
6089
+ needs_restore = true;
6090
+ spin_lock_irqsave(hba->host->host_lock, flags);
6091
+ if (!hba->saved_err && !needs_restore)
6092
+ goto skip_err_handling;
6093
+ }
6094
+
6095
+ hba->silence_err_logs = true;
54216096 /* release lock as clear command might sleep */
54226097 spin_unlock_irqrestore(hba->host->host_lock, flags);
54236098 /* Clear pending transfer requests */
54246099 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5425
- if (ufshcd_clear_cmd(hba, tag)) {
6100
+ if (ufshcd_try_to_abort_task(hba, tag)) {
54266101 err_xfer = true;
54276102 goto lock_skip_pending_xfer_clear;
54286103 }
....@@ -5437,149 +6112,206 @@
54376112 }
54386113
54396114 lock_skip_pending_xfer_clear:
5440
- spin_lock_irqsave(hba->host->host_lock, flags);
5441
-
54426115 /* Complete the requests that are cleared by s/w */
54436116 ufshcd_complete_requests(hba);
54446117
5445
- if (err_xfer || err_tm)
6118
+ spin_lock_irqsave(hba->host->host_lock, flags);
6119
+ hba->silence_err_logs = false;
6120
+ if (err_xfer || err_tm) {
54466121 needs_reset = true;
6122
+ goto do_reset;
6123
+ }
54476124
5448
-skip_pending_xfer_clear:
6125
+ /*
6126
+ * After all reqs and tasks are cleared from doorbell,
6127
+	 * now it is safe to restore power mode.
6128
+ */
6129
+ if (needs_restore) {
6130
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6131
+ /*
6132
+ * Hold the scaling lock just in case dev cmds
6133
+ * are sent via bsg and/or sysfs.
6134
+ */
6135
+ down_write(&hba->clk_scaling_lock);
6136
+ hba->force_pmc = true;
6137
+ pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6138
+ if (pmc_err) {
6139
+ needs_reset = true;
6140
+ dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6141
+ __func__, pmc_err);
6142
+ }
6143
+ hba->force_pmc = false;
6144
+ ufshcd_print_pwr_info(hba);
6145
+ up_write(&hba->clk_scaling_lock);
6146
+ spin_lock_irqsave(hba->host->host_lock, flags);
6147
+ }
6148
+
6149
+do_reset:
54496150 /* Fatal errors need reset */
54506151 if (needs_reset) {
5451
- unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5452
-
5453
- /*
5454
- * ufshcd_reset_and_restore() does the link reinitialization
5455
- * which will need atleast one empty doorbell slot to send the
5456
- * device management commands (NOP and query commands).
5457
- * If there is no slot empty at this moment then free up last
5458
- * slot forcefully.
5459
- */
5460
- if (hba->outstanding_reqs == max_doorbells)
5461
- __ufshcd_transfer_req_compl(hba,
5462
- (1UL << (hba->nutrs - 1)));
5463
-
6152
+ hba->force_reset = false;
54646153 spin_unlock_irqrestore(hba->host->host_lock, flags);
54656154 err = ufshcd_reset_and_restore(hba);
6155
+ if (err)
6156
+ dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6157
+ __func__, err);
6158
+ else
6159
+ ufshcd_recover_pm_error(hba);
54666160 spin_lock_irqsave(hba->host->host_lock, flags);
5467
- if (err) {
5468
- dev_err(hba->dev, "%s: reset and restore failed\n",
5469
- __func__);
5470
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
5471
- }
5472
- /*
5473
- * Inform scsi mid-layer that we did reset and allow to handle
5474
- * Unit Attention properly.
5475
- */
5476
- scsi_report_bus_reset(hba->host, 0);
5477
- hba->saved_err = 0;
5478
- hba->saved_uic_err = 0;
54796161 }
54806162
54816163 skip_err_handling:
54826164 if (!needs_reset) {
5483
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6165
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6166
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
54846167 if (hba->saved_err || hba->saved_uic_err)
54856168 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
54866169 __func__, hba->saved_err, hba->saved_uic_err);
54876170 }
5488
-
54896171 ufshcd_clear_eh_in_progress(hba);
5490
-
5491
-out:
54926172 spin_unlock_irqrestore(hba->host->host_lock, flags);
5493
- ufshcd_scsi_unblock_requests(hba);
5494
- ufshcd_release(hba);
5495
- pm_runtime_put_sync(hba->dev);
5496
-}
5497
-
5498
-static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5499
- u32 reg)
5500
-{
5501
- reg_hist->reg[reg_hist->pos] = reg;
5502
- reg_hist->tstamp[reg_hist->pos] = ktime_get();
5503
- reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
6173
+ ufshcd_err_handling_unprepare(hba);
6174
+ up(&hba->host_sem);
55046175 }
55056176
55066177 /**
55076178 * ufshcd_update_uic_error - check and set fatal UIC error flags.
55086179 * @hba: per-adapter instance
6180
+ *
6181
+ * Returns
6182
+ * IRQ_HANDLED - If interrupt is valid
6183
+ * IRQ_NONE - If invalid interrupt
55096184 */
5510
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
6185
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
55116186 {
55126187 u32 reg;
6188
+ irqreturn_t retval = IRQ_NONE;
55136189
5514
- /* PHY layer lane error */
6190
+ /* PHY layer error */
55156191 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5516
- /* Ignore LINERESET indication, as this is not an error */
55176192 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5518
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
6193
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6194
+ ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
55196195 /*
55206196 * To know whether this error is fatal or not, DB timeout
55216197 * must be checked but this error is handled separately.
55226198 */
5523
- dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5524
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
6199
+ if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6200
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6201
+ __func__);
6202
+
6203
+ /* Got a LINERESET indication. */
6204
+ if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6205
+ struct uic_command *cmd = NULL;
6206
+
6207
+ hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6208
+ if (hba->uic_async_done && hba->active_uic_cmd)
6209
+ cmd = hba->active_uic_cmd;
6210
+ /*
6211
+ * Ignore the LINERESET during power mode change
6212
+ * operation via DME_SET command.
6213
+ */
6214
+ if (cmd && (cmd->command == UIC_CMD_DME_SET))
6215
+ hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6216
+ }
6217
+ retval |= IRQ_HANDLED;
55256218 }
55266219
55276220 /* PA_INIT_ERROR is fatal and needs UIC reset */
55286221 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5529
- if (reg)
5530
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
6222
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6223
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6224
+ ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
55316225
5532
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5533
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5534
- else if (hba->dev_quirks &
5535
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5536
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5537
- hba->uic_error |=
5538
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5539
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5540
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6226
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6227
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6228
+ else if (hba->dev_quirks &
6229
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6230
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6231
+ hba->uic_error |=
6232
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6233
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6234
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6235
+ }
6236
+ retval |= IRQ_HANDLED;
55416237 }
55426238
55436239 /* UIC NL/TL/DME errors needs software retry */
55446240 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5545
- if (reg) {
5546
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
6241
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6242
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6243
+ ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
55476244 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6245
+ retval |= IRQ_HANDLED;
55486246 }
55496247
55506248 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5551
- if (reg) {
5552
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
6249
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6250
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6251
+ ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
55536252 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6253
+ retval |= IRQ_HANDLED;
55546254 }
55556255
55566256 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5557
- if (reg) {
5558
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
6257
+ if ((reg & UIC_DME_ERROR) &&
6258
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
6259
+ ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
55596260 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6261
+ retval |= IRQ_HANDLED;
55606262 }
55616263
55626264 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
55636265 __func__, hba->uic_error);
6266
+ return retval;
55646267 }
55656268
55666269 /**
55676270 * ufshcd_check_errors - Check for errors that need s/w attention
55686271 * @hba: per-adapter instance
6272
+ * @intr_status: interrupt status generated by the controller
6273
+ *
6274
+ * Returns
6275
+ * IRQ_HANDLED - If interrupt is valid
6276
+ * IRQ_NONE - If invalid interrupt
55696277 */
5570
-static void ufshcd_check_errors(struct ufs_hba *hba)
6278
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
55716279 {
55726280 bool queue_eh_work = false;
6281
+ irqreturn_t retval = IRQ_NONE;
55736282
5574
- if (hba->errors & INT_FATAL_ERRORS)
6283
+ spin_lock(hba->host->host_lock);
6284
+ hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6285
+
6286
+ if (hba->errors & INT_FATAL_ERRORS) {
6287
+ ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6288
+ hba->errors);
55756289 queue_eh_work = true;
6290
+ }
55766291
55776292 if (hba->errors & UIC_ERROR) {
55786293 hba->uic_error = 0;
5579
- ufshcd_update_uic_error(hba);
5580
- if (hba->uic_error)
6294
+ retval = ufshcd_update_uic_error(hba);
6295
+ if (hba->uic_error) {
6296
+ dev_err(hba->dev,
6297
+ "Scheduling error handler because of an UIC error\n");
55816298 queue_eh_work = true;
6299
+ }
55826300 }
6301
+
6302
+ if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6303
+ dev_err(hba->dev,
6304
+ "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6305
+ __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6306
+ "Enter" : "Exit",
6307
+ hba->errors, ufshcd_get_upmcrs(hba));
6308
+ ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6309
+ hba->errors);
6310
+ ufshcd_set_link_broken(hba);
6311
+ queue_eh_work = true;
6312
+ }
6313
+
6314
+ trace_android_vh_ufs_check_int_errors(hba, queue_eh_work);
55836315
55846316 if (queue_eh_work) {
55856317 /*
....@@ -5589,30 +6321,20 @@
55896321 hba->saved_err |= hba->errors;
55906322 hba->saved_uic_err |= hba->uic_error;
55916323
5592
- /* handle fatal errors only when link is functional */
5593
- if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5594
- /* block commands from scsi mid-layer */
5595
- ufshcd_scsi_block_requests(hba);
5596
-
5597
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5598
-
5599
- /* dump controller state before resetting */
5600
- if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5601
- bool pr_prdt = !!(hba->saved_err &
5602
- SYSTEM_BUS_FATAL_ERROR);
5603
-
5604
- dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6324
+ /* dump controller state before resetting */
6325
+ if ((hba->saved_err &
6326
+ (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6327
+ (hba->saved_uic_err &&
6328
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6329
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
56056330 __func__, hba->saved_err,
56066331 hba->saved_uic_err);
5607
-
5608
- ufshcd_print_host_regs(hba);
5609
- ufshcd_print_pwr_info(hba);
5610
- ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5611
- ufshcd_print_trs(hba, hba->outstanding_reqs,
5612
- pr_prdt);
5613
- }
5614
- schedule_work(&hba->eh_work);
6332
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6333
+ "host_regs: ");
6334
+ ufshcd_print_pwr_info(hba);
56156335 }
6336
+ ufshcd_schedule_eh_work(hba);
6337
+ retval |= IRQ_HANDLED;
56166338 }
56176339 /*
56186340 * if (!queue_eh_work) -
....@@ -5620,40 +6342,68 @@
56206342 * itself without s/w intervention or errors that will be
56216343 * handled by the SCSI core layer.
56226344 */
6345
+ hba->errors = 0;
6346
+ hba->uic_error = 0;
6347
+ spin_unlock(hba->host->host_lock);
6348
+ return retval;
56236349 }
56246350
56256351 /**
56266352 * ufshcd_tmc_handler - handle task management function completion
56276353 * @hba: per adapter instance
6354
+ *
6355
+ * Returns
6356
+ * IRQ_HANDLED - If interrupt is valid
6357
+ * IRQ_NONE - If invalid interrupt
56286358 */
5629
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
6359
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
56306360 {
5631
- u32 tm_doorbell;
6361
+ struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs;
6362
+ unsigned long flags, pending, issued;
6363
+ irqreturn_t ret = IRQ_NONE;
6364
+ int tag;
56326365
5633
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5634
- hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5635
- wake_up(&hba->tm_wq);
6366
+ spin_lock_irqsave(hba->host->host_lock, flags);
6367
+ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6368
+ issued = hba->outstanding_tasks & ~pending;
6369
+ for_each_set_bit(tag, &issued, hba->nutmrs) {
6370
+ struct request *req = tmf_rqs[tag];
6371
+ struct completion *c = req->end_io_data;
6372
+
6373
+ complete(c);
6374
+ ret = IRQ_HANDLED;
6375
+ }
6376
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6377
+
6378
+ return ret;
56366379 }
56376380
56386381 /**
56396382 * ufshcd_sl_intr - Interrupt service routine
56406383 * @hba: per adapter instance
56416384 * @intr_status: contains interrupts generated by the controller
6385
+ *
6386
+ * Returns
6387
+ * IRQ_HANDLED - If interrupt is valid
6388
+ * IRQ_NONE - If invalid interrupt
56426389 */
5643
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6390
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
56446391 {
5645
- hba->errors = UFSHCD_ERROR_MASK & intr_status;
5646
- if (hba->errors)
5647
- ufshcd_check_errors(hba);
6392
+ irqreturn_t retval = IRQ_NONE;
56486393
56496394 if (intr_status & UFSHCD_UIC_MASK)
5650
- ufshcd_uic_cmd_compl(hba, intr_status);
6395
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6396
+
6397
+ if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6398
+ retval |= ufshcd_check_errors(hba, intr_status);
56516399
56526400 if (intr_status & UTP_TASK_REQ_COMPL)
5653
- ufshcd_tmc_handler(hba);
6401
+ retval |= ufshcd_tmc_handler(hba);
56546402
56556403 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5656
- ufshcd_transfer_req_compl(hba);
6404
+ retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
6405
+
6406
+ return retval;
56576407 }
56586408
56596409 /**
....@@ -5661,8 +6411,9 @@
56616411 * @irq: irq number
56626412 * @__hba: pointer to adapter instance
56636413 *
5664
- * Returns IRQ_HANDLED - If interrupt is valid
5665
- * IRQ_NONE - If invalid interrupt
6414
+ * Returns
6415
+ * IRQ_HANDLED - If interrupt is valid
6416
+ * IRQ_NONE - If invalid interrupt
56666417 */
56676418 static irqreturn_t ufshcd_intr(int irq, void *__hba)
56686419 {
....@@ -5671,8 +6422,9 @@
56716422 struct ufs_hba *hba = __hba;
56726423 int retries = hba->nutrs;
56736424
5674
- spin_lock(hba->host->host_lock);
56756425 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6426
+ hba->ufs_stats.last_intr_status = intr_status;
6427
+ hba->ufs_stats.last_intr_ts = ktime_get();
56766428
56776429 /*
56786430 * There could be max of hba->nutrs reqs in flight and in worst case
....@@ -5685,15 +6437,22 @@
56856437 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
56866438 if (intr_status)
56876439 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5688
- if (enabled_intr_status) {
5689
- ufshcd_sl_intr(hba, enabled_intr_status);
5690
- retval = IRQ_HANDLED;
5691
- }
6440
+ if (enabled_intr_status)
6441
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
56926442
56936443 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
56946444 }
56956445
5696
- spin_unlock(hba->host->host_lock);
6446
+ if (enabled_intr_status && retval == IRQ_NONE &&
6447
+ !ufshcd_eh_in_progress(hba)) {
6448
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6449
+ __func__,
6450
+ intr_status,
6451
+ hba->ufs_stats.last_intr_status,
6452
+ enabled_intr_status);
6453
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6454
+ }
6455
+
56976456 return retval;
56986457 }
56996458
....@@ -5713,8 +6472,81 @@
57136472 /* poll for max. 1 sec to clear door bell register by h/w */
57146473 err = ufshcd_wait_for_register(hba,
57156474 REG_UTP_TASK_REQ_DOOR_BELL,
5716
- mask, 0, 1000, 1000, true);
6475
+ mask, 0, 1000, 1000);
57176476 out:
6477
+ return err;
6478
+}
6479
+
6480
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6481
+ struct utp_task_req_desc *treq, u8 tm_function)
6482
+{
6483
+ struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs;
6484
+ struct request_queue *q = hba->tmf_queue;
6485
+ struct Scsi_Host *host = hba->host;
6486
+ DECLARE_COMPLETION_ONSTACK(wait);
6487
+ struct request *req;
6488
+ unsigned long flags;
6489
+ int task_tag, err;
6490
+
6491
+ /*
6492
+ * blk_get_request() is used here only to get a free tag.
6493
+ */
6494
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6495
+ if (IS_ERR(req))
6496
+ return PTR_ERR(req);
6497
+
6498
+ req->end_io_data = &wait;
6499
+ ufshcd_hold(hba, false);
6500
+
6501
+ spin_lock_irqsave(host->host_lock, flags);
6502
+
6503
+ task_tag = req->tag;
6504
+ tmf_rqs[req->tag] = req;
6505
+ treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6506
+
6507
+ memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6508
+ ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6509
+
6510
+ /* send command to the controller */
6511
+ __set_bit(task_tag, &hba->outstanding_tasks);
6512
+
6513
+ /* Make sure descriptors are ready before ringing the task doorbell */
6514
+ wmb();
6515
+
6516
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6517
+ /* Make sure that doorbell is committed immediately */
6518
+ wmb();
6519
+
6520
+ spin_unlock_irqrestore(host->host_lock, flags);
6521
+
6522
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6523
+
6524
+ /* wait until the task management command is completed */
6525
+ err = wait_for_completion_io_timeout(&wait,
6526
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
6527
+ if (!err) {
6528
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6529
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6530
+ __func__, tm_function);
6531
+ if (ufshcd_clear_tm_cmd(hba, task_tag))
6532
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6533
+ __func__, task_tag);
6534
+ err = -ETIMEDOUT;
6535
+ } else {
6536
+ err = 0;
6537
+ memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6538
+
6539
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6540
+ }
6541
+
6542
+ spin_lock_irqsave(hba->host->host_lock, flags);
6543
+ tmf_rqs[req->tag] = NULL;
6544
+ __clear_bit(task_tag, &hba->outstanding_tasks);
6545
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6546
+
6547
+ ufshcd_release(hba);
6548
+ blk_put_request(req);
6549
+
57186550 return err;
57196551 }
57206552
....@@ -5731,87 +6563,212 @@
57316563 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
57326564 u8 tm_function, u8 *tm_response)
57336565 {
5734
- struct utp_task_req_desc *task_req_descp;
5735
- struct utp_upiu_task_req *task_req_upiup;
5736
- struct Scsi_Host *host;
5737
- unsigned long flags;
5738
- int free_slot;
5739
- int err;
5740
- int task_tag;
5741
-
5742
- host = hba->host;
5743
-
5744
- /*
5745
- * Get free slot, sleep if slots are unavailable.
5746
- * Even though we use wait_event() which sleeps indefinitely,
5747
- * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5748
- */
5749
- wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5750
- ufshcd_hold(hba, false);
5751
-
5752
- spin_lock_irqsave(host->host_lock, flags);
5753
- task_req_descp = hba->utmrdl_base_addr;
5754
- task_req_descp += free_slot;
6566
+ struct utp_task_req_desc treq = { { 0 }, };
6567
+ int ocs_value, err;
57556568
57566569 /* Configure task request descriptor */
5757
- task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5758
- task_req_descp->header.dword_2 =
5759
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6570
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6571
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
57606572
57616573 /* Configure task request UPIU */
5762
- task_req_upiup =
5763
- (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
5764
- task_tag = hba->nutrs + free_slot;
5765
- task_req_upiup->header.dword_0 =
5766
- UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5767
- lun_id, task_tag);
5768
- task_req_upiup->header.dword_1 =
5769
- UPIU_HEADER_DWORD(0, tm_function, 0, 0);
6574
+ treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6575
+ cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6576
+ treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6577
+
57706578 /*
57716579 * The host shall provide the same value for LUN field in the basic
57726580 * header and for Input Parameter.
57736581 */
5774
- task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5775
- task_req_upiup->input_param2 = cpu_to_be32(task_id);
6582
+ treq.input_param1 = cpu_to_be32(lun_id);
6583
+ treq.input_param2 = cpu_to_be32(task_id);
57766584
5777
- ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
6585
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6586
+ if (err == -ETIMEDOUT)
6587
+ return err;
57786588
5779
- /* send command to the controller */
5780
- __set_bit(free_slot, &hba->outstanding_tasks);
6589
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6590
+ if (ocs_value != OCS_SUCCESS)
6591
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6592
+ __func__, ocs_value);
6593
+ else if (tm_response)
6594
+ *tm_response = be32_to_cpu(treq.output_param1) &
6595
+ MASK_TM_SERVICE_RESP;
6596
+ return err;
6597
+}
57816598
5782
- /* Make sure descriptors are ready before ringing the task doorbell */
5783
- wmb();
6599
+/**
6600
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6601
+ * @hba: per-adapter instance
6602
+ * @req_upiu: upiu request
6603
+ * @rsp_upiu: upiu reply
6604
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
6605
+ * @buff_len: descriptor size, 0 if NA
6606
+ * @cmd_type: specifies the type (NOP, Query...)
6607
+ * @desc_op: descriptor operation
6608
+ *
6609
+ * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6610
+ * They therefore "ride" the device management infrastructure: they use its tag
6611
+ * and task work queues.
6612
+ *
6613
+ * Since there is only one available tag for device management commands,
6614
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
6615
+ */
6616
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6617
+ struct utp_upiu_req *req_upiu,
6618
+ struct utp_upiu_req *rsp_upiu,
6619
+ u8 *desc_buff, int *buff_len,
6620
+ enum dev_cmd_type cmd_type,
6621
+ enum query_opcode desc_op)
6622
+{
6623
+ DECLARE_COMPLETION_ONSTACK(wait);
6624
+ const u32 tag = ufs_hba_add_info(hba)->reserved_slot;
6625
+ struct ufshcd_lrb *lrbp;
6626
+ int err = 0;
6627
+ u8 upiu_flags;
57846628
5785
- ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5786
- /* Make sure that doorbell is committed immediately */
5787
- wmb();
6629
+ /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */
6630
+ lockdep_assert_held(&hba->dev_cmd.lock);
57886631
5789
- spin_unlock_irqrestore(host->host_lock, flags);
6632
+ down_read(&hba->clk_scaling_lock);
57906633
5791
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6634
+ lrbp = &hba->lrb[tag];
6635
+ WARN_ON(lrbp->cmd);
6636
+ lrbp->cmd = NULL;
6637
+ lrbp->sense_bufflen = 0;
6638
+ lrbp->sense_buffer = NULL;
6639
+ lrbp->task_tag = tag;
6640
+ lrbp->lun = 0;
6641
+ lrbp->intr_cmd = true;
6642
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6643
+ hba->dev_cmd.type = cmd_type;
57926644
5793
- /* wait until the task management command is completed */
5794
- err = wait_event_timeout(hba->tm_wq,
5795
- test_bit(free_slot, &hba->tm_condition),
5796
- msecs_to_jiffies(TM_CMD_TIMEOUT));
5797
- if (!err) {
5798
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5799
- dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5800
- __func__, tm_function);
5801
- if (ufshcd_clear_tm_cmd(hba, free_slot))
5802
- dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
5803
- __func__, free_slot);
5804
- err = -ETIMEDOUT;
5805
- } else {
5806
- err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5807
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6645
+ if (hba->ufs_version <= ufshci_version(1, 1))
6646
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6647
+ else
6648
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6649
+
6650
+ /* update the task tag in the request upiu */
6651
+ req_upiu->header.dword_0 |= cpu_to_be32(tag);
6652
+
6653
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6654
+
6655
+ /* just copy the upiu request as it is */
6656
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6657
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6658
+ /* The Data Segment Area is optional depending upon the query
6659
+ * function value. For WRITE DESCRIPTOR, the data segment
6660
+ * follows right after the tsf.
6661
+ */
6662
+ memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6663
+ *buff_len = 0;
58086664 }
58096665
5810
- clear_bit(free_slot, &hba->tm_condition);
5811
- ufshcd_put_tm_slot(hba, free_slot);
5812
- wake_up(&hba->tm_tag_wq);
6666
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
58136667
5814
- ufshcd_release(hba);
6668
+ hba->dev_cmd.complete = &wait;
6669
+
6670
+ /* Make sure descriptors are ready before ringing the doorbell */
6671
+ wmb();
6672
+
6673
+ ufshcd_send_command(hba, tag);
6674
+ /*
6675
+ * ignore the return value here - ufshcd_check_query_response is
6676
+ * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6677
+ * read the response directly ignoring all errors.
6678
+ */
6679
+ ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6680
+
6681
+ /* just copy the upiu response as it is */
6682
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6683
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6684
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6685
+ u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6686
+ MASK_QUERY_DATA_SEG_LEN;
6687
+
6688
+ if (*buff_len >= resp_len) {
6689
+ memcpy(desc_buff, descp, resp_len);
6690
+ *buff_len = resp_len;
6691
+ } else {
6692
+ dev_warn(hba->dev,
6693
+ "%s: rsp size %d is bigger than buffer size %d",
6694
+ __func__, resp_len, *buff_len);
6695
+ *buff_len = 0;
6696
+ err = -EINVAL;
6697
+ }
6698
+ }
6699
+
6700
+ up_read(&hba->clk_scaling_lock);
6701
+ return err;
6702
+}
6703
+
6704
+/**
6705
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6706
+ * @hba: per-adapter instance
6707
+ * @req_upiu: upiu request
6708
+ * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6709
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6710
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
6711
+ * @buff_len: descriptor size, 0 if NA
6712
+ * @desc_op: descriptor operation
6713
+ *
6714
+ * Supports UTP Transfer requests (nop and query), and UTP Task
6715
+ * Management requests.
6716
+ * It is up to the caller to fill the upiu content properly, as it will
6717
+ * be copied without any further input validations.
6718
+ */
6719
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6720
+ struct utp_upiu_req *req_upiu,
6721
+ struct utp_upiu_req *rsp_upiu,
6722
+ int msgcode,
6723
+ u8 *desc_buff, int *buff_len,
6724
+ enum query_opcode desc_op)
6725
+{
6726
+ int err;
6727
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6728
+ struct utp_task_req_desc treq = { { 0 }, };
6729
+ int ocs_value;
6730
+ u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6731
+
6732
+ switch (msgcode) {
6733
+ case UPIU_TRANSACTION_NOP_OUT:
6734
+ cmd_type = DEV_CMD_TYPE_NOP;
6735
+ fallthrough;
6736
+ case UPIU_TRANSACTION_QUERY_REQ:
6737
+ ufshcd_hold(hba, false);
6738
+ mutex_lock(&hba->dev_cmd.lock);
6739
+ err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6740
+ desc_buff, buff_len,
6741
+ cmd_type, desc_op);
6742
+ mutex_unlock(&hba->dev_cmd.lock);
6743
+ ufshcd_release(hba);
6744
+
6745
+ break;
6746
+ case UPIU_TRANSACTION_TASK_REQ:
6747
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6748
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6749
+
6750
+ memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6751
+
6752
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6753
+ if (err == -ETIMEDOUT)
6754
+ break;
6755
+
6756
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6757
+ if (ocs_value != OCS_SUCCESS) {
6758
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6759
+ ocs_value);
6760
+ break;
6761
+ }
6762
+
6763
+ memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6764
+
6765
+ break;
6766
+ default:
6767
+ err = -EINVAL;
6768
+
6769
+ break;
6770
+ }
6771
+
58156772 return err;
58166773 }
58176774
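
For context, a minimal caller-side sketch of the raw UPIU interface added above: sending a NOP OUT and letting the core copy back the response. This is an illustration only (not part of this change); UPIU_HEADER_DWORD() and the transaction/opcode constants are assumed to come from ufs.h, and error handling is elided.

static int example_send_nop_out(struct ufs_hba *hba)
{
	struct utp_upiu_req req_upiu = {};
	struct utp_upiu_req rsp_upiu = {};
	int buff_len = 0;

	/* Only the transaction type matters for a NOP OUT; the task tag is
	 * patched in by ufshcd_issue_devman_upiu_cmd() before submission. */
	req_upiu.header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0);

	/* No descriptor payload: desc_buff is NULL and buff_len is 0. */
	return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, &buff_len,
					UPIU_QUERY_OPCODE_NOP);
}

As in the switch above, a NOP OUT message code selects DEV_CMD_TYPE_NOP and is issued through ufshcd_issue_devman_upiu_cmd() with hba->dev_cmd.lock held.
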
....@@ -5829,7 +6786,6 @@
58296786 u32 pos;
58306787 int err;
58316788 u8 resp = 0xF, lun;
5832
- unsigned long flags;
58336789
58346790 host = cmd->device->host;
58356791 hba = shost_priv(host);
....@@ -5848,14 +6804,13 @@
58486804 err = ufshcd_clear_cmd(hba, pos);
58496805 if (err)
58506806 break;
6807
+ __ufshcd_transfer_req_compl(hba, 1U << pos);
58516808 }
58526809 }
5853
- spin_lock_irqsave(host->host_lock, flags);
5854
- ufshcd_transfer_req_compl(hba);
5855
- spin_unlock_irqrestore(host->host_lock, flags);
58566810
58576811 out:
58586812 hba->req_abort_count = 0;
6813
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
58596814 if (!err) {
58606815 err = SUCCESS;
58616816 } else {
....@@ -5877,8 +6832,9 @@
58776832 }
58786833
58796834 /**
5880
- * ufshcd_abort - abort a specific command
5881
- * @cmd: SCSI command pointer
6835
+ * ufshcd_try_to_abort_task - abort a specific task
6836
+ * @hba: Pointer to adapter instance
6837
+ * @tag: Task tag/index to be aborted
58826838 *
58836839 * Abort the pending command in device by sending UFS_ABORT_TASK task management
58846840 * command, and in host controller by clearing the door-bell register. There can
....@@ -5886,83 +6842,15 @@
58866842 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
58876843 * really issued and then try to abort it.
58886844 *
5889
- * Returns SUCCESS/FAILED
6845
+ * Returns zero on success, non-zero on failure
58906846 */
5891
-static int ufshcd_abort(struct scsi_cmnd *cmd)
6847
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
58926848 {
5893
- struct Scsi_Host *host;
5894
- struct ufs_hba *hba;
5895
- unsigned long flags;
5896
- unsigned int tag;
6849
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
58976850 int err = 0;
58986851 int poll_cnt;
58996852 u8 resp = 0xF;
5900
- struct ufshcd_lrb *lrbp;
59016853 u32 reg;
5902
-
5903
- host = cmd->device->host;
5904
- hba = shost_priv(host);
5905
- tag = cmd->request->tag;
5906
- lrbp = &hba->lrb[tag];
5907
- if (!ufshcd_valid_tag(hba, tag)) {
5908
- dev_err(hba->dev,
5909
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5910
- __func__, tag, cmd, cmd->request);
5911
- BUG();
5912
- }
5913
-
5914
- /*
5915
- * Task abort to the device W-LUN is illegal. When this command
5916
- * will fail, due to spec violation, scsi err handling next step
5917
- * will be to send LU reset which, again, is a spec violation.
5918
- * To avoid these unnecessary/illegal step we skip to the last error
5919
- * handling stage: reset and restore.
5920
- */
5921
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5922
- return ufshcd_eh_host_reset_handler(cmd);
5923
-
5924
- ufshcd_hold(hba, false);
5925
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5926
- /* If command is already aborted/completed, return SUCCESS */
5927
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
5928
- dev_err(hba->dev,
5929
- "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5930
- __func__, tag, hba->outstanding_reqs, reg);
5931
- goto out;
5932
- }
5933
-
5934
- if (!(reg & (1 << tag))) {
5935
- dev_err(hba->dev,
5936
- "%s: cmd was completed, but without a notifying intr, tag = %d",
5937
- __func__, tag);
5938
- }
5939
-
5940
- /* Print Transfer Request of aborted task */
5941
- dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5942
-
5943
- /*
5944
- * Print detailed info about aborted request.
5945
- * As more than one request might get aborted at the same time,
5946
- * print full information only for the first aborted request in order
5947
- * to reduce repeated printouts. For other aborted requests only print
5948
- * basic details.
5949
- */
5950
- scsi_print_command(hba->lrb[tag].cmd);
5951
- if (!hba->req_abort_count) {
5952
- ufshcd_print_host_regs(hba);
5953
- ufshcd_print_host_state(hba);
5954
- ufshcd_print_pwr_info(hba);
5955
- ufshcd_print_trs(hba, 1 << tag, true);
5956
- } else {
5957
- ufshcd_print_trs(hba, 1 << tag, false);
5958
- }
5959
- hba->req_abort_count++;
5960
-
5961
- /* Skip task abort in case previous aborts failed and report failure */
5962
- if (lrbp->req_abort_skip) {
5963
- err = -EIO;
5964
- goto out;
5965
- }
59666854
59676855 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
59686856 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
....@@ -5988,7 +6876,7 @@
59886876 /* command completed already */
59896877 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
59906878 __func__, tag);
5991
- goto cleanup;
6879
+ goto out;
59926880 } else {
59936881 dev_err(hba->dev,
59946882 "%s: no response from device. tag = %d, err %d\n",
....@@ -6016,36 +6904,129 @@
60166904 }
60176905
60186906 err = ufshcd_clear_cmd(hba, tag);
6019
- if (err) {
6907
+ if (err)
60206908 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
60216909 __func__, tag, err);
6022
- goto out;
6023
- }
6024
-
6025
-cleanup:
6026
- scsi_dma_unmap(cmd);
6027
-
6028
- spin_lock_irqsave(host->host_lock, flags);
6029
- ufshcd_outstanding_req_clear(hba, tag);
6030
- hba->lrb[tag].cmd = NULL;
6031
- spin_unlock_irqrestore(host->host_lock, flags);
6032
-
6033
- clear_bit_unlock(tag, &hba->lrb_in_use);
6034
- wake_up(&hba->dev_cmd.tag_wq);
60356910
60366911 out:
6037
- if (!err) {
6038
- err = SUCCESS;
6912
+ return err;
6913
+}
6914
+
6915
+/**
6916
+ * ufshcd_abort - scsi host template eh_abort_handler callback
6917
+ * @cmd: SCSI command pointer
6918
+ *
6919
+ * Returns SUCCESS/FAILED
6920
+ */
6921
+static int ufshcd_abort(struct scsi_cmnd *cmd)
6922
+{
6923
+ struct Scsi_Host *host;
6924
+ struct ufs_hba *hba;
6925
+ unsigned long flags;
6926
+ unsigned int tag;
6927
+ int err = FAILED, res;
6928
+ bool outstanding;
6929
+ struct ufshcd_lrb *lrbp;
6930
+ u32 reg;
6931
+
6932
+ host = cmd->device->host;
6933
+ hba = shost_priv(host);
6934
+ tag = cmd->request->tag;
6935
+ lrbp = &hba->lrb[tag];
6936
+ if (!ufshcd_valid_tag(hba, tag)) {
6937
+ dev_err(hba->dev,
6938
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6939
+ __func__, tag, cmd, cmd->request);
6940
+ BUG();
6941
+ }
6942
+
6943
+ ufshcd_hold(hba, false);
6944
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6945
+ /* If command is already aborted/completed, return FAILED. */
6946
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
6947
+ dev_err(hba->dev,
6948
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6949
+ __func__, tag, hba->outstanding_reqs, reg);
6950
+ goto release;
6951
+ }
6952
+
6953
+ /* Print Transfer Request of aborted task */
6954
+ dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6955
+
6956
+ /*
6957
+ * Print detailed info about aborted request.
6958
+ * As more than one request might get aborted at the same time,
6959
+ * print full information only for the first aborted request in order
6960
+ * to reduce repeated printouts. For other aborted requests only print
6961
+ * basic details.
6962
+ */
6963
+ scsi_print_command(cmd);
6964
+ if (!hba->req_abort_count) {
6965
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
6966
+ ufshcd_print_evt_hist(hba);
6967
+ ufshcd_print_host_state(hba);
6968
+ ufshcd_print_pwr_info(hba);
6969
+ ufshcd_print_trs(hba, 1 << tag, true);
60396970 } else {
6040
- dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6041
- ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6042
- err = FAILED;
6971
+ ufshcd_print_trs(hba, 1 << tag, false);
6972
+ }
6973
+ hba->req_abort_count++;
6974
+
6975
+ if (!(reg & (1 << tag))) {
6976
+ dev_err(hba->dev,
6977
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
6978
+ __func__, tag);
6979
+ __ufshcd_transfer_req_compl(hba, 1UL << tag);
6980
+ goto release;
60436981 }
60446982
60456983 /*
6046
- * This ufshcd_release() corresponds to the original scsi cmd that got
6047
- * aborted here (as we won't get any IRQ for it).
6984
+ * Task abort to the device W-LUN is illegal. When this command
6985
+ * will fail, due to spec violation, scsi err handling next step
6986
+ * will be to send LU reset which, again, is a spec violation.
6987
+ * To avoid these unnecessary/illegal steps, first we clean up
6988
+ * the lrb taken by this cmd and re-set it in outstanding_reqs,
6989
+ * then queue the eh_work and bail.
60486990 */
6991
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
6992
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
6993
+
6994
+ spin_lock_irqsave(host->host_lock, flags);
6995
+ hba->force_reset = true;
6996
+ ufshcd_schedule_eh_work(hba);
6997
+ spin_unlock_irqrestore(host->host_lock, flags);
6998
+ goto release;
6999
+ }
7000
+
7001
+ /* Skip task abort in case previous aborts failed and report failure */
7002
+ if (lrbp->req_abort_skip) {
7003
+ dev_err(hba->dev, "%s: skipping abort\n", __func__);
7004
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7005
+ goto release;
7006
+ }
7007
+
7008
+ res = ufshcd_try_to_abort_task(hba, tag);
7009
+ if (res) {
7010
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, res);
7011
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7012
+ goto release;
7013
+ }
7014
+
7015
+ /*
7016
+ * Clear the corresponding bit from outstanding_reqs since the command
7017
+ * has been aborted successfully.
7018
+ */
7019
+ spin_lock_irqsave(host->host_lock, flags);
7020
+ outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7021
+ spin_unlock_irqrestore(host->host_lock, flags);
7022
+
7023
+ if (outstanding)
7024
+ ufshcd_release_scsi_cmd(hba, lrbp);
7025
+
7026
+ err = SUCCESS;
7027
+
7028
+release:
7029
+ /* Matches the ufshcd_hold() call at the start of this function. */
60497030 ufshcd_release(hba);
60507031 return err;
60517032 }
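
The tail of ufshcd_abort() above follows a simple ownership rule: only the context that actually clears the tag's bit in outstanding_reqs may free the command, so an abort racing with a late completion interrupt cannot finish the same request twice. The idiom in isolation, as a sketch (illustration only, using the names from this patch):

static void example_finish_aborted_cmd(struct ufs_hba *hba, int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	unsigned long flags;
	bool owned;

	/* Atomically claim the still-outstanding request under host_lock. */
	spin_lock_irqsave(hba->host->host_lock, flags);
	owned = __test_and_clear_bit(tag, &hba->outstanding_reqs);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Only the claimant releases the SCSI command, exactly once. */
	if (owned)
		ufshcd_release_scsi_cmd(hba, lrbp);
}
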
....@@ -6063,35 +7044,29 @@
60637044 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
60647045 {
60657046 int err;
6066
- unsigned long flags;
60677047
7048
+ ufshpb_reset_host(hba);
60687049 /*
60697050 * Stop the host controller and complete the requests
60707051 * cleared by h/w
60717052 */
6072
- spin_lock_irqsave(hba->host->host_lock, flags);
6073
- ufshcd_hba_stop(hba, false);
7053
+ ufshcd_hba_stop(hba);
60747054 hba->silence_err_logs = true;
60757055 ufshcd_complete_requests(hba);
60767056 hba->silence_err_logs = false;
6077
- spin_unlock_irqrestore(hba->host->host_lock, flags);
60787057
60797058 /* scale up clocks to max frequency before full reinitialization */
6080
- ufshcd_scale_clks(hba, true);
7059
+ ufshcd_set_clk_freq(hba, true);
60817060
60827061 err = ufshcd_hba_enable(hba);
6083
- if (err)
6084
- goto out;
60857062
60867063 /* Establish the link again and restore the device */
6087
- err = ufshcd_probe_hba(hba);
7064
+ if (!err)
7065
+ err = ufshcd_probe_hba(hba, false);
60887066
6089
- if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6090
- err = -EIO;
6091
-out:
60927067 if (err)
60937068 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6094
-
7069
+ ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
60957070 return err;
60967071 }
60977072
....@@ -6106,12 +7081,42 @@
61067081 */
61077082 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
61087083 {
7084
+ u32 saved_err;
7085
+ u32 saved_uic_err;
61097086 int err = 0;
7087
+ unsigned long flags;
61107088 int retries = MAX_HOST_RESET_RETRIES;
61117089
7090
+ /*
7091
+ * This is a fresh start, cache and clear saved error first,
7092
+ * in case new error generated during reset and restore.
7093
+ */
7094
+ spin_lock_irqsave(hba->host->host_lock, flags);
7095
+ saved_err = hba->saved_err;
7096
+ saved_uic_err = hba->saved_uic_err;
7097
+ hba->saved_err = 0;
7098
+ hba->saved_uic_err = 0;
7099
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
7100
+
61127101 do {
7102
+ /* Reset the attached device */
7103
+ ufshcd_vops_device_reset(hba);
7104
+
61137105 err = ufshcd_host_reset_and_restore(hba);
61147106 } while (err && --retries);
7107
+
7108
+ spin_lock_irqsave(hba->host->host_lock, flags);
7109
+ /*
7110
+ * Inform scsi mid-layer that we did reset and allow to handle
7111
+ * Unit Attention properly.
7112
+ */
7113
+ scsi_report_bus_reset(hba->host, 0);
7114
+ if (err) {
7115
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
7116
+ hba->saved_err |= saved_err;
7117
+ hba->saved_uic_err |= saved_uic_err;
7118
+ }
7119
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
61157120
61167121 return err;
61177122 }
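
Each pass of the retry loop above now asserts a full device reset through the vendor hook (ufshcd_vops_device_reset()) before re-running host reset and restore. On many boards that hook amounts to pulsing a reset line; a hedged, platform-agnostic sketch of such a pulse (the GPIO descriptor and hold times are assumptions, not taken from this driver):

#include <linux/gpio/consumer.h>
#include <linux/delay.h>

/* Illustration only: pulse a UFS device reset GPIO the way a vendor
 * device-reset hook might. Actual timings come from the device datasheet. */
static void example_pulse_ufs_device_reset(struct gpio_desc *reset_gpio)
{
	gpiod_set_value_cansleep(reset_gpio, 1);	/* assert reset */
	usleep_range(10, 15);				/* hold */
	gpiod_set_value_cansleep(reset_gpio, 0);	/* release reset */
	usleep_range(10, 15);				/* settle before re-init */
}
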
....@@ -6124,48 +7129,25 @@
61247129 */
61257130 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
61267131 {
6127
- int err;
7132
+ int err = SUCCESS;
61287133 unsigned long flags;
61297134 struct ufs_hba *hba;
61307135
61317136 hba = shost_priv(cmd->device->host);
61327137
6133
- ufshcd_hold(hba, false);
6134
- /*
6135
- * Check if there is any race with fatal error handling.
6136
- * If so, wait for it to complete. Even though fatal error
6137
- * handling does reset and restore in some cases, don't assume
6138
- * anything out of it. We are just avoiding race here.
6139
- */
6140
- do {
6141
- spin_lock_irqsave(hba->host->host_lock, flags);
6142
- if (!(work_pending(&hba->eh_work) ||
6143
- hba->ufshcd_state == UFSHCD_STATE_RESET ||
6144
- hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6145
- break;
6146
- spin_unlock_irqrestore(hba->host->host_lock, flags);
6147
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6148
- flush_work(&hba->eh_work);
6149
- } while (1);
6150
-
6151
- hba->ufshcd_state = UFSHCD_STATE_RESET;
6152
- ufshcd_set_eh_in_progress(hba);
7138
+ spin_lock_irqsave(hba->host->host_lock, flags);
7139
+ hba->force_reset = true;
7140
+ ufshcd_schedule_eh_work(hba);
7141
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
61537142 spin_unlock_irqrestore(hba->host->host_lock, flags);
61547143
6155
- err = ufshcd_reset_and_restore(hba);
7144
+ flush_work(&hba->eh_work);
61567145
61577146 spin_lock_irqsave(hba->host->host_lock, flags);
6158
- if (!err) {
6159
- err = SUCCESS;
6160
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6161
- } else {
7147
+ if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
61627148 err = FAILED;
6163
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
6164
- }
6165
- ufshcd_clear_eh_in_progress(hba);
61667149 spin_unlock_irqrestore(hba->host->host_lock, flags);
61677150
6168
- ufshcd_release(hba);
61697151 return err;
61707152 }
61717153
....@@ -6257,17 +7239,19 @@
62577239 return icc_level;
62587240 }
62597241
6260
-static void ufshcd_init_icc_levels(struct ufs_hba *hba)
7242
+static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
62617243 {
62627244 int ret;
6263
- int buff_len = hba->desc_size.pwr_desc;
7245
+ int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
62647246 u8 *desc_buf;
7247
+ u32 icc_level;
62657248
62667249 desc_buf = kmalloc(buff_len, GFP_KERNEL);
62677250 if (!desc_buf)
62687251 return;
62697252
6270
- ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
7253
+ ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7254
+ desc_buf, buff_len);
62717255 if (ret) {
62727256 dev_err(hba->dev,
62737257 "%s: Failed reading power descriptor.len = %d ret = %d",
....@@ -6275,20 +7259,17 @@
62757259 goto out;
62767260 }
62777261
6278
- hba->init_prefetch_data.icc_level =
6279
- ufshcd_find_max_sup_active_icc_level(hba,
6280
- desc_buf, buff_len);
6281
- dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6282
- __func__, hba->init_prefetch_data.icc_level);
7262
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7263
+ buff_len);
7264
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
62837265
62847266 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6285
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6286
- &hba->init_prefetch_data.icc_level);
7267
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
62877268
62887269 if (ret)
62897270 dev_err(hba->dev,
62907271 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6291
- __func__, hba->init_prefetch_data.icc_level , ret);
7272
+ __func__, icc_level, ret);
62927273
62937274 out:
62947275 kfree(desc_buf);
....@@ -6323,7 +7304,6 @@
63237304 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
63247305 {
63257306 int ret = 0;
6326
- struct scsi_device *sdev_rpmb;
63277307 struct scsi_device *sdev_boot;
63287308
63297309 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
....@@ -6335,13 +7315,13 @@
63357315 }
63367316 scsi_device_put(hba->sdev_ufs_device);
63377317
6338
- sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7318
+ hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
63397319 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6340
- if (IS_ERR(sdev_rpmb)) {
6341
- ret = PTR_ERR(sdev_rpmb);
7320
+ if (IS_ERR(hba->sdev_rpmb)) {
7321
+ ret = PTR_ERR(hba->sdev_rpmb);
63427322 goto remove_sdev_ufs_device;
63437323 }
6344
- scsi_device_put(sdev_rpmb);
7324
+ scsi_device_put(hba->sdev_rpmb);
63457325
63467326 sdev_boot = __scsi_add_device(hba->host, 0, 0,
63477327 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
....@@ -6357,23 +7337,120 @@
63577337 return ret;
63587338 }
63597339
6360
-static int ufs_get_device_desc(struct ufs_hba *hba,
6361
- struct ufs_dev_desc *dev_desc)
7340
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7341
+{
7342
+ struct ufs_dev_info *dev_info = &hba->dev_info;
7343
+ u8 lun;
7344
+ u32 d_lu_wb_buf_alloc;
7345
+
7346
+ if (!ufshcd_is_wb_allowed(hba))
7347
+ return;
7348
+ /*
7349
+ * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7350
+ * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7351
+ * enabled
7352
+ */
7353
+ if (!(dev_info->wspecversion >= 0x310 ||
7354
+ dev_info->wspecversion == 0x220 ||
7355
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7356
+ goto wb_disabled;
7357
+
7358
+ if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7359
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7360
+ goto wb_disabled;
7361
+
7362
+ dev_info->d_ext_ufs_feature_sup =
7363
+ get_unaligned_be32(desc_buf +
7364
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7365
+
7366
+ if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
7367
+ goto wb_disabled;
7368
+
7369
+ /*
7370
+ * WB may be supported but not configured while provisioning.
7371
+ * The spec says, in dedicated wb buffer mode,
7372
+ * a max of 1 lun would have wb buffer configured.
7373
+ * Now only shared buffer mode is supported.
7374
+ */
7375
+ dev_info->b_wb_buffer_type =
7376
+ desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7377
+
7378
+ dev_info->b_presrv_uspc_en =
7379
+ desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7380
+
7381
+ if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
7382
+ dev_info->d_wb_alloc_units =
7383
+ get_unaligned_be32(desc_buf +
7384
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
7385
+ if (!dev_info->d_wb_alloc_units)
7386
+ goto wb_disabled;
7387
+ } else {
7388
+ for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7389
+ d_lu_wb_buf_alloc = 0;
7390
+ ufshcd_read_unit_desc_param(hba,
7391
+ lun,
7392
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7393
+ (u8 *)&d_lu_wb_buf_alloc,
7394
+ sizeof(d_lu_wb_buf_alloc));
7395
+ if (d_lu_wb_buf_alloc) {
7396
+ dev_info->wb_dedicated_lu = lun;
7397
+ break;
7398
+ }
7399
+ }
7400
+
7401
+ if (!d_lu_wb_buf_alloc)
7402
+ goto wb_disabled;
7403
+ }
7404
+ return;
7405
+
7406
+wb_disabled:
7407
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
7408
+}
7409
+
7410
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7411
+{
7412
+ struct ufs_dev_fix *f;
7413
+ struct ufs_dev_info *dev_info = &hba->dev_info;
7414
+
7415
+ if (!fixups)
7416
+ return;
7417
+
7418
+ for (f = fixups; f->quirk; f++) {
7419
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7420
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
7421
+ ((dev_info->model &&
7422
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7423
+ !strcmp(f->model, UFS_ANY_MODEL)))
7424
+ hba->dev_quirks |= f->quirk;
7425
+ }
7426
+}
7427
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
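
Because ufshcd_fixup_dev_quirks() is exported here, a vendor host driver can apply its own fixup table (typically from its ->fixup_dev_quirks vop) on top of the generic ufs_fixups table that ufs_fixup_device_setup() below applies. A hedged sketch of such a table; the entries are purely illustrative, and UFS_FIX(), END_FIX, UFS_VENDOR_SAMSUNG and the quirk flag are assumed to come from ufs_quirks.h:

static struct ufs_dev_fix example_vendor_fixups[] = {
	/* Hypothetical entry: extra post-LPM delay for one vendor's parts. */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	END_FIX
};

static void example_fixup_dev_quirks(struct ufs_hba *hba)
{
	/* Matching is by wManufacturerID and model-name prefix, as above. */
	ufshcd_fixup_dev_quirks(hba, example_vendor_fixups);
}
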
7428
+
7429
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
7430
+{
7431
+ /* fix by general quirk table */
7432
+ ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7433
+
7434
+ /* allow vendors to fix quirks */
7435
+ ufshcd_vops_fixup_dev_quirks(hba);
7436
+}
7437
+
7438
+static int ufs_get_device_desc(struct ufs_hba *hba)
63627439 {
63637440 int err;
6364
- size_t buff_len;
63657441 u8 model_index;
7442
+ u8 b_ufs_feature_sup;
63667443 u8 *desc_buf;
7444
+ struct ufs_dev_info *dev_info = &hba->dev_info;
63677445
6368
- buff_len = max_t(size_t, hba->desc_size.dev_desc,
6369
- QUERY_DESC_MAX_SIZE + 1);
6370
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
7446
+ desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
63717447 if (!desc_buf) {
63727448 err = -ENOMEM;
63737449 goto out;
63747450 }
63757451
6376
- err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
7452
+ err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7453
+ hba->desc_size[QUERY_DESC_IDN_DEVICE]);
63777454 if (err) {
63787455 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
63797456 __func__, err);
....@@ -6384,47 +7461,61 @@
63847461 * getting vendor (manufacturerID) and Bank Index in big endian
63857462 * format
63867463 */
6387
- dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7464
+ dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
63887465 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7466
+
7467
+ /* getting Specification Version in big endian format */
7468
+ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7469
+ desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7470
+ b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
63897471
63907472 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
63917473
6392
- /* Zero-pad entire buffer for string termination. */
6393
- memset(desc_buf, 0, buff_len);
7474
+ if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7475
+ (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
7476
+ bool hpb_en = false;
63947477
6395
- err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6396
- QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6397
- if (err) {
7478
+ ufshpb_get_dev_info(hba, desc_buf);
7479
+
7480
+ if (!ufshpb_is_legacy(hba))
7481
+ err = ufshcd_query_flag_retry(hba,
7482
+ UPIU_QUERY_OPCODE_READ_FLAG,
7483
+ QUERY_FLAG_IDN_HPB_EN, 0,
7484
+ &hpb_en);
7485
+
7486
+ if (ufshpb_is_legacy(hba) || (!err && hpb_en))
7487
+ dev_info->hpb_enabled = true;
7488
+ }
7489
+
7490
+ err = ufshcd_read_string_desc(hba, model_index,
7491
+ &dev_info->model, SD_ASCII_STD);
7492
+ if (err < 0) {
63987493 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
63997494 __func__, err);
64007495 goto out;
64017496 }
64027497
6403
- desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6404
- strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6405
- min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6406
- MAX_MODEL_LEN));
7498
+ ufs_fixup_device_setup(hba);
64077499
6408
- /* Null terminate the model string */
6409
- dev_desc->model[MAX_MODEL_LEN] = '\0';
7500
+ ufshcd_wb_probe(hba, desc_buf);
7501
+
7502
+ /*
7503
+ * ufshcd_read_string_desc returns size of the string
7504
+ * reset the error value
7505
+ */
7506
+ err = 0;
64107507
64117508 out:
64127509 kfree(desc_buf);
64137510 return err;
64147511 }
64157512
6416
-static void ufs_fixup_device_setup(struct ufs_hba *hba,
6417
- struct ufs_dev_desc *dev_desc)
7513
+static void ufs_put_device_desc(struct ufs_hba *hba)
64187514 {
6419
- struct ufs_dev_fix *f;
7515
+ struct ufs_dev_info *dev_info = &hba->dev_info;
64207516
6421
- for (f = ufs_fixups; f->quirk; f++) {
6422
- if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6423
- f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6424
- (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6425
- !strcmp(f->card.model, UFS_ANY_MODEL)))
6426
- hba->dev_quirks |= f->quirk;
6427
- }
7517
+ kfree(dev_info->model);
7518
+ dev_info->model = NULL;
64287519 }
64297520
64307521 /**
....@@ -6560,7 +7651,7 @@
65607651 peer_pa_tactivate_us = peer_pa_tactivate *
65617652 gran_to_us_table[peer_granularity - 1];
65627653
6563
- if (pa_tactivate_us > peer_pa_tactivate_us) {
7654
+ if (pa_tactivate_us >= peer_pa_tactivate_us) {
65647655 u32 new_peer_pa_tactivate;
65657656
65667657 new_peer_pa_tactivate = pa_tactivate_us /
....@@ -6581,101 +7672,222 @@
65817672 ufshcd_tune_pa_hibern8time(hba);
65827673 }
65837674
7675
+ ufshcd_vops_apply_dev_quirks(hba);
7676
+
65847677 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
65857678 /* set 1ms timeout for PA_TACTIVATE */
65867679 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
65877680
65887681 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
65897682 ufshcd_quirk_tune_host_pa_tactivate(hba);
6590
-
6591
- ufshcd_vops_apply_dev_quirks(hba);
65927683 }
65937684
65947685 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
65957686 {
6596
- int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6597
-
65987687 hba->ufs_stats.hibern8_exit_cnt = 0;
65997688 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6600
-
6601
- memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6602
- memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6603
- memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6604
- memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6605
- memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6606
-
66077689 hba->req_abort_count = 0;
66087690 }
66097691
6610
-static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
7692
+static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
66117693 {
66127694 int err;
7695
+ size_t buff_len;
7696
+ u8 *desc_buf;
66137697
6614
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6615
- &hba->desc_size.dev_desc);
6616
- if (err)
6617
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7698
+ buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7699
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
7700
+ if (!desc_buf) {
7701
+ err = -ENOMEM;
7702
+ goto out;
7703
+ }
66187704
6619
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6620
- &hba->desc_size.pwr_desc);
6621
- if (err)
6622
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7705
+ err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7706
+ desc_buf, buff_len);
7707
+ if (err) {
7708
+ dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7709
+ __func__, err);
7710
+ goto out;
7711
+ }
66237712
6624
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6625
- &hba->desc_size.interc_desc);
6626
- if (err)
6627
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7713
+ if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7714
+ hba->dev_info.max_lu_supported = 32;
7715
+ else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7716
+ hba->dev_info.max_lu_supported = 8;
66287717
6629
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6630
- &hba->desc_size.conf_desc);
6631
- if (err)
6632
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7718
+ if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
7719
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
7720
+ ufshpb_get_geo_info(hba, desc_buf);
66337721
6634
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6635
- &hba->desc_size.unit_desc);
6636
- if (err)
6637
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6638
-
6639
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6640
- &hba->desc_size.geom_desc);
6641
- if (err)
6642
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6643
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6644
- &hba->desc_size.hlth_desc);
6645
- if (err)
6646
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
7722
+out:
7723
+ kfree(desc_buf);
7724
+ return err;
66477725 }
66487726
6649
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
7727
+static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7728
+ {19200000, REF_CLK_FREQ_19_2_MHZ},
7729
+ {26000000, REF_CLK_FREQ_26_MHZ},
7730
+ {38400000, REF_CLK_FREQ_38_4_MHZ},
7731
+ {52000000, REF_CLK_FREQ_52_MHZ},
7732
+ {0, REF_CLK_FREQ_INVAL},
7733
+};
7734
+
7735
+static enum ufs_ref_clk_freq
7736
+ufs_get_bref_clk_from_hz(unsigned long freq)
66507737 {
6651
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6652
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6653
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6654
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6655
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6656
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6657
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
7738
+ int i;
7739
+
7740
+ for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7741
+ if (ufs_ref_clk_freqs[i].freq_hz == freq)
7742
+ return ufs_ref_clk_freqs[i].val;
7743
+
7744
+ return REF_CLK_FREQ_INVAL;
7745
+}
7746
+
7747
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7748
+{
7749
+ unsigned long freq;
7750
+
7751
+ freq = clk_get_rate(refclk);
7752
+
7753
+ hba->dev_ref_clk_freq =
7754
+ ufs_get_bref_clk_from_hz(freq);
7755
+
7756
+ if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7757
+ dev_err(hba->dev,
7758
+ "invalid ref_clk setting = %ld\n", freq);
7759
+}
7760
+
7761
+static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7762
+{
7763
+ int err;
7764
+ u32 ref_clk;
7765
+ u32 freq = hba->dev_ref_clk_freq;
7766
+
7767
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7768
+ QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7769
+
7770
+ if (err) {
7771
+ dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7772
+ err);
7773
+ goto out;
7774
+ }
7775
+
7776
+ if (ref_clk == freq)
7777
+ goto out; /* nothing to update */
7778
+
7779
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7780
+ QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7781
+
7782
+ if (err) {
7783
+ dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7784
+ ufs_ref_clk_freqs[freq].freq_hz);
7785
+ goto out;
7786
+ }
7787
+
7788
+ dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7789
+ ufs_ref_clk_freqs[freq].freq_hz);
7790
+
7791
+out:
7792
+ return err;
7793
+}
7794
+
7795
+static int ufshcd_device_params_init(struct ufs_hba *hba)
7796
+{
7797
+ bool flag;
7798
+ int ret, i;
7799
+
7800
+ /* Init device descriptor sizes */
7801
+ for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7802
+ hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7803
+
7804
+ /* Init UFS geometry descriptor related parameters */
7805
+ ret = ufshcd_device_geo_params_init(hba);
7806
+ if (ret)
7807
+ goto out;
7808
+
7809
+ /* Check and apply UFS device quirks */
7810
+ ret = ufs_get_device_desc(hba);
7811
+ if (ret) {
7812
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7813
+ __func__, ret);
7814
+ goto out;
7815
+ }
7816
+
7817
+ ufshcd_get_ref_clk_gating_wait(hba);
7818
+
7819
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7820
+ QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7821
+ hba->dev_info.f_power_on_wp_en = flag;
7822
+
7823
+ /* Probe maximum power mode co-supported by both UFS host and device */
7824
+ if (ufshcd_get_max_pwr_mode(hba))
7825
+ dev_err(hba->dev,
7826
+ "%s: Failed getting max supported power mode\n",
7827
+ __func__);
7828
+out:
7829
+ return ret;
7830
+}
7831
+
7832
+/**
7833
+ * ufshcd_add_lus - probe and add UFS logical units
7834
+ * @hba: per-adapter instance
7835
+ */
7836
+static int ufshcd_add_lus(struct ufs_hba *hba)
7837
+{
7838
+ int ret;
7839
+
7840
+ /* Add required well known logical units to scsi mid layer */
7841
+ ret = ufshcd_scsi_add_wlus(hba);
7842
+ if (ret)
7843
+ goto out;
7844
+
7845
+ /* Initialize devfreq after UFS device is detected */
7846
+ if (ufshcd_is_clkscaling_supported(hba)) {
7847
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
7848
+ &hba->pwr_info,
7849
+ sizeof(struct ufs_pa_layer_attr));
7850
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
7851
+ hba->clk_scaling.is_allowed = true;
7852
+
7853
+ ret = ufshcd_devfreq_init(hba);
7854
+ if (ret)
7855
+ goto out;
7856
+
7857
+ hba->clk_scaling.is_enabled = true;
7858
+ ufshcd_init_clk_scaling_sysfs(hba);
7859
+ }
7860
+
7861
+ ufs_bsg_probe(hba);
7862
+ ufshpb_init(hba);
7863
+ scsi_scan_host(hba->host);
7864
+ pm_runtime_put_sync(hba->dev);
7865
+
7866
+out:
7867
+ return ret;
66587868 }
66597869
66607870 /**
66617871 * ufshcd_probe_hba - probe hba to detect device and initialize
66627872 * @hba: per-adapter instance
7873
+ * @async: asynchronous execution or not
66637874 *
66647875 * Execute link-startup and verify device initialization
66657876 */
6666
-static int ufshcd_probe_hba(struct ufs_hba *hba)
7877
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
66677878 {
6668
- struct ufs_dev_desc card = {0};
66697879 int ret;
7880
+ unsigned long flags;
66707881 ktime_t start = ktime_get();
7882
+
7883
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
66717884
66727885 ret = ufshcd_link_startup(hba);
66737886 if (ret)
66747887 goto out;
66757888
6676
- /* set the default level for urgent bkops */
6677
- hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6678
- hba->is_urgent_bkops_lvl_checked = false;
7889
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_INTERFACE_CONFIGURATION)
7890
+ goto out;
66797891
66807892 /* Debug counters initialization */
66817893 ufshcd_clear_dbg_ufs_stats(hba);
....@@ -6683,108 +7895,71 @@
66837895 /* UniPro link is active now */
66847896 ufshcd_set_link_active(hba);
66857897
6686
- /* Enable Auto-Hibernate if configured */
6687
- ufshcd_auto_hibern8_enable(hba);
6688
-
7898
+ /* Verify device initialization by sending NOP OUT UPIU */
66897899 ret = ufshcd_verify_dev_init(hba);
66907900 if (ret)
66917901 goto out;
66927902
7903
+ /* Initiate UFS initialization, and waiting until completion */
66937904 ret = ufshcd_complete_dev_init(hba);
66947905 if (ret)
66957906 goto out;
66967907
6697
- /* Init check for device descriptor sizes */
6698
- ufshcd_init_desc_sizes(hba);
6699
-
6700
- ret = ufs_get_device_desc(hba, &card);
6701
- if (ret) {
6702
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6703
- __func__, ret);
6704
- goto out;
7908
+ /*
7909
+ * Initialize UFS device parameters used by driver, these
7910
+ * parameters are associated with UFS descriptors.
7911
+ */
7912
+ if (async) {
7913
+ ret = ufshcd_device_params_init(hba);
7914
+ if (ret)
7915
+ goto out;
67057916 }
67067917
6707
- ufs_fixup_device_setup(hba, &card);
67087918 ufshcd_tune_unipro_params(hba);
6709
-
6710
- ret = ufshcd_set_vccq_rail_unused(hba,
6711
- (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6712
- if (ret)
6713
- goto out;
67147919
67157920 /* UFS device is also active now */
67167921 ufshcd_set_ufs_dev_active(hba);
67177922 ufshcd_force_reset_auto_bkops(hba);
6718
- hba->wlun_dev_clr_ua = true;
67197923
6720
- if (ufshcd_get_max_pwr_mode(hba)) {
6721
- dev_err(hba->dev,
6722
- "%s: Failed getting max supported power mode\n",
6723
- __func__);
6724
- } else {
7924
+ /* Gear up to HS gear if supported */
7925
+ if (hba->max_pwr_info.is_valid) {
7926
+ /*
7927
+ * Set the right value to bRefClkFreq before attempting to
7928
+ * switch to HS gears.
7929
+ */
7930
+ if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7931
+ ufshcd_set_dev_ref_clk(hba);
67257932 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
67267933 if (ret) {
67277934 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
67287935 __func__, ret);
67297936 goto out;
67307937 }
7938
+ ufshcd_print_pwr_info(hba);
67317939 }
6732
-
6733
- /* set the state as operational after switching to desired gear */
6734
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
67357940
67367941 /*
6737
- * If we are in error handling context or in power management callbacks
6738
- * context, no need to scan the host
7942
+ * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7943
+ * and for removable UFS card as well, hence always set the parameter.
7944
+ * Note: Error handler may issue the device reset hence resetting
7945
+ * bActiveICCLevel as well so it is always safe to set this here.
67397946 */
6740
- if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6741
- bool flag;
7947
+ ufshcd_set_active_icc_lvl(hba);
67427948
6743
- /* clear any previous UFS device information */
6744
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6745
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6746
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6747
- hba->dev_info.f_power_on_wp_en = flag;
7949
+ ufshcd_wb_config(hba);
7950
+ /* Enable Auto-Hibernate if configured */
7951
+ ufshcd_auto_hibern8_enable(hba);
67487952
6749
- if (!hba->is_init_prefetch)
6750
- ufshcd_init_icc_levels(hba);
7953
+ ufshpb_reset(hba);
67517954
6752
- /* Add required well known logical units to scsi mid layer */
6753
- ret = ufshcd_scsi_add_wlus(hba);
6754
- if (ret)
6755
- goto out;
6756
-
6757
- /* Initialize devfreq after UFS device is detected */
6758
- if (ufshcd_is_clkscaling_supported(hba)) {
6759
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
6760
- &hba->pwr_info,
6761
- sizeof(struct ufs_pa_layer_attr));
6762
- hba->clk_scaling.saved_pwr_info.is_valid = true;
6763
- if (!hba->devfreq) {
6764
- ret = ufshcd_devfreq_init(hba);
6765
- if (ret)
6766
- goto out;
6767
- }
6768
- hba->clk_scaling.is_allowed = true;
6769
- }
6770
-
6771
- scsi_scan_host(hba->host);
6772
- pm_runtime_put_sync(hba->dev);
6773
- }
6774
-
6775
- if (!hba->is_init_prefetch)
6776
- hba->is_init_prefetch = true;
6777
-
7955
+ trace_android_rvh_ufs_complete_init(hba);
67787956 out:
6779
- /*
6780
- * If we failed to initialize the device or the device is not
6781
- * present, turn off the power/clocks etc.
6782
- */
6783
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6784
- pm_runtime_put_sync(hba->dev);
6785
- ufshcd_exit_clk_scaling(hba);
6786
- ufshcd_hba_exit(hba);
6787
- }
7957
+ spin_lock_irqsave(hba->host->host_lock, flags);
7958
+ if (ret)
7959
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
7960
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7961
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7962
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
67887963
67897964 trace_ufshcd_init(dev_name(hba->dev), ret,
67907965 ktime_to_us(ktime_sub(ktime_get(), start)),
....@@ -6800,49 +7975,46 @@
68007975 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
68017976 {
68027977 struct ufs_hba *hba = (struct ufs_hba *)data;
7978
+ int ret;
68037979
6804
- ufshcd_probe_hba(hba);
6805
-}
7980
+ down(&hba->host_sem);
7981
+ /* Initialize hba, detect and initialize UFS device */
7982
+ ret = ufshcd_probe_hba(hba, true);
7983
+ up(&hba->host_sem);
7984
+ if (ret)
7985
+ goto out;
68067986
6807
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6808
-{
6809
- unsigned long flags;
6810
- struct Scsi_Host *host;
6811
- struct ufs_hba *hba;
6812
- int index;
6813
- bool found = false;
6814
-
6815
- if (!scmd || !scmd->device || !scmd->device->host)
6816
- return BLK_EH_DONE;
6817
-
6818
- host = scmd->device->host;
6819
- hba = shost_priv(host);
6820
- if (!hba)
6821
- return BLK_EH_DONE;
6822
-
6823
- spin_lock_irqsave(host->host_lock, flags);
6824
-
6825
- for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6826
- if (hba->lrb[index].cmd == scmd) {
6827
- found = true;
6828
- break;
6829
- }
6830
- }
6831
-
6832
- spin_unlock_irqrestore(host->host_lock, flags);
6833
-
7987
+ /* Probe and add UFS logical units */
7988
+ ret = ufshcd_add_lus(hba);
7989
+out:
68347990 /*
6835
- * Bypass SCSI error handling and reset the block layer timer if this
6836
- * SCSI command was not actually dispatched to UFS driver, otherwise
6837
- * let SCSI layer handle the error as usual.
7991
+ * If we failed to initialize the device or the device is not
7992
+ * present, turn off the power/clocks etc.
68387993 */
6839
- return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
7994
+ if (ret) {
7995
+ pm_runtime_put_sync(hba->dev);
7996
+ ufshcd_hba_exit(hba);
7997
+ }
68407998 }
68417999
68428000 static const struct attribute_group *ufshcd_driver_groups[] = {
68438001 &ufs_sysfs_unit_descriptor_group,
68448002 &ufs_sysfs_lun_attributes_group,
8003
+#ifdef CONFIG_SCSI_UFS_HPB
8004
+ &ufs_sysfs_hpb_stat_group,
8005
+ &ufs_sysfs_hpb_param_group,
8006
+#endif
68458007 NULL,
8008
+};
8009
+
8010
+static struct ufs_hba_variant_params ufs_hba_vps = {
8011
+ .hba_enable_delay_us = 1000,
8012
+ .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
8013
+ .devfreq_profile.polling_ms = 100,
8014
+ .devfreq_profile.target = ufshcd_devfreq_target,
8015
+ .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8016
+ .ondemand_data.upthreshold = 70,
8017
+ .ondemand_data.downdifferential = 5,
68468018 };
68478019
68488020 static struct scsi_host_template ufshcd_driver_template = {
....@@ -6857,14 +8029,15 @@
68578029 .eh_abort_handler = ufshcd_abort,
68588030 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
68598031 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
6860
- .eh_timed_out = ufshcd_eh_timed_out,
68618032 .this_id = -1,
68628033 .sg_tablesize = SG_ALL,
68638034 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
68648035 .can_queue = UFSHCD_CAN_QUEUE,
8036
+ .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
68658037 .max_host_blocked = 1,
68668038 .track_queue_depth = 1,
68678039 .sdev_groups = ufshcd_driver_groups,
8040
+ .dma_boundary = PAGE_SIZE - 1,
68688041 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
68698042 };
68708043
....@@ -6897,13 +8070,7 @@
68978070 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
68988071 struct ufs_vreg *vreg)
68998072 {
6900
- if (!vreg)
6901
- return 0;
6902
- else if (vreg->unused)
6903
- return 0;
6904
- else
6905
- return ufshcd_config_vreg_load(hba->dev, vreg,
6906
- UFS_VREG_LPM_LOAD_UA);
8073
+ return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
69078074 }
69088075
69098076 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
....@@ -6911,10 +8078,8 @@
69118078 {
69128079 if (!vreg)
69138080 return 0;
6914
- else if (vreg->unused)
6915
- return 0;
6916
- else
6917
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8081
+
8082
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
69188083 }
69198084
69208085 static int ufshcd_config_vreg(struct device *dev,
....@@ -6931,21 +8096,19 @@
69318096 name = vreg->name;
69328097
69338098 if (regulator_count_voltages(reg) > 0) {
6934
- if (vreg->min_uV && vreg->max_uV) {
6935
- min_uV = on ? vreg->min_uV : 0;
6936
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6937
- if (ret) {
6938
- dev_err(dev,
6939
- "%s: %s set voltage failed, err=%d\n",
6940
- __func__, name, ret);
6941
- goto out;
6942
- }
6943
- }
6944
-
69458099 uA_load = on ? vreg->max_uA : 0;
69468100 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
69478101 if (ret)
69488102 goto out;
8103
+
8104
+ if (vreg->min_uV && vreg->max_uV) {
8105
+ min_uV = on ? vreg->min_uV : 0;
8106
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8107
+ if (ret)
8108
+ dev_err(dev,
8109
+ "%s: %s set voltage failed, err=%d\n",
8110
+ __func__, name, ret);
8111
+ }
69498112 }
69508113 out:
69518114 return ret;
....@@ -6955,9 +8118,7 @@
69558118 {
69568119 int ret = 0;
69578120
6958
- if (!vreg)
6959
- goto out;
6960
- else if (vreg->enabled || vreg->unused)
8121
+ if (!vreg || vreg->enabled)
69618122 goto out;
69628123
69638124 ret = ufshcd_config_vreg(dev, vreg, true);
....@@ -6977,9 +8138,7 @@
69778138 {
69788139 int ret = 0;
69798140
6980
- if (!vreg)
6981
- goto out;
6982
- else if (!vreg->enabled || vreg->unused)
8141
+ if (!vreg || !vreg->enabled || vreg->always_on)
69838142 goto out;
69848143
69858144 ret = regulator_disable(vreg->reg);
....@@ -7002,9 +8161,6 @@
70028161 struct device *dev = hba->dev;
70038162 struct ufs_vreg_info *info = &hba->vreg_info;
70048163
7005
- if (!info)
7006
- goto out;
7007
-
70088164 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
70098165 if (ret)
70108166 goto out;
....@@ -7014,8 +8170,6 @@
70148170 goto out;
70158171
70168172 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7017
- if (ret)
7018
- goto out;
70198173
70208174 out:
70218175 if (ret) {
....@@ -7030,10 +8184,7 @@
70308184 {
70318185 struct ufs_vreg_info *info = &hba->vreg_info;
70328186
7033
- if (info)
7034
- return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7035
-
7036
- return 0;
8187
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
70378188 }
70388189
70398190 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
....@@ -7059,18 +8210,13 @@
70598210 struct device *dev = hba->dev;
70608211 struct ufs_vreg_info *info = &hba->vreg_info;
70618212
7062
- if (!info)
7063
- goto out;
7064
-
70658213 ret = ufshcd_get_vreg(dev, info->vcc);
70668214 if (ret)
70678215 goto out;
70688216
70698217 ret = ufshcd_get_vreg(dev, info->vccq);
7070
- if (ret)
7071
- goto out;
7072
-
7073
- ret = ufshcd_get_vreg(dev, info->vccq2);
8218
+ if (!ret)
8219
+ ret = ufshcd_get_vreg(dev, info->vccq2);
70748220 out:
70758221 return ret;
70768222 }
....@@ -7085,38 +8231,7 @@
70858231 return 0;
70868232 }
70878233
7088
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
7089
-{
7090
- int ret = 0;
7091
- struct ufs_vreg_info *info = &hba->vreg_info;
7092
-
7093
- if (!info)
7094
- goto out;
7095
- else if (!info->vccq)
7096
- goto out;
7097
-
7098
- if (unused) {
7099
- /* shut off the rail here */
7100
- ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7101
- /*
7102
- * Mark this rail as no longer used, so it doesn't get enabled
7103
- * later by mistake
7104
- */
7105
- if (!ret)
7106
- info->vccq->unused = true;
7107
- } else {
7108
- /*
7109
- * rail should have been already enabled hence just make sure
7110
- * that unused flag is cleared.
7111
- */
7112
- info->vccq->unused = false;
7113
- }
7114
-out:
7115
- return ret;
7116
-}
7117
-
7118
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7119
- bool skip_ref_clk)
8234
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
71208235 {
71218236 int ret = 0;
71228237 struct ufs_clk_info *clki;
....@@ -7128,20 +8243,18 @@
71288243 if (list_empty(head))
71298244 goto out;
71308245
7131
- /*
7132
- * vendor specific setup_clocks ops may depend on clocks managed by
7133
- * this standard driver hence call the vendor specific setup_clocks
7134
- * before disabling the clocks managed here.
7135
- */
7136
- if (!on) {
7137
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7138
- if (ret)
7139
- return ret;
7140
- }
8246
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8247
+ if (ret)
8248
+ return ret;
71418249
71428250 list_for_each_entry(clki, head, list) {
71438251 if (!IS_ERR_OR_NULL(clki->clk)) {
7144
- if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
8252
+ /*
8253
+ * Don't disable clocks which are needed
8254
+ * to keep the link active.
8255
+ */
8256
+ if (ufshcd_is_link_active(hba) &&
8257
+ clki->keep_link_active)
71458258 continue;
71468259
71478260 clk_state_changed = on ^ clki->enabled;
....@@ -7161,16 +8274,9 @@
71618274 }
71628275 }
71638276
7164
- /*
7165
- * vendor specific setup_clocks ops may depend on clocks managed by
7166
- * this standard driver hence call the vendor specific setup_clocks
7167
- * after enabling the clocks managed here.
7168
- */
7169
- if (on) {
7170
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7171
- if (ret)
7172
- return ret;
7173
- }
8277
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8278
+ if (ret)
8279
+ return ret;
71748280
71758281 out:
71768282 if (ret) {
....@@ -7191,11 +8297,6 @@
71918297 (on ? "on" : "off"),
71928298 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
71938299 return ret;
7194
-}
7195
-
7196
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7197
-{
7198
- return __ufshcd_setup_clocks(hba, on, false);
71998300 }
72008301
72018302 static int ufshcd_init_clocks(struct ufs_hba *hba)
....@@ -7219,6 +8320,14 @@
72198320 __func__, clki->name, ret);
72208321 goto out;
72218322 }
8323
+
8324
+ /*
8325
+ * Parse device ref clk freq as per device tree "ref_clk".
8326
+ * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8327
+ * in ufshcd_alloc_host().
8328
+ */
8329
+ if (!strcmp(clki->name, "ref_clk"))
8330
+ ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
72228331
72238332 if (clki->max_freq) {
72248333 ret = clk_set_rate(clki->clk, clki->max_freq);
....@@ -7250,12 +8359,7 @@
72508359
72518360 err = ufshcd_vops_setup_regulators(hba, true);
72528361 if (err)
7253
- goto out_exit;
7254
-
7255
- goto out;
7256
-
7257
-out_exit:
7258
- ufshcd_vops_exit(hba);
8362
+ ufshcd_vops_exit(hba);
72598363 out:
72608364 if (err)
72618365 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
....@@ -7312,6 +8416,8 @@
73128416 if (err)
73138417 goto out_disable_vreg;
73148418
8419
+ ufs_debugfs_hba_init(hba);
8420
+
73158421 hba->is_powered = true;
73168422 goto out;
73178423
....@@ -7328,45 +8434,18 @@
73288434 static void ufshcd_hba_exit(struct ufs_hba *hba)
73298435 {
73308436 if (hba->is_powered) {
8437
+ ufshcd_exit_clk_scaling(hba);
8438
+ ufshcd_exit_clk_gating(hba);
8439
+ if (hba->eh_wq)
8440
+ destroy_workqueue(hba->eh_wq);
8441
+ ufs_debugfs_hba_exit(hba);
73318442 ufshcd_variant_hba_exit(hba);
73328443 ufshcd_setup_vreg(hba, false);
7333
- ufshcd_suspend_clkscaling(hba);
7334
- if (ufshcd_is_clkscaling_supported(hba))
7335
- if (hba->devfreq)
7336
- ufshcd_suspend_clkscaling(hba);
73378444 ufshcd_setup_clocks(hba, false);
73388445 ufshcd_setup_hba_vreg(hba, false);
73398446 hba->is_powered = false;
8447
+ ufs_put_device_desc(hba);
73408448 }
7341
-}
7342
-
7343
-static int
7344
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7345
-{
7346
- unsigned char cmd[6] = {REQUEST_SENSE,
7347
- 0,
7348
- 0,
7349
- 0,
7350
- UFSHCD_REQ_SENSE_SIZE,
7351
- 0};
7352
- char *buffer;
7353
- int ret;
7354
-
7355
- buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7356
- if (!buffer) {
7357
- ret = -ENOMEM;
7358
- goto out;
7359
- }
7360
-
7361
- ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7362
- UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7363
- msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7364
- if (ret)
7365
- pr_err("%s: failed with err %d\n", __func__, ret);
7366
-
7367
- kfree(buffer);
7368
-out:
7369
- return ret;
73708449 }
73718450
73728451 /**
....@@ -7385,7 +8464,7 @@
73858464 struct scsi_sense_hdr sshdr;
73868465 struct scsi_device *sdp;
73878466 unsigned long flags;
7388
- int ret;
8467
+ int ret, retries;
73898468
73908469 spin_lock_irqsave(hba->host->host_lock, flags);
73918470 sdp = hba->sdev_ufs_device;
....@@ -7410,13 +8489,6 @@
74108489 * handling context.
74118490 */
74128491 hba->host->eh_noresume = 1;
7413
- if (hba->wlun_dev_clr_ua) {
7414
- ret = ufshcd_send_request_sense(hba, sdp);
7415
- if (ret)
7416
- goto out;
7417
- /* Unit attention condition is cleared now */
7418
- hba->wlun_dev_clr_ua = false;
7419
- }
74208492
74218493 cmd[4] = pwr_mode << 4;
74228494
....@@ -7425,8 +8497,14 @@
74258497 * callbacks hence set the RQF_PM flag so that it doesn't resume the
74268498 * already suspended childs.
74278499 */
7428
- ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7429
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8500
+ for (retries = 3; retries > 0; --retries) {
8501
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8502
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8503
+ if (!scsi_status_is_check_condition(ret) ||
8504
+ !scsi_sense_valid(&sshdr) ||
8505
+ sshdr.sense_key != UNIT_ATTENTION)
8506
+ break;
8507
+ }
74308508 if (ret) {
74318509 sdev_printk(KERN_WARNING, sdp,
74328510 "START_STOP failed for power mode: %d, result %x\n",
....@@ -7437,7 +8515,7 @@
74378515
74388516 if (!ret)
74398517 hba->curr_dev_pwr_mode = pwr_mode;
7440
-out:
8518
+
74418519 scsi_device_put(sdp);
74428520 hba->host->eh_noresume = 0;
74438521 return ret;
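/*
 * Illustrative sketch (not part of the patch): the CDB built earlier in this
 * function carries the target power condition in the upper nibble of byte 4
 * of a START STOP UNIT command, so with the enum values from ufs.h (an
 * assumption here) UFS_POWERDOWN_PWR_MODE becomes 0x30. A pending unit
 * attention is no longer pre-cleared with REQUEST SENSE; the command is
 * simply retried while the device keeps answering CHECK CONDITION with
 * UNIT ATTENTION sense, as shown in the hunk above.
 */
unsigned char cmd[6] = { START_STOP };		/* opcode 0x1B */

cmd[4] = UFS_POWERDOWN_PWR_MODE << 4;		/* 0x3 << 4 == 0x30 */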
....@@ -7454,18 +8532,20 @@
74548532
74558533 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
74568534 ret = ufshcd_uic_hibern8_enter(hba);
7457
- if (!ret)
8535
+ if (!ret) {
74588536 ufshcd_set_link_hibern8(hba);
7459
- else
8537
+ } else {
8538
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8539
+ __func__, ret);
74608540 goto out;
8541
+ }
74618542 }
74628543 /*
74638544 * If autobkops is enabled, link can't be turned off because
74648545 * turning off the link would also turn off the device.
74658546 */
74668547 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7467
- (!check_for_bkops || (check_for_bkops &&
7468
- !hba->auto_bkops_enabled))) {
8548
+ (!check_for_bkops || !hba->auto_bkops_enabled)) {
74698549 /*
74708550 * Let's make sure that link is in low power mode, we are doing
74718551 * this currently by putting the link in Hibern8. Another way to
....@@ -7474,13 +8554,16 @@
74748554 * unipro. But putting the link in hibern8 is much faster.
74758555 */
74768556 ret = ufshcd_uic_hibern8_enter(hba);
7477
- if (ret)
8557
+ if (ret) {
8558
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8559
+ __func__, ret);
74788560 goto out;
8561
+ }
74798562 /*
74808563 * Change controller state to "reset state" which
74818564 * should also put the link in off/reset state
74828565 */
7483
- ufshcd_hba_stop(hba, true);
8566
+ ufshcd_hba_stop(hba);
74848567 /*
74858568 * TODO: Check if we need any delay to make sure that
74868569 * controller is reset
....@@ -7494,6 +8577,8 @@
74948577
74958578 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
74968579 {
8580
+ bool vcc_off = false;
8581
+
74978582 /*
74988583 * It seems some UFS devices may keep drawing more than sleep current
74998584 * (at least for 500us) from UFS rails (especially from VCCQ rail).
....@@ -7515,17 +8600,29 @@
75158600 *
75168601 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
75178602 * in low power state which would save some power.
8603
+ *
8604
+ * If Write Booster is enabled and the device needs to flush the WB
8605
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
75188606 */
75198607 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
75208608 !hba->dev_info.is_lu_power_on_wp) {
75218609 ufshcd_setup_vreg(hba, false);
8610
+ vcc_off = true;
75228611 } else if (!ufshcd_is_ufs_dev_active(hba)) {
75238612 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7524
- if (!ufshcd_is_link_active(hba)) {
8613
+ vcc_off = true;
8614
+ if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
75258615 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
75268616 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
75278617 }
75288618 }
8619
+
8620
+ /*
8621
+ * Some UFS devices require delay after VCC power rail is turned-off.
8622
+ */
8623
+ if (vcc_off && hba->vreg_info.vcc &&
8624
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8625
+ usleep_range(5000, 5100);
75298626 }
75308627
75318628 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
....@@ -7536,7 +8633,7 @@
75368633 !hba->dev_info.is_lu_power_on_wp) {
75378634 ret = ufshcd_setup_vreg(hba, true);
75388635 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7539
- if (!ret && !ufshcd_is_link_active(hba)) {
8636
+ if (!ufshcd_is_link_active(hba)) {
75408637 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
75418638 if (ret)
75428639 goto vcc_disable;
....@@ -7558,13 +8655,13 @@
75588655
75598656 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
75608657 {
7561
- if (ufshcd_is_link_off(hba))
8658
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
75628659 ufshcd_setup_hba_vreg(hba, false);
75638660 }
75648661
75658662 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
75668663 {
7567
- if (ufshcd_is_link_off(hba))
8664
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
75688665 ufshcd_setup_hba_vreg(hba, true);
75698666 }
75708667
....@@ -7602,9 +8699,7 @@
76028699 req_link_state = UIC_LINK_OFF_STATE;
76038700 }
76048701
7605
- ret = ufshcd_crypto_suspend(hba, pm_op);
7606
- if (ret)
7607
- goto out;
8702
+ ufshpb_suspend(hba);
76088703
76098704 /*
76108705 * If we can't transition into any of the low power modes
....@@ -7613,11 +8708,8 @@
76138708 ufshcd_hold(hba, false);
76148709 hba->clk_gating.is_suspended = true;
76158710
7616
- if (hba->clk_scaling.is_allowed) {
7617
- cancel_work_sync(&hba->clk_scaling.suspend_work);
7618
- cancel_work_sync(&hba->clk_scaling.resume_work);
7619
- ufshcd_suspend_clkscaling(hba);
7620
- }
8711
+ if (ufshcd_is_clkscaling_supported(hba))
8712
+ ufshcd_clk_scaling_suspend(hba, true);
76218713
76228714 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
76238715 req_link_state == UIC_LINK_ACTIVE_STATE) {
....@@ -7648,23 +8740,38 @@
76488740 /* make sure that auto bkops is disabled */
76498741 ufshcd_disable_auto_bkops(hba);
76508742 }
8743
+ /*
8744
+ * If device needs to do BKOP or WB buffer flush during
8745
+ * Hibern8, keep device power mode as "active power mode"
8746
+ * and VCC supply.
8747
+ */
8748
+ hba->dev_info.b_rpm_dev_flush_capable =
8749
+ hba->auto_bkops_enabled ||
8750
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8751
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8752
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
8753
+ ufshcd_wb_need_flush(hba));
76518754 }
76528755
7653
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7654
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7655
- !ufshcd_is_runtime_pm(pm_op))) {
7656
- /* ensure that bkops is disabled */
7657
- ufshcd_disable_auto_bkops(hba);
7658
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7659
- if (ret)
7660
- goto enable_gating;
8756
+ flush_work(&hba->eeh_work);
8757
+
8758
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8759
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8760
+ !ufshcd_is_runtime_pm(pm_op)) {
8761
+ /* ensure that bkops is disabled */
8762
+ ufshcd_disable_auto_bkops(hba);
8763
+ }
8764
+
8765
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
8766
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8767
+ if (ret)
8768
+ goto enable_gating;
8769
+ }
76618770 }
76628771
76638772 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
76648773 if (ret)
76658774 goto set_dev_active;
7666
-
7667
- ufshcd_vreg_set_lpm(hba);
76688775
76698776 disable_clks:
76708777 /*
....@@ -7675,27 +8782,27 @@
76758782 ret = ufshcd_vops_suspend(hba, pm_op);
76768783 if (ret)
76778784 goto set_link_active;
7678
-
7679
- if (!ufshcd_is_link_active(hba))
7680
- ufshcd_setup_clocks(hba, false);
7681
- else
7682
- /* If link is active, device ref_clk can't be switched off */
7683
- __ufshcd_setup_clocks(hba, false, true);
7684
-
7685
- hba->clk_gating.state = CLKS_OFF;
7686
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
76878785 /*
76888786 * Disable the host irq as there won't be any
76898787 * host controller transaction expected till resume.
76908788 */
76918789 ufshcd_disable_irq(hba);
8790
+
8791
+ ufshcd_setup_clocks(hba, false);
8792
+
8793
+ if (ufshcd_is_clkgating_allowed(hba)) {
8794
+ hba->clk_gating.state = CLKS_OFF;
8795
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
8796
+ hba->clk_gating.state);
8797
+ }
8798
+
8799
+ ufshcd_vreg_set_lpm(hba);
8800
+
76928801 /* Put the host controller in low power mode if possible */
76938802 ufshcd_hba_vreg_set_lpm(hba);
76948803 goto out;
76958804
76968805 set_link_active:
7697
- if (hba->clk_scaling.is_allowed)
7698
- ufshcd_resume_clkscaling(hba);
76998806 ufshcd_vreg_set_hpm(hba);
77008807 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
77018808 ufshcd_set_link_active(hba);
....@@ -7705,13 +8812,23 @@
77058812 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
77068813 ufshcd_disable_auto_bkops(hba);
77078814 enable_gating:
7708
- if (hba->clk_scaling.is_allowed)
7709
- ufshcd_resume_clkscaling(hba);
8815
+ if (ufshcd_is_clkscaling_supported(hba))
8816
+ ufshcd_clk_scaling_suspend(hba, false);
8817
+
77108818 hba->clk_gating.is_suspended = false;
8819
+ hba->dev_info.b_rpm_dev_flush_capable = false;
77118820 ufshcd_release(hba);
7712
- ufshcd_crypto_resume(hba, pm_op);
8821
+ ufshpb_resume(hba);
77138822 out:
8823
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
8824
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8825
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8826
+ }
8827
+
77148828 hba->pm_op_in_progress = 0;
8829
+
8830
+ if (ret)
8831
+ ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
77158832 return ret;
77168833 }
77178834
....@@ -7729,26 +8846,22 @@
77298846 {
77308847 int ret;
77318848 enum uic_link_state old_link_state;
7732
- enum ufs_dev_pwr_mode old_pwr_mode;
77338849
77348850 hba->pm_op_in_progress = 1;
77358851 old_link_state = hba->uic_link_state;
7736
- old_pwr_mode = hba->curr_dev_pwr_mode;
77378852
77388853 ufshcd_hba_vreg_set_hpm(hba);
7739
- /* Make sure clocks are enabled before accessing controller */
7740
- ret = ufshcd_setup_clocks(hba, true);
8854
+ ret = ufshcd_vreg_set_hpm(hba);
77418855 if (ret)
77428856 goto out;
77438857
7744
- /* enable the host irq as host controller would be active soon */
7745
- ret = ufshcd_enable_irq(hba);
8858
+ /* Make sure clocks are enabled before accessing controller */
8859
+ ret = ufshcd_setup_clocks(hba, true);
77468860 if (ret)
7747
- goto disable_irq_and_vops_clks;
8861
+ goto disable_vreg;
77488862
7749
- ret = ufshcd_vreg_set_hpm(hba);
7750
- if (ret)
7751
- goto disable_irq_and_vops_clks;
8863
+ /* enable the host irq as host controller would be active soon */
8864
+ ufshcd_enable_irq(hba);
77528865
77538866 /*
77548867 * Call vendor specific resume callback. As these callbacks may access
....@@ -7757,18 +8870,25 @@
77578870 */
77588871 ret = ufshcd_vops_resume(hba, pm_op);
77598872 if (ret)
7760
- goto disable_vreg;
8873
+ goto disable_irq_and_vops_clks;
77618874
77628875 if (ufshcd_is_link_hibern8(hba)) {
77638876 ret = ufshcd_uic_hibern8_exit(hba);
7764
- if (!ret)
8877
+ if (!ret) {
77658878 ufshcd_set_link_active(hba);
7766
- else
8879
+ } else {
8880
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8881
+ __func__, ret);
77678882 goto vendor_suspend;
8883
+ }
77688884 } else if (ufshcd_is_link_off(hba)) {
7769
- ret = ufshcd_host_reset_and_restore(hba);
77708885 /*
7771
- * ufshcd_host_reset_and_restore() should have already
8886
+ * A full initialization of the host and the device is
8887
+ * required since the link was put to off during suspend.
8888
+ */
8889
+ ret = ufshcd_reset_and_restore(hba);
8890
+ /*
8891
+ * ufshcd_reset_and_restore() should have already
77728892 * set the link state as active
77738893 */
77748894 if (ret || !ufshcd_is_link_active(hba))
....@@ -7781,10 +8901,6 @@
77818901 goto set_old_link_state;
77828902 }
77838903
7784
- ret = ufshcd_crypto_resume(hba, pm_op);
7785
- if (ret)
7786
- goto set_old_dev_pwr_mode;
7787
-
77888904 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
77898905 ufshcd_enable_auto_bkops(hba);
77908906 else
....@@ -7796,33 +8912,42 @@
77968912
77978913 hba->clk_gating.is_suspended = false;
77988914
7799
- if (hba->clk_scaling.is_allowed)
7800
- ufshcd_resume_clkscaling(hba);
7801
-
7802
- /* Schedule clock gating in case of no access to UFS device yet */
7803
- ufshcd_release(hba);
8915
+ if (ufshcd_is_clkscaling_supported(hba))
8916
+ ufshcd_clk_scaling_suspend(hba, false);
78048917
78058918 /* Enable Auto-Hibernate if configured */
78068919 ufshcd_auto_hibern8_enable(hba);
78078920
8921
+ ufshpb_resume(hba);
8922
+
8923
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
8924
+ hba->dev_info.b_rpm_dev_flush_capable = false;
8925
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8926
+ }
8927
+
8928
+ /* Schedule clock gating in case of no access to UFS device yet */
8929
+ ufshcd_release(hba);
8930
+
78088931 goto out;
78098932
7810
-set_old_dev_pwr_mode:
7811
- if (old_pwr_mode != hba->curr_dev_pwr_mode)
7812
- ufshcd_set_dev_pwr_mode(hba, old_pwr_mode);
78138933 set_old_link_state:
78148934 ufshcd_link_state_transition(hba, old_link_state, 0);
78158935 vendor_suspend:
78168936 ufshcd_vops_suspend(hba, pm_op);
7817
-disable_vreg:
7818
- ufshcd_vreg_set_lpm(hba);
78198937 disable_irq_and_vops_clks:
78208938 ufshcd_disable_irq(hba);
7821
- if (hba->clk_scaling.is_allowed)
7822
- ufshcd_suspend_clkscaling(hba);
78238939 ufshcd_setup_clocks(hba, false);
8940
+ if (ufshcd_is_clkgating_allowed(hba)) {
8941
+ hba->clk_gating.state = CLKS_OFF;
8942
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
8943
+ hba->clk_gating.state);
8944
+ }
8945
+disable_vreg:
8946
+ ufshcd_vreg_set_lpm(hba);
78248947 out:
78258948 hba->pm_op_in_progress = 0;
8949
+ if (ret)
8950
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
78268951 return ret;
78278952 }
78288953
....@@ -7839,13 +8964,19 @@
78398964 int ret = 0;
78408965 ktime_t start = ktime_get();
78418966
7842
- if (!hba || !hba->is_powered)
8967
+ down(&hba->host_sem);
8968
+
8969
+ if (!hba->is_powered)
78438970 return 0;
8971
+
8972
+ cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
78448973
78458974 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
78468975 hba->curr_dev_pwr_mode) &&
78478976 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7848
- hba->uic_link_state))
8977
+ hba->uic_link_state) &&
8978
+ pm_runtime_suspended(hba->dev) &&
8979
+ !hba->dev_info.b_rpm_dev_flush_capable)
78498980 goto out;
78508981
78518982 if (pm_runtime_suspended(hba->dev)) {
....@@ -7869,6 +9000,8 @@
78699000 hba->curr_dev_pwr_mode, hba->uic_link_state);
78709001 if (!ret)
78719002 hba->is_sys_suspended = true;
9003
+ else
9004
+ up(&hba->host_sem);
78729005 return ret;
78739006 }
78749007 EXPORT_SYMBOL(ufshcd_system_suspend);
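/*
 * Illustrative sketch (not part of the patch): hba->host_sem is taken in
 * ufshcd_system_suspend() and only released on resume (or when suspend
 * fails), so other contexts that must not race with system PM can serialize
 * against it. A hypothetical caller could do:
 */
if (down_trylock(&hba->host_sem))
	return -EBUSY;	/* suspending, suspended or shutting down */
/* ... safe to touch the host/device here ... */
up(&hba->host_sem);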
....@@ -7885,9 +9018,6 @@
78859018 int ret = 0;
78869019 ktime_t start = ktime_get();
78879020
7888
- if (!hba)
7889
- return -EINVAL;
7890
-
78919021 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
78929022 /*
78939023 * Let the runtime resume take care of resuming
....@@ -7902,6 +9032,7 @@
79029032 hba->curr_dev_pwr_mode, hba->uic_link_state);
79039033 if (!ret)
79049034 hba->is_sys_suspended = false;
9035
+ up(&hba->host_sem);
79059036 return ret;
79069037 }
79079038 EXPORT_SYMBOL(ufshcd_system_resume);
....@@ -7918,9 +9049,6 @@
79189049 {
79199050 int ret = 0;
79209051 ktime_t start = ktime_get();
7921
-
7922
- if (!hba)
7923
- return -EINVAL;
79249052
79259053 if (!hba->is_powered)
79269054 goto out;
....@@ -7960,9 +9088,6 @@
79609088 int ret = 0;
79619089 ktime_t start = ktime_get();
79629090
7963
- if (!hba)
7964
- return -EINVAL;
7965
-
79669091 if (!hba->is_powered)
79679092 goto out;
79689093 else
....@@ -7993,6 +9118,10 @@
79939118 {
79949119 int ret = 0;
79959120
9121
+ down(&hba->host_sem);
9122
+ hba->shutting_down = true;
9123
+ up(&hba->host_sem);
9124
+
79969125 if (!hba->is_powered)
79979126 goto out;
79989127
....@@ -8005,6 +9134,7 @@
80059134 out:
80069135 if (ret)
80079136 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9137
+ hba->is_powered = false;
80089138 /* allow force shutdown even in case of errors */
80099139 return 0;
80109140 }
....@@ -8017,16 +9147,16 @@
80179147 */
80189148 void ufshcd_remove(struct ufs_hba *hba)
80199149 {
9150
+ ufs_bsg_remove(hba);
9151
+ ufshpb_remove(hba);
80209152 ufs_sysfs_remove_nodes(hba->dev);
9153
+ blk_cleanup_queue(hba->tmf_queue);
9154
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
9155
+ blk_cleanup_queue(hba->cmd_queue);
80219156 scsi_remove_host(hba->host);
80229157 /* disable interrupts */
80239158 ufshcd_disable_intr(hba, hba->intr_mask);
8024
- ufshcd_hba_stop(hba, true);
8025
-
8026
- ufshcd_exit_clk_scaling(hba);
8027
- ufshcd_exit_clk_gating(hba);
8028
- if (ufshcd_is_clkscaling_supported(hba))
8029
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9159
+ ufshcd_hba_stop(hba);
80309160 ufshcd_hba_exit(hba);
80319161 }
80329162 EXPORT_SYMBOL_GPL(ufshcd_remove);
....@@ -8077,23 +9207,17 @@
80779207 }
80789208
80799209 host = scsi_host_alloc(&ufshcd_driver_template,
8080
- sizeof(struct ufs_hba));
9210
+ sizeof(struct ufs_hba_add_info));
80819211 if (!host) {
80829212 dev_err(dev, "scsi_host_alloc failed\n");
80839213 err = -ENOMEM;
80849214 goto out_error;
80859215 }
8086
-
8087
- /*
8088
- * Do not use blk-mq at this time because blk-mq does not support
8089
- * runtime pm.
8090
- */
8091
- host->use_blk_mq = false;
8092
-
80939216 hba = shost_priv(host);
80949217 hba->host = host;
80959218 hba->dev = dev;
80969219 *hba_handle = hba;
9220
+ hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
80979221 hba->sg_entry_size = sizeof(struct ufshcd_sg_entry);
80989222
80999223 INIT_LIST_HEAD(&hba->clk_list_head);
....@@ -8102,6 +9226,18 @@
81029226 return err;
81039227 }
81049228 EXPORT_SYMBOL(ufshcd_alloc_host);
9229
+
9230
+/* This function exists because blk_mq_alloc_tag_set() requires this. */
9231
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9232
+ const struct blk_mq_queue_data *qd)
9233
+{
9234
+ WARN_ON_ONCE(true);
9235
+ return BLK_STS_NOTSUPP;
9236
+}
9237
+
9238
+static const struct blk_mq_ops ufshcd_tmf_ops = {
9239
+ .queue_rq = ufshcd_queue_tmf,
9240
+};
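/*
 * Illustrative sketch (not part of the patch): ufshcd_tmf_ops never queues
 * real I/O; the tag set only exists so task-management slots can be reserved
 * through the block layer. Assuming the 5.10-era block API, a TMF path would
 * grab and release a slot roughly like this:
 */
struct request *req;

req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
	return PTR_ERR(req);
/* req->tag now indexes a free UTMRL slot for the task management request */
blk_put_request(req);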
81059241
81069242 /**
81079243 * ufshcd_init - Driver initialization routine
....@@ -8112,9 +9248,11 @@
81129248 */
81139249 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
81149250 {
9251
+ struct request ***tmf_rqs = &ufs_hba_add_info(hba)->tmf_rqs;
81159252 int err;
81169253 struct Scsi_Host *host = hba->host;
81179254 struct device *dev = hba->dev;
9255
+ char eh_wq_name[sizeof("ufs_eh_wq_00")];
81189256
81199257 /*
81209258 * dev_set_drvdata() must be called before any callbacks are registered
....@@ -8132,24 +9270,21 @@
81329270
81339271 hba->mmio_base = mmio_base;
81349272 hba->irq = irq;
8135
-
8136
- /* Set descriptor lengths to specification defaults */
8137
- ufshcd_def_desc_sizes(hba);
9273
+ hba->vps = &ufs_hba_vps;
81389274
81399275 err = ufshcd_hba_init(hba);
81409276 if (err)
81419277 goto out_error;
81429278
81439279 /* Read capabilities registers */
8144
- ufshcd_hba_capabilities(hba);
9280
+ err = ufshcd_hba_capabilities(hba);
9281
+ if (err)
9282
+ goto out_disable;
81459283
81469284 /* Get UFS version supported by the controller */
81479285 hba->ufs_version = ufshcd_get_ufs_version(hba);
81489286
8149
- if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8150
- (hba->ufs_version != UFSHCI_VERSION_11) &&
8151
- (hba->ufs_version != UFSHCI_VERSION_20) &&
8152
- (hba->ufs_version != UFSHCI_VERSION_21))
9287
+ if (hba->ufs_version < ufshci_version(1, 0))
81539288 dev_err(hba->dev, "invalid UFS version 0x%x\n",
81549289 hba->ufs_version);
81559290
....@@ -8172,23 +9307,30 @@
81729307 /* Configure LRB */
81739308 ufshcd_host_memory_configure(hba);
81749309
8175
- host->can_queue = hba->nutrs;
8176
- host->cmd_per_lun = hba->nutrs;
9310
+ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
9311
+ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
81779312 host->max_id = UFSHCD_MAX_ID;
81789313 host->max_lun = UFS_MAX_LUNS;
81799314 host->max_channel = UFSHCD_MAX_CHANNEL;
81809315 host->unique_id = host->host_no;
8181
- host->max_cmd_len = MAX_CDB_SIZE;
9316
+ host->max_cmd_len = UFS_CDB_SIZE;
81829317
81839318 hba->max_pwr_info.is_valid = false;
81849319
8185
- /* Initailize wait queue for task management */
8186
- init_waitqueue_head(&hba->tm_wq);
8187
- init_waitqueue_head(&hba->tm_tag_wq);
8188
-
81899320 /* Initialize work queues */
9321
+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9322
+ hba->host->host_no);
9323
+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9324
+ if (!hba->eh_wq) {
9325
+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9326
+ __func__);
9327
+ err = -ENOMEM;
9328
+ goto out_disable;
9329
+ }
81909330 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
81919331 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9332
+
9333
+ sema_init(&hba->host_sem, 1);
81929334
81939335 /* Initialize UIC command mutex */
81949336 mutex_init(&hba->uic_cmd_mutex);
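/*
 * Illustrative sketch (not part of the patch): with the dedicated
 * single-threaded workqueue created above, error handling is scheduled onto
 * hba->eh_wq rather than the system workqueue, roughly:
 */
queue_work(hba->eh_wq, &hba->eh_work);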
....@@ -8197,9 +9339,6 @@
81979339 mutex_init(&hba->dev_cmd.lock);
81989340
81999341 init_rwsem(&hba->clk_scaling_lock);
8200
-
8201
- /* Initialize device management tag acquire wait queue */
8202
- init_waitqueue_head(&hba->dev_cmd.tag_wq);
82039342
82049343 ufshcd_init_clk_gating(hba);
82059344
....@@ -8223,7 +9362,7 @@
82239362 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
82249363 if (err) {
82259364 dev_err(hba->dev, "request irq failed\n");
8226
- goto exit_gating;
9365
+ goto out_disable;
82279366 } else {
82289367 hba->is_irq_enabled = true;
82299368 }
....@@ -8231,23 +9370,48 @@
82319370 err = scsi_add_host(host, hba->dev);
82329371 if (err) {
82339372 dev_err(hba->dev, "scsi_add_host failed\n");
8234
- goto exit_gating;
9373
+ goto out_disable;
82359374 }
82369375
8237
- /* Init crypto */
8238
- err = ufshcd_hba_init_crypto(hba);
8239
- if (err) {
8240
- dev_err(hba->dev, "crypto setup failed\n");
9376
+ hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9377
+ if (IS_ERR(hba->cmd_queue)) {
9378
+ err = PTR_ERR(hba->cmd_queue);
82419379 goto out_remove_scsi_host;
82429380 }
9381
+
9382
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
9383
+ .nr_hw_queues = 1,
9384
+ .queue_depth = hba->nutmrs,
9385
+ .ops = &ufshcd_tmf_ops,
9386
+ .flags = BLK_MQ_F_NO_SCHED,
9387
+ };
9388
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9389
+ if (err < 0)
9390
+ goto free_cmd_queue;
9391
+ hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9392
+ if (IS_ERR(hba->tmf_queue)) {
9393
+ err = PTR_ERR(hba->tmf_queue);
9394
+ goto free_tmf_tag_set;
9395
+ }
9396
+ *tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, sizeof(**tmf_rqs),
9397
+ GFP_KERNEL);
9398
+ if (!*tmf_rqs) {
9399
+ err = -ENOMEM;
9400
+ goto free_tmf_queue;
9401
+ }
9402
+
9403
+ /* Reset the attached device */
9404
+ ufshcd_vops_device_reset(hba);
9405
+
9406
+ ufshcd_init_crypto(hba);
82439407
82449408 /* Host controller enable */
82459409 err = ufshcd_hba_enable(hba);
82469410 if (err) {
82479411 dev_err(hba->dev, "Host controller enable failed\n");
8248
- ufshcd_print_host_regs(hba);
9412
+ ufshcd_print_evt_hist(hba);
82499413 ufshcd_print_host_state(hba);
8250
- goto out_remove_scsi_host;
9414
+ goto free_tmf_queue;
82519415 }
82529416
82539417 /*
....@@ -8262,8 +9426,11 @@
82629426 UFS_SLEEP_PWR_MODE,
82639427 UIC_LINK_HIBERN8_STATE);
82649428
9429
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9430
+ ufshcd_rpm_dev_flush_recheck_work);
9431
+
82659432 /* Set the default auto-hiberate idle timer value to 150 ms */
8266
- if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
9433
+ if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
82679434 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
82689435 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
82699436 }
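/*
 * Illustrative sketch (not part of the patch): per the UFSHCI AHIT layout
 * (an assumption here), the idle timer is <timer> x 10^<scale> microseconds,
 * so 150 with scale 3 (1 ms units) is the 150 ms default mentioned above.
 * When auto-hibernate is enabled, the value is written out roughly as:
 */
if (ufshcd_is_auto_hibern8_supported(hba))
	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);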
....@@ -8280,15 +9447,19 @@
82809447 ufshcd_set_ufs_dev_active(hba);
82819448
82829449 async_schedule(ufshcd_async_scan, hba);
8283
- ufs_sysfs_add_nodes(hba->dev);
9450
+ ufs_sysfs_add_nodes(hba);
82849451
9452
+ device_enable_async_suspend(dev);
82859453 return 0;
82869454
9455
+free_tmf_queue:
9456
+ blk_cleanup_queue(hba->tmf_queue);
9457
+free_tmf_tag_set:
9458
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
9459
+free_cmd_queue:
9460
+ blk_cleanup_queue(hba->cmd_queue);
82879461 out_remove_scsi_host:
82889462 scsi_remove_host(hba->host);
8289
-exit_gating:
8290
- ufshcd_exit_clk_scaling(hba);
8291
- ufshcd_exit_clk_gating(hba);
82929463 out_disable:
82939464 hba->is_irq_enabled = false;
82949465 ufshcd_hba_exit(hba);
....@@ -8297,6 +9468,20 @@
82979468 }
82989469 EXPORT_SYMBOL_GPL(ufshcd_init);
82999470
9471
+static int __init ufshcd_core_init(void)
9472
+{
9473
+ ufs_debugfs_init();
9474
+ return 0;
9475
+}
9476
+
9477
+static void __exit ufshcd_core_exit(void)
9478
+{
9479
+ ufs_debugfs_exit();
9480
+}
9481
+
9482
+module_init(ufshcd_core_init);
9483
+module_exit(ufshcd_core_exit);
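/*
 * Illustrative sketch (not part of the patch): ufs_debugfs_init()/exit() are
 * assumed to live in ufs-debugfs.c and to manage a shared debugfs root that
 * the per-host ufs_debugfs_hba_init()/exit() calls hang entries under, along
 * these lines:
 */
#include <linux/debugfs.h>

static struct dentry *ufs_debugfs_root;

void ufs_debugfs_init(void)
{
	ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}

void ufs_debugfs_exit(void)
{
	debugfs_remove_recursive(ufs_debugfs_root);
}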
9484
+
83009485 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
83019486 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
83029487 MODULE_DESCRIPTION("Generic UFS host controller driver Core");