forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/scsi/ufs/ufshcd.c
....@@ -1,40 +1,12 @@
1
+// SPDX-License-Identifier: GPL-2.0-or-later
12 /*
23 * Universal Flash Storage Host controller driver Core
3
- *
4
- * This code is based on drivers/scsi/ufs/ufshcd.c
54 * Copyright (C) 2011-2013 Samsung India Software Operations
65 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
76 *
87 * Authors:
98 * Santosh Yaraganavi <santosh.sy@samsung.com>
109 * Vinayak Holikatti <h.vinayak@samsung.com>
11
- *
12
- * This program is free software; you can redistribute it and/or
13
- * modify it under the terms of the GNU General Public License
14
- * as published by the Free Software Foundation; either version 2
15
- * of the License, or (at your option) any later version.
16
- * See the COPYING file in the top-level directory or visit
17
- * <http://www.gnu.org/licenses/gpl-2.0.html>
18
- *
19
- * This program is distributed in the hope that it will be useful,
20
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
21
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22
- * GNU General Public License for more details.
23
- *
24
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
25
- * without warranty of any kind. You are solely responsible for
26
- * determining the appropriateness of using and distributing
27
- * the program and assume all risks associated with your exercise
28
- * of rights with respect to the program, including but not limited
29
- * to infringement of third party rights, the risks and costs of
30
- * program errors, damage to or loss of data, programs or equipment,
31
- * and unavailability or interruption of operations. Under no
32
- * circumstances will the contributor of this Program be liable for
33
- * any damages of any kind arising from your use or distribution of
34
- * this program.
35
- *
36
- * The Linux Foundation chooses to take subject only to the GPLv2
37
- * license terms, and distributes only under these terms.
3810 */
3911
4012 #include <linux/async.h>
....@@ -42,27 +14,36 @@
4214 #include <linux/nls.h>
4315 #include <linux/of.h>
4416 #include <linux/bitfield.h>
17
+#include <linux/blk-pm.h>
18
+#include <linux/blkdev.h>
4519 #include "ufshcd.h"
20
+#include "ufshcd-add-info.h"
4621 #include "ufs_quirks.h"
4722 #include "unipro.h"
4823 #include "ufs-sysfs.h"
24
+#include "ufs-debugfs.h"
25
+#include "ufs_bsg.h"
4926 #include "ufshcd-crypto.h"
27
+#include "ufshpb.h"
28
+#include <asm/unaligned.h>
29
+#include <linux/blkdev.h>
5030
5131 #define CREATE_TRACE_POINTS
5232 #include <trace/events/ufs.h>
5333
54
-#define UFSHCD_REQ_SENSE_SIZE 18
34
+#undef CREATE_TRACE_POINTS
35
+#include <trace/hooks/ufshcd.h>
5536
5637 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
5738 UTP_TASK_REQ_COMPL |\
5839 UFSHCD_ERROR_MASK)
5940 /* UIC command timeout, unit: ms */
60
-#define UIC_CMD_TIMEOUT 500
41
+#define UIC_CMD_TIMEOUT 5000
6142
6243 /* NOP OUT retries waiting for NOP IN response */
6344 #define NOP_OUT_RETRIES 10
64
-/* Timeout after 30 msecs if NOP OUT hangs without response */
65
-#define NOP_OUT_TIMEOUT 30 /* msecs */
45
+/* Timeout after 50 msecs if NOP OUT hangs without response */
46
+#define NOP_OUT_TIMEOUT 50 /* msecs */
6647
6748 /* Query request retries */
6849 #define QUERY_REQ_RETRIES 3
....@@ -93,6 +74,15 @@
9374 /* default delay of autosuspend: 2000 ms */
9475 #define RPM_AUTOSUSPEND_DELAY_MS 2000
9576
77
+/* Default delay of RPM device flush delayed work */
78
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
79
+
80
+/* Default value of wait time before gating device ref clock */
81
+#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
82
+
83
+/* Polling time to wait for fDeviceInit */
84
+#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
85
+
9686 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
9787 ({ \
9888 int _ret; \
....@@ -119,12 +109,17 @@
119109 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
120110 return -EINVAL;
121111
122
- regs = kzalloc(len, GFP_KERNEL);
112
+ regs = kzalloc(len, GFP_ATOMIC);
123113 if (!regs)
124114 return -ENOMEM;
125115
126
- for (pos = 0; pos < len; pos += 4)
116
+ for (pos = 0; pos < len; pos += 4) {
117
+ if (offset == 0 &&
118
+ pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
119
+ pos <= REG_UIC_ERROR_CODE_DME)
120
+ continue;
127121 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
122
+ }
128123
129124 ufshcd_hex_dump(prefix, regs, len);
130125 kfree(regs);
....@@ -136,8 +131,9 @@
136131 enum {
137132 UFSHCD_MAX_CHANNEL = 0,
138133 UFSHCD_MAX_ID = 1,
139
- UFSHCD_CMD_PER_LUN = 32,
140
- UFSHCD_CAN_QUEUE = 32,
134
+ UFSHCD_NUM_RESERVED = 1,
135
+ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
136
+ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
141137 };
142138
143139 /* UFSHCD states */
....@@ -145,7 +141,8 @@
145141 UFSHCD_STATE_RESET,
146142 UFSHCD_STATE_ERROR,
147143 UFSHCD_STATE_OPERATIONAL,
148
- UFSHCD_STATE_EH_SCHEDULED,
144
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
145
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
149146 };
150147
151148 /* UFSHCD error handling flags */
....@@ -161,6 +158,7 @@
161158 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
162159 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
163160 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
161
+ UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
164162 };
165163
166164 #define ufshcd_set_eh_in_progress(h) \
....@@ -169,19 +167,6 @@
169167 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
170168 #define ufshcd_clear_eh_in_progress(h) \
171169 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
172
-
173
-#define ufshcd_set_ufs_dev_active(h) \
174
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
175
-#define ufshcd_set_ufs_dev_sleep(h) \
176
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
177
-#define ufshcd_set_ufs_dev_poweroff(h) \
178
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
179
-#define ufshcd_is_ufs_dev_active(h) \
180
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
181
-#define ufshcd_is_ufs_dev_sleep(h) \
182
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
183
-#define ufshcd_is_ufs_dev_poweroff(h) \
184
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
185170
186171 struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
187172 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
....@@ -223,44 +208,33 @@
223208 static struct ufs_dev_fix ufs_fixups[] = {
224209 /* UFS cards deviations table */
225210 UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
226
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
211
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
212
+ UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
227213 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
228
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
229
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
230
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
214
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
215
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
231216 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
232
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
233
- UFS_DEVICE_NO_FASTAUTO),
234
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
235
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
217
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
218
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
219
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
220
+ UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
236221 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
237222 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
238223 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
239224 UFS_DEVICE_QUIRK_PA_TACTIVATE),
240225 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
241226 UFS_DEVICE_QUIRK_PA_TACTIVATE),
242
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
243
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
244
- UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
245
- UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
246
- UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
247
-
248227 END_FIX
249228 };
250229
251
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
230
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
252231 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
253232 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
254233 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
255234 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
256235 static void ufshcd_hba_exit(struct ufs_hba *hba);
257
-static int ufshcd_probe_hba(struct ufs_hba *hba);
258
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
259
- bool skip_ref_clk);
236
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
260237 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
261
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
262
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
263
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
264238 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
265239 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
266240 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
....@@ -270,33 +244,59 @@
270244 static irqreturn_t ufshcd_intr(int irq, void *__hba);
271245 static int ufshcd_change_power_mode(struct ufs_hba *hba,
272246 struct ufs_pa_layer_attr *pwr_mode);
247
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
248
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
249
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
250
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
251
+ struct ufs_vreg *vreg);
252
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
253
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
254
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
255
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
256
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
257
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
258
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
259
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
260
+
273261 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
274262 {
275263 return tag >= 0 && tag < hba->nutrs;
276264 }
277265
278
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
266
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
279267 {
280
- int ret = 0;
281
-
282268 if (!hba->is_irq_enabled) {
283
- ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
284
- hba);
285
- if (ret)
286
- dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
287
- __func__, ret);
269
+ enable_irq(hba->irq);
288270 hba->is_irq_enabled = true;
289271 }
290
-
291
- return ret;
292272 }
293273
294274 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
295275 {
296276 if (hba->is_irq_enabled) {
297
- free_irq(hba->irq, hba);
277
+ disable_irq(hba->irq);
298278 hba->is_irq_enabled = false;
299279 }
280
+}
281
+
282
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
283
+{
284
+ int ret;
285
+
286
+ if (!ufshcd_is_wb_allowed(hba))
287
+ return;
288
+
289
+ ret = ufshcd_wb_ctrl(hba, true);
290
+ if (ret)
291
+ dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
292
+ else
293
+ dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
294
+ ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
295
+ if (ret)
296
+ dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
297
+ __func__, ret);
298
+ if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
299
+ ufshcd_wb_toggle_flush(hba, true);
300300 }
301301
302302 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
....@@ -309,16 +309,6 @@
309309 {
310310 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
311311 scsi_block_requests(hba->host);
312
-}
313
-
314
-/* replace non-printable or non-ASCII characters with spaces */
315
-static inline void ufshcd_remove_non_printable(char *val)
316
-{
317
- if (!val)
318
- return;
319
-
320
- if (*val < 0x20 || *val > 0x7e)
321
- *val = ' ';
322312 }
323313
324314 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
....@@ -340,21 +330,40 @@
340330 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
341331 const char *str)
342332 {
343
- struct utp_task_req_desc *descp;
344
- struct utp_upiu_task_req *task_req;
345
- int off = (int)tag - hba->nutrs;
333
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
346334
347
- descp = &hba->utmrdl_base_addr[off];
348
- task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
349
- trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
350
- &task_req->input_param1);
335
+ trace_android_vh_ufs_send_tm_command(hba, tag, str);
336
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
337
+ &descp->input_param1);
338
+}
339
+
340
+static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
341
+ struct uic_command *ucmd,
342
+ const char *str)
343
+{
344
+ u32 cmd;
345
+
346
+ trace_android_vh_ufs_send_uic_command(hba, ucmd, str);
347
+
348
+ if (!trace_ufshcd_uic_command_enabled())
349
+ return;
350
+
351
+ if (!strcmp(str, "send"))
352
+ cmd = ucmd->command;
353
+ else
354
+ cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
355
+
356
+ trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
357
+ ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
358
+ ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
359
+ ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
351360 }
352361
353362 static void ufshcd_add_command_trace(struct ufs_hba *hba,
354363 unsigned int tag, const char *str)
355364 {
356365 sector_t lba = -1;
357
- u8 opcode = 0;
366
+ u8 opcode = 0, group_id = 0;
358367 u32 intr, doorbell;
359368 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
360369 struct scsi_cmnd *cmd = lrbp->cmd;
....@@ -380,13 +389,20 @@
380389 lba = cmd->request->bio->bi_iter.bi_sector;
381390 transfer_len = be32_to_cpu(
382391 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
392
+ if (opcode == WRITE_10)
393
+ group_id = lrbp->cmd->cmnd[6];
394
+ } else if (opcode == UNMAP) {
395
+ if (cmd->request) {
396
+ lba = scsi_get_lba(cmd);
397
+ transfer_len = blk_rq_bytes(cmd->request);
398
+ }
383399 }
384400 }
385401
386402 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
387403 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
388404 trace_ufshcd_command(dev_name(hba->dev), str, tag,
389
- doorbell, transfer_len, intr, lba, opcode);
405
+ doorbell, transfer_len, intr, lba, opcode, group_id);
390406 }
391407
392408 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
....@@ -405,46 +421,54 @@
405421 }
406422 }
407423
408
-static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
409
- struct ufs_uic_err_reg_hist *err_hist, char *err_name)
424
+static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
425
+ char *err_name)
410426 {
411427 int i;
428
+ bool found = false;
429
+ struct ufs_event_hist *e;
412430
413
- for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
414
- int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
431
+ if (id >= UFS_EVT_CNT)
432
+ return;
415433
416
- if (err_hist->reg[p] == 0)
434
+ e = &hba->ufs_stats.event[id];
435
+
436
+ for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
437
+ int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
438
+
439
+ if (e->tstamp[p] == 0)
417440 continue;
418
- dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
419
- err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
441
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
442
+ e->val[p], ktime_to_us(e->tstamp[p]));
443
+ found = true;
420444 }
445
+
446
+ if (!found)
447
+ dev_err(hba->dev, "No record of %s\n", err_name);
421448 }
422449
423
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
450
+static void ufshcd_print_evt_hist(struct ufs_hba *hba)
424451 {
425452 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
426
- dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
427
- hba->ufs_version, hba->capabilities);
428
- dev_err(hba->dev,
429
- "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
430
- (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
431
- dev_err(hba->dev,
432
- "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
433
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
434
- hba->ufs_stats.hibern8_exit_cnt);
435453
436
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
437
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
438
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
439
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
440
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
454
+ ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
455
+ ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
456
+ ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
457
+ ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
458
+ ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
459
+ ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
460
+ "auto_hibern8_err");
461
+ ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
462
+ ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
463
+ "link_startup_fail");
464
+ ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
465
+ ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
466
+ "suspend_fail");
467
+ ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
468
+ ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
469
+ ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
441470
442
- ufshcd_print_clk_freqs(hba);
443
-
444
- if (hba->vops && hba->vops->dbg_register_dump)
445
- hba->vops->dbg_register_dump(hba);
446
-
447
- ufshcd_crypto_debug(hba);
471
+ ufshcd_vops_dbg_register_dump(hba);
448472 }
449473
450474 static
....@@ -476,8 +500,8 @@
476500 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
477501 sizeof(struct utp_upiu_rsp));
478502
479
- prdt_length =
480
- le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
503
+ prdt_length = le16_to_cpu(
504
+ lrbp->utr_descriptor_ptr->prd_table_length);
481505 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
482506 prdt_length /= hba->sg_entry_size;
483507
....@@ -494,30 +518,23 @@
494518
495519 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
496520 {
497
- struct utp_task_req_desc *tmrdp;
498521 int tag;
499522
500523 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
501
- tmrdp = &hba->utmrdl_base_addr[tag];
524
+ struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
525
+
502526 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
503
- ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
504
- sizeof(struct request_desc_header));
505
- dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
506
- tag);
507
- ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
508
- sizeof(struct utp_upiu_req));
509
- dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
510
- tag);
511
- ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
512
- sizeof(struct utp_task_req_desc));
527
+ ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
513528 }
514529 }
515530
516531 static void ufshcd_print_host_state(struct ufs_hba *hba)
517532 {
533
+ struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
534
+
518535 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
519
- dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
520
- hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
536
+ dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
537
+ hba->outstanding_reqs, hba->outstanding_tasks);
521538 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
522539 hba->saved_err, hba->saved_uic_err);
523540 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
....@@ -527,12 +544,24 @@
527544 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
528545 hba->auto_bkops_enabled, hba->host->host_self_blocked);
529546 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
547
+ dev_err(hba->dev,
548
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
549
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
550
+ hba->ufs_stats.hibern8_exit_cnt);
551
+ dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
552
+ ktime_to_us(hba->ufs_stats.last_intr_ts),
553
+ hba->ufs_stats.last_intr_status);
530554 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
531555 hba->eh_flags, hba->req_abort_count);
532
- dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
533
- hba->capabilities, hba->caps);
556
+ dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
557
+ hba->ufs_version, hba->capabilities, hba->caps);
534558 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
535559 hba->dev_quirks);
560
+ if (sdev_ufs)
561
+ dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
562
+ sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
563
+
564
+ ufshcd_print_clk_freqs(hba);
536565 }
537566
538567 /**
....@@ -561,21 +590,33 @@
561590 hba->pwr_info.hs_rate);
562591 }
563592
564
-/*
593
+void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
594
+{
595
+ if (!us)
596
+ return;
597
+
598
+ if (us < 10)
599
+ udelay(us);
600
+ else
601
+ usleep_range(us, us + tolerance);
602
+}
603
+EXPORT_SYMBOL_GPL(ufshcd_delay_us);
604
+
605
+/**
565606 * ufshcd_wait_for_register - wait for register value to change
566
- * @hba - per-adapter interface
567
- * @reg - mmio register offset
568
- * @mask - mask to apply to read register value
569
- * @val - wait condition
570
- * @interval_us - polling interval in microsecs
571
- * @timeout_ms - timeout in millisecs
572
- * @can_sleep - perform sleep or just spin
607
+ * @hba: per-adapter interface
608
+ * @reg: mmio register offset
609
+ * @mask: mask to apply to the read register value
610
+ * @val: value to wait for
611
+ * @interval_us: polling interval in microseconds
612
+ * @timeout_ms: timeout in milliseconds
573613 *
574
- * Returns -ETIMEDOUT on error, zero on success
614
+ * Return:
615
+ * -ETIMEDOUT on error, zero on success.
575616 */
576617 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
577618 u32 val, unsigned long interval_us,
578
- unsigned long timeout_ms, bool can_sleep)
619
+ unsigned long timeout_ms)
579620 {
580621 int err = 0;
581622 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
....@@ -584,10 +625,7 @@
584625 val = val & mask;
585626
586627 while ((ufshcd_readl(hba, reg) & mask) != val) {
587
- if (can_sleep)
588
- usleep_range(interval_us, interval_us + 50);
589
- else
590
- udelay(interval_us);
628
+ usleep_range(interval_us, interval_us + 50);
591629 if (time_after(jiffies, timeout)) {
592630 if ((ufshcd_readl(hba, reg) & mask) != val)
593631 err = -ETIMEDOUT;
....@@ -606,23 +644,12 @@
606644 */
607645 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
608646 {
609
- u32 intr_mask = 0;
647
+ if (hba->ufs_version == ufshci_version(1, 0))
648
+ return INTERRUPT_MASK_ALL_VER_10;
649
+ if (hba->ufs_version <= ufshci_version(2, 0))
650
+ return INTERRUPT_MASK_ALL_VER_11;
610651
611
- switch (hba->ufs_version) {
612
- case UFSHCI_VERSION_10:
613
- intr_mask = INTERRUPT_MASK_ALL_VER_10;
614
- break;
615
- case UFSHCI_VERSION_11:
616
- case UFSHCI_VERSION_20:
617
- intr_mask = INTERRUPT_MASK_ALL_VER_11;
618
- break;
619
- case UFSHCI_VERSION_21:
620
- default:
621
- intr_mask = INTERRUPT_MASK_ALL_VER_21;
622
- break;
623
- }
624
-
625
- return intr_mask;
652
+ return INTERRUPT_MASK_ALL_VER_21;
626653 }
627654
628655 /**
....@@ -633,10 +660,22 @@
633660 */
634661 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
635662 {
636
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
637
- return ufshcd_vops_get_ufs_hci_version(hba);
663
+ u32 ufshci_ver;
638664
639
- return ufshcd_readl(hba, REG_UFS_VERSION);
665
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
666
+ ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
667
+ else
668
+ ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
669
+
670
+ /*
671
+ * UFSHCI v1.x uses a different version scheme, in order
672
+ * to allow the use of comparisons with the ufshci_version
673
+ * function, we convert it to the same scheme as ufs 2.0+.
674
+ */
675
+ if (ufshci_ver & 0x00010000)
676
+ return ufshci_version(1, ufshci_ver & 0x00000100);
677
+
678
+ return ufshci_ver;
640679 }
641680
642681 /**
....@@ -665,53 +704,6 @@
665704 }
666705
667706 /**
668
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
669
- * @task_req_descp: pointer to utp_task_req_desc structure
670
- *
671
- * This function is used to get the OCS field from UTMRD
672
- * Returns the OCS field in the UTMRD
673
- */
674
-static inline int
675
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
676
-{
677
- return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
678
-}
679
-
680
-/**
681
- * ufshcd_get_tm_free_slot - get a free slot for task management request
682
- * @hba: per adapter instance
683
- * @free_slot: pointer to variable with available slot value
684
- *
685
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
686
- * Returns 0 if free slot is not available, else return 1 with tag value
687
- * in @free_slot.
688
- */
689
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
690
-{
691
- int tag;
692
- bool ret = false;
693
-
694
- if (!free_slot)
695
- goto out;
696
-
697
- do {
698
- tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
699
- if (tag >= hba->nutmrs)
700
- goto out;
701
- } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
702
-
703
- *free_slot = tag;
704
- ret = true;
705
-out:
706
- return ret;
707
-}
708
-
709
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
710
-{
711
- clear_bit_unlock(slot, &hba->tm_slots_in_use);
712
-}
713
-
714
-/**
715707 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
716708 * @hba: per adapter instance
717709 * @pos: position of the bit to be cleared
....@@ -736,16 +728,6 @@
736728 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
737729 else
738730 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
739
-}
740
-
741
-/**
742
- * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
743
- * @hba: per adapter instance
744
- * @tag: position of the bit to be cleared
745
- */
746
-static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
747
-{
748
- __clear_bit(tag, &hba->outstanding_reqs);
749731 }
750732
751733 /**
....@@ -894,10 +876,8 @@
894876 {
895877 u32 val = CONTROLLER_ENABLE;
896878
897
- if (ufshcd_hba_is_crypto_supported(hba)) {
898
- ufshcd_crypto_enable(hba);
879
+ if (ufshcd_crypto_enable(hba))
899880 val |= CRYPTO_GENERAL_ENABLE;
900
- }
901881
902882 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
903883 }
....@@ -917,8 +897,7 @@
917897 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
918898 {
919899 /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
920
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
921
- (hba->ufs_version == UFSHCI_VERSION_11))
900
+ if (hba->ufs_version <= ufshci_version(1, 1))
922901 return UFS_UNIPRO_VER_1_41;
923902 else
924903 return UFS_UNIPRO_VER_1_6;
....@@ -942,20 +921,22 @@
942921 return false;
943922 }
944923
945
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
924
+/**
925
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
926
+ * @hba: per adapter instance
927
+ * @scale_up: If True, set max possible frequency othewise set low frequency
928
+ *
929
+ * Returns 0 if successful
930
+ * Returns < 0 for any other errors
931
+ */
932
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
946933 {
947934 int ret = 0;
948935 struct ufs_clk_info *clki;
949936 struct list_head *head = &hba->clk_list_head;
950
- ktime_t start = ktime_get();
951
- bool clk_state_changed = false;
952937
953938 if (list_empty(head))
954939 goto out;
955
-
956
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
957
- if (ret)
958
- return ret;
959940
960941 list_for_each_entry(clki, head, list) {
961942 if (!IS_ERR_OR_NULL(clki->clk)) {
....@@ -963,7 +944,6 @@
963944 if (clki->curr_freq == clki->max_freq)
964945 continue;
965946
966
- clk_state_changed = true;
967947 ret = clk_set_rate(clki->clk, clki->max_freq);
968948 if (ret) {
969949 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
....@@ -982,7 +962,6 @@
982962 if (clki->curr_freq == clki->min_freq)
983963 continue;
984964
985
- clk_state_changed = true;
986965 ret = clk_set_rate(clki->clk, clki->min_freq);
987966 if (ret) {
988967 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
....@@ -1001,11 +980,37 @@
1001980 clki->name, clk_get_rate(clki->clk));
1002981 }
1003982
983
+out:
984
+ return ret;
985
+}
986
+
987
+/**
988
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
989
+ * @hba: per adapter instance
990
+ * @scale_up: True if scaling up and false if scaling down
991
+ *
992
+ * Returns 0 if successful
993
+ * Returns < 0 for any other errors
994
+ */
995
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
996
+{
997
+ int ret = 0;
998
+ ktime_t start = ktime_get();
999
+
1000
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
1001
+ if (ret)
1002
+ goto out;
1003
+
1004
+ ret = ufshcd_set_clk_freq(hba, scale_up);
1005
+ if (ret)
1006
+ goto out;
1007
+
10041008 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1009
+ if (ret)
1010
+ ufshcd_set_clk_freq(hba, !scale_up);
10051011
10061012 out:
1007
- if (clk_state_changed)
1008
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1013
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
10091014 (scale_up ? "up" : "down"),
10101015 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
10111016 return ret;
....@@ -1114,7 +1119,6 @@
11141119 */
11151120 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
11161121 {
1117
- #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
11181122 int ret = 0;
11191123 struct ufs_pa_layer_attr new_pwr_info;
11201124
....@@ -1125,22 +1129,21 @@
11251129 memcpy(&new_pwr_info, &hba->pwr_info,
11261130 sizeof(struct ufs_pa_layer_attr));
11271131
1128
- if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1129
- || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1132
+ if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1133
+ hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
11301134 /* save the current power mode */
11311135 memcpy(&hba->clk_scaling.saved_pwr_info.info,
11321136 &hba->pwr_info,
11331137 sizeof(struct ufs_pa_layer_attr));
11341138
11351139 /* scale down gear */
1136
- new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1137
- new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1140
+ new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1141
+ new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
11381142 }
11391143 }
11401144
11411145 /* check if the power mode needs to be changed or not? */
1142
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1143
-
1146
+ ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
11441147 if (ret)
11451148 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
11461149 __func__, ret,
....@@ -1160,19 +1163,30 @@
11601163 */
11611164 ufshcd_scsi_block_requests(hba);
11621165 down_write(&hba->clk_scaling_lock);
1163
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1166
+
1167
+ if (!hba->clk_scaling.is_allowed ||
1168
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
11641169 ret = -EBUSY;
11651170 up_write(&hba->clk_scaling_lock);
11661171 ufshcd_scsi_unblock_requests(hba);
1172
+ goto out;
11671173 }
11681174
1175
+ /* let's not get into low power until clock scaling is completed */
1176
+ ufshcd_hold(hba, false);
1177
+
1178
+out:
11691179 return ret;
11701180 }
11711181
1172
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1182
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
11731183 {
1174
- up_write(&hba->clk_scaling_lock);
1184
+ if (writelock)
1185
+ up_write(&hba->clk_scaling_lock);
1186
+ else
1187
+ up_read(&hba->clk_scaling_lock);
11751188 ufshcd_scsi_unblock_requests(hba);
1189
+ ufshcd_release(hba);
11761190 }
11771191
11781192 /**
....@@ -1187,9 +1201,7 @@
11871201 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
11881202 {
11891203 int ret = 0;
1190
-
1191
- /* let's not get into low power until clock scaling is completed */
1192
- ufshcd_hold(hba, false);
1204
+ bool is_writelock = true;
11931205
11941206 ret = ufshcd_clock_scaling_prepare(hba);
11951207 if (ret)
....@@ -1199,14 +1211,14 @@
11991211 if (!scale_up) {
12001212 ret = ufshcd_scale_gear(hba, false);
12011213 if (ret)
1202
- goto out;
1214
+ goto out_unprepare;
12031215 }
12041216
12051217 ret = ufshcd_scale_clks(hba, scale_up);
12061218 if (ret) {
12071219 if (!scale_up)
12081220 ufshcd_scale_gear(hba, true);
1209
- goto out;
1221
+ goto out_unprepare;
12101222 }
12111223
12121224 /* scale up the gear after scaling up clocks */
....@@ -1214,15 +1226,17 @@
12141226 ret = ufshcd_scale_gear(hba, true);
12151227 if (ret) {
12161228 ufshcd_scale_clks(hba, false);
1217
- goto out;
1229
+ goto out_unprepare;
12181230 }
12191231 }
12201232
1221
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1233
+ /* Enable Write Booster if we have scaled up else disable it */
1234
+ downgrade_write(&hba->clk_scaling_lock);
1235
+ is_writelock = false;
1236
+ ufshcd_wb_ctrl(hba, scale_up);
12221237
1223
-out:
1224
- ufshcd_clock_scaling_unprepare(hba);
1225
- ufshcd_release(hba);
1238
+out_unprepare:
1239
+ ufshcd_clock_scaling_unprepare(hba, is_writelock);
12261240 return ret;
12271241 }
12281242
....@@ -1270,10 +1284,15 @@
12701284 struct list_head *clk_list = &hba->clk_list_head;
12711285 struct ufs_clk_info *clki;
12721286 unsigned long irq_flags;
1287
+ bool force_out = false;
1288
+ bool force_scaling = false;
12731289
12741290 if (!ufshcd_is_clkscaling_supported(hba))
12751291 return -EINVAL;
12761292
1293
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1294
+ /* Override with the closest supported frequency */
1295
+ *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
12771296 spin_lock_irqsave(hba->host->host_lock, irq_flags);
12781297 if (ufshcd_eh_in_progress(hba)) {
12791298 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
....@@ -1288,24 +1307,23 @@
12881307 goto out;
12891308 }
12901309
1291
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1310
+ /* Decide based on the rounded-off frequency and update */
12921311 scale_up = (*freq == clki->max_freq) ? true : false;
1293
- if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1312
+ if (!scale_up)
1313
+ *freq = clki->min_freq;
1314
+
1315
+ trace_android_vh_ufs_clock_scaling(hba, &force_out, &force_scaling, &scale_up);
1316
+
1317
+ /* Update the frequency */
1318
+ if (force_out || (!force_scaling && !ufshcd_is_devfreq_scaling_required(hba, scale_up))) {
12941319 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
12951320 ret = 0;
12961321 goto out; /* no state change required */
12971322 }
12981323 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
12991324
1300
- pm_runtime_get_noresume(hba->dev);
1301
- if (!pm_runtime_active(hba->dev)) {
1302
- pm_runtime_put_noidle(hba->dev);
1303
- ret = -EAGAIN;
1304
- goto out;
1305
- }
13061325 start = ktime_get();
13071326 ret = ufshcd_devfreq_scale(hba, scale_up);
1308
- pm_runtime_put(hba->dev);
13091327
13101328 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
13111329 (scale_up ? "up" : "down"),
....@@ -1319,6 +1337,24 @@
13191337 return ret;
13201338 }
13211339
1340
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1341
+{
1342
+ int *busy = priv;
1343
+
1344
+ WARN_ON_ONCE(reserved);
1345
+ (*busy)++;
1346
+ return false;
1347
+}
1348
+
1349
+/* Whether or not any tag is in use by a request that is in progress. */
1350
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1351
+{
1352
+ struct request_queue *q = hba->cmd_queue;
1353
+ int busy = 0;
1354
+
1355
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1356
+ return busy;
1357
+}
13221358
13231359 static int ufshcd_devfreq_get_dev_status(struct device *dev,
13241360 struct devfreq_dev_status *stat)
....@@ -1326,6 +1362,9 @@
13261362 struct ufs_hba *hba = dev_get_drvdata(dev);
13271363 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
13281364 unsigned long flags;
1365
+ struct list_head *clk_list = &hba->clk_list_head;
1366
+ struct ufs_clk_info *clki;
1367
+ ktime_t curr_t;
13291368
13301369 if (!ufshcd_is_clkscaling_supported(hba))
13311370 return -EINVAL;
....@@ -1333,22 +1372,29 @@
13331372 memset(stat, 0, sizeof(*stat));
13341373
13351374 spin_lock_irqsave(hba->host->host_lock, flags);
1375
+ curr_t = ktime_get();
13361376 if (!scaling->window_start_t)
13371377 goto start_window;
13381378
1379
+ clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1380
+ /*
1381
+ * If current frequency is 0, then the ondemand governor considers
1382
+ * there's no initial frequency set. And it always requests to set
1383
+ * to max. frequency.
1384
+ */
1385
+ stat->current_frequency = clki->curr_freq;
13391386 if (scaling->is_busy_started)
1340
- scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1341
- scaling->busy_start_t));
1387
+ scaling->tot_busy_t += ktime_us_delta(curr_t,
1388
+ scaling->busy_start_t);
13421389
1343
- stat->total_time = jiffies_to_usecs((long)jiffies -
1344
- (long)scaling->window_start_t);
1390
+ stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
13451391 stat->busy_time = scaling->tot_busy_t;
13461392 start_window:
1347
- scaling->window_start_t = jiffies;
1393
+ scaling->window_start_t = curr_t;
13481394 scaling->tot_busy_t = 0;
13491395
13501396 if (hba->outstanding_reqs) {
1351
- scaling->busy_start_t = ktime_get();
1397
+ scaling->busy_start_t = curr_t;
13521398 scaling->is_busy_started = true;
13531399 } else {
13541400 scaling->busy_start_t = 0;
....@@ -1357,12 +1403,6 @@
13571403 spin_unlock_irqrestore(hba->host->host_lock, flags);
13581404 return 0;
13591405 }
1360
-
1361
-static struct devfreq_dev_profile ufs_devfreq_profile = {
1362
- .polling_ms = 100,
1363
- .target = ufshcd_devfreq_target,
1364
- .get_dev_status = ufshcd_devfreq_get_dev_status,
1365
-};
13661406
13671407 static int ufshcd_devfreq_init(struct ufs_hba *hba)
13681408 {
....@@ -1379,10 +1419,12 @@
13791419 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
13801420 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
13811421
1422
+ ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1423
+ &hba->vps->ondemand_data);
13821424 devfreq = devfreq_add_device(hba->dev,
1383
- &ufs_devfreq_profile,
1425
+ &hba->vps->devfreq_profile,
13841426 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1385
- NULL);
1427
+ &hba->vps->ondemand_data);
13861428 if (IS_ERR(devfreq)) {
13871429 ret = PTR_ERR(devfreq);
13881430 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
....@@ -1428,8 +1470,8 @@
14281470 unsigned long flags;
14291471 bool suspend = false;
14301472
1431
- if (!ufshcd_is_clkscaling_supported(hba))
1432
- return;
1473
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
1474
+ cancel_work_sync(&hba->clk_scaling.resume_work);
14331475
14341476 spin_lock_irqsave(hba->host->host_lock, flags);
14351477 if (!hba->clk_scaling.is_suspended) {
....@@ -1447,9 +1489,6 @@
14471489 unsigned long flags;
14481490 bool resume = false;
14491491
1450
- if (!ufshcd_is_clkscaling_supported(hba))
1451
- return;
1452
-
14531492 spin_lock_irqsave(hba->host->host_lock, flags);
14541493 if (hba->clk_scaling.is_suspended) {
14551494 resume = true;
....@@ -1466,7 +1505,7 @@
14661505 {
14671506 struct ufs_hba *hba = dev_get_drvdata(dev);
14681507
1469
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1508
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
14701509 }
14711510
14721511 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
....@@ -1474,22 +1513,25 @@
14741513 {
14751514 struct ufs_hba *hba = dev_get_drvdata(dev);
14761515 u32 value;
1477
- int err;
1516
+ int err = 0;
14781517
14791518 if (kstrtou32(buf, 0, &value))
14801519 return -EINVAL;
14811520
1521
+ down(&hba->host_sem);
1522
+ if (!ufshcd_is_user_access_allowed(hba)) {
1523
+ err = -EBUSY;
1524
+ goto out;
1525
+ }
1526
+
14821527 value = !!value;
1483
- if (value == hba->clk_scaling.is_allowed)
1528
+ if (value == hba->clk_scaling.is_enabled)
14841529 goto out;
14851530
14861531 pm_runtime_get_sync(hba->dev);
14871532 ufshcd_hold(hba, false);
14881533
1489
- cancel_work_sync(&hba->clk_scaling.suspend_work);
1490
- cancel_work_sync(&hba->clk_scaling.resume_work);
1491
-
1492
- hba->clk_scaling.is_allowed = value;
1534
+ hba->clk_scaling.is_enabled = value;
14931535
14941536 if (value) {
14951537 ufshcd_resume_clkscaling(hba);
....@@ -1504,10 +1546,11 @@
15041546 ufshcd_release(hba);
15051547 pm_runtime_put_sync(hba->dev);
15061548 out:
1507
- return count;
1549
+ up(&hba->host_sem);
1550
+ return err ? err : count;
15081551 }
15091552
1510
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1553
+static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
15111554 {
15121555 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
15131556 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
....@@ -1516,6 +1559,45 @@
15161559 hba->clk_scaling.enable_attr.attr.mode = 0644;
15171560 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
15181561 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1562
+}
1563
+
1564
+static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1565
+{
1566
+ if (hba->clk_scaling.enable_attr.attr.name)
1567
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1568
+}
1569
+
1570
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1571
+{
1572
+ char wq_name[sizeof("ufs_clkscaling_00")];
1573
+
1574
+ if (!ufshcd_is_clkscaling_supported(hba))
1575
+ return;
1576
+
1577
+ if (!hba->clk_scaling.min_gear)
1578
+ hba->clk_scaling.min_gear = UFS_HS_G1;
1579
+
1580
+ INIT_WORK(&hba->clk_scaling.suspend_work,
1581
+ ufshcd_clk_scaling_suspend_work);
1582
+ INIT_WORK(&hba->clk_scaling.resume_work,
1583
+ ufshcd_clk_scaling_resume_work);
1584
+
1585
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1586
+ hba->host->host_no);
1587
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1588
+
1589
+ hba->clk_scaling.is_initialized = true;
1590
+}
1591
+
1592
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1593
+{
1594
+ if (!hba->clk_scaling.is_initialized)
1595
+ return;
1596
+
1597
+ ufshcd_remove_clk_scaling_sysfs(hba);
1598
+ destroy_workqueue(hba->clk_scaling.workq);
1599
+ ufshcd_devfreq_remove(hba);
1600
+ hba->clk_scaling.is_initialized = false;
15191601 }
15201602
15211603 static void ufshcd_ungate_work(struct work_struct *work)
....@@ -1534,7 +1616,10 @@
15341616 }
15351617
15361618 spin_unlock_irqrestore(hba->host->host_lock, flags);
1619
+ ufshcd_hba_vreg_set_hpm(hba);
15371620 ufshcd_setup_clocks(hba, true);
1621
+
1622
+ ufshcd_enable_irq(hba);
15381623
15391624 /* Exit from hibern8 */
15401625 if (ufshcd_can_hibern8_during_gating(hba)) {
....@@ -1570,11 +1655,6 @@
15701655 goto out;
15711656 spin_lock_irqsave(hba->host->host_lock, flags);
15721657 hba->clk_gating.active_reqs++;
1573
-
1574
- if (ufshcd_eh_in_progress(hba)) {
1575
- spin_unlock_irqrestore(hba->host->host_lock, flags);
1576
- return 0;
1577
- }
15781658
15791659 start:
15801660 switch (hba->clk_gating.state) {
....@@ -1614,6 +1694,7 @@
16141694 * currently running. Hence, fall through to cancel gating
16151695 * work and to enable clocks.
16161696 */
1697
+ fallthrough;
16171698 case CLKS_OFF:
16181699 hba->clk_gating.state = REQ_CLKS_ON;
16191700 trace_ufshcd_clk_gating(dev_name(hba->dev),
....@@ -1625,6 +1706,7 @@
16251706 * fall through to check if we should wait for this
16261707 * work to be done or not.
16271708 */
1709
+ fallthrough;
16281710 case REQ_CLKS_ON:
16291711 if (async) {
16301712 rc = -EAGAIN;
....@@ -1653,6 +1735,7 @@
16531735 struct ufs_hba *hba = container_of(work, struct ufs_hba,
16541736 clk_gating.gate_work.work);
16551737 unsigned long flags;
1738
+ int ret;
16561739
16571740 spin_lock_irqsave(hba->host->host_lock, flags);
16581741 /*
....@@ -1662,7 +1745,7 @@
16621745 * state to CLKS_ON.
16631746 */
16641747 if (hba->clk_gating.is_suspended ||
1665
- (hba->clk_gating.state == REQ_CLKS_ON)) {
1748
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
16661749 hba->clk_gating.state = CLKS_ON;
16671750 trace_ufshcd_clk_gating(dev_name(hba->dev),
16681751 hba->clk_gating.state);
....@@ -1671,7 +1754,7 @@
16711754
16721755 if (hba->clk_gating.active_reqs
16731756 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1674
- || hba->lrb_in_use || hba->outstanding_tasks
1757
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
16751758 || hba->active_uic_cmd || hba->uic_async_done)
16761759 goto rel_lock;
16771760
....@@ -1679,8 +1762,11 @@
16791762
16801763 /* put the link into hibern8 mode before turning off clocks */
16811764 if (ufshcd_can_hibern8_during_gating(hba)) {
1682
- if (ufshcd_uic_hibern8_enter(hba)) {
1765
+ ret = ufshcd_uic_hibern8_enter(hba);
1766
+ if (ret) {
16831767 hba->clk_gating.state = CLKS_ON;
1768
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1769
+ __func__, ret);
16841770 trace_ufshcd_clk_gating(dev_name(hba->dev),
16851771 hba->clk_gating.state);
16861772 goto out;
....@@ -1688,12 +1774,12 @@
16881774 ufshcd_set_link_hibern8(hba);
16891775 }
16901776
1691
- if (!ufshcd_is_link_active(hba))
1692
- ufshcd_setup_clocks(hba, false);
1693
- else
1694
- /* If link is active, device ref_clk can't be switched off */
1695
- __ufshcd_setup_clocks(hba, false, true);
1777
+ ufshcd_disable_irq(hba);
16961778
1779
+ ufshcd_setup_clocks(hba, false);
1780
+
1781
+ /* Put the host controller in low power mode if possible */
1782
+ ufshcd_hba_vreg_set_lpm(hba);
16971783 /*
16981784 * In case you are here to cancel this work the gating state
16991785 * would be marked as REQ_CLKS_ON. In this case keep the state
....@@ -1723,11 +1809,11 @@
17231809
17241810 hba->clk_gating.active_reqs--;
17251811
1726
- if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1727
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1728
- || hba->lrb_in_use || hba->outstanding_tasks
1729
- || hba->active_uic_cmd || hba->uic_async_done
1730
- || ufshcd_eh_in_progress(hba))
1812
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
1813
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
1814
+ hba->outstanding_tasks ||
1815
+ hba->active_uic_cmd || hba->uic_async_done ||
1816
+ hba->clk_gating.state == CLKS_OFF)
17311817 return;
17321818
17331819 hba->clk_gating.state = REQ_CLKS_OFF;
....@@ -1789,68 +1875,24 @@
17891875 return -EINVAL;
17901876
17911877 value = !!value;
1878
+
1879
+ spin_lock_irqsave(hba->host->host_lock, flags);
17921880 if (value == hba->clk_gating.is_enabled)
17931881 goto out;
17941882
1795
- if (value) {
1796
- ufshcd_release(hba);
1797
- } else {
1798
- spin_lock_irqsave(hba->host->host_lock, flags);
1883
+ if (value)
1884
+ __ufshcd_release(hba);
1885
+ else
17991886 hba->clk_gating.active_reqs++;
1800
- spin_unlock_irqrestore(hba->host->host_lock, flags);
1801
- }
18021887
18031888 hba->clk_gating.is_enabled = value;
18041889 out:
1890
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
18051891 return count;
18061892 }
18071893
1808
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1894
+static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
18091895 {
1810
- char wq_name[sizeof("ufs_clkscaling_00")];
1811
-
1812
- if (!ufshcd_is_clkscaling_supported(hba))
1813
- return;
1814
-
1815
- INIT_WORK(&hba->clk_scaling.suspend_work,
1816
- ufshcd_clk_scaling_suspend_work);
1817
- INIT_WORK(&hba->clk_scaling.resume_work,
1818
- ufshcd_clk_scaling_resume_work);
1819
-
1820
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1821
- hba->host->host_no);
1822
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1823
-
1824
- ufshcd_clkscaling_init_sysfs(hba);
1825
-}
1826
-
1827
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1828
-{
1829
- if (!ufshcd_is_clkscaling_supported(hba))
1830
- return;
1831
-
1832
- destroy_workqueue(hba->clk_scaling.workq);
1833
- ufshcd_devfreq_remove(hba);
1834
-}
1835
-
1836
-static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1837
-{
1838
- char wq_name[sizeof("ufs_clk_gating_00")];
1839
-
1840
- if (!ufshcd_is_clkgating_allowed(hba))
1841
- return;
1842
-
1843
- hba->clk_gating.delay_ms = 150;
1844
- INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1845
- INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1846
-
1847
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1848
- hba->host->host_no);
1849
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1850
- WQ_MEM_RECLAIM);
1851
-
1852
- hba->clk_gating.is_enabled = true;
1853
-
18541896 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
18551897 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
18561898 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
....@@ -1868,61 +1910,167 @@
18681910 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
18691911 }
18701912
1871
-static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1913
+static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
18721914 {
1915
+ if (hba->clk_gating.delay_attr.attr.name)
1916
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1917
+ if (hba->clk_gating.enable_attr.attr.name)
1918
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1919
+}
1920
+
1921
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1922
+{
1923
+ char wq_name[sizeof("ufs_clk_gating_00")];
1924
+
18731925 if (!ufshcd_is_clkgating_allowed(hba))
18741926 return;
1875
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1876
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1927
+
1928
+ hba->clk_gating.state = CLKS_ON;
1929
+
1930
+ hba->clk_gating.delay_ms = 150;
1931
+ INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1932
+ INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1933
+
1934
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1935
+ hba->host->host_no);
1936
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1937
+ WQ_MEM_RECLAIM | WQ_HIGHPRI);
1938
+
1939
+ ufshcd_init_clk_gating_sysfs(hba);
1940
+
1941
+ hba->clk_gating.is_enabled = true;
1942
+ hba->clk_gating.is_initialized = true;
1943
+}
1944
+
1945
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1946
+{
1947
+ if (!hba->clk_gating.is_initialized)
1948
+ return;
1949
+ ufshcd_remove_clk_gating_sysfs(hba);
18771950 cancel_work_sync(&hba->clk_gating.ungate_work);
18781951 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
18791952 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1953
+ hba->clk_gating.is_initialized = false;
18801954 }
18811955
18821956 /* Must be called with host lock acquired */
18831957 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
18841958 {
18851959 bool queue_resume_work = false;
1960
+ ktime_t curr_t = ktime_get();
1961
+ unsigned long flags;
18861962
18871963 if (!ufshcd_is_clkscaling_supported(hba))
18881964 return;
18891965
1966
+ spin_lock_irqsave(hba->host->host_lock, flags);
18901967 if (!hba->clk_scaling.active_reqs++)
18911968 queue_resume_work = true;
18921969
1893
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1970
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
1971
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
18941972 return;
1973
+ }
18951974
18961975 if (queue_resume_work)
18971976 queue_work(hba->clk_scaling.workq,
18981977 &hba->clk_scaling.resume_work);
18991978
19001979 if (!hba->clk_scaling.window_start_t) {
1901
- hba->clk_scaling.window_start_t = jiffies;
1980
+ hba->clk_scaling.window_start_t = curr_t;
19021981 hba->clk_scaling.tot_busy_t = 0;
19031982 hba->clk_scaling.is_busy_started = false;
19041983 }
19051984
19061985 if (!hba->clk_scaling.is_busy_started) {
1907
- hba->clk_scaling.busy_start_t = ktime_get();
1986
+ hba->clk_scaling.busy_start_t = curr_t;
19081987 hba->clk_scaling.is_busy_started = true;
19091988 }
1989
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
19101990 }
19111991
19121992 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
19131993 {
19141994 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1995
+ unsigned long flags;
19151996
19161997 if (!ufshcd_is_clkscaling_supported(hba))
19171998 return;
19181999
2000
+ spin_lock_irqsave(hba->host->host_lock, flags);
2001
+ hba->clk_scaling.active_reqs--;
19192002 if (!hba->outstanding_reqs && scaling->is_busy_started) {
19202003 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
19212004 scaling->busy_start_t));
19222005 scaling->busy_start_t = 0;
19232006 scaling->is_busy_started = false;
19242007 }
2008
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
19252009 }
2010
+
2011
+static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2012
+{
2013
+ if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2014
+ return READ;
2015
+ else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2016
+ return WRITE;
2017
+ else
2018
+ return -EINVAL;
2019
+}
2020
+
2021
+static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2022
+ struct ufshcd_lrb *lrbp)
2023
+{
2024
+ struct ufs_hba_monitor *m = &hba->monitor;
2025
+
2026
+ return (m->enabled && lrbp && lrbp->cmd &&
2027
+ (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2028
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2029
+}
2030
+
2031
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2032
+{
2033
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2034
+ unsigned long flags;
2035
+
2036
+ spin_lock_irqsave(hba->host->host_lock, flags);
2037
+ if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2038
+ hba->monitor.busy_start_ts[dir] = ktime_get();
2039
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2040
+}
2041
+
2042
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2043
+{
2044
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2045
+ unsigned long flags;
2046
+
2047
+ spin_lock_irqsave(hba->host->host_lock, flags);
2048
+ if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2049
+ struct request *req = lrbp->cmd->request;
2050
+ struct ufs_hba_monitor *m = &hba->monitor;
2051
+ ktime_t now, inc, lat;
2052
+
2053
+ now = lrbp->compl_time_stamp;
2054
+ inc = ktime_sub(now, m->busy_start_ts[dir]);
2055
+ m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2056
+ m->nr_sec_rw[dir] += blk_rq_sectors(req);
2057
+
2058
+ /* Update latencies */
2059
+ m->nr_req[dir]++;
2060
+ lat = ktime_sub(now, lrbp->issue_time_stamp);
2061
+ m->lat_sum[dir] += lat;
2062
+ if (m->lat_max[dir] < lat || !m->lat_max[dir])
2063
+ m->lat_max[dir] = lat;
2064
+ if (m->lat_min[dir] > lat || !m->lat_min[dir])
2065
+ m->lat_min[dir] = lat;
2066
+
2067
+ m->nr_queued[dir]--;
2068
+ /* Push forward the busy start of monitor */
2069
+ m->busy_start_ts[dir] = now;
2070
+ }
2071
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2072
+}
2073
+
19262074 /**
19272075 * ufshcd_send_command - Send SCSI or device management commands
19282076 * @hba: per adapter instance
....@@ -1931,12 +2079,30 @@
19312079 static inline
19322080 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
19332081 {
1934
- hba->lrb[task_tag].issue_time_stamp = ktime_get();
1935
- hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
2082
+ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2083
+
2084
+ lrbp->issue_time_stamp = ktime_get();
2085
+ lrbp->compl_time_stamp = ktime_set(0, 0);
2086
+ trace_android_vh_ufs_send_command(hba, lrbp);
19362087 ufshcd_add_command_trace(hba, task_tag, "send");
19372088 ufshcd_clk_scaling_start_busy(hba);
1938
- __set_bit(task_tag, &hba->outstanding_reqs);
1939
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2089
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2090
+ ufshcd_start_monitor(hba, lrbp);
2091
+ if (hba->vops && hba->vops->setup_xfer_req)
2092
+ hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
2093
+ if (ufshcd_has_utrlcnr(hba)) {
2094
+ set_bit(task_tag, &hba->outstanding_reqs);
2095
+ ufshcd_writel(hba, 1 << task_tag,
2096
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
2097
+ } else {
2098
+ unsigned long flags;
2099
+
2100
+ spin_lock_irqsave(hba->host->host_lock, flags);
2101
+ set_bit(task_tag, &hba->outstanding_reqs);
2102
+ ufshcd_writel(hba, 1 << task_tag,
2103
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
2104
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2105
+ }
19402106 /* Make sure that doorbell is committed immediately */
19412107 wmb();
19422108 }
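/*
 * ufshcd_send_command() now runs the vendor hook
 * (trace_android_vh_ufs_send_command) and the vops->setup_xfer_req()
 * callback before ringing the doorbell. When the controller exposes
 * UTRLCNR, outstanding_reqs and the doorbell are updated without the
 * host lock; otherwise both are still done under host_lock so the bitmap
 * and the doorbell register stay consistent.
 */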
....@@ -1953,11 +2119,10 @@
19532119 int len_to_copy;
19542120
19552121 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1956
- len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
2122
+ len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
19572123
1958
- memcpy(lrbp->sense_buffer,
1959
- lrbp->ucd_rsp_ptr->sr.sense_data,
1960
- min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
2124
+ memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2125
+ len_to_copy);
19612126 }
19622127 }
19632128
....@@ -1991,8 +2156,8 @@
19912156 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
19922157 } else {
19932158 dev_warn(hba->dev,
1994
- "%s: Response size is bigger than buffer",
1995
- __func__);
2159
+ "%s: rsp size %d is bigger than buffer size %d",
2160
+ __func__, resp_len, buf_len);
19962161 return -EINVAL;
19972162 }
19982163 }
....@@ -2003,15 +2168,27 @@
20032168 /**
20042169 * ufshcd_hba_capabilities - Read controller capabilities
20052170 * @hba: per adapter instance
2171
+ *
2172
+ * Return: 0 on success, negative on error.
20062173 */
2007
-static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
2174
+static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
20082175 {
2176
+ int err;
2177
+
20092178 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
20102179
20112180 /* nutrs and nutmrs are 0 based values */
20122181 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
20132182 hba->nutmrs =
20142183 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2184
+ ufs_hba_add_info(hba)->reserved_slot = hba->nutrs - 1;
2185
+
2186
+ /* Read crypto capabilities */
2187
+ err = ufshcd_hba_init_crypto_capabilities(hba);
2188
+ if (err)
2189
+ dev_err(hba->dev, "crypto setup failed\n");
2190
+
2191
+ return err;
20152192 }
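/*
 * ufshcd_hba_capabilities() now returns an error code: besides reading
 * nutrs/nutmrs it records the last transfer slot (nutrs - 1) as the
 * reserved slot for device management commands and initializes the
 * crypto capabilities, so a crypto setup failure can be propagated.
 */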
20162193
20172194 /**
....@@ -2059,6 +2236,8 @@
20592236 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
20602237 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
20612238
2239
+ ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
2240
+
20622241 /* Write UIC Cmd */
20632242 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
20642243 REG_UIC_COMMAND);
....@@ -2079,10 +2258,20 @@
20792258 unsigned long flags;
20802259
20812260 if (wait_for_completion_timeout(&uic_cmd->done,
2082
- msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2261
+ msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
20832262 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2084
- else
2263
+ } else {
20852264 ret = -ETIMEDOUT;
2265
+ dev_err(hba->dev,
2266
+ "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2267
+ uic_cmd->command, uic_cmd->argument3);
2268
+
2269
+ if (!uic_cmd->cmd_active) {
2270
+ dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2271
+ __func__);
2272
+ ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2273
+ }
2274
+ }
20862275
20872276 spin_lock_irqsave(hba->host->host_lock, flags);
20882277 hba->active_uic_cmd = NULL;
....@@ -2114,6 +2303,7 @@
21142303 if (completion)
21152304 init_completion(&uic_cmd->done);
21162305
2306
+ uic_cmd->cmd_active = 1;
21172307 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
21182308
21192309 return 0;
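/*
 * uic_cmd->cmd_active is set before the command is dispatched so that, if
 * the completion wait above times out, the driver can detect that the
 * command did in fact complete and return its result instead of
 * -ETIMEDOUT (the same pattern is used for power mode changes further
 * down). Hosts with UFSHCD_QUIRK_BROKEN_UIC_CMD skip UIC commands
 * entirely.
 */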
....@@ -2126,11 +2316,13 @@
21262316 *
21272317 * Returns 0 only if success.
21282318 */
2129
-static int
2130
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2319
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
21312320 {
21322321 int ret;
21332322 unsigned long flags;
2323
+
2324
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2325
+ return 0;
21342326
21352327 ufshcd_hold(hba, false);
21362328 mutex_lock(&hba->uic_cmd_mutex);
....@@ -2162,6 +2354,7 @@
21622354 struct scsi_cmnd *cmd;
21632355 int sg_segments;
21642356 int i;
2357
+ int err;
21652358
21662359 cmd = lrbp->cmd;
21672360 sg_segments = scsi_dma_map(cmd);
....@@ -2169,10 +2362,10 @@
21692362 return sg_segments;
21702363
21712364 if (sg_segments) {
2365
+
21722366 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
21732367 lrbp->utr_descriptor_ptr->prd_table_length =
2174
- cpu_to_le16((u16)(sg_segments *
2175
- hba->sg_entry_size));
2368
+ cpu_to_le16(sg_segments * hba->sg_entry_size);
21762369 else
21772370 lrbp->utr_descriptor_ptr->prd_table_length =
21782371 cpu_to_le16((u16) (sg_segments));
....@@ -2193,7 +2386,9 @@
21932386 lrbp->utr_descriptor_ptr->prd_table_length = 0;
21942387 }
21952388
2196
- return ufshcd_map_sg_crypto(hba, lrbp);
2389
+ err = 0;
2390
+ trace_android_vh_ufs_fill_prdt(hba, lrbp, sg_segments, &err);
2391
+ return err;
21972392 }
21982393
21992394 /**
....@@ -2205,7 +2400,7 @@
22052400 {
22062401 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
22072402
2208
- if (hba->ufs_version == UFSHCI_VERSION_10) {
2403
+ if (hba->ufs_version == ufshci_version(1, 0)) {
22092404 u32 rw;
22102405 rw = set & INTERRUPT_MASK_RW_VER_10;
22112406 set = rw | ((set ^ intrs) & intrs);
....@@ -2225,7 +2420,7 @@
22252420 {
22262421 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
22272422
2228
- if (hba->ufs_version == UFSHCI_VERSION_10) {
2423
+ if (hba->ufs_version == ufshci_version(1, 0)) {
22292424 u32 rw;
22302425 rw = (set & INTERRUPT_MASK_RW_VER_10) &
22312426 ~(intrs & INTERRUPT_MASK_RW_VER_10);
....@@ -2246,11 +2441,13 @@
22462441 * @cmd_dir: requests data direction
22472442 */
22482443 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2249
- u32 *upiu_flags, enum dma_data_direction cmd_dir)
2444
+ u8 *upiu_flags, enum dma_data_direction cmd_dir)
22502445 {
22512446 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
22522447 u32 data_direction;
22532448 u32 dword_0;
2449
+ u32 dword_1 = 0;
2450
+ u32 dword_3 = 0;
22542451
22552452 if (cmd_dir == DMA_FROM_DEVICE) {
22562453 data_direction = UTP_DEVICE_TO_HOST;
....@@ -2268,24 +2465,12 @@
22682465 if (lrbp->intr_cmd)
22692466 dword_0 |= UTP_REQ_DESC_INT_CMD;
22702467
2468
+ /* Prepare crypto related dwords */
2469
+ ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
2470
+
22712471 /* Transfer request descriptor header fields */
2272
- if (ufshcd_lrbp_crypto_enabled(lrbp)) {
2273
-#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
2274
- dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
2275
- dword_0 |= lrbp->crypto_key_slot;
2276
- req_desc->header.dword_1 =
2277
- cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
2278
- req_desc->header.dword_3 =
2279
- cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
2280
-#endif /* CONFIG_SCSI_UFS_CRYPTO */
2281
- } else {
2282
- /* dword_1 and dword_3 are reserved, hence they are set to 0 */
2283
- req_desc->header.dword_1 = 0;
2284
- req_desc->header.dword_3 = 0;
2285
- }
2286
-
22872472 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2288
-
2473
+ req_desc->header.dword_1 = cpu_to_le32(dword_1);
22892474 /*
22902475 * assigning invalid value for command status. Controller
22912476 * updates OCS on command completion, with the command
....@@ -2293,6 +2478,7 @@
22932478 */
22942479 req_desc->header.dword_2 =
22952480 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2481
+ req_desc->header.dword_3 = cpu_to_le32(dword_3);
22962482
22972483 req_desc->prd_table_length = 0;
22982484 }
....@@ -2304,8 +2490,9 @@
23042490 * @upiu_flags: flags
23052491 */
23062492 static
2307
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2493
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
23082494 {
2495
+ struct scsi_cmnd *cmd = lrbp->cmd;
23092496 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
23102497 unsigned short cdb_len;
23112498
....@@ -2319,12 +2506,11 @@
23192506 /* Total EHS length and Data segment length will be zero */
23202507 ucd_req_ptr->header.dword_2 = 0;
23212508
2322
- ucd_req_ptr->sc.exp_data_transfer_len =
2323
- cpu_to_be32(lrbp->cmd->sdb.length);
2509
+ ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
23242510
2325
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2326
- memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2327
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2511
+ cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2512
+ memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2513
+ memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
23282514
23292515 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
23302516 }
....@@ -2337,12 +2523,11 @@
23372523 * @upiu_flags: flags
23382524 */
23392525 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2340
- struct ufshcd_lrb *lrbp, u32 upiu_flags)
2526
+ struct ufshcd_lrb *lrbp, u8 upiu_flags)
23412527 {
23422528 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
23432529 struct ufs_query *query = &hba->dev_cmd.query;
23442530 u16 len = be16_to_cpu(query->request.upiu_req.length);
2345
- u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
23462531
23472532 /* Query request header */
23482533 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
....@@ -2364,7 +2549,7 @@
23642549
23652550 /* Copy the Descriptor */
23662551 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2367
- memcpy(descp, query->descriptor, len);
2552
+ memcpy(ucd_req_ptr + 1, query->descriptor, len);
23682553
23692554 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
23702555 }
....@@ -2387,18 +2572,18 @@
23872572 }
23882573
23892574 /**
2390
- * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
2575
+ * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
23912576 * for Device Management Purposes
23922577 * @hba: per adapter instance
23932578 * @lrbp: pointer to local reference block
23942579 */
2395
-static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2580
+static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2581
+ struct ufshcd_lrb *lrbp)
23962582 {
2397
- u32 upiu_flags;
2583
+ u8 upiu_flags;
23982584 int ret = 0;
23992585
2400
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2401
- (hba->ufs_version == UFSHCI_VERSION_11))
2586
+ if (hba->ufs_version <= ufshci_version(1, 1))
24022587 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
24032588 else
24042589 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
....@@ -2422,11 +2607,10 @@
24222607 */
24232608 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
24242609 {
2425
- u32 upiu_flags;
2610
+ u8 upiu_flags;
24262611 int ret = 0;
24272612
2428
- if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2429
- (hba->ufs_version == UFSHCI_VERSION_11))
2613
+ if (hba->ufs_version <= ufshci_version(1, 1))
24302614 lrbp->command_type = UTP_CMD_TYPE_SCSI;
24312615 else
24322616 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
....@@ -2453,6 +2637,28 @@
24532637 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
24542638 }
24552639
2640
+static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2641
+{
2642
+ struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2643
+ i * sizeof_utp_transfer_cmd_desc(hba);
2644
+ struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2645
+ dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2646
+ i * sizeof_utp_transfer_cmd_desc(hba);
2647
+ u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
2648
+ response_upiu);
2649
+ u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
2650
+
2651
+ lrb->utr_descriptor_ptr = utrdlp + i;
2652
+ lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2653
+ i * sizeof(struct utp_transfer_req_desc);
2654
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
2655
+ lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2656
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2657
+ lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2658
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2659
+ lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2660
+}
2661
+
24562662 /**
24572663 * ufshcd_queuecommand - main entry point for SCSI requests
24582664 * @host: SCSI host pointer
....@@ -2464,7 +2670,6 @@
24642670 {
24652671 struct ufshcd_lrb *lrbp;
24662672 struct ufs_hba *hba;
2467
- unsigned long flags;
24682673 int tag;
24692674 int err = 0;
24702675
....@@ -2481,93 +2686,92 @@
24812686 if (!down_read_trylock(&hba->clk_scaling_lock))
24822687 return SCSI_MLQUEUE_HOST_BUSY;
24832688
2484
- spin_lock_irqsave(hba->host->host_lock, flags);
24852689 switch (hba->ufshcd_state) {
24862690 case UFSHCD_STATE_OPERATIONAL:
2691
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
24872692 break;
2488
- case UFSHCD_STATE_EH_SCHEDULED:
2693
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2694
+ /*
2695
+ * pm_runtime_get_sync() is used at error handling preparation
2696
+ * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
2697
+ * PM ops, it can never be finished if we let SCSI layer keep
2698
+ * retrying it, which gets err handler stuck forever. Neither
2699
+ * can we let the scsi cmd pass through, because UFS is in bad
2700
+ * state, the scsi cmd may eventually time out, which will get
2701
+ * err handler blocked for too long. So, just fail the scsi cmd
2702
+ * sent from PM ops, err handler can recover PM error anyways.
2703
+ */
2704
+ if (hba->pm_op_in_progress) {
2705
+ hba->force_reset = true;
2706
+ set_host_byte(cmd, DID_BAD_TARGET);
2707
+ cmd->scsi_done(cmd);
2708
+ goto out;
2709
+ }
2710
+ fallthrough;
24892711 case UFSHCD_STATE_RESET:
24902712 err = SCSI_MLQUEUE_HOST_BUSY;
2491
- goto out_unlock;
2713
+ goto out;
24922714 case UFSHCD_STATE_ERROR:
24932715 set_host_byte(cmd, DID_ERROR);
24942716 cmd->scsi_done(cmd);
2495
- goto out_unlock;
2717
+ goto out;
24962718 default:
24972719 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
24982720 __func__, hba->ufshcd_state);
24992721 set_host_byte(cmd, DID_BAD_TARGET);
25002722 cmd->scsi_done(cmd);
2501
- goto out_unlock;
2502
- }
2503
-
2504
- /* if error handling is in progress, don't issue commands */
2505
- if (ufshcd_eh_in_progress(hba)) {
2506
- set_host_byte(cmd, DID_ERROR);
2507
- cmd->scsi_done(cmd);
2508
- goto out_unlock;
2509
- }
2510
- spin_unlock_irqrestore(hba->host->host_lock, flags);
2511
-
2512
- hba->req_abort_count = 0;
2513
-
2514
- /* acquire the tag to make sure device cmds don't use it */
2515
- if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2516
- /*
2517
- * Dev manage command in progress, requeue the command.
2518
- * Requeuing the command helps in cases where the request *may*
2519
- * find different tag instead of waiting for dev manage command
2520
- * completion.
2521
- */
2522
- err = SCSI_MLQUEUE_HOST_BUSY;
25232723 goto out;
25242724 }
2725
+
2726
+ hba->req_abort_count = 0;
25252727
25262728 err = ufshcd_hold(hba, true);
25272729 if (err) {
25282730 err = SCSI_MLQUEUE_HOST_BUSY;
2529
- clear_bit_unlock(tag, &hba->lrb_in_use);
25302731 goto out;
25312732 }
2532
- WARN_ON(hba->clk_gating.state != CLKS_ON);
2733
+ WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
2734
+ (hba->clk_gating.state != CLKS_ON));
25332735
25342736 lrbp = &hba->lrb[tag];
2535
-
25362737 WARN_ON(lrbp->cmd);
25372738 lrbp->cmd = cmd;
2538
- lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2739
+ lrbp->sense_bufflen = UFS_SENSE_SIZE;
25392740 lrbp->sense_buffer = cmd->sense_buffer;
25402741 lrbp->task_tag = tag;
25412742 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
25422743 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
25432744
2544
- err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp);
2745
+ ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
2746
+
2747
+ trace_android_vh_ufs_prepare_command(hba, cmd->request, lrbp, &err);
25452748 if (err) {
2546
- ufshcd_release(hba);
25472749 lrbp->cmd = NULL;
2548
- clear_bit_unlock(tag, &hba->lrb_in_use);
2750
+ ufshcd_release(hba);
25492751 goto out;
25502752 }
2753
+
25512754 lrbp->req_abort_skip = false;
2755
+
2756
+ err = ufshpb_prep(hba, lrbp);
2757
+ if (err == -EAGAIN) {
2758
+ lrbp->cmd = NULL;
2759
+ ufshcd_release(hba);
2760
+ goto out;
2761
+ }
25522762
25532763 ufshcd_comp_scsi_upiu(hba, lrbp);
25542764
25552765 err = ufshcd_map_sg(hba, lrbp);
25562766 if (err) {
2557
- ufshcd_release(hba);
25582767 lrbp->cmd = NULL;
2559
- clear_bit_unlock(tag, &hba->lrb_in_use);
2768
+ ufshcd_release(hba);
25602769 goto out;
25612770 }
25622771 /* Make sure descriptors are ready before ringing the doorbell */
25632772 wmb();
25642773
2565
- /* issue command to the controller */
2566
- spin_lock_irqsave(hba->host->host_lock, flags);
2567
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
25682774 ufshcd_send_command(hba, tag);
2569
-out_unlock:
2570
- spin_unlock_irqrestore(hba->host->host_lock, flags);
25712775 out:
25722776 up_read(&hba->clk_scaling_lock);
25732777 return err;
....@@ -2582,12 +2786,10 @@
25822786 lrbp->task_tag = tag;
25832787 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
25842788 lrbp->intr_cmd = true; /* No interrupt aggregation */
2585
-#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
2586
- lrbp->crypto_enable = false; /* No crypto operations */
2587
-#endif
2789
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
25882790 hba->dev_cmd.type = cmd_type;
25892791
2590
- return ufshcd_comp_devman_upiu(hba, lrbp);
2792
+ return ufshcd_compose_devman_upiu(hba, lrbp);
25912793 }
25922794
25932795 static int
....@@ -2608,7 +2810,7 @@
26082810 */
26092811 err = ufshcd_wait_for_register(hba,
26102812 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2611
- mask, ~mask, 1000, 1000, true);
2813
+ mask, ~mask, 1000, 1000);
26122814
26132815 return err;
26142816 }
....@@ -2670,85 +2872,86 @@
26702872 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
26712873 struct ufshcd_lrb *lrbp, int max_timeout)
26722874 {
2673
- int err = 0;
2674
- unsigned long time_left;
2875
+ unsigned long time_left = msecs_to_jiffies(max_timeout);
26752876 unsigned long flags;
2877
+ bool pending;
2878
+ int err;
26762879
2880
+retry:
26772881 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2678
- msecs_to_jiffies(max_timeout));
2882
+ time_left);
26792883
26802884 /* Make sure descriptors are ready before ringing the doorbell */
26812885 wmb();
2682
- spin_lock_irqsave(hba->host->host_lock, flags);
2683
- hba->dev_cmd.complete = NULL;
26842886 if (likely(time_left)) {
2887
+ /*
2888
+ * The completion handler called complete() and the caller of
2889
+ * this function still owns the @lrbp tag so the code below does
2890
+ * not trigger any race conditions.
2891
+ */
2892
+ hba->dev_cmd.complete = NULL;
26852893 err = ufshcd_get_tr_ocs(lrbp);
26862894 if (!err)
26872895 err = ufshcd_dev_cmd_completion(hba, lrbp);
2688
- }
2689
- spin_unlock_irqrestore(hba->host->host_lock, flags);
2690
-
2691
- if (!time_left) {
2896
+ } else {
26922897 err = -ETIMEDOUT;
26932898 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
26942899 __func__, lrbp->task_tag);
2695
- if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2900
+ if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
26962901 /* successfully cleared the command, retry if needed */
26972902 err = -EAGAIN;
2698
- /*
2699
- * in case of an error, after clearing the doorbell,
2700
- * we also need to clear the outstanding_request
2701
- * field in hba
2702
- */
2703
- ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2903
+ /*
2904
+ * Since clearing the command succeeded we also need to
2905
+ * clear the task tag bit from the outstanding_reqs
2906
+ * variable.
2907
+ */
2908
+ spin_lock_irqsave(hba->host->host_lock, flags);
2909
+ pending = test_bit(lrbp->task_tag,
2910
+ &hba->outstanding_reqs);
2911
+ if (pending) {
2912
+ hba->dev_cmd.complete = NULL;
2913
+ __clear_bit(lrbp->task_tag,
2914
+ &hba->outstanding_reqs);
2915
+ }
2916
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2917
+
2918
+ if (!pending) {
2919
+ /*
2920
+ * The completion handler ran while we tried to
2921
+ * clear the command.
2922
+ */
2923
+ time_left = 1;
2924
+ goto retry;
2925
+ }
2926
+ } else {
2927
+ dev_err(hba->dev, "%s: failed to clear tag %d\n",
2928
+ __func__, lrbp->task_tag);
2929
+ spin_lock_irqsave(hba->host->host_lock, flags);
2930
+ pending = test_bit(lrbp->task_tag,
2931
+ &hba->outstanding_reqs);
2932
+ if (pending)
2933
+ hba->dev_cmd.complete = NULL;
2934
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
2935
+
2936
+ if (!pending) {
2937
+ /*
2938
+ * The completion handler ran while we tried to
2939
+ * clear the command.
2940
+ */
2941
+ time_left = 1;
2942
+ goto retry;
2943
+ }
2944
+ }
27042945 }
27052946
27062947 return err;
27072948 }
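/*
 * Timeout handling in ufshcd_wait_for_dev_cmd() now copes with the race
 * where the completion fires while the driver is clearing the doorbell:
 * if the tag is no longer pending in outstanding_reqs, the wait is
 * retried with a minimal timeout instead of returning -ETIMEDOUT, and
 * dev_cmd.complete is only cleared once ownership of the tag is certain.
 */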
27082949
27092950 /**
2710
- * ufshcd_get_dev_cmd_tag - Get device management command tag
2711
- * @hba: per-adapter instance
2712
- * @tag_out: pointer to variable with available slot value
2713
- *
2714
- * Get a free slot and lock it until device management command
2715
- * completes.
2716
- *
2717
- * Returns false if free slot is unavailable for locking, else
2718
- * return true with tag value in @tag.
2719
- */
2720
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2721
-{
2722
- int tag;
2723
- bool ret = false;
2724
- unsigned long tmp;
2725
-
2726
- if (!tag_out)
2727
- goto out;
2728
-
2729
- do {
2730
- tmp = ~hba->lrb_in_use;
2731
- tag = find_last_bit(&tmp, hba->nutrs);
2732
- if (tag >= hba->nutrs)
2733
- goto out;
2734
- } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2735
-
2736
- *tag_out = tag;
2737
- ret = true;
2738
-out:
2739
- return ret;
2740
-}
2741
-
2742
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2743
-{
2744
- clear_bit_unlock(tag, &hba->lrb_in_use);
2745
-}
2746
-
2747
-/**
27482951 * ufshcd_exec_dev_cmd - API for sending device management requests
27492952 * @hba: UFS hba
27502953 * @cmd_type: specifies the type (NOP, Query...)
2751
- * @timeout: time in seconds
2954
+ * @timeout: timeout in milliseconds
27522955 *
27532956 * NOTE: Since there is only one available tag for device management commands,
27542957 * it is expected you hold the hba->dev_cmd.lock mutex.
....@@ -2756,46 +2959,34 @@
27562959 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
27572960 enum dev_cmd_type cmd_type, int timeout)
27582961 {
2962
+ DECLARE_COMPLETION_ONSTACK(wait);
2963
+ const u32 tag = ufs_hba_add_info(hba)->reserved_slot;
27592964 struct ufshcd_lrb *lrbp;
27602965 int err;
2761
- int tag;
2762
- struct completion wait;
2763
- unsigned long flags;
2966
+
2967
+ /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */
2968
+ lockdep_assert_held(&hba->dev_cmd.lock);
27642969
27652970 down_read(&hba->clk_scaling_lock);
27662971
2767
- /*
2768
- * Get free slot, sleep if slots are unavailable.
2769
- * Even though we use wait_event() which sleeps indefinitely,
2770
- * the maximum wait time is bounded by SCSI request timeout.
2771
- */
2772
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2773
-
2774
- init_completion(&wait);
27752972 lrbp = &hba->lrb[tag];
27762973 WARN_ON(lrbp->cmd);
27772974 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
27782975 if (unlikely(err))
2779
- goto out_put_tag;
2976
+ goto out;
27802977
27812978 hba->dev_cmd.complete = &wait;
27822979
27832980 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
27842981 /* Make sure descriptors are ready before ringing the doorbell */
27852982 wmb();
2786
- spin_lock_irqsave(hba->host->host_lock, flags);
2787
- ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2983
+
27882984 ufshcd_send_command(hba, tag);
2789
- spin_unlock_irqrestore(hba->host->host_lock, flags);
2790
-
27912985 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2792
-
27932986 ufshcd_add_query_upiu_trace(hba, tag,
27942987 err ? "query_complete_err" : "query_complete");
27952988
2796
-out_put_tag:
2797
- ufshcd_put_dev_cmd_tag(hba, tag);
2798
- wake_up(&hba->dev_cmd.tag_wq);
2989
+out:
27992990 up_read(&hba->clk_scaling_lock);
28002991 return err;
28012992 }
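/*
 * Device management commands no longer compete for a free LRB through the
 * lrb_in_use bitmap and tag_wq wait queue. They always use the reserved
 * slot recorded in ufs_hba_add_info(hba)->reserved_slot, with exclusive
 * use of that slot guaranteed by dev_cmd.lock (checked with
 * lockdep_assert_held()), and the completion is declared on the stack.
 */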
....@@ -2824,14 +3015,14 @@
28243015 (*request)->upiu_req.selector = selector;
28253016 }
28263017
2827
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2828
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
3018
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
3019
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
28293020 {
28303021 int ret;
28313022 int retries;
28323023
28333024 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2834
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
3025
+ ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
28353026 if (ret)
28363027 dev_dbg(hba->dev,
28373028 "%s: failed with error %d, retries %d\n",
....@@ -2846,22 +3037,24 @@
28463037 __func__, opcode, idn, ret, retries);
28473038 return ret;
28483039 }
3040
+EXPORT_SYMBOL_GPL(ufshcd_query_flag_retry);
28493041
28503042 /**
28513043 * ufshcd_query_flag() - API function for sending flag query requests
28523044 * @hba: per-adapter instance
28533045 * @opcode: flag query to perform
28543046 * @idn: flag idn to access
3047
+ * @index: flag index to access
28553048 * @flag_res: the flag value after the query request completes
28563049 *
28573050 * Returns 0 for success, non-zero in case of failure
28583051 */
28593052 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2860
- enum flag_idn idn, bool *flag_res)
3053
+ enum flag_idn idn, u8 index, bool *flag_res)
28613054 {
28623055 struct ufs_query_req *request = NULL;
28633056 struct ufs_query_res *response = NULL;
2864
- int err, index = 0, selector = 0;
3057
+ int err, selector = 0;
28653058 int timeout = QUERY_REQ_TIMEOUT;
28663059
28673060 BUG_ON(!hba);
....@@ -2913,6 +3106,7 @@
29133106 ufshcd_release(hba);
29143107 return err;
29153108 }
3109
+EXPORT_SYMBOL_GPL(ufshcd_query_flag);
29163110
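/*
 * Flag queries now take an explicit descriptor index instead of a
 * hard-coded 0, and ufshcd_query_flag()/ufshcd_query_flag_retry(), like
 * the attribute and descriptor helpers below, are exported with
 * EXPORT_SYMBOL_GPL so code outside this file can issue query requests.
 */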
29173111 /**
29183112 * ufshcd_query_attr - API function for sending attribute requests
....@@ -2934,13 +3128,13 @@
29343128
29353129 BUG_ON(!hba);
29363130
2937
- ufshcd_hold(hba, false);
29383131 if (!attr_val) {
29393132 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
29403133 __func__, opcode);
2941
- err = -EINVAL;
2942
- goto out;
3134
+ return -EINVAL;
29433135 }
3136
+
3137
+ ufshcd_hold(hba, false);
29443138
29453139 mutex_lock(&hba->dev_cmd.lock);
29463140 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
....@@ -2973,10 +3167,10 @@
29733167
29743168 out_unlock:
29753169 mutex_unlock(&hba->dev_cmd.lock);
2976
-out:
29773170 ufshcd_release(hba);
29783171 return err;
29793172 }
3173
+EXPORT_SYMBOL_GPL(ufshcd_query_attr);
29803174
29813175 /**
29823176 * ufshcd_query_attr_retry() - API function for sending query
....@@ -2991,14 +3185,14 @@
29913185 *
29923186 * Returns 0 for success, non-zero in case of failure
29933187 */
2994
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
3188
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
29953189 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
29963190 u32 *attr_val)
29973191 {
29983192 int ret = 0;
29993193 u32 retries;
30003194
3001
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3195
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
30023196 ret = ufshcd_query_attr(hba, opcode, idn, index,
30033197 selector, attr_val);
30043198 if (ret)
....@@ -3014,6 +3208,7 @@
30143208 __func__, idn, ret, QUERY_REQ_RETRIES);
30153209 return ret;
30163210 }
3211
+EXPORT_SYMBOL_GPL(ufshcd_query_attr_retry);
30173212
30183213 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
30193214 enum query_opcode opcode, enum desc_idn idn, u8 index,
....@@ -3025,20 +3220,19 @@
30253220
30263221 BUG_ON(!hba);
30273222
3028
- ufshcd_hold(hba, false);
30293223 if (!desc_buf) {
30303224 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
30313225 __func__, opcode);
3032
- err = -EINVAL;
3033
- goto out;
3226
+ return -EINVAL;
30343227 }
30353228
30363229 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
30373230 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
30383231 __func__, *buf_len);
3039
- err = -EINVAL;
3040
- goto out;
3232
+ return -EINVAL;
30413233 }
3234
+
3235
+ ufshcd_hold(hba, false);
30423236
30433237 mutex_lock(&hba->dev_cmd.lock);
30443238 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
....@@ -3074,7 +3268,6 @@
30743268 out_unlock:
30753269 hba->dev_cmd.query.descriptor = NULL;
30763270 mutex_unlock(&hba->dev_cmd.lock);
3077
-out:
30783271 ufshcd_release(hba);
30793272 return err;
30803273 }
....@@ -3111,95 +3304,38 @@
31113304
31123305 return err;
31133306 }
3114
-
3115
-/**
3116
- * ufshcd_read_desc_length - read the specified descriptor length from header
3117
- * @hba: Pointer to adapter instance
3118
- * @desc_id: descriptor idn value
3119
- * @desc_index: descriptor index
3120
- * @desc_length: pointer to variable to read the length of descriptor
3121
- *
3122
- * Return 0 in case of success, non-zero otherwise
3123
- */
3124
-static int ufshcd_read_desc_length(struct ufs_hba *hba,
3125
- enum desc_idn desc_id,
3126
- int desc_index,
3127
- int *desc_length)
3128
-{
3129
- int ret;
3130
- u8 header[QUERY_DESC_HDR_SIZE];
3131
- int header_len = QUERY_DESC_HDR_SIZE;
3132
-
3133
- if (desc_id >= QUERY_DESC_IDN_MAX)
3134
- return -EINVAL;
3135
-
3136
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3137
- desc_id, desc_index, 0, header,
3138
- &header_len);
3139
-
3140
- if (ret) {
3141
- dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3142
- __func__, desc_id);
3143
- return ret;
3144
- } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3145
- dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3146
- __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3147
- desc_id);
3148
- ret = -EINVAL;
3149
- }
3150
-
3151
- *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3152
- return ret;
3153
-
3154
-}
3307
+EXPORT_SYMBOL_GPL(ufshcd_query_descriptor_retry);
31553308
31563309 /**
31573310 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
31583311 * @hba: Pointer to adapter instance
31593312 * @desc_id: descriptor idn value
31603313 * @desc_len: mapped desc length (out)
3161
- *
3162
- * Return 0 in case of success, non-zero otherwise
31633314 */
3164
-int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3165
- enum desc_idn desc_id, int *desc_len)
3315
+void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
3316
+ int *desc_len)
31663317 {
3167
- switch (desc_id) {
3168
- case QUERY_DESC_IDN_DEVICE:
3169
- *desc_len = hba->desc_size.dev_desc;
3170
- break;
3171
- case QUERY_DESC_IDN_POWER:
3172
- *desc_len = hba->desc_size.pwr_desc;
3173
- break;
3174
- case QUERY_DESC_IDN_GEOMETRY:
3175
- *desc_len = hba->desc_size.geom_desc;
3176
- break;
3177
- case QUERY_DESC_IDN_CONFIGURATION:
3178
- *desc_len = hba->desc_size.conf_desc;
3179
- break;
3180
- case QUERY_DESC_IDN_UNIT:
3181
- *desc_len = hba->desc_size.unit_desc;
3182
- break;
3183
- case QUERY_DESC_IDN_INTERCONNECT:
3184
- *desc_len = hba->desc_size.interc_desc;
3185
- break;
3186
- case QUERY_DESC_IDN_STRING:
3187
- *desc_len = QUERY_DESC_MAX_SIZE;
3188
- break;
3189
- case QUERY_DESC_IDN_HEALTH:
3190
- *desc_len = hba->desc_size.hlth_desc;
3191
- break;
3192
- case QUERY_DESC_IDN_RFU_0:
3193
- case QUERY_DESC_IDN_RFU_1:
3318
+ if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
3319
+ desc_id == QUERY_DESC_IDN_RFU_1)
31943320 *desc_len = 0;
3195
- break;
3196
- default:
3197
- *desc_len = 0;
3198
- return -EINVAL;
3199
- }
3200
- return 0;
3321
+ else
3322
+ *desc_len = hba->desc_size[desc_id];
32013323 }
32023324 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3325
+
3326
+static void ufshcd_update_desc_length(struct ufs_hba *hba,
3327
+ enum desc_idn desc_id, int desc_index,
3328
+ unsigned char desc_len)
3329
+{
3330
+ if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
3331
+ desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
3332
+ /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
3333
+ * than the RPMB unit, however, both descriptors share the same
3334
+ * desc_idn, to cover both unit descriptors with one length, we
3335
+ * choose the normal unit descriptor length by desc_index.
3336
+ */
3337
+ hba->desc_size[desc_id] = desc_len;
3338
+}
32033339
32043340 /**
32053341 * ufshcd_read_desc_param - read the specified descriptor parameter
....@@ -3228,21 +3364,22 @@
32283364 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
32293365 return -EINVAL;
32303366
3231
- /* Get the max length of descriptor from structure filled up at probe
3232
- * time.
3233
- */
3234
- ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3367
+ /* Get the length of descriptor */
3368
+ ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3369
+ if (!buff_len) {
3370
+ dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
3371
+ return -EINVAL;
3372
+ }
32353373
3236
- /* Sanity checks */
3237
- if (ret || !buff_len) {
3238
- dev_err(hba->dev, "%s: Failed to get full descriptor length",
3239
- __func__);
3240
- return ret;
3374
+ if (param_offset >= buff_len) {
3375
+ dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3376
+ __func__, param_offset, desc_id, buff_len);
3377
+ return -EINVAL;
32413378 }
32423379
32433380 /* Check whether we need temp memory */
32443381 if (param_offset != 0 || param_size < buff_len) {
3245
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
3382
+ desc_buf = kzalloc(buff_len, GFP_KERNEL);
32463383 if (!desc_buf)
32473384 return -ENOMEM;
32483385 } else {
....@@ -3256,95 +3393,109 @@
32563393 desc_buf, &buff_len);
32573394
32583395 if (ret) {
3259
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3396
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
32603397 __func__, desc_id, desc_index, param_offset, ret);
32613398 goto out;
32623399 }
32633400
32643401 /* Sanity check */
32653402 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3266
- dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3403
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
32673404 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
32683405 ret = -EINVAL;
32693406 goto out;
32703407 }
32713408
3272
- /* Check wherher we will not copy more data, than available */
3273
- if (is_kmalloc && param_size > buff_len)
3274
- param_size = buff_len;
3409
+ /* Update descriptor length */
3410
+ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3411
+ ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
32753412
3276
- if (is_kmalloc)
3277
- memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3413
+ if (is_kmalloc) {
3414
+ /* Make sure we don't copy more data than available */
3415
+ if (param_offset >= buff_len)
3416
+ ret = -EINVAL;
3417
+ else
3418
+ memcpy(param_read_buf, &desc_buf[param_offset],
3419
+ min_t(u32, param_size, buff_len - param_offset));
3420
+ }
32783421 out:
32793422 if (is_kmalloc)
32803423 kfree(desc_buf);
32813424 return ret;
32823425 }
3426
+EXPORT_SYMBOL_GPL(ufshcd_read_desc_param);
32833427
3284
-static inline int ufshcd_read_desc(struct ufs_hba *hba,
3285
- enum desc_idn desc_id,
3286
- int desc_index,
3287
- u8 *buf,
3288
- u32 size)
3289
-{
3290
- return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3291
-}
3428
+/**
3429
+ * struct uc_string_id - unicode string
3430
+ *
3431
+ * @len: size of this descriptor inclusive
3432
+ * @type: descriptor type
3433
+ * @uc: unicode string character
3434
+ */
3435
+struct uc_string_id {
3436
+ u8 len;
3437
+ u8 type;
3438
+ wchar_t uc[];
3439
+} __packed;
32923440
3293
-static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3294
- u8 *buf,
3295
- u32 size)
3441
+/* replace non-printable or non-ASCII characters with spaces */
3442
+static inline char ufshcd_remove_non_printable(u8 ch)
32963443 {
3297
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3298
-}
3299
-
3300
-static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3301
-{
3302
- return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3444
+ return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
33033445 }
33043446
33053447 /**
33063448 * ufshcd_read_string_desc - read string descriptor
33073449 * @hba: pointer to adapter instance
33083450 * @desc_index: descriptor index
3309
- * @buf: pointer to buffer where descriptor would be read
3310
- * @size: size of buf
3451
+ * @buf: pointer to buffer where descriptor would be read,
3452
+ * the caller should free the memory.
33113453 * @ascii: if true convert from unicode to ascii characters
3454
+ * null terminated string.
33123455 *
3313
- * Return 0 in case of success, non-zero otherwise
3456
+ * Return:
3457
+ * * string size on success.
3458
+ * * -ENOMEM: on allocation failure
3459
+ * * -EINVAL: on a wrong parameter
33143460 */
3315
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3316
- u8 *buf, u32 size, bool ascii)
3461
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3462
+ u8 **buf, bool ascii)
33173463 {
3318
- int err = 0;
3464
+ struct uc_string_id *uc_str;
3465
+ u8 *str;
3466
+ int ret;
33193467
3320
- err = ufshcd_read_desc(hba,
3321
- QUERY_DESC_IDN_STRING, desc_index, buf, size);
3468
+ if (!buf)
3469
+ return -EINVAL;
33223470
3323
- if (err) {
3324
- dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3325
- __func__, QUERY_REQ_RETRIES, err);
3471
+ uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3472
+ if (!uc_str)
3473
+ return -ENOMEM;
3474
+
3475
+ ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3476
+ (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3477
+ if (ret < 0) {
3478
+ dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3479
+ QUERY_REQ_RETRIES, ret);
3480
+ str = NULL;
3481
+ goto out;
3482
+ }
3483
+
3484
+ if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3485
+ dev_dbg(hba->dev, "String Desc is of zero length\n");
3486
+ str = NULL;
3487
+ ret = 0;
33263488 goto out;
33273489 }
33283490
33293491 if (ascii) {
3330
- int desc_len;
3331
- int ascii_len;
3492
+ ssize_t ascii_len;
33323493 int i;
3333
- char *buff_ascii;
3334
-
3335
- desc_len = buf[0];
33363494 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3337
- ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3338
- if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3339
- dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3340
- __func__);
3341
- err = -ENOMEM;
3342
- goto out;
3343
- }
3344
-
3345
- buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3346
- if (!buff_ascii) {
3347
- err = -ENOMEM;
3495
+ ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3496
+ str = kzalloc(ascii_len, GFP_KERNEL);
3497
+ if (!str) {
3498
+ ret = -ENOMEM;
33483499 goto out;
33493500 }
33503501
....@@ -3352,22 +3503,28 @@
33523503 * the descriptor contains string in UTF16 format
33533504 * we need to convert to utf-8 so it can be displayed
33543505 */
3355
- utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3356
- desc_len - QUERY_DESC_HDR_SIZE,
3357
- UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3506
+ ret = utf16s_to_utf8s(uc_str->uc,
3507
+ uc_str->len - QUERY_DESC_HDR_SIZE,
3508
+ UTF16_BIG_ENDIAN, str, ascii_len);
33583509
33593510 /* replace non-printable or non-ASCII characters with spaces */
3360
- for (i = 0; i < ascii_len; i++)
3361
- ufshcd_remove_non_printable(&buff_ascii[i]);
3511
+ for (i = 0; i < ret; i++)
3512
+ str[i] = ufshcd_remove_non_printable(str[i]);
33623513
3363
- memset(buf + QUERY_DESC_HDR_SIZE, 0,
3364
- size - QUERY_DESC_HDR_SIZE);
3365
- memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3366
- buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3367
- kfree(buff_ascii);
3514
+ str[ret++] = '\0';
3515
+
3516
+ } else {
3517
+ str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3518
+ if (!str) {
3519
+ ret = -ENOMEM;
3520
+ goto out;
3521
+ }
3522
+ ret = uc_str->len;
33683523 }
33693524 out:
3370
- return err;
3525
+ *buf = str;
3526
+ kfree(uc_str);
3527
+ return ret;
33713528 }
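/*
 * ufshcd_read_string_desc() now allocates the result buffer itself and
 * returns the string length (or a negative errno); the caller frees *buf.
 * In ASCII mode the UTF-16BE payload is converted with utf16s_to_utf8s(),
 * non-printable bytes are replaced with spaces and the string is
 * NUL-terminated; otherwise the raw descriptor is duplicated with
 * kmemdup().
 */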
33723529
33733530 /**
....@@ -3390,11 +3547,36 @@
33903547 * Unit descriptors are only available for general purpose LUs (LUN id
33913548 * from 0 to 7) and RPMB Well known LU.
33923549 */
3393
- if (!ufs_is_valid_unit_desc_lun(lun))
3550
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
33943551 return -EOPNOTSUPP;
33953552
33963553 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
33973554 param_offset, param_read_buf, param_size);
3555
+}
3556
+
3557
+static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3558
+{
3559
+ int err = 0;
3560
+ u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3561
+
3562
+ if (hba->dev_info.wspecversion >= 0x300) {
3563
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3564
+ QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3565
+ &gating_wait);
3566
+ if (err)
3567
+ dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3568
+ err, gating_wait);
3569
+
3570
+ if (gating_wait == 0) {
3571
+ gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3572
+ dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3573
+ gating_wait);
3574
+ }
3575
+
3576
+ hba->dev_info.clk_gating_wait_us = gating_wait;
3577
+ }
3578
+
3579
+ return err;
33983580 }
33993581
34003582 /**
....@@ -3494,7 +3676,6 @@
34943676 */
34953677 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
34963678 {
3497
- struct utp_transfer_cmd_desc *cmd_descp;
34983679 struct utp_transfer_req_desc *utrdlp;
34993680 dma_addr_t cmd_desc_dma_addr;
35003681 dma_addr_t cmd_desc_element_addr;
....@@ -3504,7 +3685,6 @@
35043685 int i;
35053686
35063687 utrdlp = hba->utrdl_base_addr;
3507
- cmd_descp = hba->ucdl_base_addr;
35083688
35093689 response_offset =
35103690 offsetof(struct utp_transfer_cmd_desc, response_upiu);
....@@ -3533,27 +3713,14 @@
35333713 cpu_to_le16(ALIGNED_UPIU_SIZE);
35343714 } else {
35353715 utrdlp[i].response_upiu_offset =
3536
- cpu_to_le16((response_offset >> 2));
3716
+ cpu_to_le16(response_offset >> 2);
35373717 utrdlp[i].prd_table_offset =
3538
- cpu_to_le16((prdt_offset >> 2));
3718
+ cpu_to_le16(prdt_offset >> 2);
35393719 utrdlp[i].response_upiu_length =
35403720 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
35413721 }
35423722
3543
- hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3544
- hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3545
- (i * sizeof(struct utp_transfer_req_desc));
3546
- hba->lrb[i].ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
3547
- hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3548
- hba->lrb[i].ucd_rsp_ptr =
3549
- (struct utp_upiu_rsp *)cmd_descp->response_upiu;
3550
- hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3551
- response_offset;
3552
- hba->lrb[i].ucd_prdt_ptr =
3553
- (struct ufshcd_sg_entry *)cmd_descp->prd_table;
3554
- hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3555
- prdt_offset;
3556
- cmd_descp = (void *)cmd_descp + cmd_desc_size;
3723
+ ufshcd_init_lrb(hba, &hba->lrb[i], i);
35573724 }
35583725 }
35593726
....@@ -3586,7 +3753,7 @@
35863753 * @hba: per adapter instance
35873754 *
35883755 * DME_RESET command is issued in order to reset UniPro stack.
3589
- * This function now deal with cold reset.
3756
+ * This function now deals with cold reset.
35903757 *
35913758 * Returns 0 on success, non-zero value on failure
35923759 */
....@@ -3796,17 +3963,20 @@
37963963 */
37973964 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
37983965 {
3799
- struct completion uic_async_done;
3966
+ DECLARE_COMPLETION_ONSTACK(uic_async_done);
38003967 unsigned long flags;
38013968 u8 status;
38023969 int ret;
38033970 bool reenable_intr = false;
38043971
38053972 mutex_lock(&hba->uic_cmd_mutex);
3806
- init_completion(&uic_async_done);
38073973 ufshcd_add_delay_before_dme_cmd(hba);
38083974
38093975 spin_lock_irqsave(hba->host->host_lock, flags);
3976
+ if (ufshcd_is_link_broken(hba)) {
3977
+ ret = -ENOLINK;
3978
+ goto out_unlock;
3979
+ }
38103980 hba->uic_async_done = &uic_async_done;
38113981 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
38123982 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
....@@ -3831,10 +4001,18 @@
38314001 dev_err(hba->dev,
38324002 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
38334003 cmd->command, cmd->argument3);
4004
+
4005
+ if (!cmd->cmd_active) {
4006
+ dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4007
+ __func__);
4008
+ goto check_upmcrs;
4009
+ }
4010
+
38344011 ret = -ETIMEDOUT;
38354012 goto out;
38364013 }
38374014
4015
+check_upmcrs:
38384016 status = ufshcd_get_upmcrs(hba);
38394017 if (status != PWR_LOCAL) {
38404018 dev_err(hba->dev,
....@@ -3846,7 +4024,7 @@
38464024 if (ret) {
38474025 ufshcd_print_host_state(hba);
38484026 ufshcd_print_pwr_info(hba);
3849
- ufshcd_print_host_regs(hba);
4027
+ ufshcd_print_evt_hist(hba);
38504028 }
38514029
38524030 spin_lock_irqsave(hba->host->host_lock, flags);
....@@ -3854,6 +4032,14 @@
38544032 hba->uic_async_done = NULL;
38554033 if (reenable_intr)
38564034 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4035
+ if (ret) {
4036
+ dev_err(hba->dev,
4037
+ "%s: Changing link power status failed (%d). Scheduling error handler\n",
4038
+ __func__, ret);
4039
+ ufshcd_set_link_broken(hba);
4040
+ ufshcd_schedule_eh_work(hba);
4041
+ }
4042
+out_unlock:
38574043 spin_unlock_irqrestore(hba->host->host_lock, flags);
38584044 mutex_unlock(&hba->uic_cmd_mutex);
38594045
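/*
 * ufshcd_uic_pwr_ctrl() changes above: the completion is declared on the
 * stack, a request is rejected with -ENOLINK if the link is already
 * marked broken, a late completion is handled by re-reading UPMCRS
 * instead of timing out, and on failure the link is flagged broken and
 * the error handler is scheduled rather than recovering inline.
 */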
....@@ -3894,7 +4080,7 @@
38944080 return ret;
38954081 }
38964082
3897
-static int ufshcd_link_recovery(struct ufs_hba *hba)
4083
+int ufshcd_link_recovery(struct ufs_hba *hba)
38984084 {
38994085 int ret;
39004086 unsigned long flags;
....@@ -3903,6 +4089,9 @@
39034089 hba->ufshcd_state = UFSHCD_STATE_RESET;
39044090 ufshcd_set_eh_in_progress(hba);
39054091 spin_unlock_irqrestore(hba->host->host_lock, flags);
4092
+
4093
+ /* Reset the attached device */
4094
+ ufshcd_vops_device_reset(hba);
39064095
39074096 ret = ufshcd_host_reset_and_restore(hba);
39084097
....@@ -3918,8 +4107,9 @@
39184107
39194108 return ret;
39204109 }
4110
+EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
39214111
3922
-static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4112
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
39234113 {
39244114 int ret;
39254115 struct uic_command uic_cmd = {0};
....@@ -3932,46 +4122,18 @@
39324122 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
39334123 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
39344124
3935
- if (ret) {
3936
- int err;
3937
-
4125
+ if (ret)
39384126 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
39394127 __func__, ret);
3940
-
3941
- /*
3942
- * If link recovery fails then return error code returned from
3943
- * ufshcd_link_recovery().
3944
- * If link recovery succeeds then return -EAGAIN to attempt
3945
- * hibern8 enter retry again.
3946
- */
3947
- err = ufshcd_link_recovery(hba);
3948
- if (err) {
3949
- dev_err(hba->dev, "%s: link recovery failed", __func__);
3950
- ret = err;
3951
- } else {
3952
- ret = -EAGAIN;
3953
- }
3954
- } else
4128
+ else
39554129 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
39564130 POST_CHANGE);
39574131
39584132 return ret;
39594133 }
4134
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
39604135
3961
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3962
-{
3963
- int ret = 0, retries;
3964
-
3965
- for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3966
- ret = __ufshcd_uic_hibern8_enter(hba);
3967
- if (!ret)
3968
- goto out;
3969
- }
3970
-out:
3971
- return ret;
3972
-}
3973
-
3974
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4136
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
39754137 {
39764138 struct uic_command uic_cmd = {0};
39774139 int ret;
....@@ -3987,7 +4149,6 @@
39874149 if (ret) {
39884150 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
39894151 __func__, ret);
3990
- ret = ufshcd_link_recovery(hba);
39914152 } else {
39924153 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
39934154 POST_CHANGE);
....@@ -3997,12 +4158,38 @@
39974158
39984159 return ret;
39994160 }
4161
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
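/*
 * Hibern8 handling is simplified: the retry wrapper around hibern8 enter
 * and the inline ufshcd_link_recovery() calls on enter/exit failure are
 * removed, so failures now simply propagate to the caller.
 * ufshcd_link_recovery() additionally resets the attached device through
 * the vops device_reset hook, and the enter/exit/recovery helpers are
 * exported for use by other modules.
 */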
40004162
4001
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
4163
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4164
+{
4165
+ unsigned long flags;
4166
+ bool update = false;
4167
+
4168
+ if (!ufshcd_is_auto_hibern8_supported(hba))
4169
+ return;
4170
+
4171
+ spin_lock_irqsave(hba->host->host_lock, flags);
4172
+ if (hba->ahit != ahit) {
4173
+ hba->ahit = ahit;
4174
+ update = true;
4175
+ }
4176
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
4177
+
4178
+ if (update && !pm_runtime_suspended(hba->dev)) {
4179
+ pm_runtime_get_sync(hba->dev);
4180
+ ufshcd_hold(hba, false);
4181
+ ufshcd_auto_hibern8_enable(hba);
4182
+ ufshcd_release(hba);
4183
+ pm_runtime_put(hba->dev);
4184
+ }
4185
+}
4186
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4187
+
4188
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
40024189 {
40034190 unsigned long flags;
40044191
4005
- if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
4192
+ if (!ufshcd_is_auto_hibern8_supported(hba))
40064193 return;
40074194
40084195 spin_lock_irqsave(hba->host->host_lock, flags);
....@@ -4095,7 +4282,8 @@
40954282 int ret;
40964283
40974284 /* if already configured to the requested pwr_mode */
4098
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4285
+ if (!hba->force_pmc &&
4286
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
40994287 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
41004288 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
41014289 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
....@@ -4137,6 +4325,28 @@
41374325 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
41384326 pwr_mode->hs_rate);
41394327
4328
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4329
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4330
+ DL_FC0ProtectionTimeOutVal_Default);
4331
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4332
+ DL_TC0ReplayTimeOutVal_Default);
4333
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4334
+ DL_AFC0ReqTimeOutVal_Default);
4335
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4336
+ DL_FC1ProtectionTimeOutVal_Default);
4337
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4338
+ DL_TC1ReplayTimeOutVal_Default);
4339
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4340
+ DL_AFC1ReqTimeOutVal_Default);
4341
+
4342
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4343
+ DL_FC0ProtectionTimeOutVal_Default);
4344
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4345
+ DL_TC0ReplayTimeOutVal_Default);
4346
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4347
+ DL_AFC0ReqTimeOutVal_Default);
4348
+ }
4349
+
41404350 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
41414351 | pwr_mode->pwr_tx);
41424352
....@@ -4172,8 +4382,6 @@
41724382 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
41734383
41744384 ret = ufshcd_change_power_mode(hba, &final_params);
4175
- if (!ret)
4176
- ufshcd_print_pwr_info(hba);
41774385
41784386 return ret;
41794387 }
....@@ -4187,12 +4395,12 @@
41874395 */
41884396 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
41894397 {
4190
- int i;
41914398 int err;
4192
- bool flag_res = 1;
4399
+ bool flag_res = true;
4400
+ ktime_t timeout;
41934401
41944402 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4195
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4403
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
41964404 if (err) {
41974405 dev_err(hba->dev,
41984406 "%s setting fDeviceInit flag failed with error %d\n",
....@@ -4200,20 +4408,26 @@
42004408 goto out;
42014409 }
42024410
4203
- /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4204
- for (i = 0; i < 1000 && !err && flag_res; i++)
4205
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4206
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4411
+ /* Poll fDeviceInit flag to be cleared */
4412
+ timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4413
+ do {
4414
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4415
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4416
+ if (!flag_res)
4417
+ break;
4418
+ usleep_range(500, 1000);
4419
+ } while (ktime_before(ktime_get(), timeout));
42074420
4208
- if (err)
4421
+ if (err) {
42094422 dev_err(hba->dev,
4210
- "%s reading fDeviceInit flag failed with error %d\n",
4211
- __func__, err);
4212
- else if (flag_res)
4423
+ "%s reading fDeviceInit flag failed with error %d\n",
4424
+ __func__, err);
4425
+ } else if (flag_res) {
42134426 dev_err(hba->dev,
4214
- "%s fDeviceInit was not cleared by the device\n",
4215
- __func__);
4216
-
4427
+ "%s fDeviceInit was not cleared by the device\n",
4428
+ __func__);
4429
+ err = -EBUSY;
4430
+ }
42174431 out:
42184432 return err;
42194433 }
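/*
 * fDeviceInit completion is now polled against a ktime deadline
 * (FDEVICEINIT_COMPL_TIMEOUT) with usleep_range() between reads, instead
 * of a fixed 1000-iteration loop, and -EBUSY is returned if the device
 * never clears the flag.
 */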
....@@ -4230,7 +4444,7 @@
42304444 *
42314445 * Returns 0 on success, non-zero value on failure
42324446 */
4233
-static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4447
+int ufshcd_make_hba_operational(struct ufs_hba *hba)
42344448 {
42354449 int err = 0;
42364450 u32 reg;
....@@ -4270,31 +4484,36 @@
42704484 dev_err(hba->dev,
42714485 "Host controller not ready to process requests");
42724486 err = -EIO;
4273
- goto out;
42744487 }
42754488
4276
-out:
42774489 return err;
42784490 }
4491
+EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
42794492
42804493 /**
42814494 * ufshcd_hba_stop - Send controller to reset state
42824495 * @hba: per adapter instance
4283
- * @can_sleep: perform sleep or just spin
42844496 */
4285
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4497
+void ufshcd_hba_stop(struct ufs_hba *hba)
42864498 {
4499
+ unsigned long flags;
42874500 int err;
42884501
4289
- ufshcd_crypto_disable(hba);
4290
-
4502
+ /*
4503
+ * Obtain the host lock to prevent that the controller is disabled
4504
+ * while the UFS interrupt handler is active on another CPU.
4505
+ */
4506
+ spin_lock_irqsave(hba->host->host_lock, flags);
42914507 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4508
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
4509
+
42924510 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
42934511 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4294
- 10, 1, can_sleep);
4512
+ 10, 1);
42954513 if (err)
42964514 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
42974515 }
4516
+EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
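/*
 * ufshcd_hba_stop() drops the can_sleep parameter and the
 * ufshcd_crypto_disable() call, and now takes host_lock around the
 * CONTROLLER_DISABLE write so the controller cannot be disabled while the
 * interrupt handler is running on another CPU; the register poll uses the
 * five-argument ufshcd_wait_for_register().
 */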
42984517
 /**
 * ufshcd_hba_execute_hce - initialize the controller
@@ -4308,17 +4527,13 @@
 */
 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
 {
- int retry;
+ int retry_outer = 3;
+ int retry_inner;

- /*
- * msleep of 1 and 5 used in this function might result in msleep(20),
- * but it was necessary to send the UFS FPGA to reset mode during
- * development and testing of this driver. msleep can be changed to
- * mdelay and retry count can be reduced based on the controller.
- */
+start:
 if (!ufshcd_is_hba_active(hba))
 /* change controller state to "reset state" */
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);

 /* UniPro link is disabled at this point */
 ufshcd_set_link_off(hba);
@@ -4338,19 +4553,23 @@
 * instruction might be read back.
 * This delay can be changed based on the controller.
 */
- msleep(1);
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

 /* wait for the host controller to complete initialization */
- retry = 10;
+ retry_inner = 50;
 while (ufshcd_is_hba_active(hba)) {
- if (retry) {
- retry--;
+ if (retry_inner) {
+ retry_inner--;
 } else {
 dev_err(hba->dev,
 "Controller enable failed\n");
+ if (retry_outer) {
+ retry_outer--;
+ goto start;
+ }
 return -EIO;
 }
- msleep(5);
+ usleep_range(1000, 1100);
 }

 /* enable UIC related interrupts */
@@ -4361,7 +4580,7 @@
 return 0;
 }

-static int ufshcd_hba_enable(struct ufs_hba *hba)
+int ufshcd_hba_enable(struct ufs_hba *hba)
 {
 int ret;

@@ -4386,9 +4605,11 @@

 return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
+
 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
 {
- int tx_lanes, i, err = 0;
+ int tx_lanes = 0, i, err = 0;

 if (!peer)
 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -4422,6 +4643,23 @@
 return ufshcd_disable_tx_lcc(hba, true);
 }

+void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
+{
+ struct ufs_event_hist *e;
+
+ if (id >= UFS_EVT_CNT)
+ return;
+
+ e = &hba->ufs_stats.event[id];
+ e->val[e->pos] = val;
+ e->tstamp[e->pos] = ktime_get();
+ e->cnt += 1;
+ e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
+
+ ufshcd_vops_event_notify(hba, id, &val);
+}
+EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
+
44254663 /**
44264664 * ufshcd_link_startup - Initialize unipro link startup
44274665 * @hba: per adapter instance
....@@ -4449,6 +4687,9 @@
44494687
44504688 /* check if device is detected by inter-connect layer */
44514689 if (!ret && !ufshcd_is_device_present(hba)) {
4690
+ ufshcd_update_evt_hist(hba,
4691
+ UFS_EVT_LINK_STARTUP_FAIL,
4692
+ 0);
44524693 dev_err(hba->dev, "%s: Device not present\n", __func__);
44534694 ret = -ENXIO;
44544695 goto out;
....@@ -4459,13 +4700,21 @@
44594700 * but we can't be sure if the link is up until link startup
44604701 * succeeds. So reset the local Uni-Pro and try again.
44614702 */
4462
- if (ret && ufshcd_hba_enable(hba))
4703
+ if (ret && ufshcd_hba_enable(hba)) {
4704
+ ufshcd_update_evt_hist(hba,
4705
+ UFS_EVT_LINK_STARTUP_FAIL,
4706
+ (u32)ret);
44634707 goto out;
4708
+ }
44644709 } while (ret && retries--);
44654710
4466
- if (ret)
4711
+ if (ret) {
44674712 /* failed to get the link up... retire */
4713
+ ufshcd_update_evt_hist(hba,
4714
+ UFS_EVT_LINK_STARTUP_FAIL,
4715
+ (u32)ret);
44684716 goto out;
4717
+ }
44694718
44704719 if (link_startup_again) {
44714720 link_startup_again = false;
....@@ -4488,13 +4737,15 @@
44884737 if (ret)
44894738 goto out;
44904739
4740
+ /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
4741
+ ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
44914742 ret = ufshcd_make_hba_operational(hba);
44924743 out:
44934744 if (ret) {
44944745 dev_err(hba->dev, "link startup failed %d\n", ret);
44954746 ufshcd_print_host_state(hba);
44964747 ufshcd_print_pwr_info(hba);
4497
- ufshcd_print_host_regs(hba);
4748
+ ufshcd_print_evt_hist(hba);
44984749 }
44994750 return ret;
45004751 }
....@@ -4595,7 +4846,7 @@
45954846 * protected so skip reading bLUWriteProtect parameter for
45964847 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
45974848 */
4598
- else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4849
+ else if (lun >= hba->dev_info.max_lu_supported)
45994850 ret = -ENOTSUPP;
46004851 else
46014852 ret = ufshcd_read_unit_desc_param(hba,
....@@ -4642,6 +4893,9 @@
46424893 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
46434894 sdev->use_10_for_ms = 1;
46444895
4896
+ /* DBD field should be set to 1 in mode sense(10) */
4897
+ sdev->set_dbd_for_ms = 1;
4898
+
46454899 /* allow SCSI layer to restart the device in case of errors */
46464900 sdev->allow_restart = 1;
46474901
....@@ -4667,11 +4921,27 @@
46674921 */
46684922 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
46694923 {
4670
- struct ufs_hba *hba = shost_priv(sdev->host);
4924
+ return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
4925
+}
46714926
4672
- if (depth > hba->nutrs)
4673
- depth = hba->nutrs;
4674
- return scsi_change_queue_depth(sdev, depth);
4927
+static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
4928
+{
4929
+ /* skip well-known LU */
4930
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4931
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4932
+ return;
4933
+
4934
+ ufshpb_destroy_lu(hba, sdev);
4935
+}
4936
+
4937
+static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
4938
+{
4939
+ /* skip well-known LU */
4940
+ if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
4941
+ !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
4942
+ return;
4943
+
4944
+ ufshpb_init_hpb_lu(hba, sdev);
46754945 }
46764946
46774947 /**
....@@ -4683,13 +4953,18 @@
46834953 struct ufs_hba *hba = shost_priv(sdev->host);
46844954 struct request_queue *q = sdev->request_queue;
46854955
4956
+ ufshcd_hpb_configure(hba, sdev);
4957
+
46864958 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4687
- blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4959
+ if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
4960
+ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
46884961
46894962 if (ufshcd_is_rpm_autosuspend_allowed(hba))
46904963 sdev->rpm_autosuspend = 1;
46914964
46924965 ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4966
+
4967
+ trace_android_vh_ufs_update_sdev(sdev);
46934968
46944969 return 0;
46954970 }
....@@ -4701,9 +4976,11 @@
47014976 static void ufshcd_slave_destroy(struct scsi_device *sdev)
47024977 {
47034978 struct ufs_hba *hba;
4704
- struct request_queue *q = sdev->request_queue;
47054979
47064980 hba = shost_priv(sdev->host);
4981
+
4982
+ ufshcd_hpb_destroy(hba, sdev);
4983
+
47074984 /* Drop the reference as it won't be needed anymore */
47084985 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
47094986 unsigned long flags;
....@@ -4712,48 +4989,6 @@
47124989 hba->sdev_ufs_device = NULL;
47134990 spin_unlock_irqrestore(hba->host->host_lock, flags);
47144991 }
4715
-
4716
- ufshcd_crypto_destroy_rq_keyslot_manager(hba, q);
4717
-}
4718
-
4719
-/**
4720
- * ufshcd_task_req_compl - handle task management request completion
4721
- * @hba: per adapter instance
4722
- * @index: index of the completed request
4723
- * @resp: task management service response
4724
- *
4725
- * Returns non-zero value on error, zero on success
4726
- */
4727
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4728
-{
4729
- struct utp_task_req_desc *task_req_descp;
4730
- struct utp_upiu_task_rsp *task_rsp_upiup;
4731
- unsigned long flags;
4732
- int ocs_value;
4733
- int task_result;
4734
-
4735
- spin_lock_irqsave(hba->host->host_lock, flags);
4736
-
4737
- /* Clear completed tasks from outstanding_tasks */
4738
- __clear_bit(index, &hba->outstanding_tasks);
4739
-
4740
- task_req_descp = hba->utmrdl_base_addr;
4741
- ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4742
-
4743
- if (ocs_value == OCS_SUCCESS) {
4744
- task_rsp_upiup = (struct utp_upiu_task_rsp *)
4745
- task_req_descp[index].task_rsp_upiu;
4746
- task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4747
- task_result = task_result & MASK_TM_SERVICE_RESP;
4748
- if (resp)
4749
- *resp = (u8)task_result;
4750
- } else {
4751
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4752
- __func__, ocs_value);
4753
- }
4754
- spin_unlock_irqrestore(hba->host->host_lock, flags);
4755
-
4756
- return ocs_value;
47574992 }
47584993
47594994 /**
....@@ -4771,6 +5006,7 @@
47715006 switch (scsi_status) {
47725007 case SAM_STAT_CHECK_CONDITION:
47735008 ufshcd_copy_sense_data(lrbp);
5009
+ fallthrough;
47745010 case SAM_STAT_GOOD:
47755011 result |= DID_OK << 16 |
47765012 COMMAND_COMPLETE << 8 |
....@@ -4807,6 +5043,12 @@
48075043 /* overall command status of utrd */
48085044 ocs = ufshcd_get_tr_ocs(lrbp);
48095045
5046
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5047
+ if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
5048
+ MASK_RSP_UPIU_RESULT)
5049
+ ocs = OCS_SUCCESS;
5050
+ }
5051
+
48105052 switch (ocs) {
48115053 case OCS_SUCCESS:
48125054 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
....@@ -4839,8 +5081,19 @@
48395081 * UFS device needs urgent BKOPs.
48405082 */
48415083 if (!hba->pm_op_in_progress &&
4842
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4843
- schedule_work(&hba->eeh_work);
5084
+ !ufshcd_eh_in_progress(hba) &&
5085
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
5086
+ schedule_work(&hba->eeh_work)) {
5087
+ /*
5088
+ * Prevent suspend once eeh_work is scheduled
5089
+ * to avoid deadlock between ufshcd_suspend
5090
+ * and exception event handler.
5091
+ */
5092
+ pm_runtime_get_noresume(hba->dev);
5093
+ }
5094
+
5095
+ if (scsi_status == SAM_STAT_GOOD)
5096
+ ufshpb_rsp_upiu(hba, lrbp);
48445097 break;
48455098 case UPIU_TRANSACTION_REJECT_UPIU:
48465099 /* TODO: handle Reject UPIU Response */
....@@ -4849,10 +5102,10 @@
48495102 "Reject UPIU not fully implemented\n");
48505103 break;
48515104 default:
4852
- result = DID_ERROR << 16;
48535105 dev_err(hba->dev,
48545106 "Unexpected request response code = %x\n",
48555107 result);
5108
+ result = DID_ERROR << 16;
48565109 break;
48575110 }
48585111 break;
....@@ -4860,6 +5113,10 @@
48605113 result |= DID_ABORT << 16;
48615114 break;
48625115 case OCS_INVALID_COMMAND_STATUS:
5116
+ dev_err_ratelimited(hba->dev,
5117
+ "Retrying request with tag %d / cdb %#02x because of invalid command status\n",
5118
+ lrbp->task_tag, lrbp->cmd && lrbp->cmd->cmnd ?
5119
+ lrbp->cmd->cmnd[0] : 0);
48635120 result |= DID_REQUEUE << 16;
48645121 break;
48655122 case OCS_INVALID_CMD_TABLE_ATTR:
....@@ -4868,6 +5125,7 @@
48685125 case OCS_MISMATCH_RESP_UPIU_SIZE:
48695126 case OCS_PEER_COMM_FAILURE:
48705127 case OCS_FATAL_ERROR:
5128
+ case OCS_DEVICE_FATAL_ERROR:
48715129 case OCS_INVALID_CRYPTO_CONFIG:
48725130 case OCS_GENERAL_CRYPTO_ERROR:
48735131 default:
....@@ -4875,33 +5133,87 @@
48755133 dev_err(hba->dev,
48765134 "OCS error from controller = %x for tag %d\n",
48775135 ocs, lrbp->task_tag);
4878
- ufshcd_print_host_regs(hba);
5136
+ ufshcd_print_evt_hist(hba);
48795137 ufshcd_print_host_state(hba);
48805138 break;
48815139 } /* end of switch */
48825140
4883
- if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
5141
+ if ((host_byte(result) != DID_OK) &&
5142
+ (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
48845143 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
48855144 return result;
5145
+}
5146
+
5147
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5148
+ u32 intr_mask)
5149
+{
5150
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
5151
+ !ufshcd_is_auto_hibern8_enabled(hba))
5152
+ return false;
5153
+
5154
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5155
+ return false;
5156
+
5157
+ if (hba->active_uic_cmd &&
5158
+ (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5159
+ hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5160
+ return false;
5161
+
5162
+ return true;
48865163 }
48875164
48885165 /**
48895166 * ufshcd_uic_cmd_compl - handle completion of uic command
48905167 * @hba: per adapter instance
48915168 * @intr_status: interrupt status generated by the controller
5169
+ *
5170
+ * Returns
5171
+ * IRQ_HANDLED - If interrupt is valid
5172
+ * IRQ_NONE - If invalid interrupt
48925173 */
4893
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5174
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
48945175 {
5176
+ irqreturn_t retval = IRQ_NONE;
5177
+
5178
+ spin_lock(hba->host->host_lock);
5179
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5180
+ hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5181
+
48955182 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
48965183 hba->active_uic_cmd->argument2 |=
48975184 ufshcd_get_uic_cmd_result(hba);
48985185 hba->active_uic_cmd->argument3 =
48995186 ufshcd_get_dme_attr_val(hba);
5187
+ if (!hba->uic_async_done)
5188
+ hba->active_uic_cmd->cmd_active = 0;
49005189 complete(&hba->active_uic_cmd->done);
5190
+ retval = IRQ_HANDLED;
49015191 }
49025192
4903
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
5193
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
5194
+ hba->active_uic_cmd->cmd_active = 0;
49045195 complete(hba->uic_async_done);
5196
+ retval = IRQ_HANDLED;
5197
+ }
5198
+
5199
+ if (retval == IRQ_HANDLED)
5200
+ ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
5201
+ "complete");
5202
+ spin_unlock(hba->host->host_lock);
5203
+ return retval;
5204
+}
5205
+
5206
+/* Release the resources allocated for processing a SCSI command. */
5207
+static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5208
+ struct ufshcd_lrb *lrbp)
5209
+{
5210
+ struct scsi_cmnd *cmd = lrbp->cmd;
5211
+
5212
+ scsi_dma_unmap(cmd);
5213
+ ufshcd_crypto_clear_prdt(hba, lrbp);
5214
+ lrbp->cmd = NULL; /* Mark the command as completed. */
5215
+ ufshcd_release(hba);
5216
+ ufshcd_clk_scaling_update_busy(hba);
49055217 }
49065218
49075219 /**
....@@ -4914,55 +5226,48 @@
49145226 {
49155227 struct ufshcd_lrb *lrbp;
49165228 struct scsi_cmnd *cmd;
4917
- int result;
49185229 int index;
49195230
49205231 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
5232
+ if (!test_and_clear_bit(index, &hba->outstanding_reqs))
5233
+ continue;
49215234 lrbp = &hba->lrb[index];
5235
+ lrbp->compl_time_stamp = ktime_get();
49225236 cmd = lrbp->cmd;
49235237 if (cmd) {
5238
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5239
+ ufshcd_update_monitor(hba, lrbp);
5240
+ trace_android_vh_ufs_compl_command(hba, lrbp);
49245241 ufshcd_add_command_trace(hba, index, "complete");
4925
- result = ufshcd_transfer_rsp_status(hba, lrbp);
4926
- scsi_dma_unmap(cmd);
4927
- cmd->result = result;
4928
- ufshcd_complete_lrbp_crypto(hba, cmd, lrbp);
4929
- /* Mark completed command as NULL in LRB */
4930
- lrbp->cmd = NULL;
4931
- clear_bit_unlock(index, &hba->lrb_in_use);
5242
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
5243
+ ufshcd_release_scsi_cmd(hba, lrbp);
49325244 /* Do not touch lrbp after scsi done */
49335245 cmd->scsi_done(cmd);
4934
- __ufshcd_release(hba);
49355246 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
49365247 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
49375248 if (hba->dev_cmd.complete) {
5249
+ trace_android_vh_ufs_compl_command(hba, lrbp);
49385250 ufshcd_add_command_trace(hba, index,
49395251 "dev_complete");
49405252 complete(hba->dev_cmd.complete);
5253
+ ufshcd_clk_scaling_update_busy(hba);
49415254 }
49425255 }
4943
- if (ufshcd_is_clkscaling_supported(hba))
4944
- hba->clk_scaling.active_reqs--;
4945
-
4946
- lrbp->compl_time_stamp = ktime_get();
49475256 }
4948
-
4949
- /* clear corresponding bits of completed commands */
4950
- hba->outstanding_reqs ^= completed_reqs;
4951
-
4952
- ufshcd_clk_scaling_update_busy(hba);
4953
-
4954
- /* we might have free'd some tags above */
4955
- wake_up(&hba->dev_cmd.tag_wq);
49565257 }
49575258
49585259 /**
4959
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
5260
+ * ufshcd_trc_handler - handle transfer requests completion
49605261 * @hba: per adapter instance
5262
+ * @use_utrlcnr: get completed requests from UTRLCNR
5263
+ *
5264
+ * Returns
5265
+ * IRQ_HANDLED - If interrupt is valid
5266
+ * IRQ_NONE - If invalid interrupt
49615267 */
4962
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
5268
+static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
49635269 {
4964
- unsigned long completed_reqs;
4965
- u32 tr_doorbell;
5270
+ unsigned long completed_reqs = 0;
49665271
49675272 /* Resetting interrupt aggregation counters first and reading the
49685273 * DOOR_BELL afterward allows us to handle all the completed requests.
....@@ -4975,10 +5280,31 @@
49755280 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
49765281 ufshcd_reset_intr_aggr(hba);
49775282
4978
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4979
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5283
+ if (use_utrlcnr) {
5284
+ u32 utrlcnr;
49805285
4981
- __ufshcd_transfer_req_compl(hba, completed_reqs);
5286
+ utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
5287
+ if (utrlcnr) {
5288
+ ufshcd_writel(hba, utrlcnr,
5289
+ REG_UTP_TRANSFER_REQ_LIST_COMPL);
5290
+ completed_reqs = utrlcnr;
5291
+ }
5292
+ } else {
5293
+ unsigned long flags;
5294
+ u32 tr_doorbell;
5295
+
5296
+ spin_lock_irqsave(hba->host->host_lock, flags);
5297
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5298
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
5299
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
5300
+ }
5301
+
5302
+ if (completed_reqs) {
5303
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
5304
+ return IRQ_HANDLED;
5305
+ } else {
5306
+ return IRQ_NONE;
5307
+ }
49825308 }
49835309
49845310 /**
....@@ -5056,7 +5382,7 @@
50565382 goto out;
50575383
50585384 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5059
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
5385
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
50605386 if (err) {
50615387 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
50625388 __func__, err);
....@@ -5106,7 +5432,7 @@
51065432 }
51075433
51085434 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5109
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
5435
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
51105436 if (err) {
51115437 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
51125438 __func__, err);
....@@ -5141,6 +5467,7 @@
51415467 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
51425468 ufshcd_disable_auto_bkops(hba);
51435469 }
5470
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
51445471 hba->is_urgent_bkops_lvl_checked = false;
51455472 }
51465473
....@@ -5166,7 +5493,7 @@
51665493 * to know whether auto bkops is enabled or disabled after this function
51675494 * returns control to it.
51685495 */
5169
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5496
+int ufshcd_bkops_ctrl(struct ufs_hba *hba,
51705497 enum bkops_status status)
51715498 {
51725499 int err;
....@@ -5191,6 +5518,7 @@
51915518 out:
51925519 return err;
51935520 }
5521
+EXPORT_SYMBOL_GPL(ufshcd_bkops_ctrl);
51945522
51955523 /**
51965524 * ufshcd_urgent_bkops - handle urgent bkops exception event
@@ -5250,6 +5578,190 @@
 __func__, err);
 }

+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+ int ret;
+ u8 index;
+ enum query_opcode opcode;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return 0;
+
+ if (!(enable ^ hba->wb_enabled))
+ return 0;
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_EN, index, NULL);
+ if (ret) {
+ dev_err(hba->dev, "%s write booster %s failed %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+ return ret;
+ }
+
+ hba->wb_enabled = enable;
+ dev_dbg(hba->dev, "%s write booster %s %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+
+ return ret;
+}
+
5613
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
5614
+{
5615
+ int val;
5616
+ u8 index;
5617
+
5618
+ if (set)
5619
+ val = UPIU_QUERY_OPCODE_SET_FLAG;
5620
+ else
5621
+ val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
5622
+
5623
+ index = ufshcd_wb_get_query_index(hba);
5624
+ return ufshcd_query_flag_retry(hba, val,
5625
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
5626
+ index, NULL);
5627
+}
5628
+
5629
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
5630
+{
5631
+ if (enable)
5632
+ ufshcd_wb_buf_flush_enable(hba);
5633
+ else
5634
+ ufshcd_wb_buf_flush_disable(hba);
5635
+
5636
+}
5637
+
5638
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
5639
+{
5640
+ int ret;
5641
+ u8 index;
5642
+
5643
+ if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
5644
+ return 0;
5645
+
5646
+ index = ufshcd_wb_get_query_index(hba);
5647
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5648
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5649
+ index, NULL);
5650
+ if (ret)
5651
+ dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
5652
+ __func__, ret);
5653
+ else
5654
+ hba->wb_buf_flush_enabled = true;
5655
+
5656
+ dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
5657
+ return ret;
5658
+}
5659
+
5660
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
5661
+{
5662
+ int ret;
5663
+ u8 index;
5664
+
5665
+ if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
5666
+ return 0;
5667
+
5668
+ index = ufshcd_wb_get_query_index(hba);
5669
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5670
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
5671
+ index, NULL);
5672
+ if (ret) {
5673
+ dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
5674
+ __func__, ret);
5675
+ } else {
5676
+ hba->wb_buf_flush_enabled = false;
5677
+ dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
5678
+ }
5679
+
5680
+ return ret;
5681
+}
5682
+
5683
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
5684
+ u32 avail_buf)
5685
+{
5686
+ u32 cur_buf;
5687
+ int ret;
5688
+ u8 index;
5689
+
5690
+ index = ufshcd_wb_get_query_index(hba);
5691
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5692
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
5693
+ index, 0, &cur_buf);
5694
+ if (ret) {
5695
+ dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
5696
+ __func__, ret);
5697
+ return false;
5698
+ }
5699
+
5700
+ if (!cur_buf) {
5701
+ dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
5702
+ cur_buf);
5703
+ return false;
5704
+ }
5705
+ /* Let it continue to flush when available buffer exceeds threshold */
5706
+ if (avail_buf < hba->vps->wb_flush_threshold)
5707
+ return true;
5708
+
5709
+ return false;
5710
+}
5711
+
5712
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
5713
+{
5714
+ int ret;
5715
+ u32 avail_buf;
5716
+ u8 index;
5717
+
5718
+ if (!ufshcd_is_wb_allowed(hba))
5719
+ return false;
5720
+ /*
5721
+ * The ufs device needs the vcc to be ON to flush.
5722
+ * With user-space reduction enabled, it's enough to enable flush
5723
+ * by checking only the available buffer. The threshold
5724
+ * defined here is > 90% full.
5725
+ * With user-space preserved enabled, the current-buffer
5726
+ * should be checked too because the wb buffer size can reduce
5727
+ * when disk tends to be full. This info is provided by current
5728
+ * buffer (dCurrentWriteBoosterBufferSize). There's no point in
5729
+ * keeping vcc on when current buffer is empty.
5730
+ */
5731
+ index = ufshcd_wb_get_query_index(hba);
5732
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5733
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
5734
+ index, 0, &avail_buf);
5735
+ if (ret) {
5736
+ dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
5737
+ __func__, ret);
5738
+ return false;
5739
+ }
5740
+
5741
+ if (!hba->dev_info.b_presrv_uspc_en) {
5742
+ if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
5743
+ return true;
5744
+ return false;
5745
+ }
5746
+
5747
+ return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
5748
+}
5749
+
5750
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
5751
+{
5752
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
5753
+ struct ufs_hba,
5754
+ rpm_dev_flush_recheck_work);
5755
+ /*
5756
+ * To prevent unnecessary VCC power drain after device finishes
5757
+ * WriteBooster buffer flush or Auto BKOPs, force runtime resume
5758
+ * after a certain delay to recheck the threshold by next runtime
5759
+ * suspend.
5760
+ */
5761
+ pm_runtime_get_sync(hba->dev);
5762
+ pm_runtime_put_sync(hba->dev);
5763
+}
5764
+
52535765 /**
52545766 * ufshcd_exception_event_handler - handle exceptions raised by device
52555767 * @work: pointer to work data
....@@ -5265,7 +5777,7 @@
52655777 hba = container_of(work, struct ufs_hba, eeh_work);
52665778
52675779 pm_runtime_get_sync(hba->dev);
5268
- scsi_block_requests(hba->host);
5780
+ ufshcd_scsi_block_requests(hba);
52695781 err = ufshcd_get_ee_status(hba, &status);
52705782 if (err) {
52715783 dev_err(hba->dev, "%s: failed to get exception status %d\n",
....@@ -5279,15 +5791,22 @@
52795791 ufshcd_bkops_exception_event_handler(hba);
52805792
52815793 out:
5282
- scsi_unblock_requests(hba->host);
5283
- pm_runtime_put_sync(hba->dev);
5794
+ ufshcd_scsi_unblock_requests(hba);
5795
+ /*
5796
+ * pm_runtime_get_noresume is called while scheduling
5797
+ * eeh_work to avoid suspend racing with exception work.
5798
+ * Hence decrement usage counter using pm_runtime_put_noidle
5799
+ * to allow suspend on completion of exception event handler.
5800
+ */
5801
+ pm_runtime_put_noidle(hba->dev);
5802
+ pm_runtime_put(hba->dev);
52845803 return;
52855804 }
52865805
52875806 /* Complete requests that have door-bell cleared */
52885807 static void ufshcd_complete_requests(struct ufs_hba *hba)
52895808 {
5290
- ufshcd_transfer_req_compl(hba);
5809
+ ufshcd_trc_handler(hba, false);
52915810 ufshcd_tmc_handler(hba);
52925811 }
52935812
....@@ -5354,14 +5873,157 @@
53545873 hba->saved_err &= ~UIC_ERROR;
53555874 /* clear NAC error */
53565875 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5357
- if (!hba->saved_uic_err) {
5876
+ if (!hba->saved_uic_err)
53585877 err_handling = false;
5359
- goto out;
5360
- }
53615878 }
53625879 out:
53635880 spin_unlock_irqrestore(hba->host->host_lock, flags);
53645881 return err_handling;
5882
+}
5883
+
5884
+/* host lock must be held before calling this func */
5885
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
5886
+{
5887
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
5888
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
5889
+}
5890
+
5891
+/* host lock must be held before calling this func */
5892
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
5893
+{
5894
+ /* handle fatal errors only when link is not in error state */
5895
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
5896
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
5897
+ ufshcd_is_saved_err_fatal(hba))
5898
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
5899
+ else
5900
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
5901
+ queue_work(hba->eh_wq, &hba->eh_work);
5902
+ }
5903
+}
5904
+
5905
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
5906
+{
5907
+ down_write(&hba->clk_scaling_lock);
5908
+ hba->clk_scaling.is_allowed = allow;
5909
+ up_write(&hba->clk_scaling_lock);
5910
+}
5911
+
5912
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
5913
+{
5914
+ if (suspend) {
5915
+ if (hba->clk_scaling.is_enabled)
5916
+ ufshcd_suspend_clkscaling(hba);
5917
+ ufshcd_clk_scaling_allow(hba, false);
5918
+ } else {
5919
+ ufshcd_clk_scaling_allow(hba, true);
5920
+ if (hba->clk_scaling.is_enabled)
5921
+ ufshcd_resume_clkscaling(hba);
5922
+ }
5923
+}
5924
+
5925
+static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
5926
+{
5927
+ pm_runtime_get_sync(hba->dev);
5928
+ if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
5929
+ enum ufs_pm_op pm_op;
5930
+
5931
+ /*
5932
+ * Don't assume anything of pm_runtime_get_sync(), if
5933
+ * resume fails, irq and clocks can be OFF, and powers
5934
+ * can be OFF or in LPM.
5935
+ */
5936
+ ufshcd_setup_hba_vreg(hba, true);
5937
+ ufshcd_enable_irq(hba);
5938
+ ufshcd_setup_vreg(hba, true);
5939
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
5940
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
5941
+ ufshcd_hold(hba, false);
5942
+ if (!ufshcd_is_clkgating_allowed(hba))
5943
+ ufshcd_setup_clocks(hba, true);
5944
+ ufshcd_release(hba);
5945
+ pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
5946
+ ufshcd_vops_resume(hba, pm_op);
5947
+ } else {
5948
+ ufshcd_hold(hba, false);
5949
+ if (ufshcd_is_clkscaling_supported(hba) &&
5950
+ hba->clk_scaling.is_enabled)
5951
+ ufshcd_suspend_clkscaling(hba);
5952
+ ufshcd_clk_scaling_allow(hba, false);
5953
+ }
5954
+ ufshcd_scsi_block_requests(hba);
5955
+ /* Drain ufshcd_queuecommand() */
5956
+ down_write(&hba->clk_scaling_lock);
5957
+ up_write(&hba->clk_scaling_lock);
5958
+ cancel_work_sync(&hba->eeh_work);
5959
+}
5960
+
5961
+static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
5962
+{
5963
+ ufshcd_scsi_unblock_requests(hba);
5964
+ ufshcd_release(hba);
5965
+ if (ufshcd_is_clkscaling_supported(hba))
5966
+ ufshcd_clk_scaling_suspend(hba, false);
5967
+ pm_runtime_put(hba->dev);
5968
+}
5969
+
5970
+static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
5971
+{
5972
+ return (!hba->is_powered || hba->shutting_down ||
5973
+ hba->ufshcd_state == UFSHCD_STATE_ERROR ||
5974
+ (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
5975
+ ufshcd_is_link_broken(hba))));
5976
+}
5977
+
5978
+#ifdef CONFIG_PM
5979
+static void ufshcd_recover_pm_error(struct ufs_hba *hba)
5980
+{
5981
+ struct Scsi_Host *shost = hba->host;
5982
+ struct scsi_device *sdev;
5983
+ struct request_queue *q;
5984
+ int ret;
5985
+
5986
+ hba->is_sys_suspended = false;
5987
+ /*
5988
+ * Set RPM status of hba device to RPM_ACTIVE,
5989
+ * this also clears its runtime error.
5990
+ */
5991
+ ret = pm_runtime_set_active(hba->dev);
5992
+ /*
5993
+ * If hba device had runtime error, we also need to resume those
5994
+ * scsi devices under hba in case any of them has failed to be
5995
+ * resumed due to hba runtime resume failure. This is to unblock
5996
+ * blk_queue_enter in case there are bios waiting inside it.
5997
+ */
5998
+ if (!ret) {
5999
+ shost_for_each_device(sdev, shost) {
6000
+ q = sdev->request_queue;
6001
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6002
+ q->rpm_status == RPM_SUSPENDING))
6003
+ pm_request_resume(q->dev);
6004
+ }
6005
+ }
6006
+}
6007
+#else
6008
+static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6009
+{
6010
+}
6011
+#endif
6012
+
6013
+static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6014
+{
6015
+ struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6016
+ u32 mode;
6017
+
6018
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6019
+
6020
+ if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6021
+ return true;
6022
+
6023
+ if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6024
+ return true;
6025
+
6026
+ return false;
53656027 }
53666028
53676029 /**
....@@ -5372,26 +6034,37 @@
53726034 {
53736035 struct ufs_hba *hba;
53746036 unsigned long flags;
5375
- u32 err_xfer = 0;
5376
- u32 err_tm = 0;
5377
- int err = 0;
6037
+ bool err_xfer = false;
6038
+ bool err_tm = false;
6039
+ int err = 0, pmc_err;
53786040 int tag;
5379
- bool needs_reset = false;
6041
+ bool needs_reset = false, needs_restore = false;
53806042
53816043 hba = container_of(work, struct ufs_hba, eh_work);
53826044
5383
- pm_runtime_get_sync(hba->dev);
5384
- ufshcd_hold(hba, false);
5385
-
6045
+ down(&hba->host_sem);
53866046 spin_lock_irqsave(hba->host->host_lock, flags);
5387
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5388
- goto out;
5389
-
5390
- hba->ufshcd_state = UFSHCD_STATE_RESET;
6047
+ if (ufshcd_err_handling_should_stop(hba)) {
6048
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6049
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6050
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6051
+ up(&hba->host_sem);
6052
+ return;
6053
+ }
53916054 ufshcd_set_eh_in_progress(hba);
5392
-
6055
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6056
+ ufshcd_err_handling_prepare(hba);
53936057 /* Complete requests that have door-bell cleared by h/w */
53946058 ufshcd_complete_requests(hba);
6059
+ spin_lock_irqsave(hba->host->host_lock, flags);
6060
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6061
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
6062
+ /*
6063
+ * A full reset and restore might have happened after preparation
6064
+ * is finished, double check whether we should stop.
6065
+ */
6066
+ if (ufshcd_err_handling_should_stop(hba))
6067
+ goto skip_err_handling;
53956068
53966069 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
53976070 bool ret;
....@@ -5400,29 +6073,60 @@
54006073 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
54016074 ret = ufshcd_quirk_dl_nac_errors(hba);
54026075 spin_lock_irqsave(hba->host->host_lock, flags);
5403
- if (!ret)
6076
+ if (!ret && ufshcd_err_handling_should_stop(hba))
54046077 goto skip_err_handling;
54056078 }
5406
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
5407
- ((hba->saved_err & UIC_ERROR) &&
5408
- (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5409
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5410
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5411
- needs_reset = true;
6079
+
6080
+ if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6081
+ (hba->saved_uic_err &&
6082
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6083
+ bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6084
+
6085
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6086
+ ufshcd_print_host_state(hba);
6087
+ ufshcd_print_pwr_info(hba);
6088
+ ufshcd_print_evt_hist(hba);
6089
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6090
+ ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
6091
+ spin_lock_irqsave(hba->host->host_lock, flags);
6092
+ }
54126093
54136094 /*
54146095 * if host reset is required then skip clearing the pending
54156096 * transfers forcefully because they will get cleared during
54166097 * host reset and restore
54176098 */
5418
- if (needs_reset)
5419
- goto skip_pending_xfer_clear;
6099
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6100
+ ufshcd_is_saved_err_fatal(hba) ||
6101
+ ((hba->saved_err & UIC_ERROR) &&
6102
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6103
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6104
+ needs_reset = true;
6105
+ goto do_reset;
6106
+ }
54206107
6108
+ /*
6109
+ * If LINERESET was caught, UFS might have been put to PWM mode,
6110
+ * check if power mode restore is needed.
6111
+ */
6112
+ if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6113
+ hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6114
+ if (!hba->saved_uic_err)
6115
+ hba->saved_err &= ~UIC_ERROR;
6116
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6117
+ if (ufshcd_is_pwr_mode_restore_needed(hba))
6118
+ needs_restore = true;
6119
+ spin_lock_irqsave(hba->host->host_lock, flags);
6120
+ if (!hba->saved_err && !needs_restore)
6121
+ goto skip_err_handling;
6122
+ }
6123
+
6124
+ hba->silence_err_logs = true;
54216125 /* release lock as clear command might sleep */
54226126 spin_unlock_irqrestore(hba->host->host_lock, flags);
54236127 /* Clear pending transfer requests */
54246128 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5425
- if (ufshcd_clear_cmd(hba, tag)) {
6129
+ if (ufshcd_try_to_abort_task(hba, tag)) {
54266130 err_xfer = true;
54276131 goto lock_skip_pending_xfer_clear;
54286132 }
....@@ -5437,149 +6141,206 @@
54376141 }
54386142
54396143 lock_skip_pending_xfer_clear:
5440
- spin_lock_irqsave(hba->host->host_lock, flags);
5441
-
54426144 /* Complete the requests that are cleared by s/w */
54436145 ufshcd_complete_requests(hba);
54446146
5445
- if (err_xfer || err_tm)
6147
+ spin_lock_irqsave(hba->host->host_lock, flags);
6148
+ hba->silence_err_logs = false;
6149
+ if (err_xfer || err_tm) {
54466150 needs_reset = true;
6151
+ goto do_reset;
6152
+ }
54476153
5448
-skip_pending_xfer_clear:
6154
+ /*
6155
+ * After all reqs and tasks are cleared from doorbell,
6156
+ * it is now safe to restore the power mode.
6157
+ */
6158
+ if (needs_restore) {
6159
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6160
+ /*
6161
+ * Hold the scaling lock just in case dev cmds
6162
+ * are sent via bsg and/or sysfs.
6163
+ */
6164
+ down_write(&hba->clk_scaling_lock);
6165
+ hba->force_pmc = true;
6166
+ pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6167
+ if (pmc_err) {
6168
+ needs_reset = true;
6169
+ dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6170
+ __func__, pmc_err);
6171
+ }
6172
+ hba->force_pmc = false;
6173
+ ufshcd_print_pwr_info(hba);
6174
+ up_write(&hba->clk_scaling_lock);
6175
+ spin_lock_irqsave(hba->host->host_lock, flags);
6176
+ }
6177
+
6178
+do_reset:
54496179 /* Fatal errors need reset */
54506180 if (needs_reset) {
5451
- unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5452
-
5453
- /*
5454
- * ufshcd_reset_and_restore() does the link reinitialization
5455
- * which will need atleast one empty doorbell slot to send the
5456
- * device management commands (NOP and query commands).
5457
- * If there is no slot empty at this moment then free up last
5458
- * slot forcefully.
5459
- */
5460
- if (hba->outstanding_reqs == max_doorbells)
5461
- __ufshcd_transfer_req_compl(hba,
5462
- (1UL << (hba->nutrs - 1)));
5463
-
6181
+ hba->force_reset = false;
54646182 spin_unlock_irqrestore(hba->host->host_lock, flags);
54656183 err = ufshcd_reset_and_restore(hba);
6184
+ if (err)
6185
+ dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6186
+ __func__, err);
6187
+ else
6188
+ ufshcd_recover_pm_error(hba);
54666189 spin_lock_irqsave(hba->host->host_lock, flags);
5467
- if (err) {
5468
- dev_err(hba->dev, "%s: reset and restore failed\n",
5469
- __func__);
5470
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
5471
- }
5472
- /*
5473
- * Inform scsi mid-layer that we did reset and allow to handle
5474
- * Unit Attention properly.
5475
- */
5476
- scsi_report_bus_reset(hba->host, 0);
5477
- hba->saved_err = 0;
5478
- hba->saved_uic_err = 0;
54796190 }
54806191
54816192 skip_err_handling:
54826193 if (!needs_reset) {
5483
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6194
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6195
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
54846196 if (hba->saved_err || hba->saved_uic_err)
54856197 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
54866198 __func__, hba->saved_err, hba->saved_uic_err);
54876199 }
5488
-
54896200 ufshcd_clear_eh_in_progress(hba);
5490
-
5491
-out:
54926201 spin_unlock_irqrestore(hba->host->host_lock, flags);
5493
- ufshcd_scsi_unblock_requests(hba);
5494
- ufshcd_release(hba);
5495
- pm_runtime_put_sync(hba->dev);
5496
-}
5497
-
5498
-static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5499
- u32 reg)
5500
-{
5501
- reg_hist->reg[reg_hist->pos] = reg;
5502
- reg_hist->tstamp[reg_hist->pos] = ktime_get();
5503
- reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
6202
+ ufshcd_err_handling_unprepare(hba);
6203
+ up(&hba->host_sem);
55046204 }
55056205
55066206 /**
55076207 * ufshcd_update_uic_error - check and set fatal UIC error flags.
55086208 * @hba: per-adapter instance
6209
+ *
6210
+ * Returns
6211
+ * IRQ_HANDLED - If interrupt is valid
6212
+ * IRQ_NONE - If invalid interrupt
55096213 */
5510
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
6214
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
55116215 {
55126216 u32 reg;
6217
+ irqreturn_t retval = IRQ_NONE;
55136218
5514
- /* PHY layer lane error */
6219
+ /* PHY layer error */
55156220 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5516
- /* Ignore LINERESET indication, as this is not an error */
55176221 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5518
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
6222
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6223
+ ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
55196224 /*
55206225 * To know whether this error is fatal or not, DB timeout
55216226 * must be checked but this error is handled separately.
55226227 */
5523
- dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5524
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
6228
+ if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6229
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6230
+ __func__);
6231
+
6232
+ /* Got a LINERESET indication. */
6233
+ if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6234
+ struct uic_command *cmd = NULL;
6235
+
6236
+ hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6237
+ if (hba->uic_async_done && hba->active_uic_cmd)
6238
+ cmd = hba->active_uic_cmd;
6239
+ /*
6240
+ * Ignore the LINERESET during power mode change
6241
+ * operation via DME_SET command.
6242
+ */
6243
+ if (cmd && (cmd->command == UIC_CMD_DME_SET))
6244
+ hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6245
+ }
6246
+ retval |= IRQ_HANDLED;
55256247 }
55266248
55276249 /* PA_INIT_ERROR is fatal and needs UIC reset */
55286250 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5529
- if (reg)
5530
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
6251
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6252
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6253
+ ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
55316254
5532
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5533
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5534
- else if (hba->dev_quirks &
5535
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5536
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5537
- hba->uic_error |=
5538
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5539
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5540
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6255
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6256
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6257
+ else if (hba->dev_quirks &
6258
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6259
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6260
+ hba->uic_error |=
6261
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6262
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6263
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6264
+ }
6265
+ retval |= IRQ_HANDLED;
55416266 }
55426267
55436268 /* UIC NL/TL/DME errors needs software retry */
55446269 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5545
- if (reg) {
5546
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
6270
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6271
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6272
+ ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
55476273 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6274
+ retval |= IRQ_HANDLED;
55486275 }
55496276
55506277 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5551
- if (reg) {
5552
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
6278
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6279
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6280
+ ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
55536281 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6282
+ retval |= IRQ_HANDLED;
55546283 }
55556284
55566285 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5557
- if (reg) {
5558
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
6286
+ if ((reg & UIC_DME_ERROR) &&
6287
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
6288
+ ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
55596289 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6290
+ retval |= IRQ_HANDLED;
55606291 }
55616292
55626293 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
55636294 __func__, hba->uic_error);
6295
+ return retval;
55646296 }
55656297
55666298 /**
55676299 * ufshcd_check_errors - Check for errors that need s/w attention
55686300 * @hba: per-adapter instance
6301
+ * @intr_status: interrupt status generated by the controller
6302
+ *
6303
+ * Returns
6304
+ * IRQ_HANDLED - If interrupt is valid
6305
+ * IRQ_NONE - If invalid interrupt
55696306 */
5570
-static void ufshcd_check_errors(struct ufs_hba *hba)
6307
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
55716308 {
55726309 bool queue_eh_work = false;
6310
+ irqreturn_t retval = IRQ_NONE;
55736311
5574
- if (hba->errors & INT_FATAL_ERRORS)
6312
+ spin_lock(hba->host->host_lock);
6313
+ hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6314
+
6315
+ if (hba->errors & INT_FATAL_ERRORS) {
6316
+ ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6317
+ hba->errors);
55756318 queue_eh_work = true;
6319
+ }
55766320
55776321 if (hba->errors & UIC_ERROR) {
55786322 hba->uic_error = 0;
5579
- ufshcd_update_uic_error(hba);
5580
- if (hba->uic_error)
6323
+ retval = ufshcd_update_uic_error(hba);
6324
+ if (hba->uic_error) {
6325
+ dev_err(hba->dev,
6326
+ "Scheduling error handler because of an UIC error\n");
55816327 queue_eh_work = true;
6328
+ }
55826329 }
6330
+
6331
+ if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6332
+ dev_err(hba->dev,
6333
+ "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6334
+ __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6335
+ "Enter" : "Exit",
6336
+ hba->errors, ufshcd_get_upmcrs(hba));
6337
+ ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6338
+ hba->errors);
6339
+ ufshcd_set_link_broken(hba);
6340
+ queue_eh_work = true;
6341
+ }
6342
+
6343
+ trace_android_vh_ufs_check_int_errors(hba, queue_eh_work);
55836344
55846345 if (queue_eh_work) {
55856346 /*
....@@ -5589,30 +6350,20 @@
55896350 hba->saved_err |= hba->errors;
55906351 hba->saved_uic_err |= hba->uic_error;
55916352
5592
- /* handle fatal errors only when link is functional */
5593
- if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5594
- /* block commands from scsi mid-layer */
5595
- ufshcd_scsi_block_requests(hba);
5596
-
5597
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5598
-
5599
- /* dump controller state before resetting */
5600
- if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5601
- bool pr_prdt = !!(hba->saved_err &
5602
- SYSTEM_BUS_FATAL_ERROR);
5603
-
5604
- dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6353
+ /* dump controller state before resetting */
6354
+ if ((hba->saved_err &
6355
+ (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6356
+ (hba->saved_uic_err &&
6357
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6358
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
56056359 __func__, hba->saved_err,
56066360 hba->saved_uic_err);
5607
-
5608
- ufshcd_print_host_regs(hba);
5609
- ufshcd_print_pwr_info(hba);
5610
- ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5611
- ufshcd_print_trs(hba, hba->outstanding_reqs,
5612
- pr_prdt);
5613
- }
5614
- schedule_work(&hba->eh_work);
6361
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6362
+ "host_regs: ");
6363
+ ufshcd_print_pwr_info(hba);
56156364 }
6365
+ ufshcd_schedule_eh_work(hba);
6366
+ retval |= IRQ_HANDLED;
56166367 }
56176368 /*
56186369 * if (!queue_eh_work) -
....@@ -5620,40 +6371,68 @@
56206371 * itself without s/w intervention or errors that will be
56216372 * handled by the SCSI core layer.
56226373 */
6374
+ hba->errors = 0;
6375
+ hba->uic_error = 0;
6376
+ spin_unlock(hba->host->host_lock);
6377
+ return retval;
56236378 }
56246379
56256380 /**
56266381 * ufshcd_tmc_handler - handle task management function completion
56276382 * @hba: per adapter instance
6383
+ *
6384
+ * Returns
6385
+ * IRQ_HANDLED - If interrupt is valid
6386
+ * IRQ_NONE - If invalid interrupt
56286387 */
5629
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
6388
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
56306389 {
5631
- u32 tm_doorbell;
6390
+ struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs;
6391
+ unsigned long flags, pending, issued;
6392
+ irqreturn_t ret = IRQ_NONE;
6393
+ int tag;
56326394
5633
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5634
- hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5635
- wake_up(&hba->tm_wq);
6395
+ spin_lock_irqsave(hba->host->host_lock, flags);
6396
+ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
6397
+ issued = hba->outstanding_tasks & ~pending;
6398
+ for_each_set_bit(tag, &issued, hba->nutmrs) {
6399
+ struct request *req = tmf_rqs[tag];
6400
+ struct completion *c = req->end_io_data;
6401
+
6402
+ complete(c);
6403
+ ret = IRQ_HANDLED;
6404
+ }
6405
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6406
+
6407
+ return ret;
56366408 }
56376409
56386410 /**
56396411 * ufshcd_sl_intr - Interrupt service routine
56406412 * @hba: per adapter instance
56416413 * @intr_status: contains interrupts generated by the controller
6414
+ *
6415
+ * Returns
6416
+ * IRQ_HANDLED - If interrupt is valid
6417
+ * IRQ_NONE - If invalid interrupt
56426418 */
5643
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6419
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
56446420 {
5645
- hba->errors = UFSHCD_ERROR_MASK & intr_status;
5646
- if (hba->errors)
5647
- ufshcd_check_errors(hba);
6421
+ irqreturn_t retval = IRQ_NONE;
56486422
56496423 if (intr_status & UFSHCD_UIC_MASK)
5650
- ufshcd_uic_cmd_compl(hba, intr_status);
6424
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6425
+
6426
+ if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6427
+ retval |= ufshcd_check_errors(hba, intr_status);
56516428
56526429 if (intr_status & UTP_TASK_REQ_COMPL)
5653
- ufshcd_tmc_handler(hba);
6430
+ retval |= ufshcd_tmc_handler(hba);
56546431
56556432 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5656
- ufshcd_transfer_req_compl(hba);
6433
+ retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
6434
+
6435
+ return retval;
56576436 }
56586437
56596438 /**
....@@ -5661,8 +6440,9 @@
56616440 * @irq: irq number
56626441 * @__hba: pointer to adapter instance
56636442 *
5664
- * Returns IRQ_HANDLED - If interrupt is valid
5665
- * IRQ_NONE - If invalid interrupt
6443
+ * Returns
6444
+ * IRQ_HANDLED - If interrupt is valid
6445
+ * IRQ_NONE - If invalid interrupt
56666446 */
56676447 static irqreturn_t ufshcd_intr(int irq, void *__hba)
56686448 {
....@@ -5671,8 +6451,9 @@
56716451 struct ufs_hba *hba = __hba;
56726452 int retries = hba->nutrs;
56736453
5674
- spin_lock(hba->host->host_lock);
56756454 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
6455
+ hba->ufs_stats.last_intr_status = intr_status;
6456
+ hba->ufs_stats.last_intr_ts = ktime_get();
56766457
56776458 /*
56786459 * There could be max of hba->nutrs reqs in flight and in worst case
....@@ -5685,15 +6466,22 @@
56856466 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
56866467 if (intr_status)
56876468 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5688
- if (enabled_intr_status) {
5689
- ufshcd_sl_intr(hba, enabled_intr_status);
5690
- retval = IRQ_HANDLED;
5691
- }
6469
+ if (enabled_intr_status)
6470
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
56926471
56936472 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
56946473 }
56956474
5696
- spin_unlock(hba->host->host_lock);
6475
+ if (enabled_intr_status && retval == IRQ_NONE &&
6476
+ !ufshcd_eh_in_progress(hba)) {
6477
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
6478
+ __func__,
6479
+ intr_status,
6480
+ hba->ufs_stats.last_intr_status,
6481
+ enabled_intr_status);
6482
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
6483
+ }
6484
+
56976485 return retval;
56986486 }
56996487
....@@ -5713,8 +6501,81 @@
57136501 /* poll for max. 1 sec to clear door bell register by h/w */
57146502 err = ufshcd_wait_for_register(hba,
57156503 REG_UTP_TASK_REQ_DOOR_BELL,
5716
- mask, 0, 1000, 1000, true);
6504
+ mask, 0, 1000, 1000);
57176505 out:
6506
+ return err;
6507
+}
6508
+
6509
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
6510
+ struct utp_task_req_desc *treq, u8 tm_function)
6511
+{
6512
+ struct request **tmf_rqs = ufs_hba_add_info(hba)->tmf_rqs;
6513
+ struct request_queue *q = hba->tmf_queue;
6514
+ struct Scsi_Host *host = hba->host;
6515
+ DECLARE_COMPLETION_ONSTACK(wait);
6516
+ struct request *req;
6517
+ unsigned long flags;
6518
+ int task_tag, err;
6519
+
6520
+ /*
6521
+ * blk_get_request() is used here only to get a free tag.
6522
+ */
6523
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
6524
+ if (IS_ERR(req))
6525
+ return PTR_ERR(req);
6526
+
6527
+ req->end_io_data = &wait;
6528
+ ufshcd_hold(hba, false);
6529
+
6530
+ spin_lock_irqsave(host->host_lock, flags);
6531
+
6532
+ task_tag = req->tag;
6533
+ tmf_rqs[req->tag] = req;
6534
+ treq->req_header.dword_0 |= cpu_to_be32(task_tag);
6535
+
6536
+ memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
6537
+ ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
6538
+
6539
+ /* send command to the controller */
6540
+ __set_bit(task_tag, &hba->outstanding_tasks);
6541
+
6542
+ /* Make sure descriptors are ready before ringing the task doorbell */
6543
+ wmb();
6544
+
6545
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
6546
+ /* Make sure that doorbell is committed immediately */
6547
+ wmb();
6548
+
6549
+ spin_unlock_irqrestore(host->host_lock, flags);
6550
+
6551
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6552
+
6553
+ /* wait until the task management command is completed */
6554
+ err = wait_for_completion_io_timeout(&wait,
6555
+ msecs_to_jiffies(TM_CMD_TIMEOUT));
6556
+ if (!err) {
6557
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
6558
+ dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
6559
+ __func__, tm_function);
6560
+ if (ufshcd_clear_tm_cmd(hba, task_tag))
6561
+ dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
6562
+ __func__, task_tag);
6563
+ err = -ETIMEDOUT;
6564
+ } else {
6565
+ err = 0;
6566
+ memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
6567
+
6568
+ ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6569
+ }
6570
+
6571
+ spin_lock_irqsave(hba->host->host_lock, flags);
6572
+ tmf_rqs[req->tag] = NULL;
6573
+ __clear_bit(task_tag, &hba->outstanding_tasks);
6574
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
6575
+
6576
+ ufshcd_release(hba);
6577
+ blk_put_request(req);
6578
+
57186579 return err;
57196580 }
57206581
....@@ -5731,87 +6592,212 @@
57316592 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
57326593 u8 tm_function, u8 *tm_response)
57336594 {
5734
- struct utp_task_req_desc *task_req_descp;
5735
- struct utp_upiu_task_req *task_req_upiup;
5736
- struct Scsi_Host *host;
5737
- unsigned long flags;
5738
- int free_slot;
5739
- int err;
5740
- int task_tag;
5741
-
5742
- host = hba->host;
5743
-
5744
- /*
5745
- * Get free slot, sleep if slots are unavailable.
5746
- * Even though we use wait_event() which sleeps indefinitely,
5747
- * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5748
- */
5749
- wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5750
- ufshcd_hold(hba, false);
5751
-
5752
- spin_lock_irqsave(host->host_lock, flags);
5753
- task_req_descp = hba->utmrdl_base_addr;
5754
- task_req_descp += free_slot;
6595
+ struct utp_task_req_desc treq = { { 0 }, };
6596
+ int ocs_value, err;
57556597
57566598 /* Configure task request descriptor */
5757
- task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5758
- task_req_descp->header.dword_2 =
5759
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6599
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6600
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
57606601
57616602 /* Configure task request UPIU */
5762
- task_req_upiup =
5763
- (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
5764
- task_tag = hba->nutrs + free_slot;
5765
- task_req_upiup->header.dword_0 =
5766
- UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5767
- lun_id, task_tag);
5768
- task_req_upiup->header.dword_1 =
5769
- UPIU_HEADER_DWORD(0, tm_function, 0, 0);
6603
+ treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
6604
+ cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
6605
+ treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
6606
+
57706607 /*
57716608 * The host shall provide the same value for LUN field in the basic
57726609 * header and for Input Parameter.
57736610 */
5774
- task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5775
- task_req_upiup->input_param2 = cpu_to_be32(task_id);
6611
+ treq.input_param1 = cpu_to_be32(lun_id);
6612
+ treq.input_param2 = cpu_to_be32(task_id);
57766613
5777
- ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
6614
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
6615
+ if (err == -ETIMEDOUT)
6616
+ return err;
57786617
5779
- /* send command to the controller */
5780
- __set_bit(free_slot, &hba->outstanding_tasks);
6618
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6619
+ if (ocs_value != OCS_SUCCESS)
6620
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
6621
+ __func__, ocs_value);
6622
+ else if (tm_response)
6623
+ *tm_response = be32_to_cpu(treq.output_param1) &
6624
+ MASK_TM_SERVICE_RESP;
6625
+ return err;
6626
+}
57816627
5782
- /* Make sure descriptors are ready before ringing the task doorbell */
5783
- wmb();
6628
+/**
6629
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
6630
+ * @hba: per-adapter instance
6631
+ * @req_upiu: upiu request
6632
+ * @rsp_upiu: upiu reply
6633
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
6634
+ * @buff_len: descriptor size, 0 if NA
6635
+ * @cmd_type: specifies the type (NOP, Query...)
6636
+ * @desc_op: descriptor operation
6637
+ *
6638
+ * These types of requests use the UTP Transfer Request Descriptor (UTRD).
6639
+ * Therefore, they "ride" the device management infrastructure: they use its
6640
+ * tag and work queues.
6641
+ *
6642
+ * Since there is only one available tag for device management commands,
6643
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
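+ *
+ * The request is built in the lrb of the reserved device-management tag and
+ * completed synchronously via hba->dev_cmd.complete.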
6644
+ */
6645
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
6646
+ struct utp_upiu_req *req_upiu,
6647
+ struct utp_upiu_req *rsp_upiu,
6648
+ u8 *desc_buff, int *buff_len,
6649
+ enum dev_cmd_type cmd_type,
6650
+ enum query_opcode desc_op)
6651
+{
6652
+ DECLARE_COMPLETION_ONSTACK(wait);
6653
+ const u32 tag = ufs_hba_add_info(hba)->reserved_slot;
6654
+ struct ufshcd_lrb *lrbp;
6655
+ int err = 0;
6656
+ u8 upiu_flags;
57846657
5785
- ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5786
- /* Make sure that doorbell is committed immediately */
5787
- wmb();
6658
+ /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */
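+ /* (Only one device management request may be outstanding at a time, so a
+  *  single reserved tag is sufficient.)
+  */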
6659
+ lockdep_assert_held(&hba->dev_cmd.lock);
57886660
5789
- spin_unlock_irqrestore(host->host_lock, flags);
6661
+ down_read(&hba->clk_scaling_lock);
57906662
5791
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
6663
+ lrbp = &hba->lrb[tag];
6664
+ WARN_ON(lrbp->cmd);
6665
+ lrbp->cmd = NULL;
6666
+ lrbp->sense_bufflen = 0;
6667
+ lrbp->sense_buffer = NULL;
6668
+ lrbp->task_tag = tag;
6669
+ lrbp->lun = 0;
6670
+ lrbp->intr_cmd = true;
6671
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
6672
+ hba->dev_cmd.type = cmd_type;
57926673
5793
- /* wait until the task management command is completed */
5794
- err = wait_event_timeout(hba->tm_wq,
5795
- test_bit(free_slot, &hba->tm_condition),
5796
- msecs_to_jiffies(TM_CMD_TIMEOUT));
5797
- if (!err) {
5798
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5799
- dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5800
- __func__, tm_function);
5801
- if (ufshcd_clear_tm_cmd(hba, free_slot))
5802
- dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
5803
- __func__, free_slot);
5804
- err = -ETIMEDOUT;
5805
- } else {
5806
- err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5807
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
6674
+ if (hba->ufs_version <= ufshci_version(1, 1))
6675
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
6676
+ else
6677
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
6678
+
6679
+ /* update the task tag in the request upiu */
6680
+ req_upiu->header.dword_0 |= cpu_to_be32(tag);
6681
+
6682
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
6683
+
6684
+ /* just copy the upiu request as it is */
6685
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
6686
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
6687
+ /* The Data Segment Area is optional depending upon the query
6688
+ * function value. For WRITE DESCRIPTOR, the data segment
6689
+ * follows right after the tsf.
6690
+ */
6691
+ memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
6692
+ *buff_len = 0;
58086693 }
58096694
5810
- clear_bit(free_slot, &hba->tm_condition);
5811
- ufshcd_put_tm_slot(hba, free_slot);
5812
- wake_up(&hba->tm_tag_wq);
6695
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
58136696
5814
- ufshcd_release(hba);
6697
+ hba->dev_cmd.complete = &wait;
6698
+
6699
+ /* Make sure descriptors are ready before ringing the doorbell */
6700
+ wmb();
6701
+
6702
+ ufshcd_send_command(hba, tag);
6703
+ /*
6704
+ * Ignore the return value here - ufshcd_check_query_response() is
6705
+ * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
6706
+ * Read the response directly, ignoring all errors.
6707
+ */
6708
+ ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
6709
+
6710
+ /* just copy the upiu response as it is */
6711
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
6712
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
6713
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
6714
+ u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
6715
+ MASK_QUERY_DATA_SEG_LEN;
6716
+
6717
+ if (*buff_len >= resp_len) {
6718
+ memcpy(desc_buff, descp, resp_len);
6719
+ *buff_len = resp_len;
6720
+ } else {
6721
+ dev_warn(hba->dev,
6722
+ "%s: rsp size %d is bigger than buffer size %d",
6723
+ __func__, resp_len, *buff_len);
6724
+ *buff_len = 0;
6725
+ err = -EINVAL;
6726
+ }
6727
+ }
6728
+
6729
+ up_read(&hba->clk_scaling_lock);
6730
+ return err;
6731
+}
6732
+
6733
+/**
6734
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
6735
+ * @hba: per-adapter instance
6736
+ * @req_upiu: upiu request
6737
+ * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
6738
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
6739
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
6740
+ * @buff_len: descriptor size, 0 if NA
6741
+ * @desc_op: descriptor operation
6742
+ *
6743
+ * Supports UTP Transfer requests (nop and query), and UTP Task
6744
+ * Management requests.
6745
+ * It is up to the caller to fill the UPIU content properly, as it will
6746
+ * be copied without any further input validation.
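+ *
+ * A minimal usage sketch (illustrative only; it assumes the caller already
+ * has a powered, initialized hba, e.g. the BSG path in ufs_bsg.c):
+ *
+ *    struct utp_upiu_req req = {}, rsp = {};
+ *    int desc_len = 0;
+ *
+ *    req.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
+ *    err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
+ *                                   UPIU_TRANSACTION_NOP_OUT,
+ *                                   NULL, &desc_len,
+ *                                   UPIU_QUERY_OPCODE_NOP);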
6747
+ */
6748
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
6749
+ struct utp_upiu_req *req_upiu,
6750
+ struct utp_upiu_req *rsp_upiu,
6751
+ int msgcode,
6752
+ u8 *desc_buff, int *buff_len,
6753
+ enum query_opcode desc_op)
6754
+{
6755
+ int err;
6756
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
6757
+ struct utp_task_req_desc treq = { { 0 }, };
6758
+ int ocs_value;
6759
+ u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
6760
+
6761
+ switch (msgcode) {
6762
+ case UPIU_TRANSACTION_NOP_OUT:
6763
+ cmd_type = DEV_CMD_TYPE_NOP;
6764
+ fallthrough;
6765
+ case UPIU_TRANSACTION_QUERY_REQ:
6766
+ ufshcd_hold(hba, false);
6767
+ mutex_lock(&hba->dev_cmd.lock);
6768
+ err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
6769
+ desc_buff, buff_len,
6770
+ cmd_type, desc_op);
6771
+ mutex_unlock(&hba->dev_cmd.lock);
6772
+ ufshcd_release(hba);
6773
+
6774
+ break;
6775
+ case UPIU_TRANSACTION_TASK_REQ:
6776
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
6777
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
6778
+
6779
+ memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
6780
+
6781
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
6782
+ if (err == -ETIMEDOUT)
6783
+ break;
6784
+
6785
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
6786
+ if (ocs_value != OCS_SUCCESS) {
6787
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
6788
+ ocs_value);
6789
+ break;
6790
+ }
6791
+
6792
+ memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
6793
+
6794
+ break;
6795
+ default:
6796
+ err = -EINVAL;
6797
+
6798
+ break;
6799
+ }
6800
+
58156801 return err;
58166802 }
58176803
....@@ -5829,7 +6815,6 @@
58296815 u32 pos;
58306816 int err;
58316817 u8 resp = 0xF, lun;
5832
- unsigned long flags;
58336818
58346819 host = cmd->device->host;
58356820 hba = shost_priv(host);
....@@ -5848,14 +6833,13 @@
58486833 err = ufshcd_clear_cmd(hba, pos);
58496834 if (err)
58506835 break;
6836
+ __ufshcd_transfer_req_compl(hba, 1U << pos);
58516837 }
58526838 }
5853
- spin_lock_irqsave(host->host_lock, flags);
5854
- ufshcd_transfer_req_compl(hba);
5855
- spin_unlock_irqrestore(host->host_lock, flags);
58566839
58576840 out:
58586841 hba->req_abort_count = 0;
6842
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
58596843 if (!err) {
58606844 err = SUCCESS;
58616845 } else {
....@@ -5877,8 +6861,9 @@
58776861 }
58786862
58796863 /**
5880
- * ufshcd_abort - abort a specific command
5881
- * @cmd: SCSI command pointer
6864
+ * ufshcd_try_to_abort_task - abort a specific task
6865
+ * @hba: Pointer to adapter instance
6866
+ * @tag: Task tag/index to be aborted
58826867 *
58836868 * Abort the pending command in device by sending UFS_ABORT_TASK task management
58846869 * command, and in host controller by clearing the door-bell register. There can
....@@ -5886,83 +6871,15 @@
58866871 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
58876872 * really issued and then try to abort it.
58886873 *
5889
- * Returns SUCCESS/FAILED
6874
+ * Returns zero on success, non-zero on failure
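+ *
+ * Rough flow (matching the body below): poll with UFS_QUERY_TASK until the
+ * device confirms the command is still pending, then issue UFS_ABORT_TASK
+ * and clear the corresponding doorbell bit via ufshcd_clear_cmd().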
58906875 */
5891
-static int ufshcd_abort(struct scsi_cmnd *cmd)
6876
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
58926877 {
5893
- struct Scsi_Host *host;
5894
- struct ufs_hba *hba;
5895
- unsigned long flags;
5896
- unsigned int tag;
6878
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
58976879 int err = 0;
58986880 int poll_cnt;
58996881 u8 resp = 0xF;
5900
- struct ufshcd_lrb *lrbp;
59016882 u32 reg;
5902
-
5903
- host = cmd->device->host;
5904
- hba = shost_priv(host);
5905
- tag = cmd->request->tag;
5906
- lrbp = &hba->lrb[tag];
5907
- if (!ufshcd_valid_tag(hba, tag)) {
5908
- dev_err(hba->dev,
5909
- "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5910
- __func__, tag, cmd, cmd->request);
5911
- BUG();
5912
- }
5913
-
5914
- /*
5915
- * Task abort to the device W-LUN is illegal. When this command
5916
- * will fail, due to spec violation, scsi err handling next step
5917
- * will be to send LU reset which, again, is a spec violation.
5918
- * To avoid these unnecessary/illegal step we skip to the last error
5919
- * handling stage: reset and restore.
5920
- */
5921
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5922
- return ufshcd_eh_host_reset_handler(cmd);
5923
-
5924
- ufshcd_hold(hba, false);
5925
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5926
- /* If command is already aborted/completed, return SUCCESS */
5927
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
5928
- dev_err(hba->dev,
5929
- "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5930
- __func__, tag, hba->outstanding_reqs, reg);
5931
- goto out;
5932
- }
5933
-
5934
- if (!(reg & (1 << tag))) {
5935
- dev_err(hba->dev,
5936
- "%s: cmd was completed, but without a notifying intr, tag = %d",
5937
- __func__, tag);
5938
- }
5939
-
5940
- /* Print Transfer Request of aborted task */
5941
- dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5942
-
5943
- /*
5944
- * Print detailed info about aborted request.
5945
- * As more than one request might get aborted at the same time,
5946
- * print full information only for the first aborted request in order
5947
- * to reduce repeated printouts. For other aborted requests only print
5948
- * basic details.
5949
- */
5950
- scsi_print_command(hba->lrb[tag].cmd);
5951
- if (!hba->req_abort_count) {
5952
- ufshcd_print_host_regs(hba);
5953
- ufshcd_print_host_state(hba);
5954
- ufshcd_print_pwr_info(hba);
5955
- ufshcd_print_trs(hba, 1 << tag, true);
5956
- } else {
5957
- ufshcd_print_trs(hba, 1 << tag, false);
5958
- }
5959
- hba->req_abort_count++;
5960
-
5961
- /* Skip task abort in case previous aborts failed and report failure */
5962
- if (lrbp->req_abort_skip) {
5963
- err = -EIO;
5964
- goto out;
5965
- }
59666883
59676884 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
59686885 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
....@@ -5988,7 +6905,7 @@
59886905 /* command completed already */
59896906 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
59906907 __func__, tag);
5991
- goto cleanup;
6908
+ goto out;
59926909 } else {
59936910 dev_err(hba->dev,
59946911 "%s: no response from device. tag = %d, err %d\n",
....@@ -6016,36 +6933,129 @@
60166933 }
60176934
60186935 err = ufshcd_clear_cmd(hba, tag);
6019
- if (err) {
6936
+ if (err)
60206937 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
60216938 __func__, tag, err);
6022
- goto out;
6023
- }
6024
-
6025
-cleanup:
6026
- scsi_dma_unmap(cmd);
6027
-
6028
- spin_lock_irqsave(host->host_lock, flags);
6029
- ufshcd_outstanding_req_clear(hba, tag);
6030
- hba->lrb[tag].cmd = NULL;
6031
- spin_unlock_irqrestore(host->host_lock, flags);
6032
-
6033
- clear_bit_unlock(tag, &hba->lrb_in_use);
6034
- wake_up(&hba->dev_cmd.tag_wq);
60356939
60366940 out:
6037
- if (!err) {
6038
- err = SUCCESS;
6941
+ return err;
6942
+}
6943
+
6944
+/**
6945
+ * ufshcd_abort - scsi host template eh_abort_handler callback
6946
+ * @cmd: SCSI command pointer
6947
+ *
6948
+ * Returns SUCCESS/FAILED
6949
+ */
6950
+static int ufshcd_abort(struct scsi_cmnd *cmd)
6951
+{
6952
+ struct Scsi_Host *host;
6953
+ struct ufs_hba *hba;
6954
+ unsigned long flags;
6955
+ unsigned int tag;
6956
+ int err = FAILED, res;
6957
+ bool outstanding;
6958
+ struct ufshcd_lrb *lrbp;
6959
+ u32 reg;
6960
+
6961
+ host = cmd->device->host;
6962
+ hba = shost_priv(host);
6963
+ tag = cmd->request->tag;
6964
+ lrbp = &hba->lrb[tag];
6965
+ if (!ufshcd_valid_tag(hba, tag)) {
6966
+ dev_err(hba->dev,
6967
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
6968
+ __func__, tag, cmd, cmd->request);
6969
+ BUG();
6970
+ }
6971
+
6972
+ ufshcd_hold(hba, false);
6973
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6974
+ /* If command is already aborted/completed, return FAILED. */
6975
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
6976
+ dev_err(hba->dev,
6977
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6978
+ __func__, tag, hba->outstanding_reqs, reg);
6979
+ goto release;
6980
+ }
6981
+
6982
+ /* Print Transfer Request of aborted task */
6983
+ dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6984
+
6985
+ /*
6986
+ * Print detailed info about aborted request.
6987
+ * As more than one request might get aborted at the same time,
6988
+ * print full information only for the first aborted request in order
6989
+ * to reduce repeated printouts. For other aborted requests only print
6990
+ * basic details.
6991
+ */
6992
+ scsi_print_command(cmd);
6993
+ if (!hba->req_abort_count) {
6994
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
6995
+ ufshcd_print_evt_hist(hba);
6996
+ ufshcd_print_host_state(hba);
6997
+ ufshcd_print_pwr_info(hba);
6998
+ ufshcd_print_trs(hba, 1 << tag, true);
60396999 } else {
6040
- dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6041
- ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6042
- err = FAILED;
7000
+ ufshcd_print_trs(hba, 1 << tag, false);
7001
+ }
7002
+ hba->req_abort_count++;
7003
+
7004
+ if (!(reg & (1 << tag))) {
7005
+ dev_err(hba->dev,
7006
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
7007
+ __func__, tag);
7008
+ __ufshcd_transfer_req_compl(hba, 1UL << tag);
7009
+ goto release;
60437010 }
60447011
60457012 /*
6046
- * This ufshcd_release() corresponds to the original scsi cmd that got
6047
- * aborted here (as we won't get any IRQ for it).
7013
+ * Task abort to the device W-LUN is illegal. When such a command
7014
+ * fails due to that spec violation, the next step of SCSI error
7015
+ * handling would be to send a LU reset, which is again a spec
7016
+ * violation. To avoid these unnecessary/illegal steps, skip the
7017
+ * per-command abort here: schedule the error handler for a full
7018
+ * reset and restore instead, then bail out.
60487019 */
7020
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7021
+ ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7022
+
7023
+ spin_lock_irqsave(host->host_lock, flags);
7024
+ hba->force_reset = true;
7025
+ ufshcd_schedule_eh_work(hba);
7026
+ spin_unlock_irqrestore(host->host_lock, flags);
7027
+ goto release;
7028
+ }
7029
+
7030
+ /* Skip task abort in case previous aborts failed and report failure */
7031
+ if (lrbp->req_abort_skip) {
7032
+ dev_err(hba->dev, "%s: skipping abort\n", __func__);
7033
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7034
+ goto release;
7035
+ }
7036
+
7037
+ res = ufshcd_try_to_abort_task(hba, tag);
7038
+ if (res) {
7039
+ dev_err(hba->dev, "%s: failed with err %d\n", __func__, res);
7040
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7041
+ goto release;
7042
+ }
7043
+
7044
+ /*
7045
+ * Clear the corresponding bit from outstanding_reqs since the command
7046
+ * has been aborted successfully.
7047
+ */
7048
+ spin_lock_irqsave(host->host_lock, flags);
7049
+ outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7050
+ spin_unlock_irqrestore(host->host_lock, flags);
7051
+
7052
+ if (outstanding)
7053
+ ufshcd_release_scsi_cmd(hba, lrbp);
7054
+
7055
+ err = SUCCESS;
7056
+
7057
+release:
7058
+ /* Matches the ufshcd_hold() call at the start of this function. */
60497059 ufshcd_release(hba);
60507060 return err;
60517061 }
....@@ -6063,35 +7073,29 @@
60637073 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
60647074 {
60657075 int err;
6066
- unsigned long flags;
60677076
7077
+ ufshpb_reset_host(hba);
60687078 /*
60697079 * Stop the host controller and complete the requests
60707080 * cleared by h/w
60717081 */
6072
- spin_lock_irqsave(hba->host->host_lock, flags);
6073
- ufshcd_hba_stop(hba, false);
7082
+ ufshcd_hba_stop(hba);
60747083 hba->silence_err_logs = true;
60757084 ufshcd_complete_requests(hba);
60767085 hba->silence_err_logs = false;
6077
- spin_unlock_irqrestore(hba->host->host_lock, flags);
60787086
60797087 /* scale up clocks to max frequency before full reinitialization */
6080
- ufshcd_scale_clks(hba, true);
7088
+ ufshcd_set_clk_freq(hba, true);
60817089
60827090 err = ufshcd_hba_enable(hba);
6083
- if (err)
6084
- goto out;
60857091
60867092 /* Establish the link again and restore the device */
6087
- err = ufshcd_probe_hba(hba);
7093
+ if (!err)
7094
+ err = ufshcd_probe_hba(hba, false);
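+ /* (Passing "false" skips re-reading the device descriptors here; they
+  *  were already cached during the initial asynchronous probe.)
+  */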
60887095
6089
- if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6090
- err = -EIO;
6091
-out:
60927096 if (err)
60937097 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6094
-
7098
+ ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
60957099 return err;
60967100 }
60977101
....@@ -6106,12 +7110,42 @@
61067110 */
61077111 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
61087112 {
7113
+ u32 saved_err;
7114
+ u32 saved_uic_err;
61097115 int err = 0;
7116
+ unsigned long flags;
61107117 int retries = MAX_HOST_RESET_RETRIES;
61117118
7119
+ /*
7120
+ * This is a fresh start; cache and clear the saved errors first,
7121
+ * in case new errors are generated during reset and restore.
7122
+ */
7123
+ spin_lock_irqsave(hba->host->host_lock, flags);
7124
+ saved_err = hba->saved_err;
7125
+ saved_uic_err = hba->saved_uic_err;
7126
+ hba->saved_err = 0;
7127
+ hba->saved_uic_err = 0;
7128
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
7129
+
61127130 do {
7131
+ /* Reset the attached device */
7132
+ ufshcd_vops_device_reset(hba);
7133
+
61137134 err = ufshcd_host_reset_and_restore(hba);
61147135 } while (err && --retries);
7136
+
7137
+ spin_lock_irqsave(hba->host->host_lock, flags);
7138
+ /*
7139
+ * Inform the SCSI mid-layer that we did a reset, so that it can
7140
+ * handle the resulting Unit Attention properly.
7141
+ */
7142
+ scsi_report_bus_reset(hba->host, 0);
7143
+ if (err) {
7144
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
7145
+ hba->saved_err |= saved_err;
7146
+ hba->saved_uic_err |= saved_uic_err;
7147
+ }
7148
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
61157149
61167150 return err;
61177151 }
....@@ -6124,48 +7158,25 @@
61247158 */
61257159 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
61267160 {
6127
- int err;
7161
+ int err = SUCCESS;
61287162 unsigned long flags;
61297163 struct ufs_hba *hba;
61307164
61317165 hba = shost_priv(cmd->device->host);
61327166
6133
- ufshcd_hold(hba, false);
6134
- /*
6135
- * Check if there is any race with fatal error handling.
6136
- * If so, wait for it to complete. Even though fatal error
6137
- * handling does reset and restore in some cases, don't assume
6138
- * anything out of it. We are just avoiding race here.
6139
- */
6140
- do {
6141
- spin_lock_irqsave(hba->host->host_lock, flags);
6142
- if (!(work_pending(&hba->eh_work) ||
6143
- hba->ufshcd_state == UFSHCD_STATE_RESET ||
6144
- hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6145
- break;
6146
- spin_unlock_irqrestore(hba->host->host_lock, flags);
6147
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6148
- flush_work(&hba->eh_work);
6149
- } while (1);
6150
-
6151
- hba->ufshcd_state = UFSHCD_STATE_RESET;
6152
- ufshcd_set_eh_in_progress(hba);
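+ /* Delegate the reset to the error handler: flag a forced reset, kick
+  * eh_work, and simply wait for it to finish via flush_work() below.
+  */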
7167
+ spin_lock_irqsave(hba->host->host_lock, flags);
7168
+ hba->force_reset = true;
7169
+ ufshcd_schedule_eh_work(hba);
7170
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
61537171 spin_unlock_irqrestore(hba->host->host_lock, flags);
61547172
6155
- err = ufshcd_reset_and_restore(hba);
7173
+ flush_work(&hba->eh_work);
61567174
61577175 spin_lock_irqsave(hba->host->host_lock, flags);
6158
- if (!err) {
6159
- err = SUCCESS;
6160
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6161
- } else {
7176
+ if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
61627177 err = FAILED;
6163
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
6164
- }
6165
- ufshcd_clear_eh_in_progress(hba);
61667178 spin_unlock_irqrestore(hba->host->host_lock, flags);
61677179
6168
- ufshcd_release(hba);
61697180 return err;
61707181 }
61717182
....@@ -6257,17 +7268,19 @@
62577268 return icc_level;
62587269 }
62597270
6260
-static void ufshcd_init_icc_levels(struct ufs_hba *hba)
7271
+static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
62617272 {
62627273 int ret;
6263
- int buff_len = hba->desc_size.pwr_desc;
7274
+ int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
62647275 u8 *desc_buf;
7276
+ u32 icc_level;
62657277
62667278 desc_buf = kmalloc(buff_len, GFP_KERNEL);
62677279 if (!desc_buf)
62687280 return;
62697281
6270
- ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
7282
+ ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7283
+ desc_buf, buff_len);
62717284 if (ret) {
62727285 dev_err(hba->dev,
62737286 "%s: Failed reading power descriptor.len = %d ret = %d",
....@@ -6275,20 +7288,17 @@
62757288 goto out;
62767289 }
62777290
6278
- hba->init_prefetch_data.icc_level =
6279
- ufshcd_find_max_sup_active_icc_level(hba,
6280
- desc_buf, buff_len);
6281
- dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6282
- __func__, hba->init_prefetch_data.icc_level);
7291
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
7292
+ buff_len);
7293
+ dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
62837294
62847295 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6285
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6286
- &hba->init_prefetch_data.icc_level);
7296
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
62877297
62887298 if (ret)
62897299 dev_err(hba->dev,
62907300 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6291
- __func__, hba->init_prefetch_data.icc_level , ret);
7301
+ __func__, icc_level, ret);
62927302
62937303 out:
62947304 kfree(desc_buf);
....@@ -6323,7 +7333,6 @@
63237333 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
63247334 {
63257335 int ret = 0;
6326
- struct scsi_device *sdev_rpmb;
63277336 struct scsi_device *sdev_boot;
63287337
63297338 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
....@@ -6335,13 +7344,13 @@
63357344 }
63367345 scsi_device_put(hba->sdev_ufs_device);
63377346
6338
- sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
7347
+ hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
63397348 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6340
- if (IS_ERR(sdev_rpmb)) {
6341
- ret = PTR_ERR(sdev_rpmb);
7349
+ if (IS_ERR(hba->sdev_rpmb)) {
7350
+ ret = PTR_ERR(hba->sdev_rpmb);
63427351 goto remove_sdev_ufs_device;
63437352 }
6344
- scsi_device_put(sdev_rpmb);
7353
+ scsi_device_put(hba->sdev_rpmb);
63457354
63467355 sdev_boot = __scsi_add_device(hba->host, 0, 0,
63477356 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
....@@ -6357,23 +7366,120 @@
63577366 return ret;
63587367 }
63597368
6360
-static int ufs_get_device_desc(struct ufs_hba *hba,
6361
- struct ufs_dev_desc *dev_desc)
7369
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
7370
+{
7371
+ struct ufs_dev_info *dev_info = &hba->dev_info;
7372
+ u8 lun;
7373
+ u32 d_lu_wb_buf_alloc;
7374
+
7375
+ if (!ufshcd_is_wb_allowed(hba))
7376
+ return;
7377
+ /*
7378
+ * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
7379
+ * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
7380
+ * enabled
7381
+ */
7382
+ if (!(dev_info->wspecversion >= 0x310 ||
7383
+ dev_info->wspecversion == 0x220 ||
7384
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
7385
+ goto wb_disabled;
7386
+
7387
+ if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
7388
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
7389
+ goto wb_disabled;
7390
+
7391
+ dev_info->d_ext_ufs_feature_sup =
7392
+ get_unaligned_be32(desc_buf +
7393
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
7394
+
7395
+ if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
7396
+ goto wb_disabled;
7397
+
7398
+ /*
7399
+ * WB may be supported but not configured while provisioning.
7400
+ * The spec says, in dedicated wb buffer mode,
7401
+ * a max of 1 lun would have wb buffer configured.
7402
+ * Now only shared buffer mode is supported.
7403
+ */
7404
+ dev_info->b_wb_buffer_type =
7405
+ desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
7406
+
7407
+ dev_info->b_presrv_uspc_en =
7408
+ desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
7409
+
7410
+ if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
7411
+ dev_info->d_wb_alloc_units =
7412
+ get_unaligned_be32(desc_buf +
7413
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
7414
+ if (!dev_info->d_wb_alloc_units)
7415
+ goto wb_disabled;
7416
+ } else {
7417
+ for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
7418
+ d_lu_wb_buf_alloc = 0;
7419
+ ufshcd_read_unit_desc_param(hba,
7420
+ lun,
7421
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
7422
+ (u8 *)&d_lu_wb_buf_alloc,
7423
+ sizeof(d_lu_wb_buf_alloc));
7424
+ if (d_lu_wb_buf_alloc) {
7425
+ dev_info->wb_dedicated_lu = lun;
7426
+ break;
7427
+ }
7428
+ }
7429
+
7430
+ if (!d_lu_wb_buf_alloc)
7431
+ goto wb_disabled;
7432
+ }
7433
+ return;
7434
+
7435
+wb_disabled:
7436
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
7437
+}
7438
+
7439
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
7440
+{
7441
+ struct ufs_dev_fix *f;
7442
+ struct ufs_dev_info *dev_info = &hba->dev_info;
7443
+
7444
+ if (!fixups)
7445
+ return;
7446
+
7447
+ for (f = fixups; f->quirk; f++) {
7448
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
7449
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
7450
+ ((dev_info->model &&
7451
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
7452
+ !strcmp(f->model, UFS_ANY_MODEL)))
7453
+ hba->dev_quirks |= f->quirk;
7454
+ }
7455
+}
7456
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
7457
+
7458
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
7459
+{
7460
+ /* fix by general quirk table */
7461
+ ufshcd_fixup_dev_quirks(hba, ufs_fixups);
7462
+
7463
+ /* allow vendors to fix quirks */
7464
+ ufshcd_vops_fixup_dev_quirks(hba);
7465
+}
7466
+
7467
+static int ufs_get_device_desc(struct ufs_hba *hba)
63627468 {
63637469 int err;
6364
- size_t buff_len;
63657470 u8 model_index;
7471
+ u8 b_ufs_feature_sup;
63667472 u8 *desc_buf;
7473
+ struct ufs_dev_info *dev_info = &hba->dev_info;
63677474
6368
- buff_len = max_t(size_t, hba->desc_size.dev_desc,
6369
- QUERY_DESC_MAX_SIZE + 1);
6370
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
7475
+ desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
63717476 if (!desc_buf) {
63727477 err = -ENOMEM;
63737478 goto out;
63747479 }
63757480
6376
- err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
7481
+ err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
7482
+ hba->desc_size[QUERY_DESC_IDN_DEVICE]);
63777483 if (err) {
63787484 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
63797485 __func__, err);
....@@ -6384,47 +7490,61 @@
63847490 * getting vendor (manufacturerID) and Bank Index in big endian
63857491 * format
63867492 */
6387
- dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
7493
+ dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
63887494 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
7495
+
7496
+ /* getting Specification Version in big endian format */
7497
+ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
7498
+ desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
7499
+ b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
63897500
63907501 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
63917502
6392
- /* Zero-pad entire buffer for string termination. */
6393
- memset(desc_buf, 0, buff_len);
7503
+ if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
7504
+ (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
7505
+ bool hpb_en = false;
63947506
6395
- err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6396
- QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6397
- if (err) {
7507
+ ufshpb_get_dev_info(hba, desc_buf);
7508
+
7509
+ if (!ufshpb_is_legacy(hba))
7510
+ err = ufshcd_query_flag_retry(hba,
7511
+ UPIU_QUERY_OPCODE_READ_FLAG,
7512
+ QUERY_FLAG_IDN_HPB_EN, 0,
7513
+ &hpb_en);
7514
+
7515
+ if (ufshpb_is_legacy(hba) || (!err && hpb_en))
7516
+ dev_info->hpb_enabled = true;
7517
+ }
7518
+
7519
+ err = ufshcd_read_string_desc(hba, model_index,
7520
+ &dev_info->model, SD_ASCII_STD);
7521
+ if (err < 0) {
63987522 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
63997523 __func__, err);
64007524 goto out;
64017525 }
64027526
6403
- desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6404
- strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6405
- min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6406
- MAX_MODEL_LEN));
7527
+ ufs_fixup_device_setup(hba);
64077528
6408
- /* Null terminate the model string */
6409
- dev_desc->model[MAX_MODEL_LEN] = '\0';
7529
+ ufshcd_wb_probe(hba, desc_buf);
7530
+
7531
+ /*
7532
+ * ufshcd_read_string_desc returns size of the string
7533
+ * reset the error value
7534
+ */
7535
+ err = 0;
64107536
64117537 out:
64127538 kfree(desc_buf);
64137539 return err;
64147540 }
64157541
6416
-static void ufs_fixup_device_setup(struct ufs_hba *hba,
6417
- struct ufs_dev_desc *dev_desc)
7542
+static void ufs_put_device_desc(struct ufs_hba *hba)
64187543 {
6419
- struct ufs_dev_fix *f;
7544
+ struct ufs_dev_info *dev_info = &hba->dev_info;
64207545
6421
- for (f = ufs_fixups; f->quirk; f++) {
6422
- if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6423
- f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6424
- (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6425
- !strcmp(f->card.model, UFS_ANY_MODEL)))
6426
- hba->dev_quirks |= f->quirk;
6427
- }
7546
+ kfree(dev_info->model);
7547
+ dev_info->model = NULL;
64287548 }
64297549
64307550 /**
....@@ -6560,7 +7680,7 @@
65607680 peer_pa_tactivate_us = peer_pa_tactivate *
65617681 gran_to_us_table[peer_granularity - 1];
65627682
6563
- if (pa_tactivate_us > peer_pa_tactivate_us) {
7683
+ if (pa_tactivate_us >= peer_pa_tactivate_us) {
65647684 u32 new_peer_pa_tactivate;
65657685
65667686 new_peer_pa_tactivate = pa_tactivate_us /
....@@ -6581,101 +7701,222 @@
65817701 ufshcd_tune_pa_hibern8time(hba);
65827702 }
65837703
7704
+ ufshcd_vops_apply_dev_quirks(hba);
7705
+
65847706 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
65857707 /* set 1ms timeout for PA_TACTIVATE */
65867708 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
65877709
65887710 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
65897711 ufshcd_quirk_tune_host_pa_tactivate(hba);
6590
-
6591
- ufshcd_vops_apply_dev_quirks(hba);
65927712 }
65937713
65947714 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
65957715 {
6596
- int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6597
-
65987716 hba->ufs_stats.hibern8_exit_cnt = 0;
65997717 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6600
-
6601
- memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6602
- memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6603
- memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6604
- memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6605
- memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6606
-
66077718 hba->req_abort_count = 0;
66087719 }
66097720
6610
-static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
7721
+static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
66117722 {
66127723 int err;
7724
+ size_t buff_len;
7725
+ u8 *desc_buf;
66137726
6614
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6615
- &hba->desc_size.dev_desc);
6616
- if (err)
6617
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
7727
+ buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
7728
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
7729
+ if (!desc_buf) {
7730
+ err = -ENOMEM;
7731
+ goto out;
7732
+ }
66187733
6619
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6620
- &hba->desc_size.pwr_desc);
6621
- if (err)
6622
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
7734
+ err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
7735
+ desc_buf, buff_len);
7736
+ if (err) {
7737
+ dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
7738
+ __func__, err);
7739
+ goto out;
7740
+ }
66237741
6624
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6625
- &hba->desc_size.interc_desc);
6626
- if (err)
6627
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
7742
+ if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
7743
+ hba->dev_info.max_lu_supported = 32;
7744
+ else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
7745
+ hba->dev_info.max_lu_supported = 8;
66287746
6629
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6630
- &hba->desc_size.conf_desc);
6631
- if (err)
6632
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
7747
+ if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
7748
+ GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
7749
+ ufshpb_get_geo_info(hba, desc_buf);
66337750
6634
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6635
- &hba->desc_size.unit_desc);
6636
- if (err)
6637
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6638
-
6639
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6640
- &hba->desc_size.geom_desc);
6641
- if (err)
6642
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6643
- err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6644
- &hba->desc_size.hlth_desc);
6645
- if (err)
6646
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
7751
+out:
7752
+ kfree(desc_buf);
7753
+ return err;
66477754 }
66487755
6649
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
7756
+static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
7757
+ {19200000, REF_CLK_FREQ_19_2_MHZ},
7758
+ {26000000, REF_CLK_FREQ_26_MHZ},
7759
+ {38400000, REF_CLK_FREQ_38_4_MHZ},
7760
+ {52000000, REF_CLK_FREQ_52_MHZ},
7761
+ {0, REF_CLK_FREQ_INVAL},
7762
+};
7763
+
7764
+static enum ufs_ref_clk_freq
7765
+ufs_get_bref_clk_from_hz(unsigned long freq)
66507766 {
6651
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6652
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6653
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6654
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6655
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6656
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6657
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
7767
+ int i;
7768
+
7769
+ for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
7770
+ if (ufs_ref_clk_freqs[i].freq_hz == freq)
7771
+ return ufs_ref_clk_freqs[i].val;
7772
+
7773
+ return REF_CLK_FREQ_INVAL;
7774
+}
7775
+
7776
+void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
7777
+{
7778
+ unsigned long freq;
7779
+
7780
+ freq = clk_get_rate(refclk);
7781
+
7782
+ hba->dev_ref_clk_freq =
7783
+ ufs_get_bref_clk_from_hz(freq);
7784
+
7785
+ if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
7786
+ dev_err(hba->dev,
7787
+ "invalid ref_clk setting = %ld\n", freq);
7788
+}
7789
+
7790
+static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
7791
+{
7792
+ int err;
7793
+ u32 ref_clk;
7794
+ u32 freq = hba->dev_ref_clk_freq;
7795
+
7796
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
7797
+ QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
7798
+
7799
+ if (err) {
7800
+ dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
7801
+ err);
7802
+ goto out;
7803
+ }
7804
+
7805
+ if (ref_clk == freq)
7806
+ goto out; /* nothing to update */
7807
+
7808
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7809
+ QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
7810
+
7811
+ if (err) {
7812
+ dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
7813
+ ufs_ref_clk_freqs[freq].freq_hz);
7814
+ goto out;
7815
+ }
7816
+
7817
+ dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
7818
+ ufs_ref_clk_freqs[freq].freq_hz);
7819
+
7820
+out:
7821
+ return err;
7822
+}
7823
+
7824
+static int ufshcd_device_params_init(struct ufs_hba *hba)
7825
+{
7826
+ bool flag;
7827
+ int ret, i;
7828
+
7829
+ /* Init device descriptor sizes */
7830
+ for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
7831
+ hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
7832
+
7833
+ /* Init UFS geometry descriptor related parameters */
7834
+ ret = ufshcd_device_geo_params_init(hba);
7835
+ if (ret)
7836
+ goto out;
7837
+
7838
+ /* Check and apply UFS device quirks */
7839
+ ret = ufs_get_device_desc(hba);
7840
+ if (ret) {
7841
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
7842
+ __func__, ret);
7843
+ goto out;
7844
+ }
7845
+
7846
+ ufshcd_get_ref_clk_gating_wait(hba);
7847
+
7848
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
7849
+ QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
7850
+ hba->dev_info.f_power_on_wp_en = flag;
7851
+
7852
+ /* Probe maximum power mode co-supported by both UFS host and device */
7853
+ if (ufshcd_get_max_pwr_mode(hba))
7854
+ dev_err(hba->dev,
7855
+ "%s: Failed getting max supported power mode\n",
7856
+ __func__);
7857
+out:
7858
+ return ret;
7859
+}
7860
+
7861
+/**
7862
+ * ufshcd_add_lus - probe and add UFS logical units
7863
+ * @hba: per-adapter instance
7864
+ */
7865
+static int ufshcd_add_lus(struct ufs_hba *hba)
7866
+{
7867
+ int ret;
7868
+
7869
+ /* Add required well known logical units to scsi mid layer */
7870
+ ret = ufshcd_scsi_add_wlus(hba);
7871
+ if (ret)
7872
+ goto out;
7873
+
7874
+ /* Initialize devfreq after UFS device is detected */
7875
+ if (ufshcd_is_clkscaling_supported(hba)) {
7876
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
7877
+ &hba->pwr_info,
7878
+ sizeof(struct ufs_pa_layer_attr));
7879
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
7880
+ hba->clk_scaling.is_allowed = true;
7881
+
7882
+ ret = ufshcd_devfreq_init(hba);
7883
+ if (ret)
7884
+ goto out;
7885
+
7886
+ hba->clk_scaling.is_enabled = true;
7887
+ ufshcd_init_clk_scaling_sysfs(hba);
7888
+ }
7889
+
7890
+ ufs_bsg_probe(hba);
7891
+ ufshpb_init(hba);
7892
+ scsi_scan_host(hba->host);
7893
+ pm_runtime_put_sync(hba->dev);
7894
+
7895
+out:
7896
+ return ret;
66587897 }
66597898
66607899 /**
66617900 * ufshcd_probe_hba - probe hba to detect device and initialize
66627901 * @hba: per-adapter instance
7902
+ * @async: asynchronous execution or not
66637903 *
66647904 * Execute link-startup and verify device initialization
66657905 */
6666
-static int ufshcd_probe_hba(struct ufs_hba *hba)
7906
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
66677907 {
6668
- struct ufs_dev_desc card = {0};
66697908 int ret;
7909
+ unsigned long flags;
66707910 ktime_t start = ktime_get();
7911
+
7912
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
66717913
66727914 ret = ufshcd_link_startup(hba);
66737915 if (ret)
66747916 goto out;
66757917
6676
- /* set the default level for urgent bkops */
6677
- hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6678
- hba->is_urgent_bkops_lvl_checked = false;
7918
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_INTERFACE_CONFIGURATION)
7919
+ goto out;
66797920
66807921 /* Debug counters initialization */
66817922 ufshcd_clear_dbg_ufs_stats(hba);
....@@ -6683,108 +7924,71 @@
66837924 /* UniPro link is active now */
66847925 ufshcd_set_link_active(hba);
66857926
6686
- /* Enable Auto-Hibernate if configured */
6687
- ufshcd_auto_hibern8_enable(hba);
6688
-
7927
+ /* Verify device initialization by sending NOP OUT UPIU */
66897928 ret = ufshcd_verify_dev_init(hba);
66907929 if (ret)
66917930 goto out;
66927931
7932
+ /* Initiate UFS initialization, and wait until it completes */
66937933 ret = ufshcd_complete_dev_init(hba);
66947934 if (ret)
66957935 goto out;
66967936
6697
- /* Init check for device descriptor sizes */
6698
- ufshcd_init_desc_sizes(hba);
6699
-
6700
- ret = ufs_get_device_desc(hba, &card);
6701
- if (ret) {
6702
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6703
- __func__, ret);
6704
- goto out;
7937
+ /*
7938
+ * Initialize UFS device parameters used by driver, these
7939
+ * parameters are associated with UFS descriptors.
7940
+ */
7941
+ if (async) {
7942
+ ret = ufshcd_device_params_init(hba);
7943
+ if (ret)
7944
+ goto out;
67057945 }
67067946
6707
- ufs_fixup_device_setup(hba, &card);
67087947 ufshcd_tune_unipro_params(hba);
6709
-
6710
- ret = ufshcd_set_vccq_rail_unused(hba,
6711
- (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6712
- if (ret)
6713
- goto out;
67147948
67157949 /* UFS device is also active now */
67167950 ufshcd_set_ufs_dev_active(hba);
67177951 ufshcd_force_reset_auto_bkops(hba);
6718
- hba->wlun_dev_clr_ua = true;
67197952
6720
- if (ufshcd_get_max_pwr_mode(hba)) {
6721
- dev_err(hba->dev,
6722
- "%s: Failed getting max supported power mode\n",
6723
- __func__);
6724
- } else {
7953
+ /* Gear up to HS gear if supported */
7954
+ if (hba->max_pwr_info.is_valid) {
7955
+ /*
7956
+ * Set the right value to bRefClkFreq before attempting to
7957
+ * switch to HS gears.
7958
+ */
7959
+ if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
7960
+ ufshcd_set_dev_ref_clk(hba);
67257961 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
67267962 if (ret) {
67277963 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
67287964 __func__, ret);
67297965 goto out;
67307966 }
7967
+ ufshcd_print_pwr_info(hba);
67317968 }
6732
-
6733
- /* set the state as operational after switching to desired gear */
6734
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
67357969
67367970 /*
6737
- * If we are in error handling context or in power management callbacks
6738
- * context, no need to scan the host
7971
+ * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
7972
+ * and for removable UFS card as well, hence always set the parameter.
7973
+ * Note: Error handler may issue the device reset hence resetting
7974
+ * bActiveICCLevel as well so it is always safe to set this here.
67397975 */
6740
- if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6741
- bool flag;
7976
+ ufshcd_set_active_icc_lvl(hba);
67427977
6743
- /* clear any previous UFS device information */
6744
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6745
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6746
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6747
- hba->dev_info.f_power_on_wp_en = flag;
7978
+ ufshcd_wb_config(hba);
7979
+ /* Enable Auto-Hibernate if configured */
7980
+ ufshcd_auto_hibern8_enable(hba);
67487981
6749
- if (!hba->is_init_prefetch)
6750
- ufshcd_init_icc_levels(hba);
7982
+ ufshpb_reset(hba);
67517983
6752
- /* Add required well known logical units to scsi mid layer */
6753
- ret = ufshcd_scsi_add_wlus(hba);
6754
- if (ret)
6755
- goto out;
6756
-
6757
- /* Initialize devfreq after UFS device is detected */
6758
- if (ufshcd_is_clkscaling_supported(hba)) {
6759
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
6760
- &hba->pwr_info,
6761
- sizeof(struct ufs_pa_layer_attr));
6762
- hba->clk_scaling.saved_pwr_info.is_valid = true;
6763
- if (!hba->devfreq) {
6764
- ret = ufshcd_devfreq_init(hba);
6765
- if (ret)
6766
- goto out;
6767
- }
6768
- hba->clk_scaling.is_allowed = true;
6769
- }
6770
-
6771
- scsi_scan_host(hba->host);
6772
- pm_runtime_put_sync(hba->dev);
6773
- }
6774
-
6775
- if (!hba->is_init_prefetch)
6776
- hba->is_init_prefetch = true;
6777
-
7984
+ trace_android_rvh_ufs_complete_init(hba);
67787985 out:
6779
- /*
6780
- * If we failed to initialize the device or the device is not
6781
- * present, turn off the power/clocks etc.
6782
- */
6783
- if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6784
- pm_runtime_put_sync(hba->dev);
6785
- ufshcd_exit_clk_scaling(hba);
6786
- ufshcd_hba_exit(hba);
6787
- }
7986
+ spin_lock_irqsave(hba->host->host_lock, flags);
7987
+ if (ret)
7988
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
7989
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7990
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7991
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
67887992
67897993 trace_ufshcd_init(dev_name(hba->dev), ret,
67907994 ktime_to_us(ktime_sub(ktime_get(), start)),
....@@ -6800,49 +8004,46 @@
68008004 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
68018005 {
68028006 struct ufs_hba *hba = (struct ufs_hba *)data;
8007
+ int ret;
68038008
6804
- ufshcd_probe_hba(hba);
6805
-}
8009
+ down(&hba->host_sem);
8010
+ /* Initialize hba, detect and initialize UFS device */
8011
+ ret = ufshcd_probe_hba(hba, true);
8012
+ up(&hba->host_sem);
8013
+ if (ret)
8014
+ goto out;
68068015
6807
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6808
-{
6809
- unsigned long flags;
6810
- struct Scsi_Host *host;
6811
- struct ufs_hba *hba;
6812
- int index;
6813
- bool found = false;
6814
-
6815
- if (!scmd || !scmd->device || !scmd->device->host)
6816
- return BLK_EH_DONE;
6817
-
6818
- host = scmd->device->host;
6819
- hba = shost_priv(host);
6820
- if (!hba)
6821
- return BLK_EH_DONE;
6822
-
6823
- spin_lock_irqsave(host->host_lock, flags);
6824
-
6825
- for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6826
- if (hba->lrb[index].cmd == scmd) {
6827
- found = true;
6828
- break;
6829
- }
6830
- }
6831
-
6832
- spin_unlock_irqrestore(host->host_lock, flags);
6833
-
8016
+ /* Probe and add UFS logical units */
8017
+ ret = ufshcd_add_lus(hba);
8018
+out:
68348019 /*
6835
- * Bypass SCSI error handling and reset the block layer timer if this
6836
- * SCSI command was not actually dispatched to UFS driver, otherwise
6837
- * let SCSI layer handle the error as usual.
8020
+ * If we failed to initialize the device or the device is not
8021
+ * present, turn off the power/clocks etc.
68388022 */
6839
- return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
8023
+ if (ret) {
8024
+ pm_runtime_put_sync(hba->dev);
8025
+ ufshcd_hba_exit(hba);
8026
+ }
68408027 }
68418028
68428029 static const struct attribute_group *ufshcd_driver_groups[] = {
68438030 &ufs_sysfs_unit_descriptor_group,
68448031 &ufs_sysfs_lun_attributes_group,
8032
+#ifdef CONFIG_SCSI_UFS_HPB
8033
+ &ufs_sysfs_hpb_stat_group,
8034
+ &ufs_sysfs_hpb_param_group,
8035
+#endif
68458036 NULL,
8037
+};
8038
+
8039
+static struct ufs_hba_variant_params ufs_hba_vps = {
8040
+ .hba_enable_delay_us = 1000,
8041
+ .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
8042
+ .devfreq_profile.polling_ms = 100,
8043
+ .devfreq_profile.target = ufshcd_devfreq_target,
8044
+ .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
8045
+ .ondemand_data.upthreshold = 70,
8046
+ .ondemand_data.downdifferential = 5,
68468047 };
68478048
68488049 static struct scsi_host_template ufshcd_driver_template = {
....@@ -6857,14 +8058,15 @@
68578058 .eh_abort_handler = ufshcd_abort,
68588059 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
68598060 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
6860
- .eh_timed_out = ufshcd_eh_timed_out,
68618061 .this_id = -1,
68628062 .sg_tablesize = SG_ALL,
68638063 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
68648064 .can_queue = UFSHCD_CAN_QUEUE,
8065
+ .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
68658066 .max_host_blocked = 1,
68668067 .track_queue_depth = 1,
68678068 .sdev_groups = ufshcd_driver_groups,
8069
+ .dma_boundary = PAGE_SIZE - 1,
68688070 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
68698071 };
68708072
....@@ -6897,13 +8099,7 @@
68978099 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
68988100 struct ufs_vreg *vreg)
68998101 {
6900
- if (!vreg)
6901
- return 0;
6902
- else if (vreg->unused)
6903
- return 0;
6904
- else
6905
- return ufshcd_config_vreg_load(hba->dev, vreg,
6906
- UFS_VREG_LPM_LOAD_UA);
8102
+ return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
69078103 }
69088104
69098105 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
....@@ -6911,10 +8107,8 @@
69118107 {
69128108 if (!vreg)
69138109 return 0;
6914
- else if (vreg->unused)
6915
- return 0;
6916
- else
6917
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
8110
+
8111
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
69188112 }
69198113
69208114 static int ufshcd_config_vreg(struct device *dev,
....@@ -6931,21 +8125,19 @@
69318125 name = vreg->name;
69328126
69338127 if (regulator_count_voltages(reg) > 0) {
6934
- if (vreg->min_uV && vreg->max_uV) {
6935
- min_uV = on ? vreg->min_uV : 0;
6936
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6937
- if (ret) {
6938
- dev_err(dev,
6939
- "%s: %s set voltage failed, err=%d\n",
6940
- __func__, name, ret);
6941
- goto out;
6942
- }
6943
- }
6944
-
69458128 uA_load = on ? vreg->max_uA : 0;
69468129 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
69478130 if (ret)
69488131 goto out;
8132
+
8133
+ if (vreg->min_uV && vreg->max_uV) {
8134
+ min_uV = on ? vreg->min_uV : 0;
8135
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
8136
+ if (ret)
8137
+ dev_err(dev,
8138
+ "%s: %s set voltage failed, err=%d\n",
8139
+ __func__, name, ret);
8140
+ }
69498141 }
69508142 out:
69518143 return ret;
....@@ -6955,9 +8147,7 @@
69558147 {
69568148 int ret = 0;
69578149
6958
- if (!vreg)
6959
- goto out;
6960
- else if (vreg->enabled || vreg->unused)
8150
+ if (!vreg || vreg->enabled)
69618151 goto out;
69628152
69638153 ret = ufshcd_config_vreg(dev, vreg, true);
....@@ -6977,9 +8167,7 @@
69778167 {
69788168 int ret = 0;
69798169
6980
- if (!vreg)
6981
- goto out;
6982
- else if (!vreg->enabled || vreg->unused)
8170
+ if (!vreg || !vreg->enabled || vreg->always_on)
69838171 goto out;
69848172
69858173 ret = regulator_disable(vreg->reg);
....@@ -7002,9 +8190,6 @@
70028190 struct device *dev = hba->dev;
70038191 struct ufs_vreg_info *info = &hba->vreg_info;
70048192
7005
- if (!info)
7006
- goto out;
7007
-
70088193 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
70098194 if (ret)
70108195 goto out;
....@@ -7014,8 +8199,6 @@
70148199 goto out;
70158200
70168201 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7017
- if (ret)
7018
- goto out;
70198202
70208203 out:
70218204 if (ret) {
....@@ -7030,10 +8213,7 @@
70308213 {
70318214 struct ufs_vreg_info *info = &hba->vreg_info;
70328215
7033
- if (info)
7034
- return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7035
-
7036
- return 0;
8216
+ return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
70378217 }
70388218
70398219 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
....@@ -7059,18 +8239,13 @@
70598239 struct device *dev = hba->dev;
70608240 struct ufs_vreg_info *info = &hba->vreg_info;
70618241
7062
- if (!info)
7063
- goto out;
7064
-
70658242 ret = ufshcd_get_vreg(dev, info->vcc);
70668243 if (ret)
70678244 goto out;
70688245
70698246 ret = ufshcd_get_vreg(dev, info->vccq);
7070
- if (ret)
7071
- goto out;
7072
-
7073
- ret = ufshcd_get_vreg(dev, info->vccq2);
8247
+ if (!ret)
8248
+ ret = ufshcd_get_vreg(dev, info->vccq2);
70748249 out:
70758250 return ret;
70768251 }
....@@ -7085,38 +8260,7 @@
70858260 return 0;
70868261 }
70878262
7088
-static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
7089
-{
7090
- int ret = 0;
7091
- struct ufs_vreg_info *info = &hba->vreg_info;
7092
-
7093
- if (!info)
7094
- goto out;
7095
- else if (!info->vccq)
7096
- goto out;
7097
-
7098
- if (unused) {
7099
- /* shut off the rail here */
7100
- ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7101
- /*
7102
- * Mark this rail as no longer used, so it doesn't get enabled
7103
- * later by mistake
7104
- */
7105
- if (!ret)
7106
- info->vccq->unused = true;
7107
- } else {
7108
- /*
7109
- * rail should have been already enabled hence just make sure
7110
- * that unused flag is cleared.
7111
- */
7112
- info->vccq->unused = false;
7113
- }
7114
-out:
7115
- return ret;
7116
-}
7117
-
7118
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7119
- bool skip_ref_clk)
8263
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
71208264 {
71218265 int ret = 0;
71228266 struct ufs_clk_info *clki;
....@@ -7128,20 +8272,18 @@
71288272 if (list_empty(head))
71298273 goto out;
71308274
7131
- /*
7132
- * vendor specific setup_clocks ops may depend on clocks managed by
7133
- * this standard driver hence call the vendor specific setup_clocks
7134
- * before disabling the clocks managed here.
7135
- */
7136
- if (!on) {
7137
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7138
- if (ret)
7139
- return ret;
7140
- }
8275
+ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
8276
+ if (ret)
8277
+ return ret;
71418278
71428279 list_for_each_entry(clki, head, list) {
71438280 if (!IS_ERR_OR_NULL(clki->clk)) {
7144
- if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
8281
+ /*
8282
+ * Don't disable clocks which are needed
8283
+ * to keep the link active.
8284
+ */
8285
+ if (ufshcd_is_link_active(hba) &&
8286
+ clki->keep_link_active)
71458287 continue;
71468288
71478289 clk_state_changed = on ^ clki->enabled;
....@@ -7161,16 +8303,9 @@
71618303 }
71628304 }
71638305
7164
- /*
7165
- * vendor specific setup_clocks ops may depend on clocks managed by
7166
- * this standard driver hence call the vendor specific setup_clocks
7167
- * after enabling the clocks managed here.
7168
- */
7169
- if (on) {
7170
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7171
- if (ret)
7172
- return ret;
7173
- }
8306
+ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
8307
+ if (ret)
8308
+ return ret;
71748309
71758310 out:
71768311 if (ret) {
....@@ -7191,11 +8326,6 @@
71918326 (on ? "on" : "off"),
71928327 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
71938328 return ret;
7194
-}
7195
-
7196
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7197
-{
7198
- return __ufshcd_setup_clocks(hba, on, false);
71998329 }
72008330
72018331 static int ufshcd_init_clocks(struct ufs_hba *hba)
....@@ -7219,6 +8349,14 @@
72198349 __func__, clki->name, ret);
72208350 goto out;
72218351 }
8352
+
8353
+ /*
8354
+ * Parse device ref clk freq as per device tree "ref_clk".
8355
+ * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
8356
+ * in ufshcd_alloc_host().
8357
+ */
8358
+ if (!strcmp(clki->name, "ref_clk"))
8359
+ ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
72228360
72238361 if (clki->max_freq) {
72248362 ret = clk_set_rate(clki->clk, clki->max_freq);
....@@ -7250,12 +8388,7 @@
72508388
72518389 err = ufshcd_vops_setup_regulators(hba, true);
72528390 if (err)
7253
- goto out_exit;
7254
-
7255
- goto out;
7256
-
7257
-out_exit:
7258
- ufshcd_vops_exit(hba);
8391
+ ufshcd_vops_exit(hba);
72598392 out:
72608393 if (err)
72618394 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
....@@ -7312,6 +8445,8 @@
73128445 if (err)
73138446 goto out_disable_vreg;
73148447
8448
+ ufs_debugfs_hba_init(hba);
8449
+
73158450 hba->is_powered = true;
73168451 goto out;
73178452
....@@ -7328,45 +8463,18 @@
73288463 static void ufshcd_hba_exit(struct ufs_hba *hba)
73298464 {
73308465 if (hba->is_powered) {
8466
+ ufshcd_exit_clk_scaling(hba);
8467
+ ufshcd_exit_clk_gating(hba);
8468
+ if (hba->eh_wq)
8469
+ destroy_workqueue(hba->eh_wq);
8470
+ ufs_debugfs_hba_exit(hba);
73318471 ufshcd_variant_hba_exit(hba);
73328472 ufshcd_setup_vreg(hba, false);
7333
- ufshcd_suspend_clkscaling(hba);
7334
- if (ufshcd_is_clkscaling_supported(hba))
7335
- if (hba->devfreq)
7336
- ufshcd_suspend_clkscaling(hba);
73378473 ufshcd_setup_clocks(hba, false);
73388474 ufshcd_setup_hba_vreg(hba, false);
73398475 hba->is_powered = false;
8476
+ ufs_put_device_desc(hba);
73408477 }
7341
-}
7342
-
7343
-static int
7344
-ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7345
-{
7346
- unsigned char cmd[6] = {REQUEST_SENSE,
7347
- 0,
7348
- 0,
7349
- 0,
7350
- UFSHCD_REQ_SENSE_SIZE,
7351
- 0};
7352
- char *buffer;
7353
- int ret;
7354
-
7355
- buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7356
- if (!buffer) {
7357
- ret = -ENOMEM;
7358
- goto out;
7359
- }
7360
-
7361
- ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7362
- UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7363
- msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7364
- if (ret)
7365
- pr_err("%s: failed with err %d\n", __func__, ret);
7366
-
7367
- kfree(buffer);
7368
-out:
7369
- return ret;
73708478 }
73718479
73728480 /**
....@@ -7385,7 +8493,7 @@
73858493 struct scsi_sense_hdr sshdr;
73868494 struct scsi_device *sdp;
73878495 unsigned long flags;
7388
- int ret;
8496
+ int ret, retries;
73898497
73908498 spin_lock_irqsave(hba->host->host_lock, flags);
73918499 sdp = hba->sdev_ufs_device;
....@@ -7410,13 +8518,6 @@
74108518 * handling context.
74118519 */
74128520 hba->host->eh_noresume = 1;
7413
- if (hba->wlun_dev_clr_ua) {
7414
- ret = ufshcd_send_request_sense(hba, sdp);
7415
- if (ret)
7416
- goto out;
7417
- /* Unit attention condition is cleared now */
7418
- hba->wlun_dev_clr_ua = false;
7419
- }
74208521
74218522 cmd[4] = pwr_mode << 4;
74228523
....@@ -7425,8 +8526,14 @@
74258526 * callbacks hence set the RQF_PM flag so that it doesn't resume the
74268527 * already suspended children.
74278528 */
7428
- ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7429
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8529
+ for (retries = 3; retries > 0; --retries) {
8530
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
8531
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
8532
+ if (!scsi_status_is_check_condition(ret) ||
8533
+ !scsi_sense_valid(&sshdr) ||
8534
+ sshdr.sense_key != UNIT_ATTENTION)
8535
+ break;
8536
+ }
74308537 if (ret) {
74318538 sdev_printk(KERN_WARNING, sdp,
74328539 "START_STOP failed for power mode: %d, result %x\n",
....@@ -7437,7 +8544,7 @@
74378544
74388545 if (!ret)
74398546 hba->curr_dev_pwr_mode = pwr_mode;
7440
-out:
8547
+
74418548 scsi_device_put(sdp);
74428549 hba->host->eh_noresume = 0;
74438550 return ret;
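The retry loop added above replaces the old one-shot scsi_execute() call: a device that was just powered or reset may answer the first START STOP UNIT with CHECK CONDITION / UNIT ATTENTION, which is now absorbed by retrying up to three times. The same pattern, pulled out into a hypothetical helper (sdp, cmd and sshdr prepared exactly as in ufshcd_set_dev_pwr_mode() above):

static int ufshcd_execute_start_stop(struct scsi_device *sdp,
				     const unsigned char *cmd,
				     struct scsi_sense_hdr *sshdr)
{
	int ret, retries;

	for (retries = 3; retries > 0; --retries) {
		ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
				   START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
		/*
		 * Retry only a CHECK CONDITION carrying UNIT ATTENTION;
		 * any other status or sense data is reported to the caller.
		 */
		if (!scsi_status_is_check_condition(ret) ||
		    !scsi_sense_valid(sshdr) ||
		    sshdr->sense_key != UNIT_ATTENTION)
			break;
	}
	return ret;
}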
....@@ -7454,18 +8561,20 @@
74548561
74558562 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
74568563 ret = ufshcd_uic_hibern8_enter(hba);
7457
- if (!ret)
8564
+ if (!ret) {
74588565 ufshcd_set_link_hibern8(hba);
7459
- else
8566
+ } else {
8567
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8568
+ __func__, ret);
74608569 goto out;
8570
+ }
74618571 }
74628572 /*
74638573 * If autobkops is enabled, link can't be turned off because
74648574 * turning off the link would also turn off the device.
74658575 */
74668576 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7467
- (!check_for_bkops || (check_for_bkops &&
7468
- !hba->auto_bkops_enabled))) {
8577
+ (!check_for_bkops || !hba->auto_bkops_enabled)) {
74698578 /*
74708579 * Let's make sure that link is in low power mode, we are doing
74718580 * this currently by putting the link in Hibern8. Another way to
....@@ -7474,13 +8583,16 @@
74748583 * unipro. But putting the link in hibern8 is much faster.
74758584 */
74768585 ret = ufshcd_uic_hibern8_enter(hba);
7477
- if (ret)
8586
+ if (ret) {
8587
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
8588
+ __func__, ret);
74788589 goto out;
8590
+ }
74798591 /*
74808592 * Change controller state to "reset state" which
74818593 * should also put the link in off/reset state
74828594 */
7483
- ufshcd_hba_stop(hba, true);
8595
+ ufshcd_hba_stop(hba);
74848596 /*
74858597 * TODO: Check if we need any delay to make sure that
74868598 * controller is reset
....@@ -7494,6 +8606,8 @@
74948606
74958607 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
74968608 {
8609
+ bool vcc_off = false;
8610
+
74978611 /*
74988612 * It seems some UFS devices may keep drawing more than sleep current
74998613 * (at least for 500us) from UFS rails (especially from VCCQ rail).
....@@ -7515,17 +8629,29 @@
75158629 *
75168630 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
75178631 * in low power state which would save some power.
8632
+ *
8633
+ * If Write Booster is enabled and the device needs to flush the WB
8634
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
75188635 */
75198636 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
75208637 !hba->dev_info.is_lu_power_on_wp) {
75218638 ufshcd_setup_vreg(hba, false);
8639
+ vcc_off = true;
75228640 } else if (!ufshcd_is_ufs_dev_active(hba)) {
75238641 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7524
- if (!ufshcd_is_link_active(hba)) {
8642
+ vcc_off = true;
8643
+ if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
75258644 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
75268645 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
75278646 }
75288647 }
8648
+
8649
+ /*
8650
+ * Some UFS devices require a delay after the VCC power rail is turned off.
8651
+ */
8652
+ if (vcc_off && hba->vreg_info.vcc &&
8653
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
8654
+ usleep_range(5000, 5100);
75298655 }
75308656
75318657 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
....@@ -7536,7 +8662,7 @@
75368662 !hba->dev_info.is_lu_power_on_wp) {
75378663 ret = ufshcd_setup_vreg(hba, true);
75388664 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7539
- if (!ret && !ufshcd_is_link_active(hba)) {
8665
+ if (!ufshcd_is_link_active(hba)) {
75408666 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
75418667 if (ret)
75428668 goto vcc_disable;
....@@ -7558,13 +8684,13 @@
75588684
75598685 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
75608686 {
7561
- if (ufshcd_is_link_off(hba))
8687
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
75628688 ufshcd_setup_hba_vreg(hba, false);
75638689 }
75648690
75658691 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
75668692 {
7567
- if (ufshcd_is_link_off(hba))
8693
+ if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
75688694 ufshcd_setup_hba_vreg(hba, true);
75698695 }
75708696
....@@ -7602,9 +8728,7 @@
76028728 req_link_state = UIC_LINK_OFF_STATE;
76038729 }
76048730
7605
- ret = ufshcd_crypto_suspend(hba, pm_op);
7606
- if (ret)
7607
- goto out;
8731
+ ufshpb_suspend(hba);
76088732
76098733 /*
76108734 * If we can't transition into any of the low power modes
....@@ -7613,11 +8737,8 @@
76138737 ufshcd_hold(hba, false);
76148738 hba->clk_gating.is_suspended = true;
76158739
7616
- if (hba->clk_scaling.is_allowed) {
7617
- cancel_work_sync(&hba->clk_scaling.suspend_work);
7618
- cancel_work_sync(&hba->clk_scaling.resume_work);
7619
- ufshcd_suspend_clkscaling(hba);
7620
- }
8740
+ if (ufshcd_is_clkscaling_supported(hba))
8741
+ ufshcd_clk_scaling_suspend(hba, true);
76218742
76228743 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
76238744 req_link_state == UIC_LINK_ACTIVE_STATE) {
....@@ -7648,23 +8769,38 @@
76488769 /* make sure that auto bkops is disabled */
76498770 ufshcd_disable_auto_bkops(hba);
76508771 }
8772
+ /*
8773
+ * If device needs to do BKOP or WB buffer flush during
8774
+ * Hibern8, keep device power mode as "active power mode"
8775
+ * and keep the VCC supply on.
8776
+ */
8777
+ hba->dev_info.b_rpm_dev_flush_capable =
8778
+ hba->auto_bkops_enabled ||
8779
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
8780
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
8781
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
8782
+ ufshcd_wb_need_flush(hba));
76518783 }
76528784
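The assignment above packs the "keep the device active for background work" decision into one expression. Restated as a hypothetical predicate (the helper name is illustrative; only fields and helpers already visible in this hunk are used), which may read more easily than the nested condition:

static bool ufshcd_dev_needs_flush_in_suspend(struct ufs_hba *hba,
					      enum uic_link_state req_link_state)
{
	/* The link idles in Hibern8 either explicitly or via auto-hibern8. */
	bool link_idles_in_hibern8 =
		req_link_state == UIC_LINK_HIBERN8_STATE ||
		(req_link_state == UIC_LINK_ACTIVE_STATE &&
		 ufshcd_is_auto_hibern8_enabled(hba));

	/*
	 * Keep the device (and VCC) up if BKOPS is still running, or if the
	 * WriteBooster buffer needs flushing while the link idles.
	 */
	return hba->auto_bkops_enabled ||
	       (link_idles_in_hibern8 && ufshcd_wb_need_flush(hba));
}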
7653
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7654
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7655
- !ufshcd_is_runtime_pm(pm_op))) {
7656
- /* ensure that bkops is disabled */
7657
- ufshcd_disable_auto_bkops(hba);
7658
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7659
- if (ret)
7660
- goto enable_gating;
8785
+ flush_work(&hba->eeh_work);
8786
+
8787
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
8788
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
8789
+ !ufshcd_is_runtime_pm(pm_op)) {
8790
+ /* ensure that bkops is disabled */
8791
+ ufshcd_disable_auto_bkops(hba);
8792
+ }
8793
+
8794
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
8795
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
8796
+ if (ret)
8797
+ goto enable_gating;
8798
+ }
76618799 }
76628800
76638801 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
76648802 if (ret)
76658803 goto set_dev_active;
7666
-
7667
- ufshcd_vreg_set_lpm(hba);
76688804
76698805 disable_clks:
76708806 /*
....@@ -7675,27 +8811,27 @@
76758811 ret = ufshcd_vops_suspend(hba, pm_op);
76768812 if (ret)
76778813 goto set_link_active;
7678
-
7679
- if (!ufshcd_is_link_active(hba))
7680
- ufshcd_setup_clocks(hba, false);
7681
- else
7682
- /* If link is active, device ref_clk can't be switched off */
7683
- __ufshcd_setup_clocks(hba, false, true);
7684
-
7685
- hba->clk_gating.state = CLKS_OFF;
7686
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
76878814 /*
76888815 * Disable the host irq as there won't be any
76898816 * host controller transaction expected till resume.
76908817 */
76918818 ufshcd_disable_irq(hba);
8819
+
8820
+ ufshcd_setup_clocks(hba, false);
8821
+
8822
+ if (ufshcd_is_clkgating_allowed(hba)) {
8823
+ hba->clk_gating.state = CLKS_OFF;
8824
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
8825
+ hba->clk_gating.state);
8826
+ }
8827
+
8828
+ ufshcd_vreg_set_lpm(hba);
8829
+
76928830 /* Put the host controller in low power mode if possible */
76938831 ufshcd_hba_vreg_set_lpm(hba);
76948832 goto out;
76958833
76968834 set_link_active:
7697
- if (hba->clk_scaling.is_allowed)
7698
- ufshcd_resume_clkscaling(hba);
76998835 ufshcd_vreg_set_hpm(hba);
77008836 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
77018837 ufshcd_set_link_active(hba);
....@@ -7705,13 +8841,23 @@
77058841 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
77068842 ufshcd_disable_auto_bkops(hba);
77078843 enable_gating:
7708
- if (hba->clk_scaling.is_allowed)
7709
- ufshcd_resume_clkscaling(hba);
8844
+ if (ufshcd_is_clkscaling_supported(hba))
8845
+ ufshcd_clk_scaling_suspend(hba, false);
8846
+
77108847 hba->clk_gating.is_suspended = false;
8848
+ hba->dev_info.b_rpm_dev_flush_capable = false;
77118849 ufshcd_release(hba);
7712
- ufshcd_crypto_resume(hba, pm_op);
8850
+ ufshpb_resume(hba);
77138851 out:
8852
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
8853
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
8854
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
8855
+ }
8856
+
77148857 hba->pm_op_in_progress = 0;
8858
+
8859
+ if (ret)
8860
+ ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
77158861 return ret;
77168862 }
77178863
....@@ -7729,26 +8875,22 @@
77298875 {
77308876 int ret;
77318877 enum uic_link_state old_link_state;
7732
- enum ufs_dev_pwr_mode old_pwr_mode;
77338878
77348879 hba->pm_op_in_progress = 1;
77358880 old_link_state = hba->uic_link_state;
7736
- old_pwr_mode = hba->curr_dev_pwr_mode;
77378881
77388882 ufshcd_hba_vreg_set_hpm(hba);
7739
- /* Make sure clocks are enabled before accessing controller */
7740
- ret = ufshcd_setup_clocks(hba, true);
8883
+ ret = ufshcd_vreg_set_hpm(hba);
77418884 if (ret)
77428885 goto out;
77438886
7744
- /* enable the host irq as host controller would be active soon */
7745
- ret = ufshcd_enable_irq(hba);
8887
+ /* Make sure clocks are enabled before accessing controller */
8888
+ ret = ufshcd_setup_clocks(hba, true);
77468889 if (ret)
7747
- goto disable_irq_and_vops_clks;
8890
+ goto disable_vreg;
77488891
7749
- ret = ufshcd_vreg_set_hpm(hba);
7750
- if (ret)
7751
- goto disable_irq_and_vops_clks;
8892
+ /* enable the host irq as host controller would be active soon */
8893
+ ufshcd_enable_irq(hba);
77528894
77538895 /*
77548896 * Call vendor specific resume callback. As these callbacks may access
....@@ -7757,18 +8899,25 @@
77578899 */
77588900 ret = ufshcd_vops_resume(hba, pm_op);
77598901 if (ret)
7760
- goto disable_vreg;
8902
+ goto disable_irq_and_vops_clks;
77618903
77628904 if (ufshcd_is_link_hibern8(hba)) {
77638905 ret = ufshcd_uic_hibern8_exit(hba);
7764
- if (!ret)
8906
+ if (!ret) {
77658907 ufshcd_set_link_active(hba);
7766
- else
8908
+ } else {
8909
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
8910
+ __func__, ret);
77678911 goto vendor_suspend;
8912
+ }
77688913 } else if (ufshcd_is_link_off(hba)) {
7769
- ret = ufshcd_host_reset_and_restore(hba);
77708914 /*
7771
- * ufshcd_host_reset_and_restore() should have already
8915
+ * A full initialization of the host and the device is
8916
+ * required since the link was put to off during suspend.
8917
+ */
8918
+ ret = ufshcd_reset_and_restore(hba);
8919
+ /*
8920
+ * ufshcd_reset_and_restore() should have already
77728921 * set the link state as active
77738922 */
77748923 if (ret || !ufshcd_is_link_active(hba))
....@@ -7781,10 +8930,6 @@
77818930 goto set_old_link_state;
77828931 }
77838932
7784
- ret = ufshcd_crypto_resume(hba, pm_op);
7785
- if (ret)
7786
- goto set_old_dev_pwr_mode;
7787
-
77888933 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
77898934 ufshcd_enable_auto_bkops(hba);
77908935 else
....@@ -7796,33 +8941,42 @@
77968941
77978942 hba->clk_gating.is_suspended = false;
77988943
7799
- if (hba->clk_scaling.is_allowed)
7800
- ufshcd_resume_clkscaling(hba);
7801
-
7802
- /* Schedule clock gating in case of no access to UFS device yet */
7803
- ufshcd_release(hba);
8944
+ if (ufshcd_is_clkscaling_supported(hba))
8945
+ ufshcd_clk_scaling_suspend(hba, false);
78048946
78058947 /* Enable Auto-Hibernate if configured */
78068948 ufshcd_auto_hibern8_enable(hba);
78078949
8950
+ ufshpb_resume(hba);
8951
+
8952
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
8953
+ hba->dev_info.b_rpm_dev_flush_capable = false;
8954
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
8955
+ }
8956
+
8957
+ /* Schedule clock gating in case of no access to UFS device yet */
8958
+ ufshcd_release(hba);
8959
+
78088960 goto out;
78098961
7810
-set_old_dev_pwr_mode:
7811
- if (old_pwr_mode != hba->curr_dev_pwr_mode)
7812
- ufshcd_set_dev_pwr_mode(hba, old_pwr_mode);
78138962 set_old_link_state:
78148963 ufshcd_link_state_transition(hba, old_link_state, 0);
78158964 vendor_suspend:
78168965 ufshcd_vops_suspend(hba, pm_op);
7817
-disable_vreg:
7818
- ufshcd_vreg_set_lpm(hba);
78198966 disable_irq_and_vops_clks:
78208967 ufshcd_disable_irq(hba);
7821
- if (hba->clk_scaling.is_allowed)
7822
- ufshcd_suspend_clkscaling(hba);
78238968 ufshcd_setup_clocks(hba, false);
8969
+ if (ufshcd_is_clkgating_allowed(hba)) {
8970
+ hba->clk_gating.state = CLKS_OFF;
8971
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
8972
+ hba->clk_gating.state);
8973
+ }
8974
+disable_vreg:
8975
+ ufshcd_vreg_set_lpm(hba);
78248976 out:
78258977 hba->pm_op_in_progress = 0;
8978
+ if (ret)
8979
+ ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
78268980 return ret;
78278981 }
78288982
....@@ -7839,13 +8993,19 @@
78398993 int ret = 0;
78408994 ktime_t start = ktime_get();
78418995
7842
- if (!hba || !hba->is_powered)
8996
+ down(&hba->host_sem);
8997
+
8998
+ if (!hba->is_powered)
78438999 return 0;
9000
+
9001
+ cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
78449002
78459003 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
78469004 hba->curr_dev_pwr_mode) &&
78479005 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7848
- hba->uic_link_state))
9006
+ hba->uic_link_state) &&
9007
+ pm_runtime_suspended(hba->dev) &&
9008
+ !hba->dev_info.b_rpm_dev_flush_capable)
78499009 goto out;
78509010
78519011 if (pm_runtime_suspended(hba->dev)) {
....@@ -7869,6 +9029,8 @@
78699029 hba->curr_dev_pwr_mode, hba->uic_link_state);
78709030 if (!ret)
78719031 hba->is_sys_suspended = true;
9032
+ else
9033
+ up(&hba->host_sem);
78729034 return ret;
78739035 }
78749036 EXPORT_SYMBOL(ufshcd_system_suspend);
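host_sem taken at the top of ufshcd_system_suspend() is deliberately left held on success and only released in ufshcd_system_resume() (or on the error path above), so it serializes system PM against shutdown and user-triggered operations. A hypothetical sketch of the pattern a non-PM path would follow, using only the host_sem and shutting_down fields introduced in this change:

static int ufshcd_begin_user_op(struct ufs_hba *hba)
{
	down(&hba->host_sem);
	if (hba->shutting_down) {
		/* Shutdown already started: refuse new work. */
		up(&hba->host_sem);
		return -EBUSY;
	}
	/* Caller proceeds and calls up(&hba->host_sem) when done. */
	return 0;
}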
....@@ -7885,9 +9047,6 @@
78859047 int ret = 0;
78869048 ktime_t start = ktime_get();
78879049
7888
- if (!hba)
7889
- return -EINVAL;
7890
-
78919050 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
78929051 /*
78939052 * Let the runtime resume take care of resuming
....@@ -7902,6 +9061,7 @@
79029061 hba->curr_dev_pwr_mode, hba->uic_link_state);
79039062 if (!ret)
79049063 hba->is_sys_suspended = false;
9064
+ up(&hba->host_sem);
79059065 return ret;
79069066 }
79079067 EXPORT_SYMBOL(ufshcd_system_resume);
....@@ -7918,9 +9078,6 @@
79189078 {
79199079 int ret = 0;
79209080 ktime_t start = ktime_get();
7921
-
7922
- if (!hba)
7923
- return -EINVAL;
79249081
79259082 if (!hba->is_powered)
79269083 goto out;
....@@ -7960,9 +9117,6 @@
79609117 int ret = 0;
79619118 ktime_t start = ktime_get();
79629119
7963
- if (!hba)
7964
- return -EINVAL;
7965
-
79669120 if (!hba->is_powered)
79679121 goto out;
79689122 else
....@@ -7993,6 +9147,10 @@
79939147 {
79949148 int ret = 0;
79959149
9150
+ down(&hba->host_sem);
9151
+ hba->shutting_down = true;
9152
+ up(&hba->host_sem);
9153
+
79969154 if (!hba->is_powered)
79979155 goto out;
79989156
....@@ -8005,6 +9163,7 @@
80059163 out:
80069164 if (ret)
80079165 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
9166
+ hba->is_powered = false;
80089167 /* allow force shutdown even in case of errors */
80099168 return 0;
80109169 }
....@@ -8017,16 +9176,16 @@
80179176 */
80189177 void ufshcd_remove(struct ufs_hba *hba)
80199178 {
9179
+ ufs_bsg_remove(hba);
9180
+ ufshpb_remove(hba);
80209181 ufs_sysfs_remove_nodes(hba->dev);
9182
+ blk_cleanup_queue(hba->tmf_queue);
9183
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
9184
+ blk_cleanup_queue(hba->cmd_queue);
80219185 scsi_remove_host(hba->host);
80229186 /* disable interrupts */
80239187 ufshcd_disable_intr(hba, hba->intr_mask);
8024
- ufshcd_hba_stop(hba, true);
8025
-
8026
- ufshcd_exit_clk_scaling(hba);
8027
- ufshcd_exit_clk_gating(hba);
8028
- if (ufshcd_is_clkscaling_supported(hba))
8029
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
9188
+ ufshcd_hba_stop(hba);
80309189 ufshcd_hba_exit(hba);
80319190 }
80329191 EXPORT_SYMBOL_GPL(ufshcd_remove);
....@@ -8077,23 +9236,17 @@
80779236 }
80789237
80799238 host = scsi_host_alloc(&ufshcd_driver_template,
8080
- sizeof(struct ufs_hba));
9239
+ sizeof(struct ufs_hba_add_info));
80819240 if (!host) {
80829241 dev_err(dev, "scsi_host_alloc failed\n");
80839242 err = -ENOMEM;
80849243 goto out_error;
80859244 }
8086
-
8087
- /*
8088
- * Do not use blk-mq at this time because blk-mq does not support
8089
- * runtime pm.
8090
- */
8091
- host->use_blk_mq = false;
8092
-
80939245 hba = shost_priv(host);
80949246 hba->host = host;
80959247 hba->dev = dev;
80969248 *hba_handle = hba;
9249
+ hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
80979250 hba->sg_entry_size = sizeof(struct ufshcd_sg_entry);
80989251
80999252 INIT_LIST_HEAD(&hba->clk_list_head);
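scsi_host_alloc() is now asked for sizeof(struct ufs_hba_add_info) of private data instead of a bare struct ufs_hba, and ufshcd_init() later reaches the extra fields through ufs_hba_add_info(). A sketch of the layout this implies, assuming (as ufshcd-add-info.h presumably arranges) that struct ufs_hba is embedded first so shost_priv() still returns a valid hba pointer; the exact struct contents are an assumption apart from the tmf_rqs member used below:

struct ufs_hba_add_info {
	struct ufs_hba hba;		/* assumed to stay the first member */
	struct request **tmf_rqs;	/* per-tag TMF request table */
};

static inline struct ufs_hba_add_info *ufs_hba_add_info(struct ufs_hba *hba)
{
	return container_of(hba, struct ufs_hba_add_info, hba);
}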
....@@ -8102,6 +9255,18 @@
81029255 return err;
81039256 }
81049257 EXPORT_SYMBOL(ufshcd_alloc_host);
9258
+
9259
+/* This function exists because blk_mq_alloc_tag_set() requires this. */
9260
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
9261
+ const struct blk_mq_queue_data *qd)
9262
+{
9263
+ WARN_ON_ONCE(true);
9264
+ return BLK_STS_NOTSUPP;
9265
+}
9266
+
9267
+static const struct blk_mq_ops ufshcd_tmf_ops = {
9268
+ .queue_rq = ufshcd_queue_tmf,
9269
+};
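The stub above never runs: the queue created from ufshcd_tmf_ops carries no real I/O and exists purely so blk-mq can hand out up to nutmrs unique tags, which double as UTP task-management slot numbers. A hedged sketch of how such a queue is typically consumed (illustrative helper; blk_get_request()/blk_put_request() are the stock block-layer calls):

static int ufshcd_get_tmf_slot(struct ufs_hba *hba, struct request **req_out)
{
	struct request *req;

	/* May sleep until one of the nutmrs tags becomes free. */
	req = blk_get_request(hba->tmf_queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	*req_out = req;
	return req->tag;	/* index into the UTP task management list */
}

The caller releases the slot with blk_put_request() once the task-management function completes.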
81059270
81069271 /**
81079272 * ufshcd_init - Driver initialization routine
....@@ -8112,9 +9277,11 @@
81129277 */
81139278 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
81149279 {
9280
+ struct request ***tmf_rqs = &ufs_hba_add_info(hba)->tmf_rqs;
81159281 int err;
81169282 struct Scsi_Host *host = hba->host;
81179283 struct device *dev = hba->dev;
9284
+ char eh_wq_name[sizeof("ufs_eh_wq_00")];
81189285
81199286 /*
81209287 * dev_set_drvdata() must be called before any callbacks are registered
....@@ -8132,24 +9299,21 @@
81329299
81339300 hba->mmio_base = mmio_base;
81349301 hba->irq = irq;
8135
-
8136
- /* Set descriptor lengths to specification defaults */
8137
- ufshcd_def_desc_sizes(hba);
9302
+ hba->vps = &ufs_hba_vps;
81389303
81399304 err = ufshcd_hba_init(hba);
81409305 if (err)
81419306 goto out_error;
81429307
81439308 /* Read capabilities registers */
8144
- ufshcd_hba_capabilities(hba);
9309
+ err = ufshcd_hba_capabilities(hba);
9310
+ if (err)
9311
+ goto out_disable;
81459312
81469313 /* Get UFS version supported by the controller */
81479314 hba->ufs_version = ufshcd_get_ufs_version(hba);
81489315
8149
- if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8150
- (hba->ufs_version != UFSHCI_VERSION_11) &&
8151
- (hba->ufs_version != UFSHCI_VERSION_20) &&
8152
- (hba->ufs_version != UFSHCI_VERSION_21))
9316
+ if (hba->ufs_version < ufshci_version(1, 0))
81539317 dev_err(hba->dev, "invalid UFS version 0x%x\n",
81549318 hba->ufs_version);
81559319
....@@ -8172,23 +9336,30 @@
81729336 /* Configure LRB */
81739337 ufshcd_host_memory_configure(hba);
81749338
8175
- host->can_queue = hba->nutrs;
8176
- host->cmd_per_lun = hba->nutrs;
9339
+ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
9340
+ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
81779341 host->max_id = UFSHCD_MAX_ID;
81789342 host->max_lun = UFS_MAX_LUNS;
81799343 host->max_channel = UFSHCD_MAX_CHANNEL;
81809344 host->unique_id = host->host_no;
8181
- host->max_cmd_len = MAX_CDB_SIZE;
9345
+ host->max_cmd_len = UFS_CDB_SIZE;
81829346
81839347 hba->max_pwr_info.is_valid = false;
81849348
8185
- /* Initailize wait queue for task management */
8186
- init_waitqueue_head(&hba->tm_wq);
8187
- init_waitqueue_head(&hba->tm_tag_wq);
8188
-
81899349 /* Initialize work queues */
9350
+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
9351
+ hba->host->host_no);
9352
+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
9353
+ if (!hba->eh_wq) {
9354
+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",
9355
+ __func__);
9356
+ err = -ENOMEM;
9357
+ goto out_disable;
9358
+ }
81909359 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
81919360 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
9361
+
9362
+ sema_init(&hba->host_sem, 1);
81929363
81939364 /* Initialize UIC command mutex */
81949365 mutex_init(&hba->uic_cmd_mutex);
....@@ -8197,9 +9368,6 @@
81979368 mutex_init(&hba->dev_cmd.lock);
81989369
81999370 init_rwsem(&hba->clk_scaling_lock);
8200
-
8201
- /* Initialize device management tag acquire wait queue */
8202
- init_waitqueue_head(&hba->dev_cmd.tag_wq);
82039371
82049372 ufshcd_init_clk_gating(hba);
82059373
....@@ -8223,7 +9391,7 @@
82239391 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
82249392 if (err) {
82259393 dev_err(hba->dev, "request irq failed\n");
8226
- goto exit_gating;
9394
+ goto out_disable;
82279395 } else {
82289396 hba->is_irq_enabled = true;
82299397 }
....@@ -8231,23 +9399,48 @@
82319399 err = scsi_add_host(host, hba->dev);
82329400 if (err) {
82339401 dev_err(hba->dev, "scsi_add_host failed\n");
8234
- goto exit_gating;
9402
+ goto out_disable;
82359403 }
82369404
8237
- /* Init crypto */
8238
- err = ufshcd_hba_init_crypto(hba);
8239
- if (err) {
8240
- dev_err(hba->dev, "crypto setup failed\n");
9405
+ hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
9406
+ if (IS_ERR(hba->cmd_queue)) {
9407
+ err = PTR_ERR(hba->cmd_queue);
82419408 goto out_remove_scsi_host;
82429409 }
9410
+
9411
+ hba->tmf_tag_set = (struct blk_mq_tag_set) {
9412
+ .nr_hw_queues = 1,
9413
+ .queue_depth = hba->nutmrs,
9414
+ .ops = &ufshcd_tmf_ops,
9415
+ .flags = BLK_MQ_F_NO_SCHED,
9416
+ };
9417
+ err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
9418
+ if (err < 0)
9419
+ goto free_cmd_queue;
9420
+ hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
9421
+ if (IS_ERR(hba->tmf_queue)) {
9422
+ err = PTR_ERR(hba->tmf_queue);
9423
+ goto free_tmf_tag_set;
9424
+ }
9425
+ *tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, sizeof(**tmf_rqs),
9426
+ GFP_KERNEL);
9427
+ if (!*tmf_rqs) {
9428
+ err = -ENOMEM;
9429
+ goto free_tmf_queue;
9430
+ }
9431
+
9432
+ /* Reset the attached device */
9433
+ ufshcd_vops_device_reset(hba);
9434
+
9435
+ ufshcd_init_crypto(hba);
82439436
82449437 /* Host controller enable */
82459438 err = ufshcd_hba_enable(hba);
82469439 if (err) {
82479440 dev_err(hba->dev, "Host controller enable failed\n");
8248
- ufshcd_print_host_regs(hba);
9441
+ ufshcd_print_evt_hist(hba);
82499442 ufshcd_print_host_state(hba);
8250
- goto out_remove_scsi_host;
9443
+ goto free_tmf_queue;
82519444 }
82529445
82539446 /*
....@@ -8262,8 +9455,11 @@
82629455 UFS_SLEEP_PWR_MODE,
82639456 UIC_LINK_HIBERN8_STATE);
82649457
9458
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
9459
+ ufshcd_rpm_dev_flush_recheck_work);
9460
+
82659461 /* Set the default auto-hibernate idle timer value to 150 ms */
8266
- if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
9462
+ if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
82679463 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
82689464 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
82699465 }
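For reference, the value programmed above decomposes as timer field = 150 with scale code = 3; UFSHCI scale codes 0..5 select 1 us, 10 us, 100 us, 1 ms, 10 ms and 100 ms units, so the idle timeout is 150 x 1 ms = 150 ms. A tiny sketch of composing such a value with the masks already used here (hypothetical helper):

static u32 ufshcd_ahit_encode(u32 timer, u32 scale)
{
	/* e.g. ufshcd_ahit_encode(150, 3) yields the 150 ms default above. */
	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}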
....@@ -8280,15 +9476,19 @@
82809476 ufshcd_set_ufs_dev_active(hba);
82819477
82829478 async_schedule(ufshcd_async_scan, hba);
8283
- ufs_sysfs_add_nodes(hba->dev);
9479
+ ufs_sysfs_add_nodes(hba);
82849480
9481
+ device_enable_async_suspend(dev);
82859482 return 0;
82869483
9484
+free_tmf_queue:
9485
+ blk_cleanup_queue(hba->tmf_queue);
9486
+free_tmf_tag_set:
9487
+ blk_mq_free_tag_set(&hba->tmf_tag_set);
9488
+free_cmd_queue:
9489
+ blk_cleanup_queue(hba->cmd_queue);
82879490 out_remove_scsi_host:
82889491 scsi_remove_host(hba->host);
8289
-exit_gating:
8290
- ufshcd_exit_clk_scaling(hba);
8291
- ufshcd_exit_clk_gating(hba);
82929492 out_disable:
82939493 hba->is_irq_enabled = false;
82949494 ufshcd_hba_exit(hba);
....@@ -8297,8 +9497,23 @@
82979497 }
82989498 EXPORT_SYMBOL_GPL(ufshcd_init);
82999499
9500
+static int __init ufshcd_core_init(void)
9501
+{
9502
+ ufs_debugfs_init();
9503
+ return 0;
9504
+}
9505
+
9506
+static void __exit ufshcd_core_exit(void)
9507
+{
9508
+ ufs_debugfs_exit();
9509
+}
9510
+
9511
+module_init(ufshcd_core_init);
9512
+module_exit(ufshcd_core_exit);
9513
+
83009514 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
83019515 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
83029516 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
9517
+MODULE_SOFTDEP("pre: governor_simpleondemand");
83039518 MODULE_LICENSE("GPL");
83049519 MODULE_VERSION(UFSHCD_DRIVER_VERSION);