/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
	({							\
		int _ret;					\
		if (_on)					\
			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
		else						\
			_ret = ufshcd_disable_vreg(_dev, _vreg);\
		_ret;						\
	})
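
/*
 * Note: the ({ ... }) block above is a GCC statement expression, so the
 * macro itself evaluates to _ret and can be used as an rvalue, e.g.:
 *
 *	ret = ufshcd_toggle_vreg(dev, vreg, true);
 */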

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)
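
/*
 * The __len temporary above makes sure the "len" argument is evaluated only
 * once even though it is used twice: once to pick the dump prefix style and
 * once as the length passed to print_hex_dump().
 */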

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_KERNEL);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4)
		regs[pos / 4] = ufshcd_readl(hba, offset + pos);

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
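
/*
 * Illustrative use from a vendor driver, assuming "hba" is a live adapter
 * (both "offset" and "len" must be 4-byte aligned, else -EINVAL):
 *
 *	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "my_hba: ");
 */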

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
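
/*
 * The index into this table doubles as the ufs_pm_level: each entry pairs a
 * UFS device power mode with the UIC link state that the level maps to. The
 * helpers below are thin accessors around this table.
 */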

static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					  enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return level 0 */
	return UFS_PM_LVL_0;
}

static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
		UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),

	END_FIX
};

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode);

static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				  hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}

static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
		scsi_unblock_requests(hba->host);
}

static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
		scsi_block_requests(hba->host);
}

/* replace a non-printable or non-ASCII character with a space */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}

static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
}

static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
					const char *str)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;

	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
}

static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     const char *str)
{
	int off = (int)tag - hba->nutrs;
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];

	trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
			  &descp->input_param1);
}

static void ufshcd_add_command_trace(struct ufs_hba *hba,
				     unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled()) {
		/* trace the UPIU without tracing the command */
		if (lrbp->cmd)
			ufshcd_add_cmd_upiu_trace(hba, tag, str);
		return;
	}

	if (lrbp->cmd) { /* data phase exists */
		/* trace UPIU also */
		ufshcd_add_cmd_upiu_trace(hba, tag, str);
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba = lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
			     doorbell, transfer_len, intr, lba, opcode);
}

static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
		    clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
				clki->name, clki->curr_freq);
	}
}

static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
				      struct ufs_uic_err_reg_hist *err_hist,
				      char *err_name)
{
	int i;
	bool found = false;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s uic errors\n", err_name);
}

static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
			tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
			tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
					sizeof(struct ufshcd_sg_entry) *
					prdt_length);
	}
}

static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}

static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}

/**
 * ufshcd_print_pwr_info - print the power parameters saved in the
 * hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		__func__,
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}

/**
 * ufshcd_wait_for_register - wait for a register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: register value to wait for (after masking)
 * @interval_us: polling interval in microsecs
 * @timeout_ms: timeout in millisecs
 * @can_sleep: perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success.
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
			     u32 val, unsigned long interval_us,
			     unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
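
/*
 * Typical usage, sketched: wait for a transfer request doorbell bit to clear
 * after writing UTRLCLR, where "tag" is the slot being cleared:
 *
 *	u32 mask = 1U << tag;
 *	int err = ufshcd_wait_for_register(hba,
 *			REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *			mask, ~mask, 1000, 1000, true);
 */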

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}
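
/*
 * The loop above is a lock-free slot claim: find_first_zero_bit() proposes a
 * candidate tag and test_and_set_bit_lock() atomically claims it, retrying
 * if another context wins the race for the same bit.
 */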

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos),
			      REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of the UIC command completion.
 * Returns 0 on success, non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function reads UIC command argument 3.
 * Returns the attribute value read from the device.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/**
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 * @hba: per adapter instance
 *
 * Setting the run-stop registers to 1 indicates to the host controller
 * that it can start processing requests.
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}

static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
			clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}

/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}

static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}

/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);

	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}

static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	ufshcd_scsi_unblock_requests(hba);
}
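
/*
 * ufshcd_clock_scaling_prepare() and ufshcd_clock_scaling_unprepare() must be
 * used as a pair: prepare blocks new SCSI requests and takes the scaling lock
 * so that the doorbells can drain, and unprepare reverses both steps.
 */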

/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret) {
		/* balance the ufshcd_hold() above before bailing out */
		ufshcd_release(hba);
		return ret;
	}

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
	scale_up = (*freq == clki->max_freq) ? true : false;
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}

static int ufshcd_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};
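
/*
 * With this profile the devfreq core polls ufshcd_devfreq_get_dev_status()
 * every 100 ms and, based on the reported busy/total ratio, calls
 * ufshcd_devfreq_target() with one of the two OPPs (min and max frequency)
 * registered in ufshcd_devfreq_init() below.
 */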

static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_add(hba->dev, clki->min_freq, 0);
	dev_pm_opp_add(hba->dev, clki->max_freq, 0);

	devfreq = devfreq_add_device(hba->dev,
			&ufs_devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}

static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	clki = list_first_entry(clk_list, struct ufs_clk_info, list);
	dev_pm_opp_remove(hba->dev, clki->min_freq);
	dev_pm_opp_remove(hba->dev, clki->max_freq);
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}

static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
				__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
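
/*
 * Clock scaling can then be toggled from user space through the attribute
 * created above, e.g. (the exact device path is platform-specific):
 *
 *	echo 0 > /sys/devices/.../clkscale_enable
 */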

static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	ufshcd_scsi_unblock_requests(hba);
}

/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		/* fallthrough */
	case CLKS_OFF:
		ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		/* fallthrough */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
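
/*
 * ufshcd_hold() and ufshcd_release() are reference counted and must always be
 * balanced. A typical pattern around issuing a request, as a sketch:
 *
 *	ufshcd_hold(hba, false);	(may block until clocks are ungated)
 *	... issue and complete the request ...
 *	ufshcd_release(hba);
 *
 * Passing async == true makes ufshcd_hold() return -EAGAIN instead of
 * sleeping while the ungate work is still running.
 */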

static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
	    (hba->clk_gating.state == REQ_CLKS_ON)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
	    || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
	    || hba->lrb_in_use || hba->outstanding_tasks
	    || hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and ultimately prevent the
	 * cancel work from being done multiple times when new requests
	 * arrive before the current cancel work is finished.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}

/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
	    || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
	    || hba->lrb_in_use || hba->outstanding_tasks
	    || hba->active_uic_cmd || hba->uic_async_done
	    || ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);

static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}

static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clkscaling_00")];

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
		 hba->host->host_no);
	hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

	ufshcd_clkscaling_init_sysfs(hba);
}

static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	char wq_name[sizeof("ufs_clk_gating_00")];

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
		 hba->host->host_no);
	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
							   WQ_MEM_RECLAIM);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
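
/*
 * The two attributes registered above let user space tune clock gating, e.g.
 * (the exact device path is platform-specific):
 *
 *	echo 200 > /sys/devices/.../clkgate_delay_ms
 *	echo 0 > /sys/devices/.../clkgate_enable
 */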

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}

/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
		return;

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = jiffies;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}
1869/**
1870 * ufshcd_send_command - Send SCSI or device management commands
1871 * @hba: per adapter instance
1872 * @task_tag: Task tag of the command
1873 */
1874static inline
1875void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1876{
1877 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1878 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1879 ufshcd_clk_scaling_start_busy(hba);
1880 __set_bit(task_tag, &hba->outstanding_reqs);
1881 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1882 /* Make sure that doorbell is committed immediately */
1883 wmb();
1884 ufshcd_add_command_trace(hba, task_tag, "send");
1885}
1886
1887/**
1888 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1889 * @lrbp: pointer to local reference block
1890 */
1891static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1892{
1893 int len;
1894 if (lrbp->sense_buffer &&
1895 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1896 int len_to_copy;
1897
1898 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1899 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
1900
1901 memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
1902 len_to_copy);
1903 }
1904}
1905
1906/**
1907 * ufshcd_copy_query_response() - Copy the Query Response and the data
1908 * descriptor
1909 * @hba: per adapter instance
1910 * @lrbp: pointer to local reference block
1911 */
1912static
1913int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1914{
1915 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1916
1917 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1918
1919 /* Get the descriptor */
1920 if (hba->dev_cmd.query.descriptor &&
1921 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1922 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1923 GENERAL_UPIU_REQUEST_SIZE;
1924 u16 resp_len;
1925 u16 buf_len;
1926
1927 /* data segment length */
1928 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1929 MASK_QUERY_DATA_SEG_LEN;
1930 buf_len = be16_to_cpu(
1931 hba->dev_cmd.query.request.upiu_req.length);
1932 if (likely(buf_len >= resp_len)) {
1933 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1934 } else {
1935 dev_warn(hba->dev,
1936 "%s: Response size is bigger than buffer",
1937 __func__);
1938 return -EINVAL;
1939 }
1940 }
1941
1942 return 0;
1943}
1944
1945/**
1946 * ufshcd_hba_capabilities - Read controller capabilities
1947 * @hba: per adapter instance
1948 */
1949static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1950{
1951 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1952
1953 /* nutrs and nutmrs are 0 based values */
1954 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1955 hba->nutmrs =
1956 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
1957}
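
/*
 * Worked example (illustrative, assuming the usual ufshci.h mask values
 * MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F and
 * MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000): a capabilities read of
 * 0x0707001F yields nutrs = 0x1F + 1 = 32 transfer request slots and
 * nutmrs = (0x70000 >> 16) + 1 = 8 task management slots.
 */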
1958
1959/**
1960 * ufshcd_ready_for_uic_cmd - Check if controller is ready
1961 * to accept UIC commands
1962 * @hba: per adapter instance
1963 * Return true on success, else false
1964 */
1965static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
1966{
1967 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
1968 return true;
1969 else
1970 return false;
1971}
1972
1973/**
1974 * ufshcd_get_upmcrs - Get the power mode change request status
1975 * @hba: Pointer to adapter instance
1976 *
1977 * This function gets the UPMCRS field of HCS register
1978 * Returns value of UPMCRS field
1979 */
1980static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
1981{
1982 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
1983}
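
/*
 * Worked example (illustrative): UPMCRS occupies bits 10:8 of HCS, so a
 * controller status read of 0x00000100 decodes as (0x100 >> 8) & 0x7 = 1,
 * i.e. PWR_LOCAL under the usual ufshci.h encoding (PWR_OK = 0,
 * PWR_LOCAL = 1), which ufshcd_uic_pwr_ctrl() below treats as success.
 */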
1984
1985/**
1986 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
1987 * @hba: per adapter instance
1988 * @uic_cmd: UIC command
1989 *
1990 * Mutex must be held.
1991 */
1992static inline void
1993ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1994{
1995 WARN_ON(hba->active_uic_cmd);
1996
1997 hba->active_uic_cmd = uic_cmd;
1998
1999 /* Write Args */
2000 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2001 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2002 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2003
2004 /* Write UIC Cmd */
2005 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2006 REG_UIC_COMMAND);
2007}
2008
2009/**
2010 * ufshcd_wait_for_uic_cmd - Wait for the completion of a UIC command
2011 * @hba: per adapter instance
2012 * @uic_cmd: UIC command
2013 *
2014 * Must be called with mutex held.
2015 * Returns 0 only on success.
2016 */
2017static int
2018ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2019{
2020 int ret;
2021 unsigned long flags;
2022
2023 if (wait_for_completion_timeout(&uic_cmd->done,
2024 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2025 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2026 else
2027 ret = -ETIMEDOUT;
2028
2029 spin_lock_irqsave(hba->host->host_lock, flags);
2030 hba->active_uic_cmd = NULL;
2031 spin_unlock_irqrestore(hba->host->host_lock, flags);
2032
2033 return ret;
2034}
2035
2036/**
2037 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2038 * @hba: per adapter instance
2039 * @uic_cmd: UIC command
2040 * @completion: initialize the completion only if this is set to true
2041 *
2042 * Identical to ufshcd_send_uic_cmd() except that locking is left to the
2043 * caller: it must be called with the uic_cmd_mutex held and host_lock locked.
2044 * Returns 0 only on success.
2045 */
2046static int
2047__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2048 bool completion)
2049{
2050 if (!ufshcd_ready_for_uic_cmd(hba)) {
2051 dev_err(hba->dev,
2052 "Controller not ready to accept UIC commands\n");
2053 return -EIO;
2054 }
2055
2056 if (completion)
2057 init_completion(&uic_cmd->done);
2058
2059 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2060
2061 return 0;
2062}
2063
2064/**
2065 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2066 * @hba: per adapter instance
2067 * @uic_cmd: UIC command
2068 *
2069 * Returns 0 only on success.
2070 */
2071int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2072{
2073 int ret;
2074 unsigned long flags;
2075
2076 ufshcd_hold(hba, false);
2077 mutex_lock(&hba->uic_cmd_mutex);
2078 ufshcd_add_delay_before_dme_cmd(hba);
2079
2080 spin_lock_irqsave(hba->host->host_lock, flags);
2081 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2082 spin_unlock_irqrestore(hba->host->host_lock, flags);
2083 if (!ret)
2084 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2085
2086 mutex_unlock(&hba->uic_cmd_mutex);
2087
2088 ufshcd_release(hba);
2089 return ret;
2090}
2091
2092/**
2093 * ufshcd_map_sg - Map scatter-gather list to prdt
2094 * @hba: per adapter instance
2095 * @lrbp: pointer to local reference block
2096 *
2097 * Returns 0 in case of success, non-zero value in case of failure
2098 */
2099static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2100{
2101 struct ufshcd_sg_entry *prd_table;
2102 struct scatterlist *sg;
2103 struct scsi_cmnd *cmd;
2104 int sg_segments;
2105 int i;
2106
2107 cmd = lrbp->cmd;
2108 sg_segments = scsi_dma_map(cmd);
2109 if (sg_segments < 0)
2110 return sg_segments;
2111
2112 if (sg_segments) {
2113 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2114 lrbp->utr_descriptor_ptr->prd_table_length =
2115 cpu_to_le16((u16)(sg_segments *
2116 sizeof(struct ufshcd_sg_entry)));
2117 else
2118 lrbp->utr_descriptor_ptr->prd_table_length =
2119 cpu_to_le16((u16) (sg_segments));
2120
2121 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2122
2123 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2124 prd_table[i].size =
2125 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2126 prd_table[i].base_addr =
2127 cpu_to_le32(lower_32_bits(sg->dma_address));
2128 prd_table[i].upper_addr =
2129 cpu_to_le32(upper_32_bits(sg->dma_address));
2130 prd_table[i].reserved = 0;
2131 }
2132 } else {
2133 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2134 }
2135
2136 return 0;
2137}
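
/*
 * Illustrative note: the PRDT size field is programmed as the segment byte
 * count minus one (sg_dma_len(sg) - 1 above), so a 4 KiB DMA segment is
 * recorded as 0xFFF. A hypothetical debug helper under that assumption:
 *
 *   static void ufs_dump_prdt(struct ufshcd_sg_entry *prd, int nents)
 *   {
 *           int i;
 *
 *           for (i = 0; i < nents; i++)
 *                   pr_debug("prd[%d]: addr %08x%08x len %u\n", i,
 *                            le32_to_cpu(prd[i].upper_addr),
 *                            le32_to_cpu(prd[i].base_addr),
 *                            le32_to_cpu(prd[i].size) + 1);
 *   }
 */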
2138
2139/**
2140 * ufshcd_enable_intr - enable interrupts
2141 * @hba: per adapter instance
2142 * @intrs: interrupt bits
2143 */
2144static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2145{
2146 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2147
2148 if (hba->ufs_version == UFSHCI_VERSION_10) {
2149 u32 rw;
2150 rw = set & INTERRUPT_MASK_RW_VER_10;
2151 set = rw | ((set ^ intrs) & intrs);
2152 } else {
2153 set |= intrs;
2154 }
2155
2156 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2157}
2158
2159/**
2160 * ufshcd_disable_intr - disable interrupts
2161 * @hba: per adapter instance
2162 * @intrs: interrupt bits
2163 */
2164static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2165{
2166 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2167
2168 if (hba->ufs_version == UFSHCI_VERSION_10) {
2169 u32 rw;
2170 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2171 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2172 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2173
2174 } else {
2175 set &= ~intrs;
2176 }
2177
2178 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2179}
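
/*
 * Usage sketch (illustrative): once the host is brought up, the driver's
 * standard interrupt set can be enabled in one shot, e.g.:
 *
 *   ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 *
 * Note that on UFSHCI 1.0 hosts both helpers above additionally preserve
 * the read/write bits covered by INTERRUPT_MASK_RW_VER_10.
 */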
2180
2181/**
2182 * ufshcd_prepare_req_desc_hdr() - Fill the request's header
2183 * descriptor according to the request
2184 * @lrbp: pointer to local reference block
2185 * @upiu_flags: flags required in the header
2186 * @cmd_dir: request's data direction
2187 */
2188static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2189 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2190{
2191 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2192 u32 data_direction;
2193 u32 dword_0;
2194
2195 if (cmd_dir == DMA_FROM_DEVICE) {
2196 data_direction = UTP_DEVICE_TO_HOST;
2197 *upiu_flags = UPIU_CMD_FLAGS_READ;
2198 } else if (cmd_dir == DMA_TO_DEVICE) {
2199 data_direction = UTP_HOST_TO_DEVICE;
2200 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2201 } else {
2202 data_direction = UTP_NO_DATA_TRANSFER;
2203 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2204 }
2205
2206 dword_0 = data_direction | (lrbp->command_type
2207 << UPIU_COMMAND_TYPE_OFFSET);
2208 if (lrbp->intr_cmd)
2209 dword_0 |= UTP_REQ_DESC_INT_CMD;
2210
2211 /* Transfer request descriptor header fields */
2212 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2213 /* dword_1 is reserved, hence it is set to 0 */
2214 req_desc->header.dword_1 = 0;
2215 /*
2216 * Assign an invalid value for the command status; the
2217 * controller updates OCS with the actual status on command
2218 * completion.
2219 */
2220 req_desc->header.dword_2 =
2221 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2222 /* dword_3 is reserved, hence it is set to 0 */
2223 req_desc->header.dword_3 = 0;
2224
2225 req_desc->prd_table_length = 0;
2226}
2227
2228/**
2229 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2230 * for scsi commands
2231 * @lrbp: local reference block pointer
2232 * @upiu_flags: flags
2233 */
2234static
2235void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2236{
2237 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2238 unsigned short cdb_len;
2239
2240 /* command descriptor fields */
2241 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2242 UPIU_TRANSACTION_COMMAND, upiu_flags,
2243 lrbp->lun, lrbp->task_tag);
2244 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2245 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2246
2247 /* Total EHS length and Data segment length will be zero */
2248 ucd_req_ptr->header.dword_2 = 0;
2249
2250 ucd_req_ptr->sc.exp_data_transfer_len =
2251 cpu_to_be32(lrbp->cmd->sdb.length);
2252
2253 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
2254 memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
2255 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2256
2257 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2258}
2259
2260/**
2261 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2262 * for query requests
2263 * @hba: UFS hba
2264 * @lrbp: local reference block pointer
2265 * @upiu_flags: flags
2266 */
2267static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2268 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2269{
2270 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2271 struct ufs_query *query = &hba->dev_cmd.query;
2272 u16 len = be16_to_cpu(query->request.upiu_req.length);
2273
2274 /* Query request header */
2275 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2276 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2277 lrbp->lun, lrbp->task_tag);
2278 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2279 0, query->request.query_func, 0, 0);
2280
2281 /* Data segment length is only needed for WRITE_DESC */
2282 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2283 ucd_req_ptr->header.dword_2 =
2284 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2285 else
2286 ucd_req_ptr->header.dword_2 = 0;
2287
2288 /* Copy the Query Request buffer as is */
2289 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2290 QUERY_OSF_SIZE);
2291
2292 /* Copy the Descriptor */
2293 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2294 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2295
2296 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2297}
2298
2299static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2300{
2301 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2302
2303 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2304
2305 /* command descriptor fields */
2306 ucd_req_ptr->header.dword_0 =
2307 UPIU_HEADER_DWORD(
2308 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2309 /* clear rest of the fields of basic header */
2310 ucd_req_ptr->header.dword_1 = 0;
2311 ucd_req_ptr->header.dword_2 = 0;
2312
2313 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2314}
2315
2316/**
2317 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
2318 * for Device Management Purposes
2319 * @hba: per adapter instance
2320 * @lrbp: pointer to local reference block
2321 */
2322static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2323{
2324 u32 upiu_flags;
2325 int ret = 0;
2326
2327 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2328 (hba->ufs_version == UFSHCI_VERSION_11))
2329 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2330 else
2331 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2332
2333 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2334 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2335 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2336 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2337 ufshcd_prepare_utp_nop_upiu(lrbp);
2338 else
2339 ret = -EINVAL;
2340
2341 return ret;
2342}
2343
2344/**
2345 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2346 * for SCSI Purposes
2347 * @hba: per adapter instance
2348 * @lrbp: pointer to local reference block
2349 */
2350static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2351{
2352 u32 upiu_flags;
2353 int ret = 0;
2354
2355 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2356 (hba->ufs_version == UFSHCI_VERSION_11))
2357 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2358 else
2359 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2360
2361 if (likely(lrbp->cmd)) {
2362 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2363 lrbp->cmd->sc_data_direction);
2364 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2365 } else {
2366 ret = -EINVAL;
2367 }
2368
2369 return ret;
2370}
2371
2372/**
2373 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2374 * @upiu_wlun_id: UPIU W-LUN id
2375 *
2376 * Returns SCSI W-LUN id
2377 */
2378static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2379{
2380 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2381}
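
/*
 * Worked example (illustrative, assuming the usual definitions
 * UFS_UPIU_WLUN_ID = 0x80 and SCSI_W_LUN_BASE = 0xc100): the REPORT LUNS
 * well known LU, conventionally UPIU W-LUN 0x81, maps to
 * (0x81 & ~0x80) | 0xc100 = 0xc101 on the SCSI side.
 */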
2382
2383/**
2384 * ufshcd_queuecommand - main entry point for SCSI requests
2385 * @host: SCSI host pointer
2386 * @cmd: command from SCSI Midlayer
2387 *
2388 * Returns 0 for success, non-zero in case of failure
2389 */
2390static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2391{
2392 struct ufshcd_lrb *lrbp;
2393 struct ufs_hba *hba;
2394 unsigned long flags;
2395 int tag;
2396 int err = 0;
2397
2398 hba = shost_priv(host);
2399
2400 tag = cmd->request->tag;
2401 if (!ufshcd_valid_tag(hba, tag)) {
2402 dev_err(hba->dev,
2403 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2404 __func__, tag, cmd, cmd->request);
2405 BUG();
2406 }
2407
2408 if (!down_read_trylock(&hba->clk_scaling_lock))
2409 return SCSI_MLQUEUE_HOST_BUSY;
2410
2411 spin_lock_irqsave(hba->host->host_lock, flags);
2412 switch (hba->ufshcd_state) {
2413 case UFSHCD_STATE_OPERATIONAL:
2414 break;
2415 case UFSHCD_STATE_EH_SCHEDULED:
2416 case UFSHCD_STATE_RESET:
2417 err = SCSI_MLQUEUE_HOST_BUSY;
2418 goto out_unlock;
2419 case UFSHCD_STATE_ERROR:
2420 set_host_byte(cmd, DID_ERROR);
2421 cmd->scsi_done(cmd);
2422 goto out_unlock;
2423 default:
2424 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2425 __func__, hba->ufshcd_state);
2426 set_host_byte(cmd, DID_BAD_TARGET);
2427 cmd->scsi_done(cmd);
2428 goto out_unlock;
2429 }
2430
2431 /* if error handling is in progress, don't issue commands */
2432 if (ufshcd_eh_in_progress(hba)) {
2433 set_host_byte(cmd, DID_ERROR);
2434 cmd->scsi_done(cmd);
2435 goto out_unlock;
2436 }
2437 spin_unlock_irqrestore(hba->host->host_lock, flags);
2438
2439 hba->req_abort_count = 0;
2440
2441 /* acquire the tag to make sure device cmds don't use it */
2442 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2443 /*
2444 * Dev manage command in progress, requeue the command.
2445 * Requeuing helps because the request may then be assigned a
2446 * different tag instead of waiting for the device management
2447 * command to complete.
2448 */
2449 err = SCSI_MLQUEUE_HOST_BUSY;
2450 goto out;
2451 }
2452
2453 err = ufshcd_hold(hba, true);
2454 if (err) {
2455 err = SCSI_MLQUEUE_HOST_BUSY;
2456 clear_bit_unlock(tag, &hba->lrb_in_use);
2457 goto out;
2458 }
2459 WARN_ON(hba->clk_gating.state != CLKS_ON);
2460
2461 lrbp = &hba->lrb[tag];
2462
2463 WARN_ON(lrbp->cmd);
2464 lrbp->cmd = cmd;
2465 lrbp->sense_bufflen = UFS_SENSE_SIZE;
2466 lrbp->sense_buffer = cmd->sense_buffer;
2467 lrbp->task_tag = tag;
2468 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2469 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2470 lrbp->req_abort_skip = false;
2471
2472 ufshcd_comp_scsi_upiu(hba, lrbp);
2473
2474 err = ufshcd_map_sg(hba, lrbp);
2475 if (err) {
2476 lrbp->cmd = NULL;
2477 clear_bit_unlock(tag, &hba->lrb_in_use);
2478 goto out;
2479 }
2480 /* Make sure descriptors are ready before ringing the doorbell */
2481 wmb();
2482
2483 /* issue command to the controller */
2484 spin_lock_irqsave(hba->host->host_lock, flags);
2485 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2486 ufshcd_send_command(hba, tag);
2487out_unlock:
2488 spin_unlock_irqrestore(hba->host->host_lock, flags);
2489out:
2490 up_read(&hba->clk_scaling_lock);
2491 return err;
2492}
2493
2494static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2495 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2496{
2497 lrbp->cmd = NULL;
2498 lrbp->sense_bufflen = 0;
2499 lrbp->sense_buffer = NULL;
2500 lrbp->task_tag = tag;
2501 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2502 lrbp->intr_cmd = true; /* No interrupt aggregation */
2503 hba->dev_cmd.type = cmd_type;
2504
2505 return ufshcd_comp_devman_upiu(hba, lrbp);
2506}
2507
2508static int
2509ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2510{
2511 int err = 0;
2512 unsigned long flags;
2513 u32 mask = 1 << tag;
2514
2515 /* clear outstanding transaction before retry */
2516 spin_lock_irqsave(hba->host->host_lock, flags);
2517 ufshcd_utrl_clear(hba, tag);
2518 spin_unlock_irqrestore(hba->host->host_lock, flags);
2519
2520 /*
2521 * wait for h/w to clear the corresponding bit in the doorbell.
2522 * max. wait is 1 sec.
2523 */
2524 err = ufshcd_wait_for_register(hba,
2525 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2526 mask, ~mask, 1000, 1000, true);
2527
2528 return err;
2529}
2530
2531static int
2532ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2533{
2534 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2535
2536 /* Get the UPIU response */
2537 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2538 UPIU_RSP_CODE_OFFSET;
2539 return query_res->response;
2540}
2541
2542/**
2543 * ufshcd_dev_cmd_completion() - handles device management command responses
2544 * @hba: per adapter instance
2545 * @lrbp: pointer to local reference block
2546 */
2547static int
2548ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2549{
2550 int resp;
2551 int err = 0;
2552
2553 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2554 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2555
2556 switch (resp) {
2557 case UPIU_TRANSACTION_NOP_IN:
2558 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2559 err = -EINVAL;
2560 dev_err(hba->dev, "%s: unexpected response %x\n",
2561 __func__, resp);
2562 }
2563 break;
2564 case UPIU_TRANSACTION_QUERY_RSP:
2565 err = ufshcd_check_query_response(hba, lrbp);
2566 if (!err)
2567 err = ufshcd_copy_query_response(hba, lrbp);
2568 break;
2569 case UPIU_TRANSACTION_REJECT_UPIU:
2570 /* TODO: handle Reject UPIU Response */
2571 err = -EPERM;
2572 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2573 __func__);
2574 break;
2575 default:
2576 err = -EINVAL;
2577 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2578 __func__, resp);
2579 break;
2580 }
2581
2582 return err;
2583}
2584
2585static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2586 struct ufshcd_lrb *lrbp, int max_timeout)
2587{
2588 int err = 0;
2589 unsigned long time_left;
2590 unsigned long flags;
2591
2592 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2593 msecs_to_jiffies(max_timeout));
2594
2595 /* Make sure descriptors are ready before ringing the doorbell */
2596 wmb();
2597 spin_lock_irqsave(hba->host->host_lock, flags);
2598 hba->dev_cmd.complete = NULL;
2599 if (likely(time_left)) {
2600 err = ufshcd_get_tr_ocs(lrbp);
2601 if (!err)
2602 err = ufshcd_dev_cmd_completion(hba, lrbp);
2603 }
2604 spin_unlock_irqrestore(hba->host->host_lock, flags);
2605
2606 if (!time_left) {
2607 err = -ETIMEDOUT;
2608 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2609 __func__, lrbp->task_tag);
2610 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2611 /* successfully cleared the command, retry if needed */
2612 err = -EAGAIN;
2613 /*
2614 * in case of an error, after clearing the doorbell,
2615 * we also need to clear the outstanding_request
2616 * field in hba
2617 */
2618 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2619 }
2620
2621 return err;
2622}
2623
2624/**
2625 * ufshcd_get_dev_cmd_tag - Get device management command tag
2626 * @hba: per-adapter instance
2627 * @tag_out: pointer to variable with available slot value
2628 *
2629 * Get a free slot and lock it until device management command
2630 * completes.
2631 *
2632 * Returns false if a free slot is unavailable for locking, else
2633 * returns true with the tag value in @tag_out.
2634 */
2635static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2636{
2637 int tag;
2638 bool ret = false;
2639 unsigned long tmp;
2640
2641 if (!tag_out)
2642 goto out;
2643
2644 do {
2645 tmp = ~hba->lrb_in_use;
2646 tag = find_last_bit(&tmp, hba->nutrs);
2647 if (tag >= hba->nutrs)
2648 goto out;
2649 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2650
2651 *tag_out = tag;
2652 ret = true;
2653out:
2654 return ret;
2655}
2656
2657static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2658{
2659 clear_bit_unlock(tag, &hba->lrb_in_use);
2660}
2661
2662/**
2663 * ufshcd_exec_dev_cmd - API for sending device management requests
2664 * @hba: UFS hba
2665 * @cmd_type: specifies the type (NOP, Query...)
2666 * @timeout: timeout in milliseconds
2667 *
2668 * NOTE: Since there is only one available tag for device management commands,
2669 * it is expected you hold the hba->dev_cmd.lock mutex.
2670 */
2671static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2672 enum dev_cmd_type cmd_type, int timeout)
2673{
2674 struct ufshcd_lrb *lrbp;
2675 int err;
2676 int tag;
2677 struct completion wait;
2678 unsigned long flags;
2679
2680 down_read(&hba->clk_scaling_lock);
2681
2682 /*
2683 * Get free slot, sleep if slots are unavailable.
2684 * Even though we use wait_event() which sleeps indefinitely,
2685 * the maximum wait time is bounded by SCSI request timeout.
2686 */
2687 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2688
2689 init_completion(&wait);
2690 lrbp = &hba->lrb[tag];
2691 WARN_ON(lrbp->cmd);
2692 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2693 if (unlikely(err))
2694 goto out_put_tag;
2695
2696 hba->dev_cmd.complete = &wait;
2697
2698 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2699 /* Make sure descriptors are ready before ringing the doorbell */
2700 wmb();
2701 spin_lock_irqsave(hba->host->host_lock, flags);
2702 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2703 ufshcd_send_command(hba, tag);
2704 spin_unlock_irqrestore(hba->host->host_lock, flags);
2705
2706 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2707
2708 ufshcd_add_query_upiu_trace(hba, tag,
2709 err ? "query_complete_err" : "query_complete");
2710
2711out_put_tag:
2712 ufshcd_put_dev_cmd_tag(hba, tag);
2713 wake_up(&hba->dev_cmd.tag_wq);
2714 up_read(&hba->clk_scaling_lock);
2715 return err;
2716}
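
/*
 * Usage sketch (illustrative): issuing a NOP OUT to verify that the device
 * responds, the way a probe-time check would. Note that @timeout is in
 * milliseconds (it is handed to msecs_to_jiffies() in
 * ufshcd_wait_for_dev_cmd()):
 *
 *   mutex_lock(&hba->dev_cmd.lock);
 *   err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *   mutex_unlock(&hba->dev_cmd.lock);
 */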
2717
2718/**
2719 * ufshcd_init_query() - init the query response and request parameters
2720 * @hba: per-adapter instance
2721 * @request: address of the request pointer to be initialized
2722 * @response: address of the response pointer to be initialized
2723 * @opcode: operation to perform
2724 * @idn: flag idn to access
2725 * @index: LU number to access
2726 * @selector: query/flag/descriptor further identification
2727 */
2728static inline void ufshcd_init_query(struct ufs_hba *hba,
2729 struct ufs_query_req **request, struct ufs_query_res **response,
2730 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2731{
2732 *request = &hba->dev_cmd.query.request;
2733 *response = &hba->dev_cmd.query.response;
2734 memset(*request, 0, sizeof(struct ufs_query_req));
2735 memset(*response, 0, sizeof(struct ufs_query_res));
2736 (*request)->upiu_req.opcode = opcode;
2737 (*request)->upiu_req.idn = idn;
2738 (*request)->upiu_req.index = index;
2739 (*request)->upiu_req.selector = selector;
2740}
2741
2742static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2743 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2744{
2745 int ret;
2746 int retries;
2747
2748 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2749 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2750 if (ret)
2751 dev_dbg(hba->dev,
2752 "%s: failed with error %d, retries %d\n",
2753 __func__, ret, retries);
2754 else
2755 break;
2756 }
2757
2758 if (ret)
2759 dev_err(hba->dev,
2760 "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
2761 __func__, opcode, idn, ret, retries);
2762 return ret;
2763}
2764
2765/**
2766 * ufshcd_query_flag() - API function for sending flag query requests
2767 * @hba: per-adapter instance
2768 * @opcode: flag query to perform
2769 * @idn: flag idn to access
2770 * @flag_res: the flag value after the query request completes
2771 *
2772 * Returns 0 for success, non-zero in case of failure
2773 */
2774int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2775 enum flag_idn idn, bool *flag_res)
2776{
2777 struct ufs_query_req *request = NULL;
2778 struct ufs_query_res *response = NULL;
2779 int err, index = 0, selector = 0;
2780 int timeout = QUERY_REQ_TIMEOUT;
2781
2782 BUG_ON(!hba);
2783
2784 ufshcd_hold(hba, false);
2785 mutex_lock(&hba->dev_cmd.lock);
2786 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2787 selector);
2788
2789 switch (opcode) {
2790 case UPIU_QUERY_OPCODE_SET_FLAG:
2791 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2792 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2793 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2794 break;
2795 case UPIU_QUERY_OPCODE_READ_FLAG:
2796 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2797 if (!flag_res) {
2798 /* No dummy reads */
2799 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2800 __func__);
2801 err = -EINVAL;
2802 goto out_unlock;
2803 }
2804 break;
2805 default:
2806 dev_err(hba->dev,
2807 "%s: Expected query flag opcode but got = %d\n",
2808 __func__, opcode);
2809 err = -EINVAL;
2810 goto out_unlock;
2811 }
2812
2813 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2814
2815 if (err) {
2816 dev_err(hba->dev,
2817 "%s: Sending flag query for idn %d failed, err = %d\n",
2818 __func__, idn, err);
2819 goto out_unlock;
2820 }
2821
2822 if (flag_res)
2823 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2824 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2825
2826out_unlock:
2827 mutex_unlock(&hba->dev_cmd.lock);
2828 ufshcd_release(hba);
2829 return err;
2830}
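
/*
 * Usage sketch (illustrative): setting and then reading back the
 * fDeviceInit flag during device initialization, via the retrying wrapper
 * above (QUERY_FLAG_IDN_FDEVICEINIT is assumed to be the matching idn
 * from ufs.h):
 *
 *   bool flag_res = true;
 *
 *   err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *                                 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *   if (!err)
 *           err = ufshcd_query_flag_retry(hba,
 *                                         UPIU_QUERY_OPCODE_READ_FLAG,
 *                                         QUERY_FLAG_IDN_FDEVICEINIT,
 *                                         &flag_res);
 */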
2831
2832/**
2833 * ufshcd_query_attr - API function for sending attribute requests
2834 * @hba: per-adapter instance
2835 * @opcode: attribute opcode
2836 * @idn: attribute idn to access
2837 * @index: index field
2838 * @selector: selector field
2839 * @attr_val: the attribute value after the query request completes
2840 *
2841 * Returns 0 for success, non-zero in case of failure
2842 */
2843int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2844 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2845{
2846 struct ufs_query_req *request = NULL;
2847 struct ufs_query_res *response = NULL;
2848 int err;
2849
2850 BUG_ON(!hba);
2851
2852 ufshcd_hold(hba, false);
2853 if (!attr_val) {
2854 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2855 __func__, opcode);
2856 err = -EINVAL;
2857 goto out;
2858 }
2859
2860 mutex_lock(&hba->dev_cmd.lock);
2861 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2862 selector);
2863
2864 switch (opcode) {
2865 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2866 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2867 request->upiu_req.value = cpu_to_be32(*attr_val);
2868 break;
2869 case UPIU_QUERY_OPCODE_READ_ATTR:
2870 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2871 break;
2872 default:
2873 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2874 __func__, opcode);
2875 err = -EINVAL;
2876 goto out_unlock;
2877 }
2878
2879 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2880
2881 if (err) {
2882 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2883 __func__, opcode, idn, index, err);
2884 goto out_unlock;
2885 }
2886
2887 *attr_val = be32_to_cpu(response->upiu_res.value);
2888
2889out_unlock:
2890 mutex_unlock(&hba->dev_cmd.lock);
2891out:
2892 ufshcd_release(hba);
2893 return err;
2894}
2895
2896/**
2897 * ufshcd_query_attr_retry() - API function for sending query
2898 * attribute with retries
2899 * @hba: per-adapter instance
2900 * @opcode: attribute opcode
2901 * @idn: attribute idn to access
2902 * @index: index field
2903 * @selector: selector field
2904 * @attr_val: the attribute value after the query request
2905 * completes
2906 *
2907 * Returns 0 for success, non-zero in case of failure
2908 */
2909static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2910 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2911 u32 *attr_val)
2912{
2913 int ret = 0;
2914 u32 retries;
2915
2916 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2917 ret = ufshcd_query_attr(hba, opcode, idn, index,
2918 selector, attr_val);
2919 if (ret)
2920 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2921 __func__, ret, retries);
2922 else
2923 break;
2924 }
2925
2926 if (ret)
2927 dev_err(hba->dev,
2928 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2929 __func__, idn, ret, QUERY_REQ_RETRIES);
2930 return ret;
2931}
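
/*
 * Usage sketch (illustrative): reading the bBootLunEn attribute with the
 * retrying wrapper above (QUERY_ATTR_IDN_BOOT_LU_EN is assumed to be the
 * matching idn from ufs.h):
 *
 *   u32 boot_lun_en;
 *
 *   err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *                                 QUERY_ATTR_IDN_BOOT_LU_EN, 0, 0,
 *                                 &boot_lun_en);
 */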
2932
2933static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2934 enum query_opcode opcode, enum desc_idn idn, u8 index,
2935 u8 selector, u8 *desc_buf, int *buf_len)
2936{
2937 struct ufs_query_req *request = NULL;
2938 struct ufs_query_res *response = NULL;
2939 int err;
2940
2941 BUG_ON(!hba);
2942
2943 ufshcd_hold(hba, false);
2944 if (!desc_buf) {
2945 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2946 __func__, opcode);
2947 err = -EINVAL;
2948 goto out;
2949 }
2950
2951 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2952 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2953 __func__, *buf_len);
2954 err = -EINVAL;
2955 goto out;
2956 }
2957
2958 mutex_lock(&hba->dev_cmd.lock);
2959 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2960 selector);
2961 hba->dev_cmd.query.descriptor = desc_buf;
2962 request->upiu_req.length = cpu_to_be16(*buf_len);
2963
2964 switch (opcode) {
2965 case UPIU_QUERY_OPCODE_WRITE_DESC:
2966 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2967 break;
2968 case UPIU_QUERY_OPCODE_READ_DESC:
2969 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2970 break;
2971 default:
2972 dev_err(hba->dev,
2973 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2974 __func__, opcode);
2975 err = -EINVAL;
2976 goto out_unlock;
2977 }
2978
2979 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2980
2981 if (err) {
2982 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2983 __func__, opcode, idn, index, err);
2984 goto out_unlock;
2985 }
2986
2987 hba->dev_cmd.query.descriptor = NULL;
2988 *buf_len = be16_to_cpu(response->upiu_res.length);
2989
2990out_unlock:
2991 mutex_unlock(&hba->dev_cmd.lock);
2992out:
2993 ufshcd_release(hba);
2994 return err;
2995}
2996
2997/**
2998 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
2999 * @hba: per-adapter instance
3000 * @opcode: attribute opcode
3001 * @idn: attribute idn to access
3002 * @index: index field
3003 * @selector: selector field
3004 * @desc_buf: the buffer that contains the descriptor
3005 * @buf_len: length parameter passed to the device
3006 *
3007 * Returns 0 for success, non-zero in case of failure.
3008 * The buf_len parameter will contain, on return, the length parameter
3009 * received on the response.
3010 */
3011int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3012 enum query_opcode opcode,
3013 enum desc_idn idn, u8 index,
3014 u8 selector,
3015 u8 *desc_buf, int *buf_len)
3016{
3017 int err;
3018 int retries;
3019
3020 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3021 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3022 selector, desc_buf, buf_len);
3023 if (!err || err == -EINVAL)
3024 break;
3025 }
3026
3027 return err;
3028}
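
/*
 * Usage sketch (illustrative): reading the raw device descriptor. On
 * return, buf_len holds the length reported in the response:
 *
 *   u8 desc_buf[QUERY_DESC_MAX_SIZE];
 *   int buf_len = hba->desc_size.dev_desc;
 *
 *   err = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
 *                                       QUERY_DESC_IDN_DEVICE, 0, 0,
 *                                       desc_buf, &buf_len);
 */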
3029
3030/**
3031 * ufshcd_read_desc_length - read the specified descriptor length from header
3032 * @hba: Pointer to adapter instance
3033 * @desc_id: descriptor idn value
3034 * @desc_index: descriptor index
3035 * @desc_length: pointer to variable to read the length of descriptor
3036 *
3037 * Return 0 in case of success, non-zero otherwise
3038 */
3039static int ufshcd_read_desc_length(struct ufs_hba *hba,
3040 enum desc_idn desc_id,
3041 int desc_index,
3042 int *desc_length)
3043{
3044 int ret;
3045 u8 header[QUERY_DESC_HDR_SIZE];
3046 int header_len = QUERY_DESC_HDR_SIZE;
3047
3048 if (desc_id >= QUERY_DESC_IDN_MAX)
3049 return -EINVAL;
3050
3051 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3052 desc_id, desc_index, 0, header,
3053 &header_len);
3054
3055 if (ret) {
3056 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3057 __func__, desc_id);
3058 return ret;
3059 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3060 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3061 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3062 desc_id);
3063 ret = -EINVAL;
3064 }
3065
3066 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3067 return ret;
3068
3069}
3070
3071/**
3072 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3073 * @hba: Pointer to adapter instance
3074 * @desc_id: descriptor idn value
3075 * @desc_len: mapped desc length (out)
3076 *
3077 * Return 0 in case of success, non-zero otherwise
3078 */
3079int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3080 enum desc_idn desc_id, int *desc_len)
3081{
3082 switch (desc_id) {
3083 case QUERY_DESC_IDN_DEVICE:
3084 *desc_len = hba->desc_size.dev_desc;
3085 break;
3086 case QUERY_DESC_IDN_POWER:
3087 *desc_len = hba->desc_size.pwr_desc;
3088 break;
3089 case QUERY_DESC_IDN_GEOMETRY:
3090 *desc_len = hba->desc_size.geom_desc;
3091 break;
3092 case QUERY_DESC_IDN_CONFIGURATION:
3093 *desc_len = hba->desc_size.conf_desc;
3094 break;
3095 case QUERY_DESC_IDN_UNIT:
3096 *desc_len = hba->desc_size.unit_desc;
3097 break;
3098 case QUERY_DESC_IDN_INTERCONNECT:
3099 *desc_len = hba->desc_size.interc_desc;
3100 break;
3101 case QUERY_DESC_IDN_STRING:
3102 *desc_len = QUERY_DESC_MAX_SIZE;
3103 break;
3104 case QUERY_DESC_IDN_HEALTH:
3105 *desc_len = hba->desc_size.hlth_desc;
3106 break;
3107 case QUERY_DESC_IDN_RFU_0:
3108 case QUERY_DESC_IDN_RFU_1:
3109 *desc_len = 0;
3110 break;
3111 default:
3112 *desc_len = 0;
3113 return -EINVAL;
3114 }
3115 return 0;
3116}
3117EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3118
3119/**
3120 * ufshcd_read_desc_param - read the specified descriptor parameter
3121 * @hba: Pointer to adapter instance
3122 * @desc_id: descriptor idn value
3123 * @desc_index: descriptor index
3124 * @param_offset: offset of the parameter to read
3125 * @param_read_buf: pointer to buffer where parameter would be read
3126 * @param_size: sizeof(param_read_buf)
3127 *
3128 * Return 0 in case of success, non-zero otherwise
3129 */
3130int ufshcd_read_desc_param(struct ufs_hba *hba,
3131 enum desc_idn desc_id,
3132 int desc_index,
3133 u8 param_offset,
3134 u8 *param_read_buf,
3135 u8 param_size)
3136{
3137 int ret;
3138 u8 *desc_buf;
3139 int buff_len;
3140 bool is_kmalloc = true;
3141
3142 /* Safety check */
3143 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3144 return -EINVAL;
3145
3146 /* Get the max length of descriptor from structure filled up at probe
3147 * time.
3148 */
3149 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3150
3151 /* Sanity checks */
3152 if (ret || !buff_len) {
3153 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3154 __func__);
3155 return ret;
3156 }
3157
3158 /* Check whether we need temp memory */
3159 if (param_offset != 0 || param_size < buff_len) {
3160 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3161 if (!desc_buf)
3162 return -ENOMEM;
3163 } else {
3164 desc_buf = param_read_buf;
3165 is_kmalloc = false;
3166 }
3167
3168 /* Request for full descriptor */
3169 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3170 desc_id, desc_index, 0,
3171 desc_buf, &buff_len);
3172
3173 if (ret) {
3174 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3175 __func__, desc_id, desc_index, param_offset, ret);
3176 goto out;
3177 }
3178
3179 /* Sanity check */
3180 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3181 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3182 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3183 ret = -EINVAL;
3184 goto out;
3185 }
3186
3187 /* Check that we do not copy more data than is available */
3188 if (is_kmalloc && param_size > buff_len)
3189 param_size = buff_len;
3190
3191 if (is_kmalloc)
3192 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3193out:
3194 if (is_kmalloc)
3195 kfree(desc_buf);
3196 return ret;
3197}
3198
3199static inline int ufshcd_read_desc(struct ufs_hba *hba,
3200 enum desc_idn desc_id,
3201 int desc_index,
3202 u8 *buf,
3203 u32 size)
3204{
3205 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3206}
3207
3208static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3209 u8 *buf,
3210 u32 size)
3211{
3212 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3213}
3214
3215static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3216{
3217 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3218}
3219
3220/**
3221 * ufshcd_read_string_desc - read string descriptor
3222 * @hba: pointer to adapter instance
3223 * @desc_index: descriptor index
3224 * @buf: pointer to buffer where descriptor would be read
3225 * @size: size of buf
3226 * @ascii: if true convert from unicode to ascii characters
3227 *
3228 * Return 0 in case of success, non-zero otherwise
3229 */
3230int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3231 u8 *buf, u32 size, bool ascii)
3232{
3233 int err = 0;
3234
3235 err = ufshcd_read_desc(hba,
3236 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3237
3238 if (err) {
3239 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3240 __func__, QUERY_REQ_RETRIES, err);
3241 goto out;
3242 }
3243
3244 if (ascii) {
3245 int desc_len;
3246 int ascii_len;
3247 int i;
3248 char *buff_ascii;
3249
3250 desc_len = buf[0];
3251 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3252 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3253 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3254 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3255 __func__);
3256 err = -ENOMEM;
3257 goto out;
3258 }
3259
3260 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3261 if (!buff_ascii) {
3262 err = -ENOMEM;
3263 goto out;
3264 }
3265
3266 /*
3267 * the descriptor contains the string in UTF-16 format;
3268 * convert it to UTF-8 so it can be displayed
3269 */
3270 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3271 desc_len - QUERY_DESC_HDR_SIZE,
3272 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3273
3274 /* replace non-printable or non-ASCII characters with spaces */
3275 for (i = 0; i < ascii_len; i++)
3276 ufshcd_remove_non_printable(&buff_ascii[i]);
3277
3278 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3279 size - QUERY_DESC_HDR_SIZE);
3280 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3281 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3282 kfree(buff_ascii);
3283 }
3284out:
3285 return err;
3286}
3287
3288/**
3289 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3290 * @hba: Pointer to adapter instance
3291 * @lun: lun id
3292 * @param_offset: offset of the parameter to read
3293 * @param_read_buf: pointer to buffer where parameter would be read
3294 * @param_size: sizeof(param_read_buf)
3295 *
3296 * Return 0 in case of success, non-zero otherwise
3297 */
3298static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3299 int lun,
3300 enum unit_desc_param param_offset,
3301 u8 *param_read_buf,
3302 u32 param_size)
3303{
3304 /*
3305 * Unit descriptors are only available for general purpose LUs (LUN id
3306 * from 0 to 7) and RPMB Well known LU.
3307 */
3308 if (!ufs_is_valid_unit_desc_lun(lun))
3309 return -EOPNOTSUPP;
3310
3311 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3312 param_offset, param_read_buf, param_size);
3313}
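
/*
 * Usage sketch (illustrative): fetching the write-protect byte from a LU's
 * unit descriptor (UNIT_DESC_PARAM_LU_WR_PROTECT is assumed to be the
 * matching offset from ufs.h):
 *
 *   u8 lun_wp;
 *
 *   err = ufshcd_read_unit_desc_param(hba, lun,
 *                                     UNIT_DESC_PARAM_LU_WR_PROTECT,
 *                                     &lun_wp, sizeof(lun_wp));
 */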
3314
3315/**
3316 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3317 * @hba: per adapter instance
3318 *
3319 * 1. Allocate DMA memory for Command Descriptor array
3320 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3321 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3322 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3323 * (UTMRDL)
3324 * 4. Allocate memory for local reference block(lrb).
3325 *
3326 * Returns 0 for success, non-zero in case of failure
3327 */
3328static int ufshcd_memory_alloc(struct ufs_hba *hba)
3329{
3330 size_t utmrdl_size, utrdl_size, ucdl_size;
3331
3332 /* Allocate memory for UTP command descriptors */
3333 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3334 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3335 ucdl_size,
3336 &hba->ucdl_dma_addr,
3337 GFP_KERNEL);
3338
3339 /*
3340 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3341 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
3342 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
3343 * be aligned to 128 bytes as well
3344 */
3345 if (!hba->ucdl_base_addr ||
3346 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3347 dev_err(hba->dev,
3348 "Command Descriptor Memory allocation failed\n");
3349 goto out;
3350 }
3351
3352 /*
3353 * Allocate memory for UTP Transfer descriptors
3354 * UFSHCI requires 1024 byte alignment of UTRD
3355 */
3356 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3357 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3358 utrdl_size,
3359 &hba->utrdl_dma_addr,
3360 GFP_KERNEL);
3361 if (!hba->utrdl_base_addr ||
3362 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3363 dev_err(hba->dev,
3364 "Transfer Descriptor Memory allocation failed\n");
3365 goto out;
3366 }
3367
3368 /*
3369 * Allocate memory for UTP Task Management descriptors
3370 * UFSHCI requires 1024 byte alignment of UTMRD
3371 */
3372 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3373 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3374 utmrdl_size,
3375 &hba->utmrdl_dma_addr,
3376 GFP_KERNEL);
3377 if (!hba->utmrdl_base_addr ||
3378 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3379 dev_err(hba->dev,
3380 "Task Management Descriptor Memory allocation failed\n");
3381 goto out;
3382 }
3383
3384 /* Allocate memory for local reference block */
3385 hba->lrb = devm_kcalloc(hba->dev,
3386 hba->nutrs, sizeof(struct ufshcd_lrb),
3387 GFP_KERNEL);
3388 if (!hba->lrb) {
3389 dev_err(hba->dev, "LRB Memory allocation failed\n");
3390 goto out;
3391 }
3392 return 0;
3393out:
3394 return -ENOMEM;
3395}
3396
3397/**
3398 * ufshcd_host_memory_configure - configure local reference block with
3399 * memory offsets
3400 * @hba: per adapter instance
3401 *
3402 * Configure Host memory space
3403 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3404 * address.
3405 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3406 * and PRDT offset.
3407 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3408 * into local reference block.
3409 */
3410static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3411{
3412 struct utp_transfer_cmd_desc *cmd_descp;
3413 struct utp_transfer_req_desc *utrdlp;
3414 dma_addr_t cmd_desc_dma_addr;
3415 dma_addr_t cmd_desc_element_addr;
3416 u16 response_offset;
3417 u16 prdt_offset;
3418 int cmd_desc_size;
3419 int i;
3420
3421 utrdlp = hba->utrdl_base_addr;
3422 cmd_descp = hba->ucdl_base_addr;
3423
3424 response_offset =
3425 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3426 prdt_offset =
3427 offsetof(struct utp_transfer_cmd_desc, prd_table);
3428
3429 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3430 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3431
3432 for (i = 0; i < hba->nutrs; i++) {
3433 /* Configure UTRD with command descriptor base address */
3434 cmd_desc_element_addr =
3435 (cmd_desc_dma_addr + (cmd_desc_size * i));
3436 utrdlp[i].command_desc_base_addr_lo =
3437 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3438 utrdlp[i].command_desc_base_addr_hi =
3439 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3440
3441 /* Response upiu and prdt offset should be in double words */
3442 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3443 utrdlp[i].response_upiu_offset =
3444 cpu_to_le16(response_offset);
3445 utrdlp[i].prd_table_offset =
3446 cpu_to_le16(prdt_offset);
3447 utrdlp[i].response_upiu_length =
3448 cpu_to_le16(ALIGNED_UPIU_SIZE);
3449 } else {
3450 utrdlp[i].response_upiu_offset =
3451 cpu_to_le16((response_offset >> 2));
3452 utrdlp[i].prd_table_offset =
3453 cpu_to_le16((prdt_offset >> 2));
3454 utrdlp[i].response_upiu_length =
3455 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3456 }
3457
3458 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3459 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3460 (i * sizeof(struct utp_transfer_req_desc));
3461 hba->lrb[i].ucd_req_ptr =
3462 (struct utp_upiu_req *)(cmd_descp + i);
3463 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3464 hba->lrb[i].ucd_rsp_ptr =
3465 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3466 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3467 response_offset;
3468 hba->lrb[i].ucd_prdt_ptr =
3469 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3470 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3471 prdt_offset;
3472 }
3473}
3474
3475/**
3476 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3477 * @hba: per adapter instance
3478 *
3479 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3480 * in order to initialize the Unipro link startup procedure.
3481 * Once the Unipro links are up, the device connected to the controller
3482 * is detected.
3483 *
3484 * Returns 0 on success, non-zero value on failure
3485 */
3486static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3487{
3488 struct uic_command uic_cmd = {0};
3489 int ret;
3490
3491 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3492
3493 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3494 if (ret)
3495 dev_dbg(hba->dev,
3496 "dme-link-startup: error code %d\n", ret);
3497 return ret;
3498}
3499/**
3500 * ufshcd_dme_reset - UIC command for DME_RESET
3501 * @hba: per adapter instance
3502 *
3503 * DME_RESET command is issued in order to reset UniPro stack.
3504 * This function now deals with cold reset.
3505 *
3506 * Returns 0 on success, non-zero value on failure
3507 */
3508static int ufshcd_dme_reset(struct ufs_hba *hba)
3509{
3510 struct uic_command uic_cmd = {0};
3511 int ret;
3512
3513 uic_cmd.command = UIC_CMD_DME_RESET;
3514
3515 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3516 if (ret)
3517 dev_err(hba->dev,
3518 "dme-reset: error code %d\n", ret);
3519
3520 return ret;
3521}
3522
3523/**
3524 * ufshcd_dme_enable - UIC command for DME_ENABLE
3525 * @hba: per adapter instance
3526 *
3527 * DME_ENABLE command is issued in order to enable UniPro stack.
3528 *
3529 * Returns 0 on success, non-zero value on failure
3530 */
3531static int ufshcd_dme_enable(struct ufs_hba *hba)
3532{
3533 struct uic_command uic_cmd = {0};
3534 int ret;
3535
3536 uic_cmd.command = UIC_CMD_DME_ENABLE;
3537
3538 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3539 if (ret)
3540 dev_err(hba->dev,
3541 "dme-reset: error code %d\n", ret);
3542
3543 return ret;
3544}
3545
3546static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3547{
3548 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3549 unsigned long min_sleep_time_us;
3550
3551 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3552 return;
3553
3554 /*
3555 * last_dme_cmd_tstamp will be 0 only for 1st call to
3556 * this function
3557 */
3558 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3559 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3560 } else {
3561 unsigned long delta =
3562 (unsigned long) ktime_to_us(
3563 ktime_sub(ktime_get(),
3564 hba->last_dme_cmd_tstamp));
3565
3566 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3567 min_sleep_time_us =
3568 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3569 else
3570 return; /* no more delay required */
3571 }
3572
3573 /* allow sleep for extra 50us if needed */
3574 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3575}
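
/*
 * Worked example (illustrative): with UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
 * set and the previous DME command having completed 400 us ago,
 * delta = 400 < 1000, so this helper sleeps for 600..650 us to honour the
 * 1 ms minimum gap.
 */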
3576
3577/**
3578 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3579 * @hba: per adapter instance
3580 * @attr_sel: uic command argument1
3581 * @attr_set: attribute set type as uic command argument2
3582 * @mib_val: setting value as uic command argument3
3583 * @peer: indicate whether peer or local
3584 *
3585 * Returns 0 on success, non-zero value on failure
3586 */
3587int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3588 u8 attr_set, u32 mib_val, u8 peer)
3589{
3590 struct uic_command uic_cmd = {0};
3591 static const char *const action[] = {
3592 "dme-set",
3593 "dme-peer-set"
3594 };
3595 const char *set = action[!!peer];
3596 int ret;
3597 int retries = UFS_UIC_COMMAND_RETRIES;
3598
3599 uic_cmd.command = peer ?
3600 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3601 uic_cmd.argument1 = attr_sel;
3602 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3603 uic_cmd.argument3 = mib_val;
3604
3605 do {
3606 /* for peer attributes we retry upon failure */
3607 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3608 if (ret)
3609 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3610 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3611 } while (ret && peer && --retries);
3612
3613 if (ret)
3614 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3615 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3616 UFS_UIC_COMMAND_RETRIES - retries);
3617
3618 return ret;
3619}
3620EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
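
/*
 * Usage sketch (illustrative): callers are expected to go through the
 * ufshcd_dme_set()/ufshcd_dme_peer_set() convenience wrappers declared in
 * ufshcd.h, e.g. programming the active TX data lanes during a power mode
 * change:
 *
 *   ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lane_tx);
 */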
3621
3622/**
3623 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3624 * @hba: per adapter instance
3625 * @attr_sel: uic command argument1
3626 * @mib_val: the value of the attribute as returned by the UIC command
3627 * @peer: indicate whether peer or local
3628 *
3629 * Returns 0 on success, non-zero value on failure
3630 */
3631int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3632 u32 *mib_val, u8 peer)
3633{
3634 struct uic_command uic_cmd = {0};
3635 static const char *const action[] = {
3636 "dme-get",
3637 "dme-peer-get"
3638 };
3639 const char *get = action[!!peer];
3640 int ret;
3641 int retries = UFS_UIC_COMMAND_RETRIES;
3642 struct ufs_pa_layer_attr orig_pwr_info;
3643 struct ufs_pa_layer_attr temp_pwr_info;
3644 bool pwr_mode_change = false;
3645
3646 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3647 orig_pwr_info = hba->pwr_info;
3648 temp_pwr_info = orig_pwr_info;
3649
3650 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3651 orig_pwr_info.pwr_rx == FAST_MODE) {
3652 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3653 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3654 pwr_mode_change = true;
3655 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3656 orig_pwr_info.pwr_rx == SLOW_MODE) {
3657 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3658 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3659 pwr_mode_change = true;
3660 }
3661 if (pwr_mode_change) {
3662 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3663 if (ret)
3664 goto out;
3665 }
3666 }
3667
3668 uic_cmd.command = peer ?
3669 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3670 uic_cmd.argument1 = attr_sel;
3671
3672 do {
3673 /* for peer attributes we retry upon failure */
3674 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3675 if (ret)
3676 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3677 get, UIC_GET_ATTR_ID(attr_sel), ret);
3678 } while (ret && peer && --retries);
3679
3680 if (ret)
3681 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3682 get, UIC_GET_ATTR_ID(attr_sel),
3683 UFS_UIC_COMMAND_RETRIES - retries);
3684
3685 if (mib_val && !ret)
3686 *mib_val = uic_cmd.argument3;
3687
3688 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3689 && pwr_mode_change)
3690 ufshcd_change_power_mode(hba, &orig_pwr_info);
3691out:
3692 return ret;
3693}
3694EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3695
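/*
 * Illustrative sketch (not part of the driver): reading a local attribute
 * goes through the ufshcd_dme_get() wrapper from ufshcd.h, which returns
 * the value from argument3 of the completed UIC command, e.g.:
 *
 *	u32 lanes = 0;
 *
 *	if (!ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes))
 *		dev_dbg(hba->dev, "connected RX lanes: %u\n", lanes);
 */
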
3696/**
3697 * ufshcd_uic_pwr_ctrl - execute a UIC command that affects the link power
3698 * state and wait for it to take effect.
3699 *
3700 * @hba: per adapter instance
3701 * @cmd: UIC command to execute
3702 *
3703 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
3704 * DME_HIBERNATE_EXIT take some time to take effect on both the host and
3705 * device ends of the UniPro link, so their final completion is indicated by
3706 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
3707 * in addition to the normal UIC command completion status (UCCS). This
3708 * function only returns after the relevant status bits indicate completion.
3709 *
3710 * Returns 0 on success, non-zero value on failure
3711 */
3712static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3713{
3714 struct completion uic_async_done;
3715 unsigned long flags;
3716 u8 status;
3717 int ret;
3718 bool reenable_intr = false;
3719
3720 mutex_lock(&hba->uic_cmd_mutex);
3721 init_completion(&uic_async_done);
3722 ufshcd_add_delay_before_dme_cmd(hba);
3723
3724 spin_lock_irqsave(hba->host->host_lock, flags);
3725 hba->uic_async_done = &uic_async_done;
3726 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3727 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3728 /*
3729 * Make sure UIC command completion interrupt is disabled before
3730 * issuing UIC command.
3731 */
3732 wmb();
3733 reenable_intr = true;
3734 }
3735 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3736 spin_unlock_irqrestore(hba->host->host_lock, flags);
3737 if (ret) {
3738 dev_err(hba->dev,
3739 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3740 cmd->command, cmd->argument3, ret);
3741 goto out;
3742 }
3743
3744 if (!wait_for_completion_timeout(hba->uic_async_done,
3745 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3746 dev_err(hba->dev,
3747 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3748 cmd->command, cmd->argument3);
3749 ret = -ETIMEDOUT;
3750 goto out;
3751 }
3752
3753 status = ufshcd_get_upmcrs(hba);
3754 if (status != PWR_LOCAL) {
3755 dev_err(hba->dev,
3756 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3757 cmd->command, status);
3758 ret = (status != PWR_OK) ? status : -1;
3759 }
3760out:
3761 if (ret) {
3762 ufshcd_print_host_state(hba);
3763 ufshcd_print_pwr_info(hba);
3764 ufshcd_print_host_regs(hba);
3765 }
3766
3767 spin_lock_irqsave(hba->host->host_lock, flags);
3768 hba->active_uic_cmd = NULL;
3769 hba->uic_async_done = NULL;
3770 if (reenable_intr)
3771 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3772 spin_unlock_irqrestore(hba->host->host_lock, flags);
3773 mutex_unlock(&hba->uic_cmd_mutex);
3774
3775 return ret;
3776}
3777
3778/**
3779 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3780 * using DME_SET primitives.
3781 * @hba: per adapter instance
3782 * @mode: power mode value
3783 *
3784 * Returns 0 on success, non-zero value on failure
3785 */
3786static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3787{
3788 struct uic_command uic_cmd = {0};
3789 int ret;
3790
3791 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3792 ret = ufshcd_dme_set(hba,
3793 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3794 if (ret) {
3795 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3796 __func__, ret);
3797 goto out;
3798 }
3799 }
3800
3801 uic_cmd.command = UIC_CMD_DME_SET;
3802 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3803 uic_cmd.argument3 = mode;
3804 ufshcd_hold(hba, false);
3805 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3806 ufshcd_release(hba);
3807
3808out:
3809 return ret;
3810}
3811
3812static int ufshcd_link_recovery(struct ufs_hba *hba)
3813{
3814 int ret;
3815 unsigned long flags;
3816
3817 spin_lock_irqsave(hba->host->host_lock, flags);
3818 hba->ufshcd_state = UFSHCD_STATE_RESET;
3819 ufshcd_set_eh_in_progress(hba);
3820 spin_unlock_irqrestore(hba->host->host_lock, flags);
3821
3822 ret = ufshcd_host_reset_and_restore(hba);
3823
3824 spin_lock_irqsave(hba->host->host_lock, flags);
3825 if (ret)
3826 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3827 ufshcd_clear_eh_in_progress(hba);
3828 spin_unlock_irqrestore(hba->host->host_lock, flags);
3829
3830 if (ret)
3831 dev_err(hba->dev, "%s: link recovery failed, err %d",
3832 __func__, ret);
3833
3834 return ret;
3835}
3836
3837static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3838{
3839 int ret;
3840 struct uic_command uic_cmd = {0};
3841 ktime_t start = ktime_get();
3842
3843 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3844
3845 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3846 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3847 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3848 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3849
3850 if (ret) {
3851 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3852 __func__, ret);
3853
3854 /*
3855 * If link recovery fails, return an error so that the
3856 * caller doesn't retry the hibern8 enter again.
3857 */
3858 if (ufshcd_link_recovery(hba))
3859 ret = -ENOLINK;
3860 } else {
3861 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, POST_CHANGE);
3862 }
3863
3864 return ret;
3865}
3866
3867static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3868{
3869 int ret = 0, retries;
3870
3871 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3872 ret = __ufshcd_uic_hibern8_enter(hba);
3873 if (!ret || ret == -ENOLINK)
3874 goto out;
3875 }
3876out:
3877 return ret;
3878}
3879
3880static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3881{
3882 struct uic_command uic_cmd = {0};
3883 int ret;
3884 ktime_t start = ktime_get();
3885
3886 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3887
3888 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3889 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3890 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3891 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3892
3893 if (ret) {
3894 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3895 __func__, ret);
3896 ret = ufshcd_link_recovery(hba);
3897 } else {
3898 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3899 POST_CHANGE);
3900 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3901 hba->ufs_stats.hibern8_exit_cnt++;
3902 }
3903
3904 return ret;
3905}
3906
3907static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3908{
3909 unsigned long flags;
3910
3911 if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
3912 return;
3913
3914 spin_lock_irqsave(hba->host->host_lock, flags);
3915 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3916 spin_unlock_irqrestore(hba->host->host_lock, flags);
3917}
3918
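/*
 * Illustrative sketch (assumption: the UFSHCI_AHIBERN8_TIMER_MASK and
 * UFSHCI_AHIBERN8_SCALE_MASK field definitions from ufshci.h): a host
 * driver opts into Auto-Hibernate by populating hba->ahit before this
 * function runs, e.g. a 150 ms idle timeout using a 1 ms timer unit
 * (scale value 3):
 *
 *	hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *		    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 */
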
3919/**
3920 * ufshcd_init_pwr_info - set the POR (power on reset)
3921 * values in hba power info
3922 * @hba: per-adapter instance
3923 */
3924static void ufshcd_init_pwr_info(struct ufs_hba *hba)
3925{
3926 hba->pwr_info.gear_rx = UFS_PWM_G1;
3927 hba->pwr_info.gear_tx = UFS_PWM_G1;
3928 hba->pwr_info.lane_rx = 1;
3929 hba->pwr_info.lane_tx = 1;
3930 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3931 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3932 hba->pwr_info.hs_rate = 0;
3933}
3934
3935/**
3936 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3937 * @hba: per-adapter instance
3938 */
3939static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3940{
3941 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3942
3943 if (hba->max_pwr_info.is_valid)
3944 return 0;
3945
3946 pwr_info->pwr_tx = FAST_MODE;
3947 pwr_info->pwr_rx = FAST_MODE;
3948 pwr_info->hs_rate = PA_HS_MODE_B;
3949
3950 /* Get the connected lane count */
3951 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3952 &pwr_info->lane_rx);
3953 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3954 &pwr_info->lane_tx);
3955
3956 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3957 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3958 __func__,
3959 pwr_info->lane_rx,
3960 pwr_info->lane_tx);
3961 return -EINVAL;
3962 }
3963
3964 /*
3965 * First, get the maximum gear of HS speed.
3966 * If a zero value is read, there is no HS gear capability;
3967 * in that case, fall back to the maximum gear of PWM speed.
3968 */
3969 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3970 if (!pwr_info->gear_rx) {
3971 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3972 &pwr_info->gear_rx);
3973 if (!pwr_info->gear_rx) {
3974 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3975 __func__, pwr_info->gear_rx);
3976 return -EINVAL;
3977 }
3978 pwr_info->pwr_rx = SLOW_MODE;
3979 }
3980
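/*
 * Note (illustrative): the peer's maximum RX capability constrains the
 * local TX direction, which is why PA_MAXRXHSGEAR/PA_MAXRXPWMGEAR are
 * read from the peer below to fill in gear_tx.
 */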
3981 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3982 &pwr_info->gear_tx);
3983 if (!pwr_info->gear_tx) {
3984 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3985 &pwr_info->gear_tx);
3986 if (!pwr_info->gear_tx) {
3987 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3988 __func__, pwr_info->gear_tx);
3989 return -EINVAL;
3990 }
3991 pwr_info->pwr_tx = SLOW_MODE;
3992 }
3993
3994 hba->max_pwr_info.is_valid = true;
3995 return 0;
3996}
3997
3998static int ufshcd_change_power_mode(struct ufs_hba *hba,
3999 struct ufs_pa_layer_attr *pwr_mode)
4000{
4001 int ret;
4002
4003 /* if already configured to the requested pwr_mode */
4004 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4005 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4006 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4007 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4008 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4009 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4010 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4011 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4012 return 0;
4013 }
4014
4015 /*
4016 * Configure attributes for power mode change with below.
4017 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4018 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4019 * - PA_HSSERIES
4020 */
4021 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4022 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4023 pwr_mode->lane_rx);
4024 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4025 pwr_mode->pwr_rx == FAST_MODE)
4026 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4027 else
4028 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4029
4030 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4031 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4032 pwr_mode->lane_tx);
4033 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4034 pwr_mode->pwr_tx == FAST_MODE)
4035 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4036 else
4037 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4038
4039 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4040 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4041 pwr_mode->pwr_rx == FAST_MODE ||
4042 pwr_mode->pwr_tx == FAST_MODE)
4043 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4044 pwr_mode->hs_rate);
4045
4046 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4047 | pwr_mode->pwr_tx);
4048
4049 if (ret) {
4050 dev_err(hba->dev,
4051 "%s: power mode change failed %d\n", __func__, ret);
4052 } else {
4053 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4054 pwr_mode);
4055
4056 memcpy(&hba->pwr_info, pwr_mode,
4057 sizeof(struct ufs_pa_layer_attr));
4058 }
4059
4060 return ret;
4061}
4062
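/*
 * The PA_PWRMODE argument built above packs the RX power mode into the
 * upper nibble and the TX power mode into the lower nibble. For example,
 * with FAST_MODE == 1 (see unipro.h), requesting fast mode in both
 * directions yields (FAST_MODE << 4) | FAST_MODE == 0x11.
 */
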
4063/**
4064 * ufshcd_config_pwr_mode - configure a new power mode
4065 * @hba: per-adapter instance
4066 * @desired_pwr_mode: desired power configuration
4067 */
4068int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4069 struct ufs_pa_layer_attr *desired_pwr_mode)
4070{
4071 struct ufs_pa_layer_attr final_params = { 0 };
4072 int ret;
4073
4074 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4075 desired_pwr_mode, &final_params);
4076
4077 if (ret)
4078 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4079
4080 ret = ufshcd_change_power_mode(hba, &final_params);
4081 if (!ret)
4082 ufshcd_print_pwr_info(hba);
4083
4084 return ret;
4085}
4086EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4087
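/*
 * Illustrative sketch (not part of the driver): the probe path typically
 * pairs this export with the negotiated maximum capability, roughly:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 *	else
 *		dev_err(hba->dev, "failed to read max power mode\n");
 */
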
4088/**
4089 * ufshcd_complete_dev_init() - checks device readiness
4090 * @hba: per-adapter instance
4091 *
4092 * Set fDeviceInit flag and poll until device toggles it.
4093 */
4094static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4095{
4096 int i;
4097 int err;
4098 bool flag_res = true;
4099
4100 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4101 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4102 if (err) {
4103 dev_err(hba->dev,
4104 "%s setting fDeviceInit flag failed with error %d\n",
4105 __func__, err);
4106 goto out;
4107 }
4108
4109 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4110 for (i = 0; i < 1000 && !err && flag_res; i++)
4111 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4112 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4113
4114 if (err)
4115 dev_err(hba->dev,
4116 "%s reading fDeviceInit flag failed with error %d\n",
4117 __func__, err);
4118 else if (flag_res)
4119 dev_err(hba->dev,
4120 "%s fDeviceInit was not cleared by the device\n",
4121 __func__);
4122
4123out:
4124 return err;
4125}
4126
4127/**
4128 * ufshcd_make_hba_operational - Make UFS controller operational
4129 * @hba: per adapter instance
4130 *
4131 * To bring UFS host controller to operational state,
4132 * 1. Enable required interrupts
4133 * 2. Configure interrupt aggregation
4134 * 3. Program UTRL and UTMRL base address
4135 * 4. Configure run-stop-registers
4136 *
4137 * Returns 0 on success, non-zero value on failure
4138 */
4139static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4140{
4141 int err = 0;
4142 u32 reg;
4143
4144 /* Enable required interrupts */
4145 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4146
4147 /* Configure interrupt aggregation */
4148 if (ufshcd_is_intr_aggr_allowed(hba))
4149 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4150 else
4151 ufshcd_disable_intr_aggr(hba);
4152
4153 /* Configure UTRL and UTMRL base address registers */
4154 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4155 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4156 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4157 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4158 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4159 REG_UTP_TASK_REQ_LIST_BASE_L);
4160 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4161 REG_UTP_TASK_REQ_LIST_BASE_H);
4162
4163 /*
4164 * Make sure base address and interrupt setup are updated before
4165 * enabling the run/stop registers below.
4166 */
4167 wmb();
4168
4169 /*
4170 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4171 */
4172 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4173 if (!(ufshcd_get_lists_status(reg))) {
4174 ufshcd_enable_run_stop_reg(hba);
4175 } else {
4176 dev_err(hba->dev,
4177 "Host controller not ready to process requests");
4178 err = -EIO;
4179 goto out;
4180 }
4181
4182out:
4183 return err;
4184}
4185
4186/**
4187 * ufshcd_hba_stop - Send controller to reset state
4188 * @hba: per adapter instance
4189 * @can_sleep: perform sleep or just spin
4190 */
4191static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4192{
4193 int err;
4194
4195 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4196 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4197 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4198 10, 1, can_sleep);
4199 if (err)
4200 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4201}
4202
4203/**
4204 * ufshcd_hba_execute_hce - initialize the controller
4205 * @hba: per adapter instance
4206 *
4207 * The controller resets itself and the controller firmware initialization
4208 * sequence kicks off. When the controller is ready it will set
4209 * the Host Controller Enable bit to 1.
4210 *
4211 * Returns 0 on success, non-zero value on failure
4212 */
4213static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4214{
4215 int retry;
4216
4217 /*
4218 * The msleep(1) and msleep(5) calls below might result in an actual
4219 * sleep of up to msleep(20), but they were necessary to send the UFS
4220 * FPGA to reset mode during development and testing of this driver.
4221 * msleep can be changed to mdelay and the retry count reduced per controller.
4222 */
4223 if (!ufshcd_is_hba_active(hba))
4224 /* change controller state to "reset state" */
4225 ufshcd_hba_stop(hba, true);
4226
4227 /* UniPro link is disabled at this point */
4228 ufshcd_set_link_off(hba);
4229
4230 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4231
4232 /* start controller initialization sequence */
4233 ufshcd_hba_start(hba);
4234
4235 /*
4236 * To initialize a UFS host controller, the HCE bit must be set to 1.
4237 * During initialization the HCE bit value changes from 1->0->1.
4238 * When the host controller completes the initialization sequence,
4239 * it sets the HCE bit back to 1. The same HCE bit is then read back
4240 * to check whether the controller has completed initialization.
4241 * Without this delay, the HCE = 1 value written by the previous
4242 * instruction might be read back prematurely.
4243 * This delay can be changed based on the controller.
4244 */
4245 msleep(1);
4246
4247 /* wait for the host controller to complete initialization */
4248 retry = 10;
4249 while (ufshcd_is_hba_active(hba)) {
4250 if (retry) {
4251 retry--;
4252 } else {
4253 dev_err(hba->dev,
4254 "Controller enable failed\n");
4255 return -EIO;
4256 }
4257 msleep(5);
4258 }
4259
4260 /* enable UIC related interrupts */
4261 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4262
4263 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4264
4265 return 0;
4266}
4267
4268static int ufshcd_hba_enable(struct ufs_hba *hba)
4269{
4270 int ret;
4271
4272 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4273 ufshcd_set_link_off(hba);
4274 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4275
4276 /* enable UIC related interrupts */
4277 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4278 ret = ufshcd_dme_reset(hba);
4279 if (!ret) {
4280 ret = ufshcd_dme_enable(hba);
4281 if (!ret)
4282 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4283 if (ret)
4284 dev_err(hba->dev,
4285 "Host controller enable failed with non-hce\n");
4286 }
4287 } else {
4288 ret = ufshcd_hba_execute_hce(hba);
4289 }
4290
4291 return ret;
4292}

4293static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4294{
4295 int tx_lanes, i, err = 0;
4296
4297 if (!peer)
4298 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4299 &tx_lanes);
4300 else
4301 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4302 &tx_lanes);
4303 for (i = 0; i < tx_lanes; i++) {
4304 if (!peer)
4305 err = ufshcd_dme_set(hba,
4306 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4307 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4308 0);
4309 else
4310 err = ufshcd_dme_peer_set(hba,
4311 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4312 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4313 0);
4314 if (err) {
4315 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4316 __func__, peer, i, err);
4317 break;
4318 }
4319 }
4320
4321 return err;
4322}
4323
4324static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4325{
4326 return ufshcd_disable_tx_lcc(hba, true);
4327}
4328
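/*
 * Note on selectors (illustrative): UIC_ARG_MIB(attr) addresses an
 * attribute with GenSelectorIndex 0, while UIC_ARG_MIB_SEL(attr, sel)
 * targets a specific instance, as the per-lane loop above does with
 * UIC_ARG_MIB_SEL(TX_LCC_ENABLE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)).
 */
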
4329/**
4330 * ufshcd_link_startup - Initialize unipro link startup
4331 * @hba: per adapter instance
4332 *
4333 * Returns 0 for success, non-zero in case of failure
4334 */
4335static int ufshcd_link_startup(struct ufs_hba *hba)
4336{
4337 int ret;
4338 int retries = DME_LINKSTARTUP_RETRIES;
4339 bool link_startup_again = false;
4340
4341 /*
4342 * If the UFS device isn't active then we will have to issue link startup
4343 * twice to make sure the device state moves to active.
4344 */
4345 if (!ufshcd_is_ufs_dev_active(hba))
4346 link_startup_again = true;
4347
4348link_startup:
4349 do {
4350 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4351
4352 ret = ufshcd_dme_link_startup(hba);
4353
4354 /* check if device is detected by inter-connect layer */
4355 if (!ret && !ufshcd_is_device_present(hba)) {
4356 dev_err(hba->dev, "%s: Device not present\n", __func__);
4357 ret = -ENXIO;
4358 goto out;
4359 }
4360
4361 /*
4362 * A DME link lost indication is only received when the link is up,
4363 * but we can't be sure the link is up until link startup
4364 * succeeds. So reset the local UniPro and try again.
4365 */
4366 if (ret && ufshcd_hba_enable(hba))
4367 goto out;
4368 } while (ret && retries--);
4369
4370 if (ret)
4371 /* failed to get the link up... give up */
4372 goto out;
4373
4374 if (link_startup_again) {
4375 link_startup_again = false;
4376 retries = DME_LINKSTARTUP_RETRIES;
4377 goto link_startup;
4378 }
4379
4380 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4381 ufshcd_init_pwr_info(hba);
4382 ufshcd_print_pwr_info(hba);
4383
4384 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4385 ret = ufshcd_disable_device_tx_lcc(hba);
4386 if (ret)
4387 goto out;
4388 }
4389
4390 /* Include any host controller configuration via UIC commands */
4391 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4392 if (ret)
4393 goto out;
4394
4395 ret = ufshcd_make_hba_operational(hba);
4396out:
4397 if (ret) {
4398 dev_err(hba->dev, "link startup failed %d\n", ret);
4399 ufshcd_print_host_state(hba);
4400 ufshcd_print_pwr_info(hba);
4401 ufshcd_print_host_regs(hba);
4402 }
4403 return ret;
4404}
4405
4406/**
4407 * ufshcd_verify_dev_init() - Verify device initialization
4408 * @hba: per-adapter instance
4409 *
4410 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4411 * device Transport Protocol (UTP) layer is ready after a reset.
4412 * If the UTP layer at the device side is not initialized, it may
4413 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4414 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4415 */
4416static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4417{
4418 int err = 0;
4419 int retries;
4420
4421 ufshcd_hold(hba, false);
4422 mutex_lock(&hba->dev_cmd.lock);
4423 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4424 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4425 NOP_OUT_TIMEOUT);
4426
4427 if (!err || err == -ETIMEDOUT)
4428 break;
4429
4430 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4431 }
4432 mutex_unlock(&hba->dev_cmd.lock);
4433 ufshcd_release(hba);
4434
4435 if (err)
4436 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4437 return err;
4438}
4439
4440/**
4441 * ufshcd_set_queue_depth - set lun queue depth
4442 * @sdev: pointer to SCSI device
4443 *
4444 * Read the bLUQueueDepth value and activate SCSI tagged command
4445 * queueing. For WLUNs, the queue depth is set to 1. For best-effort
4446 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4447 * value that the host can queue.
4448 */
4449static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4450{
4451 int ret = 0;
4452 u8 lun_qdepth;
4453 struct ufs_hba *hba;
4454
4455 hba = shost_priv(sdev->host);
4456
4457 lun_qdepth = hba->nutrs;
4458 ret = ufshcd_read_unit_desc_param(hba,
4459 ufshcd_scsi_to_upiu_lun(sdev->lun),
4460 UNIT_DESC_PARAM_LU_Q_DEPTH,
4461 &lun_qdepth,
4462 sizeof(lun_qdepth));
4463
4464 /* Some WLUNs don't support the unit descriptor */
4465 if (ret == -EOPNOTSUPP)
4466 lun_qdepth = 1;
4467 else if (!lun_qdepth)
4468 /* eventually, we can figure out the real queue depth */
4469 lun_qdepth = hba->nutrs;
4470 else
4471 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4472
4473 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4474 __func__, lun_qdepth);
4475 scsi_change_queue_depth(sdev, lun_qdepth);
4476}
4477
4478/**
4479 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4480 * @hba: per-adapter instance
4481 * @lun: UFS device lun id
4482 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4483 *
4484 * Returns 0 in case of success, with the b_lu_write_protect status returned
4485 * in the @b_lu_write_protect parameter.
4486 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4487 * Returns -EINVAL in case of invalid parameters passed to this function.
4488 */
4489static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4490 u8 lun,
4491 u8 *b_lu_write_protect)
4492{
4493 int ret;
4494
4495 if (!b_lu_write_protect)
4496 ret = -EINVAL;
4497 /*
4498 * According to UFS device spec, RPMB LU can't be write
4499 * protected so skip reading bLUWriteProtect parameter for
4500 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4501 */
4502 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4503 ret = -ENOTSUPP;
4504 else
4505 ret = ufshcd_read_unit_desc_param(hba,
4506 lun,
4507 UNIT_DESC_PARAM_LU_WR_PROTECT,
4508 b_lu_write_protect,
4509 sizeof(*b_lu_write_protect));
4510 return ret;
4511}
4512
4513/**
4514 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4515 * status
4516 * @hba: per-adapter instance
4517 * @sdev: pointer to SCSI device
4518 *
4519 */
4520static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4521 struct scsi_device *sdev)
4522{
4523 if (hba->dev_info.f_power_on_wp_en &&
4524 !hba->dev_info.is_lu_power_on_wp) {
4525 u8 b_lu_write_protect;
4526
4527 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4528 &b_lu_write_protect) &&
4529 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4530 hba->dev_info.is_lu_power_on_wp = true;
4531 }
4532}
4533
4534/**
4535 * ufshcd_slave_alloc - handle initial SCSI device configurations
4536 * @sdev: pointer to SCSI device
4537 *
4538 * Always returns 0 (success)
4539 */
4540static int ufshcd_slave_alloc(struct scsi_device *sdev)
4541{
4542 struct ufs_hba *hba;
4543
4544 hba = shost_priv(sdev->host);
4545
4546 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4547 sdev->use_10_for_ms = 1;
4548
4549 /* allow SCSI layer to restart the device in case of errors */
4550 sdev->allow_restart = 1;
4551
4552 /* REPORT SUPPORTED OPERATION CODES is not supported */
4553 sdev->no_report_opcodes = 1;
4554
4555 /* WRITE_SAME command is not supported */
4556 sdev->no_write_same = 1;
4557
4558 ufshcd_set_queue_depth(sdev);
4559
4560 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4561
4562 return 0;
4563}
4564
4565/**
4566 * ufshcd_change_queue_depth - change queue depth
4567 * @sdev: pointer to SCSI device
4568 * @depth: required depth to set
4569 *
4570 * Change queue depth and make sure the max. limits are not crossed.
4571 */
4572static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4573{
4574 struct ufs_hba *hba = shost_priv(sdev->host);
4575
4576 if (depth > hba->nutrs)
4577 depth = hba->nutrs;
4578 return scsi_change_queue_depth(sdev, depth);
4579}
4580
4581/**
4582 * ufshcd_slave_configure - adjust SCSI device configurations
4583 * @sdev: pointer to SCSI device
4584 */
4585static int ufshcd_slave_configure(struct scsi_device *sdev)
4586{
4587 struct request_queue *q = sdev->request_queue;
4588
4589 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4590 return 0;
4591}
4592
4593/**
4594 * ufshcd_slave_destroy - remove SCSI device configurations
4595 * @sdev: pointer to SCSI device
4596 */
4597static void ufshcd_slave_destroy(struct scsi_device *sdev)
4598{
4599 struct ufs_hba *hba;
4600
4601 hba = shost_priv(sdev->host);
4602 /* Drop the reference as it won't be needed anymore */
4603 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4604 unsigned long flags;
4605
4606 spin_lock_irqsave(hba->host->host_lock, flags);
4607 hba->sdev_ufs_device = NULL;
4608 spin_unlock_irqrestore(hba->host->host_lock, flags);
4609 }
4610}
4611
4612/**
4613 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4614 * @lrbp: pointer to local reference block of completed command
4615 * @scsi_status: SCSI command status
4616 *
4617 * Returns a value based on the SCSI command status
4618 */
4619static inline int
4620ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4621{
4622 int result = 0;
4623
4624 switch (scsi_status) {
4625 case SAM_STAT_CHECK_CONDITION:
4626 ufshcd_copy_sense_data(lrbp);
4627 /* fallthrough */
4628 case SAM_STAT_GOOD:
4629 result |= DID_OK << 16 |
4630 COMMAND_COMPLETE << 8 |
4631 scsi_status;
4632 break;
4633 case SAM_STAT_TASK_SET_FULL:
4634 case SAM_STAT_BUSY:
4635 case SAM_STAT_TASK_ABORTED:
4636 ufshcd_copy_sense_data(lrbp);
4637 result |= scsi_status;
4638 break;
4639 default:
4640 result |= DID_ERROR << 16;
4641 break;
4642 } /* end of switch */
4643
4644 return result;
4645}
4646
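/*
 * Worked example (illustrative): the SCSI result word packs the host byte
 * into bits 23:16, the message byte into bits 15:8 and the SCSI status
 * into bits 7:0, so a CHECK CONDITION completion above yields
 * (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_CHECK_CONDITION,
 * i.e. 0x00000002, since DID_OK and COMMAND_COMPLETE are both zero.
 */
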
4647/**
4648 * ufshcd_transfer_rsp_status - Get overall status of the response
4649 * @hba: per adapter instance
4650 * @lrbp: pointer to local reference block of completed command
4651 *
4652 * Returns result of the command to notify SCSI midlayer
4653 */
4654static inline int
4655ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4656{
4657 int result = 0;
4658 int scsi_status;
4659 int ocs;
4660
4661 /* overall command status of utrd */
4662 ocs = ufshcd_get_tr_ocs(lrbp);
4663
4664 switch (ocs) {
4665 case OCS_SUCCESS:
4666 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4667 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4668 switch (result) {
4669 case UPIU_TRANSACTION_RESPONSE:
4670 /*
4671 * get the response UPIU result to extract
4672 * the SCSI command status
4673 */
4674 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4675
4676 /*
4677 * get the result based on SCSI status response
4678 * to notify the SCSI midlayer of the command status
4679 */
4680 scsi_status = result & MASK_SCSI_STATUS;
4681 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4682
4683 /*
4684 * Currently we only support BKOPs exception
4685 * events, so BKOPs exception events can be
4686 * ignored during power management callbacks.
4687 * A BKOPs exception event is not expected in the
4688 * runtime suspend callback as it allows urgent
4689 * bkops. During system suspend we forcefully
4690 * disable bkops anyway, and if urgent bkops is
4691 * needed it will be re-enabled on system resume.
4692 * A long-term solution could be to abort system
4693 * suspend if the UFS device needs urgent BKOPs.
4694 */
4695 if (!hba->pm_op_in_progress &&
4696 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4697 schedule_work(&hba->eeh_work);
4698 break;
4699 case UPIU_TRANSACTION_REJECT_UPIU:
4700 /* TODO: handle Reject UPIU Response */
4701 result = DID_ERROR << 16;
4702 dev_err(hba->dev,
4703 "Reject UPIU not fully implemented\n");
4704 break;
4705 default:
4706 dev_err(hba->dev,
4707 "Unexpected request response code = %x\n",
4708 result);
4709 result = DID_ERROR << 16;
4710 break;
4711 }
4712 break;
4713 case OCS_ABORTED:
4714 result |= DID_ABORT << 16;
4715 break;
4716 case OCS_INVALID_COMMAND_STATUS:
4717 result |= DID_REQUEUE << 16;
4718 break;
4719 case OCS_INVALID_CMD_TABLE_ATTR:
4720 case OCS_INVALID_PRDT_ATTR:
4721 case OCS_MISMATCH_DATA_BUF_SIZE:
4722 case OCS_MISMATCH_RESP_UPIU_SIZE:
4723 case OCS_PEER_COMM_FAILURE:
4724 case OCS_FATAL_ERROR:
4725 default:
4726 result |= DID_ERROR << 16;
4727 dev_err(hba->dev,
4728 "OCS error from controller = %x for tag %d\n",
4729 ocs, lrbp->task_tag);
4730 ufshcd_print_host_regs(hba);
4731 ufshcd_print_host_state(hba);
4732 break;
4733 } /* end of switch */
4734
4735 if (host_byte(result) != DID_OK)
4736 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4737 return result;
4738}
4739
4740/**
4741 * ufshcd_uic_cmd_compl - handle completion of uic command
4742 * @hba: per adapter instance
4743 * @intr_status: interrupt status generated by the controller
4744 */
4745static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4746{
4747 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4748 hba->active_uic_cmd->argument2 |=
4749 ufshcd_get_uic_cmd_result(hba);
4750 hba->active_uic_cmd->argument3 =
4751 ufshcd_get_dme_attr_val(hba);
4752 complete(&hba->active_uic_cmd->done);
4753 }
4754
4755 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4756 complete(hba->uic_async_done);
4757}
4758
4759/**
4760 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4761 * @hba: per adapter instance
4762 * @completed_reqs: requests to complete
4763 */
4764static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4765 unsigned long completed_reqs)
4766{
4767 struct ufshcd_lrb *lrbp;
4768 struct scsi_cmnd *cmd;
4769 int result;
4770 int index;
4771
4772 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4773 lrbp = &hba->lrb[index];
4774 cmd = lrbp->cmd;
4775 if (cmd) {
4776 ufshcd_add_command_trace(hba, index, "complete");
4777 result = ufshcd_transfer_rsp_status(hba, lrbp);
4778 scsi_dma_unmap(cmd);
4779 cmd->result = result;
4780 /* Mark completed command as NULL in LRB */
4781 lrbp->cmd = NULL;
4782 clear_bit_unlock(index, &hba->lrb_in_use);
4783 /* Do not touch lrbp after scsi done */
4784 cmd->scsi_done(cmd);
4785 __ufshcd_release(hba);
4786 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4787 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4788 if (hba->dev_cmd.complete) {
4789 ufshcd_add_command_trace(hba, index,
4790 "dev_complete");
4791 complete(hba->dev_cmd.complete);
4792 }
4793 }
4794 if (ufshcd_is_clkscaling_supported(hba))
4795 hba->clk_scaling.active_reqs--;
4796
4797 lrbp->compl_time_stamp = ktime_get();
4798 }
4799
4800 /* clear corresponding bits of completed commands */
4801 hba->outstanding_reqs ^= completed_reqs;
4802
4803 ufshcd_clk_scaling_update_busy(hba);
4804
4805 /* we might have freed some tags above */
4806 wake_up(&hba->dev_cmd.tag_wq);
4807}
4808
4809/**
4810 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4811 * @hba: per adapter instance
4812 */
4813static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4814{
4815 unsigned long completed_reqs;
4816 u32 tr_doorbell;
4817
4818 /*
4819 * Resetting interrupt aggregation counters first and reading the
4820 * DOOR_BELL afterward allows us to handle all completed requests. To
4821 * prevent starvation of other interrupts, the DB is read once after
4822 * reset. The downside is the possibility of a false interrupt if the
4823 * device completes another request between the reset and the DB read.
4824 */
4825 if (ufshcd_is_intr_aggr_allowed(hba) &&
4826 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4827 ufshcd_reset_intr_aggr(hba);
4828
4829 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
4830 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4831
4832 __ufshcd_transfer_req_compl(hba, completed_reqs);
4833}
4834
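/*
 * Worked example (illustrative): if hba->outstanding_reqs is 0b1011 and
 * the doorbell now reads 0b0001, then 0b0001 ^ 0b1011 == 0b1010, i.e.
 * tags 1 and 3 have completed while tag 0 is still in flight.
 */
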
4835/**
4836 * ufshcd_disable_ee - disable exception event
4837 * @hba: per-adapter instance
4838 * @mask: exception event to disable
4839 *
4840 * Disables exception event in the device so that the EVENT_ALERT
4841 * bit is not set.
4842 *
4843 * Returns zero on success, non-zero error value on failure.
4844 */
4845static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4846{
4847 int err = 0;
4848 u32 val;
4849
4850 if (!(hba->ee_ctrl_mask & mask))
4851 goto out;
4852
4853 val = hba->ee_ctrl_mask & ~mask;
4854 val &= MASK_EE_STATUS;
4855 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4856 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4857 if (!err)
4858 hba->ee_ctrl_mask &= ~mask;
4859out:
4860 return err;
4861}
4862
4863/**
4864 * ufshcd_enable_ee - enable exception event
4865 * @hba: per-adapter instance
4866 * @mask: exception event to enable
4867 *
4868 * Enable corresponding exception event in the device to allow
4869 * device to alert host in critical scenarios.
4870 *
4871 * Returns zero on success, non-zero error value on failure.
4872 */
4873static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4874{
4875 int err = 0;
4876 u32 val;
4877
4878 if (hba->ee_ctrl_mask & mask)
4879 goto out;
4880
4881 val = hba->ee_ctrl_mask | mask;
4882 val &= MASK_EE_STATUS;
4883 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4884 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4885 if (!err)
4886 hba->ee_ctrl_mask |= mask;
4887out:
4888 return err;
4889}
4890
4891/**
4892 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4893 * @hba: per-adapter instance
4894 *
4895 * Allow the device to manage background operations on its own. Enabling
4896 * this might lead to inconsistent latencies during normal data transfers
4897 * as the device is free to schedule background operations in whatever
4898 * way it sees fit.
4899 *
4900 * Returns zero on success, non-zero on failure.
4901 */
4902static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4903{
4904 int err = 0;
4905
4906 if (hba->auto_bkops_enabled)
4907 goto out;
4908
4909 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4910 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4911 if (err) {
4912 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4913 __func__, err);
4914 goto out;
4915 }
4916
4917 hba->auto_bkops_enabled = true;
4918 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4919
4920 /* No need of URGENT_BKOPS exception from the device */
4921 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4922 if (err)
4923 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4924 __func__, err);
4925out:
4926 return err;
4927}
4928
4929/**
4930 * ufshcd_disable_auto_bkops - block device in doing background operations
4931 * @hba: per-adapter instance
4932 *
4933 * Disabling background operations improves command response latency but
4934 * has the drawback that the device may move into a critical state where
4935 * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
4936 * the host is idle so that BKOPS are managed effectively without any
4937 * negative impact.
4938 *
4939 * Returns zero on success, non-zero on failure.
4940 */
4941static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4942{
4943 int err = 0;
4944
4945 if (!hba->auto_bkops_enabled)
4946 goto out;
4947
4948 /*
4949 * If host assisted BKOPs is to be enabled, make sure
4950 * urgent bkops exception is allowed.
4951 */
4952 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4953 if (err) {
4954 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4955 __func__, err);
4956 goto out;
4957 }
4958
4959 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4960 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4961 if (err) {
4962 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4963 __func__, err);
4964 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4965 goto out;
4966 }
4967
4968 hba->auto_bkops_enabled = false;
4969 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
4970out:
4971 return err;
4972}
4973
4974/**
4975 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
4976 * @hba: per adapter instance
4977 *
4978 * After a device reset the device may toggle the BKOPS_EN flag
4979 * back to its default value. The s/w tracking variables should be
4980 * updated as well. This function changes the auto-bkops state based on
4981 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
4982 */
4983static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
4984{
4985 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
4986 hba->auto_bkops_enabled = false;
4987 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4988 ufshcd_enable_auto_bkops(hba);
4989 } else {
4990 hba->auto_bkops_enabled = true;
4991 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
4992 ufshcd_disable_auto_bkops(hba);
4993 }
4994}
4995
4996static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
4997{
4998 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4999 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5000}
5001
5002/**
5003 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5004 * @hba: per-adapter instance
5005 * @status: bkops_status value
5006 *
5007 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5008 * flag in the device to permit background operations if the device's
5009 * bkops_status is greater than or equal to the "status" argument passed to
5010 * this function; disable it otherwise.
5011 *
5012 * Returns 0 for success, non-zero in case of failure.
5013 *
5014 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5015 * to know whether auto bkops is enabled or disabled after this function
5016 * returns control to it.
5017 */
5018static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5019 enum bkops_status status)
5020{
5021 int err;
5022 u32 curr_status = 0;
5023
5024 err = ufshcd_get_bkops_status(hba, &curr_status);
5025 if (err) {
5026 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5027 __func__, err);
5028 goto out;
5029 } else if (curr_status > BKOPS_STATUS_MAX) {
5030 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5031 __func__, curr_status);
5032 err = -EINVAL;
5033 goto out;
5034 }
5035
5036 if (curr_status >= status)
5037 err = ufshcd_enable_auto_bkops(hba);
5038 else
5039 err = ufshcd_disable_auto_bkops(hba);
5040out:
5041 return err;
5042}
5043
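/*
 * Illustrative sketch (not part of the driver): a suspend path can use
 * this helper to keep auto-bkops on only when the device reports at least
 * a performance impact, roughly:
 *
 *	int err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *
 *	if (err)
 *		dev_err(hba->dev, "bkops control failed: %d\n", err);
 */
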
5044/**
5045 * ufshcd_urgent_bkops - handle urgent bkops exception event
5046 * @hba: per-adapter instance
5047 *
5048 * Enable fBackgroundOpsEn flag in the device to permit background
5049 * operations.
5050 *
5051 * Returns 0 if BKOPs is enabled, 1 if BKOPs is not enabled, and a
5052 * negative error value for any other failure.
5053 */
5054static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5055{
5056 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5057}
5058
5059static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5060{
5061 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5062 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5063}
5064
5065static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5066{
5067 int err;
5068 u32 curr_status = 0;
5069
5070 if (hba->is_urgent_bkops_lvl_checked)
5071 goto enable_auto_bkops;
5072
5073 err = ufshcd_get_bkops_status(hba, &curr_status);
5074 if (err) {
5075 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5076 __func__, err);
5077 goto out;
5078 }
5079
5080 /*
5081 * We are seeing that some devices raise the urgent bkops
5082 * exception event even when the BKOPS status doesn't indicate
5083 * performance impacted or critical. Handle such devices by
5084 * determining their urgent bkops status at runtime.
5085 */
5086 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5087 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5088 __func__, curr_status);
5089 /* update the current status as the urgent bkops level */
5090 hba->urgent_bkops_lvl = curr_status;
5091 hba->is_urgent_bkops_lvl_checked = true;
5092 }
5093
5094enable_auto_bkops:
5095 err = ufshcd_enable_auto_bkops(hba);
5096out:
5097 if (err < 0)
5098 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5099 __func__, err);
5100}
5101
5102/**
5103 * ufshcd_exception_event_handler - handle exceptions raised by device
5104 * @work: pointer to work data
5105 *
5106 * Read bExceptionEventStatus attribute from the device and handle the
5107 * exception event accordingly.
5108 */
5109static void ufshcd_exception_event_handler(struct work_struct *work)
5110{
5111 struct ufs_hba *hba;
5112 int err;
5113 u32 status = 0;
5114 hba = container_of(work, struct ufs_hba, eeh_work);
5115
5116 pm_runtime_get_sync(hba->dev);
5117 scsi_block_requests(hba->host);
5118 err = ufshcd_get_ee_status(hba, &status);
5119 if (err) {
5120 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5121 __func__, err);
5122 goto out;
5123 }
5124
5125 status &= hba->ee_ctrl_mask;
5126
5127 if (status & MASK_EE_URGENT_BKOPS)
5128 ufshcd_bkops_exception_event_handler(hba);
5129
5130out:
5131 scsi_unblock_requests(hba->host);
5132 pm_runtime_put_sync(hba->dev);
5133 return;
5134}
5135
5136/* Complete requests that have door-bell cleared */
5137static void ufshcd_complete_requests(struct ufs_hba *hba)
5138{
5139 ufshcd_transfer_req_compl(hba);
5140 ufshcd_tmc_handler(hba);
5141}
5142
5143/**
5144 * ufshcd_quirk_dl_nac_errors - Check whether error handling is required
5145 * to recover from DL NAC errors.
5146 * @hba: per-adapter instance
5147 *
5148 * Returns true if error handling is required, false otherwise
5149 */
5150static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5151{
5152 unsigned long flags;
5153 bool err_handling = true;
5154
5155 spin_lock_irqsave(hba->host->host_lock, flags);
5156 /*
5157 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5158 * device fatal error and/or DL NAC & REPLAY timeout errors.
5159 */
5160 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5161 goto out;
5162
5163 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5164 ((hba->saved_err & UIC_ERROR) &&
5165 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5166 goto out;
5167
5168 if ((hba->saved_err & UIC_ERROR) &&
5169 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5170 int err;
5171 /*
5172 * wait for 50 ms to see whether any other errors show up.
5173 */
5174 spin_unlock_irqrestore(hba->host->host_lock, flags);
5175 msleep(50);
5176 spin_lock_irqsave(hba->host->host_lock, flags);
5177
5178 /*
5179 * now check whether we have received any severe errors other
5180 * than the DL NAC error.
5181 */
5182 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5183 ((hba->saved_err & UIC_ERROR) &&
5184 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5185 goto out;
5186
5187 /*
5188 * As DL NAC is the only error received so far, send out a NOP
5189 * command to confirm whether the link is still active.
5190 * - If we don't get any response then do error recovery.
5191 * - If we get a response then clear the DL NAC error bit.
5192 */
5193
5194 spin_unlock_irqrestore(hba->host->host_lock, flags);
5195 err = ufshcd_verify_dev_init(hba);
5196 spin_lock_irqsave(hba->host->host_lock, flags);
5197
5198 if (err)
5199 goto out;
5200
5201 /* Link seems to be alive hence ignore the DL NAC errors */
5202 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5203 hba->saved_err &= ~UIC_ERROR;
5204 /* clear NAC error */
5205 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5206 if (!hba->saved_uic_err) {
5207 err_handling = false;
5208 goto out;
5209 }
5210 }
5211out:
5212 spin_unlock_irqrestore(hba->host->host_lock, flags);
5213 return err_handling;
5214}
5215
5216/**
5217 * ufshcd_err_handler - handle UFS errors that require s/w attention
5218 * @work: pointer to work structure
5219 */
5220static void ufshcd_err_handler(struct work_struct *work)
5221{
5222 struct ufs_hba *hba;
5223 unsigned long flags;
5224 u32 err_xfer = 0;
5225 u32 err_tm = 0;
5226 int err = 0;
5227 int tag;
5228 bool needs_reset = false;
5229
5230 hba = container_of(work, struct ufs_hba, eh_work);
5231
5232 pm_runtime_get_sync(hba->dev);
5233 ufshcd_hold(hba, false);
5234
5235 spin_lock_irqsave(hba->host->host_lock, flags);
5236 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5237 goto out;
5238
5239 hba->ufshcd_state = UFSHCD_STATE_RESET;
5240 ufshcd_set_eh_in_progress(hba);
5241
5242 /* Complete requests that have door-bell cleared by h/w */
5243 ufshcd_complete_requests(hba);
5244
5245 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5246 bool ret;
5247
5248 spin_unlock_irqrestore(hba->host->host_lock, flags);
5249 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5250 ret = ufshcd_quirk_dl_nac_errors(hba);
5251 spin_lock_irqsave(hba->host->host_lock, flags);
5252 if (!ret)
5253 goto skip_err_handling;
5254 }
5255 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5256 (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
5257 ((hba->saved_err & UIC_ERROR) &&
5258 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5259 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5260 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5261 needs_reset = true;
5262
5263 /*
5264 * if host reset is required then skip clearing the pending
5265 * transfers forcefully because they will automatically get
5266 * cleared after link startup.
5267 */
5268 if (needs_reset)
5269 goto skip_pending_xfer_clear;
5270
5271 /* release lock as clear command might sleep */
5272 spin_unlock_irqrestore(hba->host->host_lock, flags);
5273 /* Clear pending transfer requests */
5274 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5275 if (ufshcd_clear_cmd(hba, tag)) {
5276 err_xfer = true;
5277 goto lock_skip_pending_xfer_clear;
5278 }
5279 }
5280
5281 /* Clear pending task management requests */
5282 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5283 if (ufshcd_clear_tm_cmd(hba, tag)) {
5284 err_tm = true;
5285 goto lock_skip_pending_xfer_clear;
5286 }
5287 }
5288
5289lock_skip_pending_xfer_clear:
5290 spin_lock_irqsave(hba->host->host_lock, flags);
5291
5292 /* Complete the requests that are cleared by s/w */
5293 ufshcd_complete_requests(hba);
5294
5295 if (err_xfer || err_tm)
5296 needs_reset = true;
5297
5298skip_pending_xfer_clear:
5299 /* Fatal errors need reset */
5300 if (needs_reset) {
5301 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5302
5303 /*
5304 * ufshcd_reset_and_restore() does the link reinitialization,
5305 * which will need at least one empty doorbell slot to send the
5306 * device management commands (NOP and query commands).
5307 * If no slot is empty at this moment then free up the last
5308 * slot forcefully.
5309 */
5310 if (hba->outstanding_reqs == max_doorbells)
5311 __ufshcd_transfer_req_compl(hba,
5312 (1UL << (hba->nutrs - 1)));
5313
5314 spin_unlock_irqrestore(hba->host->host_lock, flags);
5315 err = ufshcd_reset_and_restore(hba);
5316 spin_lock_irqsave(hba->host->host_lock, flags);
5317 if (err) {
5318 dev_err(hba->dev, "%s: reset and restore failed\n",
5319 __func__);
5320 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5321 }
5322 /*
5323 * Inform the scsi mid-layer that we did a reset so that it can
5324 * handle Unit Attention properly.
5325 */
5326 scsi_report_bus_reset(hba->host, 0);
5327 hba->saved_err = 0;
5328 hba->saved_uic_err = 0;
5329 }
5330
5331skip_err_handling:
5332 if (!needs_reset) {
5333 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5334 if (hba->saved_err || hba->saved_uic_err)
5335 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5336 __func__, hba->saved_err, hba->saved_uic_err);
5337 }
5338
5339 ufshcd_clear_eh_in_progress(hba);
5340
5341out:
5342 spin_unlock_irqrestore(hba->host->host_lock, flags);
5343 ufshcd_scsi_unblock_requests(hba);
5344 ufshcd_release(hba);
5345 pm_runtime_put_sync(hba->dev);
5346}
5347
5348static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5349 u32 reg)
5350{
5351 reg_hist->reg[reg_hist->pos] = reg;
5352 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5353 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5354}
5355
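/*
 * Worked example (illustrative): with a history length of N, pos advances
 * 0, 1, ..., N - 1 and then wraps back to 0, so the (N + 1)th sample
 * overwrites the oldest entry and reg_hist always holds the N most recent
 * error register values together with their timestamps.
 */
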
5356/**
5357 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5358 * @hba: per-adapter instance
5359 */
5360static void ufshcd_update_uic_error(struct ufs_hba *hba)
5361{
5362 u32 reg;
5363
5364 /* PHY layer lane error */
5365 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5366 /* Ignore LINERESET indication, as this is not an error */
5367 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5368 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5369 /*
5370 * To know whether this error is fatal or not, the DB timeout
5371 * must be checked, but that error is handled separately.
5372 */
5373 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5374 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5375 }
5376
5377 /* PA_INIT_ERROR is fatal and needs UIC reset */
5378 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5379 if (reg)
5380 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5381
5382 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5383 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5384 else if (hba->dev_quirks &
5385 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5386 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5387 hba->uic_error |=
5388 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5389 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5390 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5391 }
5392
5393 /* UIC NL/TL/DME errors need software retry */
5394 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5395 if (reg) {
5396 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5397 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5398 }
5399
5400 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5401 if (reg) {
5402 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5403 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5404 }
5405
5406 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5407 if (reg) {
5408 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5409 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5410 }
5411
5412 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5413 __func__, hba->uic_error);
5414}
5415
5416static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5417 u32 intr_mask)
5418{
5419 if (!ufshcd_is_auto_hibern8_supported(hba))
5420 return false;
5421
5422 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5423 return false;
5424
5425 if (hba->active_uic_cmd &&
5426 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5427 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5428 return false;
5429
5430 return true;
5431}
5432
5433/**
5434 * ufshcd_check_errors - Check for errors that need s/w attention
5435 * @hba: per-adapter instance
5436 */
5437static void ufshcd_check_errors(struct ufs_hba *hba)
5438{
5439 bool queue_eh_work = false;
5440
5441 if (hba->errors & INT_FATAL_ERRORS)
5442 queue_eh_work = true;
5443
5444 if (hba->errors & UIC_ERROR) {
5445 hba->uic_error = 0;
5446 ufshcd_update_uic_error(hba);
5447 if (hba->uic_error)
5448 queue_eh_work = true;
5449 }
5450
5451 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
5452 dev_err(hba->dev,
5453 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
5454 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
5455 "Enter" : "Exit",
5456 hba->errors, ufshcd_get_upmcrs(hba));
5457 queue_eh_work = true;
5458 }
5459
5460 if (queue_eh_work) {
5461 /*
5462 * update the transfer error masks to sticky bits; do this
5463 * irrespective of the current ufshcd_state.
5464 */
5465 hba->saved_err |= hba->errors;
5466 hba->saved_uic_err |= hba->uic_error;
5467
5468 /* handle fatal errors only when link is functional */
5469 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5470 /* block commands from scsi mid-layer */
5471 ufshcd_scsi_block_requests(hba);
5472
5473 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5474
5475 /* dump controller state before resetting */
5476 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5477 bool pr_prdt = !!(hba->saved_err &
5478 SYSTEM_BUS_FATAL_ERROR);
5479
5480 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5481 __func__, hba->saved_err,
5482 hba->saved_uic_err);
5483
5484 ufshcd_print_host_regs(hba);
5485 ufshcd_print_pwr_info(hba);
5486 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5487 ufshcd_print_trs(hba, hba->outstanding_reqs,
5488 pr_prdt);
5489 }
5490 schedule_work(&hba->eh_work);
5491 }
5492 }
5493 /*
5494 * if (!queue_eh_work) -
5495 * Other errors are either non-fatal, where the host recovers
5496 * by itself without s/w intervention, or errors that will be
5497 * handled by the SCSI core layer.
5498 */
5499}
5500
5501/**
5502 * ufshcd_tmc_handler - handle task management function completion
5503 * @hba: per adapter instance
5504 */
5505static void ufshcd_tmc_handler(struct ufs_hba *hba)
5506{
5507 u32 tm_doorbell;
5508
5509 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5510 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5511 wake_up(&hba->tm_wq);
5512}
5513
5514/**
5515 * ufshcd_sl_intr - Interrupt service routine
5516 * @hba: per adapter instance
5517 * @intr_status: contains interrupts generated by the controller
5518 */
5519static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5520{
5521 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5522
5523 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5524 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5525
5526 if (hba->errors)
5527 ufshcd_check_errors(hba);
5528
5529 if (intr_status & UFSHCD_UIC_MASK)
5530 ufshcd_uic_cmd_compl(hba, intr_status);
5531
5532 if (intr_status & UTP_TASK_REQ_COMPL)
5533 ufshcd_tmc_handler(hba);
5534
5535 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5536 ufshcd_transfer_req_compl(hba);
5537}
5538
5539/**
5540 * ufshcd_intr - Main interrupt service routine
5541 * @irq: irq number
5542 * @__hba: pointer to adapter instance
5543 *
5544 * Returns IRQ_HANDLED - If interrupt is valid
5545 * IRQ_NONE - If invalid interrupt
5546 */
5547static irqreturn_t ufshcd_intr(int irq, void *__hba)
5548{
5549 u32 intr_status, enabled_intr_status;
5550 irqreturn_t retval = IRQ_NONE;
5551 struct ufs_hba *hba = __hba;
5552 int retries = hba->nutrs;
5553
5554 spin_lock(hba->host->host_lock);
5555 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5556
5557 /*
5558 * There could be a max of hba->nutrs reqs in flight and, in the worst
5559 * case, the reqs could finish one by one after the interrupt status is
5560 * read. Make sure we handle them by checking the interrupt status
5561 * again in a loop until all of the reqs are processed before returning.
5562 */
5563 do {
5564 enabled_intr_status =
5565 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5566 if (intr_status)
5567 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5568 if (enabled_intr_status) {
5569 ufshcd_sl_intr(hba, enabled_intr_status);
5570 retval = IRQ_HANDLED;
5571 }
5572
5573 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5574 } while (intr_status && --retries);
5575
5576 spin_unlock(hba->host->host_lock);
5577 return retval;
5578}
5579
5580static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5581{
5582 int err = 0;
5583 u32 mask = 1 << tag;
5584 unsigned long flags;
5585
5586 if (!test_bit(tag, &hba->outstanding_tasks))
5587 goto out;
5588
5589 spin_lock_irqsave(hba->host->host_lock, flags);
5590 ufshcd_utmrl_clear(hba, tag);
5591 spin_unlock_irqrestore(hba->host->host_lock, flags);
5592
5593 /* poll for max. 1 sec to clear door bell register by h/w */
5594 err = ufshcd_wait_for_register(hba,
5595 REG_UTP_TASK_REQ_DOOR_BELL,
5596 mask, 0, 1000, 1000, true);
5597out:
5598 return err;
5599}
5600
5601static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
5602 struct utp_task_req_desc *treq, u8 tm_function)
5603{
5604 struct Scsi_Host *host = hba->host;
5605 unsigned long flags;
5606 int free_slot, task_tag, err;
5607
5608 /*
5609 * Get a free slot; sleep if no slot is available.
5610 * Even though we use wait_event(), which can sleep indefinitely,
5611 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5612 */
5613 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5614 ufshcd_hold(hba, false);
5615
5616 spin_lock_irqsave(host->host_lock, flags);
5617 task_tag = hba->nutrs + free_slot;
5618
5619 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
5620
5621 memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
5622 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5623
5624 /* send command to the controller */
5625 __set_bit(free_slot, &hba->outstanding_tasks);
5626
5627 /* Make sure descriptors are ready before ringing the task doorbell */
5628 wmb();
5629
5630 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5631 /* Make sure that doorbell is committed immediately */
5632 wmb();
5633
5634 spin_unlock_irqrestore(host->host_lock, flags);
5635
5636 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5637
5638 /* wait until the task management command is completed */
5639 err = wait_event_timeout(hba->tm_wq,
5640 test_bit(free_slot, &hba->tm_condition),
5641 msecs_to_jiffies(TM_CMD_TIMEOUT));
5642 if (!err) {
5643 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5644 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5645 __func__, tm_function);
5646 if (ufshcd_clear_tm_cmd(hba, free_slot))
5647 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5648 __func__, free_slot);
5649 err = -ETIMEDOUT;
5650 } else {
5651 err = 0;
5652 memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
5653
5654 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5655
5656 spin_lock_irqsave(hba->host->host_lock, flags);
5657 __clear_bit(free_slot, &hba->outstanding_tasks);
5658 spin_unlock_irqrestore(hba->host->host_lock, flags);
5659
5660 }
5661
5662 clear_bit(free_slot, &hba->tm_condition);
5663 ufshcd_put_tm_slot(hba, free_slot);
5664 wake_up(&hba->tm_tag_wq);
5665
5666 ufshcd_release(hba);
5667 return err;
5668}
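/*
 * Note (illustrative): TM request tags are allocated above the transfer
 * request tag space, i.e. task_tag = hba->nutrs + free_slot. For example,
 * with hba->nutrs == 32 and free_slot == 2 the UPIU carries task tag 34,
 * which keeps TM tags distinct from SCSI/device-command tags.
 */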
5669
5670/**
5671 * ufshcd_issue_tm_cmd - issues task management commands to controller
5672 * @hba: per adapter instance
5673 * @lun_id: LUN ID to which TM command is sent
5674 * @task_id: task ID to which the TM command is applicable
5675 * @tm_function: task management function opcode
5676 * @tm_response: task management service response return value
5677 *
5678 * Returns non-zero value on error, zero on success.
5679 */
5680static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5681 u8 tm_function, u8 *tm_response)
5682{
5683 struct utp_task_req_desc treq = { { 0 }, };
5684 int ocs_value, err;
5685
5686 /* Configure task request descriptor */
5687 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5688 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5689
5690 /* Configure task request UPIU */
5691 treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
5692 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
5693 treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
5694
5695 /*
5696 * The host shall provide the same value for LUN field in the basic
5697 * header and for Input Parameter.
5698 */
5699 treq.input_param1 = cpu_to_be32(lun_id);
5700 treq.input_param2 = cpu_to_be32(task_id);
5701
5702 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
5703 if (err == -ETIMEDOUT)
5704 return err;
5705
5706 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5707 if (ocs_value != OCS_SUCCESS)
5708 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
5709 __func__, ocs_value);
5710 else if (tm_response)
5711 *tm_response = be32_to_cpu(treq.output_param1) &
5712 MASK_TM_SERVICE_RESP;
5713 return err;
5714}
5715
5716/**
5717 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
5718 * @hba: per-adapter instance
5719 * @req_upiu: upiu request
5720 * @rsp_upiu: upiu reply
5721 * @desc_buff: pointer to descriptor buffer, NULL if NA
5722 * @buff_len: descriptor size, 0 if NA
5723 * @cmd_type: specifies the type (NOP, Query, ...)
5724 * @desc_op: descriptor operation
5725 *
5726 * These types of requests use a UTP Transfer Request Descriptor (UTRD).
5727 * Therefore, they "ride" the device management infrastructure: they reuse
5728 * its tag and task work queues.
5729 *
5730 * Since there is only one available tag for device management commands,
5731 * the caller is expected to hold the hba->dev_cmd.lock mutex.
5732 */
5733static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
5734 struct utp_upiu_req *req_upiu,
5735 struct utp_upiu_req *rsp_upiu,
5736 u8 *desc_buff, int *buff_len,
5737 int cmd_type,
5738 enum query_opcode desc_op)
5739{
5740 struct ufshcd_lrb *lrbp;
5741 int err = 0;
5742 int tag;
5743 struct completion wait;
5744 unsigned long flags;
5745 u32 upiu_flags;
5746
5747 down_read(&hba->clk_scaling_lock);
5748
5749 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
5750
5751 init_completion(&wait);
5752 lrbp = &hba->lrb[tag];
5753 WARN_ON(lrbp->cmd);
5754
5755 lrbp->cmd = NULL;
5756 lrbp->sense_bufflen = 0;
5757 lrbp->sense_buffer = NULL;
5758 lrbp->task_tag = tag;
5759 lrbp->lun = 0;
5760 lrbp->intr_cmd = true;
5761 hba->dev_cmd.type = cmd_type;
5762
5763 switch (hba->ufs_version) {
5764 case UFSHCI_VERSION_10:
5765 case UFSHCI_VERSION_11:
5766 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
5767 break;
5768 default:
5769 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
5770 break;
5771 }
5772
5773 /* update the task tag in the request upiu */
5774 req_upiu->header.dword_0 |= cpu_to_be32(tag);
5775
5776 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
5777
5778 /* just copy the upiu request as it is */
5779 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
5780 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
5781 /* The Data Segment Area is optional, depending upon the query
5782 * function value. For WRITE DESCRIPTOR, the data segment
5783 * follows right after the Transaction Specific Fields (TSF).
5784 */
5785 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
5786 *buff_len = 0;
5787 }
5788
5789 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
5790
5791 hba->dev_cmd.complete = &wait;
5792
5793 /* Make sure descriptors are ready before ringing the doorbell */
5794 wmb();
5795 spin_lock_irqsave(hba->host->host_lock, flags);
5796 ufshcd_send_command(hba, tag);
5797 spin_unlock_irqrestore(hba->host->host_lock, flags);
5798
5799 /*
5800 * Ignore the return value here - ufshcd_check_query_response() is
5801 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
5802 * Read the response directly, ignoring all errors.
5803 */
5804 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
5805
5806 /* just copy the upiu response as it is */
5807 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
5808 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
5809 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
5810 u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
5811 MASK_QUERY_DATA_SEG_LEN;
5812
5813 if (*buff_len >= resp_len) {
5814 memcpy(desc_buff, descp, resp_len);
5815 *buff_len = resp_len;
5816 } else {
5817 dev_warn(hba->dev, "response size is bigger than the buffer");
5818 *buff_len = 0;
5819 err = -EINVAL;
5820 }
5821 }
5822
5823 ufshcd_put_dev_cmd_tag(hba, tag);
5824 wake_up(&hba->dev_cmd.tag_wq);
5825 up_read(&hba->clk_scaling_lock);
5826 return err;
5827}
5828
5829/**
5830 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
5831 * @hba: per-adapter instance
5832 * @req_upiu: upiu request
5833 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
5834 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
5835 * @desc_buff: pointer to descriptor buffer, NULL if NA
5836 * @buff_len: descriptor size, 0 if NA
5837 * @desc_op: descriptor operation
5838 *
5839 * Supports UTP Transfer requests (nop and query), and UTP Task
5840 * Management requests.
5841 * It is up to the caller to fill the UPIU content properly, as it will
5842 * be copied without any further input validation.
5843 */
5844int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
5845 struct utp_upiu_req *req_upiu,
5846 struct utp_upiu_req *rsp_upiu,
5847 int msgcode,
5848 u8 *desc_buff, int *buff_len,
5849 enum query_opcode desc_op)
5850{
5851 int err;
5852 int cmd_type = DEV_CMD_TYPE_QUERY;
5853 struct utp_task_req_desc treq = { { 0 }, };
5854 int ocs_value;
5855 u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
5856
5857 switch (msgcode) {
5858 case UPIU_TRANSACTION_NOP_OUT:
5859 cmd_type = DEV_CMD_TYPE_NOP;
5860 /* fall through */
5861 case UPIU_TRANSACTION_QUERY_REQ:
5862 ufshcd_hold(hba, false);
5863 mutex_lock(&hba->dev_cmd.lock);
5864 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
5865 desc_buff, buff_len,
5866 cmd_type, desc_op);
5867 mutex_unlock(&hba->dev_cmd.lock);
5868 ufshcd_release(hba);
5869
5870 break;
5871 case UPIU_TRANSACTION_TASK_REQ:
5872 treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5873 treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5874
5875 memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
5876
5877 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
5878 if (err == -ETIMEDOUT)
5879 break;
5880
5881 ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
5882 if (ocs_value != OCS_SUCCESS) {
5883 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
5884 ocs_value);
5885 break;
5886 }
5887
5888 memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
5889
5890 break;
5891 default:
5892 err = -EINVAL;
5893
5894 break;
5895 }
5896
5897 return err;
5898}
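/*
 * Usage sketch (illustrative, not part of the driver): sending a raw
 * NOP OUT UPIU, roughly as the BSG path does. Zero-initializing the UPIU
 * and setting only the transaction type is an assumption made for brevity;
 * real callers (see ufs_bsg.c) receive the fully formed UPIU from user
 * space.
 *
 *	struct utp_upiu_req req = { };
 *	struct utp_upiu_req rsp = { };
 *	int ret;
 *
 *	req.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
 *	ret = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, NULL, UPIU_QUERY_OPCODE_NOP);
 *	if (ret)
 *		dev_err(hba->dev, "raw NOP OUT failed, err %d\n", ret);
 */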
5899
5900/**
5901 * ufshcd_eh_device_reset_handler - device reset handler registered to
5902 * scsi layer.
5903 * @cmd: SCSI command pointer
5904 *
5905 * Returns SUCCESS/FAILED
5906 */
5907static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5908{
5909 struct Scsi_Host *host;
5910 struct ufs_hba *hba;
5911 unsigned int tag;
5912 u32 pos;
5913 int err;
5914 u8 resp = 0xF;
5915 struct ufshcd_lrb *lrbp;
5916 unsigned long flags;
5917
5918 host = cmd->device->host;
5919 hba = shost_priv(host);
5920 tag = cmd->request->tag;
5921
5922 lrbp = &hba->lrb[tag];
5923 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5924 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5925 if (!err)
5926 err = resp;
5927 goto out;
5928 }
5929
5930 /* clear the commands that were pending for corresponding LUN */
5931 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5932 if (hba->lrb[pos].lun == lrbp->lun) {
5933 err = ufshcd_clear_cmd(hba, pos);
5934 if (err)
5935 break;
5936 }
5937 }
5938 spin_lock_irqsave(host->host_lock, flags);
5939 ufshcd_transfer_req_compl(hba);
5940 spin_unlock_irqrestore(host->host_lock, flags);
5941
5942out:
5943 hba->req_abort_count = 0;
5944 if (!err) {
5945 err = SUCCESS;
5946 } else {
5947 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5948 err = FAILED;
5949 }
5950 return err;
5951}
5952
5953static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5954{
5955 struct ufshcd_lrb *lrbp;
5956 int tag;
5957
5958 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5959 lrbp = &hba->lrb[tag];
5960 lrbp->req_abort_skip = true;
5961 }
5962}
5963
5964/**
5965 * ufshcd_abort - abort a specific command
5966 * @cmd: SCSI command pointer
5967 *
5968 * Abort the pending command in device by sending UFS_ABORT_TASK task management
5969 * command, and in host controller by clearing the door-bell register. There can
5970 * be race between controller sending the command to the device while abort is
5971 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
5972 * really issued and then try to abort it.
5973 *
5974 * Returns SUCCESS/FAILED
5975 */
5976static int ufshcd_abort(struct scsi_cmnd *cmd)
5977{
5978 struct Scsi_Host *host;
5979 struct ufs_hba *hba;
5980 unsigned long flags;
5981 unsigned int tag;
5982 int err = 0;
5983 int poll_cnt;
5984 u8 resp = 0xF;
5985 struct ufshcd_lrb *lrbp;
5986 u32 reg;
5987
5988 host = cmd->device->host;
5989 hba = shost_priv(host);
5990 tag = cmd->request->tag;
5991 lrbp = &hba->lrb[tag];
5992 if (!ufshcd_valid_tag(hba, tag)) {
5993 dev_err(hba->dev,
5994 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5995 __func__, tag, cmd, cmd->request);
5996 BUG();
5997 }
5998
5999 /*
6000 * Task abort to the device W-LUN is illegal. When this command
6001 * fails due to that spec violation, the next SCSI error-handling
6002 * step would be to send a LU reset which, again, is a spec violation.
6003 * To avoid these unnecessary/illegal steps we skip to the last error
6004 * handling stage: reset and restore.
6005 */
6006 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
6007 return ufshcd_eh_host_reset_handler(cmd);
6008
6009 ufshcd_hold(hba, false);
6010 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6011 /* If command is already aborted/completed, return SUCCESS */
6012 if (!(test_bit(tag, &hba->outstanding_reqs))) {
6013 dev_err(hba->dev,
6014 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
6015 __func__, tag, hba->outstanding_reqs, reg);
6016 goto out;
6017 }
6018
6019 if (!(reg & (1 << tag))) {
6020 dev_err(hba->dev,
6021 "%s: cmd was completed, but without a notifying intr, tag = %d",
6022 __func__, tag);
6023 }
6024
6025 /* Print Transfer Request of aborted task */
6026 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
6027
6028 /*
6029 * Print detailed info about aborted request.
6030 * As more than one request might get aborted at the same time,
6031 * print full information only for the first aborted request in order
6032 * to reduce repeated printouts. For other aborted requests only print
6033 * basic details.
6034 */
6035 scsi_print_command(hba->lrb[tag].cmd);
6036 if (!hba->req_abort_count) {
6037 ufshcd_print_host_regs(hba);
6038 ufshcd_print_host_state(hba);
6039 ufshcd_print_pwr_info(hba);
6040 ufshcd_print_trs(hba, 1 << tag, true);
6041 } else {
6042 ufshcd_print_trs(hba, 1 << tag, false);
6043 }
6044 hba->req_abort_count++;
6045
6046 /* Skip task abort in case previous aborts failed and report failure */
6047 if (lrbp->req_abort_skip) {
6048 err = -EIO;
6049 goto out;
6050 }
6051
6052 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
6053 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6054 UFS_QUERY_TASK, &resp);
6055 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
6056 /* cmd pending in the device */
6057 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
6058 __func__, tag);
6059 break;
6060 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6061 /*
6062 * cmd not pending in the device, check if it is
6063 * in transition.
6064 */
6065 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
6066 __func__, tag);
6067 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
6068 if (reg & (1 << tag)) {
6069 /* sleep for max. 200us to stabilize */
6070 usleep_range(100, 200);
6071 continue;
6072 }
6073 /* command completed already */
6074 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
6075 __func__, tag);
6076 goto out;
6077 } else {
6078 dev_err(hba->dev,
6079 "%s: no response from device. tag = %d, err %d\n",
6080 __func__, tag, err);
6081 if (!err)
6082 err = resp; /* service response error */
6083 goto out;
6084 }
6085 }
6086
6087 if (!poll_cnt) {
6088 err = -EBUSY;
6089 goto out;
6090 }
6091
6092 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
6093 UFS_ABORT_TASK, &resp);
6094 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
6095 if (!err) {
6096 err = resp; /* service response error */
6097 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
6098 __func__, tag, err);
6099 }
6100 goto out;
6101 }
6102
6103 err = ufshcd_clear_cmd(hba, tag);
6104 if (err) {
6105 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6106 __func__, tag, err);
6107 goto out;
6108 }
6109
6110 scsi_dma_unmap(cmd);
6111
6112 spin_lock_irqsave(host->host_lock, flags);
6113 ufshcd_outstanding_req_clear(hba, tag);
6114 hba->lrb[tag].cmd = NULL;
6115 spin_unlock_irqrestore(host->host_lock, flags);
6116
6117 clear_bit_unlock(tag, &hba->lrb_in_use);
6118 wake_up(&hba->dev_cmd.tag_wq);
6119
6120out:
6121 if (!err) {
6122 err = SUCCESS;
6123 } else {
6124 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6125 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6126 err = FAILED;
6127 }
6128
6129 /*
6130 * This ufshcd_release() corresponds to the original scsi cmd that got
6131 * aborted here (as we won't get any IRQ for it).
6132 */
6133 ufshcd_release(hba);
6134 return err;
6135}
6136
6137/**
6138 * ufshcd_host_reset_and_restore - reset and restore host controller
6139 * @hba: per-adapter instance
6140 *
6141 * Note that host controller reset may issue DME_RESET to
6142 * the local and remote (device) UniPro stacks, and the attributes
6143 * are reset to their default state.
6144 *
6145 * Returns zero on success, non-zero on failure
6146 */
6147static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6148{
6149 int err;
6150 unsigned long flags;
6151
6152 /* Reset the host controller */
6153 spin_lock_irqsave(hba->host->host_lock, flags);
6154 ufshcd_hba_stop(hba, false);
6155 spin_unlock_irqrestore(hba->host->host_lock, flags);
6156
6157 /* scale up clocks to max frequency before full reinitialization */
6158 ufshcd_scale_clks(hba, true);
6159
6160 err = ufshcd_hba_enable(hba);
6161 if (err)
6162 goto out;
6163
6164 /* Establish the link again and restore the device */
6165 err = ufshcd_probe_hba(hba);
6166
6167 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6168 err = -EIO;
6169out:
6170 if (err)
6171 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6172
6173 return err;
6174}
6175
6176/**
6177 * ufshcd_reset_and_restore - reset and re-initialize host/device
6178 * @hba: per-adapter instance
6179 *
6180 * Reset and recover device, host and re-establish link. This
6181 * is helpful to recover the communication in fatal error conditions.
6182 *
6183 * Returns zero on success, non-zero on failure
6184 */
6185static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6186{
6187 int err = 0;
6188 unsigned long flags;
6189 int retries = MAX_HOST_RESET_RETRIES;
6190
6191 do {
6192 err = ufshcd_host_reset_and_restore(hba);
6193 } while (err && --retries);
6194
6195 /*
6196 * After reset, the door-bell might be cleared; complete
6197 * outstanding requests in s/w here.
6198 */
6199 spin_lock_irqsave(hba->host->host_lock, flags);
6200 ufshcd_transfer_req_compl(hba);
6201 ufshcd_tmc_handler(hba);
6202 spin_unlock_irqrestore(hba->host->host_lock, flags);
6203
6204 return err;
6205}
6206
6207/**
6208 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6209 * @cmd: SCSI command pointer
6210 *
6211 * Returns SUCCESS/FAILED
6212 */
6213static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6214{
6215 int err;
6216 unsigned long flags;
6217 struct ufs_hba *hba;
6218
6219 hba = shost_priv(cmd->device->host);
6220
6221 ufshcd_hold(hba, false);
6222 /*
6223 * Check if there is any race with fatal error handling.
6224 * If so, wait for it to complete. Even though fatal error
6225 * handling does reset and restore in some cases, don't assume
6226 * anything out of it. We are just avoiding race here.
6227 */
6228 do {
6229 spin_lock_irqsave(hba->host->host_lock, flags);
6230 if (!(work_pending(&hba->eh_work) ||
6231 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6232 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6233 break;
6234 spin_unlock_irqrestore(hba->host->host_lock, flags);
6235 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6236 flush_work(&hba->eh_work);
6237 } while (1);
6238
6239 hba->ufshcd_state = UFSHCD_STATE_RESET;
6240 ufshcd_set_eh_in_progress(hba);
6241 spin_unlock_irqrestore(hba->host->host_lock, flags);
6242
6243 err = ufshcd_reset_and_restore(hba);
6244
6245 spin_lock_irqsave(hba->host->host_lock, flags);
6246 if (!err) {
6247 err = SUCCESS;
6248 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6249 } else {
6250 err = FAILED;
6251 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6252 }
6253 ufshcd_clear_eh_in_progress(hba);
6254 spin_unlock_irqrestore(hba->host->host_lock, flags);
6255
6256 ufshcd_release(hba);
6257 return err;
6258}
6259
6260/**
6261 * ufshcd_get_max_icc_level - calculate the ICC level
6262 * @sup_curr_uA: max. current supported by the regulator
6263 * @start_scan: row at the desc table to start scan from
6264 * @buff: power descriptor buffer
6265 *
6266 * Returns calculated max ICC level for specific regulator
6267 */
6268static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6269{
6270 int i;
6271 int curr_uA;
6272 u16 data;
6273 u16 unit;
6274
6275 for (i = start_scan; i >= 0; i--) {
6276 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6277 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6278 ATTR_ICC_LVL_UNIT_OFFSET;
6279 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6280 switch (unit) {
6281 case UFSHCD_NANO_AMP:
6282 curr_uA = curr_uA / 1000;
6283 break;
6284 case UFSHCD_MILI_AMP:
6285 curr_uA = curr_uA * 1000;
6286 break;
6287 case UFSHCD_AMP:
6288 curr_uA = curr_uA * 1000 * 1000;
6289 break;
6290 case UFSHCD_MICRO_AMP:
6291 default:
6292 break;
6293 }
6294 if (sup_curr_uA >= curr_uA)
6295 break;
6296 }
6297 if (i < 0) {
6298 i = 0;
6299 pr_err("%s: Couldn't find valid icc_level, using %d", __func__, i);
6300 }
6301
6302 return (u32)i;
6303}
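/*
 * Worked example (illustrative): if a power descriptor entry decodes to
 * unit == UFSHCD_MILI_AMP with value 100, curr_uA = 100 * 1000 = 100000 uA
 * (100 mA). With a regulator limit of sup_curr_uA == 150000, the scan
 * stops at this row and its index is returned as the ICC level.
 */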
6304
6305/**
6306 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
6307 * In case regulators are not initialized we'll return 0
6308 * @hba: per-adapter instance
6309 * @desc_buf: power descriptor buffer to extract ICC levels from
6310 * @len: length of desc_buf
6311 *
6312 * Returns calculated ICC level
6313 */
6314static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6315 u8 *desc_buf, int len)
6316{
6317 u32 icc_level = 0;
6318
6319 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6320 !hba->vreg_info.vccq2) {
6321 dev_err(hba->dev,
6322 "%s: Regulator capability was not set, actvIccLevel=%d",
6323 __func__, icc_level);
6324 goto out;
6325 }
6326
6327 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6328 icc_level = ufshcd_get_max_icc_level(
6329 hba->vreg_info.vcc->max_uA,
6330 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6331 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6332
6333 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6334 icc_level = ufshcd_get_max_icc_level(
6335 hba->vreg_info.vccq->max_uA,
6336 icc_level,
6337 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6338
6339 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6340 icc_level = ufshcd_get_max_icc_level(
6341 hba->vreg_info.vccq2->max_uA,
6342 icc_level,
6343 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6344out:
6345 return icc_level;
6346}
6347
6348static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6349{
6350 int ret;
6351 int buff_len = hba->desc_size.pwr_desc;
6352 u8 *desc_buf;
6353
6354 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6355 if (!desc_buf)
6356 return;
6357
6358 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6359 if (ret) {
6360 dev_err(hba->dev,
6361 "%s: Failed reading power descriptor.len = %d ret = %d",
6362 __func__, buff_len, ret);
6363 goto out;
6364 }
6365
6366 hba->init_prefetch_data.icc_level =
6367 ufshcd_find_max_sup_active_icc_level(hba,
6368 desc_buf, buff_len);
6369 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6370 __func__, hba->init_prefetch_data.icc_level);
6371
6372 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6373 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6374 &hba->init_prefetch_data.icc_level);
6375
6376 if (ret)
6377 dev_err(hba->dev,
6378 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6379 __func__, hba->init_prefetch_data.icc_level, ret);
6380
6381out:
6382 kfree(desc_buf);
6383}
6384
6385/**
6386 * ufshcd_scsi_add_wlus - Adds required W-LUs
6387 * @hba: per-adapter instance
6388 *
6389 * UFS device specification requires the UFS devices to support 4 well known
6390 * logical units:
6391 * "REPORT_LUNS" (address: 01h)
6392 * "UFS Device" (address: 50h)
6393 * "RPMB" (address: 44h)
6394 * "BOOT" (address: 30h)
6395 * UFS device's power management needs to be controlled by "POWER CONDITION"
6396 * field of SSU (START STOP UNIT) command. But this "power condition" field
6397 * will take effect only when it's sent to the "UFS device" well known logical
6398 * unit, hence we require the scsi_device instance to represent this logical
6399 * unit in order for the UFS host driver to send the SSU command for power management.
6400 *
6401 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6402 * Block) LU so user space process can control this LU. User space may also
6403 * want to have access to BOOT LU.
6404 *
6405 * This function adds scsi device instances for each of the well known LUs
6406 * (except the "REPORT LUNS" LU).
6407 *
6408 * Returns zero on success (all required W-LUs are added successfully),
6409 * non-zero error value on failure (if failed to add any of the required W-LU).
6410 */
6411static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6412{
6413 int ret = 0;
6414 struct scsi_device *sdev_rpmb;
6415 struct scsi_device *sdev_boot;
6416
6417 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6418 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6419 if (IS_ERR(hba->sdev_ufs_device)) {
6420 ret = PTR_ERR(hba->sdev_ufs_device);
6421 hba->sdev_ufs_device = NULL;
6422 goto out;
6423 }
6424 scsi_device_put(hba->sdev_ufs_device);
6425
6426 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6427 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6428 if (IS_ERR(sdev_rpmb)) {
6429 ret = PTR_ERR(sdev_rpmb);
6430 goto remove_sdev_ufs_device;
6431 }
6432 scsi_device_put(sdev_rpmb);
6433
6434 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6435 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6436 if (IS_ERR(sdev_boot))
6437 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6438 else
6439 scsi_device_put(sdev_boot);
6440 goto out;
6441
6442remove_sdev_ufs_device:
6443 scsi_remove_device(hba->sdev_ufs_device);
6444out:
6445 return ret;
6446}
6447
6448static int ufs_get_device_desc(struct ufs_hba *hba,
6449 struct ufs_dev_desc *dev_desc)
6450{
6451 int err;
6452 size_t buff_len;
6453 u8 model_index;
6454 u8 *desc_buf;
6455
6456 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6457 QUERY_DESC_MAX_SIZE + 1);
6458 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6459 if (!desc_buf) {
6460 err = -ENOMEM;
6461 goto out;
6462 }
6463
6464 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6465 if (err) {
6466 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6467 __func__, err);
6468 goto out;
6469 }
6470
6471 /*
6472 * getting vendor (manufacturerID) and Bank Index in big endian
6473 * format
6474 */
6475 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6476 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6477
6478 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6479
6480 /* Zero-pad entire buffer for string termination. */
6481 memset(desc_buf, 0, buff_len);
6482
6483 err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6484 QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6485 if (err) {
6486 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6487 __func__, err);
6488 goto out;
6489 }
6490
6491 desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6492 strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6493 min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6494 MAX_MODEL_LEN));
6495
6496 /* Null terminate the model string */
6497 dev_desc->model[MAX_MODEL_LEN] = '\0';
6498
6499out:
6500 kfree(desc_buf);
6501 return err;
6502}
6503
6504static void ufs_fixup_device_setup(struct ufs_hba *hba,
6505 struct ufs_dev_desc *dev_desc)
6506{
6507 struct ufs_dev_fix *f;
6508
6509 for (f = ufs_fixups; f->quirk; f++) {
6510 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6511 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6512 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6513 !strcmp(f->card.model, UFS_ANY_MODEL)))
6514 hba->dev_quirks |= f->quirk;
6515 }
6516}
6517
6518/**
6519 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6520 * @hba: per-adapter instance
6521 *
6522 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6523 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6524 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6525 * the hibern8 exit latency.
6526 *
6527 * Returns zero on success, non-zero error value on failure.
6528 */
6529static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6530{
6531 int ret = 0;
6532 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6533
6534 ret = ufshcd_dme_peer_get(hba,
6535 UIC_ARG_MIB_SEL(
6536 RX_MIN_ACTIVATETIME_CAPABILITY,
6537 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6538 &peer_rx_min_activatetime);
6539 if (ret)
6540 goto out;
6541
6542 /* make sure proper unit conversion is applied */
6543 tuned_pa_tactivate =
6544 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6545 / PA_TACTIVATE_TIME_UNIT_US);
6546 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6547 tuned_pa_tactivate);
6548
6549out:
6550 return ret;
6551}
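/*
 * Worked example (illustrative; assumes RX_MIN_ACTIVATETIME_UNIT_US == 100
 * and PA_TACTIVATE_TIME_UNIT_US == 10 as defined in unipro.h): a peer
 * RX_MIN_ACTIVATETIME_CAPABILITY of 3 corresponds to 300 us, so
 * PA_TActivate is programmed to 300 / 10 = 30 units.
 */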
6552
6553/**
6554 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6555 * @hba: per-adapter instance
6556 *
6557 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6558 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6559 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6560 * This optimal value can help reduce the hibern8 exit latency.
6561 *
6562 * Returns zero on success, non-zero error value on failure.
6563 */
6564static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6565{
6566 int ret = 0;
6567 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6568 u32 max_hibern8_time, tuned_pa_hibern8time;
6569
6570 ret = ufshcd_dme_get(hba,
6571 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6572 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6573 &local_tx_hibern8_time_cap);
6574 if (ret)
6575 goto out;
6576
6577 ret = ufshcd_dme_peer_get(hba,
6578 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6579 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6580 &peer_rx_hibern8_time_cap);
6581 if (ret)
6582 goto out;
6583
6584 max_hibern8_time = max(local_tx_hibern8_time_cap,
6585 peer_rx_hibern8_time_cap);
6586 /* make sure proper unit conversion is applied */
6587 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6588 / PA_HIBERN8_TIME_UNIT_US);
6589 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6590 tuned_pa_hibern8time);
6591out:
6592 return ret;
6593}
6594
6595/**
6596 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6597 * less than device PA_TACTIVATE time.
6598 * @hba: per-adapter instance
6599 *
6600 * Some UFS devices require host PA_TACTIVATE to be lower than device
6601 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
6602 * for such devices.
6603 *
6604 * Returns zero on success, non-zero error value on failure.
6605 */
6606static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6607{
6608 int ret = 0;
6609 u32 granularity, peer_granularity;
6610 u32 pa_tactivate, peer_pa_tactivate;
6611 u32 pa_tactivate_us, peer_pa_tactivate_us;
6612 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6613
6614 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6615 &granularity);
6616 if (ret)
6617 goto out;
6618
6619 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6620 &peer_granularity);
6621 if (ret)
6622 goto out;
6623
6624 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6625 (granularity > PA_GRANULARITY_MAX_VAL)) {
6626 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6627 __func__, granularity);
6628 return -EINVAL;
6629 }
6630
6631 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6632 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6633 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6634 __func__, peer_granularity);
6635 return -EINVAL;
6636 }
6637
6638 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6639 if (ret)
6640 goto out;
6641
6642 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6643 &peer_pa_tactivate);
6644 if (ret)
6645 goto out;
6646
6647 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6648 peer_pa_tactivate_us = peer_pa_tactivate *
6649 gran_to_us_table[peer_granularity - 1];
6650
6651 if (pa_tactivate_us > peer_pa_tactivate_us) {
6652 u32 new_peer_pa_tactivate;
6653
6654 new_peer_pa_tactivate = pa_tactivate_us /
6655 gran_to_us_table[peer_granularity - 1];
6656 new_peer_pa_tactivate++;
6657 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6658 new_peer_pa_tactivate);
6659 }
6660
6661out:
6662 return ret;
6663}
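/*
 * Worked example (illustrative): with host PA_GRANULARITY == 3 (8 us per
 * unit in gran_to_us_table[]) and PA_TACTIVATE == 2, the host value is
 * 16 us. If the peer has PA_GRANULARITY == 1 (1 us) and PA_TACTIVATE == 10
 * (10 us), then 16 > 10 and the peer is bumped to 16 / 1 + 1 = 17 units.
 */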
6664
6665static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6666{
6667 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6668 ufshcd_tune_pa_tactivate(hba);
6669 ufshcd_tune_pa_hibern8time(hba);
6670 }
6671
6672 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6673 /* set 1ms timeout for PA_TACTIVATE */
6674 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6675
6676 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6677 ufshcd_quirk_tune_host_pa_tactivate(hba);
6678
6679 ufshcd_vops_apply_dev_quirks(hba);
6680}
6681
6682static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6683{
6684 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6685
6686 hba->ufs_stats.hibern8_exit_cnt = 0;
6687 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6688
6689 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6690 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6691 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6692 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6693 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6694
6695 hba->req_abort_count = 0;
6696}
6697
6698static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6699{
6700 int err;
6701
6702 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6703 &hba->desc_size.dev_desc);
6704 if (err)
6705 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6706
6707 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6708 &hba->desc_size.pwr_desc);
6709 if (err)
6710 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6711
6712 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6713 &hba->desc_size.interc_desc);
6714 if (err)
6715 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6716
6717 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6718 &hba->desc_size.conf_desc);
6719 if (err)
6720 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6721
6722 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6723 &hba->desc_size.unit_desc);
6724 if (err)
6725 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6726
6727 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6728 &hba->desc_size.geom_desc);
6729 if (err)
6730 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6731 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6732 &hba->desc_size.hlth_desc);
6733 if (err)
6734 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6735}
6736
6737static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6738{
6739 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6740 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6741 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6742 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6743 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6744 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6745 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6746}
6747
6748static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
6749 {19200000, REF_CLK_FREQ_19_2_MHZ},
6750 {26000000, REF_CLK_FREQ_26_MHZ},
6751 {38400000, REF_CLK_FREQ_38_4_MHZ},
6752 {52000000, REF_CLK_FREQ_52_MHZ},
6753 {0, REF_CLK_FREQ_INVAL},
6754};
6755
6756static enum ufs_ref_clk_freq
6757ufs_get_bref_clk_from_hz(unsigned long freq)
6758{
6759 int i;
6760
6761 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
6762 if (ufs_ref_clk_freqs[i].freq_hz == freq)
6763 return ufs_ref_clk_freqs[i].val;
6764
6765 return REF_CLK_FREQ_INVAL;
6766}
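/*
 * Example (illustrative): a "ref_clk" clock running at 26000000 Hz maps to
 * REF_CLK_FREQ_26_MHZ via the table above; any rate not listed yields
 * REF_CLK_FREQ_INVAL, in which case the device's bRefClkFreq attribute is
 * not updated.
 */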
6767
6768void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
6769{
6770 unsigned long freq;
6771
6772 freq = clk_get_rate(refclk);
6773
6774 hba->dev_ref_clk_freq =
6775 ufs_get_bref_clk_from_hz(freq);
6776
6777 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
6778 dev_err(hba->dev,
6779 "invalid ref_clk setting = %ld\n", freq);
6780}
6781
6782static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
6783{
6784 int err;
6785 u32 ref_clk;
6786 u32 freq = hba->dev_ref_clk_freq;
6787
6788 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6789 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
6790
6791 if (err) {
6792 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
6793 err);
6794 goto out;
6795 }
6796
6797 if (ref_clk == freq)
6798 goto out; /* nothing to update */
6799
6800 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6801 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
6802
6803 if (err) {
6804 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
6805 ufs_ref_clk_freqs[freq].freq_hz);
6806 goto out;
6807 }
6808
6809 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
6810 ufs_ref_clk_freqs[freq].freq_hz);
6811
6812out:
6813 return err;
6814}
6815
6816/**
6817 * ufshcd_probe_hba - probe hba to detect device and initialize
6818 * @hba: per-adapter instance
6819 *
6820 * Execute link-startup and verify device initialization
6821 */
6822static int ufshcd_probe_hba(struct ufs_hba *hba)
6823{
6824 struct ufs_dev_desc card = {0};
6825 int ret;
6826 ktime_t start = ktime_get();
6827
6828 ret = ufshcd_link_startup(hba);
6829 if (ret)
6830 goto out;
6831
6832 /* set the default level for urgent bkops */
6833 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6834 hba->is_urgent_bkops_lvl_checked = false;
6835
6836 /* Debug counters initialization */
6837 ufshcd_clear_dbg_ufs_stats(hba);
6838
6839 /* UniPro link is active now */
6840 ufshcd_set_link_active(hba);
6841
6842 /* Enable Auto-Hibernate if configured */
6843 ufshcd_auto_hibern8_enable(hba);
6844
6845 ret = ufshcd_verify_dev_init(hba);
6846 if (ret)
6847 goto out;
6848
6849 ret = ufshcd_complete_dev_init(hba);
6850 if (ret)
6851 goto out;
6852
6853 /* Init check for device descriptor sizes */
6854 ufshcd_init_desc_sizes(hba);
6855
6856 ret = ufs_get_device_desc(hba, &card);
6857 if (ret) {
6858 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6859 __func__, ret);
6860 goto out;
6861 }
6862
6863 ufs_fixup_device_setup(hba, &card);
6864 ufshcd_tune_unipro_params(hba);
6865
6866 /* UFS device is also active now */
6867 ufshcd_set_ufs_dev_active(hba);
6868 ufshcd_force_reset_auto_bkops(hba);
6869 hba->wlun_dev_clr_ua = true;
6870
6871 if (ufshcd_get_max_pwr_mode(hba)) {
6872 dev_err(hba->dev,
6873 "%s: Failed getting max supported power mode\n",
6874 __func__);
6875 } else {
6876 /*
6877 * Set the right value to bRefClkFreq before attempting to
6878 * switch to HS gears.
6879 */
6880 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
6881 ufshcd_set_dev_ref_clk(hba);
6882 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6883 if (ret) {
6884 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6885 __func__, ret);
6886 goto out;
6887 }
6888 }
6889
6890 /* set the state as operational after switching to desired gear */
6891 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6892
6893 /*
6894 * If we are in error handling context or in power management callbacks
6895 * context, there is no need to scan the host.
6896 */
6897 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6898 bool flag;
6899
6900 /* clear any previous UFS device information */
6901 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6902 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6903 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6904 hba->dev_info.f_power_on_wp_en = flag;
6905
6906 if (!hba->is_init_prefetch)
6907 ufshcd_init_icc_levels(hba);
6908
6909 /* Add required well known logical units to scsi mid layer */
6910 if (ufshcd_scsi_add_wlus(hba))
6911 goto out;
6912
6913 /* Initialize devfreq after UFS device is detected */
6914 if (ufshcd_is_clkscaling_supported(hba)) {
6915 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6916 &hba->pwr_info,
6917 sizeof(struct ufs_pa_layer_attr));
6918 hba->clk_scaling.saved_pwr_info.is_valid = true;
6919 if (!hba->devfreq) {
6920 ret = ufshcd_devfreq_init(hba);
6921 if (ret)
6922 goto out;
6923 }
6924 hba->clk_scaling.is_allowed = true;
6925 }
6926
6927 ufs_bsg_probe(hba);
6928
6929 scsi_scan_host(hba->host);
6930 pm_runtime_put_sync(hba->dev);
6931 }
6932
6933 if (!hba->is_init_prefetch)
6934 hba->is_init_prefetch = true;
6935
6936out:
6937 /*
6938 * If we failed to initialize the device or the device is not
6939 * present, turn off the power/clocks etc.
6940 */
6941 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6942 pm_runtime_put_sync(hba->dev);
6943 ufshcd_exit_clk_scaling(hba);
6944 ufshcd_hba_exit(hba);
6945 }
6946
6947 trace_ufshcd_init(dev_name(hba->dev), ret,
6948 ktime_to_us(ktime_sub(ktime_get(), start)),
6949 hba->curr_dev_pwr_mode, hba->uic_link_state);
6950 return ret;
6951}
6952
6953/**
6954 * ufshcd_async_scan - asynchronous execution for probing hba
6955 * @data: data pointer to pass to this function
6956 * @cookie: cookie data
6957 */
6958static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6959{
6960 struct ufs_hba *hba = (struct ufs_hba *)data;
6961
6962 ufshcd_probe_hba(hba);
6963}
6964
6965static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6966{
6967 unsigned long flags;
6968 struct Scsi_Host *host;
6969 struct ufs_hba *hba;
6970 int index;
6971 bool found = false;
6972
6973 if (!scmd || !scmd->device || !scmd->device->host)
6974 return BLK_EH_DONE;
6975
6976 host = scmd->device->host;
6977 hba = shost_priv(host);
6978 if (!hba)
6979 return BLK_EH_DONE;
6980
6981 spin_lock_irqsave(host->host_lock, flags);
6982
6983 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6984 if (hba->lrb[index].cmd == scmd) {
6985 found = true;
6986 break;
6987 }
6988 }
6989
6990 spin_unlock_irqrestore(host->host_lock, flags);
6991
6992 /*
6993 * Bypass SCSI error handling and reset the block layer timer if this
6994 * SCSI command was not actually dispatched to UFS driver, otherwise
6995 * let SCSI layer handle the error as usual.
6996 */
6997 return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
6998}
6999
7000static const struct attribute_group *ufshcd_driver_groups[] = {
7001 &ufs_sysfs_unit_descriptor_group,
7002 &ufs_sysfs_lun_attributes_group,
7003 NULL,
7004};
7005
7006static struct scsi_host_template ufshcd_driver_template = {
7007 .module = THIS_MODULE,
7008 .name = UFSHCD,
7009 .proc_name = UFSHCD,
7010 .queuecommand = ufshcd_queuecommand,
7011 .slave_alloc = ufshcd_slave_alloc,
7012 .slave_configure = ufshcd_slave_configure,
7013 .slave_destroy = ufshcd_slave_destroy,
7014 .change_queue_depth = ufshcd_change_queue_depth,
7015 .eh_abort_handler = ufshcd_abort,
7016 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
7017 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
7018 .eh_timed_out = ufshcd_eh_timed_out,
7019 .this_id = -1,
7020 .sg_tablesize = SG_ALL,
7021 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
7022 .can_queue = UFSHCD_CAN_QUEUE,
7023 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
7024 .max_host_blocked = 1,
7025 .track_queue_depth = 1,
7026 .sdev_groups = ufshcd_driver_groups,
7027 .dma_boundary = PAGE_SIZE - 1,
7028};
7029
7030static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
7031 int ua)
7032{
7033 int ret;
7034
7035 if (!vreg)
7036 return 0;
7037
7038 /*
7039 * "set_load" operation shall be required on those regulators
7040 * which specifically configured current limitation. Otherwise
7041 * zero max_uA may cause unexpected behavior when regulator is
7042 * enabled or set as high power mode.
7043 */
7044 if (!vreg->max_uA)
7045 return 0;
7046
7047 ret = regulator_set_load(vreg->reg, ua);
7048 if (ret < 0) {
7049 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
7050 __func__, vreg->name, ua, ret);
7051 }
7052
7053 return ret;
7054}
7055
7056static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7057 struct ufs_vreg *vreg)
7058{
7059 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
7060}
7061
7062static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7063 struct ufs_vreg *vreg)
7064{
7065 if (!vreg)
7066 return 0;
7067
7068 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7069}
7070
7071static int ufshcd_config_vreg(struct device *dev,
7072 struct ufs_vreg *vreg, bool on)
7073{
7074 int ret = 0;
7075 struct regulator *reg;
7076 const char *name;
7077 int min_uV, uA_load;
7078
7079 BUG_ON(!vreg);
7080
7081 reg = vreg->reg;
7082 name = vreg->name;
7083
7084 if (regulator_count_voltages(reg) > 0) {
7085 if (vreg->min_uV && vreg->max_uV) {
7086 min_uV = on ? vreg->min_uV : 0;
7087 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
7088 if (ret) {
7089 dev_err(dev,
7090 "%s: %s set voltage failed, err=%d\n",
7091 __func__, name, ret);
7092 goto out;
7093 }
7094 }
7095
7096 uA_load = on ? vreg->max_uA : 0;
7097 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
7098 if (ret)
7099 goto out;
7100 }
7101out:
7102 return ret;
7103}
7104
7105static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
7106{
7107 int ret = 0;
7108
7109 if (!vreg || vreg->enabled)
7110 goto out;
7111
7112 ret = ufshcd_config_vreg(dev, vreg, true);
7113 if (!ret)
7114 ret = regulator_enable(vreg->reg);
7115
7116 if (!ret)
7117 vreg->enabled = true;
7118 else
7119 dev_err(dev, "%s: %s enable failed, err=%d\n",
7120 __func__, vreg->name, ret);
7121out:
7122 return ret;
7123}
7124
7125static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
7126{
7127 int ret = 0;
7128
7129 if (!vreg || !vreg->enabled)
7130 goto out;
7131
7132 ret = regulator_disable(vreg->reg);
7133
7134 if (!ret) {
7135 /* ignore errors on applying disable config */
7136 ufshcd_config_vreg(dev, vreg, false);
7137 vreg->enabled = false;
7138 } else {
7139 dev_err(dev, "%s: %s disable failed, err=%d\n",
7140 __func__, vreg->name, ret);
7141 }
7142out:
7143 return ret;
7144}
7145
7146static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
7147{
7148 int ret = 0;
7149 struct device *dev = hba->dev;
7150 struct ufs_vreg_info *info = &hba->vreg_info;
7151
7152 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
7153 if (ret)
7154 goto out;
7155
7156 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
7157 if (ret)
7158 goto out;
7159
7160 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7161 if (ret)
7162 goto out;
7163
7164out:
7165 if (ret) {
7166 ufshcd_toggle_vreg(dev, info->vccq2, false);
7167 ufshcd_toggle_vreg(dev, info->vccq, false);
7168 ufshcd_toggle_vreg(dev, info->vcc, false);
7169 }
7170 return ret;
7171}
7172
7173static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7174{
7175 struct ufs_vreg_info *info = &hba->vreg_info;
7176
7177 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7178}
7179
7180static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7181{
7182 int ret = 0;
7183
7184 if (!vreg)
7185 goto out;
7186
7187 vreg->reg = devm_regulator_get(dev, vreg->name);
7188 if (IS_ERR(vreg->reg)) {
7189 ret = PTR_ERR(vreg->reg);
7190 dev_err(dev, "%s: %s get failed, err=%d\n",
7191 __func__, vreg->name, ret);
7192 }
7193out:
7194 return ret;
7195}
7196
7197static int ufshcd_init_vreg(struct ufs_hba *hba)
7198{
7199 int ret = 0;
7200 struct device *dev = hba->dev;
7201 struct ufs_vreg_info *info = &hba->vreg_info;
7202
7203 ret = ufshcd_get_vreg(dev, info->vcc);
7204 if (ret)
7205 goto out;
7206
7207 ret = ufshcd_get_vreg(dev, info->vccq);
7208 if (ret)
7209 goto out;
7210
7211 ret = ufshcd_get_vreg(dev, info->vccq2);
7212out:
7213 return ret;
7214}
7215
7216static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7217{
7218 struct ufs_vreg_info *info = &hba->vreg_info;
7219
7220 if (info)
7221 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7222
7223 return 0;
7224}
7225
7226static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7227 bool skip_ref_clk)
7228{
7229 int ret = 0;
7230 struct ufs_clk_info *clki;
7231 struct list_head *head = &hba->clk_list_head;
7232 unsigned long flags;
7233 ktime_t start = ktime_get();
7234 bool clk_state_changed = false;
7235
7236 if (list_empty(head))
7237 goto out;
7238
7239 /*
7240 * Vendor specific setup_clocks ops may depend on clocks managed by
7241 * this standard driver, hence call the vendor specific setup_clocks
7242 * before disabling the clocks managed here.
7243 */
7244 if (!on) {
7245 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7246 if (ret)
7247 return ret;
7248 }
7249
7250 list_for_each_entry(clki, head, list) {
7251 if (!IS_ERR_OR_NULL(clki->clk)) {
7252 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7253 continue;
7254
7255 clk_state_changed = on ^ clki->enabled;
7256 if (on && !clki->enabled) {
7257 ret = clk_prepare_enable(clki->clk);
7258 if (ret) {
7259 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7260 __func__, clki->name, ret);
7261 goto out;
7262 }
7263 } else if (!on && clki->enabled) {
7264 clk_disable_unprepare(clki->clk);
7265 }
7266 clki->enabled = on;
7267 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7268 clki->name, on ? "en" : "dis");
7269 }
7270 }
7271
7272 /*
7273 * Vendor specific setup_clocks ops may depend on clocks managed by
7274 * this standard driver, hence call the vendor specific setup_clocks
7275 * after enabling the clocks managed here.
7276 */
7277 if (on) {
7278 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7279 if (ret)
7280 return ret;
7281 }
7282
7283out:
7284 if (ret) {
7285 list_for_each_entry(clki, head, list) {
7286 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7287 clk_disable_unprepare(clki->clk);
7288 }
7289 } else if (!ret && on) {
7290 spin_lock_irqsave(hba->host->host_lock, flags);
7291 hba->clk_gating.state = CLKS_ON;
7292 trace_ufshcd_clk_gating(dev_name(hba->dev),
7293 hba->clk_gating.state);
7294 spin_unlock_irqrestore(hba->host->host_lock, flags);
7295 }
7296
7297 if (clk_state_changed)
7298 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7299 (on ? "on" : "off"),
7300 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7301 return ret;
7302}
7303
7304static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7305{
7306 return __ufshcd_setup_clocks(hba, on, false);
7307}
7308
7309static int ufshcd_init_clocks(struct ufs_hba *hba)
7310{
7311 int ret = 0;
7312 struct ufs_clk_info *clki;
7313 struct device *dev = hba->dev;
7314 struct list_head *head = &hba->clk_list_head;
7315
7316 if (list_empty(head))
7317 goto out;
7318
7319 list_for_each_entry(clki, head, list) {
7320 if (!clki->name)
7321 continue;
7322
7323 clki->clk = devm_clk_get(dev, clki->name);
7324 if (IS_ERR(clki->clk)) {
7325 ret = PTR_ERR(clki->clk);
7326 dev_err(dev, "%s: %s clk get failed, %d\n",
7327 __func__, clki->name, ret);
7328 goto out;
7329 }
7330
7331 /*
7332 * Parse device ref clk freq as per device tree "ref_clk".
7333 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
7334 * in ufshcd_alloc_host().
7335 */
7336 if (!strcmp(clki->name, "ref_clk"))
7337 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
7338
7339 if (clki->max_freq) {
7340 ret = clk_set_rate(clki->clk, clki->max_freq);
7341 if (ret) {
7342 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7343 __func__, clki->name,
7344 clki->max_freq, ret);
7345 goto out;
7346 }
7347 clki->curr_freq = clki->max_freq;
7348 }
7349 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7350 clki->name, clk_get_rate(clki->clk));
7351 }
7352out:
7353 return ret;
7354}
7355
7356static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7357{
7358 int err = 0;
7359
7360 if (!hba->vops)
7361 goto out;
7362
7363 err = ufshcd_vops_init(hba);
7364 if (err)
7365 goto out;
7366
7367 err = ufshcd_vops_setup_regulators(hba, true);
7368 if (err)
7369 goto out_exit;
7370
7371 goto out;
7372
7373out_exit:
7374 ufshcd_vops_exit(hba);
7375out:
7376 if (err)
7377 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7378 __func__, ufshcd_get_var_name(hba), err);
7379 return err;
7380}
7381
7382static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7383{
7384 if (!hba->vops)
7385 return;
7386
7387 ufshcd_vops_setup_regulators(hba, false);
7388
7389 ufshcd_vops_exit(hba);
7390}
7391
7392static int ufshcd_hba_init(struct ufs_hba *hba)
7393{
7394 int err;
7395
7396 /*
7397 * Handle host controller power separately from the UFS device power
7398 * rails, as this helps control UFS host controller power collapse
7399 * independently, which is different from UFS device power collapse.
7400 * Also, enable the host controller power before we go ahead with the
7401 * rest of the initialization here.
7402 */
7403 err = ufshcd_init_hba_vreg(hba);
7404 if (err)
7405 goto out;
7406
7407 err = ufshcd_setup_hba_vreg(hba, true);
7408 if (err)
7409 goto out;
7410
7411 err = ufshcd_init_clocks(hba);
7412 if (err)
7413 goto out_disable_hba_vreg;
7414
7415 err = ufshcd_setup_clocks(hba, true);
7416 if (err)
7417 goto out_disable_hba_vreg;
7418
7419 err = ufshcd_init_vreg(hba);
7420 if (err)
7421 goto out_disable_clks;
7422
7423 err = ufshcd_setup_vreg(hba, true);
7424 if (err)
7425 goto out_disable_clks;
7426
7427 err = ufshcd_variant_hba_init(hba);
7428 if (err)
7429 goto out_disable_vreg;
7430
7431 hba->is_powered = true;
7432 goto out;
7433
7434out_disable_vreg:
7435 ufshcd_setup_vreg(hba, false);
7436out_disable_clks:
7437 ufshcd_setup_clocks(hba, false);
7438out_disable_hba_vreg:
7439 ufshcd_setup_hba_vreg(hba, false);
7440out:
7441 return err;
7442}
7443
7444static void ufshcd_hba_exit(struct ufs_hba *hba)
7445{
7446 if (hba->is_powered) {
7447 ufshcd_variant_hba_exit(hba);
7448 ufshcd_setup_vreg(hba, false);
7449 ufshcd_suspend_clkscaling(hba);
7450 if (ufshcd_is_clkscaling_supported(hba))
7451 if (hba->devfreq)
7452 ufshcd_suspend_clkscaling(hba);
7453 ufshcd_setup_clocks(hba, false);
7454 ufshcd_setup_hba_vreg(hba, false);
7455 hba->is_powered = false;
7456 }
7457}
7458
7459static int
7460ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7461{
7462 unsigned char cmd[6] = {REQUEST_SENSE,
7463 0,
7464 0,
7465 0,
7466 UFS_SENSE_SIZE,
7467 0};
7468 char *buffer;
7469 int ret;
7470
7471 buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL);
7472 if (!buffer) {
7473 ret = -ENOMEM;
7474 goto out;
7475 }
7476
7477 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7478 UFS_SENSE_SIZE, NULL, NULL,
7479 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7480 if (ret)
7481 pr_err("%s: failed with err %d\n", __func__, ret);
7482
7483 kfree(buffer);
7484out:
7485 return ret;
7486}
7487
7488/**
7489 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7490 * power mode
7491 * @hba: per adapter instance
7492 * @pwr_mode: device power mode to set
7493 *
7494 * Returns 0 if requested power mode is set successfully
7495 * Returns non-zero if failed to set the requested power mode
7496 */
7497static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7498 enum ufs_dev_pwr_mode pwr_mode)
7499{
7500 unsigned char cmd[6] = { START_STOP };
7501 struct scsi_sense_hdr sshdr;
7502 struct scsi_device *sdp;
7503 unsigned long flags;
7504 int ret;
7505
7506 spin_lock_irqsave(hba->host->host_lock, flags);
7507 sdp = hba->sdev_ufs_device;
7508 if (sdp) {
7509 ret = scsi_device_get(sdp);
7510 if (!ret && !scsi_device_online(sdp)) {
7511 ret = -ENODEV;
7512 scsi_device_put(sdp);
7513 }
7514 } else {
7515 ret = -ENODEV;
7516 }
7517 spin_unlock_irqrestore(hba->host->host_lock, flags);
7518
7519 if (ret)
7520 return ret;
7521
7522 /*
7523 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7524 * handling, which would wait for host to be resumed. Since we know
7525 * we are functional while we are here, skip host resume in error
7526 * handling context.
7527 */
7528 hba->host->eh_noresume = 1;
7529 if (hba->wlun_dev_clr_ua) {
7530 ret = ufshcd_send_request_sense(hba, sdp);
7531 if (ret)
7532 goto out;
7533 /* Unit attention condition is cleared now */
7534 hba->wlun_dev_clr_ua = false;
7535 }
7536
7537 cmd[4] = pwr_mode << 4;
7538
7539 /*
7540 * This function is generally called from the power management
7541 * callbacks, hence set the RQF_PM flag so that it doesn't resume
7542 * already-suspended children.
7543 */
7544 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7545 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7546 if (ret) {
7547 sdev_printk(KERN_WARNING, sdp,
7548 "START_STOP failed for power mode: %d, result %x\n",
7549 pwr_mode, ret);
7550 if (driver_byte(ret) == DRIVER_SENSE)
7551 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7552 }
7553
7554 if (!ret)
7555 hba->curr_dev_pwr_mode = pwr_mode;
7556out:
7557 scsi_device_put(sdp);
7558 hba->host->eh_noresume = 0;
7559 return ret;
7560}
7561
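/*
 * Editor's sketch (not part of the driver): ufshcd_set_dev_pwr_mode()
 * above encodes the requested power condition into bits 7:4 of START
 * STOP UNIT CDB byte 4 via "cmd[4] = pwr_mode << 4". Assuming the
 * driver's enum encoding (UFS_ACTIVE_PWR_MODE = 1, UFS_SLEEP_PWR_MODE
 * = 2, UFS_POWERDOWN_PWR_MODE = 3), this helper shows the resulting
 * byte values.
 */
static inline unsigned char ufs_example_power_condition(enum ufs_dev_pwr_mode mode)
{
	/* e.g. UFS_SLEEP_PWR_MODE (2) -> 0x20, UFS_POWERDOWN_PWR_MODE (3) -> 0x30 */
	return (unsigned char)(mode << 4);
}
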
7562static int ufshcd_link_state_transition(struct ufs_hba *hba,
7563 enum uic_link_state req_link_state,
7564 int check_for_bkops)
7565{
7566 int ret = 0;
7567
7568 if (req_link_state == hba->uic_link_state)
7569 return 0;
7570
7571 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7572 ret = ufshcd_uic_hibern8_enter(hba);
7573 if (!ret)
7574 ufshcd_set_link_hibern8(hba);
7575 else
7576 goto out;
7577 }
7578 /*
7579 * If autobkops is enabled, link can't be turned off because
7580 * turning off the link would also turn off the device.
7581 */
7582 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7583 (!check_for_bkops || !hba->auto_bkops_enabled)) {
7585 /*
7586 * Make sure that the link is in low power mode; we currently do
7587 * this by putting the link in Hibern8. Another way to put the
7588 * link in low power mode is to send a DME end point reset to the
7589 * device and then send the DME reset command to the local UniPro,
7590 * but putting the link in Hibern8 is much faster.
7591 */
7592 ret = ufshcd_uic_hibern8_enter(hba);
7593 if (ret)
7594 goto out;
7595 /*
7596 * Change controller state to "reset state" which
7597 * should also put the link in off/reset state
7598 */
7599 ufshcd_hba_stop(hba, true);
7600 /*
7601 * TODO: Check if we need any delay to make sure that
7602 * controller is reset
7603 */
7604 ufshcd_set_link_off(hba);
7605 }
7606
7607out:
7608 return ret;
7609}
7610
7611static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7612{
7613 /*
7614 * It seems some UFS devices may keep drawing more than sleep current
7615 * (at least for 500us) from UFS rails (especially from VCCQ rail).
7616 * To avoid this situation, add a 2ms delay before putting these UFS
7617 * rails in LPM mode.
7618 */
7619 if (!ufshcd_is_link_active(hba) &&
7620 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7621 usleep_range(2000, 2100);
7622
7623 /*
7624 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7625 * save some power.
7626 *
7627 * If the UFS device and link are in OFF state, all power supplies (VCC,
7628 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7629 * required. If the UFS link is inactive (Hibern8 or OFF state) and the
7630 * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
7631 *
7632 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
7633 * in low power state which would save some power.
7634 */
7635 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7636 !hba->dev_info.is_lu_power_on_wp) {
7637 ufshcd_setup_vreg(hba, false);
7638 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7639 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7640 if (!ufshcd_is_link_active(hba)) {
7641 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7642 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7643 }
7644 }
7645}
7646
7647static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7648{
7649 int ret = 0;
7650
7651 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7652 !hba->dev_info.is_lu_power_on_wp) {
7653 ret = ufshcd_setup_vreg(hba, true);
7654 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7655 if (!ufshcd_is_link_active(hba)) {
7656 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7657 if (ret)
7658 goto vcc_disable;
7659 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7660 if (ret)
7661 goto vccq_lpm;
7662 }
7663 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7664 }
7665 goto out;
7666
7667vccq_lpm:
7668 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7669vcc_disable:
7670 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7671out:
7672 return ret;
7673}
7674
7675static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7676{
7677 if (ufshcd_is_link_off(hba))
7678 ufshcd_setup_hba_vreg(hba, false);
7679}
7680
7681static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7682{
7683 if (ufshcd_is_link_off(hba))
7684 ufshcd_setup_hba_vreg(hba, true);
7685}
7686
7687/**
7688 * ufshcd_suspend - helper function for suspend operations
7689 * @hba: per adapter instance
7690 * @pm_op: desired low power operation type
7691 *
7692 * This function will try to put the UFS device and link into low power
7693 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7694 * (System PM level).
7695 *
7696 * If this function is called during shutdown, it will make sure that
7697 * both UFS device and UFS link are powered off.
7698 *
7699 * NOTE: UFS device & link must be active before we enter this function.
7700 *
7701 * Returns 0 for success and non-zero for failure
7702 */
7703static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7704{
7705 int ret = 0;
7706 enum ufs_pm_level pm_lvl;
7707 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7708 enum uic_link_state req_link_state;
7709
7710 hba->pm_op_in_progress = 1;
7711 if (!ufshcd_is_shutdown_pm(pm_op)) {
7712 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7713 hba->rpm_lvl : hba->spm_lvl;
7714 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7715 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7716 } else {
7717 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7718 req_link_state = UIC_LINK_OFF_STATE;
7719 }
7720
7721 /*
7722 * If we can't transition into any of the low power modes
7723 * just gate the clocks.
7724 */
7725 ufshcd_hold(hba, false);
7726 hba->clk_gating.is_suspended = true;
7727
7728 if (hba->clk_scaling.is_allowed) {
7729 cancel_work_sync(&hba->clk_scaling.suspend_work);
7730 cancel_work_sync(&hba->clk_scaling.resume_work);
7731 ufshcd_suspend_clkscaling(hba);
7732 }
7733
7734 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7735 req_link_state == UIC_LINK_ACTIVE_STATE) {
7736 goto disable_clks;
7737 }
7738
7739 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7740 (req_link_state == hba->uic_link_state))
7741 goto enable_gating;
7742
7743 /* UFS device & link must be active before we enter in this function */
7744 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7745 ret = -EINVAL;
7746 goto enable_gating;
7747 }
7748
7749 if (ufshcd_is_runtime_pm(pm_op)) {
7750 if (ufshcd_can_autobkops_during_suspend(hba)) {
7751 /*
7752 * The device is idle with no requests in the queue,
7753 * allow background operations if bkops status shows
7754 * that performance might be impacted.
7755 */
7756 ret = ufshcd_urgent_bkops(hba);
7757 if (ret)
7758 goto enable_gating;
7759 } else {
7760 /* make sure that auto bkops is disabled */
7761 ufshcd_disable_auto_bkops(hba);
7762 }
7763 }
7764
7765 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7766 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7767 !ufshcd_is_runtime_pm(pm_op))) {
7768 /* ensure that bkops is disabled */
7769 ufshcd_disable_auto_bkops(hba);
7770 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7771 if (ret)
7772 goto enable_gating;
7773 }
7774
7775 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7776 if (ret)
7777 goto set_dev_active;
7778
7779 ufshcd_vreg_set_lpm(hba);
7780
7781disable_clks:
7782 /*
7783 * Call vendor specific suspend callback. As these callbacks may access
7784 * vendor specific host controller register space, call them while the
7785 * host clocks are still ON (before they are turned off below).
7786 */
7787 ret = ufshcd_vops_suspend(hba, pm_op);
7788 if (ret)
7789 goto set_link_active;
7790
7791 if (!ufshcd_is_link_active(hba))
7792 ufshcd_setup_clocks(hba, false);
7793 else
7794 /* If link is active, device ref_clk can't be switched off */
7795 __ufshcd_setup_clocks(hba, false, true);
7796
7797 hba->clk_gating.state = CLKS_OFF;
7798 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7799 /*
7800 * Disable the host IRQ as there won't be any host controller
7801 * transaction expected till resume.
7802 */
7803 ufshcd_disable_irq(hba);
7804 /* Put the host controller in low power mode if possible */
7805 ufshcd_hba_vreg_set_lpm(hba);
7806 goto out;
7807
7808set_link_active:
7809 if (hba->clk_scaling.is_allowed)
7810 ufshcd_resume_clkscaling(hba);
7811 ufshcd_vreg_set_hpm(hba);
7812 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7813 ufshcd_set_link_active(hba);
7814 else if (ufshcd_is_link_off(hba))
7815 ufshcd_host_reset_and_restore(hba);
7816set_dev_active:
7817 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7818 ufshcd_disable_auto_bkops(hba);
7819enable_gating:
7820 if (hba->clk_scaling.is_allowed)
7821 ufshcd_resume_clkscaling(hba);
7822 hba->clk_gating.is_suspended = false;
7823 ufshcd_release(hba);
7824out:
7825 hba->pm_op_in_progress = 0;
7826 return ret;
7827}
7828
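/*
 * Editor's sketch: ufshcd_suspend() above derives its targets from
 * rpm_lvl/spm_lvl via ufs_get_pm_lvl_to_dev_pwr_mode() and
 * ufs_get_pm_lvl_to_link_pwr_state(). The table below is a minimal
 * illustration of such a level -> (device mode, link state) mapping;
 * the struct and table names are hypothetical and the contents show
 * the idea rather than copy the driver's actual table.
 */
struct example_pm_lvl_map {
	enum ufs_dev_pwr_mode dev_mode;
	enum uic_link_state link_state;
};

static const struct example_pm_lvl_map example_pm_lvl_states[] = {
	{ UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE },  /* no power saving */
	{ UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE }, /* link saves power */
	{ UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE },  /* device sleeps */
	{ UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE }, /* default in ufshcd_init() */
	{ UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE },     /* deepest state */
};
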
7829/**
7830 * ufshcd_resume - helper function for resume operations
7831 * @hba: per adapter instance
7832 * @pm_op: runtime PM or system PM
7833 *
7834 * This function basically brings the UFS device, UniPro link and controller
7835 * to active state.
7836 *
7837 * Returns 0 for success and non-zero for failure
7838 */
7839static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7840{
7841 int ret;
7842 enum uic_link_state old_link_state;
7843
7844 hba->pm_op_in_progress = 1;
7845 old_link_state = hba->uic_link_state;
7846
7847 ufshcd_hba_vreg_set_hpm(hba);
7848 /* Make sure clocks are enabled before accessing controller */
7849 ret = ufshcd_setup_clocks(hba, true);
7850 if (ret)
7851 goto out;
7852
7853 /* enable the host irq as host controller would be active soon */
7854 ret = ufshcd_enable_irq(hba);
7855 if (ret)
7856 goto disable_irq_and_vops_clks;
7857
7858 ret = ufshcd_vreg_set_hpm(hba);
7859 if (ret)
7860 goto disable_irq_and_vops_clks;
7861
7862 /*
7863 * Call vendor specific resume callback. As these callbacks may access
7864 * vendor specific host controller register space, call them when the
7865 * host clocks are ON.
7866 */
7867 ret = ufshcd_vops_resume(hba, pm_op);
7868 if (ret)
7869 goto disable_vreg;
7870
7871 if (ufshcd_is_link_hibern8(hba)) {
7872 ret = ufshcd_uic_hibern8_exit(hba);
7873 if (!ret)
7874 ufshcd_set_link_active(hba);
7875 else
7876 goto vendor_suspend;
7877 } else if (ufshcd_is_link_off(hba)) {
7878 ret = ufshcd_host_reset_and_restore(hba);
7879 /*
7880 * ufshcd_host_reset_and_restore() should have already
7881 * set the link state as active
7882 */
7883 if (ret || !ufshcd_is_link_active(hba))
7884 goto vendor_suspend;
7885 }
7886
7887 if (!ufshcd_is_ufs_dev_active(hba)) {
7888 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7889 if (ret)
7890 goto set_old_link_state;
7891 }
7892
7893 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7894 ufshcd_enable_auto_bkops(hba);
7895 else
7896 /*
7897 * If BKOPs operations are urgently needed at this moment then
7898 * keep auto-bkops enabled or else disable it.
7899 */
7900 ufshcd_urgent_bkops(hba);
7901
7902 hba->clk_gating.is_suspended = false;
7903
7904 if (hba->clk_scaling.is_allowed)
7905 ufshcd_resume_clkscaling(hba);
7906
7907 /* Schedule clock gating in case of no access to UFS device yet */
7908 ufshcd_release(hba);
7909
7910 /* Enable Auto-Hibernate if configured */
7911 ufshcd_auto_hibern8_enable(hba);
7912
7913 goto out;
7914
7915set_old_link_state:
7916 ufshcd_link_state_transition(hba, old_link_state, 0);
7917vendor_suspend:
7918 ufshcd_vops_suspend(hba, pm_op);
7919disable_vreg:
7920 ufshcd_vreg_set_lpm(hba);
7921disable_irq_and_vops_clks:
7922 ufshcd_disable_irq(hba);
7923 if (hba->clk_scaling.is_allowed)
7924 ufshcd_suspend_clkscaling(hba);
7925 ufshcd_setup_clocks(hba, false);
7926out:
7927 hba->pm_op_in_progress = 0;
7928 return ret;
7929}
7930
7931/**
7932 * ufshcd_system_suspend - system suspend routine
7933 * @hba: per adapter instance
7934 *
7935 * Check the description of ufshcd_suspend() function for more details.
7936 *
7937 * Returns 0 for success and non-zero for failure
7938 */
7939int ufshcd_system_suspend(struct ufs_hba *hba)
7940{
7941 int ret = 0;
7942 ktime_t start = ktime_get();
7943
7944 if (!hba || !hba->is_powered)
7945 return 0;
7946
7947 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7948 hba->curr_dev_pwr_mode) &&
7949 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7950 hba->uic_link_state))
7951 goto out;
7952
7953 if (pm_runtime_suspended(hba->dev)) {
7954 /*
7955 * UFS device and/or UFS link low power states during runtime
7956 * suspend seem to be different from what is expected during
7957 * system suspend. Hence runtime resume the device & link and
7958 * let the system suspend low power states take effect.
7959 * TODO: If resume takes a long time, we might optimize it in
7960 * the future by not resuming everything if possible.
7961 */
7962 ret = ufshcd_runtime_resume(hba);
7963 if (ret)
7964 goto out;
7965 }
7966
7967 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7968out:
7969 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7970 ktime_to_us(ktime_sub(ktime_get(), start)),
7971 hba->curr_dev_pwr_mode, hba->uic_link_state);
7972 if (!ret)
7973 hba->is_sys_suspended = true;
7974 return ret;
7975}
7976EXPORT_SYMBOL(ufshcd_system_suspend);
7977
7978/**
7979 * ufshcd_system_resume - system resume routine
7980 * @hba: per adapter instance
7981 *
7982 * Returns 0 for success and non-zero for failure
7983 */
7985int ufshcd_system_resume(struct ufs_hba *hba)
7986{
7987 int ret = 0;
7988 ktime_t start = ktime_get();
7989
7990 if (!hba)
7991 return -EINVAL;
7992
7993 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
7994 /*
7995 * Let the runtime resume take care of resuming
7996 * if runtime suspended.
7997 */
7998 goto out;
7999 else
8000 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
8001out:
8002 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8003 ktime_to_us(ktime_sub(ktime_get(), start)),
8004 hba->curr_dev_pwr_mode, hba->uic_link_state);
8005 if (!ret)
8006 hba->is_sys_suspended = false;
8007 return ret;
8008}
8009EXPORT_SYMBOL(ufshcd_system_resume);
8010
8011/**
8012 * ufshcd_runtime_suspend - runtime suspend routine
8013 * @hba: per adapter instance
8014 *
8015 * Check the description of ufshcd_suspend() function for more details.
8016 *
8017 * Returns 0 for success and non-zero for failure
8018 */
8019int ufshcd_runtime_suspend(struct ufs_hba *hba)
8020{
8021 int ret = 0;
8022 ktime_t start = ktime_get();
8023
8024 if (!hba)
8025 return -EINVAL;
8026
8027 if (!hba->is_powered)
8028 goto out;
8029 else
8030 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
8031out:
8032 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
8033 ktime_to_us(ktime_sub(ktime_get(), start)),
8034 hba->curr_dev_pwr_mode, hba->uic_link_state);
8035 return ret;
8036}
8037EXPORT_SYMBOL(ufshcd_runtime_suspend);
8038
8039/**
8040 * ufshcd_runtime_resume - runtime resume routine
8041 * @hba: per adapter instance
8042 *
8043 * This function basically brings the UFS device, UniPro link and controller
8044 * to active state. The following operations are done in this function:
8045 *
8046 * 1. Turn on all the controller related clocks
8047 * 2. Bring the UniPro link out of Hibernate state
8048 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
8049 * to active state.
8050 * 4. If auto-bkops is enabled on the device, disable it.
8051 *
8052 * So the following would be the possible power state after this function
8053 * returns successfully:
8054 * S1: UFS device in Active state with VCC rail ON
8055 * UniPro link in Active state
8056 * All the UFS/UniPro controller clocks are ON
8057 *
8058 * Returns 0 for success and non-zero for failure
8059 */
8060int ufshcd_runtime_resume(struct ufs_hba *hba)
8061{
8062 int ret = 0;
8063 ktime_t start = ktime_get();
8064
8065 if (!hba)
8066 return -EINVAL;
8067
8068 if (!hba->is_powered)
8069 goto out;
8070 else
8071 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
8072out:
8073 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
8074 ktime_to_us(ktime_sub(ktime_get(), start)),
8075 hba->curr_dev_pwr_mode, hba->uic_link_state);
8076 return ret;
8077}
8078EXPORT_SYMBOL(ufshcd_runtime_resume);
8079
8080int ufshcd_runtime_idle(struct ufs_hba *hba)
8081{
8082 return 0;
8083}
8084EXPORT_SYMBOL(ufshcd_runtime_idle);
8085
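/*
 * Editor's sketch: a bus glue driver typically forwards its dev_pm_ops
 * callbacks to the ufshcd_* helpers exported above, assuming the hba
 * pointer was stored as driver data at probe time. The example_* names
 * are hypothetical; the dev_pm_ops macros are the usual ones from
 * <linux/pm.h>.
 */
static int example_ufs_suspend(struct device *dev)
{
	return ufshcd_system_suspend(dev_get_drvdata(dev));
}

static int example_ufs_resume(struct device *dev)
{
	return ufshcd_system_resume(dev_get_drvdata(dev));
}

static int example_ufs_runtime_suspend(struct device *dev)
{
	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
}

static int example_ufs_runtime_resume(struct device *dev)
{
	return ufshcd_runtime_resume(dev_get_drvdata(dev));
}

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_ufs_suspend, example_ufs_resume)
	SET_RUNTIME_PM_OPS(example_ufs_runtime_suspend,
			   example_ufs_runtime_resume, NULL)
};
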
8086/**
8087 * ufshcd_shutdown - shutdown routine
8088 * @hba: per adapter instance
8089 *
8090 * This function would power off both UFS device and UFS link.
8091 *
8092 * Returns 0 always to allow force shutdown even in case of errors.
8093 */
8094int ufshcd_shutdown(struct ufs_hba *hba)
8095{
8096 int ret = 0;
8097
8098 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
8099 goto out;
8100
8101 if (pm_runtime_suspended(hba->dev)) {
8102 ret = ufshcd_runtime_resume(hba);
8103 if (ret)
8104 goto out;
8105 }
8106
8107 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
8108out:
8109 if (ret)
8110 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
8111 /* allow force shutdown even in case of errors */
8112 return 0;
8113}
8114EXPORT_SYMBOL(ufshcd_shutdown);
8115
8116/**
8117 * ufshcd_remove - de-allocate SCSI host and host memory space
8118 * data structures
8119 * @hba: per adapter instance
8120 */
8121void ufshcd_remove(struct ufs_hba *hba)
8122{
8123 ufs_bsg_remove(hba);
8124 ufs_sysfs_remove_nodes(hba->dev);
8125 scsi_remove_host(hba->host);
8126 /* disable interrupts */
8127 ufshcd_disable_intr(hba, hba->intr_mask);
8128 ufshcd_hba_stop(hba, true);
8129
8130 ufshcd_exit_clk_scaling(hba);
8131 ufshcd_exit_clk_gating(hba);
8132 if (ufshcd_is_clkscaling_supported(hba))
8133 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8134 ufshcd_hba_exit(hba);
8135}
8136EXPORT_SYMBOL_GPL(ufshcd_remove);
8137
8138/**
8139 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8140 * @hba: pointer to Host Bus Adapter (HBA)
8141 */
8142void ufshcd_dealloc_host(struct ufs_hba *hba)
8143{
8144 scsi_host_put(hba->host);
8145}
8146EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8147
8148/**
8149 * ufshcd_set_dma_mask - Set dma mask based on the controller
8150 * addressing capability
8151 * @hba: per adapter instance
8152 *
8153 * Returns 0 for success, non-zero for failure
8154 */
8155static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8156{
8157 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8158 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8159 return 0;
8160 }
8161 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8162}
8163
8164/**
8165 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8166 * @dev: pointer to device handle
8167 * @hba_handle: driver private handle
8168 * Returns 0 on success, non-zero value on failure
8169 */
8170int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8171{
8172 struct Scsi_Host *host;
8173 struct ufs_hba *hba;
8174 int err = 0;
8175
8176 if (!dev) {
8177 dev_err(dev,
8178 "Invalid memory reference: dev is NULL\n");
8179 err = -ENODEV;
8180 goto out_error;
8181 }
8182
8183 host = scsi_host_alloc(&ufshcd_driver_template,
8184 sizeof(struct ufs_hba));
8185 if (!host) {
8186 dev_err(dev, "scsi_host_alloc failed\n");
8187 err = -ENOMEM;
8188 goto out_error;
8189 }
8190 hba = shost_priv(host);
8191 hba->host = host;
8192 hba->dev = dev;
8193 *hba_handle = hba;
8194 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
8195
8196 INIT_LIST_HEAD(&hba->clk_list_head);
8197
8198out_error:
8199 return err;
8200}
8201EXPORT_SYMBOL(ufshcd_alloc_host);
8202
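/*
 * Editor's sketch: bus glue drivers pair ufshcd_alloc_host() above with
 * ufshcd_init() below. A minimal platform-driver probe sequence follows;
 * the function name is hypothetical and <linux/platform_device.h> is
 * assumed to be available in such a glue driver.
 */
static int example_ufs_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	struct resource *res;
	void __iomem *mmio_base;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio_base))
		return PTR_ERR(mmio_base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err)
		return err;

	err = ufshcd_init(hba, mmio_base, irq);
	if (err) {
		ufshcd_dealloc_host(hba);
		return err;
	}

	platform_set_drvdata(pdev, hba);
	return 0;
}
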
8203/**
8204 * ufshcd_init - Driver initialization routine
8205 * @hba: per-adapter instance
8206 * @mmio_base: base register address
8207 * @irq: Interrupt line of device
8208 * Returns 0 on success, non-zero value on failure
8209 */
8210int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8211{
8212 int err;
8213 struct Scsi_Host *host = hba->host;
8214 struct device *dev = hba->dev;
8215
8216 if (!mmio_base) {
8217 dev_err(hba->dev,
8218 "Invalid memory reference: mmio_base is NULL\n");
8219 err = -ENODEV;
8220 goto out_error;
8221 }
8222
8223 hba->mmio_base = mmio_base;
8224 hba->irq = irq;
8225
8226 /* Set descriptor lengths to specification defaults */
8227 ufshcd_def_desc_sizes(hba);
8228
8229 err = ufshcd_hba_init(hba);
8230 if (err)
8231 goto out_error;
8232
8233 /* Read capabilities registers */
8234 ufshcd_hba_capabilities(hba);
8235
8236 /* Get UFS version supported by the controller */
8237 hba->ufs_version = ufshcd_get_ufs_version(hba);
8238
8239 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8240 (hba->ufs_version != UFSHCI_VERSION_11) &&
8241 (hba->ufs_version != UFSHCI_VERSION_20) &&
8242 (hba->ufs_version != UFSHCI_VERSION_21))
8243 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8244 hba->ufs_version);
8245
8246 /* Get Interrupt bit mask per version */
8247 hba->intr_mask = ufshcd_get_intr_mask(hba);
8248
8249 err = ufshcd_set_dma_mask(hba);
8250 if (err) {
8251 dev_err(hba->dev, "set dma mask failed\n");
8252 goto out_disable;
8253 }
8254
8255 /* Allocate memory for host memory space */
8256 err = ufshcd_memory_alloc(hba);
8257 if (err) {
8258 dev_err(hba->dev, "Memory allocation failed\n");
8259 goto out_disable;
8260 }
8261
8262 /* Configure LRB */
8263 ufshcd_host_memory_configure(hba);
8264
8265 host->can_queue = hba->nutrs;
8266 host->cmd_per_lun = hba->nutrs;
8267 host->max_id = UFSHCD_MAX_ID;
8268 host->max_lun = UFS_MAX_LUNS;
8269 host->max_channel = UFSHCD_MAX_CHANNEL;
8270 host->unique_id = host->host_no;
8271 host->max_cmd_len = UFS_CDB_SIZE;
8272
8273 hba->max_pwr_info.is_valid = false;
8274
8275 /* Initialize wait queue for task management */
8276 init_waitqueue_head(&hba->tm_wq);
8277 init_waitqueue_head(&hba->tm_tag_wq);
8278
8279 /* Initialize work queues */
8280 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8281 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8282
8283 /* Initialize UIC command mutex */
8284 mutex_init(&hba->uic_cmd_mutex);
8285
8286 /* Initialize mutex for device management commands */
8287 mutex_init(&hba->dev_cmd.lock);
8288
8289 init_rwsem(&hba->clk_scaling_lock);
8290
8291 /* Initialize device management tag acquire wait queue */
8292 init_waitqueue_head(&hba->dev_cmd.tag_wq);
8293
8294 ufshcd_init_clk_gating(hba);
8295
8296 ufshcd_init_clk_scaling(hba);
8297
8298 /*
8299 * In order to avoid any spurious interrupt immediately after
8300 * registering UFS controller interrupt handler, clear any pending UFS
8301 * interrupt status and disable all the UFS interrupts.
8302 */
8303 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8304 REG_INTERRUPT_STATUS);
8305 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8306 /*
8307 * Make sure that UFS interrupts are disabled and any pending interrupt
8308 * status is cleared before registering UFS interrupt handler.
8309 */
8310 mb();
8311
8312 /* IRQ registration */
8313 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8314 if (err) {
8315 dev_err(hba->dev, "request irq failed\n");
8316 goto exit_gating;
8317 } else {
8318 hba->is_irq_enabled = true;
8319 }
8320
8321 err = scsi_add_host(host, hba->dev);
8322 if (err) {
8323 dev_err(hba->dev, "scsi_add_host failed\n");
8324 goto exit_gating;
8325 }
8326
8327 /* Host controller enable */
8328 err = ufshcd_hba_enable(hba);
8329 if (err) {
8330 dev_err(hba->dev, "Host controller enable failed\n");
8331 ufshcd_print_host_regs(hba);
8332 ufshcd_print_host_state(hba);
8333 goto out_remove_scsi_host;
8334 }
8335
8336 /*
8337 * Set the default power management level for runtime and system PM.
8338 * Default power saving mode is to keep UFS link in Hibern8 state
8339 * and UFS device in sleep state.
8340 */
8341 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8342 UFS_SLEEP_PWR_MODE,
8343 UIC_LINK_HIBERN8_STATE);
8344 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8345 UFS_SLEEP_PWR_MODE,
8346 UIC_LINK_HIBERN8_STATE);
8347
8348 /* Set the default auto-hibernate idle timer to 150 ms (timer = 150 at scale 3, i.e. 1 ms units) */
8349 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
8350 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8351 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8352 }
8353
8354 /* Hold auto suspend until async scan completes */
8355 pm_runtime_get_sync(dev);
8356 atomic_set(&hba->scsi_block_reqs_cnt, 0);
8357 /*
8358 * We are assuming that the device wasn't put in sleep/power-down
8359 * state during the boot stage before the kernel took over.
8360 * This assumption helps avoid doing link startup twice during
8361 * ufshcd_probe_hba().
8362 */
8363 ufshcd_set_ufs_dev_active(hba);
8364
8365 async_schedule(ufshcd_async_scan, hba);
8366 ufs_sysfs_add_nodes(hba->dev);
8367
8368 return 0;
8369
8370out_remove_scsi_host:
8371 scsi_remove_host(hba->host);
8372exit_gating:
8373 ufshcd_exit_clk_scaling(hba);
8374 ufshcd_exit_clk_gating(hba);
8375out_disable:
8376 hba->is_irq_enabled = false;
8377 ufshcd_hba_exit(hba);
8378out_error:
8379 return err;
8380}
8381EXPORT_SYMBOL_GPL(ufshcd_init);
8382
8383MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
8384MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8385MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8386MODULE_LICENSE("GPL");
8387MODULE_VERSION(UFSHCD_DRIVER_VERSION);