// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
#include <linux/devfreq.h>

#include <soc/qcom/ice.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#include "ufs-qcom.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>

#define MCQ_QCFGPTR_MASK	GENMASK(7, 0)
#define MCQ_QCFGPTR_UNIT	0x200
#define MCQ_SQATTR_OFFSET(c) \
	((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE	0x40
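
/*
 * Illustrative example (values assumed, not from any particular SoC):
 * MCQ_SQATTR_OFFSET() takes bits [23:16] of the MCQ capability value as a
 * config pointer and scales it by the 0x200-byte unit. If those bits read
 * 0x10, the submission queue attribute block starts at
 * 0x10 * 0x200 = 0x2000 bytes into the register space.
 */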

enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
	return container_of(rcd, struct ufs_qcom_host, rcdev);
}

#ifdef CONFIG_SCSI_UFS_CRYPTO

static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
	if (host->hba->caps & UFSHCD_CAP_CRYPTO)
		qcom_ice_enable(host->ice);
}

static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
	struct ufs_hba *hba = host->hba;
	struct device *dev = hba->dev;
	struct qcom_ice *ice;

	ice = of_qcom_ice_get(dev);
	if (ice == ERR_PTR(-EOPNOTSUPP)) {
		dev_warn(dev, "Disabling inline encryption support\n");
		ice = NULL;
	}

	if (IS_ERR_OR_NULL(ice))
		return PTR_ERR_OR_ZERO(ice);

	host->ice = ice;
	hba->caps |= UFSHCD_CAP_CRYPTO;

	return 0;
}

static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
{
	if (host->hba->caps & UFSHCD_CAP_CRYPTO)
		return qcom_ice_resume(host->ice);

	return 0;
}

static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
	if (host->hba->caps & UFSHCD_CAP_CRYPTO)
		return qcom_ice_suspend(host->ice);

	return 0;
}

static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
				    const union ufs_crypto_cfg_entry *cfg,
				    int slot)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	union ufs_crypto_cap_entry cap;
	bool config_enable =
		cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;

	/* Only AES-256-XTS has been tested so far. */
	cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
	if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
	    cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
		return -EINVAL;

	if (config_enable)
		return qcom_ice_program_key(host->ice,
					    QCOM_ICE_CRYPTO_ALG_AES_XTS,
					    QCOM_ICE_CRYPTO_KEY_SIZE_256,
					    cfg->crypto_key,
					    cfg->data_unit_size, slot);
	else
		return qcom_ice_evict_key(host->ice, slot);
}

#else

#define ufs_qcom_ice_program_key NULL

static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
}

static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
	return 0;
}

static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
{
	return 0;
}

static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
	return 0;
}
#endif

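/*
 * Look up the clock @name via devm_clk_get(). When @optional is true, a
 * missing clock (-ENOENT) is not treated as an error: *clk_out is set to
 * NULL, which the clk API accepts as a no-op clock.
 */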
static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out, bool optional)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (!IS_ERR(clk)) {
		*clk_out = clk;
		return 0;
	}

	err = PTR_ERR(clk);

	if (optional && err == -ENOENT) {
		*clk_out = NULL;
		return 0;
	}

	if (err != -EPROBE_DEFER)
		dev_err(dev, "failed to get %s err %d\n", name, err);

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
				       host->rx_l0_sync_clk);
	if (err)
		return err;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
				       host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
				       host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
				       host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;

	return 0;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);

	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (has_acpi_companion(dev))
		return 0;

	err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
				    &host->rx_l0_sync_clk, false);
	if (err)
		return err;

	err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
				    &host->tx_l0_sync_clk, false);
	if (err)
		return err;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
					    &host->rx_l1_sync_clk, false);
		if (err)
			return err;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
					    &host->tx_l1_sync_clk, true);
	}

	return 0;
}

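/*
 * Poll MPHY_TX_FSM_STATE until it reports TX_FSM_HIBERN8 or the
 * HBRN8_POLL_TOUT_MS timeout expires. Returns 0 when the PHY reached
 * hibern8, a negative errno on DME access failure, or the unexpected FSM
 * state value otherwise.
 */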
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time during the
	 * polling above, so check the state once more.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
			__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
			__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);

	if (host->hw_ver.major == 0x05)
		ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);

	/* Make sure the above configuration is applied before we return */
	mb();
}

/*
 * ufs_qcom_host_reset - reset host controller and PHY
 */
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	bool reenable_intr = false;

	if (!host->core_reset) {
		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
		return 0;
	}

	reenable_intr = hba->is_irq_enabled;
	disable_irq(hba->irq);
	hba->is_irq_enabled = false;

	ret = reset_control_assert(host->core_reset);
	if (ret) {
		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
			__func__, ret);
		return ret;
	}

	/*
	 * The hardware requires a delay of at least 3-4 sleep clock
	 * (32.7 kHz) cycles between assert and deassert, which comes to
	 * ~125us (4/32768). To be on the safe side, add a 200us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(host->core_reset);
	if (ret)
		dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
			__func__, ret);

	usleep_range(1000, 1100);

	if (reenable_intr) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}

	return 0;
}

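/*
 * Return the maximum HS gear this controller supports: HS-G2 on legacy v1
 * hardware, the gear advertised in REG_UFS_PARAM0 on v4 and newer, and
 * HS-G3 otherwise.
 */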
static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1) {
		/*
		 * HS-G3 operations may not reliably work on legacy QCOM
		 * UFS host controller hardware even though the capability
		 * exchange during the link startup phase may end up
		 * negotiating the maximum supported gear as G3.
		 * Hence downgrade the maximum supported gear to HS-G2.
		 */
		return UFS_HS_G2;
	} else if (host->hw_ver.major >= 0x4) {
		return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
	}

	/* Default is HS-G3 */
	return UFS_HS_G3;
}

static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret;

	/* Reset UFS Host Controller and PHY */
	ret = ufs_qcom_host_reset(hba);
	if (ret)
		dev_warn(hba->dev, "%s: host reset returned %d\n",
			 __func__, ret);

	/* PHY initialization - calibrate the PHY */
	ret = phy_init(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		return ret;
	}

	phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);

	/* Power on the PHY - start the serdes and the PHY's power and clocks */
	ret = phy_power_on(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
			__func__, ret);
		goto out_disable_phy;
	}

	ufs_qcom_select_unipro_mode(host);

	return 0;

out_disable_phy:
	phy_exit(phy);

	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. The UTP controller CGCs are disabled by default;
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		      ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		      REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before the next operations */
	mb();
}

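/*
 * Host controller enable notification: PRE_CHANGE runs the power up
 * sequence and then enables the lane clocks; POST_CHANGE verifies that the
 * PHY moved from DISABLED to HIBERN8 and enables HW clock gating and ICE.
 */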
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after the PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		ufs_qcom_ice_enable(host);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The QUniPro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for its
	 * interrupt aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		return 0;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		return -EINVAL;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * Make sure the above write is applied before we return
		 * from this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		return 0;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				return -EINVAL;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				return -EINVAL;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			return -EINVAL;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			return -EINVAL;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		return -EINVAL;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* These two register fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * Make sure the above write is applied before we return
		 * from this function.
		 */
		mb();
	}

	if (update_link_startup_timer && host->hw_ver.major != 0x5) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_CFG0);
		/*
		 * Make sure that this configuration is applied before
		 * we return.
		 */
		mb();
	}

	return 0;
}
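
/*
 * Illustrative arithmetic for ufs_qcom_cfg_timers() above (values assumed
 * for the example, not taken from any particular board): with a 150 MHz
 * core clock, core_clk_cycles_per_us = 150000000 / USEC_PER_SEC = 150, and
 * core_clk_period_in_ns = NSEC_PER_SEC / 150000000 = 6 ns (before the
 * OFFSET_CLK_NS_REG shift and MASK_CLK_NS_REG masking). For HS-G3 at rate
 * B, tx_clk_cycles_per_us comes from hs_fr_table_rB as 0x92.
 */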

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			return -EINVAL;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * Set UniPro core clock cycles to 150 and clear the
			 * clock divider.
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and possibly the host) have issues if
		 * LCC is enabled. So we are setting PA_Local_TX_LCC_Enable
		 * to 0 before link startup, which will make sure that both
		 * host and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_disable_host_tx_lcc(hba);

		break;
	default:
		break;
	}

	return err;
}

static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* The reset gpio is optional */
	if (!host->device_reset)
		return;

	gpiod_set_value_cansleep(host->device_reset, asserted);
}

static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			    enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;

	if (status == PRE_CHANGE)
		return 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before the PHY is
		 * powered down, as the PLL source should be disabled
		 * after its downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* reset the connected UFS device during power down */
		ufs_qcom_device_reset_ctrl(hba, true);

	} else if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
	}

	return ufs_qcom_ice_suspend(host);
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	if (ufs_qcom_is_link_off(hba)) {
		err = phy_power_on(phy);
		if (err) {
			dev_err(hba->dev, "%s: failed PHY power on: %d\n",
				__func__, err);
			return err;
		}

		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;

	} else if (!ufs_qcom_is_link_active(hba)) {
		err = ufs_qcom_enable_lane_clks(host);
		if (err)
			return err;
	}

	return ufs_qcom_ice_resume(host);
}

static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock, it might be
		 * immediately after entering hibern8, in which case we need
		 * to make sure that the device ref_clk is active for a
		 * specific time after hibern8 entry.
		 */
		if (!enable) {
			unsigned long gating_wait;

			gating_wait = host->hba->dev_info.clk_gating_wait_us;
			if (!gating_wait) {
				udelay(1);
			} else {
				/*
				 * bRefClkGatingWaitTime defines the minimum
				 * time for which the reference clock is
				 * required by the device during the
				 * transition from HS-MODE to LS-MODE or
				 * HIBERN8 state. Add some more delay to be
				 * on the safe side.
				 */
				gating_wait += 10;
				usleep_range(gating_wait, gating_wait + 10);
			}
		}

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/*
		 * Make sure the write to ref_clk reaches the destination and
		 * is not stored in a write buffer (WB).
		 */
		readl(host->dev_ref_clk_ctrl_mmio);

		/*
		 * If we call hibern8 exit after this, we need to make sure
		 * that the device ref_clk is stable for at least 1us before
		 * the hibern8 exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

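/*
 * Power mode change notification: on PRE_CHANGE, negotiate the power mode
 * parameters (limiting both directions to the same HS gear) and enable the
 * device ref_clk before a switch into HS mode; on POST_CHANGE, program the
 * timer registers, cache the agreed parameters and drop the device ref_clk
 * when leaving HS mode.
 */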
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params ufs_qcom_cap;
	int ret = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		return -EINVAL;
	}

	switch (status) {
	case PRE_CHANGE:
		ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;

		/* This driver only supports symmetric gear setting i.e., hs_tx_gear == hs_rx_gear */
		ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);

		ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
					       dev_max_params,
					       dev_req_params);
		if (ret) {
			dev_err(hba->dev, "%s: failed to determine capabilities\n",
				__func__);
			return ret;
		}

		/* Use the agreed gear */
		host->hs_gear = dev_req_params->gear_tx;

		/* Enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
		    ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);

		if (host->hw_ver.major >= 0x4) {
			ufshcd_dme_configure_adapt(hba,
						   dev_req_params->gear_tx,
						   PA_INITIAL_ADAPT);
		}
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * We return the error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual.
			 */
			ret = -EINVAL;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
		       dev_req_params, sizeof(*dev_req_params));

		/* Disable the device ref clock if we entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
		    !ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		return err;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			      (pa_vs_config_reg1 | (1 << 12)));
}

static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
		hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;

	return err;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return ufshci_version(1, 1);
	else
		return ufshci_version(2, 0);
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may exhibit some non-standard behaviours
 * (quirks) relative to what the UFSHCI specification prescribes. Advertise
 * all such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}

	if (host->hw_ver.major > 0x3)
		hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() will be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	switch (status) {
	case PRE_CHANGE:
		if (!on) {
			if (!ufs_qcom_is_link_active(hba)) {
				/* disable device ref_clk */
				ufs_qcom_dev_ref_clk_ctrl(host, false);
			}
		}
		break;
	case POST_CHANGE:
		if (on) {
			/* enable the device ref clock for HS mode */
			if (ufshcd_is_hs_mode(&hba->pwr_info))
				ufs_qcom_dev_ref_clk_ctrl(host, true);
		}
		break;
	}

	return 0;
}

static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	ufs_qcom_assert_reset(host->hba);
	/* provide a 1ms delay to let the reset pulse propagate. */
	usleep_range(1000, 1100);
	return 0;
}

static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);

	ufs_qcom_deassert_reset(host->hba);

	/*
	 * After reset deassertion, the PHY needs all ref clocks, voltage
	 * and current to settle down before starting the serdes.
	 */
	usleep_range(1000, 1100);
	return 0;
}

static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power up failure and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;
	struct ufs_clk_info *clki;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		return -ENOMEM;
	}

	/* Make a two-way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Set up the optional reset control of HCI */
	host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
	if (IS_ERR(host->core_reset)) {
		err = dev_err_probe(dev, PTR_ERR(host->core_reset),
				    "Failed to get reset control\n");
		goto out_variant_clear;
	}

	/* Fire up the reset controller. Failure here is non-fatal. */
	host->rcdev.of_node = dev->of_node;
	host->rcdev.ops = &ufs_qcom_reset_ops;
	host->rcdev.owner = dev->driver->owner;
	host->rcdev.nr_resets = 1;
	err = devm_reset_controller_register(dev, &host->rcdev);
	if (err)
		dev_warn(dev, "Failed to register reset controller\n");

	if (!has_acpi_companion(dev)) {
		host->generic_phy = devm_phy_get(dev, "ufsphy");
		if (IS_ERR(host->generic_phy)) {
			err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
			goto out_variant_clear;
		}
	}

	host->device_reset = devm_gpiod_get_optional(dev, "reset",
						     GPIOD_OUT_HIGH);
	if (IS_ERR(host->device_reset)) {
		err = PTR_ERR(host->device_reset);
		if (err != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire reset gpio: %d\n", err);
		goto out_variant_clear;
	}

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
					 &host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * For newer controllers, the device reference clock control bit has
	 * moved inside the UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is an optional resource */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "dev_ref_clk_ctrl_mem");
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
				host->dev_ref_clk_ctrl_mmio = NULL;
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk_unipro"))
			clki->keep_link_active = true;
	}

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	err = ufs_qcom_ice_init(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err)
		/* Failure is non-fatal */
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
			 __func__, err);

	/*
	 * Power up the PHY using the minimum supported gear (UFS_HS_G2).
	 * Switching to max gear will be performed during reinit if supported.
	 */
	host->hs_gear = UFS_HS_G2;

	return 0;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);

	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}

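/*
 * Program the MAX_CORE_CLK_1US_CYCLES field of DME_VS_CORE_CLK_CTRL with
 * @clk_cycles and clear the CORE_CLK_DIV_EN bit, via a read-modify-write
 * of the DME attribute.
 */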
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);
	if (err)
		return err;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	return ufshcd_dme_set(hba,
			      UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			      core_clk_ctrl_reg);
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			     &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				     core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
				     bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		err = ufshcd_uic_hibern8_enter(hba);
		if (err)
			return err;
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
		if (err)
			ufshcd_uic_hibern8_exit(hba);

	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err) {
			ufshcd_uic_hibern8_exit(hba);
			return err;
		}

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufshcd_uic_hibern8_exit(hba);
	}

	return 0;
}

static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
		    UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	return true;
}

int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
	 */
	mb();

	return 0;
}
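
/*
 * Hypothetical debug usage of the test bus interface, mirroring the default
 * configuration installed by ufs_qcom_get_default_testbus_cfg():
 *
 *	host->testbus.select_major = TSTBUS_UNIPRO;
 *	host->testbus.select_minor = 37;
 *	ufs_qcom_testbus_config(host);
 */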

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	u32 reg;
	struct ufs_qcom_host *host;

	host = ufshcd_get_variant(hba);

	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
}

/**
 * ufs_qcom_device_reset() - toggle the (optional) device reset line
 * @hba: per-adapter instance
 *
 * Toggles the (optional) reset line to reset the attached device.
 */
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	/* The reset gpio is optional */
	if (!host->device_reset)
		return -EOPNOTSUPP;

	/*
	 * The UFS device shall detect reset pulses of 1us; sleep for 10us
	 * to be on the safe side.
	 */
	ufs_qcom_device_reset_ctrl(hba, true);
	usleep_range(10, 15);

	ufs_qcom_device_reset_ctrl(hba, false);
	usleep_range(10, 15);

	return 0;
}

#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  struct devfreq_simple_ondemand_data *d)
{
	p->polling_ms = 60;
	d->upthreshold = 70;
	d->downdifferential = 5;
}
#else
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					  struct devfreq_dev_profile *p,
					  struct devfreq_simple_ondemand_data *data)
{
}
#endif

static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	phy_power_off(host->generic_phy);
}

/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
	{.name = "ufs_mem",},
	{.name = "mcq",},
	/* Submission Queue DAO */
	{.name = "mcq_sqd",},
	/* Submission Queue Interrupt Status */
	{.name = "mcq_sqis",},
	/* Completion Queue DAO */
	{.name = "mcq_cqd",},
	/* Completion Queue Interrupt Status */
	{.name = "mcq_cqis",},
	/* MCQ vendor specific */
	{.name = "mcq_vs",},
};

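/*
 * Map the MCQ-related register resources. The "ufs_mem" resource is
 * mandatory and reuses the already-mapped hba->mmio_base; the other
 * resources are mapped when provided. If the "mcq" resource is absent from
 * the DT, carve the MCQ region out of "ufs_mem" using the queue attribute
 * offset advertised in the MCQ capabilities.
 */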
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
	struct platform_device *pdev = to_platform_device(hba->dev);
	struct ufshcd_res_info *res;
	struct resource *res_mem, *res_mcq;
	int i, ret = 0;

	memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));

	for (i = 0; i < RES_MAX; i++) {
		res = &hba->res[i];
		res->resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     res->name);
		if (!res->resource) {
			dev_info(hba->dev, "Resource %s not provided\n", res->name);
			if (i == RES_UFS)
				return -ENOMEM;
			continue;
		} else if (i == RES_UFS) {
			res_mem = res->resource;
			res->base = hba->mmio_base;
			continue;
		}

		res->base = devm_ioremap_resource(hba->dev, res->resource);
		if (IS_ERR(res->base)) {
			dev_err(hba->dev, "Failed to map res %s, err=%d\n",
				res->name, (int)PTR_ERR(res->base));
			ret = PTR_ERR(res->base);
			res->base = NULL;
			return ret;
		}
	}

	/* Bail out if the MCQ resource was provided in DT and mapped above */
	res = &hba->res[RES_MCQ];
	if (res->base)
		goto out;

	/* Explicitly allocate the MCQ resource from ufs_mem */
	res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
	if (!res_mcq)
		return -ENOMEM;

	res_mcq->start = res_mem->start +
			 MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
	res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
	res_mcq->flags = res_mem->flags;
	res_mcq->name = "mcq";

	ret = insert_resource(&iomem_resource, res_mcq);
	if (ret) {
		dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
			ret);
		return ret;
	}

	res->base = devm_ioremap_resource(hba->dev, res_mcq);
	if (IS_ERR(res->base)) {
		dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
			(int)PTR_ERR(res->base));
		ret = PTR_ERR(res->base);
		goto ioremap_err;
	}

out:
	hba->mcq_base = res->base;
	return 0;
ioremap_err:
	res->base = NULL;
	remove_resource(res_mcq);
	return ret;
}

static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_res_info *mem_res, *sqdao_res;
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	mem_res = &hba->res[RES_UFS];
	sqdao_res = &hba->res[RES_MCQ_SQD];

	if (!mem_res->base || !sqdao_res->base)
		return -EINVAL;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->offset = sqdao_res->resource->start -
			      mem_res->resource->start + 0x40 * i;
		opr->stride = 0x100;
		opr->base = sqdao_res->base + 0x40 * i;
	}

	return 0;
}

static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
{
	/* Qualcomm HC supports up to 64 */
	return MAX_SUPP_MAC;
}

static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
					unsigned long *ocqs)
{
	struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];

	if (!mcq_vs_res->base)
		return -EINVAL;

	*ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);

	return 0;
}

static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufshcd_mcq_config_esi(hba, msg);
}

static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
{
	struct ufs_hba *hba = __hba;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 id = irq - host->esi_base;
	struct ufs_hw_queue *hwq = &hba->uhq[id];

	ufshcd_mcq_write_cqis(hba, 0x1, id);
	ufshcd_mcq_poll_cqe_lock(hba, hwq);

	return IRQ_HANDLED;
}

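/*
 * Allocate and request one platform MSI vector per completion queue (poll
 * queues do not need ESI) and wire each vector to the ESI handler above.
 * On partial failure, the already-requested IRQs are freed again and the
 * MSI domain allocation is rolled back.
 */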
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;
	int nr_irqs, ret;

	if (host->esi_enabled)
		return 0;
	else if (host->esi_base < 0)
		return -EINVAL;

	/*
	 * 1. We only handle CQs as of now.
	 * 2. Poll queues do not need ESI.
	 */
	nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
					     ufs_qcom_write_msi_msg);
	if (ret)
		goto out;

	msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
		if (!desc->msi_index)
			host->esi_base = desc->irq;

		ret = devm_request_irq(hba->dev, desc->irq,
				       ufs_qcom_mcq_esi_handler,
				       IRQF_SHARED, "qcom-mcq-esi", hba);
		if (ret) {
			dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n",
				__func__, desc->irq, ret);
			failed_desc = desc;
			break;
		}
	}

	if (ret) {
		/* Rewind */
		msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
			if (desc == failed_desc)
				break;
			devm_free_irq(hba->dev, desc->irq, hba);
		}
		platform_msi_domain_free_irqs(hba->dev);
	} else {
		if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
		    host->hw_ver.step == 0) {
			ufshcd_writel(hba,
				      ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
				      REG_UFS_CFG3);
		}
		ufshcd_mcq_enable_esi(hba);
	}

out:
	if (ret) {
		host->esi_base = -1;
		dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret);
	} else {
		host->esi_enabled = true;
	}

	return ret;
}

/*
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name = "qcom",
	.init = ufs_qcom_init,
	.exit = ufs_qcom_exit,
	.get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify = ufs_qcom_clk_scale_notify,
	.setup_clocks = ufs_qcom_setup_clocks,
	.hce_enable_notify = ufs_qcom_hce_enable_notify,
	.link_startup_notify = ufs_qcom_link_startup_notify,
	.pwr_change_notify = ufs_qcom_pwr_change_notify,
	.apply_dev_quirks = ufs_qcom_apply_dev_quirks,
	.suspend = ufs_qcom_suspend,
	.resume = ufs_qcom_resume,
	.dbg_register_dump = ufs_qcom_dump_dbg_regs,
	.device_reset = ufs_qcom_device_reset,
	.config_scaling_param = ufs_qcom_config_scaling_param,
	.program_key = ufs_qcom_ice_program_key,
	.reinit_notify = ufs_qcom_reinit_notify,
	.mcq_config_resource = ufs_qcom_mcq_config_resource,
	.get_hba_mac = ufs_qcom_get_hba_mac,
	.op_runtime_config = ufs_qcom_op_runtime_config,
	.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
	.config_esi = ufs_qcom_config_esi,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Return zero for success and non-zero for failure.
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");

	return 0;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0.
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	platform_msi_domain_free_irqs(hba->dev);
	return 0;
}

static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare = ufshcd_suspend_prepare,
	.complete = ufshcd_resume_complete,
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_system_suspend,
	.resume = ufshcd_system_resume,
	.freeze = ufshcd_system_freeze,
	.restore = ufshcd_system_restore,
	.thaw = ufshcd_system_thaw,
#endif
};

static struct platform_driver ufs_qcom_pltform = {
	.probe = ufs_qcom_probe,
	.remove = ufs_qcom_remove,
	.driver = {
		.name = "ufshcd-qcom",
		.pm = &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");