Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.7 1133 lines 35 kB view raw
/*
 * Universal Flash Storage Host controller driver
 *
 * This code is based on drivers/scsi/ufs/ufshcd.h
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 */

#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include "unipro.h"

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "ufs.h"
#include "ufshci.h"

#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;

/* Types of device management commands issued internally by the driver */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP		= 0x0,
	DEV_CMD_TYPE_QUERY		= 0x1,
};

/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @result: UIC command result
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)

/* Accessors for the UFS device's current power mode, tracked in the hba */
#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
	UFS_PM_LVL_MAX
};

/* Pair of device power mode and link state that a ufs_pm_level maps to */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};

/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @req_abort_skip: skip request abort task flag
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;

	bool req_abort_skip;
};

/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};

/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @query: query request/response data for DEV_CMD_TYPE_QUERY commands
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	struct ufs_query query;
};

/* Per-descriptor-type sizes as reported by the attached UFS device */
struct ufs_desc_size {
	int dev_desc;
	int pwr_desc;
	int geom_desc;
	int interc_desc;
	int unit_desc;
	int conf_desc;
	int hlth_desc;
};

/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};

/* Phase of a notification callback: before or after the change took place */
enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

/* UniPro PA-layer attributes describing a power mode configuration */
struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

/* PA-layer attributes plus a validity flag (set once info is populated) */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};

/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before and after HCE enable bit is set to allow
 *                     variant specific Uni-Pro initialization.
 * @link_startup_notify: called before and after Link startup is carried out
 *                       to allow variant specific Uni-Pro initialization.
 * @pwr_change_notify: called before and after a power mode change
 *			is carried out to allow vendor specific capabilities
 *			to be set.
 * @setup_xfer_req: called before any transfer request is issued
 *                  to set some things
 * @setup_task_mgmt: called before any task management request is issued
 *                  to set some things
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 * @config_scaling_param: called to configure clock scaling parameters
 */
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void    (*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int     (*setup_regulators)(struct ufs_hba *, bool);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
					enum ufs_notify_change_status status,
					struct ufs_pa_layer_attr *,
					struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
					enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *hba);
	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	void	(*device_reset)(struct ufs_hba *hba);
	void	(*config_scaling_param)(struct ufs_hba *hba,
					struct devfreq_dev_profile *profile,
					void *data);
};

/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks.
 * @clk_gating_workq: workqueue that gate_work and ungate_work are queued on
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

/* Power mode saved across a clock-scaling operation (see @is_valid) */
struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time (in jiffies) of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @is_allowed: tracks if scaling is currently allowed or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};

#define UFS_ERR_REG_HIST_LENGTH 8
/**
 * struct ufs_err_reg_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @reg: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 */
struct ufs_err_reg_hist {
	int pos;
	u32 reg[UFS_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
};

/**
 * struct ufs_stats - keeps usage/err statistics
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *		reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *		Clear after the first successful command completion.
 * @pa_err: tracks pa-uic errors
 * @dl_err: tracks dl-uic errors
 * @nl_err: tracks nl-uic errors
 * @tl_err: tracks tl-uic errors
 * @dme_err: tracks dme errors
 * @auto_hibern8_err: tracks auto-hibernate errors
 * @fatal_err: tracks fatal errors
 * @link_startup_err: tracks link-startup errors
 * @resume_err: tracks resume errors
 * @suspend_err: tracks suspend errors
 * @dev_reset: tracks device reset events
 * @host_reset: tracks host reset events
 * @task_abort: tracks task abort events
 */
struct ufs_stats {
	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;

	/* uic specific errors */
	struct ufs_err_reg_hist pa_err;
	struct ufs_err_reg_hist dl_err;
	struct ufs_err_reg_hist nl_err;
	struct ufs_err_reg_hist tl_err;
	struct ufs_err_reg_hist dme_err;

	/* fatal errors */
	struct ufs_err_reg_hist auto_hibern8_err;
	struct ufs_err_reg_hist fatal_err;
	struct ufs_err_reg_hist link_startup_err;
	struct ufs_err_reg_hist resume_err;
	struct ufs_err_reg_hist suspend_err;

	/* abnormal events */
	struct ufs_err_reg_hist dev_reset;
	struct ufs_err_reg_hist host_reset;
	struct ufs_err_reg_hist task_abort;
};

/* Deviations of the host controller from the UFSHCI specification */
enum ufshcd_quirks {
	/* Interrupt aggregation support is broken */
	UFSHCD_QUIRK_BROKEN_INTR_AGGR			= 1 << 0,

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
	UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		= 1 << 1,

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
	UFSHCD_QUIRK_BROKEN_LCC				= 1 << 2,

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
	UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		= 1 << 3,

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
	UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		= 1 << 4,

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,
};

/* Optional host capabilities, enabled per-platform in hba->caps */
enum ufshcd_caps {
	/* Allow dynamic clk gating */
	UFSHCD_CAP_CLK_GATING				= 1 << 0,

	/* Allow hibern8 with clk gating */
	UFSHCD_CAP_HIBERN8_WITH_CLK_GATING		= 1 << 1,

	/* Allow dynamic clk scaling */
	UFSHCD_CAP_CLK_SCALING				= 1 << 2,

	/* Allow auto bkops to enabled during runtime suspend */
	UFSHCD_CAP_AUTO_BKOPS_SUSPEND			= 1 << 3,

	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
	UFSHCD_CAP_INTR_AGGR				= 1 << 4,

	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
	UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,

	/*
	 * This capability allows host controller driver to automatically
	 * enable runtime power management by itself instead of waiting
	 * for userspace to control the power management.
	 */
	UFSHCD_CAP_RPM_AUTOSUSPEND			= 1 << 6,
};

/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @lrb: local reference block
 * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
 * @sdev_ufs_device: scsi_device of the "UFS device" well-known LU
 * @rpm_lvl: desired UFS power management level during runtime PM
 * @spm_lvl: desired UFS power management level during system PM
 * @ahit: Auto-Hibernate Idle Timer register value
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @quirks: deviations from the standard UFSHCI spec (enum ufshcd_quirks)
 * @dev_quirks: deviations from the standard UFS device spec
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for uic command
 * @tmf_tag_set: TMF tag set.
 * @tmf_queue: Used to allocate TMF tags.
 * @pwr_done: completion for power mode change
 * @ufshcd_state: UFSHCD states
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: clock gating bookkeeping (see struct ufs_clk_gating)
 * @caps: control to enable/disable host capabilities (enum ufshcd_caps)
 * @clk_scaling: clock scaling bookkeeping (see struct ufs_clk_scaling)
 * @desc_size: descriptor sizes reported by device
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not.
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	struct request_queue *cmd_queue;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	struct blk_mq_tag_set tmf_tag_set;
	struct request_queue *tmf_queue;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	u16 hba_enable_delay_us;
	bool is_powered;

	/* Work Queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	struct ufs_desc_size desc_size;
	atomic_t scsi_block_reqs_cnt;

	struct device bsg_dev;
	struct request_queue *bsg_queue;
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
/* Returns true if the link may enter hibern8 while clocks are gated */
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
/* Non-zero if dynamic clock scaling is supported on this host */
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
/* Returns true if auto bkops may stay enabled during runtime suspend */
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
/* Returns true if the driver may enable runtime PM autosuspend itself */
static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
#ifndef CONFIG_SCSI_UFS_DWC
	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
		return true;
	else
		return false;
#else
return true;
#endif
}

/* Controller advertises Auto-Hibernate support in its capabilities register */
static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
}

/* Auto-Hibernate is enabled iff the cached idle-timer value is non-zero */
static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))

/**
 * ufshcd_rmwl - read modify write into a register
 * @hba - per adapter instance
 * @mask - mask to apply on read value
 * @val - actual value to write
 * @reg - register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}

int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
			    u32 reg);

/* Compile-time check that a UPIU fits in the allocated descriptor area */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}

/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba - per adapter instance
 * @variant - pointer to variant specific data
 */
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba - per adapter instance
 */
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
/* True if auto bkops may stay enabled at all times except during suspend */
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

/* Set a DME attribute on the local (host-side) UniPro stack */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

/* Set a STATIC DME attribute on the local UniPro stack */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

/* Set a DME attribute on the peer (device-side) UniPro stack */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

/* Set a STATIC DME attribute on the peer UniPro stack */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

/* Read a DME attribute from the local UniPro stack */
static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

/* Read a DME attribute from the peer UniPro stack */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}

/* True only when both RX and TX are in a high-speed (FAST/FASTAUTO) mode */
static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
		(pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

/* Clear PA_LOCAL_TX_LCC_ENABLE so the host does not transmit LCC */
static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);

/* ascii flag values for ufshcd_read_string_desc() */
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
	int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		return hba->vops->exit(hba);
}

/* Falls back to reading REG_UFS_VERSION when no variant override exists */
static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

/* Unlike the other wrappers, a missing op is an error (-ENOTSUPP) here */
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				  bool status,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
					bool is_scsi_cmd)
{
	if (hba->vops && hba->vops->setup_xfer_req)
		return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					enum uic_cmd_dme cmd,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		return hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba);
	return 0;
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
1091{ 1092 if (hba->vops && hba->vops->dbg_register_dump) 1093 hba->vops->dbg_register_dump(hba); 1094} 1095 1096static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) 1097{ 1098 if (hba->vops && hba->vops->device_reset) { 1099 hba->vops->device_reset(hba); 1100 ufshcd_set_ufs_dev_active(hba); 1101 ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0); 1102 } 1103} 1104 1105static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba, 1106 struct devfreq_dev_profile 1107 *profile, void *data) 1108{ 1109 if (hba->vops && hba->vops->config_scaling_param) 1110 hba->vops->config_scaling_param(hba, profile, data); 1111} 1112 1113extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; 1114 1115/* 1116 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN 1117 * @scsi_lun: scsi LUN id 1118 * 1119 * Returns UPIU LUN id 1120 */ 1121static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) 1122{ 1123 if (scsi_is_wlun(scsi_lun)) 1124 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) 1125 | UFS_UPIU_WLUN_ID; 1126 else 1127 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; 1128} 1129 1130int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 1131 const char *prefix); 1132 1133#endif /* End of Header */