/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)
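
/*
 * Illustrative sketch, with hypothetical names: sof_ops() follows
 * sdev->pdata->desc->ops, so the DSP ops table is the one referenced by the
 * platform descriptor, e.g.
 *
 *	static const struct sof_dev_desc my_chip_desc = {
 *		...
 *		.ops = &my_chip_dsp_ops,	(a struct snd_sof_dsp_ops)
 *	};
 *
 * Every wrapper below then dispatches into my_chip_dsp_ops. Callbacks left
 * NULL are optional: their wrappers fall back to returning 0, except for the
 * register I/O wrappers, which log an error instead.
 */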

/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline int snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		return sof_ops(sdev)->remove(sdev);

	return 0;
}

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core power up/power down */
static inline int snd_sof_dsp_core_power_up(struct snd_sof_dev *sdev,
					    unsigned int core_mask)
{
	if (sof_ops(sdev)->core_power_up)
		return sof_ops(sdev)->core_power_up(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_core_power_down(struct snd_sof_dev *sdev,
					      unsigned int core_mask)
{
	if (sof_ops(sdev)->core_power_down)
		return sof_ops(sdev)->core_power_down(sdev, core_mask);

	return 0;
}

/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev, int state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev,
					      int state)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev, state);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

/* debug */
static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
{
	if (sof_ops(sdev)->dbg_dump)
		return sof_ops(sdev)->dbg_dump(sdev, flags);
}

static inline void snd_sof_ipc_dump(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->ipc_dump)
		return sof_ops(sdev)->ipc_dump(sdev);
}

/* register IO */
static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write) {
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64) {
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}
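
/*
 * Illustrative sketch, with hypothetical register names: the bar argument
 * selects one of the mapped windows in sdev->bar[] and offset is added to
 * that base before the platform read/write op runs, e.g.
 *
 *	snd_sof_dsp_write(sdev, MY_DSP_BAR, MY_CTRL_OFFSET, 0x1);
 *	status = snd_sof_dsp_read(sdev, MY_DSP_BAR, MY_STATUS_OFFSET);
 */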

/* block IO */
static inline void snd_sof_dsp_block_read(struct snd_sof_dev *sdev, u32 bar,
					  u32 offset, void *dest, size_t bytes)
{
	sof_ops(sdev)->block_read(sdev, bar, offset, dest, bytes);
}

static inline void snd_sof_dsp_block_write(struct snd_sof_dev *sdev, u32 bar,
					   u32 offset, void *src, size_t bytes)
{
	sof_ops(sdev)->block_write(sdev, bar, offset, src, bytes);
}

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host DMA trace */
static inline int snd_sof_dma_trace_init(struct snd_sof_dev *sdev,
					 u32 *stream_tag)
{
	if (sof_ops(sdev)->trace_init)
		return sof_ops(sdev)->trace_init(sdev, stream_tag);

	return 0;
}

static inline int snd_sof_dma_trace_release(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->trace_release)
		return sof_ops(sdev)->trace_release(sdev);

	return 0;
}

static inline int snd_sof_dma_trace_trigger(struct snd_sof_dev *sdev, int cmd)
{
	if (sof_ops(sdev)->trace_trigger)
		return sof_ops(sdev)->trace_trigger(sdev, cmd);

	return 0;
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct sof_ipc_stream_params *ipc_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream,
						    params, ipc_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* host DSP message data */
static inline void snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
					struct snd_pcm_substream *substream,
					void *p, size_t sz)
{
	sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}

/* host configure DSP HW parameters */
static inline int
snd_sof_ipc_pcm_params(struct snd_sof_dev *sdev,
		       struct snd_pcm_substream *substream,
		       const struct sof_ipc_pcm_params_reply *reply)
{
	return sof_ops(sdev)->ipc_pcm_params(sdev, substream, reply);
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

static inline const struct snd_sof_dsp_ops
*sof_get_ops(const struct sof_dev_desc *d,
	     const struct sof_ops_table mach_ops[], int asize)
{
	int i;

	for (i = 0; i < asize; i++) {
		if (d == mach_ops[i].desc)
			return mach_ops[i].ops;
	}

	/* not found */
	return NULL;
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a DSP register
 * until a condition is met or a timeout occurs
 * @sdev: SOF device
 * @bar: BAR index passed to snd_sof_dsp_read()
 * @offset: Register offset within @bar to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops). Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value of the register is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg=%#x successful\n", (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg=%#x timedout\n", (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
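
/*
 * Illustrative sketch, with hypothetical register and bit names: wait up to
 * 100ms for a firmware status register to report a "done" bit, sleeping
 * about 1ms between reads (ret is 0 or -ETIMEDOUT, status holds the last
 * value read):
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, MY_DSP_BAR, MY_FW_STATUS,
 *					    status, status & MY_FW_DONE_BIT,
 *					    1000, 100000);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "error: firmware boot timed out\n");
 */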

/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset);
#endif