Linux kernel mirror, git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at tag v4.19
/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy uses 32-bit size values so divide by 4 */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}

}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

static int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}

static void sst_dma_free(struct sst_dma *dma)
{

	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);

}

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* do we span > 1 blocks */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;

		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does block span more than 1 section */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore!\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
		context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		ba.offset = dsp->scratch_offset;

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);

struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* Register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err)
		dev_warn(dev, "sst_dma_new failed %d\n", err);

	return sst;

irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);

void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");
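
/*
 * Usage sketch, illustrative only and not part of the original file: how a
 * platform driver might drive this loader end to end. It assumes the caller
 * already has a populated struct sst_dsp_device (ops including parse_fw and
 * the IRQ handlers) and struct sst_pdata for its hardware, and that ops->init
 * (or the caller) registers the DSP memory blocks before the firmware is
 * parsed. The function name and minimal error handling are hypothetical.
 */
static int __maybe_unused example_sst_fw_roundtrip(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata,
	const char *fw_name)
{
	const struct firmware *fw;
	struct sst_dsp *dsp;
	struct sst_fw *sst_fw;
	int ret;

	/* bring up the DSP core, register its ISR and (optionally) DMA */
	dsp = sst_dsp_new(dev, sst_dev, pdata);
	if (!dsp)
		return -ENODEV;

	/* fetch the firmware image from the filesystem */
	ret = request_firmware(&fw, fw_name, dev);
	if (ret < 0)
		goto err_dsp;

	/* parse the image and copy its modules into DSP memory blocks */
	sst_fw = sst_fw_new(dsp, fw, NULL);
	release_firmware(fw);
	if (!sst_fw) {
		ret = -EINVAL;
		goto err_dsp;
	}

	/* ... boot the DSP, create module runtimes, run, etc. ... */

	/* tear everything down again */
	sst_fw_unload(sst_fw);
	sst_fw_free(sst_fw);
err_dsp:
	sst_dsp_free(dsp);
	return ret;
}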