Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
async_tx: checkpatch says s/__FUNCTION__/__func__/g
iop-adma.c: replace remaining __FUNCTION__ occurrences
fsldma: Add a completed cookie updated action in DMA finish interrupt.
fsldma: Add device_prep_dma_interrupt support to fsldma.c
dmaengine: Fix a bug about BUG_ON() on DMA engine capability DMA_INTERRUPT.
fsldma: Fix fsldma.c warning messages when it's compiled under PPC64.
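
Most of the diff below is the mechanical s/__FUNCTION__/__func__/g sweep named in the first two commits: __FUNCTION__ is a GCC-specific spelling, while __func__ is the predefined identifier standardized by C99, which is why checkpatch flags the former. A minimal, hedged illustration of the preferred form (the show_len() helper is hypothetical and not part of the patch):

#include <linux/kernel.h>

/* Hypothetical helper, only to show the C99 identifier in use. */
static void __maybe_unused show_len(size_t len)
{
	/* __func__ expands to "show_len" here, exactly as __FUNCTION__
	 * would under GCC, but without relying on a compiler extension. */
	pr_debug("%s: len: %zu\n", __func__, len);
}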

+76 -46
+3 -3
crypto/async_tx/async_memcpy.c
··· 66 } 67 68 if (tx) { 69 - pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len); 70 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 71 } else { 72 void *dest_buf, *src_buf; 73 - pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len); 74 75 /* wait for any prerequisite operations */ 76 if (depend_tx) { ··· 80 BUG_ON(depend_tx->ack); 81 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) 82 panic("%s: DMA_ERROR waiting for depend_tx\n", 83 - __FUNCTION__); 84 } 85 86 dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
··· 66 } 67 68 if (tx) { 69 + pr_debug("%s: (async) len: %zu\n", __func__, len); 70 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 71 } else { 72 void *dest_buf, *src_buf; 73 + pr_debug("%s: (sync) len: %zu\n", __func__, len); 74 75 /* wait for any prerequisite operations */ 76 if (depend_tx) { ··· 80 BUG_ON(depend_tx->ack); 81 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) 82 panic("%s: DMA_ERROR waiting for depend_tx\n", 83 + __func__); 84 } 85 86 dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
+3 -3
crypto/async_tx/async_memset.c
··· 63 } 64 65 if (tx) { 66 - pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len); 67 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 68 } else { /* run the memset synchronously */ 69 void *dest_buf; 70 - pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len); 71 72 dest_buf = (void *) (((char *) page_address(dest)) + offset); 73 ··· 79 BUG_ON(depend_tx->ack); 80 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) 81 panic("%s: DMA_ERROR waiting for depend_tx\n", 82 - __FUNCTION__); 83 } 84 85 memset(dest_buf, val, len);
··· 63 } 64 65 if (tx) { 66 + pr_debug("%s: (async) len: %zu\n", __func__, len); 67 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 68 } else { /* run the memset synchronously */ 69 void *dest_buf; 70 + pr_debug("%s: (sync) len: %zu\n", __func__, len); 71 72 dest_buf = (void *) (((char *) page_address(dest)) + offset); 73 ··· 79 BUG_ON(depend_tx->ack); 80 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) 81 panic("%s: DMA_ERROR waiting for depend_tx\n", 82 + __func__); 83 } 84 85 memset(dest_buf, val, len);
+3 -3
crypto/async_tx/async_tx.c
··· 472 tx = NULL; 473 474 if (tx) { 475 - pr_debug("%s: (async)\n", __FUNCTION__); 476 477 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 478 } else { 479 - pr_debug("%s: (sync)\n", __FUNCTION__); 480 481 /* wait for any prerequisite operations */ 482 if (depend_tx) { ··· 486 BUG_ON(depend_tx->ack); 487 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) 488 panic("%s: DMA_ERROR waiting for depend_tx\n", 489 - __FUNCTION__); 490 } 491 492 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
··· 472 tx = NULL; 473 474 if (tx) { 475 + pr_debug("%s: (async)\n", __func__); 476 477 async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param); 478 } else { 479 + pr_debug("%s: (sync)\n", __func__); 480 481 /* wait for any prerequisite operations */ 482 if (depend_tx) { ··· 486 BUG_ON(depend_tx->ack); 487 if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) 488 panic("%s: DMA_ERROR waiting for depend_tx\n", 489 + __func__); 490 } 491 492 async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
+6 -6
crypto/async_tx/async_xor.c
··· 47 int i; 48 unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; 49 50 - pr_debug("%s: len: %zu\n", __FUNCTION__, len); 51 52 dma_dest = dma_map_page(device->dev, dest, offset, len, 53 DMA_FROM_DEVICE); ··· 86 void *_dest; 87 int i; 88 89 - pr_debug("%s: len: %zu\n", __FUNCTION__, len); 90 91 /* reuse the 'src_list' array to convert to buffer pointers */ 92 for (i = 0; i < src_cnt; i++) ··· 196 DMA_ERROR) 197 panic("%s: DMA_ERROR waiting for " 198 "depend_tx\n", 199 - __FUNCTION__); 200 } 201 202 do_sync_xor(dest, &src_list[src_off], offset, ··· 276 unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; 277 int i; 278 279 - pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len); 280 281 for (i = 0; i < src_cnt; i++) 282 dma_src[i] = dma_map_page(device->dev, src_list[i], ··· 299 } else { 300 unsigned long xor_flags = flags; 301 302 - pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len); 303 304 xor_flags |= ASYNC_TX_XOR_DROP_DST; 305 xor_flags &= ~ASYNC_TX_ACK; ··· 310 if (tx) { 311 if (dma_wait_for_async_tx(tx) == DMA_ERROR) 312 panic("%s: DMA_ERROR waiting for tx\n", 313 - __FUNCTION__); 314 async_tx_ack(tx); 315 } 316
··· 47 int i; 48 unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; 49 50 + pr_debug("%s: len: %zu\n", __func__, len); 51 52 dma_dest = dma_map_page(device->dev, dest, offset, len, 53 DMA_FROM_DEVICE); ··· 86 void *_dest; 87 int i; 88 89 + pr_debug("%s: len: %zu\n", __func__, len); 90 91 /* reuse the 'src_list' array to convert to buffer pointers */ 92 for (i = 0; i < src_cnt; i++) ··· 196 DMA_ERROR) 197 panic("%s: DMA_ERROR waiting for " 198 "depend_tx\n", 199 + __func__); 200 } 201 202 do_sync_xor(dest, &src_list[src_off], offset, ··· 276 unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0; 277 int i; 278 279 + pr_debug("%s: (async) len: %zu\n", __func__, len); 280 281 for (i = 0; i < src_cnt; i++) 282 dma_src[i] = dma_map_page(device->dev, src_list[i], ··· 299 } else { 300 unsigned long xor_flags = flags; 301 302 + pr_debug("%s: (sync) len: %zu\n", __func__, len); 303 304 xor_flags |= ASYNC_TX_XOR_DROP_DST; 305 xor_flags &= ~ASYNC_TX_ACK; ··· 310 if (tx) { 311 if (dma_wait_for_async_tx(tx) == DMA_ERROR) 312 panic("%s: DMA_ERROR waiting for tx\n", 313 + __func__); 314 async_tx_ack(tx); 315 } 316
+1 -1
drivers/dma/dmaengine.c
··· 357 !device->device_prep_dma_zero_sum); 358 BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && 359 !device->device_prep_dma_memset); 360 - BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) && 361 !device->device_prep_dma_interrupt); 362 363 BUG_ON(!device->device_alloc_chan_resources);
··· 357 !device->device_prep_dma_zero_sum); 358 BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && 359 !device->device_prep_dma_memset); 360 + BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 361 !device->device_prep_dma_interrupt); 362 363 BUG_ON(!device->device_alloc_chan_resources);
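
The dmaengine.c hunk is the DMA_INTERRUPT capability fix: the old check paired the DMA_ZERO_SUM capability bit with the device_prep_dma_interrupt hook, so a zero-sum-capable driver without an interrupt hook would trip the BUG_ON() spuriously, while a driver advertising DMA_INTERRUPT but omitting the hook registered cleanly and could only fail later at prep time. The corrected line restores the intended "capability bit set => matching prep hook present" invariant; a hedged sketch of that invariant follows (caps_consistent() is a hypothetical helper written only for illustration, not kernel code):

#include <linux/types.h>
#include <linux/dmaengine.h>

static bool caps_consistent(struct dma_device *dev)
{
	/* Every advertised capability must come with its prep hook;
	 * dma_async_device_register() asserts the same condition. */
	if (dma_has_cap(DMA_MEMCPY, dev->cap_mask) &&
	    !dev->device_prep_dma_memcpy)
		return false;
	if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask) &&
	    !dev->device_prep_dma_interrupt)
		return false;
	return true;
}
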
+44 -14
drivers/dma/fsldma.c
··· 57 58 } 59 60 - static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val) 61 { 62 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); 63 } 64 65 - static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan) 66 { 67 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); 68 } ··· 406 dma_pool_destroy(fsl_chan->desc_pool); 407 } 408 409 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 410 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 411 size_t len, unsigned long flags) ··· 462 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); 463 #endif 464 465 - copy = min(len, FSL_DMA_BCR_MAX_CNT); 466 467 set_desc_cnt(fsl_chan, &new->hw, copy); 468 set_desc_src(fsl_chan, &new->hw, dma_src); ··· 539 540 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 541 542 - fsl_dma_update_completed_cookie(fsl_chan); 543 dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", 544 fsl_chan->completed_cookie); 545 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { ··· 606 if (ld_node != &fsl_chan->ld_queue) { 607 /* Get the ld start address from ld_queue */ 608 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; 609 - dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n", 610 - (u64)next_dest_addr); 611 set_cdar(fsl_chan, next_dest_addr); 612 dma_start(fsl_chan); 613 } else { ··· 687 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) 688 { 689 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; 690 - dma_addr_t stat; 691 692 stat = get_sr(fsl_chan); 693 dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", ··· 706 */ 707 if (stat & FSL_DMA_SR_EOSI) { 708 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); 709 - dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, " 710 - "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan), 711 - (u64)get_ndar(fsl_chan)); 712 stat &= ~FSL_DMA_SR_EOSI; 713 } 714 715 /* If it current transfer is the end-of-transfer, ··· 751 fsl_chan_ld_cleanup(fsl_chan); 752 } 753 754 static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) 755 { 756 if (fsl_chan) 757 dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); 758 } 759 760 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) 761 { 762 struct dma_chan *chan; ··· 865 if (err) { 866 for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); 867 i++); 868 - dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is " 869 "error! src 0x%x, dest 0x%x\n", 870 - i, test_size, *(src + i), *(dest + i)); 871 } 872 873 free_resources: ··· 876 kfree(src); 877 return err; 878 } 879 880 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, 881 const struct of_device_id *match) ··· 1037 } 1038 1039 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1040 - "controller at 0x%08x...\n", 1041 - match->compatible, fdev->reg.start); 1042 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end 1043 - fdev->reg.start + 1); 1044 ··· 1046 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1047 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1048 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1049 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1050 fdev->common.device_is_tx_complete = fsl_dma_is_complete; 1051 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
··· 57 58 } 59 60 + static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) 61 { 62 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); 63 } 64 65 + static u32 get_sr(struct fsl_dma_chan *fsl_chan) 66 { 67 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); 68 } ··· 406 dma_pool_destroy(fsl_chan->desc_pool); 407 } 408 409 + static struct dma_async_tx_descriptor * 410 + fsl_dma_prep_interrupt(struct dma_chan *chan) 411 + { 412 + struct fsl_dma_chan *fsl_chan; 413 + struct fsl_desc_sw *new; 414 + 415 + if (!chan) 416 + return NULL; 417 + 418 + fsl_chan = to_fsl_chan(chan); 419 + 420 + new = fsl_dma_alloc_descriptor(fsl_chan); 421 + if (!new) { 422 + dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); 423 + return NULL; 424 + } 425 + 426 + new->async_tx.cookie = -EBUSY; 427 + new->async_tx.ack = 0; 428 + 429 + /* Set End-of-link to the last link descriptor of new list*/ 430 + set_ld_eol(fsl_chan, new); 431 + 432 + return &new->async_tx; 433 + } 434 + 435 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 436 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 437 size_t len, unsigned long flags) ··· 436 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); 437 #endif 438 439 + copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 440 441 set_desc_cnt(fsl_chan, &new->hw, copy); 442 set_desc_src(fsl_chan, &new->hw, dma_src); ··· 513 514 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 515 516 dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", 517 fsl_chan->completed_cookie); 518 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { ··· 581 if (ld_node != &fsl_chan->ld_queue) { 582 /* Get the ld start address from ld_queue */ 583 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; 584 + dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n", 585 + (void *)next_dest_addr); 586 set_cdar(fsl_chan, next_dest_addr); 587 dma_start(fsl_chan); 588 } else { ··· 662 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) 663 { 664 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; 665 + u32 stat; 666 667 stat = get_sr(fsl_chan); 668 dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", ··· 681 */ 682 if (stat & FSL_DMA_SR_EOSI) { 683 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); 684 + dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n", 685 + (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan)); 686 stat &= ~FSL_DMA_SR_EOSI; 687 + fsl_dma_update_completed_cookie(fsl_chan); 688 } 689 690 /* If it current transfer is the end-of-transfer, ··· 726 fsl_chan_ld_cleanup(fsl_chan); 727 } 728 729 + #ifdef FSL_DMA_CALLBACKTEST 730 static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) 731 { 732 if (fsl_chan) 733 dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); 734 } 735 + #endif 736 737 + #ifdef CONFIG_FSL_DMA_SELFTEST 738 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) 739 { 740 struct dma_chan *chan; ··· 837 if (err) { 838 for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); 839 i++); 840 + dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is " 841 "error! src 0x%x, dest 0x%x\n", 842 + i, (long)test_size, *(src + i), *(dest + i)); 843 } 844 845 free_resources: ··· 848 kfree(src); 849 return err; 850 } 851 + #endif 852 853 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, 854 const struct of_device_id *match) ··· 1008 } 1009 1010 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1011 + "controller at %p...\n", 1012 + match->compatible, (void *)fdev->reg.start); 1013 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end 1014 - fdev->reg.start + 1); 1015 ··· 1017 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1018 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1019 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1020 + fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1021 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1022 fdev->common.device_is_tx_complete = fsl_dma_is_complete; 1023 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
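
The fsldma.c hunk carries the three fsldma commits in one place: fsl_dma_prep_interrupt() implements the new device_prep_dma_interrupt hook (an interrupt-only descriptor that moves no data), the completed-cookie update is moved into the end-of-segments interrupt path, and the remaining changes silence PPC64 build warnings by treating the 32-bit status register as u32, casting the min() arguments to a common size_t, printing addresses with %p instead of a %016llx/u64 cast, and wrapping the self-test helpers in #ifdef blocks. A hedged sketch of how a client of this era might use the new hook (queue_interrupt() and intr_done() are hypothetical names; the dmaengine API assumed is the 2.6.25-era one visible in this diff):

#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* Hypothetical completion callback, for illustration only. */
static void intr_done(void *param)
{
	pr_info("interrupt-only descriptor completed\n");
}

/* Request an interrupt-only descriptor, attach a callback, submit it,
 * and kick the channel; error handling is deliberately minimal. */
static int queue_interrupt(struct dma_chan *chan)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_interrupt(chan);
	if (!tx)
		return -ENOMEM;

	tx->callback = intr_done;
	tx->callback_param = NULL;
	tx->tx_submit(tx);
	chan->device->device_issue_pending(chan);

	return 0;
}
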
+16 -16
drivers/dma/iop-adma.c
··· 140 int busy = iop_chan_is_busy(iop_chan); 141 int seen_current = 0, slot_cnt = 0, slots_per_op = 0; 142 143 - dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); 144 /* free completed slots from the chain starting with 145 * the oldest descriptor 146 */ ··· 438 spin_unlock_bh(&iop_chan->lock); 439 440 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", 441 - __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx); 442 443 return cookie; 444 } ··· 520 struct iop_adma_desc_slot *sw_desc, *grp_start; 521 int slot_cnt, slots_per_op; 522 523 - dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); 524 525 spin_lock_bh(&iop_chan->lock); 526 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); ··· 548 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 549 550 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 551 - __FUNCTION__, len); 552 553 spin_lock_bh(&iop_chan->lock); 554 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); ··· 580 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 581 582 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 583 - __FUNCTION__, len); 584 585 spin_lock_bh(&iop_chan->lock); 586 slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); ··· 614 615 dev_dbg(iop_chan->device->common.dev, 616 "%s src_cnt: %d len: %u flags: %lx\n", 617 - __FUNCTION__, src_cnt, len, flags); 618 619 spin_lock_bh(&iop_chan->lock); 620 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); ··· 648 return NULL; 649 650 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", 651 - __FUNCTION__, src_cnt, len); 652 653 spin_lock_bh(&iop_chan->lock); 654 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); ··· 659 iop_desc_set_zero_sum_byte_count(grp_start, len); 660 grp_start->xor_check_result = result; 661 pr_debug("\t%s: grp_start->xor_check_result: %p\n", 662 - __FUNCTION__, grp_start->xor_check_result); 663 sw_desc->unmap_src_cnt = src_cnt; 664 sw_desc->unmap_len = len; 665 while (src_cnt--) ··· 700 iop_chan->last_used = NULL; 701 702 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", 703 - __FUNCTION__, iop_chan->slots_allocated); 704 spin_unlock_bh(&iop_chan->lock); 705 706 /* one is ok since we left it on there on purpose */ ··· 753 { 754 struct iop_adma_chan *chan = data; 755 756 - dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__); 757 758 tasklet_schedule(&chan->irq_tasklet); 759 ··· 766 { 767 struct iop_adma_chan *chan = data; 768 769 - dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__); 770 771 tasklet_schedule(&chan->irq_tasklet); 772 ··· 823 int err = 0; 824 struct iop_adma_chan *iop_chan; 825 826 - dev_dbg(device->common.dev, "%s\n", __FUNCTION__); 827 828 src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); 829 if (!src) ··· 906 int err = 0; 907 struct iop_adma_chan *iop_chan; 908 909 - dev_dbg(device->common.dev, "%s\n", __FUNCTION__); 910 911 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 912 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); ··· 1159 } 1160 1161 dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", 1162 - __FUNCTION__, adev->dma_desc_pool_virt, 1163 (void *) adev->dma_desc_pool); 1164 1165 adev->id = plat_data->hw_id; ··· 1289 dma_cookie_t cookie; 1290 int slot_cnt, slots_per_op; 1291 1292 - dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); 1293 1294 spin_lock_bh(&iop_chan->lock); 1295 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); ··· 1346 dma_cookie_t cookie; 1347 int slot_cnt, slots_per_op; 1348 1349 - dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); 1350 1351 spin_lock_bh(&iop_chan->lock); 1352 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
··· 140 int busy = iop_chan_is_busy(iop_chan); 141 int seen_current = 0, slot_cnt = 0, slots_per_op = 0; 142 143 + dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 144 /* free completed slots from the chain starting with 145 * the oldest descriptor 146 */ ··· 438 spin_unlock_bh(&iop_chan->lock); 439 440 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", 441 + __func__, sw_desc->async_tx.cookie, sw_desc->idx); 442 443 return cookie; 444 } ··· 520 struct iop_adma_desc_slot *sw_desc, *grp_start; 521 int slot_cnt, slots_per_op; 522 523 + dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 524 525 spin_lock_bh(&iop_chan->lock); 526 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); ··· 548 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 549 550 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 551 + __func__, len); 552 553 spin_lock_bh(&iop_chan->lock); 554 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); ··· 580 BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); 581 582 dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", 583 + __func__, len); 584 585 spin_lock_bh(&iop_chan->lock); 586 slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); ··· 614 615 dev_dbg(iop_chan->device->common.dev, 616 "%s src_cnt: %d len: %u flags: %lx\n", 617 + __func__, src_cnt, len, flags); 618 619 spin_lock_bh(&iop_chan->lock); 620 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); ··· 648 return NULL; 649 650 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", 651 + __func__, src_cnt, len); 652 653 spin_lock_bh(&iop_chan->lock); 654 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); ··· 659 iop_desc_set_zero_sum_byte_count(grp_start, len); 660 grp_start->xor_check_result = result; 661 pr_debug("\t%s: grp_start->xor_check_result: %p\n", 662 + __func__, grp_start->xor_check_result); 663 sw_desc->unmap_src_cnt = src_cnt; 664 sw_desc->unmap_len = len; 665 while (src_cnt--) ··· 700 iop_chan->last_used = NULL; 701 702 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", 703 + __func__, iop_chan->slots_allocated); 704 spin_unlock_bh(&iop_chan->lock); 705 706 /* one is ok since we left it on there on purpose */ ··· 753 { 754 struct iop_adma_chan *chan = data; 755 756 + dev_dbg(chan->device->common.dev, "%s\n", __func__); 757 758 tasklet_schedule(&chan->irq_tasklet); 759 ··· 766 { 767 struct iop_adma_chan *chan = data; 768 769 + dev_dbg(chan->device->common.dev, "%s\n", __func__); 770 771 tasklet_schedule(&chan->irq_tasklet); 772 ··· 823 int err = 0; 824 struct iop_adma_chan *iop_chan; 825 826 + dev_dbg(device->common.dev, "%s\n", __func__); 827 828 src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); 829 if (!src) ··· 906 int err = 0; 907 struct iop_adma_chan *iop_chan; 908 909 + dev_dbg(device->common.dev, "%s\n", __func__); 910 911 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { 912 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); ··· 1159 } 1160 1161 dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", 1162 + __func__, adev->dma_desc_pool_virt, 1163 (void *) adev->dma_desc_pool); 1164 1165 adev->id = plat_data->hw_id; ··· 1289 dma_cookie_t cookie; 1290 int slot_cnt, slots_per_op; 1291 1292 + dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1293 1294 spin_lock_bh(&iop_chan->lock); 1295 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); ··· 1346 dma_cookie_t cookie; 1347 int slot_cnt, slots_per_op; 1348 1349 + dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); 1350 1351 spin_lock_bh(&iop_chan->lock); 1352 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);