Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
async_tx: checkpatch says s/__FUNCTION__/__func__/g
iop-adma.c: replace remaining __FUNCTION__ occurrences
fsldma: Add a completed cookie updated action in DMA finish interrupt.
fsldma: Add device_prep_dma_interrupt support to fsldma.c
dmaengine: Fix a bug about BUG_ON() on DMA engine capability DMA_INTERRUPT.
fsldma: Fix fsldma.c warning messages when it's compiled under PPC64.

 7 files changed, 76 insertions(+), 46 deletions(-)

crypto/async_tx/async_memcpy.c | +3 -3
···
     }
 
     if (tx) {
-        pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+        pr_debug("%s: (async) len: %zu\n", __func__, len);
         async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
     } else {
         void *dest_buf, *src_buf;
-        pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+        pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
         /* wait for any prerequisite operations */
         if (depend_tx) {
···
             BUG_ON(depend_tx->ack);
             if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                 panic("%s: DMA_ERROR waiting for depend_tx\n",
-                    __FUNCTION__);
+                    __func__);
         }
 
         dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
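
Aside (editorial illustration, not part of the series): the replacement above,
and the matching hunks in async_memset.c, async_tx.c, async_xor.c and
iop-adma.c below, are the mechanical checkpatch conversion from the first two
commits. __func__ is the predefined identifier standardized in C99, while
__FUNCTION__ is an older GCC-specific alias kept only for compatibility. A
minimal standalone sketch of the standard form:

    /* plain C99, no kernel headers needed */
    #include <stdio.h>

    static void show(void)
    {
        /* __func__ expands to the name of the enclosing function, "show" */
        printf("%s: using the C99 predefined identifier\n", __func__);
    }

    int main(void)
    {
        show();
        return 0;
    }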

crypto/async_tx/async_memset.c | +3 -3
···
     }
 
     if (tx) {
-        pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+        pr_debug("%s: (async) len: %zu\n", __func__, len);
         async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
     } else { /* run the memset synchronously */
         void *dest_buf;
-        pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+        pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
         dest_buf = (void *) (((char *) page_address(dest)) + offset);
 
···
             BUG_ON(depend_tx->ack);
             if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                 panic("%s: DMA_ERROR waiting for depend_tx\n",
-                    __FUNCTION__);
+                    __func__);
         }
 
         memset(dest_buf, val, len);

crypto/async_tx/async_tx.c | +3 -3
···
         tx = NULL;
 
     if (tx) {
-        pr_debug("%s: (async)\n", __FUNCTION__);
+        pr_debug("%s: (async)\n", __func__);
 
         async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
     } else {
-        pr_debug("%s: (sync)\n", __FUNCTION__);
+        pr_debug("%s: (sync)\n", __func__);
 
         /* wait for any prerequisite operations */
         if (depend_tx) {
···
             BUG_ON(depend_tx->ack);
             if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                 panic("%s: DMA_ERROR waiting for depend_tx\n",
-                    __FUNCTION__);
+                    __func__);
         }
 
         async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);

crypto/async_tx/async_xor.c | +6 -6
···
     int i;
     unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 
-    pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+    pr_debug("%s: len: %zu\n", __func__, len);
 
     dma_dest = dma_map_page(device->dev, dest, offset, len,
                 DMA_FROM_DEVICE);
···
     void *_dest;
     int i;
 
-    pr_debug("%s: len: %zu\n", __FUNCTION__, len);
+    pr_debug("%s: len: %zu\n", __func__, len);
 
     /* reuse the 'src_list' array to convert to buffer pointers */
     for (i = 0; i < src_cnt; i++)
···
                         DMA_ERROR)
                     panic("%s: DMA_ERROR waiting for "
                         "depend_tx\n",
-                        __FUNCTION__);
+                        __func__);
             }
 
             do_sync_xor(dest, &src_list[src_off], offset,
···
         unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
         int i;
 
-        pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
+        pr_debug("%s: (async) len: %zu\n", __func__, len);
 
         for (i = 0; i < src_cnt; i++)
             dma_src[i] = dma_map_page(device->dev, src_list[i],
···
     } else {
         unsigned long xor_flags = flags;
 
-        pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
+        pr_debug("%s: (sync) len: %zu\n", __func__, len);
 
         xor_flags |= ASYNC_TX_XOR_DROP_DST;
         xor_flags &= ~ASYNC_TX_ACK;
···
         if (tx) {
             if (dma_wait_for_async_tx(tx) == DMA_ERROR)
                 panic("%s: DMA_ERROR waiting for tx\n",
-                    __FUNCTION__);
+                    __func__);
             async_tx_ack(tx);
         }
 

drivers/dma/dmaengine.c | +1 -1
···
         !device->device_prep_dma_zero_sum);
     BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
         !device->device_prep_dma_memset);
-    BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+    BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
         !device->device_prep_dma_interrupt);
 
     BUG_ON(!device->device_alloc_chan_resources);
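
Aside (editorial note): this one-line dmaengine.c fix matters because the
registration-time sanity check was testing the wrong capability bit. A driver
advertising DMA_INTERRUPT without implementing device_prep_dma_interrupt
slipped through, while DMA_ZERO_SUM drivers were wrongly required to provide
it. The invariant being enforced is simply "every advertised capability has
its prep hook". A hedged userspace model of that check (the names below are
made up for illustration, not the dmaengine API):

    #include <assert.h>
    #include <stdio.h>

    enum cap { CAP_MEMCPY = 1 << 0, CAP_INTERRUPT = 1 << 1 };

    struct engine {
        unsigned int cap_mask;
        int (*prep_memcpy)(void);
        int (*prep_interrupt)(void);
    };

    /* mirror of the BUG_ON() pattern: capability bit set but hook missing */
    static void check_engine(const struct engine *e)
    {
        assert(!((e->cap_mask & CAP_MEMCPY) && !e->prep_memcpy));
        assert(!((e->cap_mask & CAP_INTERRUPT) && !e->prep_interrupt));
        printf("cap_mask 0x%x is consistent with the provided hooks\n",
               e->cap_mask);
    }

    static int dummy_prep(void) { return 0; }

    int main(void)
    {
        struct engine ok = {
            .cap_mask = CAP_MEMCPY | CAP_INTERRUPT,
            .prep_memcpy = dummy_prep,
            .prep_interrupt = dummy_prep,
        };
        check_engine(&ok);
        return 0;
    }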

drivers/dma/fsldma.c | +44 -14
···
 
 }
 
-static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
 {
     DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
 }
 
-static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsl_dma_chan *fsl_chan)
 {
     return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
 }
···
     dma_pool_destroy(fsl_chan->desc_pool);
 }
 
+static struct dma_async_tx_descriptor *
+fsl_dma_prep_interrupt(struct dma_chan *chan)
+{
+    struct fsl_dma_chan *fsl_chan;
+    struct fsl_desc_sw *new;
+
+    if (!chan)
+        return NULL;
+
+    fsl_chan = to_fsl_chan(chan);
+
+    new = fsl_dma_alloc_descriptor(fsl_chan);
+    if (!new) {
+        dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+        return NULL;
+    }
+
+    new->async_tx.cookie = -EBUSY;
+    new->async_tx.ack = 0;
+
+    /* Set End-of-link to the last link descriptor of new list*/
+    set_ld_eol(fsl_chan, new);
+
+    return &new->async_tx;
+}
+
 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
     struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
     size_t len, unsigned long flags)
···
         dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
 #endif
 
-        copy = min(len, FSL_DMA_BCR_MAX_CNT);
+        copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 
         set_desc_cnt(fsl_chan, &new->hw, copy);
         set_desc_src(fsl_chan, &new->hw, dma_src);
···
 
     spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
-    fsl_dma_update_completed_cookie(fsl_chan);
     dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
             fsl_chan->completed_cookie);
     list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
···
     if (ld_node != &fsl_chan->ld_queue) {
         /* Get the ld start address from ld_queue */
         next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-        dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n",
-                (u64)next_dest_addr);
+        dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
+                (void *)next_dest_addr);
         set_cdar(fsl_chan, next_dest_addr);
         dma_start(fsl_chan);
     } else {
···
 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 {
     struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
-    dma_addr_t stat;
+    u32 stat;
 
     stat = get_sr(fsl_chan);
     dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
···
      */
     if (stat & FSL_DMA_SR_EOSI) {
         dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-        dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
-                "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
-                (u64)get_ndar(fsl_chan));
+        dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
+                (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
         stat &= ~FSL_DMA_SR_EOSI;
+        fsl_dma_update_completed_cookie(fsl_chan);
     }
 
     /* If it current transfer is the end-of-transfer,
···
     fsl_chan_ld_cleanup(fsl_chan);
 }
 
+#ifdef FSL_DMA_CALLBACKTEST
 static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
 {
     if (fsl_chan)
         dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
 }
+#endif
 
+#ifdef CONFIG_FSL_DMA_SELFTEST
 static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 {
     struct dma_chan *chan;
···
     if (err) {
         for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
                 i++);
-        dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
+        dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is "
                 "error! src 0x%x, dest 0x%x\n",
-                i, test_size, *(src + i), *(dest + i));
+                i, (long)test_size, *(src + i), *(dest + i));
     }
 
 free_resources:
···
     kfree(src);
     return err;
 }
+#endif
 
 static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
     const struct of_device_id *match)
···
     }
 
     dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-            "controller at 0x%08x...\n",
-            match->compatible, fdev->reg.start);
+            "controller at %p...\n",
+            match->compatible, (void *)fdev->reg.start);
     fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
                     - fdev->reg.start + 1);
···
     dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
     fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
     fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
+    fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
     fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
     fdev->common.device_is_tx_complete = fsl_dma_is_complete;
     fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
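
Aside (editorial note): most of the fsldma.c churn above beyond the new
fsl_dma_prep_interrupt() is the PPC64 warning cleanup: the status register is
handled as u32 rather than dma_addr_t, addresses are printed with %p instead
of a fixed-width format, and min(len, FSL_DMA_BCR_MAX_CNT) gains a (size_t)
cast because the kernel's type-checked min() warns when a 64-bit size_t is
compared against a 32-bit constant. A standalone sketch of that last point
(the macro and constant below are simplified stand-ins, not the kernel
definitions):

    #include <stdio.h>
    #include <stddef.h>

    #define BCR_MAX_CNT ((1 << 26) - 1)   /* 32-bit stand-in constant */

    /* simplified type-checked min(): the &_x == &_y comparison warns at
     * compile time ("comparison of distinct pointer types") if x and y
     * have different types, which is the warning seen on 64-bit builds */
    #define checked_min(x, y) ({          \
        __typeof__(x) _x = (x);           \
        __typeof__(y) _y = (y);           \
        (void)(&_x == &_y);               \
        _x < _y ? _x : _y; })

    int main(void)
    {
        size_t len = (size_t)1 << 27;
        /* the (size_t) cast keeps both operands the same type */
        size_t copy = checked_min(len, (size_t)BCR_MAX_CNT);

        printf("copy = %zu\n", copy);
        return 0;
    }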

drivers/dma/iop-adma.c | +16 -16
···
     int busy = iop_chan_is_busy(iop_chan);
     int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
 
-    dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
     /* free completed slots from the chain starting with
      * the oldest descriptor
      */
···
     spin_unlock_bh(&iop_chan->lock);
 
     dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
-        __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+        __func__, sw_desc->async_tx.cookie, sw_desc->idx);
 
     return cookie;
 }
···
     struct iop_adma_desc_slot *sw_desc, *grp_start;
     int slot_cnt, slots_per_op;
 
-    dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
···
     BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
     dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-        __FUNCTION__, len);
+        __func__, len);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
···
     BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
 
     dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
-        __FUNCTION__, len);
+        __func__, len);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
···
 
     dev_dbg(iop_chan->device->common.dev,
         "%s src_cnt: %d len: %u flags: %lx\n",
-        __FUNCTION__, src_cnt, len, flags);
+        __func__, src_cnt, len, flags);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
···
         return NULL;
 
     dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
-        __FUNCTION__, src_cnt, len);
+        __func__, src_cnt, len);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
···
         iop_desc_set_zero_sum_byte_count(grp_start, len);
         grp_start->xor_check_result = result;
         pr_debug("\t%s: grp_start->xor_check_result: %p\n",
-            __FUNCTION__, grp_start->xor_check_result);
+            __func__, grp_start->xor_check_result);
         sw_desc->unmap_src_cnt = src_cnt;
         sw_desc->unmap_len = len;
         while (src_cnt--)
···
     iop_chan->last_used = NULL;
 
     dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
-        __FUNCTION__, iop_chan->slots_allocated);
+        __func__, iop_chan->slots_allocated);
     spin_unlock_bh(&iop_chan->lock);
 
     /* one is ok since we left it on there on purpose */
···
 {
     struct iop_adma_chan *chan = data;
 
-    dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
     tasklet_schedule(&chan->irq_tasklet);
 
···
 {
     struct iop_adma_chan *chan = data;
 
-    dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(chan->device->common.dev, "%s\n", __func__);
 
     tasklet_schedule(&chan->irq_tasklet);
 
···
     int err = 0;
     struct iop_adma_chan *iop_chan;
 
-    dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(device->common.dev, "%s\n", __func__);
 
     src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
     if (!src)
···
     int err = 0;
     struct iop_adma_chan *iop_chan;
 
-    dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(device->common.dev, "%s\n", __func__);
 
     for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
         xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
···
     }
 
     dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
-        __FUNCTION__, adev->dma_desc_pool_virt,
+        __func__, adev->dma_desc_pool_virt,
         (void *) adev->dma_desc_pool);
 
     adev->id = plat_data->hw_id;
···
     dma_cookie_t cookie;
     int slot_cnt, slots_per_op;
 
-    dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
···
     dma_cookie_t cookie;
     int slot_cnt, slots_per_op;
 
-    dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+    dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
 
     spin_lock_bh(&iop_chan->lock);
     slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);