I/OAT: fixups from code comments

A few fixups from Andrew's code comments.
- removed "static inline" forward-declares
- changed use of min() to min_t() (see the sketch after this list)
- removed some unnecessary NULL initializations
- removed a couple of BUG() calls
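
For context on the min()/min_t() item: this patch also widens ioat_chan->xfercap
to size_t (see the ioatdma.h hunk below), the kernel's min() insists on matching
types, and the old "(u32) len" cast could in principle truncate a large length
before the comparison. A small userspace sketch of the difference, using a
simplified MIN_T stand-in rather than the kernel's min_t macro:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* simplified stand-in for the kernel's min_t(): force both operands to one
 * named type before comparing (the real macro also avoids double evaluation) */
#define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	/* assumes a 64-bit build so size_t can hold a length above 4 GiB */
	size_t len = (size_t)1 << 32;	/* 4 GiB request */
	uint32_t xfercap = 0x1000;	/* per-descriptor transfer cap, 4 KiB */

	/* old style: casting len down to u32 first truncates 4 GiB to 0 */
	uint32_t copy_old = (uint32_t)len < xfercap ? (uint32_t)len : xfercap;

	/* min_t(size_t, ...) style: compare in the wider type, no truncation */
	size_t copy_new = MIN_T(size_t, len, xfercap);

	printf("old: %u  new: %zu\n", (unsigned)copy_old, copy_new);	/* old: 0  new: 4096 */
	return 0;
}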

Fixes this:

drivers/dma/ioat_dma.c: In function `ioat1_tx_submit':
drivers/dma/ioat_dma.c:177: sorry, unimplemented: inlining failed in call to '__ioat1_dma_memcpy_issue_pending': function body not available
drivers/dma/ioat_dma.c:268: sorry, unimplemented: called from here
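
The message means ioat1_tx_submit (line 268) calls
__ioat1_dma_memcpy_issue_pending while only its forward declaration (line 177)
has been seen; the kernel maps inline to __attribute__((always_inline)), so gcc
has no body it can force-inline at that point.  A minimal standalone sketch of
the pattern, with made-up names rather than the driver code; moving the body
above the caller, as this patch does, is the cure:

/* spelling the always_inline attribute out reproduces in plain C the
 * constraint the kernel's 'inline' imposes: gcc must see the body */
static inline __attribute__((always_inline)) void push_pending(int *pending);

static int submit(int *pending)
{
	if (++(*pending) > 4)
		push_pending(pending);	/* body not yet seen: where older gcc gave up */
	return *pending;
}

/* the fix is the same idea as this patch: define the inline body
 * above the first caller instead of forward-declaring it */
static inline __attribute__((always_inline)) void push_pending(int *pending)
{
	*pending = 0;
}

int main(void)
{
	int pending = 5;
	return submit(&pending);
}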

Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Cc: "Williams, Dan J" <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

2 files changed, 78 insertions(+), 66 deletions(-)

drivers/dma/ioat_dma.c (+77 -65)

···
 	tx_to_ioat_desc(tx)->dst = addr;
 }
 
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ * descriptors to hw
+ * @chan: DMA channel handle
+ */
 static inline void __ioat1_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan);
+						struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
+
 static inline void __ioat2_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan);
+						struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
···
 	prev = to_ioat_desc(ioat_chan->used_desc.prev);
 	prefetch(prev->hw);
 	do {
-		copy = min((u32) len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat_chan->xfercap);
 
 		new->async_tx.ack = 1;
 
···
 	orig_ack = first->async_tx.ack;
 	new = first;
 
-	/* ioat_chan->desc_lock is still in force in version 2 path */
-
+	/*
+	 * ioat_chan->desc_lock is still in force in version 2 path
+	 * it gets unlocked at end of this function
+	 */
 	do {
-		copy = min((u32) len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat_chan->xfercap);
 
 		new->async_tx.ack = 1;
 
···
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_desc_sw *desc = NULL;
+	struct ioat_desc_sw *desc;
 	u16 chanctrl;
 	u32 chanerr;
 	int i;
···
 static struct ioat_desc_sw *
 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new = NULL;
+	struct ioat_desc_sw *new;
 
 	if (!list_empty(&ioat_chan->free_desc)) {
 		new = to_ioat_desc(ioat_chan->free_desc.next);
···
 	} else {
 		/* try to get another desc */
 		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-		/* will this ever happen? */
-		/* TODO add upper limit on these */
-		BUG_ON(!new);
+		if (!new) {
+			dev_err(&ioat_chan->device->pdev->dev,
+				"alloc failed\n");
+			return NULL;
+		}
 	}
 
 	prefetch(new->hw);
···
 static struct ioat_desc_sw *
 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new = NULL;
+	struct ioat_desc_sw *new;
 
 	/*
 	 * used.prev points to where to start processing
···
 	if (ioat_chan->used_desc.prev &&
 	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
 
-		struct ioat_desc_sw *desc = NULL;
-		struct ioat_desc_sw *noop_desc = NULL;
+		struct ioat_desc_sw *desc;
+		struct ioat_desc_sw *noop_desc;
 		int i;
 
 		/* set up the noop descriptor */
···
 		ioat_chan->pending++;
 		ioat_chan->dmacount++;
 
-		/* get a few more descriptors */
+		/* try to get a few more descriptors */
 		for (i = 16; i; i--) {
 			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-			BUG_ON(!desc);
+			if (!desc) {
+				dev_err(&ioat_chan->device->pdev->dev,
+					"alloc failed\n");
+				break;
+			}
 			list_add_tail(&desc->node, ioat_chan->used_desc.next);
 
 			desc->hw->next
···
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 	new = ioat_dma_get_next_descriptor(ioat_chan);
-	new->len = len;
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	return new ? &new->async_tx : NULL;
+	if (new) {
+		new->len = len;
+		return &new->async_tx;
+	} else
+		return NULL;
 }
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
···
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 	new = ioat2_dma_get_next_descriptor(ioat_chan);
-	new->len = len;
 
-	/* leave ioat_chan->desc_lock set in version 2 path */
-	return new ? &new->async_tx : NULL;
-}
+	/*
+	 * leave ioat_chan->desc_lock set in ioat 2 path
+	 * it will get unlocked at end of tx_submit
+	 */
 
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- * descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending != 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writew(ioat_chan->dmacount,
-	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending != 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
+	if (new) {
+		new->len = len;
+		return &new->async_tx;
+	} else
+		return NULL;
 }
 
 static void ioat_dma_cleanup_tasklet(unsigned long data)
···
 static void ioat_dma_test_callback(void *dma_async_param)
 {
 	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
-			dma_async_param);
+		dma_async_param);
 }
 
 /**
···
 	u8 *src;
 	u8 *dest;
 	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx = NULL;
+	struct dma_async_tx_descriptor *tx;
 	dma_addr_t addr;
 	dma_cookie_t cookie;
 	int err = 0;
drivers/dma/ioatdma.h (+1 -1)

···
 	dma_cookie_t completed_cookie;
 	unsigned long last_completion;
 
-	u32 xfercap;	/* XFERCAP register value expanded out */
+	size_t xfercap;	/* XFERCAP register value expanded out */
 
 	spinlock_t cleanup_lock;
 	spinlock_t desc_lock;