I/OAT: fixups from code comments

A few fixups from Andrew's code comments.
- removed "static inline" forward-declares
- changed use of min() to min_t() (see the sketch after this list)
- removed some unnecessary NULL initializations
- removed a couple of BUG() calls
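
For the min()/min_t() item, here is a small userspace sketch (not part of the patch; the macros below only imitate the ones in include/linux/kernel.h) of why min_t() is preferred once xfercap becomes size_t: the kernel's real min() warns at compile time when its two operands have different types, which is what forced the old "(u32) len" cast, while min_t(size_t, ...) casts both sides explicitly.

#include <stdio.h>
#include <stddef.h>

/*
 * Simplified imitations of the kernel helpers; the real min() also
 * complains at compile time when the operand types differ, which
 * min_t() avoids by casting both operands to the named type.
 */
#define min(x, y)		((x) < (y) ? (x) : (y))
#define min_t(type, x, y)	((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	size_t len = 5000;		/* stand-in for the transfer length */
	unsigned int xfercap32 = 4096;	/* xfercap as it was: a u32 */
	size_t xfercap = 4096;		/* xfercap as this patch makes it: size_t */

	/* old style: cast the size_t length down so min()'s operands match */
	unsigned int copy_old = min((unsigned int)len, xfercap32);

	/* new style: min_t() casts both operands to size_t itself */
	size_t copy_new = min_t(size_t, len, xfercap);

	printf("old=%u new=%zu\n", copy_old, copy_new);
	return 0;
}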

Fixes this:

drivers/dma/ioat_dma.c: In function `ioat1_tx_submit':
drivers/dma/ioat_dma.c:177: sorry, unimplemented: inlining failed in call to '__ioat1_dma_memcpy_issue_pending': function body not available
drivers/dma/ioat_dma.c:268: sorry, unimplemented: called from here
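
The error comes from calling a static inline function through a forward declaration: __ioat1_dma_memcpy_issue_pending() was declared near the top of the file but defined much further down, so at the call site in ioat1_tx_submit() the compiler has no body to inline, and when inlining is forced (older gcc, or a build where the kernel defines inline with always_inline semantics) it stops with "function body not available". The diff below therefore drops the forward declarations and moves the full definitions above their callers. A rough stand-alone sketch of the same fix, with invented names rather than the driver code:

/*
 * sketch.c - illustrates the failing pattern and its fix (invented names).
 *
 * Broken layout (what the driver had):
 *	static inline void issue_pending(struct chan *c);	declaration only
 *	... submit() calls issue_pending() here ...
 *	static inline void issue_pending(struct chan *c) { ... }	body far below
 *
 * If the compiler must inline the call, the body is not yet available at
 * the call site and the build fails as quoted above.  The fix is simply
 * to place the definition before its first caller:
 */

struct chan {
	int pending;
};

static inline void issue_pending(struct chan *c)	/* body comes first now */
{
	c->pending = 0;
}

static int submit(struct chan *c)
{
	c->pending++;
	issue_pending(c);	/* definition already seen, so it can be inlined */
	return 0;
}

int main(void)
{
	struct chan c = { .pending = 0 };
	return submit(&c);
}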

Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Cc: "Williams, Dan J" <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Shannon Nelson and committed by Linus Torvalds (commit 711924b1, 7c9e70ef)

2 files changed, 78 insertions(+), 66 deletions(-)

drivers/dma/ioat_dma.c (+77 -65)
···
 	tx_to_ioat_desc(tx)->dst = addr;
 }
 
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ *                                 descriptors to hw
+ * @chan: DMA channel handle
+ */
 static inline void __ioat1_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan);
+						struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat1_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
+
 static inline void __ioat2_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan);
+						struct ioat_dma_chan *ioat_chan)
+{
+	ioat_chan->pending = 0;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+	if (ioat_chan->pending != 0) {
+		spin_lock_bh(&ioat_chan->desc_lock);
+		__ioat2_dma_memcpy_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->desc_lock);
+	}
+}
 
 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
 {
···
 	prev = to_ioat_desc(ioat_chan->used_desc.prev);
 	prefetch(prev->hw);
 	do {
-		copy = min((u32) len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat_chan->xfercap);
 
 		new->async_tx.ack = 1;
 
···
 	orig_ack = first->async_tx.ack;
 	new = first;
 
-	/* ioat_chan->desc_lock is still in force in version 2 path */
-
+	/*
+	 * ioat_chan->desc_lock is still in force in version 2 path
+	 * it gets unlocked at end of this function
+	 */
 	do {
-		copy = min((u32) len, ioat_chan->xfercap);
+		copy = min_t(size_t, len, ioat_chan->xfercap);
 
 		new->async_tx.ack = 1;
 
···
 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-	struct ioat_desc_sw *desc = NULL;
+	struct ioat_desc_sw *desc;
 	u16 chanctrl;
 	u32 chanerr;
 	int i;
···
 static struct ioat_desc_sw *
 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new = NULL;
+	struct ioat_desc_sw *new;
 
 	if (!list_empty(&ioat_chan->free_desc)) {
 		new = to_ioat_desc(ioat_chan->free_desc.next);
···
 	} else {
 		/* try to get another desc */
 		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-		/* will this ever happen? */
-		/* TODO add upper limit on these */
-		BUG_ON(!new);
+		if (!new) {
+			dev_err(&ioat_chan->device->pdev->dev,
+				"alloc failed\n");
+			return NULL;
+		}
 	}
 
 	prefetch(new->hw);
···
 static struct ioat_desc_sw *
 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new = NULL;
+	struct ioat_desc_sw *new;
 
 	/*
 	 * used.prev points to where to start processing
···
 	if (ioat_chan->used_desc.prev &&
 	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
 
-		struct ioat_desc_sw *desc = NULL;
-		struct ioat_desc_sw *noop_desc = NULL;
+		struct ioat_desc_sw *desc;
+		struct ioat_desc_sw *noop_desc;
 		int i;
 
 		/* set up the noop descriptor */
···
 		ioat_chan->pending++;
 		ioat_chan->dmacount++;
 
-		/* get a few more descriptors */
+		/* try to get a few more descriptors */
 		for (i = 16; i; i--) {
 			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-			BUG_ON(!desc);
+			if (!desc) {
+				dev_err(&ioat_chan->device->pdev->dev,
+					"alloc failed\n");
+				break;
+			}
 			list_add_tail(&desc->node, ioat_chan->used_desc.next);
 
 			desc->hw->next
···
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 	new = ioat_dma_get_next_descriptor(ioat_chan);
-	new->len = len;
 	spin_unlock_bh(&ioat_chan->desc_lock);
 
-	return new ? &new->async_tx : NULL;
+	if (new) {
+		new->len = len;
+		return &new->async_tx;
+	} else
+		return NULL;
 }
 
 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
···
 
 	spin_lock_bh(&ioat_chan->desc_lock);
 	new = ioat2_dma_get_next_descriptor(ioat_chan);
-	new->len = len;
 
-	/* leave ioat_chan->desc_lock set in version 2 path */
-	return new ? &new->async_tx : NULL;
-}
+	/*
+	 * leave ioat_chan->desc_lock set in ioat 2 path
+	 * it will get unlocked at end of tx_submit
+	 */
 
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending != 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
-						struct ioat_dma_chan *ioat_chan)
-{
-	ioat_chan->pending = 0;
-	writew(ioat_chan->dmacount,
-	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-	if (ioat_chan->pending != 0) {
-		spin_lock_bh(&ioat_chan->desc_lock);
-		__ioat2_dma_memcpy_issue_pending(ioat_chan);
-		spin_unlock_bh(&ioat_chan->desc_lock);
-	}
+	if (new) {
+		new->len = len;
+		return &new->async_tx;
+	} else
+		return NULL;
 }
 
 static void ioat_dma_cleanup_tasklet(unsigned long data)
···
 static void ioat_dma_test_callback(void *dma_async_param)
 {
 	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
-		dma_async_param);
+	       dma_async_param);
 }
 
···
 	u8 *src;
 	u8 *dest;
 	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx = NULL;
+	struct dma_async_tx_descriptor *tx;
 	dma_addr_t addr;
 	dma_cookie_t cookie;
 	int err = 0;
drivers/dma/ioatdma.h (+1 -1)
···
 	dma_cookie_t completed_cookie;
 	unsigned long last_completion;
 
-	u32 xfercap;	/* XFERCAP register value expanded out */
+	size_t xfercap;	/* XFERCAP register value expanded out */
 
 	spinlock_t cleanup_lock;
 	spinlock_t desc_lock;