Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

intel_th: msu: Switch over to scatterlist

Instead of using a home-grown array of pointers to the DMA pages, switch
over to scatterlist data types and accessors, which provide all the
convenient accessors, can be used to batch-map DMA memory, and are
convenient for passing around between different layers — which will be
useful when MSU buffer management has to cross the boundaries of the MSU
driver.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Alexander Shishkin and committed by
Greg Kroah-Hartman
ba39bd83 0de9e035

+104 -59
+104 -59
drivers/hwtracing/intel_th/msu.c
··· 29 29 #define msc_dev(x) (&(x)->thdev->dev) 30 30 31 31 /** 32 - * struct msc_block - multiblock mode block descriptor 33 - * @bdesc: pointer to hardware descriptor (beginning of the block) 34 - * @addr: physical address of the block 35 - */ 36 - struct msc_block { 37 - struct msc_block_desc *bdesc; 38 - dma_addr_t addr; 39 - }; 40 - 41 - /** 42 32 * struct msc_window - multiblock mode window descriptor 43 33 * @entry: window list linkage (msc::win_list) 44 34 * @pgoff: page offset into the buffer that this window starts at 45 35 * @nr_blocks: number of blocks (pages) in this window 46 - * @block: array of block descriptors 36 + * @sgt: array of block descriptors 47 37 */ 48 38 struct msc_window { 49 39 struct list_head entry; 50 40 unsigned long pgoff; 51 41 unsigned int nr_blocks; 52 42 struct msc *msc; 53 - struct msc_block block[0]; 43 + struct sg_table sgt; 54 44 }; 55 45 56 46 /** ··· 133 143 return false; 134 144 } 135 145 146 + static inline struct msc_block_desc * 147 + msc_win_block(struct msc_window *win, unsigned int block) 148 + { 149 + return sg_virt(&win->sgt.sgl[block]); 150 + } 151 + 152 + static inline dma_addr_t 153 + msc_win_baddr(struct msc_window *win, unsigned int block) 154 + { 155 + return sg_dma_address(&win->sgt.sgl[block]); 156 + } 157 + 158 + static inline unsigned long 159 + msc_win_bpfn(struct msc_window *win, unsigned int block) 160 + { 161 + return msc_win_baddr(win, block) >> PAGE_SHIFT; 162 + } 163 + 136 164 /** 137 165 * msc_oldest_window() - locate the window with oldest data 138 166 * @msc: MSC device ··· 176 168 * something like 2, in which case we're good 177 169 */ 178 170 list_for_each_entry(win, &msc->win_list, entry) { 179 - if (win->block[0].addr == win_addr) 171 + if (sg_dma_address(win->sgt.sgl) == win_addr) 180 172 found++; 181 173 182 174 /* skip the empty ones */ 183 - if (msc_block_is_empty(win->block[0].bdesc)) 175 + if (msc_block_is_empty(msc_win_block(win, 0))) 184 176 continue; 185 177 186 178 if (found) 
··· 199 191 static unsigned int msc_win_oldest_block(struct msc_window *win) 200 192 { 201 193 unsigned int blk; 202 - struct msc_block_desc *bdesc = win->block[0].bdesc; 194 + struct msc_block_desc *bdesc = msc_win_block(win, 0); 203 195 204 196 /* without wrapping, first block is the oldest */ 205 197 if (!msc_block_wrapped(bdesc)) ··· 210 202 * oldest data for this window. 211 203 */ 212 204 for (blk = 0; blk < win->nr_blocks; blk++) { 213 - bdesc = win->block[blk].bdesc; 205 + bdesc = msc_win_block(win, blk); 214 206 215 207 if (msc_block_last_written(bdesc)) 216 208 return blk; ··· 246 238 247 239 static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) 248 240 { 249 - return iter->win->block[iter->block].bdesc; 241 + return msc_win_block(iter->win, iter->block); 250 242 } 251 243 252 244 static void msc_iter_init(struct msc_iter *iter) ··· 479 471 offsetof(struct msc_block_desc, hw_tag); 480 472 481 473 for (blk = 0; blk < win->nr_blocks; blk++) { 482 - struct msc_block_desc *bdesc = win->block[blk].bdesc; 474 + struct msc_block_desc *bdesc = msc_win_block(win, blk); 483 475 484 476 memset(&bdesc->hw_tag, 0, hw_sz); 485 477 } ··· 742 734 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); 743 735 } 744 736 737 + static int __msc_buffer_win_alloc(struct msc_window *win, 738 + unsigned int nr_blocks) 739 + { 740 + struct scatterlist *sg_ptr; 741 + void *block; 742 + int i, ret; 743 + 744 + ret = sg_alloc_table(&win->sgt, nr_blocks, GFP_KERNEL); 745 + if (ret) 746 + return -ENOMEM; 747 + 748 + for_each_sg(win->sgt.sgl, sg_ptr, nr_blocks, i) { 749 + block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent, 750 + PAGE_SIZE, &sg_dma_address(sg_ptr), 751 + GFP_KERNEL); 752 + if (!block) 753 + goto err_nomem; 754 + 755 + sg_set_buf(sg_ptr, block, PAGE_SIZE); 756 + } 757 + 758 + return nr_blocks; 759 + 760 + err_nomem: 761 + for (i--; i >= 0; i--) 762 + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 763 + msc_win_block(win, i), 764 
+ msc_win_baddr(win, i)); 765 + 766 + sg_free_table(&win->sgt); 767 + 768 + return -ENOMEM; 769 + } 770 + 745 771 /** 746 772 * msc_buffer_win_alloc() - alloc a window for a multiblock mode 747 773 * @msc: MSC device ··· 789 747 static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) 790 748 { 791 749 struct msc_window *win; 792 - unsigned long size = PAGE_SIZE; 793 - int i, ret = -ENOMEM; 750 + int ret = -ENOMEM, i; 794 751 795 752 if (!nr_blocks) 796 753 return 0; 797 754 798 - win = kzalloc(offsetof(struct msc_window, block[nr_blocks]), 799 - GFP_KERNEL); 755 + /* 756 + * This limitation hold as long as we need random access to the 757 + * block. When that changes, this can go away. 758 + */ 759 + if (nr_blocks > SG_MAX_SINGLE_ALLOC) 760 + return -EINVAL; 761 + 762 + win = kzalloc(sizeof(*win), GFP_KERNEL); 800 763 if (!win) 801 764 return -ENOMEM; 765 + 766 + win->msc = msc; 802 767 803 768 if (!list_empty(&msc->win_list)) { 804 769 struct msc_window *prev = list_last_entry(&msc->win_list, 805 770 struct msc_window, 806 771 entry); 807 772 773 + /* This works as long as blocks are page-sized */ 808 774 win->pgoff = prev->pgoff + prev->nr_blocks; 809 775 } 810 776 811 - for (i = 0; i < nr_blocks; i++) { 812 - win->block[i].bdesc = 813 - dma_alloc_coherent(msc_dev(msc)->parent->parent, size, 814 - &win->block[i].addr, GFP_KERNEL); 815 - 816 - if (!win->block[i].bdesc) 817 - goto err_nomem; 777 + ret = __msc_buffer_win_alloc(win, nr_blocks); 778 + if (ret < 0) 779 + goto err_nomem; 818 780 819 781 #ifdef CONFIG_X86 782 + for (i = 0; i < ret; i++) 820 783 /* Set the page as uncached */ 821 - set_memory_uc((unsigned long)win->block[i].bdesc, 1); 784 + set_memory_uc((unsigned long)msc_win_block(win, i), 1); 822 785 #endif 823 - } 824 786 825 - win->msc = msc; 826 - win->nr_blocks = nr_blocks; 787 + win->nr_blocks = ret; 827 788 828 789 if (list_empty(&msc->win_list)) { 829 - msc->base = win->block[0].bdesc; 830 - msc->base_addr = win->block[0].addr; 
790 + msc->base = msc_win_block(win, 0); 791 + msc->base_addr = msc_win_baddr(win, 0); 831 792 } 832 793 833 794 list_add_tail(&win->entry, &msc->win_list); ··· 839 794 return 0; 840 795 841 796 err_nomem: 842 - for (i--; i >= 0; i--) { 843 - #ifdef CONFIG_X86 844 - /* Reset the page to write-back before releasing */ 845 - set_memory_wb((unsigned long)win->block[i].bdesc, 1); 846 - #endif 847 - dma_free_coherent(msc_dev(msc)->parent->parent, size, 848 - win->block[i].bdesc, win->block[i].addr); 849 - } 850 797 kfree(win); 851 798 852 799 return ret; 800 + } 801 + 802 + static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) 803 + { 804 + int i; 805 + 806 + for (i = 0; i < win->nr_blocks; i++) { 807 + struct page *page = sg_page(&win->sgt.sgl[i]); 808 + 809 + page->mapping = NULL; 810 + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 811 + msc_win_block(win, i), msc_win_baddr(win, i)); 812 + } 813 + sg_free_table(&win->sgt); 853 814 } 854 815 855 816 /** ··· 878 827 msc->base_addr = 0; 879 828 } 880 829 881 - for (i = 0; i < win->nr_blocks; i++) { 882 - struct page *page = virt_to_page(win->block[i].bdesc); 883 - 884 - page->mapping = NULL; 885 830 #ifdef CONFIG_X86 886 - /* Reset the page to write-back before releasing */ 887 - set_memory_wb((unsigned long)win->block[i].bdesc, 1); 831 + for (i = 0; i < win->nr_blocks; i++) 832 + /* Reset the page to write-back */ 833 + set_memory_wb((unsigned long)msc_win_block(win, i), 1); 888 834 #endif 889 - dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 890 - win->block[i].bdesc, win->block[i].addr); 891 - } 835 + 836 + __msc_buffer_win_free(msc, win); 892 837 893 838 kfree(win); 894 839 } ··· 918 871 } 919 872 920 873 for (blk = 0; blk < win->nr_blocks; blk++) { 921 - struct msc_block_desc *bdesc = win->block[blk].bdesc; 874 + struct msc_block_desc *bdesc = msc_win_block(win, blk); 922 875 923 876 memset(bdesc, 0, sizeof(*bdesc)); 924 877 925 - bdesc->next_win = 
next_win->block[0].addr >> PAGE_SHIFT; 878 + bdesc->next_win = msc_win_bpfn(next_win, 0); 926 879 927 880 /* 928 881 * Similarly to last window, last block should point ··· 930 883 */ 931 884 if (blk == win->nr_blocks - 1) { 932 885 sw_tag |= MSC_SW_TAG_LASTBLK; 933 - bdesc->next_blk = 934 - win->block[0].addr >> PAGE_SHIFT; 886 + bdesc->next_blk = msc_win_bpfn(win, 0); 935 887 } else { 936 - bdesc->next_blk = 937 - win->block[blk + 1].addr >> PAGE_SHIFT; 888 + bdesc->next_blk = msc_win_bpfn(win, blk + 1); 938 889 } 939 890 940 891 bdesc->sw_tag = sw_tag; ··· 1107 1062 1108 1063 found: 1109 1064 pgoff -= win->pgoff; 1110 - return virt_to_page(win->block[pgoff].bdesc); 1065 + return sg_page(&win->sgt.sgl[pgoff]); 1111 1066 } 1112 1067 1113 1068 /**