Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/testing/scatterlist: Test new __sg_alloc_table_from_pages

Exercise the new __sg_alloc_table_from_pages API (and through
it also the old sg_alloc_table_from_pages), checking that the
created table has the expected number of segments depending on
the sequence of input pages and other conditions.

v2: Move to a data-driven approach for readability.
v3: Add some more testcases and -fsanitize=undefined. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20170906145506.14952-1-tvrtko.ursulin@linux.intel.com
[tursulin: whitespace fixup]

+234
+30
tools/testing/scatterlist/Makefile
# Stand-alone userspace build of the kernel scatterlist code for testing.
# The real lib/scatterlist.c is imported by the "scatterlist.c" rule below
# and compiled against the stub include tree created by the "include" rule.
CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
LDFLAGS += -fsanitize=address -fsanitize=undefined
TARGETS = main
OFILES = main.o scatterlist.o

# "make BUILD=32" produces a 32-bit test binary.
ifeq ($(BUILD), 32)
CFLAGS += -m32
LDFLAGS += -m32
endif

# Default goal: set up the fake include tree first, then build the binary.
targets: include $(TARGETS)

main: $(OFILES)

clean:
	$(RM) $(TARGETS) $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h asm/io.h
	@rmdir asm

# Import the kernel implementation, stripping static/inline qualifiers so
# every function is externally visible to the test build.
scatterlist.c: ../../../lib/scatterlist.c
	@sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@

.PHONY: include

# Build a minimal include tree: empty stubs for headers scatterlist.c pulls
# in, plus a verbatim copy of the real linux/scatterlist.h.
include: ../../../include/linux/scatterlist.h
	@mkdir -p linux
	@mkdir -p asm
	@touch asm/io.h
	@touch linux/highmem.h
	@touch linux/kmemleak.h
	@cp $< linux/scatterlist.h
+125
tools/testing/scatterlist/linux/mm.h
/*
 * Minimal userspace stand-ins for the kernel <linux/mm.h> interfaces that
 * lib/scatterlist.c needs.  "struct page" is never defined here; pointers
 * to it are fake addresses that simply encode a page frame number
 * (pfn * PAGE_SIZE), which is all the scatterlist code inspects.
 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>

typedef unsigned long dma_addr_t;

/* Expands to nothing, so unlikely(x) degrades to plain (x). */
#define unlikely

#define BUG_ON(x) assert(!(x))

/* Evaluates and returns the condition; does not print like the kernel's. */
#define WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	unlikely(__ret_warn_on); \
})

/* In this harness a once-only warning is promoted to a hard failure. */
#define WARN_ON_ONCE(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		assert(0); \
	unlikely(__ret_warn_on); \
})

#define PAGE_SIZE (4096)
#define PAGE_SHIFT (12)
#define PAGE_MASK (~(PAGE_SIZE-1))

#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

/* Identity mappings: a fake page pointer doubles as its own "address". */
#define virt_to_page(x) ((void *)x)
#define page_address(x) ((void *)x)

/* Stub: the tests are not expected to reach this path. */
static inline unsigned long page_to_phys(struct page *page)
{
	assert(0);

	return 0;
}

/* pfn <-> fake page pointer conversions (see header comment above). */
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/*
 * min() core: evaluates each argument exactly once via temporaries; the
 * dummy pointer comparison provokes a compiler warning on mismatched types.
 */
#define __min(t1, t2, min1, min2, x, y) ({ \
	t1 min1 = (x); \
	t2 min2 = (y); \
	(void) (&min1 == &min2); \
	min1 < min2 ? min1 : min2; })

#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

/* __COUNTER__ makes the temporary names unique per expansion site. */
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#define min(x, y) \
	__min(typeof(x), typeof(y), \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
	      x, y)

#define min_t(type, x, y) \
	__min(type, type, \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
	      x, y)

#define preemptible() (1)

/* kmap family: stubs, not expected to be called by these tests. */
static inline void *kmap(struct page *page)
{
	assert(0);

	return NULL;
}

static inline void *kmap_atomic(struct page *page)
{
	assert(0);

	return NULL;
}

static inline void kunmap(void *addr)
{
	assert(0);
}

static inline void kunmap_atomic(void *addr)
{
	assert(0);
}

/* Back the kernel page/heap allocators with plain malloc()/free(). */
static inline unsigned long __get_free_page(unsigned int flags)
{
	return (unsigned long)malloc(PAGE_SIZE);
}

static inline void free_page(unsigned long page)
{
	free((void *)page);
}

static inline void *kmalloc(unsigned int size, unsigned int flags)
{
	return malloc(size);
}

#define kfree(x) free(x)

/* No-op stand-ins for kernel facilities irrelevant in userspace. */
#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)

#define PageSlab(p) (0)
#define flush_kernel_dcache_page(p)

#endif
+79
tools/testing/scatterlist/main.c
··· 1 + #include <stdio.h> 2 + #include <assert.h> 3 + 4 + #include <linux/scatterlist.h> 5 + 6 + #define MAX_PAGES (64) 7 + 8 + static void set_pages(struct page **pages, const unsigned *array, unsigned num) 9 + { 10 + unsigned int i; 11 + 12 + assert(num < MAX_PAGES); 13 + for (i = 0; i < num; i++) 14 + pages[i] = (struct page *)(unsigned long) 15 + ((1 + array[i]) * PAGE_SIZE); 16 + } 17 + 18 + #define pfn(...) (unsigned []){ __VA_ARGS__ } 19 + 20 + int main(void) 21 + { 22 + const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT; 23 + struct test { 24 + int alloc_ret; 25 + unsigned num_pages; 26 + unsigned *pfn; 27 + unsigned size; 28 + unsigned int max_seg; 29 + unsigned int expected_segments; 30 + } *test, tests[] = { 31 + { -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 }, 32 + { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 }, 33 + { -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 }, 34 + { 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 }, 35 + { 0, 1, pfn(0), 1, sgmax, 1 }, 36 + { 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 }, 37 + { 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 }, 38 + { 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 }, 39 + { 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 }, 40 + { 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 }, 41 + { 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 }, 42 + { 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 }, 43 + { 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 }, 44 + { 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 }, 45 + { 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 }, 46 + { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 }, 47 + { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 }, 48 + { 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 }, 49 + { 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 }, 50 + { 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 }, 51 + { 0, 0, NULL, 0, 0, 0 }, 52 + }; 53 + unsigned int i; 54 + 55 + for (i = 0, test = tests; test->expected_segments; test++, i++) { 56 + 
struct page *pages[MAX_PAGES]; 57 + struct sg_table st; 58 + int ret; 59 + 60 + set_pages(pages, test->pfn, test->num_pages); 61 + 62 + ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 63 + 0, test->size, test->max_seg, 64 + GFP_KERNEL); 65 + assert(ret == test->alloc_ret); 66 + 67 + if (test->alloc_ret) 68 + continue; 69 + 70 + assert(st.nents == test->expected_segments); 71 + assert(st.orig_nents == test->expected_segments); 72 + 73 + sg_free_table(&st); 74 + } 75 + 76 + assert(i == (sizeof(tests) / sizeof(tests[0])) - 1); 77 + 78 + return 0; 79 + }