Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: Bump up ARCH_KMALLOC_MINALIGN for DMA cases.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+9 -13
include/asm-sh/page.h
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -138,22 +138,18 @@
 #endif
 
 /*
- * Slub defaults to 8-byte alignment, we're only interested in 4.
- * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
  */
-#ifdef CONFIG_SUPERH32
-#define ARCH_KMALLOC_MINALIGN	4
-#define ARCH_SLAB_MINALIGN	4
-#else
-/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
-   kmalloc allocations to be 8-byte aligned. Without this, the alignment
-   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
-   sh64 at the moment). */
-#define ARCH_KMALLOC_MINALIGN	8
+#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 
+#ifdef CONFIG_SUPERH64
 /*
- * We want 8-byte alignment for the slab caches as well, otherwise we have
- * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
+ * happily generate {ld/st}.q pairs, requiring us to have 8-byte
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
+ * virtue of L1_CACHE_BYTES, requiring this to only be special cased
+ * for slab caches.
  */
 #define ARCH_SLAB_MINALIGN	8
 #endif