@@ -138,22 +138,18 @@
 #endif
 
 /*
- * Slub defaults to 8-byte alignment, we're only interested in 4.
- * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
+ * Some drivers need to perform DMA into kmalloc'ed buffers
+ * and so we have to increase the kmalloc minalign for this.
  */
-#ifdef CONFIG_SUPERH32
-#define ARCH_KMALLOC_MINALIGN 4
-#define ARCH_SLAB_MINALIGN 4
-#else
-/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
-   kmalloc allocations to be 8-byte aligned. Without this, the alignment
-   becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
-   sh64 at the moment). */
-#define ARCH_KMALLOC_MINALIGN 8
+#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
 
+#ifdef CONFIG_SUPERH64
 /*
- * We want 8-byte alignment for the slab caches as well, otherwise we have
- * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
+ * happily generate {ld/st}.q pairs, requiring us to have 8-byte
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
+ * virtue of L1_CACHE_BYTES, requiring this to only be special cased
+ * for slab caches.
  */
 #define ARCH_SLAB_MINALIGN 8
 #endif