Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: add helpers to run flush_dcache_page() against a bio and a request's pages

Mtdblock driver doesn't call flush_dcache_page for pages in request. So,
this causes problems on architectures where the icache doesn't fill from
the dcache or with dcache aliases. The patch fixes this.

The ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE symbol was introduced to avoid
pointless empty cache-thrashing loops on architectures for which
flush_dcache_page() is a no-op. Every architecture was provided with this
macro. The patch also introduces two helpers, bio_flush_dcache_pages() and
rq_flush_dcache_pages(), which flush the pages on architectures where
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is equal to 1, or do nothing otherwise.

See "fix mtd_blkdevs problem with caches on some architectures" discussion
on LKML for more information.

Signed-off-by: Ilya Loginov <isloginov@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Peter Horton <phorton@bitbox.co.uk>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

authored by

Ilya Loginov and committed by
Jens Axboe
2d4dc890 3586e917

+83
+1
arch/alpha/include/asm/cacheflush.h
··· 9 9 #define flush_cache_dup_mm(mm) do { } while (0) 10 10 #define flush_cache_range(vma, start, end) do { } while (0) 11 11 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 12 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 12 13 #define flush_dcache_page(page) do { } while (0) 13 14 #define flush_dcache_mmap_lock(mapping) do { } while (0) 14 15 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/arm/include/asm/cacheflush.h
··· 408 408 * about to change to user space. This is the same method as used on SPARC64. 409 409 * See update_mmu_cache for the user space part. 410 410 */ 411 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 411 412 extern void flush_dcache_page(struct page *); 412 413 413 414 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+1
arch/avr32/include/asm/cacheflush.h
··· 107 107 * do something here, but only for certain configurations. No such 108 108 * configurations exist at this time. 109 109 */ 110 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 110 111 #define flush_dcache_page(page) do { } while (0) 111 112 #define flush_dcache_mmap_lock(page) do { } while (0) 112 113 #define flush_dcache_mmap_unlock(page) do { } while (0)
+2
arch/blackfin/include/asm/cacheflush.h
··· 68 68 #endif 69 69 #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) 70 70 # define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end)) 71 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 71 72 # define flush_dcache_page(page) blackfin_dflush_page(page_address(page)) 72 73 #else 73 74 # define flush_dcache_range(start,end) do { } while (0) 75 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 74 76 # define flush_dcache_page(page) do { } while (0) 75 77 #endif 76 78
+1
arch/cris/include/asm/cacheflush.h
··· 12 12 #define flush_cache_dup_mm(mm) do { } while (0) 13 13 #define flush_cache_range(vma, start, end) do { } while (0) 14 14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 15 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 15 16 #define flush_dcache_page(page) do { } while (0) 16 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) 17 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/frv/include/asm/cacheflush.h
··· 47 47 } 48 48 49 49 /* dcache/icache coherency... */ 50 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 50 51 #ifdef CONFIG_MMU 51 52 extern void flush_dcache_page(struct page *page); 52 53 #else
+1
arch/h8300/include/asm/cacheflush.h
··· 15 15 #define flush_cache_dup_mm(mm) do { } while (0) 16 16 #define flush_cache_range(vma,a,b) 17 17 #define flush_cache_page(vma,p,pfn) 18 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 18 19 #define flush_dcache_page(page) 19 20 #define flush_dcache_mmap_lock(mapping) 20 21 #define flush_dcache_mmap_unlock(mapping)
+1
arch/ia64/include/asm/cacheflush.h
··· 25 25 #define flush_cache_vmap(start, end) do { } while (0) 26 26 #define flush_cache_vunmap(start, end) do { } while (0) 27 27 28 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 28 29 #define flush_dcache_page(page) \ 29 30 do { \ 30 31 clear_bit(PG_arch_1, &(page)->flags); \
+3
arch/m32r/include/asm/cacheflush.h
··· 12 12 #define flush_cache_dup_mm(mm) do { } while (0) 13 13 #define flush_cache_range(vma, start, end) do { } while (0) 14 14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 15 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 15 16 #define flush_dcache_page(page) do { } while (0) 16 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) 17 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0) ··· 34 33 #define flush_cache_dup_mm(mm) do { } while (0) 35 34 #define flush_cache_range(vma, start, end) do { } while (0) 36 35 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 36 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 37 37 #define flush_dcache_page(page) do { } while (0) 38 38 #define flush_dcache_mmap_lock(mapping) do { } while (0) 39 39 #define flush_dcache_mmap_unlock(mapping) do { } while (0) ··· 48 46 #define flush_cache_dup_mm(mm) do { } while (0) 49 47 #define flush_cache_range(vma, start, end) do { } while (0) 50 48 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 49 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 51 50 #define flush_dcache_page(page) do { } while (0) 52 51 #define flush_dcache_mmap_lock(mapping) do { } while (0) 53 52 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/m68k/include/asm/cacheflush_mm.h
··· 128 128 } 129 129 } 130 130 131 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 131 132 #define flush_dcache_page(page) __flush_page_to_ram(page_address(page)) 132 133 #define flush_dcache_mmap_lock(mapping) do { } while (0) 133 134 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/m68k/include/asm/cacheflush_no.h
··· 12 12 #define flush_cache_range(vma, start, end) __flush_cache_all() 13 13 #define flush_cache_page(vma, vmaddr) do { } while (0) 14 14 #define flush_dcache_range(start,len) __flush_cache_all() 15 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 15 16 #define flush_dcache_page(page) do { } while (0) 16 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) 17 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/microblaze/include/asm/cacheflush.h
··· 37 37 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 38 38 39 39 #define flush_dcache_range(start, end) __invalidate_dcache_range(start, end) 40 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 40 41 #define flush_dcache_page(page) do { } while (0) 41 42 #define flush_dcache_mmap_lock(mapping) do { } while (0) 42 43 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/mips/include/asm/cacheflush.h
··· 38 38 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); 39 39 extern void __flush_dcache_page(struct page *page); 40 40 41 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 41 42 static inline void flush_dcache_page(struct page *page) 42 43 { 43 44 if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+1
arch/mn10300/include/asm/cacheflush.h
··· 26 26 #define flush_cache_page(vma, vmaddr, pfn) do {} while (0) 27 27 #define flush_cache_vmap(start, end) do {} while (0) 28 28 #define flush_cache_vunmap(start, end) do {} while (0) 29 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 29 30 #define flush_dcache_page(page) do {} while (0) 30 31 #define flush_dcache_mmap_lock(mapping) do {} while (0) 31 32 #define flush_dcache_mmap_unlock(mapping) do {} while (0)
+1
arch/parisc/include/asm/cacheflush.h
··· 42 42 #define flush_cache_vmap(start, end) flush_cache_all() 43 43 #define flush_cache_vunmap(start, end) flush_cache_all() 44 44 45 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 45 46 extern void flush_dcache_page(struct page *page); 46 47 47 48 #define flush_dcache_mmap_lock(mapping) \
+1
arch/powerpc/include/asm/cacheflush.h
··· 25 25 #define flush_cache_vmap(start, end) do { } while (0) 26 26 #define flush_cache_vunmap(start, end) do { } while (0) 27 27 28 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 28 29 extern void flush_dcache_page(struct page *page); 29 30 #define flush_dcache_mmap_lock(mapping) do { } while (0) 30 31 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/s390/include/asm/cacheflush.h
··· 10 10 #define flush_cache_dup_mm(mm) do { } while (0) 11 11 #define flush_cache_range(vma, start, end) do { } while (0) 12 12 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 13 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 13 14 #define flush_dcache_page(page) do { } while (0) 14 15 #define flush_dcache_mmap_lock(mapping) do { } while (0) 15 16 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/score/include/asm/cacheflush.h
··· 16 16 extern void flush_dcache_range(unsigned long start, unsigned long end); 17 17 18 18 #define flush_cache_dup_mm(mm) do {} while (0) 19 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 19 20 #define flush_dcache_page(page) do {} while (0) 20 21 #define flush_dcache_mmap_lock(mapping) do {} while (0) 21 22 #define flush_dcache_mmap_unlock(mapping) do {} while (0)
+1
arch/sh/include/asm/cacheflush.h
··· 42 42 unsigned long addr, unsigned long pfn); 43 43 extern void flush_cache_range(struct vm_area_struct *vma, 44 44 unsigned long start, unsigned long end); 45 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 45 46 extern void flush_dcache_page(struct page *page); 46 47 extern void flush_icache_range(unsigned long start, unsigned long end); 47 48 extern void flush_icache_page(struct vm_area_struct *vma,
+1
arch/sparc/include/asm/cacheflush_32.h
··· 75 75 76 76 extern void sparc_flush_page_to_ram(struct page *page); 77 77 78 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 78 79 #define flush_dcache_page(page) sparc_flush_page_to_ram(page) 79 80 #define flush_dcache_mmap_lock(mapping) do { } while (0) 80 81 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+1
arch/sparc/include/asm/cacheflush_64.h
··· 37 37 #endif 38 38 39 39 extern void __flush_dcache_range(unsigned long start, unsigned long end); 40 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 40 41 extern void flush_dcache_page(struct page *page); 41 42 42 43 #define flush_icache_page(vma, pg) do { } while(0)
+1
arch/x86/include/asm/cacheflush.h
··· 12 12 unsigned long start, unsigned long end) { } 13 13 static inline void flush_cache_page(struct vm_area_struct *vma, 14 14 unsigned long vmaddr, unsigned long pfn) { } 15 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 15 16 static inline void flush_dcache_page(struct page *page) { } 16 17 static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } 17 18 static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
+1
arch/xtensa/include/asm/cacheflush.h
··· 101 101 #define flush_cache_vmap(start,end) flush_cache_all() 102 102 #define flush_cache_vunmap(start,end) flush_cache_all() 103 103 104 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 104 105 extern void flush_dcache_page(struct page*); 105 106 extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); 106 107 extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+19
block/blk-core.c
··· 2358 2358 rq->rq_disk = bio->bi_bdev->bd_disk; 2359 2359 } 2360 2360 2361 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 2362 + /** 2363 + * rq_flush_dcache_pages - Helper function to flush all pages in a request 2364 + * @rq: the request to be flushed 2365 + * 2366 + * Description: 2367 + * Flush all pages in @rq. 2368 + */ 2369 + void rq_flush_dcache_pages(struct request *rq) 2370 + { 2371 + struct req_iterator iter; 2372 + struct bio_vec *bvec; 2373 + 2374 + rq_for_each_segment(bvec, rq, iter) 2375 + flush_dcache_page(bvec->bv_page); 2376 + } 2377 + EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2378 + #endif 2379 + 2361 2380 /** 2362 2381 * blk_lld_busy - Check if underlying low-level drivers of a device are busy 2363 2382 * @q : the queue of the device being checked
+2
drivers/mtd/mtd_blkdevs.c
··· 59 59 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 60 60 if (tr->readsect(dev, block, buf)) 61 61 return -EIO; 62 + rq_flush_dcache_pages(req); 62 63 return 0; 63 64 64 65 case WRITE: 65 66 if (!tr->writesect) 66 67 return -EIO; 67 68 69 + rq_flush_dcache_pages(req); 68 70 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 69 71 if (tr->writesect(dev, block, buf)) 70 72 return -EIO;
+12
fs/bio.c
··· 1393 1393 } 1394 1394 } 1395 1395 1396 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1397 + void bio_flush_dcache_pages(struct bio *bi) 1398 + { 1399 + int i; 1400 + struct bio_vec *bvec; 1401 + 1402 + bio_for_each_segment(bvec, bi, i) 1403 + flush_dcache_page(bvec->bv_page); 1404 + } 1405 + EXPORT_SYMBOL(bio_flush_dcache_pages); 1406 + #endif 1407 + 1396 1408 /** 1397 1409 * bio_endio - end I/O on a bio 1398 1410 * @bio: bio
+1
include/asm-generic/cacheflush.h
··· 13 13 #define flush_cache_dup_mm(mm) do { } while (0) 14 14 #define flush_cache_range(vma, start, end) do { } while (0) 15 15 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 16 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 16 17 #define flush_dcache_page(page) do { } while (0) 17 18 #define flush_dcache_mmap_lock(mapping) do { } while (0) 18 19 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+12
include/linux/bio.h
··· 391 391 gfp_t, int); 392 392 extern void bio_set_pages_dirty(struct bio *bio); 393 393 extern void bio_check_pages_dirty(struct bio *bio); 394 + 395 + #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 396 + # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 397 + #endif 398 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 399 + extern void bio_flush_dcache_pages(struct bio *bi); 400 + #else 401 + static inline void bio_flush_dcache_pages(struct bio *bi) 402 + { 403 + } 404 + #endif 405 + 394 406 extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *, 395 407 unsigned long, unsigned int, int, gfp_t); 396 408 extern struct bio *bio_copy_user_iov(struct request_queue *,
+11
include/linux/blkdev.h
··· 752 752 #define rq_iter_last(rq, _iter) \ 753 753 (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) 754 754 755 + #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 756 + # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 757 + #endif 758 + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 759 + extern void rq_flush_dcache_pages(struct request *rq); 760 + #else 761 + static inline void rq_flush_dcache_pages(struct request *rq) 762 + { 763 + } 764 + #endif 765 + 755 766 extern int blk_register_queue(struct gendisk *disk); 756 767 extern void blk_unregister_queue(struct gendisk *disk); 757 768 extern void register_disk(struct gendisk *dev);