Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs/buffer.c: clean up EXPORT* macros

According to Documentation/CodingStyle the EXPORT* macro should follow
immediately after the closing function brace line.

Also, mark_buffer_async_write_endio() and do_thaw_all() are not used
elsewhere so they should be marked as static.

In addition, file_fsync() is actually in fs/sync.c so move the EXPORT* to
that file.

Signed-off-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by H Hartley Sweeten; committed by Linus Torvalds.
1fe72eaa 88e0fbc4

+28 -30
+27 -30
fs/buffer.c
··· 52 52 bh->b_end_io = handler; 53 53 bh->b_private = private; 54 54 } 55 + EXPORT_SYMBOL(init_buffer); 55 56 56 57 static int sync_buffer(void *word) 57 58 { ··· 81 80 smp_mb__after_clear_bit(); 82 81 wake_up_bit(&bh->b_state, BH_Lock); 83 82 } 83 + EXPORT_SYMBOL(unlock_buffer); 84 84 85 85 /* 86 86 * Block until a buffer comes unlocked. This doesn't stop it ··· 92 90 { 93 91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); 94 92 } 93 + EXPORT_SYMBOL(__wait_on_buffer); 95 94 96 95 static void 97 96 __clear_page_buffers(struct page *page) ··· 147 144 __end_buffer_read_notouch(bh, uptodate); 148 145 put_bh(bh); 149 146 } 147 + EXPORT_SYMBOL(end_buffer_read_sync); 150 148 151 149 void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 152 150 { ··· 168 164 unlock_buffer(bh); 169 165 put_bh(bh); 170 166 } 167 + EXPORT_SYMBOL(end_buffer_write_sync); 171 168 172 169 /* 173 170 * Various filesystems appear to want __find_get_block to be non-blocking. ··· 277 272 invalidate_bh_lrus(); 278 273 invalidate_mapping_pages(mapping, 0, -1); 279 274 } 275 + EXPORT_SYMBOL(invalidate_bdev); 280 276 281 277 /* 282 278 * Kick pdflush then try to free up some ZONE_NORMAL memory. 
··· 416 410 local_irq_restore(flags); 417 411 return; 418 412 } 413 + EXPORT_SYMBOL(end_buffer_async_write); 419 414 420 415 /* 421 416 * If a page's buffers are under async readin (end_buffer_async_read ··· 445 438 set_buffer_async_read(bh); 446 439 } 447 440 448 - void mark_buffer_async_write_endio(struct buffer_head *bh, 449 - bh_end_io_t *handler) 441 + static void mark_buffer_async_write_endio(struct buffer_head *bh, 442 + bh_end_io_t *handler) 450 443 { 451 444 bh->b_end_io = handler; 452 445 set_buffer_async_write(bh); ··· 560 553 return err; 561 554 } 562 555 563 - void do_thaw_all(struct work_struct *work) 556 + static void do_thaw_all(struct work_struct *work) 564 557 { 565 558 struct super_block *sb; 566 559 char b[BDEVNAME_SIZE]; ··· 1179 1172 } 1180 1173 } 1181 1174 } 1175 + EXPORT_SYMBOL(mark_buffer_dirty); 1182 1176 1183 1177 /* 1184 1178 * Decrement a buffer_head's reference count. If all buffers against a page ··· 1196 1188 } 1197 1189 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 1198 1190 } 1191 + EXPORT_SYMBOL(__brelse); 1199 1192 1200 1193 /* 1201 1194 * bforget() is like brelse(), except it discards any ··· 1215 1206 } 1216 1207 __brelse(bh); 1217 1208 } 1209 + EXPORT_SYMBOL(__bforget); 1218 1210 1219 1211 static struct buffer_head *__bread_slow(struct buffer_head *bh) 1220 1212 { ··· 2228 2218 } 2229 2219 return 0; 2230 2220 } 2221 + EXPORT_SYMBOL(block_read_full_page); 2231 2222 2232 2223 /* utility function for filesystems that need to do work on expanding 2233 2224 * truncates. 
Uses filesystem pagecache writes to allow the filesystem to ··· 2263 2252 out: 2264 2253 return err; 2265 2254 } 2255 + EXPORT_SYMBOL(generic_cont_expand_simple); 2266 2256 2267 2257 static int cont_expand_zero(struct file *file, struct address_space *mapping, 2268 2258 loff_t pos, loff_t *bytes) ··· 2364 2352 out: 2365 2353 return err; 2366 2354 } 2355 + EXPORT_SYMBOL(cont_write_begin); 2367 2356 2368 2357 int block_prepare_write(struct page *page, unsigned from, unsigned to, 2369 2358 get_block_t *get_block) ··· 2375 2362 ClearPageUptodate(page); 2376 2363 return err; 2377 2364 } 2365 + EXPORT_SYMBOL(block_prepare_write); 2378 2366 2379 2367 int block_commit_write(struct page *page, unsigned from, unsigned to) 2380 2368 { ··· 2383 2369 __block_commit_write(inode,page,from,to); 2384 2370 return 0; 2385 2371 } 2372 + EXPORT_SYMBOL(block_commit_write); 2386 2373 2387 2374 /* 2388 2375 * block_page_mkwrite() is not allowed to change the file size as it gets ··· 2441 2426 out: 2442 2427 return ret; 2443 2428 } 2429 + EXPORT_SYMBOL(block_page_mkwrite); 2444 2430 2445 2431 /* 2446 2432 * nobh_write_begin()'s prereads are special: the buffer_heads are freed ··· 2865 2849 out: 2866 2850 return err; 2867 2851 } 2852 + EXPORT_SYMBOL(block_truncate_page); 2868 2853 2869 2854 /* 2870 2855 * The generic ->writepage function for buffer-backed address_spaces ··· 2907 2890 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2908 2891 return __block_write_full_page(inode, page, get_block, wbc, handler); 2909 2892 } 2893 + EXPORT_SYMBOL(block_write_full_page_endio); 2910 2894 2911 2895 /* 2912 2896 * The generic ->writepage function for buffer-backed address_spaces ··· 2918 2900 return block_write_full_page_endio(page, get_block, wbc, 2919 2901 end_buffer_async_write); 2920 2902 } 2921 - 2903 + EXPORT_SYMBOL(block_write_full_page); 2922 2904 2923 2905 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2924 2906 get_block_t *get_block) ··· 2931 2913 
get_block(inode, block, &tmp, 0); 2932 2914 return tmp.b_blocknr; 2933 2915 } 2916 + EXPORT_SYMBOL(generic_block_bmap); 2934 2917 2935 2918 static void end_bio_bh_io_sync(struct bio *bio, int err) 2936 2919 { ··· 3001 2982 bio_put(bio); 3002 2983 return ret; 3003 2984 } 2985 + EXPORT_SYMBOL(submit_bh); 3004 2986 3005 2987 /** 3006 2988 * ll_rw_block: low-level access to block devices (DEPRECATED) ··· 3063 3043 unlock_buffer(bh); 3064 3044 } 3065 3045 } 3046 + EXPORT_SYMBOL(ll_rw_block); 3066 3047 3067 3048 /* 3068 3049 * For a data-integrity writeout, we need to wait upon any in-progress I/O ··· 3092 3071 } 3093 3072 return ret; 3094 3073 } 3074 + EXPORT_SYMBOL(sync_dirty_buffer); 3095 3075 3096 3076 /* 3097 3077 * try_to_free_buffers() checks if all the buffers on this particular page ··· 3207 3185 if (mapping) 3208 3186 blk_run_backing_dev(mapping->backing_dev_info, page); 3209 3187 } 3188 + EXPORT_SYMBOL(block_sync_page); 3210 3189 3211 3190 /* 3212 3191 * There are no bdflush tunables left. 
But distributions are ··· 3384 3361 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3385 3362 hotcpu_notifier(buffer_cpu_notify, 0); 3386 3363 } 3387 - 3388 - EXPORT_SYMBOL(__bforget); 3389 - EXPORT_SYMBOL(__brelse); 3390 - EXPORT_SYMBOL(__wait_on_buffer); 3391 - EXPORT_SYMBOL(block_commit_write); 3392 - EXPORT_SYMBOL(block_prepare_write); 3393 - EXPORT_SYMBOL(block_page_mkwrite); 3394 - EXPORT_SYMBOL(block_read_full_page); 3395 - EXPORT_SYMBOL(block_sync_page); 3396 - EXPORT_SYMBOL(block_truncate_page); 3397 - EXPORT_SYMBOL(block_write_full_page); 3398 - EXPORT_SYMBOL(block_write_full_page_endio); 3399 - EXPORT_SYMBOL(cont_write_begin); 3400 - EXPORT_SYMBOL(end_buffer_read_sync); 3401 - EXPORT_SYMBOL(end_buffer_write_sync); 3402 - EXPORT_SYMBOL(end_buffer_async_write); 3403 - EXPORT_SYMBOL(file_fsync); 3404 - EXPORT_SYMBOL(generic_block_bmap); 3405 - EXPORT_SYMBOL(generic_cont_expand_simple); 3406 - EXPORT_SYMBOL(init_buffer); 3407 - EXPORT_SYMBOL(invalidate_bdev); 3408 - EXPORT_SYMBOL(ll_rw_block); 3409 - EXPORT_SYMBOL(mark_buffer_dirty); 3410 - EXPORT_SYMBOL(submit_bh); 3411 - EXPORT_SYMBOL(sync_dirty_buffer); 3412 - EXPORT_SYMBOL(unlock_buffer);
+1
fs/sync.c
··· 183 183 ret = err; 184 184 return ret; 185 185 } 186 + EXPORT_SYMBOL(file_fsync); 186 187 187 188 /** 188 189 * vfs_fsync_range - helper to sync a range of data & metadata to disk