Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] kernel-doc for mm/filemap.c

mm/filemap.c:
- add lots of kernel-doc;
- fix some typos and kernel-doc errors;
- drop some blank lines between function close and EXPORT_SYMBOL();

Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Randy Dunlap and committed by Linus Torvalds.
485bb99b 800590f5

+120 -53
mm/filemap.c
··· 171 171 } 172 172 173 173 /** 174 - * filemap_fdatawrite_range - start writeback against all of a mapping's 175 - * dirty pages that lie within the byte offsets <start, end> 174 + * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range 176 175 * @mapping: address space structure to write 177 176 * @start: offset in bytes where the range starts 178 177 * @end: offset in bytes where the range ends (inclusive) 179 178 * @sync_mode: enable synchronous operation 180 179 * 180 + * Start writeback against all of a mapping's dirty pages that lie 181 + * within the byte offsets <start, end> inclusive. 182 + * 181 183 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as 182 - * opposed to a regular memory * cleansing writeback. The difference between 184 + * opposed to a regular memory cleansing writeback. The difference between 183 185 * these two operations is that if a dirty page/buffer is encountered, it must 184 186 * be waited upon, and not just skipped over. 185 187 */ ··· 221 219 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); 222 220 } 223 221 224 - /* 222 + /** 223 + * filemap_flush - mostly a non-blocking flush 224 + * @mapping: target address_space 225 + * 225 226 * This is a mostly non-blocking flush. Not suitable for data-integrity 226 227 * purposes - I/O may not be started against all dirty pages. 
227 228 */ ··· 234 229 } 235 230 EXPORT_SYMBOL(filemap_flush); 236 231 237 - /* 232 + /** 233 + * wait_on_page_writeback_range - wait for writeback to complete 234 + * @mapping: target address_space 235 + * @start: beginning page index 236 + * @end: ending page index 237 + * 238 238 * Wait for writeback to complete against pages indexed by start->end 239 239 * inclusive 240 240 */ ··· 286 276 return ret; 287 277 } 288 278 289 - /* 279 + /** 280 + * sync_page_range - write and wait on all pages in the passed range 281 + * @inode: target inode 282 + * @mapping: target address_space 283 + * @pos: beginning offset in pages to write 284 + * @count: number of bytes to write 285 + * 290 286 * Write and wait upon all the pages in the passed range. This is a "data 291 287 * integrity" operation. It waits upon in-flight writeout before starting and 292 288 * waiting upon new writeout. If there was an IO error, return it. ··· 321 305 } 322 306 EXPORT_SYMBOL(sync_page_range); 323 307 324 - /* 308 + /** 309 + * sync_page_range_nolock 310 + * @inode: target inode 311 + * @mapping: target address_space 312 + * @pos: beginning offset in pages to write 313 + * @count: number of bytes to write 314 + * 325 315 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea 326 316 * as it forces O_SYNC writers to different parts of the same file 327 317 * to be serialised right until io completion. ··· 351 329 EXPORT_SYMBOL(sync_page_range_nolock); 352 330 353 331 /** 354 - * filemap_fdatawait - walk the list of under-writeback pages of the given 355 - * address space and wait for all of them. 356 - * 332 + * filemap_fdatawait - wait for all under-writeback pages to complete 357 333 * @mapping: address space structure to wait for 334 + * 335 + * Walk the list of under-writeback pages of the given address space 336 + * and wait for all of them. 
358 337 */ 359 338 int filemap_fdatawait(struct address_space *mapping) 360 339 { ··· 391 368 } 392 369 EXPORT_SYMBOL(filemap_write_and_wait); 393 370 394 - /* 371 + /** 372 + * filemap_write_and_wait_range - write out & wait on a file range 373 + * @mapping: the address_space for the pages 374 + * @lstart: offset in bytes where the range starts 375 + * @lend: offset in bytes where the range ends (inclusive) 376 + * 395 377 * Write out and wait upon file offsets lstart->lend, inclusive. 396 378 * 397 379 * Note that `lend' is inclusive (describes the last byte to be written) so ··· 422 394 return err; 423 395 } 424 396 425 - /* 426 - * This function is used to add newly allocated pagecache pages: 397 + /** 398 + * add_to_page_cache - add newly allocated pagecache pages 399 + * @page: page to add 400 + * @mapping: the page's address_space 401 + * @offset: page index 402 + * @gfp_mask: page allocation mode 403 + * 404 + * This function is used to add newly allocated pagecache pages; 427 405 * the page is new, so we can just run SetPageLocked() against it. 428 406 * The other page state flags were set by rmqueue(). 429 407 * ··· 456 422 } 457 423 return error; 458 424 } 459 - 460 425 EXPORT_SYMBOL(add_to_page_cache); 461 426 462 427 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, ··· 522 489 EXPORT_SYMBOL(wait_on_page_bit); 523 490 524 491 /** 525 - * unlock_page() - unlock a locked page 526 - * 492 + * unlock_page - unlock a locked page 527 493 * @page: the page 528 494 * 529 495 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). ··· 545 513 } 546 514 EXPORT_SYMBOL(unlock_page); 547 515 548 - /* 549 - * End writeback against a page. 
516 + /** 517 + * end_page_writeback - end writeback against a page 518 + * @page: the page 550 519 */ 551 520 void end_page_writeback(struct page *page) 552 521 { ··· 560 527 } 561 528 EXPORT_SYMBOL(end_page_writeback); 562 529 563 - /* 564 - * Get a lock on the page, assuming we need to sleep to get it. 530 + /** 531 + * __lock_page - get a lock on the page, assuming we need to sleep to get it 532 + * @page: the page to lock 565 533 * 566 - * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some 534 + * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some 567 535 * random driver's requestfn sets TASK_RUNNING, we could busywait. However 568 536 * chances are that on the second loop, the block layer's plug list is empty, 569 537 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. ··· 578 544 } 579 545 EXPORT_SYMBOL(__lock_page); 580 546 581 - /* 582 - * a rather lightweight function, finding and getting a reference to a 547 + /** 548 + * find_get_page - find and get a page reference 549 + * @mapping: the address_space to search 550 + * @offset: the page index 551 + * 552 + * A rather lightweight function, finding and getting a reference to a 583 553 * hashed page atomically. 584 554 */ 585 555 struct page * find_get_page(struct address_space *mapping, unsigned long offset) ··· 597 559 read_unlock_irq(&mapping->tree_lock); 598 560 return page; 599 561 } 600 - 601 562 EXPORT_SYMBOL(find_get_page); 602 563 603 - /* 604 - * Same as above, but trylock it instead of incrementing the count. 564 + /** 565 + * find_trylock_page - find and lock a page 566 + * @mapping: the address_space to search 567 + * @offset: the page index 568 + * 569 + * Same as find_get_page(), but trylock it instead of incrementing the count. 
605 570 */ 606 571 struct page *find_trylock_page(struct address_space *mapping, unsigned long offset) 607 572 { ··· 617 576 read_unlock_irq(&mapping->tree_lock); 618 577 return page; 619 578 } 620 - 621 579 EXPORT_SYMBOL(find_trylock_page); 622 580 623 581 /** 624 582 * find_lock_page - locate, pin and lock a pagecache page 625 - * 626 583 * @mapping: the address_space to search 627 584 * @offset: the page index 628 585 * ··· 656 617 read_unlock_irq(&mapping->tree_lock); 657 618 return page; 658 619 } 659 - 660 620 EXPORT_SYMBOL(find_lock_page); 661 621 662 622 /** 663 623 * find_or_create_page - locate or add a pagecache page 664 - * 665 624 * @mapping: the page's address_space 666 625 * @index: the page's index into the mapping 667 626 * @gfp_mask: page allocation mode ··· 700 663 page_cache_release(cached_page); 701 664 return page; 702 665 } 703 - 704 666 EXPORT_SYMBOL(find_or_create_page); 705 667 706 668 /** ··· 765 729 return i; 766 730 } 767 731 768 - /* 732 + /** 733 + * find_get_pages_tag - find and return pages that match @tag 734 + * @mapping: the address_space to search 735 + * @index: the starting page index 736 + * @tag: the tag index 737 + * @nr_pages: the maximum number of pages 738 + * @pages: where the resulting pages are placed 739 + * 769 740 * Like find_get_pages, except we only return pages which are tagged with 770 - * `tag'. We update *index to index the next page for the traversal. 741 + * @tag. We update @index to index the next page for the traversal. 771 742 */ 772 743 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, 773 744 int tag, unsigned int nr_pages, struct page **pages) ··· 793 750 return ret; 794 751 } 795 752 796 - /* 753 + /** 754 + * grab_cache_page_nowait - returns locked page at given index in given cache 755 + * @mapping: target address_space 756 + * @index: the page index 757 + * 797 758 * Same as grab_cache_page, but do not wait if the page is unavailable. 
798 759 * This is intended for speculative data generators, where the data can 799 760 * be regenerated if the page couldn't be grabbed. This routine should ··· 826 779 } 827 780 return page; 828 781 } 829 - 830 782 EXPORT_SYMBOL(grab_cache_page_nowait); 831 783 832 - /* 784 + /** 785 + * do_generic_mapping_read - generic file read routine 786 + * @mapping: address_space to be read 787 + * @_ra: file's readahead state 788 + * @filp: the file to read 789 + * @ppos: current file position 790 + * @desc: read_descriptor 791 + * @actor: read method 792 + * 833 793 * This is a generic file read routine, and uses the 834 - * mapping->a_ops->readpage() function for the actual low-level 835 - * stuff. 794 + * mapping->a_ops->readpage() function for the actual low-level stuff. 836 795 * 837 796 * This is really ugly. But the goto's actually try to clarify some 838 797 * of the logic when it comes to error handling etc. 839 798 * 840 - * Note the struct file* is only passed for the use of readpage. It may be 841 - * NULL. 799 + * Note the struct file* is only passed for the use of readpage. 800 + * It may be NULL. 842 801 */ 843 802 void do_generic_mapping_read(struct address_space *mapping, 844 803 struct file_ra_state *_ra, ··· 1057 1004 if (filp) 1058 1005 file_accessed(filp); 1059 1006 } 1060 - 1061 1007 EXPORT_SYMBOL(do_generic_mapping_read); 1062 1008 1063 1009 int file_read_actor(read_descriptor_t *desc, struct page *page, ··· 1097 1045 return size; 1098 1046 } 1099 1047 1100 - /* 1048 + /** 1049 + * __generic_file_aio_read - generic filesystem read routine 1050 + * @iocb: kernel I/O control block 1051 + * @iov: io vector request 1052 + * @nr_segs: number of segments in the iovec 1053 + * @ppos: current file position 1054 + * 1101 1055 * This is the "read()" routine for all filesystems 1102 1056 * that can use the page cache directly. 
1103 1057 */ ··· 1182 1124 out: 1183 1125 return retval; 1184 1126 } 1185 - 1186 1127 EXPORT_SYMBOL(__generic_file_aio_read); 1187 1128 1188 1129 ssize_t ··· 1192 1135 BUG_ON(iocb->ki_pos != pos); 1193 1136 return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos); 1194 1137 } 1195 - 1196 1138 EXPORT_SYMBOL(generic_file_aio_read); 1197 1139 1198 1140 ssize_t ··· 1207 1151 ret = wait_on_sync_kiocb(&kiocb); 1208 1152 return ret; 1209 1153 } 1210 - 1211 1154 EXPORT_SYMBOL(generic_file_read); 1212 1155 1213 1156 int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size) ··· 1247 1192 return desc.written; 1248 1193 return desc.error; 1249 1194 } 1250 - 1251 1195 EXPORT_SYMBOL(generic_file_sendfile); 1252 1196 1253 1197 static ssize_t ··· 1282 1228 } 1283 1229 1284 1230 #ifdef CONFIG_MMU 1285 - /* 1231 + static int FASTCALL(page_cache_read(struct file * file, unsigned long offset)); 1232 + /** 1233 + * page_cache_read - adds requested page to the page cache if not already there 1234 + * @file: file to read 1235 + * @offset: page index 1236 + * 1286 1237 * This adds the requested page to the page cache if it isn't already there, 1287 1238 * and schedules an I/O to read in its contents from disk. 1288 1239 */ 1289 - static int FASTCALL(page_cache_read(struct file * file, unsigned long offset)); 1290 1240 static int fastcall page_cache_read(struct file * file, unsigned long offset) 1291 1241 { 1292 1242 struct address_space *mapping = file->f_mapping; ··· 1317 1259 1318 1260 #define MMAP_LOTSAMISS (100) 1319 1261 1320 - /* 1262 + /** 1263 + * filemap_nopage - read in file data for page fault handling 1264 + * @area: the applicable vm_area 1265 + * @address: target address to read in 1266 + * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL 1267 + * 1321 1268 * filemap_nopage() is invoked via the vma operations vector for a 1322 1269 * mapped memory region to read in file data during a page fault. 
1323 1270 * ··· 1525 1462 page_cache_release(page); 1526 1463 return NULL; 1527 1464 } 1528 - 1529 1465 EXPORT_SYMBOL(filemap_nopage); 1530 1466 1531 1467 static struct page * filemap_getpage(struct file *file, unsigned long pgoff, ··· 1778 1716 return page; 1779 1717 } 1780 1718 1781 - /* 1719 + /** 1720 + * read_cache_page - read into page cache, fill it if needed 1721 + * @mapping: the page's address_space 1722 + * @index: the page index 1723 + * @filler: function to perform the read 1724 + * @data: destination for read data 1725 + * 1782 1726 * Read into the page cache. If a page already exists, 1783 1727 * and PageUptodate() is not set, try to fill the page. 1784 1728 */ ··· 1822 1754 out: 1823 1755 return page; 1824 1756 } 1825 - 1826 1757 EXPORT_SYMBOL(read_cache_page); 1827 1758 1828 1759 /* ··· 1921 1854 /* 1922 1855 * Performs necessary checks before doing a write 1923 1856 * 1924 - * Can adjust writing position aor amount of bytes to write. 1857 + * Can adjust writing position or amount of bytes to write. 1925 1858 * Returns appropriate error code that caller should return or 1926 1859 * zero in case that write should be allowed. 1927 1860 */