[PATCH] xip: reduce code duplication

This patch reworks filemap_xip.c to reduce code duplicated from
mm/filemap.c. It applies against 2.6.12-rc6-mm1. Instead of
implementing the aio operations, it implements only the synchronous
read/write functions. For readv and writev, the generic fallback is
used; for aio, we rely on the application falling back to the
synchronous path. Since our "synchronous" functions do the memcpy
immediately anyway, there is no performance difference between using
the fallbacks and implementing each operation.
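
To illustrate the fallback path this relies on: when a file_operations
has no ->readv/->writev method, the VFS walks the iovec and calls the
synchronous ->read/->write entry once per segment. The sketch below is
a simplified model of that loop, illustrative only and not the actual
fs/read_write.c code; the name fallback_readv is ours:

	#include <linux/fs.h>
	#include <linux/uio.h>

	/*
	 * Simplified model of the VFS readv fallback (illustrative only,
	 * not the real fs/read_write.c code): with no ->readv method,
	 * each iovec segment is fed through the ordinary synchronous
	 * ->read entry point.
	 */
	static ssize_t fallback_readv(struct file *filp, const struct iovec *iov,
				      unsigned long nr_segs, loff_t *ppos)
	{
		ssize_t ret = 0;
		unsigned long seg;

		for (seg = 0; seg < nr_segs; seg++) {
			ssize_t nr = filp->f_op->read(filp, iov[seg].iov_base,
						      iov[seg].iov_len, ppos);
			if (nr < 0) {
				if (!ret)
					ret = nr;	/* error only if nothing read yet */
				break;
			}
			ret += nr;
			if (nr != (ssize_t)iov[seg].iov_len)
				break;			/* short read: stop early */
		}
		return ret;
	}

Because xip_file_read() copies the data synchronously, dispatching each
segment through such a loop costs the same as a dedicated readv
implementation would.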

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>


+65 -207
+2 -6
fs/ext2/file.c
···
 #ifdef CONFIG_EXT2_FS_XIP
 struct file_operations ext2_xip_file_operations = {
 	.llseek		= generic_file_llseek,
-	.read		= do_sync_read,
-	.write		= do_sync_write,
-	.aio_read	= xip_file_aio_read,
-	.aio_write	= xip_file_aio_write,
+	.read		= xip_file_read,
+	.write		= xip_file_write,
 	.ioctl		= ext2_ioctl,
 	.mmap		= xip_file_mmap,
 	.open		= generic_file_open,
 	.release	= ext2_release_file,
 	.fsync		= ext2_sync_file,
 	.sendfile	= xip_file_sendfile,
 };
 #endif
+4 -8
include/linux/fs.h
···
 extern int nonseekable_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_FS_XIP
-extern ssize_t xip_file_aio_read(struct kiocb *iocb, char __user *buf,
-				 size_t count, loff_t pos);
-extern ssize_t xip_file_readv(struct file *filp, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t *ppos);
+extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
+			     loff_t *ppos);
 extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
 				 size_t count, read_actor_t actor,
 				 void *target);
 extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
-extern ssize_t xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
-				  size_t count, loff_t pos);
-extern ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
-			       unsigned long nr_segs, loff_t *ppos);
+extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
+			      size_t len, loff_t *ppos);
 extern int xip_truncate_page(struct address_space *mapping, loff_t from);
 #else
 static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
+1 -1
mm/filemap.h
···
 #include <linux/config.h>
 #include <asm/uaccess.h>
 
-extern size_t
+size_t
 __filemap_copy_from_user_iovec(char *vaddr,
 			       const struct iovec *iov,
 			       size_t base,
+58 -192
mm/filemap_xip.c
···
 	file_accessed(filp);
 }
 
-/*
- * This is the "read()" routine for all filesystems
- * that uses the get_xip_page address space operation.
- */
-static ssize_t
-__xip_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-		    unsigned long nr_segs, loff_t *ppos)
-{
-	struct file *filp = iocb->ki_filp;
-	ssize_t retval;
-	unsigned long seg;
-	size_t count;
-
-	count = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
-
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		count += iv->iov_len;
-		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		count -= iv->iov_len;	/* This segment is no good */
-		break;
-	}
-
-	retval = 0;
-	if (count) {
-		for (seg = 0; seg < nr_segs; seg++) {
-			read_descriptor_t desc;
-
-			desc.written = 0;
-			desc.arg.buf = iov[seg].iov_base;
-			desc.count = iov[seg].iov_len;
-			if (desc.count == 0)
-				continue;
-			desc.error = 0;
-			do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-					    ppos, &desc, file_read_actor);
-			retval += desc.written;
-			if (!retval) {
-				retval = desc.error;
-				break;
-			}
-		}
-	}
-	return retval;
-}
-
 ssize_t
-xip_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count,
-		  loff_t pos)
+xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+	read_descriptor_t desc;
 
-	BUG_ON(iocb->ki_pos != pos);
-	return __xip_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	desc.written = 0;
+	desc.arg.buf = buf;
+	desc.count = len;
+	desc.error = 0;
+
+	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+			    ppos, &desc, file_read_actor);
+
+	if (desc.written)
+		return desc.written;
+	else
+		return desc.error;
 }
-EXPORT_SYMBOL_GPL(xip_file_aio_read);
-
-ssize_t
-xip_file_readv(struct file *filp, const struct iovec *iov,
-	       unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
-
-	init_sync_kiocb(&kiocb, filp);
-	return __xip_file_aio_read(&kiocb, iov, nr_segs, ppos);
-}
-EXPORT_SYMBOL_GPL(xip_file_readv);
+EXPORT_SYMBOL_GPL(xip_file_read);
 
 ssize_t
 xip_file_sendfile(struct file *in_file, loff_t *ppos,
···
 EXPORT_SYMBOL_GPL(xip_file_mmap);
 
 static ssize_t
-do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
-		  unsigned long nr_segs, loff_t pos, loff_t *ppos,
-		  size_t count)
+__xip_file_write(struct file *filp, const char __user *buf,
+		 size_t count, loff_t pos, loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
+	struct address_space * mapping = filp->f_mapping;
 	struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode *inode = mapping->host;
 	long status = 0;
 	struct page *page;
 	size_t bytes;
-	const struct iovec *cur_iov = iov;	/* current iovec */
-	size_t iov_base = 0;			/* offset in the current iovec */
-	char __user *buf;
 	ssize_t written = 0;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
-	buf = iov->iov_base;
 	do {
 		unsigned long index;
 		unsigned long offset;
···
 		fault_in_pages_readable(buf, bytes);
 
 		page = a_ops->get_xip_page(mapping,
-					   index*(PAGE_SIZE/512), 0);
+					index*(PAGE_SIZE/512), 0);
 		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
 			/* we allocate a new page unmap it */
 			page = a_ops->get_xip_page(mapping,
-						   index*(PAGE_SIZE/512), 1);
+						index*(PAGE_SIZE/512), 1);
 			if (!IS_ERR(page))
-				/* unmap page at pgoff from all other vmas */
-				__xip_unmap(mapping, index);
-
+				/* unmap page at pgoff from all other vmas */
+				__xip_unmap(mapping, index);
 		}
 
 		if (IS_ERR(page)) {
···
 
 		BUG_ON(!PageUptodate(page));
 
-		if (likely(nr_segs == 1))
-			copied = filemap_copy_from_user(page, offset,
-							buf, bytes);
-		else
-			copied = filemap_copy_from_user_iovec(page, offset,
-					cur_iov, iov_base, bytes);
+		copied = filemap_copy_from_user(page, offset, buf, bytes);
 		flush_dcache_page(page);
 		if (likely(copied > 0)) {
 			status = copied;
···
 			count -= status;
 			pos += status;
 			buf += status;
-			if (unlikely(nr_segs > 1))
-				filemap_set_next_iovec(&cur_iov,
-						       &iov_base, status);
 		}
 	}
 	if (unlikely(copied != bytes))
···
 	return written ? written : status;
 }
 
-static ssize_t
-xip_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
-			  unsigned long nr_segs, loff_t *ppos)
+ssize_t
+xip_file_write(struct file *filp, const char __user *buf, size_t len,
+	       loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
-	size_t ocount;		/* original count */
-	size_t count;		/* after file limit checks */
-	struct inode *inode = mapping->host;
-	unsigned long seg;
-	loff_t pos;
-	ssize_t written;
-	ssize_t err;
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = mapping->host;
+	size_t count;
+	loff_t pos;
+	ssize_t ret;
 
-	ocount = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
+	down(&inode->i_sem);
 
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		ocount += iv->iov_len;
-		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		ocount -= iv->iov_len;	/* This segment is no good */
-		break;
+	if (!access_ok(VERIFY_READ, buf, len)) {
+		ret=-EFAULT;
+		goto out_up;
 	}
 
-	count = ocount;
 	pos = *ppos;
+	count = len;
 
 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
 
-	written = 0;
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
 
-	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-	if (err)
-		goto out;
-
+	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
+	if (ret)
+		goto out_backing;
 	if (count == 0)
-		goto out;
+		goto out_backing;
 
-	err = remove_suid(file->f_dentry);
-	if (err)
-		goto out;
+	ret = remove_suid(filp->f_dentry);
+	if (ret)
+		goto out_backing;
 
 	inode_update_time(inode, 1);
 
-	/* use execute in place to copy directly to disk */
-	written = do_xip_file_write (iocb, iov,
-				     nr_segs, pos, ppos, count);
- out:
-	return written ? written : err;
-}
+	ret = __xip_file_write (filp, buf, count, pos, ppos);
 
-static ssize_t
-__xip_file_write_nolock(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
-
-	init_sync_kiocb(&kiocb, file);
-	return xip_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
-}
-
-ssize_t
-xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
-		   size_t count, loff_t pos)
-{
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-	struct iovec local_iov = { .iov_base = (void __user *)buf,
-				   .iov_len = count };
-
-	BUG_ON(iocb->ki_pos != pos);
-
-	down(&inode->i_sem);
-	ret = xip_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
+ out_backing:
+	current->backing_dev_info = NULL;
+ out_up:
 	up(&inode->i_sem);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(xip_file_aio_write);
-
-ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-
-	down(&inode->i_sem);
-	ret = __xip_file_write_nolock(file, iov, nr_segs, ppos);
-	up(&inode->i_sem);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(xip_file_writev);
+EXPORT_SYMBOL_GPL(xip_file_write);
 
 /*
  * truncate a page used for execute in place
···
 	unsigned length;
 	struct page *page;
 	void *kaddr;
-	int err;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
···
 
 	page = mapping->a_ops->get_xip_page(mapping,
 					    index*(PAGE_SIZE/512), 0);
-	err = -ENOMEM;
 	if (!page)
-		goto out;
+		return -ENOMEM;
 	if (unlikely(IS_ERR(page))) {
-		if (PTR_ERR(page) == -ENODATA) {
+		if (PTR_ERR(page) == -ENODATA)
 			/* Hole? No need to truncate */
 			return 0;
-		} else {
-			err = PTR_ERR(page);
-			goto out;
-		}
+		else
+			return PTR_ERR(page);
 	} else
 		BUG_ON(!PageUptodate(page));
 	kaddr = kmap_atomic(page, KM_USER0);
···
 	kunmap_atomic(kaddr, KM_USER0);
 
 	flush_dcache_page(page);
-	err = 0;
- out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);