[PATCH] xip: reduce code duplication

This patch reworks filemap_xip.c with the goal of reducing code
duplication with mm/filemap.c. It applies against 2.6.12-rc6-mm1.
Instead of implementing the aio functions, it implements only the
synchronous read/write functions. For readv and writev, the generic
fallback is used; for aio, we rely on the application doing the
fallback (a userspace sketch follows the fs/ext2/file.c hunk below).
Since our "synchronous" functions do the memcpy immediately anyway,
there is no performance difference between using the fallbacks and
implementing each operation.
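
For reference, the readv/writev fallback works roughly as below. This
is a simplified, illustrative sketch of do_readv_writev() from 2.6.12's
fs/read_write.c (iovec validation and setup trimmed, helper name made
up): when a file's f_op has no ->readv, the VFS calls the synchronous
->read once per iovec segment, so xip_file_read() is reused unchanged.

/* Illustrative sketch, not part of this patch: how the VFS falls
 * back to ->read when ->readv is absent (simplified from
 * do_readv_writev() in 2.6.12 fs/read_write.c). */
static ssize_t readv_fallback(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *pos)
{
	ssize_t ret = 0;

	while (nr_segs--) {
		/* for an ext2 xip file this calls xip_file_read() */
		ssize_t nr = file->f_op->read(file, iov->iov_base,
					      iov->iov_len, pos);
		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != (ssize_t)iov->iov_len)
			break;	/* short read: stop early */
		iov++;
	}
	return ret;
}

The writev path is symmetric, substituting ->write.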

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Commit eb6fe0c3 (parent 6d79125b), authored by Carsten Otte and committed by Linus Torvalds.

 fs/ext2/file.c     |   8 +--
 include/linux/fs.h |  12 ++--
 mm/filemap.h       |   2 +-
 mm/filemap_xip.c   | 250 +++----------
 4 files changed, 65 insertions(+), 207 deletions(-)

diff --git a/fs/ext2/file.c b/fs/ext2/file.c
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -58,17 +58,13 @@
 #ifdef CONFIG_EXT2_FS_XIP
 struct file_operations ext2_xip_file_operations = {
 	.llseek		= generic_file_llseek,
-	.read		= do_sync_read,
-	.write		= do_sync_write,
-	.aio_read	= xip_file_aio_read,
-	.aio_write	= xip_file_aio_write,
+	.read		= xip_file_read,
+	.write		= xip_file_write,
 	.ioctl		= ext2_ioctl,
 	.mmap		= xip_file_mmap,
 	.open		= generic_file_open,
 	.release	= ext2_release_file,
 	.fsync		= ext2_sync_file,
-	.readv		= xip_file_readv,
-	.writev		= xip_file_writev,
 	.sendfile	= xip_file_sendfile,
 };
 #endif
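
With .aio_read/.aio_write gone from ext2_xip_file_operations,
io_submit() on an xip file is now rejected (in 2.6.12's fs/aio.c,
aio_setup_iocb() returns -EINVAL when the file has no ->aio_read or
->aio_write), and the application is expected to fall back to the
synchronous calls itself. A minimal, illustrative userspace sketch of
that fallback using libaio (the function name is made up):

#include <libaio.h>
#include <unistd.h>

/* Illustrative, not part of this patch: submit an aio read and fall
 * back to a synchronous pread() when the kernel rejects the request,
 * as it now does for xip files. */
ssize_t aio_read_with_fallback(io_context_t ctx, int fd,
			       void *buf, size_t len, off_t off)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;

	io_prep_pread(&cb, fd, buf, len, off);
	if (io_submit(ctx, 1, cbs) == -EINVAL)
		return pread(fd, buf, len, off);	/* synchronous fallback */

	if (io_getevents(ctx, 1, 1, &ev, NULL) < 1)	/* wait for completion */
		return -1;
	return (ssize_t)ev.res;	/* bytes read, or negative error */
}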
diff --git a/include/linux/fs.h b/include/linux/fs.h
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1500,18 +1500,14 @@
 extern int nonseekable_open(struct inode * inode, struct file * filp);
 
 #ifdef CONFIG_FS_XIP
-extern ssize_t xip_file_aio_read(struct kiocb *iocb, char __user *buf,
-				 size_t count, loff_t pos);
-extern ssize_t xip_file_readv(struct file *filp, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t *ppos);
+extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
+			     loff_t *ppos);
 extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
 				 size_t count, read_actor_t actor,
 				 void *target);
 extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
-extern ssize_t xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
-				  size_t count, loff_t pos);
-extern ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
-			       unsigned long nr_segs, loff_t *ppos);
+extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
+			      size_t len, loff_t *ppos);
 extern int xip_truncate_page(struct address_space *mapping, loff_t from);
 #else
 static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
diff --git a/mm/filemap.h b/mm/filemap.h
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -15,7 +15,7 @@
 #include <linux/config.h>
 #include <asm/uaccess.h>
 
-extern size_t
+size_t
 __filemap_copy_from_user_iovec(char *vaddr,
 			       const struct iovec *iov,
 			       size_t base,
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -114,83 +114,28 @@
 	file_accessed(filp);
 }
 
-/*
- * This is the "read()" routine for all filesystems
- * that uses the get_xip_page address space operation.
- */
-static ssize_t
-__xip_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-		    unsigned long nr_segs, loff_t *ppos)
-{
-	struct file *filp = iocb->ki_filp;
-	ssize_t retval;
-	unsigned long seg;
-	size_t count;
-
-	count = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
-
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		count += iv->iov_len;
-		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		count -= iv->iov_len;	/* This segment is no good */
-		break;
-	}
-
-	retval = 0;
-	if (count) {
-		for (seg = 0; seg < nr_segs; seg++) {
-			read_descriptor_t desc;
-
-			desc.written = 0;
-			desc.arg.buf = iov[seg].iov_base;
-			desc.count = iov[seg].iov_len;
-			if (desc.count == 0)
-				continue;
-			desc.error = 0;
-			do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-					    ppos, &desc, file_read_actor);
-			retval += desc.written;
-			if (!retval) {
-				retval = desc.error;
-				break;
-			}
-		}
-	}
-	return retval;
-}
-
 ssize_t
-xip_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count,
-		  loff_t pos)
+xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+	read_descriptor_t desc;
 
-	BUG_ON(iocb->ki_pos != pos);
-	return __xip_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	desc.written = 0;
+	desc.arg.buf = buf;
+	desc.count = len;
+	desc.error = 0;
+
+	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+			    ppos, &desc, file_read_actor);
+
+	if (desc.written)
+		return desc.written;
+	else
+		return desc.error;
 }
-EXPORT_SYMBOL_GPL(xip_file_aio_read);
-
-ssize_t
-xip_file_readv(struct file *filp, const struct iovec *iov,
-	       unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
-
-	init_sync_kiocb(&kiocb, filp);
-	return __xip_file_aio_read(&kiocb, iov, nr_segs, ppos);
-}
-EXPORT_SYMBOL_GPL(xip_file_readv);
+EXPORT_SYMBOL_GPL(xip_file_read);
 
 ssize_t
 xip_file_sendfile(struct file *in_file, loff_t *ppos,
@@ -326,25 +271,19 @@
 EXPORT_SYMBOL_GPL(xip_file_mmap);
 
 static ssize_t
-do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
-		  unsigned long nr_segs, loff_t pos, loff_t *ppos,
-		  size_t count)
+__xip_file_write(struct file *filp, const char __user *buf,
+		 size_t count, loff_t pos, loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
+	struct address_space * mapping = filp->f_mapping;
 	struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode 	*inode = mapping->host;
 	long		status = 0;
 	struct page	*page;
 	size_t		bytes;
-	const struct iovec *cur_iov = iov;	/* current iovec */
-	size_t		iov_base = 0;	/* offset in the current iovec */
-	char __user	*buf;
 	ssize_t		written = 0;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
-	buf = iov->iov_base;
 	do {
 		unsigned long index;
 		unsigned long offset;
@@ -365,15 +304,14 @@
 		fault_in_pages_readable(buf, bytes);
 
 		page = a_ops->get_xip_page(mapping,
-				index*(PAGE_SIZE/512), 0);
+					   index*(PAGE_SIZE/512), 0);
 		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
 			/* we allocate a new page unmap it */
 			page = a_ops->get_xip_page(mapping,
-					index*(PAGE_SIZE/512), 1);
+						   index*(PAGE_SIZE/512), 1);
 			if (!IS_ERR(page))
-			/* unmap page at pgoff from all other vmas */
-			__xip_unmap(mapping, index);
-
+				/* unmap page at pgoff from all other vmas */
+				__xip_unmap(mapping, index);
 		}
 
 		if (IS_ERR(page)) {
@@ -383,12 +321,7 @@
 
 		BUG_ON(!PageUptodate(page));
 
-		if (likely(nr_segs == 1))
-			copied = filemap_copy_from_user(page, offset,
-							buf, bytes);
-		else
-			copied = filemap_copy_from_user_iovec(page, offset,
-						cur_iov, iov_base, bytes);
+		copied = filemap_copy_from_user(page, offset, buf, bytes);
 		flush_dcache_page(page);
 		if (likely(copied > 0)) {
 			status = copied;
@@ -398,9 +331,6 @@
 			count -= status;
 			pos += status;
 			buf += status;
-			if (unlikely(nr_segs > 1))
-				filemap_set_next_iovec(&cur_iov,
-						&iov_base, status);
 		}
 	}
 	if (unlikely(copied != bytes))
@@ -422,110 +352,52 @@
 	return written ? written : status;
 }
 
-static ssize_t
-xip_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
-			  unsigned long nr_segs, loff_t *ppos)
+ssize_t
+xip_file_write(struct file *filp, const char __user *buf, size_t len,
+	       loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
-	size_t ocount;		/* original count */
-	size_t count;		/* after file limit checks */
-	struct inode 	*inode = mapping->host;
-	unsigned long	seg;
-	loff_t		pos;
-	ssize_t		written;
-	ssize_t		err;
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = mapping->host;
+	size_t count;
+	loff_t pos;
+	ssize_t ret;
 
-	ocount = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
+	down(&inode->i_sem);
 
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		ocount += iv->iov_len;
-		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		ocount -= iv->iov_len;	/* This segment is no good */
-		break;
+	if (!access_ok(VERIFY_READ, buf, len)) {
+		ret=-EFAULT;
+		goto out_up;
 	}
 
-	count = ocount;
 	pos = *ppos;
+	count = len;
 
 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
 
-	written = 0;
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
 
-	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-	if (err)
-		goto out;
-
+	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
+	if (ret)
+		goto out_backing;
 	if (count == 0)
-		goto out;
+		goto out_backing;
 
-	err = remove_suid(file->f_dentry);
-	if (err)
-		goto out;
+	ret = remove_suid(filp->f_dentry);
+	if (ret)
+		goto out_backing;
 
 	inode_update_time(inode, 1);
 
-	/* use execute in place to copy directly to disk */
-	written = do_xip_file_write (iocb, iov,
-				     nr_segs, pos, ppos, count);
-out:
-	return written ? written : err;
-}
+	ret = __xip_file_write (filp, buf, count, pos, ppos);
 
-static ssize_t
-__xip_file_write_nolock(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
-
-	init_sync_kiocb(&kiocb, file);
-	return xip_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
-}
-
-ssize_t
-xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
-		   size_t count, loff_t pos)
-{
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-	struct iovec local_iov = { .iov_base = (void __user *)buf,
-				   .iov_len = count };
-
-	BUG_ON(iocb->ki_pos != pos);
-
-	down(&inode->i_sem);
-	ret = xip_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
+ out_backing:
+	current->backing_dev_info = NULL;
+ out_up:
 	up(&inode->i_sem);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(xip_file_aio_write);
-
-ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-
-	down(&inode->i_sem);
-	ret = __xip_file_write_nolock(file, iov, nr_segs, ppos);
-	up(&inode->i_sem);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(xip_file_writev);
+EXPORT_SYMBOL_GPL(xip_file_write);
 
 /*
  * truncate a page used for execute in place
@@ -541,6 +413,5 @@
 	unsigned length;
 	struct page *page;
 	void *kaddr;
-	int err;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
@@ -556,17 +427,14 @@
 
 	page = mapping->a_ops->get_xip_page(mapping,
 					index*(PAGE_SIZE/512), 0);
-	err = -ENOMEM;
 	if (!page)
-		goto out;
+		return -ENOMEM;
 	if (unlikely(IS_ERR(page))) {
-		if (PTR_ERR(page) == -ENODATA) {
+		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
 			return 0;
-		} else {
-			err = PTR_ERR(page);
-			goto out;
-		}
+		else
+			return PTR_ERR(page);
 	} else
 		BUG_ON(!PageUptodate(page));
 	kaddr = kmap_atomic(page, KM_USER0);
@@ -574,8 +442,6 @@
 	kunmap_atomic(kaddr, KM_USER0);
 
 	flush_dcache_page(page);
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);