Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
ide: always ensure that blk_delay_queue() is called if we have pending IO
block: fix request sorting at unplug
dm: improve block integrity support
fs: export empty_aops
ide: ide_requeue_and_plug() reinstate "always plug" behaviour
blk-throttle: don't call xchg on bool
ufs: remove unnecessary blk_flush_plug
block: make the flush insertion use the tail of the dispatch list
block: get rid of elv_insert() interface
block: dump request state on seeing a corrupted request completion

+148 -91
+2 -2
block/blk-core.c
··· 2163 2163 * size, something has gone terribly wrong. 2164 2164 */ 2165 2165 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 2166 - printk(KERN_ERR "blk: request botched\n"); 2166 + blk_dump_rq_flags(req, "request botched"); 2167 2167 req->__data_len = blk_rq_cur_bytes(req); 2168 2168 } 2169 2169 ··· 2665 2665 struct request *rqa = container_of(a, struct request, queuelist); 2666 2666 struct request *rqb = container_of(b, struct request, queuelist); 2667 2667 2668 - return !(rqa->q == rqb->q); 2668 + return !(rqa->q <= rqb->q); 2669 2669 } 2670 2670 2671 2671 static void flush_plug_list(struct blk_plug *plug)
+3 -3
block/blk-flush.c
··· 261 261 q->flush_rq.end_io = flush_end_io; 262 262 263 263 q->flush_pending_idx ^= 1; 264 - elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE); 264 + list_add_tail(&q->flush_rq.queuelist, &q->queue_head); 265 265 return true; 266 266 } 267 267 ··· 281 281 * blk_insert_flush - insert a new FLUSH/FUA request 282 282 * @rq: request to insert 283 283 * 284 - * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions. 284 + * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. 285 285 * @rq is being submitted. Analyze what needs to be done and put it on the 286 286 * right queue. 287 287 * ··· 312 312 */ 313 313 if ((policy & REQ_FSEQ_DATA) && 314 314 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { 315 - list_add(&rq->queuelist, &q->queue_head); 315 + list_add_tail(&rq->queuelist, &q->queue_head); 316 316 return; 317 317 } 318 318
+11 -1
block/blk-integrity.c
··· 30 30 31 31 static struct kmem_cache *integrity_cachep; 32 32 33 + static const char *bi_unsupported_name = "unsupported"; 34 + 33 35 /** 34 36 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements 35 37 * @q: request queue ··· 360 358 .release = blk_integrity_release, 361 359 }; 362 360 361 + bool blk_integrity_is_initialized(struct gendisk *disk) 362 + { 363 + struct blk_integrity *bi = blk_get_integrity(disk); 364 + 365 + return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0); 366 + } 367 + EXPORT_SYMBOL(blk_integrity_is_initialized); 368 + 363 369 /** 364 370 * blk_integrity_register - Register a gendisk as being integrity-capable 365 371 * @disk: struct gendisk pointer to make integrity-aware ··· 417 407 bi->get_tag_fn = template->get_tag_fn; 418 408 bi->tag_size = template->tag_size; 419 409 } else 420 - bi->name = "unsupported"; 410 + bi->name = bi_unsupported_name; 421 411 422 412 return 0; 423 413 }
+2 -2
block/blk-throttle.c
··· 77 77 unsigned long slice_end[2]; 78 78 79 79 /* Some throttle limits got updated for the group */ 80 - bool limits_changed; 80 + int limits_changed; 81 81 }; 82 82 83 83 struct throtl_data ··· 102 102 /* Work for dispatching throttled bios */ 103 103 struct delayed_work throtl_work; 104 104 105 - bool limits_changed; 105 + int limits_changed; 106 106 }; 107 107 108 108 enum tg_state_flags {
+15 -20
block/elevator.c
··· 610 610 611 611 rq->cmd_flags &= ~REQ_STARTED; 612 612 613 - elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); 613 + __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); 614 614 } 615 615 616 616 void elv_drain_elevator(struct request_queue *q) ··· 655 655 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 656 656 } 657 657 658 - void elv_insert(struct request_queue *q, struct request *rq, int where) 658 + void __elv_add_request(struct request_queue *q, struct request *rq, int where) 659 659 { 660 660 trace_block_rq_insert(q, rq); 661 661 662 662 rq->q = q; 663 + 664 + BUG_ON(rq->cmd_flags & REQ_ON_PLUG); 665 + 666 + if (rq->cmd_flags & REQ_SOFTBARRIER) { 667 + /* barriers are scheduling boundary, update end_sector */ 668 + if (rq->cmd_type == REQ_TYPE_FS || 669 + (rq->cmd_flags & REQ_DISCARD)) { 670 + q->end_sector = rq_end_sector(rq); 671 + q->boundary_rq = rq; 672 + } 673 + } else if (!(rq->cmd_flags & REQ_ELVPRIV) && 674 + where == ELEVATOR_INSERT_SORT) 675 + where = ELEVATOR_INSERT_BACK; 663 676 664 677 switch (where) { 665 678 case ELEVATOR_INSERT_REQUEUE: ··· 734 721 __func__, where); 735 722 BUG(); 736 723 } 737 - } 738 - 739 - void __elv_add_request(struct request_queue *q, struct request *rq, int where) 740 - { 741 - BUG_ON(rq->cmd_flags & REQ_ON_PLUG); 742 - 743 - if (rq->cmd_flags & REQ_SOFTBARRIER) { 744 - /* barriers are scheduling boundary, update end_sector */ 745 - if (rq->cmd_type == REQ_TYPE_FS || 746 - (rq->cmd_flags & REQ_DISCARD)) { 747 - q->end_sector = rq_end_sector(rq); 748 - q->boundary_rq = rq; 749 - } 750 - } else if (!(rq->cmd_flags & REQ_ELVPRIV) && 751 - where == ELEVATOR_INSERT_SORT) 752 - where = ELEVATOR_INSERT_BACK; 753 - 754 - elv_insert(q, rq, where); 755 724 } 756 725 EXPORT_SYMBOL(__elv_add_request); 757 726
+21 -22
drivers/ide/ide-io.c
··· 430 430 } 431 431 } 432 432 433 + static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq) 434 + { 435 + if (rq) 436 + blk_requeue_request(q, rq); 437 + if (rq || blk_peek_request(q)) { 438 + /* Use 3ms as that was the old plug delay */ 439 + blk_delay_queue(q, 3); 440 + } 441 + } 442 + 443 + void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) 444 + { 445 + struct request_queue *q = drive->queue; 446 + unsigned long flags; 447 + 448 + spin_lock_irqsave(q->queue_lock, flags); 449 + __ide_requeue_and_plug(q, rq); 450 + spin_unlock_irqrestore(q->queue_lock, flags); 451 + } 452 + 433 453 /* 434 454 * Issue a new request to a device. 435 455 */ ··· 570 550 ide_unlock_host(host); 571 551 plug_device_2: 572 552 spin_lock_irq(q->queue_lock); 573 - 574 - if (rq) { 575 - blk_requeue_request(q, rq); 576 - blk_delay_queue(q, queue_run_ms); 577 - } 578 - } 579 - 580 - void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) 581 - { 582 - struct request_queue *q = drive->queue; 583 - unsigned long flags; 584 - 585 - spin_lock_irqsave(q->queue_lock, flags); 586 - 587 - if (rq) 588 - blk_requeue_request(q, rq); 589 - 590 - spin_unlock_irqrestore(q->queue_lock, flags); 591 - 592 - /* Use 3ms as that was the old plug delay */ 593 - if (rq) 594 - blk_delay_queue(q, 3); 553 + __ide_requeue_and_plug(q, rq); 595 554 } 596 555 597 556 static int drive_is_ready(ide_drive_t *drive)
+80 -34
drivers/md/dm-table.c
··· 927 927 } 928 928 929 929 /* 930 + * Get a disk whose integrity profile reflects the table's profile. 931 + * If %match_all is true, all devices' profiles must match. 932 + * If %match_all is false, all devices must at least have an 933 + * allocated integrity profile; but uninitialized is ok. 934 + * Returns NULL if integrity support was inconsistent or unavailable. 935 + */ 936 + static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t, 937 + bool match_all) 938 + { 939 + struct list_head *devices = dm_table_get_devices(t); 940 + struct dm_dev_internal *dd = NULL; 941 + struct gendisk *prev_disk = NULL, *template_disk = NULL; 942 + 943 + list_for_each_entry(dd, devices, list) { 944 + template_disk = dd->dm_dev.bdev->bd_disk; 945 + if (!blk_get_integrity(template_disk)) 946 + goto no_integrity; 947 + if (!match_all && !blk_integrity_is_initialized(template_disk)) 948 + continue; /* skip uninitialized profiles */ 949 + else if (prev_disk && 950 + blk_integrity_compare(prev_disk, template_disk) < 0) 951 + goto no_integrity; 952 + prev_disk = template_disk; 953 + } 954 + 955 + return template_disk; 956 + 957 + no_integrity: 958 + if (prev_disk) 959 + DMWARN("%s: integrity not set: %s and %s profile mismatch", 960 + dm_device_name(t->md), 961 + prev_disk->disk_name, 962 + template_disk->disk_name); 963 + return NULL; 964 + } 965 + 966 + /* 930 967 * Register the mapped device for blk_integrity support if 931 - * the underlying devices support it. 968 + * the underlying devices have an integrity profile. But all devices 969 + * may not have matching profiles (checking all devices isn't reliable 970 + * during table load because this table may use other DM device(s) which 971 + * must be resumed before they will have an initialized integity profile). 
972 + * Stacked DM devices force a 2 stage integrity profile validation: 973 + * 1 - during load, validate all initialized integrity profiles match 974 + * 2 - during resume, validate all integrity profiles match 932 975 */ 933 976 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md) 934 977 { 935 - struct list_head *devices = dm_table_get_devices(t); 936 - struct dm_dev_internal *dd; 978 + struct gendisk *template_disk = NULL; 937 979 938 - list_for_each_entry(dd, devices, list) 939 - if (bdev_get_integrity(dd->dm_dev.bdev)) { 940 - t->integrity_supported = 1; 941 - return blk_integrity_register(dm_disk(md), NULL); 942 - } 980 + template_disk = dm_table_get_integrity_disk(t, false); 981 + if (!template_disk) 982 + return 0; 943 983 984 + if (!blk_integrity_is_initialized(dm_disk(md))) { 985 + t->integrity_supported = 1; 986 + return blk_integrity_register(dm_disk(md), NULL); 987 + } 988 + 989 + /* 990 + * If DM device already has an initalized integrity 991 + * profile the new profile should not conflict. 992 + */ 993 + if (blk_integrity_is_initialized(template_disk) && 994 + blk_integrity_compare(dm_disk(md), template_disk) < 0) { 995 + DMWARN("%s: conflict with existing integrity profile: " 996 + "%s profile mismatch", 997 + dm_device_name(t->md), 998 + template_disk->disk_name); 999 + return 1; 1000 + } 1001 + 1002 + /* Preserve existing initialized integrity profile */ 1003 + t->integrity_supported = 1; 944 1004 return 0; 945 1005 } 946 1006 ··· 1154 1094 1155 1095 /* 1156 1096 * Set the integrity profile for this device if all devices used have 1157 - * matching profiles. 1097 + * matching profiles. We're quite deep in the resume path but still 1098 + * don't know if all devices (particularly DM devices this device 1099 + * may be stacked on) have matching profiles. Even if the profiles 1100 + * don't match we have no way to fail (to resume) at this point. 
1158 1101 */ 1159 1102 static void dm_table_set_integrity(struct dm_table *t) 1160 1103 { 1161 - struct list_head *devices = dm_table_get_devices(t); 1162 - struct dm_dev_internal *prev = NULL, *dd = NULL; 1104 + struct gendisk *template_disk = NULL; 1163 1105 1164 1106 if (!blk_get_integrity(dm_disk(t->md))) 1165 1107 return; 1166 1108 1167 - list_for_each_entry(dd, devices, list) { 1168 - if (prev && 1169 - blk_integrity_compare(prev->dm_dev.bdev->bd_disk, 1170 - dd->dm_dev.bdev->bd_disk) < 0) { 1171 - DMWARN("%s: integrity not set: %s and %s mismatch", 1172 - dm_device_name(t->md), 1173 - prev->dm_dev.bdev->bd_disk->disk_name, 1174 - dd->dm_dev.bdev->bd_disk->disk_name); 1175 - goto no_integrity; 1176 - } 1177 - prev = dd; 1109 + template_disk = dm_table_get_integrity_disk(t, true); 1110 + if (!template_disk && 1111 + blk_integrity_is_initialized(dm_disk(t->md))) { 1112 + DMWARN("%s: device no longer has a valid integrity profile", 1113 + dm_device_name(t->md)); 1114 + return; 1178 1115 } 1179 - 1180 - if (!prev || !bdev_get_integrity(prev->dm_dev.bdev)) 1181 - goto no_integrity; 1182 - 1183 1116 blk_integrity_register(dm_disk(t->md), 1184 - bdev_get_integrity(prev->dm_dev.bdev)); 1185 - 1186 - return; 1187 - 1188 - no_integrity: 1189 - blk_integrity_register(dm_disk(t->md), NULL); 1190 - 1191 - return; 1117 + blk_get_integrity(template_disk)); 1192 1118 } 1193 1119 1194 1120 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+8 -1
fs/inode.c
··· 125 125 static DECLARE_RWSEM(iprune_sem); 126 126 127 127 /* 128 + * Empty aops. Can be used for the cases where the user does not 129 + * define any of the address_space operations. 130 + */ 131 + const struct address_space_operations empty_aops = { 132 + }; 133 + EXPORT_SYMBOL(empty_aops); 134 + 135 + /* 128 136 * Statistics gathering.. 129 137 */ 130 138 struct inodes_stat_t inodes_stat; ··· 184 176 */ 185 177 int inode_init_always(struct super_block *sb, struct inode *inode) 186 178 { 187 - static const struct address_space_operations empty_aops; 188 179 static const struct inode_operations empty_iops; 189 180 static const struct file_operations empty_fops; 190 181 struct address_space *const mapping = &inode->i_data;
-2
fs/nilfs2/page.c
··· 495 495 void nilfs_mapping_init(struct address_space *mapping, 496 496 struct backing_dev_info *bdi) 497 497 { 498 - static const struct address_space_operations empty_aops; 499 - 500 498 mapping->host = NULL; 501 499 mapping->flags = 0; 502 500 mapping_set_gfp_mask(mapping, GFP_NOFS);
+2 -2
fs/ubifs/xattr.c
··· 56 56 */ 57 57 58 58 #include "ubifs.h" 59 + #include <linux/fs.h> 59 60 #include <linux/slab.h> 60 61 #include <linux/xattr.h> 61 62 #include <linux/posix_acl_xattr.h> ··· 81 80 }; 82 81 83 82 static const struct inode_operations none_inode_operations; 84 - static const struct address_space_operations none_address_operations; 85 83 static const struct file_operations none_file_operations; 86 84 87 85 /** ··· 130 130 } 131 131 132 132 /* Re-define all operations to be "nothing" */ 133 - inode->i_mapping->a_ops = &none_address_operations; 133 + inode->i_mapping->a_ops = &empty_aops; 134 134 inode->i_op = &none_inode_operations; 135 135 inode->i_fop = &none_file_operations; 136 136
-1
fs/ufs/truncate.c
··· 479 479 break; 480 480 if (IS_SYNC(inode) && (inode->i_state & I_DIRTY)) 481 481 ufs_sync_inode (inode); 482 - blk_flush_plug(current); 483 482 yield(); 484 483 } 485 484
+2
include/linux/blkdev.h
··· 1206 1206 struct kobject kobj; 1207 1207 }; 1208 1208 1209 + extern bool blk_integrity_is_initialized(struct gendisk *); 1209 1210 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 1210 1211 extern void blk_integrity_unregister(struct gendisk *); 1211 1212 extern int blk_integrity_compare(struct gendisk *, struct gendisk *); ··· 1263 1262 #define queue_max_integrity_segments(a) (0) 1264 1263 #define blk_integrity_merge_rq(a, b, c) (0) 1265 1264 #define blk_integrity_merge_bio(a, b, c) (0) 1265 + #define blk_integrity_is_initialized(a) (0) 1266 1266 1267 1267 #endif /* CONFIG_BLK_DEV_INTEGRITY */ 1268 1268
-1
include/linux/elevator.h
··· 101 101 extern void elv_dispatch_add_tail(struct request_queue *, struct request *); 102 102 extern void elv_add_request(struct request_queue *, struct request *, int); 103 103 extern void __elv_add_request(struct request_queue *, struct request *, int); 104 - extern void elv_insert(struct request_queue *, struct request *, int); 105 104 extern int elv_merge(struct request_queue *, struct request **, struct bio *); 106 105 extern int elv_try_merge(struct request *, struct bio *); 107 106 extern void elv_merge_requests(struct request_queue *, struct request *,
+2
include/linux/fs.h
··· 613 613 int (*error_remove_page)(struct address_space *, struct page *); 614 614 }; 615 615 616 + extern const struct address_space_operations empty_aops; 617 + 616 618 /* 617 619 * pagecache_write_begin/pagecache_write_end must be used by general code 618 620 * to write into the pagecache.