Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-4.14/block' of git://git.kernel.dk/linux-block

Pull block layer updates from Jens Axboe:
"This is the first pull request for 4.14, containing most of the code
changes. It's a quiet series this round, which I think we needed after
the churn of the last few series. This contains:

- Fix for a registration race in loop, from Anton Volkov.

- Overflow complaint fix from Arnd for DAC960.

- Series of drbd changes from the usual suspects.

- Conversion of the stec/skd driver to blk-mq. From Bart.

- A few BFQ improvements/fixes from Paolo.

- CFQ improvement from Ritesh, allowing idling for group idle.

- A few fixes found by Dan's smatch, courtesy of Dan.

- A warning fixup for a race between changing the IO scheduler and
device removal. From David Jeffery.

- A few nbd fixes from Josef.

- Support for cgroup info in blktrace, from Shaohua.

- Also from Shaohua, new features in the null_blk driver to allow it
to actually hold data, among other things.

- Various corner cases and error handling fixes from Weiping Zhang.

- Improvements to the IO stats tracking for blk-mq from me. Can
drastically improve performance for fast devices and/or big
machines.

- Series from Christoph removing bi_bdev as being needed for IO
submission, in preparation for nvme multipathing code.

- Series from Bart, including various cleanups and fixes for switch
fall through case complaints"

* 'for-4.14/block' of git://git.kernel.dk/linux-block: (162 commits)
kernfs: checking for IS_ERR() instead of NULL
drbd: remove BIOSET_NEED_RESCUER flag from drbd_{md_,}io_bio_set
drbd: Fix allyesconfig build, fix recent commit
drbd: switch from kmalloc() to kmalloc_array()
drbd: abort drbd_start_resync if there is no connection
drbd: move global variables to drbd namespace and make some static
drbd: rename "usermode_helper" to "drbd_usermode_helper"
drbd: fix race between handshake and admin disconnect/down
drbd: fix potential deadlock when trying to detach during handshake
drbd: A single dot should be put into a sequence.
drbd: fix rmmod cleanup, remove _all_ debugfs entries
drbd: Use setup_timer() instead of init_timer() to simplify the code.
drbd: fix potential get_ldev/put_ldev refcount imbalance during attach
drbd: new disk-option disable-write-same
drbd: Fix resource role for newly created resources in events2
drbd: mark symbols static where possible
drbd: Send P_NEG_ACK upon write error in protocol != C
drbd: add explicit plugging when submitting batches
drbd: change list_for_each_safe to while(list_first_entry_or_null)
drbd: introduce drbd_recv_header_maybe_unplug
...

+3721 -3523
+6
MAINTAINERS
··· 12561 12561 S: Odd Fixes 12562 12562 F: drivers/net/ethernet/adaptec/starfire* 12563 12563 12564 + STEC S1220 SKD DRIVER 12565 + M: Bart Van Assche <bart.vanassche@wdc.com> 12566 + L: linux-block@vger.kernel.org 12567 + S: Maintained 12568 + F: drivers/block/skd*[ch] 12569 + 12564 12570 STI CEC DRIVER 12565 12571 M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 12566 12572 S: Maintained
+1 -1
arch/powerpc/sysdev/axonram.c
··· 110 110 static blk_qc_t 111 111 axon_ram_make_request(struct request_queue *queue, struct bio *bio) 112 112 { 113 - struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; 113 + struct axon_ram_bank *bank = bio->bi_disk->private_data; 114 114 unsigned long phys_mem, phys_end; 115 115 void *user_mem; 116 116 struct bio_vec vec;
+100 -82
block/bfq-iosched.c
··· 128 128 BFQ_BFQQ_FNS(wait_request); 129 129 BFQ_BFQQ_FNS(non_blocking_wait_rq); 130 130 BFQ_BFQQ_FNS(fifo_expire); 131 - BFQ_BFQQ_FNS(idle_window); 131 + BFQ_BFQQ_FNS(has_short_ttime); 132 132 BFQ_BFQQ_FNS(sync); 133 133 BFQ_BFQQ_FNS(IO_bound); 134 134 BFQ_BFQQ_FNS(in_large_burst); ··· 731 731 unsigned int old_wr_coeff = bfqq->wr_coeff; 732 732 bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); 733 733 734 - if (bic->saved_idle_window) 735 - bfq_mark_bfqq_idle_window(bfqq); 734 + if (bic->saved_has_short_ttime) 735 + bfq_mark_bfqq_has_short_ttime(bfqq); 736 736 else 737 - bfq_clear_bfqq_idle_window(bfqq); 737 + bfq_clear_bfqq_has_short_ttime(bfqq); 738 738 739 739 if (bic->saved_IO_bound) 740 740 bfq_mark_bfqq_IO_bound(bfqq); ··· 2012 2012 return; 2013 2013 2014 2014 bic->saved_ttime = bfqq->ttime; 2015 - bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); 2015 + bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); 2016 2016 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); 2017 2017 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); 2018 2018 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); ··· 3038 3038 } 3039 3039 3040 3040 bfq_log_bfqq(bfqd, bfqq, 3041 - "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, 3042 - slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq)); 3041 + "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, 3042 + slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); 3043 3043 3044 3044 /* 3045 3045 * Increase, decrease or leave budget unchanged according to ··· 3114 3114 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) 3115 3115 { 3116 3116 struct bfq_data *bfqd = bfqq->bfqd; 3117 - bool idling_boosts_thr, idling_boosts_thr_without_issues, 3117 + bool rot_without_queueing = 3118 + !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, 3119 + bfqq_sequential_and_IO_bound, 3120 + idling_boosts_thr, idling_boosts_thr_without_issues, 3118 3121 idling_needed_for_service_guarantees, 3119 3122 
asymmetric_scenario; 3120 3123 ··· 3125 3122 return true; 3126 3123 3127 3124 /* 3125 + * Idling is performed only if slice_idle > 0. In addition, we 3126 + * do not idle if 3127 + * (a) bfqq is async 3128 + * (b) bfqq is in the idle io prio class: in this case we do 3129 + * not idle because we want to minimize the bandwidth that 3130 + * queues in this class can steal to higher-priority queues 3131 + */ 3132 + if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || 3133 + bfq_class_idle(bfqq)) 3134 + return false; 3135 + 3136 + bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && 3137 + bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); 3138 + 3139 + /* 3128 3140 * The next variable takes into account the cases where idling 3129 3141 * boosts the throughput. 3130 3142 * 3131 3143 * The value of the variable is computed considering, first, that 3132 3144 * idling is virtually always beneficial for the throughput if: 3133 - * (a) the device is not NCQ-capable, or 3134 - * (b) regardless of the presence of NCQ, the device is rotational 3135 - * and the request pattern for bfqq is I/O-bound and sequential. 3145 + * (a) the device is not NCQ-capable and rotational, or 3146 + * (b) regardless of the presence of NCQ, the device is rotational and 3147 + * the request pattern for bfqq is I/O-bound and sequential, or 3148 + * (c) regardless of whether it is rotational, the device is 3149 + * not NCQ-capable and the request pattern for bfqq is 3150 + * I/O-bound and sequential. 3136 3151 * 3137 3152 * Secondly, and in contrast to the above item (b), idling an 3138 3153 * NCQ-capable flash-based device would not boost the 3139 3154 * throughput even with sequential I/O; rather it would lower 3140 3155 * the throughput in proportion to how fast the device 3141 3156 * is. 
Accordingly, the next variable is true if any of the 3142 - * above conditions (a) and (b) is true, and, in particular, 3143 - * happens to be false if bfqd is an NCQ-capable flash-based 3144 - * device. 3157 + * above conditions (a), (b) or (c) is true, and, in 3158 + * particular, happens to be false if bfqd is an NCQ-capable 3159 + * flash-based device. 3145 3160 */ 3146 - idling_boosts_thr = !bfqd->hw_tag || 3147 - (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && 3148 - bfq_bfqq_idle_window(bfqq)); 3161 + idling_boosts_thr = rot_without_queueing || 3162 + ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && 3163 + bfqq_sequential_and_IO_bound); 3149 3164 3150 3165 /* 3151 3166 * The value of the next variable, ··· 3334 3313 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); 3335 3314 3336 3315 /* 3337 - * We have now all the components we need to compute the return 3338 - * value of the function, which is true only if both the following 3339 - * conditions hold: 3340 - * 1) bfqq is sync, because idling make sense only for sync queues; 3341 - * 2) idling either boosts the throughput (without issues), or 3342 - * is necessary to preserve service guarantees. 3316 + * We have now all the components we need to compute the 3317 + * return value of the function, which is true only if idling 3318 + * either boosts the throughput (without issues), or is 3319 + * necessary to preserve service guarantees. 
3343 3320 */ 3344 - return bfq_bfqq_sync(bfqq) && 3345 - (idling_boosts_thr_without_issues || 3346 - idling_needed_for_service_guarantees); 3321 + return idling_boosts_thr_without_issues || 3322 + idling_needed_for_service_guarantees; 3347 3323 } 3348 3324 3349 3325 /* ··· 3356 3338 */ 3357 3339 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) 3358 3340 { 3359 - struct bfq_data *bfqd = bfqq->bfqd; 3360 - 3361 - return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 && 3362 - bfq_bfqq_may_idle(bfqq); 3341 + return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); 3363 3342 } 3364 3343 3365 3344 /* ··· 3798 3783 case IOPRIO_CLASS_IDLE: 3799 3784 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; 3800 3785 bfqq->new_ioprio = 7; 3801 - bfq_clear_bfqq_idle_window(bfqq); 3802 3786 break; 3803 3787 } 3804 3788 ··· 3857 3843 bfq_set_next_ioprio_data(bfqq, bic); 3858 3844 3859 3845 if (is_sync) { 3846 + /* 3847 + * No need to mark as has_short_ttime if in 3848 + * idle_class, because no device idling is performed 3849 + * for queues in idle class 3850 + */ 3860 3851 if (!bfq_class_idle(bfqq)) 3861 - bfq_mark_bfqq_idle_window(bfqq); 3852 + /* tentatively mark as has_short_ttime */ 3853 + bfq_mark_bfqq_has_short_ttime(bfqq); 3862 3854 bfq_mark_bfqq_sync(bfqq); 3863 3855 bfq_mark_bfqq_just_created(bfqq); 3864 3856 } else ··· 4005 3985 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); 4006 3986 } 4007 3987 4008 - /* 4009 - * Disable idle window if the process thinks too long or seeks so much that 4010 - * it doesn't matter. 4011 - */ 4012 - static void bfq_update_idle_window(struct bfq_data *bfqd, 4013 - struct bfq_queue *bfqq, 4014 - struct bfq_io_cq *bic) 3988 + static void bfq_update_has_short_ttime(struct bfq_data *bfqd, 3989 + struct bfq_queue *bfqq, 3990 + struct bfq_io_cq *bic) 4015 3991 { 4016 - int enable_idle; 3992 + bool has_short_ttime = true; 4017 3993 4018 - /* Don't idle for async or idle io prio class. 
*/ 4019 - if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) 3994 + /* 3995 + * No need to update has_short_ttime if bfqq is async or in 3996 + * idle io prio class, or if bfq_slice_idle is zero, because 3997 + * no device idling is performed for bfqq in this case. 3998 + */ 3999 + if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || 4000 + bfqd->bfq_slice_idle == 0) 4020 4001 return; 4021 4002 4022 4003 /* Idle window just restored, statistics are meaningless. */ ··· 4025 4004 bfqd->bfq_wr_min_idle_time)) 4026 4005 return; 4027 4006 4028 - enable_idle = bfq_bfqq_idle_window(bfqq); 4029 - 4007 + /* Think time is infinite if no process is linked to 4008 + * bfqq. Otherwise check average think time to 4009 + * decide whether to mark as has_short_ttime 4010 + */ 4030 4011 if (atomic_read(&bic->icq.ioc->active_ref) == 0 || 4031 - bfqd->bfq_slice_idle == 0 || 4032 - (bfqd->hw_tag && BFQQ_SEEKY(bfqq) && 4033 - bfqq->wr_coeff == 1)) 4034 - enable_idle = 0; 4035 - else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) { 4036 - if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle && 4037 - bfqq->wr_coeff == 1) 4038 - enable_idle = 0; 4039 - else 4040 - enable_idle = 1; 4041 - } 4042 - bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", 4043 - enable_idle); 4012 + (bfq_sample_valid(bfqq->ttime.ttime_samples) && 4013 + bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) 4014 + has_short_ttime = false; 4044 4015 4045 - if (enable_idle) 4046 - bfq_mark_bfqq_idle_window(bfqq); 4016 + bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", 4017 + has_short_ttime); 4018 + 4019 + if (has_short_ttime) 4020 + bfq_mark_bfqq_has_short_ttime(bfqq); 4047 4021 else 4048 - bfq_clear_bfqq_idle_window(bfqq); 4022 + bfq_clear_bfqq_has_short_ttime(bfqq); 4049 4023 } 4050 4024 4051 4025 /* ··· 4056 4040 bfqq->meta_pending++; 4057 4041 4058 4042 bfq_update_io_thinktime(bfqd, bfqq); 4043 + bfq_update_has_short_ttime(bfqd, bfqq, bic); 4059 4044 bfq_update_io_seektime(bfqd, bfqq, 
rq); 4060 - if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || 4061 - !BFQQ_SEEKY(bfqq)) 4062 - bfq_update_idle_window(bfqd, bfqq, bic); 4063 4045 4064 4046 bfq_log_bfqq(bfqd, bfqq, 4065 - "rq_enqueued: idle_window=%d (seeky %d)", 4066 - bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq)); 4047 + "rq_enqueued: has_short_ttime=%d (seeky %d)", 4048 + bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); 4067 4049 4068 4050 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 4069 4051 ··· 4801 4787 return sprintf(page, "%u\n", var); 4802 4788 } 4803 4789 4804 - static ssize_t bfq_var_store(unsigned long *var, const char *page, 4805 - size_t count) 4790 + static void bfq_var_store(unsigned long *var, const char *page) 4806 4791 { 4807 4792 unsigned long new_val; 4808 4793 int ret = kstrtoul(page, 10, &new_val); 4809 4794 4810 4795 if (ret == 0) 4811 4796 *var = new_val; 4812 - 4813 - return count; 4814 4797 } 4815 4798 4816 4799 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ ··· 4849 4838 { \ 4850 4839 struct bfq_data *bfqd = e->elevator_data; \ 4851 4840 unsigned long uninitialized_var(__data); \ 4852 - int ret = bfq_var_store(&__data, (page), count); \ 4841 + bfq_var_store(&__data, (page)); \ 4853 4842 if (__data < (MIN)) \ 4854 4843 __data = (MIN); \ 4855 4844 else if (__data > (MAX)) \ ··· 4860 4849 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ 4861 4850 else \ 4862 4851 *(__PTR) = __data; \ 4863 - return ret; \ 4852 + return count; \ 4864 4853 } 4865 4854 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, 4866 4855 INT_MAX, 2); ··· 4877 4866 { \ 4878 4867 struct bfq_data *bfqd = e->elevator_data; \ 4879 4868 unsigned long uninitialized_var(__data); \ 4880 - int ret = bfq_var_store(&__data, (page), count); \ 4869 + bfq_var_store(&__data, (page)); \ 4881 4870 if (__data < (MIN)) \ 4882 4871 __data = (MIN); \ 4883 4872 else if (__data > (MAX)) \ 4884 4873 __data = (MAX); \ 4885 4874 *(__PTR) = (u64)__data * NSEC_PER_USEC; \ 4886 - return ret; \ 
4875 + return count; \ 4887 4876 } 4888 4877 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, 4889 4878 UINT_MAX); ··· 4894 4883 { 4895 4884 struct bfq_data *bfqd = e->elevator_data; 4896 4885 unsigned long uninitialized_var(__data); 4897 - int ret = bfq_var_store(&__data, (page), count); 4886 + 4887 + bfq_var_store(&__data, (page)); 4898 4888 4899 4889 if (__data == 0) 4900 4890 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); ··· 4907 4895 4908 4896 bfqd->bfq_user_max_budget = __data; 4909 4897 4910 - return ret; 4898 + return count; 4911 4899 } 4912 4900 4913 4901 /* ··· 4919 4907 { 4920 4908 struct bfq_data *bfqd = e->elevator_data; 4921 4909 unsigned long uninitialized_var(__data); 4922 - int ret = bfq_var_store(&__data, (page), count); 4910 + 4911 + bfq_var_store(&__data, (page)); 4923 4912 4924 4913 if (__data < 1) 4925 4914 __data = 1; ··· 4931 4918 if (bfqd->bfq_user_max_budget == 0) 4932 4919 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); 4933 4920 4934 - return ret; 4921 + return count; 4935 4922 } 4936 4923 4937 4924 static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, ··· 4939 4926 { 4940 4927 struct bfq_data *bfqd = e->elevator_data; 4941 4928 unsigned long uninitialized_var(__data); 4942 - int ret = bfq_var_store(&__data, (page), count); 4929 + 4930 + bfq_var_store(&__data, (page)); 4943 4931 4944 4932 if (__data > 1) 4945 4933 __data = 1; ··· 4950 4936 4951 4937 bfqd->strict_guarantees = __data; 4952 4938 4953 - return ret; 4939 + return count; 4954 4940 } 4955 4941 4956 4942 static ssize_t bfq_low_latency_store(struct elevator_queue *e, ··· 4958 4944 { 4959 4945 struct bfq_data *bfqd = e->elevator_data; 4960 4946 unsigned long uninitialized_var(__data); 4961 - int ret = bfq_var_store(&__data, (page), count); 4947 + 4948 + bfq_var_store(&__data, (page)); 4962 4949 4963 4950 if (__data > 1) 4964 4951 __data = 1; ··· 4967 4952 bfq_end_wr(bfqd); 4968 4953 bfqd->low_latency = __data; 4969 4954 4970 - return ret; 
4955 + return count; 4971 4956 } 4972 4957 4973 4958 #define BFQ_ATTR(name) \ ··· 5013 4998 .elevator_name = "bfq", 5014 4999 .elevator_owner = THIS_MODULE, 5015 5000 }; 5001 + MODULE_ALIAS("bfq-iosched"); 5016 5002 5017 5003 static int __init bfq_init(void) 5018 5004 { ··· 5064 5048 5065 5049 ret = elv_register(&iosched_bfq_mq); 5066 5050 if (ret) 5067 - goto err_pol_unreg; 5051 + goto slab_kill; 5068 5052 5069 5053 return 0; 5070 5054 5055 + slab_kill: 5056 + bfq_slab_kill(); 5071 5057 err_pol_unreg: 5072 5058 #ifdef CONFIG_BFQ_GROUP_IOSCHED 5073 5059 blkcg_policy_unregister(&blkcg_policy_bfq);
+14 -11
block/bfq-iosched.h
··· 360 360 uint64_t blkcg_serial_nr; /* the current blkcg serial */ 361 361 #endif 362 362 /* 363 - * Snapshot of the idle window before merging; taken to 364 - * remember this value while the queue is merged, so as to be 365 - * able to restore it in case of split. 363 + * Snapshot of the has_short_time flag before merging; taken 364 + * to remember its value while the queue is merged, so as to 365 + * be able to restore it in case of split. 366 366 */ 367 - bool saved_idle_window; 367 + bool saved_has_short_ttime; 368 368 /* 369 369 * Same purpose as the previous two fields for the I/O bound 370 370 * classification of a queue. ··· 638 638 * without idling the device 639 639 */ 640 640 BFQQF_fifo_expire, /* FIFO checked in this slice */ 641 - BFQQF_idle_window, /* slice idling enabled */ 641 + BFQQF_has_short_ttime, /* queue has a short think time */ 642 642 BFQQF_sync, /* synchronous queue */ 643 643 BFQQF_IO_bound, /* 644 644 * bfqq has timed-out at least once ··· 667 667 BFQ_BFQQ_FNS(wait_request); 668 668 BFQ_BFQQ_FNS(non_blocking_wait_rq); 669 669 BFQ_BFQQ_FNS(fifo_expire); 670 - BFQ_BFQQ_FNS(idle_window); 670 + BFQ_BFQQ_FNS(has_short_ttime); 671 671 BFQ_BFQQ_FNS(sync); 672 672 BFQ_BFQQ_FNS(IO_bound); 673 673 BFQ_BFQQ_FNS(in_large_burst); ··· 929 929 struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 930 930 931 931 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ 932 - blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\ 933 - bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ 934 - bfqq_group(bfqq)->blkg_path, ##args); \ 932 + blk_add_cgroup_trace_msg((bfqd)->queue, \ 933 + bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \ 934 + "bfq%d%c " fmt, (bfqq)->pid, \ 935 + bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \ 935 936 } while (0) 936 937 937 - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \ 938 - blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args) 938 + #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) 
do { \ 939 + blk_add_cgroup_trace_msg((bfqd)->queue, \ 940 + bfqg_to_blkg(bfqg)->blkcg, fmt, ##args); \ 941 + } while (0) 939 942 940 943 #else /* CONFIG_BFQ_GROUP_IOSCHED */ 941 944
+12 -14
block/bio-integrity.c
··· 146 146 iv = bip->bip_vec + bip->bip_vcnt; 147 147 148 148 if (bip->bip_vcnt && 149 - bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), 149 + bvec_gap_to_prev(bio->bi_disk->queue, 150 150 &bip->bip_vec[bip->bip_vcnt - 1], offset)) 151 151 return 0; 152 152 ··· 190 190 static blk_status_t bio_integrity_process(struct bio *bio, 191 191 struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn) 192 192 { 193 - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 193 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); 194 194 struct blk_integrity_iter iter; 195 195 struct bvec_iter bviter; 196 196 struct bio_vec bv; ··· 199 199 void *prot_buf = page_address(bip->bip_vec->bv_page) + 200 200 bip->bip_vec->bv_offset; 201 201 202 - iter.disk_name = bio->bi_bdev->bd_disk->disk_name; 202 + iter.disk_name = bio->bi_disk->disk_name; 203 203 iter.interval = 1 << bi->interval_exp; 204 204 iter.seed = proc_iter->bi_sector; 205 205 iter.prot_buf = prot_buf; ··· 236 236 bool bio_integrity_prep(struct bio *bio) 237 237 { 238 238 struct bio_integrity_payload *bip; 239 - struct blk_integrity *bi; 240 - struct request_queue *q; 239 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); 240 + struct request_queue *q = bio->bi_disk->queue; 241 241 void *buf; 242 242 unsigned long start, end; 243 243 unsigned int len, nr_pages; ··· 245 245 unsigned int intervals; 246 246 blk_status_t status; 247 247 248 - bi = bdev_get_integrity(bio->bi_bdev); 249 - q = bdev_get_queue(bio->bi_bdev); 248 + if (!bi) 249 + return true; 250 + 250 251 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE) 251 252 return true; 252 253 ··· 256 255 257 256 /* Already protected? 
*/ 258 257 if (bio_integrity(bio)) 259 - return true; 260 - 261 - if (bi == NULL) 262 258 return true; 263 259 264 260 if (bio_data_dir(bio) == READ) { ··· 352 354 struct bio_integrity_payload *bip = 353 355 container_of(work, struct bio_integrity_payload, bip_work); 354 356 struct bio *bio = bip->bip_bio; 355 - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 357 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); 356 358 struct bvec_iter iter = bio->bi_iter; 357 359 358 360 /* ··· 385 387 */ 386 388 bool __bio_integrity_endio(struct bio *bio) 387 389 { 388 - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 390 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); 389 391 struct bio_integrity_payload *bip = bio_integrity(bio); 390 392 391 393 if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && ··· 411 413 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) 412 414 { 413 415 struct bio_integrity_payload *bip = bio_integrity(bio); 414 - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 416 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); 415 417 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); 416 418 417 419 bip->bip_iter.bi_sector += bytes_done >> 9; ··· 428 430 void bio_integrity_trim(struct bio *bio) 429 431 { 430 432 struct bio_integrity_payload *bip = bio_integrity(bio); 431 - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 433 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); 432 434 433 435 bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); 434 436 }
+17 -13
block/bio.c
··· 593 593 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio)); 594 594 595 595 /* 596 - * most users will be overriding ->bi_bdev with a new target, 596 + * most users will be overriding ->bi_disk with a new target, 597 597 * so we don't set nor calculate new physical/hw segment counts here 598 598 */ 599 - bio->bi_bdev = bio_src->bi_bdev; 599 + bio->bi_disk = bio_src->bi_disk; 600 600 bio_set_flag(bio, BIO_CLONED); 601 601 bio->bi_opf = bio_src->bi_opf; 602 602 bio->bi_write_hint = bio_src->bi_write_hint; ··· 681 681 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); 682 682 if (!bio) 683 683 return NULL; 684 - bio->bi_bdev = bio_src->bi_bdev; 684 + bio->bi_disk = bio_src->bi_disk; 685 685 bio->bi_opf = bio_src->bi_opf; 686 686 bio->bi_write_hint = bio_src->bi_write_hint; 687 687 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; ··· 936 936 * 937 937 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from 938 938 * bio_endio() on failure. 939 + * 940 + * WARNING: Unlike to how submit_bio() is usually used, this function does not 941 + * result in bio reference to be consumed. The caller must drop the reference 942 + * on his own. 
939 943 */ 940 944 int submit_bio_wait(struct bio *bio) 941 945 { ··· 1736 1732 } 1737 1733 } 1738 1734 1739 - void generic_start_io_acct(int rw, unsigned long sectors, 1740 - struct hd_struct *part) 1735 + void generic_start_io_acct(struct request_queue *q, int rw, 1736 + unsigned long sectors, struct hd_struct *part) 1741 1737 { 1742 1738 int cpu = part_stat_lock(); 1743 1739 1744 - part_round_stats(cpu, part); 1740 + part_round_stats(q, cpu, part); 1745 1741 part_stat_inc(cpu, part, ios[rw]); 1746 1742 part_stat_add(cpu, part, sectors[rw], sectors); 1747 - part_inc_in_flight(part, rw); 1743 + part_inc_in_flight(q, part, rw); 1748 1744 1749 1745 part_stat_unlock(); 1750 1746 } 1751 1747 EXPORT_SYMBOL(generic_start_io_acct); 1752 1748 1753 - void generic_end_io_acct(int rw, struct hd_struct *part, 1754 - unsigned long start_time) 1749 + void generic_end_io_acct(struct request_queue *q, int rw, 1750 + struct hd_struct *part, unsigned long start_time) 1755 1751 { 1756 1752 unsigned long duration = jiffies - start_time; 1757 1753 int cpu = part_stat_lock(); 1758 1754 1759 1755 part_stat_add(cpu, part, ticks[rw], duration); 1760 - part_round_stats(cpu, part); 1761 - part_dec_in_flight(part, rw); 1756 + part_round_stats(q, cpu, part); 1757 + part_dec_in_flight(q, part, rw); 1762 1758 1763 1759 part_stat_unlock(); 1764 1760 } ··· 1830 1826 goto again; 1831 1827 } 1832 1828 1833 - if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { 1834 - trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, 1829 + if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) { 1830 + trace_block_bio_complete(bio->bi_disk->queue, bio, 1835 1831 blk_status_to_errno(bio->bi_status)); 1836 1832 bio_clear_flag(bio, BIO_TRACE_COMPLETION); 1837 1833 }
+5 -3
block/blk-cgroup.c
··· 1067 1067 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); 1068 1068 if (!blkcg) { 1069 1069 ret = ERR_PTR(-ENOMEM); 1070 - goto free_blkcg; 1070 + goto unlock; 1071 1071 } 1072 1072 } 1073 1073 ··· 1111 1111 for (i--; i >= 0; i--) 1112 1112 if (blkcg->cpd[i]) 1113 1113 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]); 1114 - free_blkcg: 1115 - kfree(blkcg); 1114 + 1115 + if (blkcg != &blkcg_root) 1116 + kfree(blkcg); 1117 + unlock: 1116 1118 mutex_unlock(&blkcg_pol_mutex); 1117 1119 return ret; 1118 1120 }
+81 -72
block/blk-core.c
··· 280 280 void blk_start_queue(struct request_queue *q) 281 281 { 282 282 lockdep_assert_held(q->queue_lock); 283 - WARN_ON(!irqs_disabled()); 283 + WARN_ON(!in_interrupt() && !irqs_disabled()); 284 284 WARN_ON_ONCE(q->mq_ops); 285 285 286 286 queue_flag_clear(QUEUE_FLAG_STOPPED, q); ··· 1469 1469 __elv_add_request(q, rq, where); 1470 1470 } 1471 1471 1472 - static void part_round_stats_single(int cpu, struct hd_struct *part, 1473 - unsigned long now) 1472 + static void part_round_stats_single(struct request_queue *q, int cpu, 1473 + struct hd_struct *part, unsigned long now, 1474 + unsigned int inflight) 1474 1475 { 1475 - int inflight; 1476 - 1477 - if (now == part->stamp) 1478 - return; 1479 - 1480 - inflight = part_in_flight(part); 1481 1476 if (inflight) { 1482 1477 __part_stat_add(cpu, part, time_in_queue, 1483 1478 inflight * (now - part->stamp)); ··· 1483 1488 1484 1489 /** 1485 1490 * part_round_stats() - Round off the performance stats on a struct disk_stats. 1491 + * @q: target block queue 1486 1492 * @cpu: cpu number for stats access 1487 1493 * @part: target partition 1488 1494 * ··· 1498 1502 * /proc/diskstats. This accounts immediately for all queue usage up to 1499 1503 * the current jiffies and restarts the counters again. 
1500 1504 */ 1501 - void part_round_stats(int cpu, struct hd_struct *part) 1505 + void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part) 1502 1506 { 1507 + struct hd_struct *part2 = NULL; 1503 1508 unsigned long now = jiffies; 1509 + unsigned int inflight[2]; 1510 + int stats = 0; 1504 1511 1505 - if (part->partno) 1506 - part_round_stats_single(cpu, &part_to_disk(part)->part0, now); 1507 - part_round_stats_single(cpu, part, now); 1512 + if (part->stamp != now) 1513 + stats |= 1; 1514 + 1515 + if (part->partno) { 1516 + part2 = &part_to_disk(part)->part0; 1517 + if (part2->stamp != now) 1518 + stats |= 2; 1519 + } 1520 + 1521 + if (!stats) 1522 + return; 1523 + 1524 + part_in_flight(q, part, inflight); 1525 + 1526 + if (stats & 2) 1527 + part_round_stats_single(q, cpu, part2, now, inflight[1]); 1528 + if (stats & 1) 1529 + part_round_stats_single(q, cpu, part, now, inflight[0]); 1508 1530 } 1509 1531 EXPORT_SYMBOL_GPL(part_round_stats); 1510 1532 ··· 1910 1896 return BLK_QC_T_NONE; 1911 1897 } 1912 1898 1913 - /* 1914 - * If bio->bi_dev is a partition, remap the location 1915 - */ 1916 - static inline void blk_partition_remap(struct bio *bio) 1917 - { 1918 - struct block_device *bdev = bio->bi_bdev; 1919 - 1920 - /* 1921 - * Zone reset does not include bi_size so bio_sectors() is always 0. 1922 - * Include a test for the reset op code and perform the remap if needed. 
1923 - */ 1924 - if (bdev != bdev->bd_contains && 1925 - (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) { 1926 - struct hd_struct *p = bdev->bd_part; 1927 - 1928 - bio->bi_iter.bi_sector += p->start_sect; 1929 - bio->bi_bdev = bdev->bd_contains; 1930 - 1931 - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1932 - bdev->bd_dev, 1933 - bio->bi_iter.bi_sector - p->start_sect); 1934 - } 1935 - } 1936 - 1937 1899 static void handle_bad_sector(struct bio *bio) 1938 1900 { 1939 1901 char b[BDEVNAME_SIZE]; 1940 1902 1941 1903 printk(KERN_INFO "attempt to access beyond end of device\n"); 1942 1904 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", 1943 - bdevname(bio->bi_bdev, b), 1944 - bio->bi_opf, 1905 + bio_devname(bio, b), bio->bi_opf, 1945 1906 (unsigned long long)bio_end_sector(bio), 1946 - (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); 1907 + (long long)get_capacity(bio->bi_disk)); 1947 1908 } 1948 1909 1949 1910 #ifdef CONFIG_FAIL_MAKE_REQUEST ··· 1957 1968 #endif /* CONFIG_FAIL_MAKE_REQUEST */ 1958 1969 1959 1970 /* 1971 + * Remap block n of partition p to block n+start(p) of the disk. 1972 + */ 1973 + static inline int blk_partition_remap(struct bio *bio) 1974 + { 1975 + struct hd_struct *p; 1976 + int ret = 0; 1977 + 1978 + /* 1979 + * Zone reset does not include bi_size so bio_sectors() is always 0. 1980 + * Include a test for the reset op code and perform the remap if needed. 
1981 + */ 1982 + if (!bio->bi_partno || 1983 + (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)) 1984 + return 0; 1985 + 1986 + rcu_read_lock(); 1987 + p = __disk_get_part(bio->bi_disk, bio->bi_partno); 1988 + if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) { 1989 + bio->bi_iter.bi_sector += p->start_sect; 1990 + bio->bi_partno = 0; 1991 + trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), 1992 + bio->bi_iter.bi_sector - p->start_sect); 1993 + } else { 1994 + printk("%s: fail for partition %d\n", __func__, bio->bi_partno); 1995 + ret = -EIO; 1996 + } 1997 + rcu_read_unlock(); 1998 + 1999 + return ret; 2000 + } 2001 + 2002 + /* 1960 2003 * Check whether this bio extends beyond the end of the device. 1961 2004 */ 1962 2005 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) ··· 1999 1978 return 0; 2000 1979 2001 1980 /* Test device or partition size, when known. */ 2002 - maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1981 + maxsector = get_capacity(bio->bi_disk); 2003 1982 if (maxsector) { 2004 1983 sector_t sector = bio->bi_iter.bi_sector; 2005 1984 ··· 2024 2003 int nr_sectors = bio_sectors(bio); 2025 2004 blk_status_t status = BLK_STS_IOERR; 2026 2005 char b[BDEVNAME_SIZE]; 2027 - struct hd_struct *part; 2028 2006 2029 2007 might_sleep(); 2030 2008 2031 2009 if (bio_check_eod(bio, nr_sectors)) 2032 2010 goto end_io; 2033 2011 2034 - q = bdev_get_queue(bio->bi_bdev); 2012 + q = bio->bi_disk->queue; 2035 2013 if (unlikely(!q)) { 2036 2014 printk(KERN_ERR 2037 2015 "generic_make_request: Trying to access " 2038 2016 "nonexistent block-device %s (%Lu)\n", 2039 - bdevname(bio->bi_bdev, b), 2040 - (long long) bio->bi_iter.bi_sector); 2017 + bio_devname(bio, b), (long long)bio->bi_iter.bi_sector); 2041 2018 goto end_io; 2042 2019 } 2043 2020 ··· 2047 2028 if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) 2048 2029 goto not_supported; 2049 2030 2050 - part = bio->bi_bdev->bd_part; 2051 - if 
(should_fail_request(part, bio->bi_iter.bi_size) || 2052 - should_fail_request(&part_to_disk(part)->part0, 2053 - bio->bi_iter.bi_size)) 2031 + if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) 2054 2032 goto end_io; 2055 2033 2056 - /* 2057 - * If this device has partitions, remap block n 2058 - * of partition p to block n+start(p) of the disk. 2059 - */ 2060 - blk_partition_remap(bio); 2034 + if (blk_partition_remap(bio)) 2035 + goto end_io; 2061 2036 2062 2037 if (bio_check_eod(bio, nr_sectors)) 2063 2038 goto end_io; ··· 2080 2067 goto not_supported; 2081 2068 break; 2082 2069 case REQ_OP_WRITE_SAME: 2083 - if (!bdev_write_same(bio->bi_bdev)) 2070 + if (!q->limits.max_write_same_sectors) 2084 2071 goto not_supported; 2085 2072 break; 2086 2073 case REQ_OP_ZONE_REPORT: 2087 2074 case REQ_OP_ZONE_RESET: 2088 - if (!bdev_is_zoned(bio->bi_bdev)) 2075 + if (!blk_queue_is_zoned(q)) 2089 2076 goto not_supported; 2090 2077 break; 2091 2078 case REQ_OP_WRITE_ZEROES: 2092 - if (!bdev_write_zeroes_sectors(bio->bi_bdev)) 2079 + if (!q->limits.max_write_zeroes_sectors) 2093 2080 goto not_supported; 2094 2081 break; 2095 2082 default: ··· 2196 2183 bio_list_init(&bio_list_on_stack[0]); 2197 2184 current->bio_list = bio_list_on_stack; 2198 2185 do { 2199 - struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2186 + struct request_queue *q = bio->bi_disk->queue; 2200 2187 2201 2188 if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) { 2202 2189 struct bio_list lower, same; ··· 2214 2201 bio_list_init(&lower); 2215 2202 bio_list_init(&same); 2216 2203 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) 2217 - if (q == bdev_get_queue(bio->bi_bdev)) 2204 + if (q == bio->bi_disk->queue) 2218 2205 bio_list_add(&same, bio); 2219 2206 else 2220 2207 bio_list_add(&lower, bio); ··· 2257 2244 unsigned int count; 2258 2245 2259 2246 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) 2260 - count = bdev_logical_block_size(bio->bi_bdev) >> 9; 2247 + 
count = queue_logical_block_size(bio->bi_disk->queue); 2261 2248 else 2262 2249 count = bio_sectors(bio); 2263 2250 ··· 2274 2261 current->comm, task_pid_nr(current), 2275 2262 op_is_write(bio_op(bio)) ? "WRITE" : "READ", 2276 2263 (unsigned long long)bio->bi_iter.bi_sector, 2277 - bdevname(bio->bi_bdev, b), 2278 - count); 2264 + bio_devname(bio, b), count); 2279 2265 } 2280 2266 } 2281 2267 ··· 2443 2431 2444 2432 part_stat_inc(cpu, part, ios[rw]); 2445 2433 part_stat_add(cpu, part, ticks[rw], duration); 2446 - part_round_stats(cpu, part); 2447 - part_dec_in_flight(part, rw); 2434 + part_round_stats(req->q, cpu, part); 2435 + part_dec_in_flight(req->q, part, rw); 2448 2436 2449 2437 hd_struct_put(part); 2450 2438 part_stat_unlock(); ··· 2501 2489 part = &rq->rq_disk->part0; 2502 2490 hd_struct_get(part); 2503 2491 } 2504 - part_round_stats(cpu, part); 2505 - part_inc_in_flight(part, rw); 2492 + part_round_stats(rq->q, cpu, part); 2493 + part_inc_in_flight(rq->q, part, rw); 2506 2494 rq->part = part; 2507 2495 } 2508 2496 ··· 2615 2603 } 2616 2604 EXPORT_SYMBOL(blk_peek_request); 2617 2605 2618 - void blk_dequeue_request(struct request *rq) 2606 + static void blk_dequeue_request(struct request *rq) 2619 2607 { 2620 2608 struct request_queue *q = rq->q; 2621 2609 ··· 2642 2630 * Description: 2643 2631 * Dequeue @req and start timeout timer on it. This hands off the 2644 2632 * request to the driver. 2645 - * 2646 - * Block internal functions which don't want to start timer should 2647 - * call blk_dequeue_request(). 2648 2633 */ 2649 2634 void blk_start_request(struct request *req) 2650 2635 { ··· 3044 3035 rq->__data_len = bio->bi_iter.bi_size; 3045 3036 rq->bio = rq->biotail = bio; 3046 3037 3047 - if (bio->bi_bdev) 3048 - rq->rq_disk = bio->bi_bdev->bd_disk; 3038 + if (bio->bi_disk) 3039 + rq->rq_disk = bio->bi_disk; 3049 3040 } 3050 3041 3051 3042 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+13 -13
block/blk-flush.c
··· 1 1 /* 2 - * Functions to sequence FLUSH and FUA writes. 2 + * Functions to sequence PREFLUSH and FUA writes. 3 3 * 4 4 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics 5 5 * Copyright (C) 2011 Tejun Heo <tj@kernel.org> 6 6 * 7 7 * This file is released under the GPLv2. 8 8 * 9 - * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three 9 + * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisted of three 10 10 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request 11 11 * properties and hardware capability. 12 12 * ··· 16 16 * REQ_FUA means that the data must be on non-volatile media on request 17 17 * completion. 18 18 * 19 - * If the device doesn't have writeback cache, FLUSH and FUA don't make any 20 - * difference. The requests are either completed immediately if there's no 21 - * data or executed as normal requests otherwise. 19 + * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any 20 + * difference. The requests are either completed immediately if there's no data 21 + * or executed as normal requests otherwise. 22 22 * 23 23 * If the device has writeback cache and supports FUA, REQ_PREFLUSH is 24 24 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA. ··· 31 31 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a 32 32 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush 33 33 * completes, all the requests which were pending are proceeded to the next 34 - * step. This allows arbitrary merging of different types of FLUSH/FUA 34 + * step. This allows arbitrary merging of different types of PREFLUSH/FUA 35 35 * requests. 36 36 * 37 37 * Currently, the following conditions are used to determine when to issue ··· 47 47 * C3. The second condition is ignored if there is a request which has 48 48 * waited longer than FLUSH_PENDING_TIMEOUT. 
This is to avoid 49 49 * starvation in the unlikely case where there are continuous stream of 50 - * FUA (without FLUSH) requests. 50 + * FUA (without PREFLUSH) requests. 51 51 * 52 52 * For devices which support FUA, it isn't clear whether C2 (and thus C3) 53 53 * is beneficial. 54 54 * 55 - * Note that a sequenced FLUSH/FUA request with DATA is completed twice. 55 + * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice. 56 56 * Once while executing DATA and again after the whole sequence is 57 57 * complete. The first completion updates the contained bio but doesn't 58 58 * finish it so that the bio submitter is notified only after the whole 59 59 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in 60 60 * req_bio_endio(). 61 61 * 62 - * The above peculiarity requires that each FLUSH/FUA request has only one 62 + * The above peculiarity requires that each PREFLUSH/FUA request has only one 63 63 * bio attached to it, which is guaranteed as they aren't allowed to be 64 64 * merged in the usual way. 65 65 */ ··· 76 76 #include "blk-mq-tag.h" 77 77 #include "blk-mq-sched.h" 78 78 79 - /* FLUSH/FUA sequences */ 79 + /* PREFLUSH/FUA sequences */ 80 80 enum { 81 81 REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */ 82 82 REQ_FSEQ_DATA = (1 << 1), /* data write in progress */ ··· 148 148 149 149 /** 150 150 * blk_flush_complete_seq - complete flush sequence 151 - * @rq: FLUSH/FUA request being sequenced 151 + * @rq: PREFLUSH/FUA request being sequenced 152 152 * @fq: flush queue 153 153 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero) 154 154 * @error: whether an error occurred ··· 406 406 } 407 407 408 408 /** 409 - * blk_insert_flush - insert a new FLUSH/FUA request 409 + * blk_insert_flush - insert a new PREFLUSH/FUA request 410 410 * @rq: request to insert 411 411 * 412 412 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. 
··· 525 525 return -ENXIO; 526 526 527 527 bio = bio_alloc(gfp_mask, 0); 528 - bio->bi_bdev = bdev; 528 + bio_set_dev(bio, bdev); 529 529 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 530 530 531 531 ret = submit_bio_wait(bio);
+4 -4
block/blk-lib.c
··· 77 77 78 78 bio = next_bio(bio, 0, gfp_mask); 79 79 bio->bi_iter.bi_sector = sector; 80 - bio->bi_bdev = bdev; 80 + bio_set_dev(bio, bdev); 81 81 bio_set_op_attrs(bio, op, 0); 82 82 83 83 bio->bi_iter.bi_size = req_sects << 9; ··· 168 168 while (nr_sects) { 169 169 bio = next_bio(bio, 1, gfp_mask); 170 170 bio->bi_iter.bi_sector = sector; 171 - bio->bi_bdev = bdev; 171 + bio_set_dev(bio, bdev); 172 172 bio->bi_vcnt = 1; 173 173 bio->bi_io_vec->bv_page = page; 174 174 bio->bi_io_vec->bv_offset = 0; ··· 241 241 while (nr_sects) { 242 242 bio = next_bio(bio, 0, gfp_mask); 243 243 bio->bi_iter.bi_sector = sector; 244 - bio->bi_bdev = bdev; 244 + bio_set_dev(bio, bdev); 245 245 bio->bi_opf = REQ_OP_WRITE_ZEROES; 246 246 if (flags & BLKDEV_ZERO_NOUNMAP) 247 247 bio->bi_opf |= REQ_NOUNMAP; ··· 323 323 bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), 324 324 gfp_mask); 325 325 bio->bi_iter.bi_sector = sector; 326 - bio->bi_bdev = bdev; 326 + bio_set_dev(bio, bdev); 327 327 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 328 328 329 329 while (nr_sects != 0) {
+3 -3
block/blk-merge.c
··· 633 633 cpu = part_stat_lock(); 634 634 part = req->part; 635 635 636 - part_round_stats(cpu, part); 637 - part_dec_in_flight(part, rq_data_dir(req)); 636 + part_round_stats(req->q, cpu, part); 637 + part_dec_in_flight(req->q, part, rq_data_dir(req)); 638 638 639 639 hd_struct_put(part); 640 640 part_stat_unlock(); ··· 786 786 return false; 787 787 788 788 /* must be same device and not a special request */ 789 - if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) 789 + if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq)) 790 790 return false; 791 791 792 792 /* only merge integrity protected bio into ditto rq */
+1 -3
block/blk-mq-debugfs.c
··· 48 48 static const char *const blk_queue_flag_name[] = { 49 49 QUEUE_FLAG_NAME(QUEUED), 50 50 QUEUE_FLAG_NAME(STOPPED), 51 - QUEUE_FLAG_NAME(SYNCFULL), 52 - QUEUE_FLAG_NAME(ASYNCFULL), 53 51 QUEUE_FLAG_NAME(DYING), 54 52 QUEUE_FLAG_NAME(BYPASS), 55 53 QUEUE_FLAG_NAME(BIDI), ··· 742 744 return seq_release(inode, file); 743 745 } 744 746 745 - const struct file_operations blk_mq_debugfs_fops = { 747 + static const struct file_operations blk_mq_debugfs_fops = { 746 748 .open = blk_mq_debugfs_open, 747 749 .read = seq_read, 748 750 .write = blk_mq_debugfs_write,
+18 -7
block/blk-mq-tag.c
··· 214 214 bitnr += tags->nr_reserved_tags; 215 215 rq = tags->rqs[bitnr]; 216 216 217 - if (rq->q == hctx->queue) 217 + /* 218 + * We can hit rq == NULL here, because the tagging functions 219 + * test and set the bit before assining ->rqs[]. 220 + */ 221 + if (rq && rq->q == hctx->queue) 218 222 iter_data->fn(hctx, rq, iter_data->data, reserved); 219 223 return true; 220 224 } ··· 252 248 253 249 if (!reserved) 254 250 bitnr += tags->nr_reserved_tags; 255 - rq = tags->rqs[bitnr]; 256 251 257 - iter_data->fn(rq, iter_data->data, reserved); 252 + /* 253 + * We can hit rq == NULL here, because the tagging functions 254 + * test and set the bit before assining ->rqs[]. 255 + */ 256 + rq = tags->rqs[bitnr]; 257 + if (rq) 258 + iter_data->fn(rq, iter_data->data, reserved); 259 + 258 260 return true; 259 261 } 260 262 ··· 298 288 } 299 289 EXPORT_SYMBOL(blk_mq_tagset_busy_iter); 300 290 301 - int blk_mq_reinit_tagset(struct blk_mq_tag_set *set) 291 + int blk_mq_reinit_tagset(struct blk_mq_tag_set *set, 292 + int (reinit_request)(void *, struct request *)) 302 293 { 303 294 int i, j, ret = 0; 304 295 305 - if (!set->ops->reinit_request) 296 + if (WARN_ON_ONCE(!reinit_request)) 306 297 goto out; 307 298 308 299 for (i = 0; i < set->nr_hw_queues; i++) { ··· 316 305 if (!tags->static_rqs[j]) 317 306 continue; 318 307 319 - ret = set->ops->reinit_request(set->driver_data, 320 - tags->static_rqs[j]); 308 + ret = reinit_request(set->driver_data, 309 + tags->static_rqs[j]); 321 310 if (ret) 322 311 goto out; 323 312 }
+49 -5
block/blk-mq.c
··· 83 83 sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw); 84 84 } 85 85 86 + struct mq_inflight { 87 + struct hd_struct *part; 88 + unsigned int *inflight; 89 + }; 90 + 91 + static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, 92 + struct request *rq, void *priv, 93 + bool reserved) 94 + { 95 + struct mq_inflight *mi = priv; 96 + 97 + if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) && 98 + !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) { 99 + /* 100 + * index[0] counts the specific partition that was asked 101 + * for. index[1] counts the ones that are active on the 102 + * whole device, so increment that if mi->part is indeed 103 + * a partition, and not a whole device. 104 + */ 105 + if (rq->part == mi->part) 106 + mi->inflight[0]++; 107 + if (mi->part->partno) 108 + mi->inflight[1]++; 109 + } 110 + } 111 + 112 + void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, 113 + unsigned int inflight[2]) 114 + { 115 + struct mq_inflight mi = { .part = part, .inflight = inflight, }; 116 + 117 + inflight[0] = inflight[1] = 0; 118 + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); 119 + } 120 + 86 121 void blk_freeze_queue_start(struct request_queue *q) 87 122 { 88 123 int freeze_depth; ··· 659 624 container_of(work, struct request_queue, requeue_work.work); 660 625 LIST_HEAD(rq_list); 661 626 struct request *rq, *next; 662 - unsigned long flags; 663 627 664 - spin_lock_irqsave(&q->requeue_lock, flags); 628 + spin_lock_irq(&q->requeue_lock); 665 629 list_splice_init(&q->requeue_list, &rq_list); 666 - spin_unlock_irqrestore(&q->requeue_lock, flags); 630 + spin_unlock_irq(&q->requeue_lock); 667 631 668 632 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 669 633 if (!(rq->rq_flags & RQF_SOFTBARRIER)) ··· 1136 1102 { 1137 1103 int srcu_idx; 1138 1104 1105 + /* 1106 + * We should be running this queue from one of the CPUs that 1107 + * are mapped to it. 
1108 + */ 1139 1109 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && 1140 1110 cpu_online(hctx->next_cpu)); 1111 + 1112 + /* 1113 + * We can't run the queue inline with ints disabled. Ensure that 1114 + * we catch bad users of this early. 1115 + */ 1116 + WARN_ON_ONCE(in_interrupt()); 1141 1117 1142 1118 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 1143 1119 rcu_read_lock(); ··· 1262 1218 /* 1263 1219 * This function is often used for pausing .queue_rq() by driver when 1264 1220 * there isn't enough resource or some conditions aren't satisfied, and 1265 - * BLK_MQ_RQ_QUEUE_BUSY is usually returned. 1221 + * BLK_STS_RESOURCE is usually returned. 1266 1222 * 1267 1223 * We do not guarantee that dispatch can be drained or blocked 1268 1224 * after blk_mq_stop_hw_queue() returns. Please use ··· 1279 1235 /* 1280 1236 * This function is often used for pausing .queue_rq() by driver when 1281 1237 * there isn't enough resource or some conditions aren't satisfied, and 1282 - * BLK_MQ_RQ_QUEUE_BUSY is usually returned. 1238 + * BLK_STS_RESOURCE is usually returned. 1283 1239 * 1284 1240 * We do not guarantee that dispatch can be drained or blocked 1285 1241 * after blk_mq_stop_hw_queues() returns. Please use
+3
block/blk-mq.h
··· 133 133 return hctx->nr_ctx && hctx->tags; 134 134 } 135 135 136 + void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, 137 + unsigned int inflight[2]); 138 + 136 139 #endif
+1
block/blk-settings.c
··· 68 68 69 69 void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) 70 70 { 71 + WARN_ON_ONCE(q->mq_ops); 71 72 q->rq_timed_out_fn = fn; 72 73 } 73 74 EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+2
block/blk-sysfs.c
··· 931 931 if (WARN_ON(!q)) 932 932 return; 933 933 934 + mutex_lock(&q->sysfs_lock); 934 935 queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q); 936 + mutex_unlock(&q->sysfs_lock); 935 937 936 938 wbt_exit(q); 937 939
-1
block/blk-tag.c
··· 290 290 */ 291 291 clear_bit_unlock(tag, bqt->tag_map); 292 292 } 293 - EXPORT_SYMBOL(blk_queue_end_tag); 294 293 295 294 /** 296 295 * blk_queue_start_tag - find a free tag and assign it
+3 -10
block/blk-throttle.c
··· 373 373 if (likely(!blk_trace_note_message_enabled(__td->queue))) \ 374 374 break; \ 375 375 if ((__tg)) { \ 376 - char __pbuf[128]; \ 377 - \ 378 - blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \ 379 - blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \ 376 + blk_add_cgroup_trace_msg(__td->queue, \ 377 + tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\ 380 378 } else { \ 381 379 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \ 382 380 } \ ··· 2112 2114 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) 2113 2115 { 2114 2116 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2115 - int ret; 2116 - 2117 - ret = bio_associate_current(bio); 2118 - if (ret == 0 || ret == -EBUSY) 2117 + if (bio->bi_css) 2119 2118 bio->bi_cg_private = tg; 2120 2119 blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio)); 2121 - #else 2122 - bio_associate_current(bio); 2123 2120 #endif 2124 2121 } 2125 2122
+2 -2
block/blk-zoned.c
··· 116 116 if (!bio) 117 117 return -ENOMEM; 118 118 119 - bio->bi_bdev = bdev; 119 + bio_set_dev(bio, bdev); 120 120 bio->bi_iter.bi_sector = blk_zone_start(q, sector); 121 121 bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0); 122 122 ··· 234 234 235 235 bio = bio_alloc(gfp_mask, 0); 236 236 bio->bi_iter.bi_sector = sector; 237 - bio->bi_bdev = bdev; 237 + bio_set_dev(bio, bdev); 238 238 bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0); 239 239 240 240 ret = submit_bio_wait(bio);
+2 -1
block/blk.h
··· 64 64 struct bio *bio); 65 65 void blk_queue_bypass_start(struct request_queue *q); 66 66 void blk_queue_bypass_end(struct request_queue *q); 67 - void blk_dequeue_request(struct request *rq); 68 67 void __blk_queue_free_tags(struct request_queue *q); 69 68 void blk_freeze_queue(struct request_queue *q); 70 69 ··· 202 203 if (e->type->ops.sq.elevator_deactivate_req_fn) 203 204 e->type->ops.sq.elevator_deactivate_req_fn(q, rq); 204 205 } 206 + 207 + struct hd_struct *__disk_get_part(struct gendisk *disk, int partno); 205 208 206 209 #ifdef CONFIG_FAIL_IO_TIMEOUT 207 210 int blk_should_fake_timeout(struct request_queue *);
-7
block/bsg.c
··· 932 932 933 933 return ret; 934 934 } 935 - /* 936 - * block device ioctls 937 - */ 938 935 default: 939 - #if 0 940 - return ioctl_by_bdev(bd->bdev, cmd, arg); 941 - #else 942 936 return -ENOTTY; 943 - #endif 944 937 } 945 938 } 946 939
+14 -17
block/cfq-iosched.c
··· 656 656 } 657 657 658 658 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \ 659 - char __pbuf[128]; \ 660 - \ 661 - blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \ 662 - blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \ 659 + blk_add_cgroup_trace_msg((cfqd)->queue, \ 660 + cfqg_to_blkg((cfqq)->cfqg)->blkcg, \ 661 + "cfq%d%c%c " fmt, (cfqq)->pid, \ 663 662 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ 664 663 cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\ 665 - __pbuf, ##args); \ 664 + ##args); \ 666 665 } while (0) 667 666 668 667 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \ 669 - char __pbuf[128]; \ 670 - \ 671 - blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \ 672 - blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \ 668 + blk_add_cgroup_trace_msg((cfqd)->queue, \ 669 + cfqg_to_blkg(cfqg)->blkcg, fmt, ##args); \ 673 670 } while (0) 674 671 675 672 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, ··· 2934 2937 * for devices that support queuing, otherwise we still have a problem 2935 2938 * with sync vs async workloads. 
2936 2939 */ 2937 - if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) 2940 + if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag && 2941 + !cfqd->cfq_group_idle) 2938 2942 return; 2939 2943 2940 2944 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); ··· 4712 4714 return sprintf(page, "%u\n", var); 4713 4715 } 4714 4716 4715 - static ssize_t 4716 - cfq_var_store(unsigned int *var, const char *page, size_t count) 4717 + static void 4718 + cfq_var_store(unsigned int *var, const char *page) 4717 4719 { 4718 4720 char *p = (char *) page; 4719 4721 4720 4722 *var = simple_strtoul(p, &p, 10); 4721 - return count; 4722 4723 } 4723 4724 4724 4725 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ ··· 4763 4766 { \ 4764 4767 struct cfq_data *cfqd = e->elevator_data; \ 4765 4768 unsigned int __data; \ 4766 - int ret = cfq_var_store(&__data, (page), count); \ 4769 + cfq_var_store(&__data, (page)); \ 4767 4770 if (__data < (MIN)) \ 4768 4771 __data = (MIN); \ 4769 4772 else if (__data > (MAX)) \ ··· 4772 4775 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ 4773 4776 else \ 4774 4777 *(__PTR) = __data; \ 4775 - return ret; \ 4778 + return count; \ 4776 4779 } 4777 4780 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 4778 4781 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, ··· 4797 4800 { \ 4798 4801 struct cfq_data *cfqd = e->elevator_data; \ 4799 4802 unsigned int __data; \ 4800 - int ret = cfq_var_store(&__data, (page), count); \ 4803 + cfq_var_store(&__data, (page)); \ 4801 4804 if (__data < (MIN)) \ 4802 4805 __data = (MIN); \ 4803 4806 else if (__data > (MAX)) \ 4804 4807 __data = (MAX); \ 4805 4808 *(__PTR) = (u64)__data * NSEC_PER_USEC; \ 4806 - return ret; \ 4809 + return count; \ 4807 4810 } 4808 4811 USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX); 4809 4812 USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
+4 -5
block/deadline-iosched.c
··· 373 373 return sprintf(page, "%d\n", var); 374 374 } 375 375 376 - static ssize_t 377 - deadline_var_store(int *var, const char *page, size_t count) 376 + static void 377 + deadline_var_store(int *var, const char *page) 378 378 { 379 379 char *p = (char *) page; 380 380 381 381 *var = simple_strtol(p, &p, 10); 382 - return count; 383 382 } 384 383 385 384 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ ··· 402 403 { \ 403 404 struct deadline_data *dd = e->elevator_data; \ 404 405 int __data; \ 405 - int ret = deadline_var_store(&__data, (page), count); \ 406 + deadline_var_store(&__data, (page)); \ 406 407 if (__data < (MIN)) \ 407 408 __data = (MIN); \ 408 409 else if (__data > (MAX)) \ ··· 411 412 *(__PTR) = msecs_to_jiffies(__data); \ 412 413 else \ 413 414 *(__PTR) = __data; \ 414 - return ret; \ 415 + return count; \ 415 416 } 416 417 STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); 417 418 STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+4
block/elevator.c
··· 1055 1055 char elevator_name[ELV_NAME_MAX]; 1056 1056 struct elevator_type *e; 1057 1057 1058 + /* Make sure queue is not in the middle of being removed */ 1059 + if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) 1060 + return -ENOENT; 1061 + 1058 1062 /* 1059 1063 * Special case for mq, turn off scheduling 1060 1064 */
+71 -20
block/genhd.c
··· 45 45 static void disk_del_events(struct gendisk *disk); 46 46 static void disk_release_events(struct gendisk *disk); 47 47 48 + void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw) 49 + { 50 + if (q->mq_ops) 51 + return; 52 + 53 + atomic_inc(&part->in_flight[rw]); 54 + if (part->partno) 55 + atomic_inc(&part_to_disk(part)->part0.in_flight[rw]); 56 + } 57 + 58 + void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw) 59 + { 60 + if (q->mq_ops) 61 + return; 62 + 63 + atomic_dec(&part->in_flight[rw]); 64 + if (part->partno) 65 + atomic_dec(&part_to_disk(part)->part0.in_flight[rw]); 66 + } 67 + 68 + void part_in_flight(struct request_queue *q, struct hd_struct *part, 69 + unsigned int inflight[2]) 70 + { 71 + if (q->mq_ops) { 72 + blk_mq_in_flight(q, part, inflight); 73 + return; 74 + } 75 + 76 + inflight[0] = atomic_read(&part->in_flight[0]) + 77 + atomic_read(&part->in_flight[1]); 78 + if (part->partno) { 79 + part = &part_to_disk(part)->part0; 80 + inflight[1] = atomic_read(&part->in_flight[0]) + 81 + atomic_read(&part->in_flight[1]); 82 + } 83 + } 84 + 85 + struct hd_struct *__disk_get_part(struct gendisk *disk, int partno) 86 + { 87 + struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl); 88 + 89 + if (unlikely(partno < 0 || partno >= ptbl->len)) 90 + return NULL; 91 + return rcu_dereference(ptbl->part[partno]); 92 + } 93 + 48 94 /** 49 95 * disk_get_part - get partition 50 96 * @disk: disk to look partition from ··· 107 61 */ 108 62 struct hd_struct *disk_get_part(struct gendisk *disk, int partno) 109 63 { 110 - struct hd_struct *part = NULL; 111 - struct disk_part_tbl *ptbl; 112 - 113 - if (unlikely(partno < 0)) 114 - return NULL; 64 + struct hd_struct *part; 115 65 116 66 rcu_read_lock(); 117 - 118 - ptbl = rcu_dereference(disk->part_tbl); 119 - if (likely(partno < ptbl->len)) { 120 - part = rcu_dereference(ptbl->part[partno]); 121 - if (part) 122 - get_device(part_to_dev(part)); 123 - } 124 - 67 
+ part = __disk_get_part(disk, partno); 68 + if (part) 69 + get_device(part_to_dev(part)); 125 70 rcu_read_unlock(); 126 71 127 72 return part; ··· 1135 1098 * original ptbl is freed using RCU callback. 1136 1099 * 1137 1100 * LOCKING: 1138 - * Matching bd_mutx locked. 1101 + * Matching bd_mutex locked or the caller is the only user of @disk. 1139 1102 */ 1140 1103 static void disk_replace_part_tbl(struct gendisk *disk, 1141 1104 struct disk_part_tbl *new_ptbl) 1142 1105 { 1143 - struct disk_part_tbl *old_ptbl = disk->part_tbl; 1106 + struct disk_part_tbl *old_ptbl = 1107 + rcu_dereference_protected(disk->part_tbl, 1); 1144 1108 1145 1109 rcu_assign_pointer(disk->part_tbl, new_ptbl); 1146 1110 ··· 1160 1122 * uses RCU to allow unlocked dereferencing for stats and other stuff. 1161 1123 * 1162 1124 * LOCKING: 1163 - * Matching bd_mutex locked, might sleep. 1125 + * Matching bd_mutex locked or the caller is the only user of @disk. 1126 + * Might sleep. 1164 1127 * 1165 1128 * RETURNS: 1166 1129 * 0 on success, -errno on failure. 1167 1130 */ 1168 1131 int disk_expand_part_tbl(struct gendisk *disk, int partno) 1169 1132 { 1170 - struct disk_part_tbl *old_ptbl = disk->part_tbl; 1133 + struct disk_part_tbl *old_ptbl = 1134 + rcu_dereference_protected(disk->part_tbl, 1); 1171 1135 struct disk_part_tbl *new_ptbl; 1172 1136 int len = old_ptbl ? 
old_ptbl->len : 0; 1173 1137 int i, target; ··· 1252 1212 struct disk_part_iter piter; 1253 1213 struct hd_struct *hd; 1254 1214 char buf[BDEVNAME_SIZE]; 1215 + unsigned int inflight[2]; 1255 1216 int cpu; 1256 1217 1257 1218 /* ··· 1266 1225 disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); 1267 1226 while ((hd = disk_part_iter_next(&piter))) { 1268 1227 cpu = part_stat_lock(); 1269 - part_round_stats(cpu, hd); 1228 + part_round_stats(gp->queue, cpu, hd); 1270 1229 part_stat_unlock(); 1230 + part_in_flight(gp->queue, hd, inflight); 1271 1231 seq_printf(seqf, "%4d %7d %s %lu %lu %lu " 1272 1232 "%u %lu %lu %lu %u %u %u %u\n", 1273 1233 MAJOR(part_devt(hd)), MINOR(part_devt(hd)), ··· 1281 1239 part_stat_read(hd, merges[WRITE]), 1282 1240 part_stat_read(hd, sectors[WRITE]), 1283 1241 jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), 1284 - part_in_flight(hd), 1242 + inflight[0], 1285 1243 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1286 1244 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 1287 1245 ); ··· 1363 1321 struct gendisk *alloc_disk_node(int minors, int node_id) 1364 1322 { 1365 1323 struct gendisk *disk; 1324 + struct disk_part_tbl *ptbl; 1325 + 1326 + if (minors > DISK_MAX_PARTS) { 1327 + printk(KERN_ERR 1328 + "block: can't allocated more than %d partitions\n", 1329 + DISK_MAX_PARTS); 1330 + minors = DISK_MAX_PARTS; 1331 + } 1366 1332 1367 1333 disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); 1368 1334 if (disk) { ··· 1384 1334 kfree(disk); 1385 1335 return NULL; 1386 1336 } 1387 - disk->part_tbl->part[0] = &disk->part0; 1337 + ptbl = rcu_dereference_protected(disk->part_tbl, 1); 1338 + rcu_assign_pointer(ptbl->part[0], &disk->part0); 1388 1339 1389 1340 /* 1390 1341 * set_capacity() and get_capacity() currently don't use
+5 -5
block/mq-deadline.c
··· 457 457 return sprintf(page, "%d\n", var); 458 458 } 459 459 460 - static ssize_t 461 - deadline_var_store(int *var, const char *page, size_t count) 460 + static void 461 + deadline_var_store(int *var, const char *page) 462 462 { 463 463 char *p = (char *) page; 464 464 465 465 *var = simple_strtol(p, &p, 10); 466 - return count; 467 466 } 468 467 469 468 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ ··· 486 487 { \ 487 488 struct deadline_data *dd = e->elevator_data; \ 488 489 int __data; \ 489 - int ret = deadline_var_store(&__data, (page), count); \ 490 + deadline_var_store(&__data, (page)); \ 490 491 if (__data < (MIN)) \ 491 492 __data = (MIN); \ 492 493 else if (__data > (MAX)) \ ··· 495 496 *(__PTR) = msecs_to_jiffies(__data); \ 496 497 else \ 497 498 *(__PTR) = __data; \ 498 - return ret; \ 499 + return count; \ 499 500 } 500 501 STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); 501 502 STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); ··· 659 660 .elevator_name = "mq-deadline", 660 661 .elevator_owner = THIS_MODULE, 661 662 }; 663 + MODULE_ALIAS("mq-deadline-iosched"); 662 664 663 665 static int __init deadline_init(void) 664 666 {
+17 -6
block/partition-generic.c
··· 112 112 struct device_attribute *attr, char *buf) 113 113 { 114 114 struct hd_struct *p = dev_to_part(dev); 115 + struct request_queue *q = dev_to_disk(dev)->queue; 116 + unsigned int inflight[2]; 115 117 int cpu; 116 118 117 119 cpu = part_stat_lock(); 118 - part_round_stats(cpu, p); 120 + part_round_stats(q, cpu, p); 119 121 part_stat_unlock(); 122 + part_in_flight(q, p, inflight); 120 123 return sprintf(buf, 121 124 "%8lu %8lu %8llu %8u " 122 125 "%8lu %8lu %8llu %8u " ··· 133 130 part_stat_read(p, merges[WRITE]), 134 131 (unsigned long long)part_stat_read(p, sectors[WRITE]), 135 132 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), 136 - part_in_flight(p), 133 + inflight[0], 137 134 jiffies_to_msecs(part_stat_read(p, io_ticks)), 138 135 jiffies_to_msecs(part_stat_read(p, time_in_queue))); 139 136 } ··· 252 249 call_rcu(&part->rcu_head, delete_partition_rcu_cb); 253 250 } 254 251 252 + /* 253 + * Must be called either with bd_mutex held, before a disk can be opened or 254 + * after all disk users are gone. 255 + */ 255 256 void delete_partition(struct gendisk *disk, int partno) 256 257 { 257 - struct disk_part_tbl *ptbl = disk->part_tbl; 258 + struct disk_part_tbl *ptbl = 259 + rcu_dereference_protected(disk->part_tbl, 1); 258 260 struct hd_struct *part; 259 261 260 262 if (partno >= ptbl->len) 261 263 return; 262 264 263 - part = ptbl->part[partno]; 265 + part = rcu_dereference_protected(ptbl->part[partno], 1); 264 266 if (!part) 265 267 return; 266 268 ··· 285 277 static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, 286 278 whole_disk_show, NULL); 287 279 280 + /* 281 + * Must be called either with bd_mutex held, before a disk can be opened or 282 + * after all disk users are gone. 
283 + */ 288 284 struct hd_struct *add_partition(struct gendisk *disk, int partno, 289 285 sector_t start, sector_t len, int flags, 290 286 struct partition_meta_info *info) ··· 304 292 err = disk_expand_part_tbl(disk, partno); 305 293 if (err) 306 294 return ERR_PTR(err); 307 - ptbl = disk->part_tbl; 295 + ptbl = rcu_dereference_protected(disk->part_tbl, 1); 308 296 309 297 if (ptbl->part[partno]) 310 298 return ERR_PTR(-EBUSY); ··· 403 391 device_del(pdev); 404 392 out_put: 405 393 put_device(pdev); 406 - blk_free_devt(devt); 407 394 return ERR_PTR(err); 408 395 } 409 396
+8 -4
drivers/block/DAC960.c
··· 1678 1678 Enquiry2->FirmwareID.FirmwareType = '0'; 1679 1679 Enquiry2->FirmwareID.TurnID = 0; 1680 1680 } 1681 - sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d", 1682 - Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion, 1683 - Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID); 1681 + snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion), 1682 + "%d.%02d-%c-%02d", 1683 + Enquiry2->FirmwareID.MajorVersion, 1684 + Enquiry2->FirmwareID.MinorVersion, 1685 + Enquiry2->FirmwareID.FirmwareType, 1686 + Enquiry2->FirmwareID.TurnID); 1684 1687 if (!((Controller->FirmwareVersion[0] == '5' && 1685 1688 strcmp(Controller->FirmwareVersion, "5.06") >= 0) || 1686 1689 (Controller->FirmwareVersion[0] == '4' && ··· 6591 6588 &dac960_proc_fops); 6592 6589 } 6593 6590 6594 - sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); 6591 + snprintf(Controller->ControllerName, sizeof(Controller->ControllerName), 6592 + "c%d", Controller->ControllerNumber); 6595 6593 ControllerProcEntry = proc_mkdir(Controller->ControllerName, 6596 6594 DAC960_ProcDirectoryEntry); 6597 6595 proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
+1
drivers/block/Kconfig
··· 17 17 18 18 config BLK_DEV_NULL_BLK 19 19 tristate "Null test block driver" 20 + depends on CONFIGFS_FS 20 21 21 22 config BLK_DEV_FD 22 23 tristate "Normal floppy disk support"
+2 -3
drivers/block/brd.c
··· 294 294 295 295 static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) 296 296 { 297 - struct block_device *bdev = bio->bi_bdev; 298 - struct brd_device *brd = bdev->bd_disk->private_data; 297 + struct brd_device *brd = bio->bi_disk->private_data; 299 298 struct bio_vec bvec; 300 299 sector_t sector; 301 300 struct bvec_iter iter; 302 301 303 302 sector = bio->bi_iter.bi_sector; 304 - if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) 303 + if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) 305 304 goto io_error; 306 305 307 306 bio_for_each_segment(bvec, bio, iter) {
+1 -1
drivers/block/drbd/drbd_actlog.c
··· 151 151 op_flags |= REQ_SYNC; 152 152 153 153 bio = bio_alloc_drbd(GFP_NOIO); 154 - bio->bi_bdev = bdev->md_bdev; 154 + bio_set_dev(bio, bdev->md_bdev); 155 155 bio->bi_iter.bi_sector = sector; 156 156 err = -EIO; 157 157 if (bio_add_page(bio, device->md_io.page, size, 0) != size)
+1 -1
drivers/block/drbd/drbd_bitmap.c
··· 1019 1019 bm_store_page_idx(page, page_nr); 1020 1020 } else 1021 1021 page = b->bm_pages[page_nr]; 1022 - bio->bi_bdev = device->ldev->md_bdev; 1022 + bio_set_dev(bio, device->ldev->md_bdev); 1023 1023 bio->bi_iter.bi_sector = on_disk_sector; 1024 1024 /* bio_add_page of a single page to an empty bio will always succeed, 1025 1025 * according to api. Do we want to assert that? */
+14 -17
drivers/block/drbd/drbd_int.h
··· 63 63 # define __must_hold(x) 64 64 #endif 65 65 66 - /* module parameter, defined in drbd_main.c */ 67 - extern unsigned int minor_count; 68 - extern bool disable_sendpage; 69 - extern bool allow_oos; 70 - void tl_abort_disk_io(struct drbd_device *device); 71 - 66 + /* shared module parameters, defined in drbd_main.c */ 72 67 #ifdef CONFIG_DRBD_FAULT_INJECTION 73 - extern int enable_faults; 74 - extern int fault_rate; 75 - extern int fault_devs; 68 + extern int drbd_enable_faults; 69 + extern int drbd_fault_rate; 76 70 #endif 77 71 78 - extern char usermode_helper[]; 72 + extern unsigned int drbd_minor_count; 73 + extern char drbd_usermode_helper[]; 74 + extern int drbd_proc_details; 79 75 80 76 81 77 /* This is used to stop/restart our threads. ··· 177 181 static inline int 178 182 drbd_insert_fault(struct drbd_device *device, unsigned int type) { 179 183 #ifdef CONFIG_DRBD_FAULT_INJECTION 180 - return fault_rate && 181 - (enable_faults & (1<<type)) && 184 + return drbd_fault_rate && 185 + (drbd_enable_faults & (1<<type)) && 182 186 _drbd_insert_fault(device, type); 183 187 #else 184 188 return 0; ··· 741 745 unsigned current_tle_writes; /* writes seen within this tl epoch */ 742 746 743 747 unsigned long last_reconnect_jif; 748 + /* empty member on older kernels without blk_start_plug() */ 749 + struct blk_plug receiver_plug; 744 750 struct drbd_thread receiver; 745 751 struct drbd_thread worker; 746 752 struct drbd_thread ack_receiver; ··· 1129 1131 extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *); 1130 1132 extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev); 1131 1133 extern void drbd_device_cleanup(struct drbd_device *device); 1132 - void drbd_print_uuids(struct drbd_device *device, const char *text); 1134 + extern void drbd_print_uuids(struct drbd_device *device, const char *text); 1135 + extern void drbd_queue_unplug(struct drbd_device *device); 1133 1136 1134 1137 
extern void conn_md_sync(struct drbd_connection *connection); 1135 1138 extern void drbd_md_write(struct drbd_device *device, void *buffer); ··· 1462 1463 extern void drbd_destroy_resource(struct kref *kref); 1463 1464 extern void conn_free_crypto(struct drbd_connection *connection); 1464 1465 1465 - extern int proc_details; 1466 - 1467 1466 /* drbd_req */ 1468 1467 extern void do_submit(struct work_struct *ws); 1469 1468 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long); ··· 1625 1628 int fault_type, struct bio *bio) 1626 1629 { 1627 1630 __release(local); 1628 - if (!bio->bi_bdev) { 1629 - drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); 1631 + if (!bio->bi_disk) { 1632 + drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n"); 1630 1633 bio->bi_status = BLK_STS_IOERR; 1631 1634 bio_endio(bio); 1632 1635 return;
+61 -50
drivers/block/drbd/drbd_main.c
··· 77 77 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); 78 78 79 79 #include <linux/moduleparam.h> 80 - /* allow_open_on_secondary */ 81 - MODULE_PARM_DESC(allow_oos, "DONT USE!"); 82 80 /* thanks to these macros, if compiled into the kernel (not-module), 83 - * this becomes the boot parameter drbd.minor_count */ 84 - module_param(minor_count, uint, 0444); 85 - module_param(disable_sendpage, bool, 0644); 86 - module_param(allow_oos, bool, 0); 87 - module_param(proc_details, int, 0644); 81 + * these become boot parameters (e.g., drbd.minor_count) */ 88 82 89 83 #ifdef CONFIG_DRBD_FAULT_INJECTION 90 - int enable_faults; 91 - int fault_rate; 92 - static int fault_count; 93 - int fault_devs; 84 + int drbd_enable_faults; 85 + int drbd_fault_rate; 86 + static int drbd_fault_count; 87 + static int drbd_fault_devs; 94 88 /* bitmap of enabled faults */ 95 - module_param(enable_faults, int, 0664); 89 + module_param_named(enable_faults, drbd_enable_faults, int, 0664); 96 90 /* fault rate % value - applies to all enabled faults */ 97 - module_param(fault_rate, int, 0664); 91 + module_param_named(fault_rate, drbd_fault_rate, int, 0664); 98 92 /* count of faults inserted */ 99 - module_param(fault_count, int, 0664); 93 + module_param_named(fault_count, drbd_fault_count, int, 0664); 100 94 /* bitmap of devices to insert faults on */ 101 - module_param(fault_devs, int, 0644); 95 + module_param_named(fault_devs, drbd_fault_devs, int, 0644); 102 96 #endif 103 97 104 - /* module parameter, defined */ 105 - unsigned int minor_count = DRBD_MINOR_COUNT_DEF; 106 - bool disable_sendpage; 107 - bool allow_oos; 108 - int proc_details; /* Detail level in proc drbd*/ 98 + /* module parameters we can keep static */ 99 + static bool drbd_allow_oos; /* allow_open_on_secondary */ 100 + static bool drbd_disable_sendpage; 101 + MODULE_PARM_DESC(allow_oos, "DONT USE!"); 102 + module_param_named(allow_oos, drbd_allow_oos, bool, 0); 103 + module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 
0644); 109 104 105 + /* module parameters we share */ 106 + int drbd_proc_details; /* Detail level in proc drbd*/ 107 + module_param_named(proc_details, drbd_proc_details, int, 0644); 108 + /* module parameters shared with defaults */ 109 + unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF; 110 110 /* Module parameter for setting the user mode helper program 111 111 * to run. Default is /sbin/drbdadm */ 112 - char usermode_helper[80] = "/sbin/drbdadm"; 113 - 114 - module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644); 112 + char drbd_usermode_helper[80] = "/sbin/drbdadm"; 113 + module_param_named(minor_count, drbd_minor_count, uint, 0444); 114 + module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644); 115 115 116 116 /* in 2.6.x, our device mapping and config info contains our virtual gendisks 117 117 * as member "struct gendisk *vdisk;" ··· 923 923 } 924 924 925 925 /* communicated if (agreed_features & DRBD_FF_WSAME) */ 926 - void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q) 926 + static void 927 + assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, 928 + struct request_queue *q) 927 929 { 928 930 if (q) { 929 931 p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q)); ··· 1562 1560 * put_page(); and would cause either a VM_BUG directly, or 1563 1561 * __page_cache_release a page that would actually still be referenced 1564 1562 * by someone, leading to some obscure delayed Oops somewhere else. 
*/ 1565 - if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) 1563 + if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page)) 1566 1564 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags); 1567 1565 1568 1566 msg_flags |= MSG_NOSIGNAL; ··· 1934 1932 if (device->state.role != R_PRIMARY) { 1935 1933 if (mode & FMODE_WRITE) 1936 1934 rv = -EROFS; 1937 - else if (!allow_oos) 1935 + else if (!drbd_allow_oos) 1938 1936 rv = -EMEDIUMTYPE; 1939 1937 } 1940 1938 ··· 1952 1950 mutex_lock(&drbd_main_mutex); 1953 1951 device->open_cnt--; 1954 1952 mutex_unlock(&drbd_main_mutex); 1953 + } 1954 + 1955 + /* need to hold resource->req_lock */ 1956 + void drbd_queue_unplug(struct drbd_device *device) 1957 + { 1958 + if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) { 1959 + D_ASSERT(device, device->state.role == R_PRIMARY); 1960 + if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) { 1961 + drbd_queue_work_if_unqueued( 1962 + &first_peer_device(device)->connection->sender_work, 1963 + &device->unplug_work); 1964 + } 1965 + } 1955 1966 } 1956 1967 1957 1968 static void drbd_set_defaults(struct drbd_device *device) ··· 2023 2008 device->unplug_work.cb = w_send_write_hint; 2024 2009 device->bm_io_work.w.cb = w_bitmap_io; 2025 2010 2026 - init_timer(&device->resync_timer); 2027 - init_timer(&device->md_sync_timer); 2028 - init_timer(&device->start_resync_timer); 2029 - init_timer(&device->request_timer); 2030 - device->resync_timer.function = resync_timer_fn; 2031 - device->resync_timer.data = (unsigned long) device; 2032 - device->md_sync_timer.function = md_sync_timer_fn; 2033 - device->md_sync_timer.data = (unsigned long) device; 2034 - device->start_resync_timer.function = start_resync_timer_fn; 2035 - device->start_resync_timer.data = (unsigned long) device; 2036 - device->request_timer.function = request_timer_fn; 2037 - device->request_timer.data = (unsigned long) device; 2011 + 
setup_timer(&device->resync_timer, resync_timer_fn, 2012 + (unsigned long)device); 2013 + setup_timer(&device->md_sync_timer, md_sync_timer_fn, 2014 + (unsigned long)device); 2015 + setup_timer(&device->start_resync_timer, start_resync_timer_fn, 2016 + (unsigned long)device); 2017 + setup_timer(&device->request_timer, request_timer_fn, 2018 + (unsigned long)device); 2038 2019 2039 2020 init_waitqueue_head(&device->misc_wait); 2040 2021 init_waitqueue_head(&device->state_wait); ··· 2142 2131 static int drbd_create_mempools(void) 2143 2132 { 2144 2133 struct page *page; 2145 - const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count; 2134 + const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count; 2146 2135 int i; 2147 2136 2148 2137 /* prepare our caches and mempools */ ··· 2178 2167 goto Enomem; 2179 2168 2180 2169 /* mempools */ 2181 - drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER); 2170 + drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0); 2182 2171 if (drbd_io_bio_set == NULL) 2183 2172 goto Enomem; 2184 2173 2185 2174 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0, 2186 - BIOSET_NEED_BVECS | 2187 - BIOSET_NEED_RESCUER); 2175 + BIOSET_NEED_BVECS); 2188 2176 if (drbd_md_io_bio_set == NULL) 2189 2177 goto Enomem; 2190 2178 ··· 2419 2409 destroy_workqueue(retry.wq); 2420 2410 2421 2411 drbd_genl_unregister(); 2422 - drbd_debugfs_cleanup(); 2423 2412 2424 2413 idr_for_each_entry(&drbd_devices, device, i) 2425 2414 drbd_delete_device(device); ··· 2428 2419 list_del(&resource->resources); 2429 2420 drbd_free_resource(resource); 2430 2421 } 2422 + 2423 + drbd_debugfs_cleanup(); 2431 2424 2432 2425 drbd_destroy_mempools(); 2433 2426 unregister_blkdev(DRBD_MAJOR, "drbd"); ··· 2983 2972 { 2984 2973 int err; 2985 2974 2986 - if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) { 2987 - pr_err("invalid minor_count (%d)\n", minor_count); 2975 + if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || 
drbd_minor_count > DRBD_MINOR_COUNT_MAX) { 2976 + pr_err("invalid minor_count (%d)\n", drbd_minor_count); 2988 2977 #ifdef MODULE 2989 2978 return -EINVAL; 2990 2979 #else 2991 - minor_count = DRBD_MINOR_COUNT_DEF; 2980 + drbd_minor_count = DRBD_MINOR_COUNT_DEF; 2992 2981 #endif 2993 2982 } 2994 2983 ··· 3911 3900 static struct fault_random_state rrs = {0, 0}; 3912 3901 3913 3902 unsigned int ret = ( 3914 - (fault_devs == 0 || 3915 - ((1 << device_to_minor(device)) & fault_devs) != 0) && 3916 - (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate)); 3903 + (drbd_fault_devs == 0 || 3904 + ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) && 3905 + (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate)); 3917 3906 3918 3907 if (ret) { 3919 - fault_count++; 3908 + drbd_fault_count++; 3920 3909 3921 3910 if (__ratelimit(&drbd_ratelimit_state)) 3922 3911 drbd_warn(device, "***Simulating %s failure\n",
+24 -36
drivers/block/drbd/drbd_nl.c
··· 344 344 (char[60]) { }, /* address */ 345 345 NULL }; 346 346 char mb[14]; 347 - char *argv[] = {usermode_helper, cmd, mb, NULL }; 347 + char *argv[] = {drbd_usermode_helper, cmd, mb, NULL }; 348 348 struct drbd_connection *connection = first_peer_device(device)->connection; 349 349 struct sib_info sib; 350 350 int ret; ··· 359 359 * write out any unsynced meta data changes now */ 360 360 drbd_md_sync(device); 361 361 362 - drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb); 362 + drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb); 363 363 sib.sib_reason = SIB_HELPER_PRE; 364 364 sib.helper_name = cmd; 365 365 drbd_bcast_event(device, &sib); 366 366 notify_helper(NOTIFY_CALL, device, connection, cmd, 0); 367 - ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); 367 + ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC); 368 368 if (ret) 369 369 drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n", 370 - usermode_helper, cmd, mb, 370 + drbd_usermode_helper, cmd, mb, 371 371 (ret >> 8) & 0xff, ret); 372 372 else 373 373 drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n", 374 - usermode_helper, cmd, mb, 374 + drbd_usermode_helper, cmd, mb, 375 375 (ret >> 8) & 0xff, ret); 376 376 sib.sib_reason = SIB_HELPER_POST; 377 377 sib.helper_exit_code = ret; ··· 396 396 (char[60]) { }, /* address */ 397 397 NULL }; 398 398 char *resource_name = connection->resource->name; 399 - char *argv[] = {usermode_helper, cmd, resource_name, NULL }; 399 + char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL }; 400 400 int ret; 401 401 402 402 setup_khelper_env(connection, envp); 403 403 conn_md_sync(connection); 404 404 405 - drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name); 405 + drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name); 406 406 /* TODO: conn_bcast_event() ?? 
*/ 407 407 notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0); 408 408 409 - ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); 409 + ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC); 410 410 if (ret) 411 411 drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n", 412 - usermode_helper, cmd, resource_name, 412 + drbd_usermode_helper, cmd, resource_name, 413 413 (ret >> 8) & 0xff, ret); 414 414 else 415 415 drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n", 416 - usermode_helper, cmd, resource_name, 416 + drbd_usermode_helper, cmd, resource_name, 417 417 (ret >> 8) & 0xff, ret); 418 418 /* TODO: conn_bcast_event() ?? */ 419 419 notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret); ··· 1236 1236 1237 1237 static void decide_on_write_same_support(struct drbd_device *device, 1238 1238 struct request_queue *q, 1239 - struct request_queue *b, struct o_qlim *o) 1239 + struct request_queue *b, struct o_qlim *o, 1240 + bool disable_write_same) 1240 1241 { 1241 1242 struct drbd_peer_device *peer_device = first_peer_device(device); 1242 1243 struct drbd_connection *connection = peer_device->connection; 1243 1244 bool can_do = b ? 
b->limits.max_write_same_sectors : true; 1245 + 1246 + if (can_do && disable_write_same) { 1247 + can_do = false; 1248 + drbd_info(peer_device, "WRITE_SAME disabled by config\n"); 1249 + } 1244 1250 1245 1251 if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) { 1246 1252 can_do = false; ··· 1308 1302 struct request_queue *b = NULL; 1309 1303 struct disk_conf *dc; 1310 1304 bool discard_zeroes_if_aligned = true; 1305 + bool disable_write_same = false; 1311 1306 1312 1307 if (bdev) { 1313 1308 b = bdev->backing_bdev->bd_disk->queue; ··· 1318 1311 dc = rcu_dereference(device->ldev->disk_conf); 1319 1312 max_segments = dc->max_bio_bvecs; 1320 1313 discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned; 1314 + disable_write_same = dc->disable_write_same; 1321 1315 rcu_read_unlock(); 1322 1316 1323 1317 blk_set_stacking_limits(&q->limits); ··· 1329 1321 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 1330 1322 blk_queue_segment_boundary(q, PAGE_SIZE-1); 1331 1323 decide_on_discard_support(device, q, b, discard_zeroes_if_aligned); 1332 - decide_on_write_same_support(device, q, b, o); 1324 + decide_on_write_same_support(device, q, b, o, disable_write_same); 1333 1325 1334 1326 if (b) { 1335 1327 blk_queue_stack_limits(q, b); ··· 1620 1612 if (write_ordering_changed(old_disk_conf, new_disk_conf)) 1621 1613 drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH); 1622 1614 1623 - if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned) 1615 + if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned 1616 + || old_disk_conf->disable_write_same != new_disk_conf->disable_write_same) 1624 1617 drbd_reconsider_queue_parameters(device, device->ldev, NULL); 1625 1618 1626 1619 drbd_md_sync(device); ··· 2149 2140 2150 2141 static int adm_detach(struct drbd_device *device, int force) 2151 2142 { 2152 - enum drbd_state_rv retcode; 2153 - 
void *buffer; 2154 - int ret; 2155 - 2156 2143 if (force) { 2157 2144 set_bit(FORCE_DETACH, &device->flags); 2158 2145 drbd_force_state(device, NS(disk, D_FAILED)); 2159 - retcode = SS_SUCCESS; 2160 - goto out; 2146 + return SS_SUCCESS; 2161 2147 } 2162 2148 2163 - drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ 2164 - buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */ 2165 - if (buffer) { 2166 - retcode = drbd_request_state(device, NS(disk, D_FAILED)); 2167 - drbd_md_put_buffer(device); 2168 - } else /* already <= D_FAILED */ 2169 - retcode = SS_NOTHING_TO_DO; 2170 - /* D_FAILED will transition to DISKLESS. */ 2171 - drbd_resume_io(device); 2172 - ret = wait_event_interruptible(device->misc_wait, 2173 - device->state.disk != D_FAILED); 2174 - if ((int)retcode == (int)SS_IS_DISKLESS) 2175 - retcode = SS_NOTHING_TO_DO; 2176 - if (ret) 2177 - retcode = ERR_INTR; 2178 - out: 2179 - return retcode; 2149 + return drbd_request_detach_interruptible(device); 2180 2150 } 2181 2151 2182 2152 /* Detaching the disk is a process in multiple stages. First we need to lock
+5 -5
drivers/block/drbd/drbd_proc.c
··· 127 127 seq_putc(seq, '='); 128 128 seq_putc(seq, '>'); 129 129 for (i = 0; i < y; i++) 130 - seq_printf(seq, "."); 130 + seq_putc(seq, '.'); 131 131 seq_puts(seq, "] "); 132 132 133 133 if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T) ··· 179 179 seq_printf_with_thousands_grouping(seq, dbdt); 180 180 seq_puts(seq, " ("); 181 181 /* ------------------------- ~3s average ------------------------ */ 182 - if (proc_details >= 1) { 182 + if (drbd_proc_details >= 1) { 183 183 /* this is what drbd_rs_should_slow_down() uses */ 184 184 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; 185 185 dt = (jiffies - device->rs_mark_time[i]) / HZ; ··· 209 209 } 210 210 seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); 211 211 212 - if (proc_details >= 1) { 212 + if (drbd_proc_details >= 1) { 213 213 /* 64 bit: 214 214 * we convert to sectors in the display below. */ 215 215 unsigned long bm_bits = drbd_bm_bits(device); ··· 332 332 state.conn == C_VERIFY_T) 333 333 drbd_syncer_progress(device, seq, state); 334 334 335 - if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) { 335 + if (drbd_proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) { 336 336 lc_seq_printf_stats(seq, device->resync); 337 337 lc_seq_printf_stats(seq, device->act_log); 338 338 put_ldev(device); 339 339 } 340 340 341 - if (proc_details >= 2) 341 + if (drbd_proc_details >= 2) 342 342 seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt)); 343 343 } 344 344 rcu_read_unlock();
+52 -8
drivers/block/drbd/drbd_receiver.c
··· 332 332 if (page == NULL) 333 333 return; 334 334 335 - if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count) 335 + if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count) 336 336 i = page_chain_free(page); 337 337 else { 338 338 struct page *tmp; ··· 1100 1100 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) 1101 1101 mutex_lock(peer_device->device->state_mutex); 1102 1102 1103 + /* avoid a race with conn_request_state( C_DISCONNECTING ) */ 1104 + spin_lock_irq(&connection->resource->req_lock); 1103 1105 set_bit(STATE_SENT, &connection->flags); 1106 + spin_unlock_irq(&connection->resource->req_lock); 1104 1107 1105 1108 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) 1106 1109 mutex_unlock(peer_device->device->state_mutex); ··· 1197 1194 return 0; 1198 1195 } 1199 1196 1197 + static void drbd_unplug_all_devices(struct drbd_connection *connection) 1198 + { 1199 + if (current->plug == &connection->receiver_plug) { 1200 + blk_finish_plug(&connection->receiver_plug); 1201 + blk_start_plug(&connection->receiver_plug); 1202 + } /* else: maybe just schedule() ?? */ 1203 + } 1204 + 1200 1205 static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi) 1201 1206 { 1202 1207 void *buffer = connection->data.rbuf; ··· 1220 1209 return err; 1221 1210 } 1222 1211 1212 + static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi) 1213 + { 1214 + void *buffer = connection->data.rbuf; 1215 + unsigned int size = drbd_header_size(connection); 1216 + int err; 1217 + 1218 + err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT); 1219 + if (err != size) { 1220 + /* If we have nothing in the receive buffer now, to reduce 1221 + * application latency, try to drain the backend queues as 1222 + * quickly as possible, and let remote TCP know what we have 1223 + * received so far. 
*/ 1224 + if (err == -EAGAIN) { 1225 + drbd_tcp_quickack(connection->data.socket); 1226 + drbd_unplug_all_devices(connection); 1227 + } 1228 + if (err > 0) { 1229 + buffer += err; 1230 + size -= err; 1231 + } 1232 + err = drbd_recv_all_warn(connection, buffer, size); 1233 + if (err) 1234 + return err; 1235 + } 1236 + 1237 + err = decode_header(connection, connection->data.rbuf, pi); 1238 + connection->last_received = jiffies; 1239 + 1240 + return err; 1241 + } 1223 1242 /* This is blkdev_issue_flush, but asynchronous. 1224 1243 * We want to submit to all component volumes in parallel, 1225 1244 * then wait for all completions. ··· 1264 1223 struct issue_flush_context *ctx; 1265 1224 }; 1266 1225 1267 - void one_flush_endio(struct bio *bio) 1226 + static void one_flush_endio(struct bio *bio) 1268 1227 { 1269 1228 struct one_flush_context *octx = bio->bi_private; 1270 1229 struct drbd_device *device = octx->device; ··· 1306 1265 1307 1266 octx->device = device; 1308 1267 octx->ctx = ctx; 1309 - bio->bi_bdev = device->ldev->backing_bdev; 1268 + bio_set_dev(bio, device->ldev->backing_bdev); 1310 1269 bio->bi_private = octx; 1311 1270 bio->bi_end_io = one_flush_endio; 1312 1271 bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; ··· 1589 1548 } 1590 1549 /* > peer_req->i.sector, unless this is the first bio */ 1591 1550 bio->bi_iter.bi_sector = sector; 1592 - bio->bi_bdev = device->ldev->backing_bdev; 1551 + bio_set_dev(bio, device->ldev->backing_bdev); 1593 1552 bio_set_op_attrs(bio, op, op_flags); 1594 1553 bio->bi_private = peer_req; 1595 1554 bio->bi_end_io = drbd_peer_request_endio; ··· 4126 4085 return config_unknown_volume(connection, pi); 4127 4086 device = peer_device->device; 4128 4087 4129 - p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 4088 + p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO); 4130 4089 if (!p_uuid) { 4131 4090 drbd_err(device, "kmalloc of p_uuid failed\n"); 4132 4091 return false; ··· 4923 4882 struct data_cmd const 
*cmd; 4924 4883 4925 4884 drbd_thread_current_set_cpu(&connection->receiver); 4926 - update_receiver_timing_details(connection, drbd_recv_header); 4927 - if (drbd_recv_header(connection, &pi)) 4885 + update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug); 4886 + if (drbd_recv_header_maybe_unplug(connection, &pi)) 4928 4887 goto err_out; 4929 4888 4930 4889 cmd = &drbd_cmd_handler[pi.cmd]; ··· 5416 5375 } 5417 5376 } while (h == 0); 5418 5377 5419 - if (h > 0) 5378 + if (h > 0) { 5379 + blk_start_plug(&connection->receiver_plug); 5420 5380 drbdd(connection); 5381 + blk_finish_plug(&connection->receiver_plug); 5382 + } 5421 5383 5422 5384 conn_disconnect(connection); 5423 5385
+77 -9
drivers/block/drbd/drbd_req.c
··· 36 36 /* Update disk stats at start of I/O request */ 37 37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) 38 38 { 39 - generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9, 40 - &device->vdisk->part0); 39 + struct request_queue *q = device->rq_queue; 40 + 41 + generic_start_io_acct(q, bio_data_dir(req->master_bio), 42 + req->i.size >> 9, &device->vdisk->part0); 41 43 } 42 44 43 45 /* Update disk stats when completing request upwards */ 44 46 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) 45 47 { 46 - generic_end_io_acct(bio_data_dir(req->master_bio), 48 + struct request_queue *q = device->rq_queue; 49 + 50 + generic_end_io_acct(q, bio_data_dir(req->master_bio), 47 51 &device->vdisk->part0, req->start_jif); 48 52 } 49 53 ··· 1179 1175 else 1180 1176 type = DRBD_FAULT_DT_RD; 1181 1177 1182 - bio->bi_bdev = device->ldev->backing_bdev; 1178 + bio_set_dev(bio, device->ldev->backing_bdev); 1183 1179 1184 1180 /* State may have changed since we grabbed our reference on the 1185 1181 * ->ldev member. Double check, and short-circuit to endio. ··· 1279 1275 return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE; 1280 1276 } 1281 1277 1278 + struct drbd_plug_cb { 1279 + struct blk_plug_cb cb; 1280 + struct drbd_request *most_recent_req; 1281 + /* do we need more? */ 1282 + }; 1283 + 1284 + static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule) 1285 + { 1286 + struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb); 1287 + struct drbd_resource *resource = plug->cb.data; 1288 + struct drbd_request *req = plug->most_recent_req; 1289 + 1290 + kfree(cb); 1291 + if (!req) 1292 + return; 1293 + 1294 + spin_lock_irq(&resource->req_lock); 1295 + /* In case the sender did not process it yet, raise the flag to 1296 + * have it followed with P_UNPLUG_REMOTE just after. 
*/ 1297 + req->rq_state |= RQ_UNPLUG; 1298 + /* but also queue a generic unplug */ 1299 + drbd_queue_unplug(req->device); 1300 + kref_put(&req->kref, drbd_req_destroy); 1301 + spin_unlock_irq(&resource->req_lock); 1302 + } 1303 + 1304 + static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource) 1305 + { 1306 + /* A lot of text to say 1307 + * return (struct drbd_plug_cb*)blk_check_plugged(); */ 1308 + struct drbd_plug_cb *plug; 1309 + struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug)); 1310 + 1311 + if (cb) 1312 + plug = container_of(cb, struct drbd_plug_cb, cb); 1313 + else 1314 + plug = NULL; 1315 + return plug; 1316 + } 1317 + 1318 + static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req) 1319 + { 1320 + struct drbd_request *tmp = plug->most_recent_req; 1321 + /* Will be sent to some peer. 1322 + * Remember to tag it with UNPLUG_REMOTE on unplug */ 1323 + kref_get(&req->kref); 1324 + plug->most_recent_req = req; 1325 + if (tmp) 1326 + kref_put(&tmp->kref, drbd_req_destroy); 1327 + } 1328 + 1282 1329 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) 1283 1330 { 1284 1331 struct drbd_resource *resource = device->resource; ··· 1402 1347 no_remote = true; 1403 1348 } 1404 1349 1350 + if (no_remote == false) { 1351 + struct drbd_plug_cb *plug = drbd_check_plugged(resource); 1352 + if (plug) 1353 + drbd_update_plug(plug, req); 1354 + } 1355 + 1405 1356 /* If it took the fast path in drbd_request_prepare, add it here. 1406 1357 * The slow path has added it already. 
*/ 1407 1358 if (list_empty(&req->req_pending_master_completion)) ··· 1456 1395 1457 1396 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) 1458 1397 { 1398 + struct blk_plug plug; 1459 1399 struct drbd_request *req, *tmp; 1400 + 1401 + blk_start_plug(&plug); 1460 1402 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { 1461 1403 const int rw = bio_data_dir(req->master_bio); 1462 1404 ··· 1477 1413 list_del_init(&req->tl_requests); 1478 1414 drbd_send_and_submit(device, req); 1479 1415 } 1416 + blk_finish_plug(&plug); 1480 1417 } 1481 1418 1482 1419 static bool prepare_al_transaction_nonblock(struct drbd_device *device, ··· 1485 1420 struct list_head *pending, 1486 1421 struct list_head *later) 1487 1422 { 1488 - struct drbd_request *req, *tmp; 1423 + struct drbd_request *req; 1489 1424 int wake = 0; 1490 1425 int err; 1491 1426 1492 1427 spin_lock_irq(&device->al_lock); 1493 - list_for_each_entry_safe(req, tmp, incoming, tl_requests) { 1428 + while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) { 1494 1429 err = drbd_al_begin_io_nonblock(device, &req->i); 1495 1430 if (err == -ENOBUFS) 1496 1431 break; ··· 1507 1442 return !list_empty(pending); 1508 1443 } 1509 1444 1510 - void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) 1445 + static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) 1511 1446 { 1512 - struct drbd_request *req, *tmp; 1447 + struct blk_plug plug; 1448 + struct drbd_request *req; 1513 1449 1514 - list_for_each_entry_safe(req, tmp, pending, tl_requests) { 1450 + blk_start_plug(&plug); 1451 + while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) { 1515 1452 req->rq_state |= RQ_IN_ACT_LOG; 1516 1453 req->in_actlog_jif = jiffies; 1517 1454 atomic_dec(&device->ap_actlog_cnt); 1518 1455 list_del_init(&req->tl_requests); 1519 1456 drbd_send_and_submit(device, req); 1520 1457 } 1458 + 
blk_finish_plug(&plug); 1521 1459 } 1522 1460 1523 1461 void do_submit(struct work_struct *ws)
+6
drivers/block/drbd/drbd_req.h
··· 212 212 /* Should call drbd_al_complete_io() for this request... */ 213 213 __RQ_IN_ACT_LOG, 214 214 215 + /* This was the most recent request during some blk_finish_plug() 216 + * or its implicit from-schedule equivalent. 217 + * We may use it as hint to send a P_UNPLUG_REMOTE */ 218 + __RQ_UNPLUG, 219 + 215 220 /* The peer has sent a retry ACK */ 216 221 __RQ_POSTPONED, 217 222 ··· 254 249 #define RQ_WSAME (1UL << __RQ_WSAME) 255 250 #define RQ_UNMAP (1UL << __RQ_UNMAP) 256 251 #define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG) 252 + #define RQ_UNPLUG (1UL << __RQ_UNPLUG) 257 253 #define RQ_POSTPONED (1UL << __RQ_POSTPONED) 258 254 #define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP) 259 255 #define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
+47 -1
drivers/block/drbd/drbd_state.c
··· 346 346 347 347 enum drbd_role conn_highest_role(struct drbd_connection *connection) 348 348 { 349 - enum drbd_role role = R_UNKNOWN; 349 + enum drbd_role role = R_SECONDARY; 350 350 struct drbd_peer_device *peer_device; 351 351 int vnr; 352 352 ··· 579 579 unsigned long flags; 580 580 union drbd_state os, ns; 581 581 enum drbd_state_rv rv; 582 + void *buffer = NULL; 582 583 583 584 init_completion(&done); 584 585 585 586 if (f & CS_SERIALIZE) 586 587 mutex_lock(device->state_mutex); 588 + if (f & CS_INHIBIT_MD_IO) 589 + buffer = drbd_md_get_buffer(device, __func__); 587 590 588 591 spin_lock_irqsave(&device->resource->req_lock, flags); 589 592 os = drbd_read_state(device); ··· 639 636 } 640 637 641 638 abort: 639 + if (buffer) 640 + drbd_md_put_buffer(device); 642 641 if (f & CS_SERIALIZE) 643 642 mutex_unlock(device->state_mutex); 644 643 ··· 665 660 666 661 wait_event(device->state_wait, 667 662 (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE); 663 + 664 + return rv; 665 + } 666 + 667 + /* 668 + * We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while 669 + * there is IO in-flight: the transition into D_FAILED for detach purposes 670 + * may get misinterpreted as actual IO error in a confused endio function. 671 + * 672 + * We wrap it all into wait_event(), to retry in case the drbd_req_state() 673 + * returns SS_IN_TRANSIENT_STATE. 674 + * 675 + * To avoid potential deadlock with e.g. the receiver thread trying to grab 676 + * drbd_md_get_buffer() while trying to get out of the "transient state", we 677 + * need to grab and release the meta data buffer inside of that wait_event loop. 
678 + */ 679 + static enum drbd_state_rv 680 + request_detach(struct drbd_device *device) 681 + { 682 + return drbd_req_state(device, NS(disk, D_FAILED), 683 + CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO); 684 + } 685 + 686 + enum drbd_state_rv 687 + drbd_request_detach_interruptible(struct drbd_device *device) 688 + { 689 + enum drbd_state_rv rv; 690 + int ret; 691 + 692 + drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */ 693 + wait_event_interruptible(device->state_wait, 694 + (rv = request_detach(device)) != SS_IN_TRANSIENT_STATE); 695 + drbd_resume_io(device); 696 + 697 + ret = wait_event_interruptible(device->misc_wait, 698 + device->state.disk != D_FAILED); 699 + 700 + if (rv == SS_IS_DISKLESS) 701 + rv = SS_NOTHING_TO_DO; 702 + if (ret) 703 + rv = ERR_INTR; 668 704 669 705 return rv; 670 706 }
+8
drivers/block/drbd/drbd_state.h
··· 71 71 CS_DC_SUSP = 1 << 10, 72 72 CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK, 73 73 CS_IGN_OUTD_FAIL = 1 << 11, 74 + 75 + /* Make sure no meta data IO is in flight, by calling 76 + * drbd_md_get_buffer(). Used for graceful detach. */ 77 + CS_INHIBIT_MD_IO = 1 << 12, 74 78 }; 75 79 76 80 /* drbd_dev_state and drbd_state are different types. This is to stress the ··· 159 155 { 160 156 return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED); 161 157 } 158 + 159 + /* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */ 160 + enum drbd_state_rv 161 + drbd_request_detach_interruptible(struct drbd_device *device); 162 162 163 163 enum drbd_role conn_highest_role(struct drbd_connection *connection); 164 164 enum drbd_role conn_highest_peer(struct drbd_connection *connection);
+38 -10
drivers/block/drbd/drbd_worker.c
··· 65 65 device = bio->bi_private; 66 66 device->md_io.error = blk_status_to_errno(bio->bi_status); 67 67 68 + /* special case: drbd_md_read() during drbd_adm_attach() */ 69 + if (device->ldev) 70 + put_ldev(device); 71 + bio_put(bio); 72 + 68 73 /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able 69 74 * to timeout on the lower level device, and eventually detach from it. 70 75 * If this io completion runs after that timeout expired, this ··· 84 79 drbd_md_put_buffer(device); 85 80 device->md_io.done = 1; 86 81 wake_up(&device->misc_wait); 87 - bio_put(bio); 88 - if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */ 89 - put_ldev(device); 90 82 } 91 83 92 84 /* reads on behalf of the partner, ··· 129 127 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; 130 128 block_id = peer_req->block_id; 131 129 peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO; 130 + 131 + if (peer_req->flags & EE_WAS_ERROR) { 132 + /* In protocol != C, we usually do not send write acks. 133 + * In case of a write error, send the neg ack anyways. 
*/ 134 + if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags)) 135 + inc_unacked(device); 136 + drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); 137 + } 132 138 133 139 spin_lock_irqsave(&device->resource->req_lock, flags); 134 140 device->writ_cnt += peer_req->i.size >> 9; ··· 205 195 } 206 196 } 207 197 208 - void drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device) 198 + static void 199 + drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device) 209 200 { 210 201 panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n", 211 202 device->minor, device->resource->name, device->vnr); ··· 1393 1382 return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0); 1394 1383 } 1395 1384 1385 + static int pd_send_unplug_remote(struct drbd_peer_device *pd) 1386 + { 1387 + struct drbd_socket *sock = &pd->connection->data; 1388 + if (!drbd_prepare_command(pd, sock)) 1389 + return -EIO; 1390 + return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0); 1391 + } 1392 + 1396 1393 int w_send_write_hint(struct drbd_work *w, int cancel) 1397 1394 { 1398 1395 struct drbd_device *device = 1399 1396 container_of(w, struct drbd_device, unplug_work); 1400 - struct drbd_socket *sock; 1401 1397 1402 1398 if (cancel) 1403 1399 return 0; 1404 - sock = &first_peer_device(device)->connection->data; 1405 - if (!drbd_prepare_command(first_peer_device(device), sock)) 1406 - return -EIO; 1407 - return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0); 1400 + return pd_send_unplug_remote(first_peer_device(device)); 1408 1401 } 1409 1402 1410 1403 static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch) ··· 1470 1455 struct drbd_device *device = req->device; 1471 1456 struct drbd_peer_device *const peer_device = first_peer_device(device); 1472 1457 struct drbd_connection 
*connection = peer_device->connection; 1458 + bool do_send_unplug = req->rq_state & RQ_UNPLUG; 1473 1459 int err; 1474 1460 1475 1461 if (unlikely(cancel)) { ··· 1486 1470 err = drbd_send_dblock(peer_device, req); 1487 1471 req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); 1488 1472 1473 + if (do_send_unplug && !err) 1474 + pd_send_unplug_remote(peer_device); 1475 + 1489 1476 return err; 1490 1477 } 1491 1478 ··· 1503 1484 struct drbd_device *device = req->device; 1504 1485 struct drbd_peer_device *const peer_device = first_peer_device(device); 1505 1486 struct drbd_connection *connection = peer_device->connection; 1487 + bool do_send_unplug = req->rq_state & RQ_UNPLUG; 1506 1488 int err; 1507 1489 1508 1490 if (unlikely(cancel)) { ··· 1521 1501 1522 1502 req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); 1523 1503 1504 + if (do_send_unplug && !err) 1505 + pd_send_unplug_remote(peer_device); 1506 + 1524 1507 return err; 1525 1508 } 1526 1509 ··· 1536 1513 drbd_al_begin_io(device, &req->i); 1537 1514 1538 1515 drbd_req_make_private_bio(req, req->master_bio); 1539 - req->private_bio->bi_bdev = device->ldev->backing_bdev; 1516 + bio_set_dev(req->private_bio, device->ldev->backing_bdev); 1540 1517 generic_make_request(req->private_bio); 1541 1518 1542 1519 return 0; ··· 1753 1730 1754 1731 if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) { 1755 1732 drbd_err(device, "Resync already running!\n"); 1733 + return; 1734 + } 1735 + 1736 + if (!connection) { 1737 + drbd_err(device, "No connection to peer, aborting!\n"); 1756 1738 return; 1757 1739 } 1758 1740
+1 -1
drivers/block/floppy.c
··· 4134 4134 cbdata.drive = drive; 4135 4135 4136 4136 bio_init(&bio, &bio_vec, 1); 4137 - bio.bi_bdev = bdev; 4137 + bio_set_dev(&bio, bdev); 4138 4138 bio_add_page(&bio, page, size, 0); 4139 4139 4140 4140 bio.bi_iter.bi_sector = 0;
+8 -6
drivers/block/loop.c
··· 1966 1966 struct loop_device *lo; 1967 1967 int err; 1968 1968 1969 - err = misc_register(&loop_misc); 1970 - if (err < 0) 1971 - return err; 1972 - 1973 1969 part_shift = 0; 1974 1970 if (max_part > 0) { 1975 1971 part_shift = fls(max_part); ··· 1983 1987 1984 1988 if ((1UL << part_shift) > DISK_MAX_PARTS) { 1985 1989 err = -EINVAL; 1986 - goto misc_out; 1990 + goto err_out; 1987 1991 } 1988 1992 1989 1993 if (max_loop > 1UL << (MINORBITS - part_shift)) { 1990 1994 err = -EINVAL; 1991 - goto misc_out; 1995 + goto err_out; 1992 1996 } 1993 1997 1994 1998 /* ··· 2006 2010 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; 2007 2011 range = 1UL << MINORBITS; 2008 2012 } 2013 + 2014 + err = misc_register(&loop_misc); 2015 + if (err < 0) 2016 + goto err_out; 2017 + 2009 2018 2010 2019 if (register_blkdev(LOOP_MAJOR, "loop")) { 2011 2020 err = -EIO; ··· 2031 2030 2032 2031 misc_out: 2033 2032 misc_deregister(&loop_misc); 2033 + err_out: 2034 2034 return err; 2035 2035 } 2036 2036
+12 -3
drivers/block/nbd.c
··· 128 128 #define NBD_MAGIC 0x68797548 129 129 130 130 static unsigned int nbds_max = 16; 131 - static int max_part; 131 + static int max_part = 16; 132 132 static struct workqueue_struct *recv_workqueue; 133 133 static int part_shift; 134 134 ··· 165 165 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); 166 166 } 167 167 168 - static struct device_attribute pid_attr = { 168 + static const struct device_attribute pid_attr = { 169 169 .attr = { .name = "pid", .mode = S_IRUGO}, 170 170 .show = pid_show, 171 171 }; ··· 1584 1584 } 1585 1585 } else { 1586 1586 nbd = idr_find(&nbd_index_idr, index); 1587 + if (!nbd) { 1588 + ret = nbd_dev_add(index); 1589 + if (ret < 0) { 1590 + mutex_unlock(&nbd_index_mutex); 1591 + printk(KERN_ERR "nbd: failed to add new device\n"); 1592 + return ret; 1593 + } 1594 + nbd = idr_find(&nbd_index_idr, index); 1595 + } 1587 1596 } 1588 1597 if (!nbd) { 1589 1598 printk(KERN_ERR "nbd: couldn't find device at index %d\n", ··· 2146 2137 module_param(nbds_max, int, 0444); 2147 2138 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); 2148 2139 module_param(max_part, int, 0444); 2149 - MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); 2140 + MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
+1214 -95
drivers/block/null_blk.c
··· 1 + /* 2 + * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and 3 + * Shaohua Li <shli@fb.com> 4 + */ 1 5 #include <linux/module.h> 2 6 3 7 #include <linux/moduleparam.h> ··· 13 9 #include <linux/blk-mq.h> 14 10 #include <linux/hrtimer.h> 15 11 #include <linux/lightnvm.h> 12 + #include <linux/configfs.h> 13 + #include <linux/badblocks.h> 14 + 15 + #define SECTOR_SHIFT 9 16 + #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) 17 + #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) 18 + #define SECTOR_SIZE (1 << SECTOR_SHIFT) 19 + #define SECTOR_MASK (PAGE_SECTORS - 1) 20 + 21 + #define FREE_BATCH 16 22 + 23 + #define TICKS_PER_SEC 50ULL 24 + #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC) 25 + 26 + static inline u64 mb_per_tick(int mbps) 27 + { 28 + return (1 << 20) / TICKS_PER_SEC * ((u64) mbps); 29 + } 16 30 17 31 struct nullb_cmd { 18 32 struct list_head list; ··· 41 19 unsigned int tag; 42 20 struct nullb_queue *nq; 43 21 struct hrtimer timer; 22 + blk_status_t error; 44 23 }; 45 24 46 25 struct nullb_queue { 47 26 unsigned long *tag_map; 48 27 wait_queue_head_t wait; 49 28 unsigned int queue_depth; 29 + struct nullb_device *dev; 50 30 51 31 struct nullb_cmd *cmds; 52 32 }; 53 33 34 + /* 35 + * Status flags for nullb_device. 36 + * 37 + * CONFIGURED: Device has been configured and turned on. Cannot reconfigure. 38 + * UP: Device is currently on and visible in userspace. 39 + * THROTTLED: Device is being throttled. 40 + * CACHE: Device is using a write-back cache. 41 + */ 42 + enum nullb_device_flags { 43 + NULLB_DEV_FL_CONFIGURED = 0, 44 + NULLB_DEV_FL_UP = 1, 45 + NULLB_DEV_FL_THROTTLED = 2, 46 + NULLB_DEV_FL_CACHE = 3, 47 + }; 48 + 49 + /* 50 + * nullb_page is a page in memory for nullb devices. 51 + * 52 + * @page: The page holding the data. 53 + * @bitmap: The bitmap represents which sector in the page has data. 54 + * Each bit represents one block size. 
For example, sector 8 55 + * will use the 7th bit 56 + * The highest 2 bits of bitmap are for special purpose. LOCK means the cache 57 + * page is being flushing to storage. FREE means the cache page is freed and 58 + * should be skipped from flushing to storage. Please see 59 + * null_make_cache_space 60 + */ 61 + struct nullb_page { 62 + struct page *page; 63 + unsigned long bitmap; 64 + }; 65 + #define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1) 66 + #define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2) 67 + 68 + struct nullb_device { 69 + struct nullb *nullb; 70 + struct config_item item; 71 + struct radix_tree_root data; /* data stored in the disk */ 72 + struct radix_tree_root cache; /* disk cache data */ 73 + unsigned long flags; /* device flags */ 74 + unsigned int curr_cache; 75 + struct badblocks badblocks; 76 + 77 + unsigned long size; /* device size in MB */ 78 + unsigned long completion_nsec; /* time in ns to complete a request */ 79 + unsigned long cache_size; /* disk cache size in MB */ 80 + unsigned int submit_queues; /* number of submission queues */ 81 + unsigned int home_node; /* home node for the device */ 82 + unsigned int queue_mode; /* block interface */ 83 + unsigned int blocksize; /* block size */ 84 + unsigned int irqmode; /* IRQ completion handler */ 85 + unsigned int hw_queue_depth; /* queue depth */ 86 + unsigned int index; /* index of the disk, only valid with a disk */ 87 + unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */ 88 + bool use_lightnvm; /* register as a LightNVM device */ 89 + bool blocking; /* blocking blk-mq device */ 90 + bool use_per_node_hctx; /* use per-node allocation for hardware context */ 91 + bool power; /* power on/off the device */ 92 + bool memory_backed; /* if data is stored in memory */ 93 + bool discard; /* if support discard */ 94 + }; 95 + 54 96 struct nullb { 97 + struct nullb_device *dev; 55 98 struct list_head list; 56 99 unsigned int index; 57 100 struct request_queue *q; ··· 124 37 
struct nvm_dev *ndev; 125 38 struct blk_mq_tag_set *tag_set; 126 39 struct blk_mq_tag_set __tag_set; 127 - struct hrtimer timer; 128 40 unsigned int queue_depth; 41 + atomic_long_t cur_bytes; 42 + struct hrtimer bw_timer; 43 + unsigned long cache_flush_pos; 129 44 spinlock_t lock; 130 45 131 46 struct nullb_queue *queues; ··· 138 49 static LIST_HEAD(nullb_list); 139 50 static struct mutex lock; 140 51 static int null_major; 141 - static int nullb_indexes; 52 + static DEFINE_IDA(nullb_indexes); 142 53 static struct kmem_cache *ppa_cache; 143 54 static struct blk_mq_tag_set tag_set; 144 55 ··· 154 65 NULL_Q_MQ = 2, 155 66 }; 156 67 157 - static int submit_queues; 158 - module_param(submit_queues, int, S_IRUGO); 68 + static int g_submit_queues = 1; 69 + module_param_named(submit_queues, g_submit_queues, int, S_IRUGO); 159 70 MODULE_PARM_DESC(submit_queues, "Number of submission queues"); 160 71 161 - static int home_node = NUMA_NO_NODE; 162 - module_param(home_node, int, S_IRUGO); 72 + static int g_home_node = NUMA_NO_NODE; 73 + module_param_named(home_node, g_home_node, int, S_IRUGO); 163 74 MODULE_PARM_DESC(home_node, "Home node for the device"); 164 75 165 - static int queue_mode = NULL_Q_MQ; 76 + static int g_queue_mode = NULL_Q_MQ; 166 77 167 78 static int null_param_store_val(const char *str, int *val, int min, int max) 168 79 { ··· 181 92 182 93 static int null_set_queue_mode(const char *str, const struct kernel_param *kp) 183 94 { 184 - return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ); 95 + return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ); 185 96 } 186 97 187 98 static const struct kernel_param_ops null_queue_mode_param_ops = { ··· 189 100 .get = param_get_int, 190 101 }; 191 102 192 - device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO); 103 + device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO); 193 104 MODULE_PARM_DESC(queue_mode, "Block interface to use 
(0=bio,1=rq,2=multiqueue)"); 194 105 195 - static int gb = 250; 196 - module_param(gb, int, S_IRUGO); 106 + static int g_gb = 250; 107 + module_param_named(gb, g_gb, int, S_IRUGO); 197 108 MODULE_PARM_DESC(gb, "Size in GB"); 198 109 199 - static int bs = 512; 200 - module_param(bs, int, S_IRUGO); 110 + static int g_bs = 512; 111 + module_param_named(bs, g_bs, int, S_IRUGO); 201 112 MODULE_PARM_DESC(bs, "Block size (in bytes)"); 202 113 203 114 static int nr_devices = 1; 204 115 module_param(nr_devices, int, S_IRUGO); 205 116 MODULE_PARM_DESC(nr_devices, "Number of devices to register"); 206 117 207 - static bool use_lightnvm; 208 - module_param(use_lightnvm, bool, S_IRUGO); 118 + static bool g_use_lightnvm; 119 + module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO); 209 120 MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); 210 121 211 - static bool blocking; 212 - module_param(blocking, bool, S_IRUGO); 122 + static bool g_blocking; 123 + module_param_named(blocking, g_blocking, bool, S_IRUGO); 213 124 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); 214 125 215 126 static bool shared_tags; 216 127 module_param(shared_tags, bool, S_IRUGO); 217 128 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); 218 129 219 - static int irqmode = NULL_IRQ_SOFTIRQ; 130 + static int g_irqmode = NULL_IRQ_SOFTIRQ; 220 131 221 132 static int null_set_irqmode(const char *str, const struct kernel_param *kp) 222 133 { 223 - return null_param_store_val(str, &irqmode, NULL_IRQ_NONE, 134 + return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE, 224 135 NULL_IRQ_TIMER); 225 136 } 226 137 ··· 229 140 .get = param_get_int, 230 141 }; 231 142 232 - device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); 143 + device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO); 233 144 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 
0-none, 1-softirq, 2-timer"); 234 145 235 - static unsigned long completion_nsec = 10000; 236 - module_param(completion_nsec, ulong, S_IRUGO); 146 + static unsigned long g_completion_nsec = 10000; 147 + module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO); 237 148 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); 238 149 239 - static int hw_queue_depth = 64; 240 - module_param(hw_queue_depth, int, S_IRUGO); 150 + static int g_hw_queue_depth = 64; 151 + module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO); 241 152 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); 242 153 243 - static bool use_per_node_hctx = false; 244 - module_param(use_per_node_hctx, bool, S_IRUGO); 154 + static bool g_use_per_node_hctx; 155 + module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO); 245 156 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); 157 + 158 + static struct nullb_device *null_alloc_dev(void); 159 + static void null_free_dev(struct nullb_device *dev); 160 + static void null_del_dev(struct nullb *nullb); 161 + static int null_add_dev(struct nullb_device *dev); 162 + static void null_free_device_storage(struct nullb_device *dev, bool is_cache); 163 + 164 + static inline struct nullb_device *to_nullb_device(struct config_item *item) 165 + { 166 + return item ? 
container_of(item, struct nullb_device, item) : NULL; 167 + } 168 + 169 + static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) 170 + { 171 + return snprintf(page, PAGE_SIZE, "%u\n", val); 172 + } 173 + 174 + static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, 175 + char *page) 176 + { 177 + return snprintf(page, PAGE_SIZE, "%lu\n", val); 178 + } 179 + 180 + static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) 181 + { 182 + return snprintf(page, PAGE_SIZE, "%u\n", val); 183 + } 184 + 185 + static ssize_t nullb_device_uint_attr_store(unsigned int *val, 186 + const char *page, size_t count) 187 + { 188 + unsigned int tmp; 189 + int result; 190 + 191 + result = kstrtouint(page, 0, &tmp); 192 + if (result) 193 + return result; 194 + 195 + *val = tmp; 196 + return count; 197 + } 198 + 199 + static ssize_t nullb_device_ulong_attr_store(unsigned long *val, 200 + const char *page, size_t count) 201 + { 202 + int result; 203 + unsigned long tmp; 204 + 205 + result = kstrtoul(page, 0, &tmp); 206 + if (result) 207 + return result; 208 + 209 + *val = tmp; 210 + return count; 211 + } 212 + 213 + static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, 214 + size_t count) 215 + { 216 + bool tmp; 217 + int result; 218 + 219 + result = kstrtobool(page, &tmp); 220 + if (result) 221 + return result; 222 + 223 + *val = tmp; 224 + return count; 225 + } 226 + 227 + /* The following macro should only be used with TYPE = {uint, ulong, bool}. 
*/ 228 + #define NULLB_DEVICE_ATTR(NAME, TYPE) \ 229 + static ssize_t \ 230 + nullb_device_##NAME##_show(struct config_item *item, char *page) \ 231 + { \ 232 + return nullb_device_##TYPE##_attr_show( \ 233 + to_nullb_device(item)->NAME, page); \ 234 + } \ 235 + static ssize_t \ 236 + nullb_device_##NAME##_store(struct config_item *item, const char *page, \ 237 + size_t count) \ 238 + { \ 239 + if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \ 240 + return -EBUSY; \ 241 + return nullb_device_##TYPE##_attr_store( \ 242 + &to_nullb_device(item)->NAME, page, count); \ 243 + } \ 244 + CONFIGFS_ATTR(nullb_device_, NAME); 245 + 246 + NULLB_DEVICE_ATTR(size, ulong); 247 + NULLB_DEVICE_ATTR(completion_nsec, ulong); 248 + NULLB_DEVICE_ATTR(submit_queues, uint); 249 + NULLB_DEVICE_ATTR(home_node, uint); 250 + NULLB_DEVICE_ATTR(queue_mode, uint); 251 + NULLB_DEVICE_ATTR(blocksize, uint); 252 + NULLB_DEVICE_ATTR(irqmode, uint); 253 + NULLB_DEVICE_ATTR(hw_queue_depth, uint); 254 + NULLB_DEVICE_ATTR(index, uint); 255 + NULLB_DEVICE_ATTR(use_lightnvm, bool); 256 + NULLB_DEVICE_ATTR(blocking, bool); 257 + NULLB_DEVICE_ATTR(use_per_node_hctx, bool); 258 + NULLB_DEVICE_ATTR(memory_backed, bool); 259 + NULLB_DEVICE_ATTR(discard, bool); 260 + NULLB_DEVICE_ATTR(mbps, uint); 261 + NULLB_DEVICE_ATTR(cache_size, ulong); 262 + 263 + static ssize_t nullb_device_power_show(struct config_item *item, char *page) 264 + { 265 + return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); 266 + } 267 + 268 + static ssize_t nullb_device_power_store(struct config_item *item, 269 + const char *page, size_t count) 270 + { 271 + struct nullb_device *dev = to_nullb_device(item); 272 + bool newp = false; 273 + ssize_t ret; 274 + 275 + ret = nullb_device_bool_attr_store(&newp, page, count); 276 + if (ret < 0) 277 + return ret; 278 + 279 + if (!dev->power && newp) { 280 + if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) 281 + return count; 282 + if (null_add_dev(dev)) { 
283 + clear_bit(NULLB_DEV_FL_UP, &dev->flags); 284 + return -ENOMEM; 285 + } 286 + 287 + set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); 288 + dev->power = newp; 289 + } else if (dev->power && !newp) { 290 + mutex_lock(&lock); 291 + dev->power = newp; 292 + null_del_dev(dev->nullb); 293 + mutex_unlock(&lock); 294 + clear_bit(NULLB_DEV_FL_UP, &dev->flags); 295 + } 296 + 297 + return count; 298 + } 299 + 300 + CONFIGFS_ATTR(nullb_device_, power); 301 + 302 + static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page) 303 + { 304 + struct nullb_device *t_dev = to_nullb_device(item); 305 + 306 + return badblocks_show(&t_dev->badblocks, page, 0); 307 + } 308 + 309 + static ssize_t nullb_device_badblocks_store(struct config_item *item, 310 + const char *page, size_t count) 311 + { 312 + struct nullb_device *t_dev = to_nullb_device(item); 313 + char *orig, *buf, *tmp; 314 + u64 start, end; 315 + int ret; 316 + 317 + orig = kstrndup(page, count, GFP_KERNEL); 318 + if (!orig) 319 + return -ENOMEM; 320 + 321 + buf = strstrip(orig); 322 + 323 + ret = -EINVAL; 324 + if (buf[0] != '+' && buf[0] != '-') 325 + goto out; 326 + tmp = strchr(&buf[1], '-'); 327 + if (!tmp) 328 + goto out; 329 + *tmp = '\0'; 330 + ret = kstrtoull(buf + 1, 0, &start); 331 + if (ret) 332 + goto out; 333 + ret = kstrtoull(tmp + 1, 0, &end); 334 + if (ret) 335 + goto out; 336 + ret = -EINVAL; 337 + if (start > end) 338 + goto out; 339 + /* enable badblocks */ 340 + cmpxchg(&t_dev->badblocks.shift, -1, 0); 341 + if (buf[0] == '+') 342 + ret = badblocks_set(&t_dev->badblocks, start, 343 + end - start + 1, 1); 344 + else 345 + ret = badblocks_clear(&t_dev->badblocks, start, 346 + end - start + 1); 347 + if (ret == 0) 348 + ret = count; 349 + out: 350 + kfree(orig); 351 + return ret; 352 + } 353 + CONFIGFS_ATTR(nullb_device_, badblocks); 354 + 355 + static struct configfs_attribute *nullb_device_attrs[] = { 356 + &nullb_device_attr_size, 357 + &nullb_device_attr_completion_nsec, 358 + 
&nullb_device_attr_submit_queues, 359 + &nullb_device_attr_home_node, 360 + &nullb_device_attr_queue_mode, 361 + &nullb_device_attr_blocksize, 362 + &nullb_device_attr_irqmode, 363 + &nullb_device_attr_hw_queue_depth, 364 + &nullb_device_attr_index, 365 + &nullb_device_attr_use_lightnvm, 366 + &nullb_device_attr_blocking, 367 + &nullb_device_attr_use_per_node_hctx, 368 + &nullb_device_attr_power, 369 + &nullb_device_attr_memory_backed, 370 + &nullb_device_attr_discard, 371 + &nullb_device_attr_mbps, 372 + &nullb_device_attr_cache_size, 373 + &nullb_device_attr_badblocks, 374 + NULL, 375 + }; 376 + 377 + static void nullb_device_release(struct config_item *item) 378 + { 379 + struct nullb_device *dev = to_nullb_device(item); 380 + 381 + badblocks_exit(&dev->badblocks); 382 + null_free_device_storage(dev, false); 383 + null_free_dev(dev); 384 + } 385 + 386 + static struct configfs_item_operations nullb_device_ops = { 387 + .release = nullb_device_release, 388 + }; 389 + 390 + static struct config_item_type nullb_device_type = { 391 + .ct_item_ops = &nullb_device_ops, 392 + .ct_attrs = nullb_device_attrs, 393 + .ct_owner = THIS_MODULE, 394 + }; 395 + 396 + static struct 397 + config_item *nullb_group_make_item(struct config_group *group, const char *name) 398 + { 399 + struct nullb_device *dev; 400 + 401 + dev = null_alloc_dev(); 402 + if (!dev) 403 + return ERR_PTR(-ENOMEM); 404 + 405 + config_item_init_type_name(&dev->item, name, &nullb_device_type); 406 + 407 + return &dev->item; 408 + } 409 + 410 + static void 411 + nullb_group_drop_item(struct config_group *group, struct config_item *item) 412 + { 413 + struct nullb_device *dev = to_nullb_device(item); 414 + 415 + if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { 416 + mutex_lock(&lock); 417 + dev->power = false; 418 + null_del_dev(dev->nullb); 419 + mutex_unlock(&lock); 420 + } 421 + 422 + config_item_put(item); 423 + } 424 + 425 + static ssize_t memb_group_features_show(struct config_item *item, char 
*page) 426 + { 427 + return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n"); 428 + } 429 + 430 + CONFIGFS_ATTR_RO(memb_group_, features); 431 + 432 + static struct configfs_attribute *nullb_group_attrs[] = { 433 + &memb_group_attr_features, 434 + NULL, 435 + }; 436 + 437 + static struct configfs_group_operations nullb_group_ops = { 438 + .make_item = nullb_group_make_item, 439 + .drop_item = nullb_group_drop_item, 440 + }; 441 + 442 + static struct config_item_type nullb_group_type = { 443 + .ct_group_ops = &nullb_group_ops, 444 + .ct_attrs = nullb_group_attrs, 445 + .ct_owner = THIS_MODULE, 446 + }; 447 + 448 + static struct configfs_subsystem nullb_subsys = { 449 + .su_group = { 450 + .cg_item = { 451 + .ci_namebuf = "nullb", 452 + .ci_type = &nullb_group_type, 453 + }, 454 + }, 455 + }; 456 + 457 + static inline int null_cache_active(struct nullb *nullb) 458 + { 459 + return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); 460 + } 461 + 462 + static struct nullb_device *null_alloc_dev(void) 463 + { 464 + struct nullb_device *dev; 465 + 466 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 467 + if (!dev) 468 + return NULL; 469 + INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); 470 + INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); 471 + if (badblocks_init(&dev->badblocks, 0)) { 472 + kfree(dev); 473 + return NULL; 474 + } 475 + 476 + dev->size = g_gb * 1024; 477 + dev->completion_nsec = g_completion_nsec; 478 + dev->submit_queues = g_submit_queues; 479 + dev->home_node = g_home_node; 480 + dev->queue_mode = g_queue_mode; 481 + dev->blocksize = g_bs; 482 + dev->irqmode = g_irqmode; 483 + dev->hw_queue_depth = g_hw_queue_depth; 484 + dev->use_lightnvm = g_use_lightnvm; 485 + dev->blocking = g_blocking; 486 + dev->use_per_node_hctx = g_use_per_node_hctx; 487 + return dev; 488 + } 489 + 490 + static void null_free_dev(struct nullb_device *dev) 491 + { 492 + kfree(dev); 493 + } 246 494 247 495 static void put_tag(struct nullb_queue *nq, unsigned int tag) 
248 496 { ··· 619 193 cmd = &nq->cmds[tag]; 620 194 cmd->tag = tag; 621 195 cmd->nq = nq; 622 - if (irqmode == NULL_IRQ_TIMER) { 196 + if (nq->dev->irqmode == NULL_IRQ_TIMER) { 623 197 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, 624 198 HRTIMER_MODE_REL); 625 199 cmd->timer.function = null_cmd_timer_expired; ··· 655 229 static void end_cmd(struct nullb_cmd *cmd) 656 230 { 657 231 struct request_queue *q = NULL; 232 + int queue_mode = cmd->nq->dev->queue_mode; 658 233 659 234 if (cmd->rq) 660 235 q = cmd->rq->q; 661 236 662 237 switch (queue_mode) { 663 238 case NULL_Q_MQ: 664 - blk_mq_end_request(cmd->rq, BLK_STS_OK); 239 + blk_mq_end_request(cmd->rq, cmd->error); 665 240 return; 666 241 case NULL_Q_RQ: 667 242 INIT_LIST_HEAD(&cmd->rq->queuelist); 668 - blk_end_request_all(cmd->rq, BLK_STS_OK); 243 + blk_end_request_all(cmd->rq, cmd->error); 669 244 break; 670 245 case NULL_Q_BIO: 246 + cmd->bio->bi_status = cmd->error; 671 247 bio_endio(cmd->bio); 672 248 break; 673 249 } ··· 695 267 696 268 static void null_cmd_end_timer(struct nullb_cmd *cmd) 697 269 { 698 - ktime_t kt = completion_nsec; 270 + ktime_t kt = cmd->nq->dev->completion_nsec; 699 271 700 272 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); 701 273 } 702 274 703 275 static void null_softirq_done_fn(struct request *rq) 704 276 { 705 - if (queue_mode == NULL_Q_MQ) 277 + struct nullb *nullb = rq->q->queuedata; 278 + 279 + if (nullb->dev->queue_mode == NULL_Q_MQ) 706 280 end_cmd(blk_mq_rq_to_pdu(rq)); 707 281 else 708 282 end_cmd(rq->special); 709 283 } 710 284 711 - static inline void null_handle_cmd(struct nullb_cmd *cmd) 285 + static struct nullb_page *null_alloc_page(gfp_t gfp_flags) 712 286 { 287 + struct nullb_page *t_page; 288 + 289 + t_page = kmalloc(sizeof(struct nullb_page), gfp_flags); 290 + if (!t_page) 291 + goto out; 292 + 293 + t_page->page = alloc_pages(gfp_flags, 0); 294 + if (!t_page->page) 295 + goto out_freepage; 296 + 297 + t_page->bitmap = 0; 298 + return t_page; 299 + out_freepage: 
300 + kfree(t_page); 301 + out: 302 + return NULL; 303 + } 304 + 305 + static void null_free_page(struct nullb_page *t_page) 306 + { 307 + __set_bit(NULLB_PAGE_FREE, &t_page->bitmap); 308 + if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap)) 309 + return; 310 + __free_page(t_page->page); 311 + kfree(t_page); 312 + } 313 + 314 + static void null_free_sector(struct nullb *nullb, sector_t sector, 315 + bool is_cache) 316 + { 317 + unsigned int sector_bit; 318 + u64 idx; 319 + struct nullb_page *t_page, *ret; 320 + struct radix_tree_root *root; 321 + 322 + root = is_cache ? &nullb->dev->cache : &nullb->dev->data; 323 + idx = sector >> PAGE_SECTORS_SHIFT; 324 + sector_bit = (sector & SECTOR_MASK); 325 + 326 + t_page = radix_tree_lookup(root, idx); 327 + if (t_page) { 328 + __clear_bit(sector_bit, &t_page->bitmap); 329 + 330 + if (!t_page->bitmap) { 331 + ret = radix_tree_delete_item(root, idx, t_page); 332 + WARN_ON(ret != t_page); 333 + null_free_page(ret); 334 + if (is_cache) 335 + nullb->dev->curr_cache -= PAGE_SIZE; 336 + } 337 + } 338 + } 339 + 340 + static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, 341 + struct nullb_page *t_page, bool is_cache) 342 + { 343 + struct radix_tree_root *root; 344 + 345 + root = is_cache ? &nullb->dev->cache : &nullb->dev->data; 346 + 347 + if (radix_tree_insert(root, idx, t_page)) { 348 + null_free_page(t_page); 349 + t_page = radix_tree_lookup(root, idx); 350 + WARN_ON(!t_page || t_page->page->index != idx); 351 + } else if (is_cache) 352 + nullb->dev->curr_cache += PAGE_SIZE; 353 + 354 + return t_page; 355 + } 356 + 357 + static void null_free_device_storage(struct nullb_device *dev, bool is_cache) 358 + { 359 + unsigned long pos = 0; 360 + int nr_pages; 361 + struct nullb_page *ret, *t_pages[FREE_BATCH]; 362 + struct radix_tree_root *root; 363 + 364 + root = is_cache ? 
&dev->cache : &dev->data; 365 + 366 + do { 367 + int i; 368 + 369 + nr_pages = radix_tree_gang_lookup(root, 370 + (void **)t_pages, pos, FREE_BATCH); 371 + 372 + for (i = 0; i < nr_pages; i++) { 373 + pos = t_pages[i]->page->index; 374 + ret = radix_tree_delete_item(root, pos, t_pages[i]); 375 + WARN_ON(ret != t_pages[i]); 376 + null_free_page(ret); 377 + } 378 + 379 + pos++; 380 + } while (nr_pages == FREE_BATCH); 381 + 382 + if (is_cache) 383 + dev->curr_cache = 0; 384 + } 385 + 386 + static struct nullb_page *__null_lookup_page(struct nullb *nullb, 387 + sector_t sector, bool for_write, bool is_cache) 388 + { 389 + unsigned int sector_bit; 390 + u64 idx; 391 + struct nullb_page *t_page; 392 + struct radix_tree_root *root; 393 + 394 + idx = sector >> PAGE_SECTORS_SHIFT; 395 + sector_bit = (sector & SECTOR_MASK); 396 + 397 + root = is_cache ? &nullb->dev->cache : &nullb->dev->data; 398 + t_page = radix_tree_lookup(root, idx); 399 + WARN_ON(t_page && t_page->page->index != idx); 400 + 401 + if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap))) 402 + return t_page; 403 + 404 + return NULL; 405 + } 406 + 407 + static struct nullb_page *null_lookup_page(struct nullb *nullb, 408 + sector_t sector, bool for_write, bool ignore_cache) 409 + { 410 + struct nullb_page *page = NULL; 411 + 412 + if (!ignore_cache) 413 + page = __null_lookup_page(nullb, sector, for_write, true); 414 + if (page) 415 + return page; 416 + return __null_lookup_page(nullb, sector, for_write, false); 417 + } 418 + 419 + static struct nullb_page *null_insert_page(struct nullb *nullb, 420 + sector_t sector, bool ignore_cache) 421 + { 422 + u64 idx; 423 + struct nullb_page *t_page; 424 + 425 + t_page = null_lookup_page(nullb, sector, true, ignore_cache); 426 + if (t_page) 427 + return t_page; 428 + 429 + spin_unlock_irq(&nullb->lock); 430 + 431 + t_page = null_alloc_page(GFP_NOIO); 432 + if (!t_page) 433 + goto out_lock; 434 + 435 + if (radix_tree_preload(GFP_NOIO)) 436 + goto 
out_freepage; 437 + 438 + spin_lock_irq(&nullb->lock); 439 + idx = sector >> PAGE_SECTORS_SHIFT; 440 + t_page->page->index = idx; 441 + t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); 442 + radix_tree_preload_end(); 443 + 444 + return t_page; 445 + out_freepage: 446 + null_free_page(t_page); 447 + out_lock: 448 + spin_lock_irq(&nullb->lock); 449 + return null_lookup_page(nullb, sector, true, ignore_cache); 450 + } 451 + 452 + static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) 453 + { 454 + int i; 455 + unsigned int offset; 456 + u64 idx; 457 + struct nullb_page *t_page, *ret; 458 + void *dst, *src; 459 + 460 + idx = c_page->page->index; 461 + 462 + t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); 463 + 464 + __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap); 465 + if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) { 466 + null_free_page(c_page); 467 + if (t_page && t_page->bitmap == 0) { 468 + ret = radix_tree_delete_item(&nullb->dev->data, 469 + idx, t_page); 470 + null_free_page(t_page); 471 + } 472 + return 0; 473 + } 474 + 475 + if (!t_page) 476 + return -ENOMEM; 477 + 478 + src = kmap_atomic(c_page->page); 479 + dst = kmap_atomic(t_page->page); 480 + 481 + for (i = 0; i < PAGE_SECTORS; 482 + i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { 483 + if (test_bit(i, &c_page->bitmap)) { 484 + offset = (i << SECTOR_SHIFT); 485 + memcpy(dst + offset, src + offset, 486 + nullb->dev->blocksize); 487 + __set_bit(i, &t_page->bitmap); 488 + } 489 + } 490 + 491 + kunmap_atomic(dst); 492 + kunmap_atomic(src); 493 + 494 + ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); 495 + null_free_page(ret); 496 + nullb->dev->curr_cache -= PAGE_SIZE; 497 + 498 + return 0; 499 + } 500 + 501 + static int null_make_cache_space(struct nullb *nullb, unsigned long n) 502 + { 503 + int i, err, nr_pages; 504 + struct nullb_page *c_pages[FREE_BATCH]; 505 + unsigned long flushed = 0, one_round; 506 + 507 + again: 508 + if 
((nullb->dev->cache_size * 1024 * 1024) > 509 + nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) 510 + return 0; 511 + 512 + nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, 513 + (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); 514 + /* 515 + * nullb_flush_cache_page could unlock before using the c_pages. To 516 + * avoid race, we don't allow page free 517 + */ 518 + for (i = 0; i < nr_pages; i++) { 519 + nullb->cache_flush_pos = c_pages[i]->page->index; 520 + /* 521 + * We found the page which is being flushed to disk by other 522 + * threads 523 + */ 524 + if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap)) 525 + c_pages[i] = NULL; 526 + else 527 + __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap); 528 + } 529 + 530 + one_round = 0; 531 + for (i = 0; i < nr_pages; i++) { 532 + if (c_pages[i] == NULL) 533 + continue; 534 + err = null_flush_cache_page(nullb, c_pages[i]); 535 + if (err) 536 + return err; 537 + one_round++; 538 + } 539 + flushed += one_round << PAGE_SHIFT; 540 + 541 + if (n > flushed) { 542 + if (nr_pages == 0) 543 + nullb->cache_flush_pos = 0; 544 + if (one_round == 0) { 545 + /* give other threads a chance */ 546 + spin_unlock_irq(&nullb->lock); 547 + spin_lock_irq(&nullb->lock); 548 + } 549 + goto again; 550 + } 551 + return 0; 552 + } 553 + 554 + static int copy_to_nullb(struct nullb *nullb, struct page *source, 555 + unsigned int off, sector_t sector, size_t n, bool is_fua) 556 + { 557 + size_t temp, count = 0; 558 + unsigned int offset; 559 + struct nullb_page *t_page; 560 + void *dst, *src; 561 + 562 + while (count < n) { 563 + temp = min_t(size_t, nullb->dev->blocksize, n - count); 564 + 565 + if (null_cache_active(nullb) && !is_fua) 566 + null_make_cache_space(nullb, PAGE_SIZE); 567 + 568 + offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; 569 + t_page = null_insert_page(nullb, sector, 570 + !null_cache_active(nullb) || is_fua); 571 + if (!t_page) 572 + return -ENOSPC; 573 + 574 + src = kmap_atomic(source); 575 + dst = 
kmap_atomic(t_page->page); 576 + memcpy(dst + offset, src + off + count, temp); 577 + kunmap_atomic(dst); 578 + kunmap_atomic(src); 579 + 580 + __set_bit(sector & SECTOR_MASK, &t_page->bitmap); 581 + 582 + if (is_fua) 583 + null_free_sector(nullb, sector, true); 584 + 585 + count += temp; 586 + sector += temp >> SECTOR_SHIFT; 587 + } 588 + return 0; 589 + } 590 + 591 + static int copy_from_nullb(struct nullb *nullb, struct page *dest, 592 + unsigned int off, sector_t sector, size_t n) 593 + { 594 + size_t temp, count = 0; 595 + unsigned int offset; 596 + struct nullb_page *t_page; 597 + void *dst, *src; 598 + 599 + while (count < n) { 600 + temp = min_t(size_t, nullb->dev->blocksize, n - count); 601 + 602 + offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; 603 + t_page = null_lookup_page(nullb, sector, false, 604 + !null_cache_active(nullb)); 605 + 606 + dst = kmap_atomic(dest); 607 + if (!t_page) { 608 + memset(dst + off + count, 0, temp); 609 + goto next; 610 + } 611 + src = kmap_atomic(t_page->page); 612 + memcpy(dst + off + count, src + offset, temp); 613 + kunmap_atomic(src); 614 + next: 615 + kunmap_atomic(dst); 616 + 617 + count += temp; 618 + sector += temp >> SECTOR_SHIFT; 619 + } 620 + return 0; 621 + } 622 + 623 + static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n) 624 + { 625 + size_t temp; 626 + 627 + spin_lock_irq(&nullb->lock); 628 + while (n > 0) { 629 + temp = min_t(size_t, n, nullb->dev->blocksize); 630 + null_free_sector(nullb, sector, false); 631 + if (null_cache_active(nullb)) 632 + null_free_sector(nullb, sector, true); 633 + sector += temp >> SECTOR_SHIFT; 634 + n -= temp; 635 + } 636 + spin_unlock_irq(&nullb->lock); 637 + } 638 + 639 + static int null_handle_flush(struct nullb *nullb) 640 + { 641 + int err; 642 + 643 + if (!null_cache_active(nullb)) 644 + return 0; 645 + 646 + spin_lock_irq(&nullb->lock); 647 + while (true) { 648 + err = null_make_cache_space(nullb, 649 + nullb->dev->cache_size * 1024 * 1024); 650 
+ if (err || nullb->dev->curr_cache == 0) 651 + break; 652 + } 653 + 654 + WARN_ON(!radix_tree_empty(&nullb->dev->cache)); 655 + spin_unlock_irq(&nullb->lock); 656 + return err; 657 + } 658 + 659 + static int null_transfer(struct nullb *nullb, struct page *page, 660 + unsigned int len, unsigned int off, bool is_write, sector_t sector, 661 + bool is_fua) 662 + { 663 + int err = 0; 664 + 665 + if (!is_write) { 666 + err = copy_from_nullb(nullb, page, off, sector, len); 667 + flush_dcache_page(page); 668 + } else { 669 + flush_dcache_page(page); 670 + err = copy_to_nullb(nullb, page, off, sector, len, is_fua); 671 + } 672 + 673 + return err; 674 + } 675 + 676 + static int null_handle_rq(struct nullb_cmd *cmd) 677 + { 678 + struct request *rq = cmd->rq; 679 + struct nullb *nullb = cmd->nq->dev->nullb; 680 + int err; 681 + unsigned int len; 682 + sector_t sector; 683 + struct req_iterator iter; 684 + struct bio_vec bvec; 685 + 686 + sector = blk_rq_pos(rq); 687 + 688 + if (req_op(rq) == REQ_OP_DISCARD) { 689 + null_handle_discard(nullb, sector, blk_rq_bytes(rq)); 690 + return 0; 691 + } 692 + 693 + spin_lock_irq(&nullb->lock); 694 + rq_for_each_segment(bvec, rq, iter) { 695 + len = bvec.bv_len; 696 + err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, 697 + op_is_write(req_op(rq)), sector, 698 + req_op(rq) & REQ_FUA); 699 + if (err) { 700 + spin_unlock_irq(&nullb->lock); 701 + return err; 702 + } 703 + sector += len >> SECTOR_SHIFT; 704 + } 705 + spin_unlock_irq(&nullb->lock); 706 + 707 + return 0; 708 + } 709 + 710 + static int null_handle_bio(struct nullb_cmd *cmd) 711 + { 712 + struct bio *bio = cmd->bio; 713 + struct nullb *nullb = cmd->nq->dev->nullb; 714 + int err; 715 + unsigned int len; 716 + sector_t sector; 717 + struct bio_vec bvec; 718 + struct bvec_iter iter; 719 + 720 + sector = bio->bi_iter.bi_sector; 721 + 722 + if (bio_op(bio) == REQ_OP_DISCARD) { 723 + null_handle_discard(nullb, sector, 724 + bio_sectors(bio) << SECTOR_SHIFT); 725 + return 0; 
726 + } 727 + 728 + spin_lock_irq(&nullb->lock); 729 + bio_for_each_segment(bvec, bio, iter) { 730 + len = bvec.bv_len; 731 + err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, 732 + op_is_write(bio_op(bio)), sector, 733 + bio_op(bio) & REQ_FUA); 734 + if (err) { 735 + spin_unlock_irq(&nullb->lock); 736 + return err; 737 + } 738 + sector += len >> SECTOR_SHIFT; 739 + } 740 + spin_unlock_irq(&nullb->lock); 741 + return 0; 742 + } 743 + 744 + static void null_stop_queue(struct nullb *nullb) 745 + { 746 + struct request_queue *q = nullb->q; 747 + 748 + if (nullb->dev->queue_mode == NULL_Q_MQ) 749 + blk_mq_stop_hw_queues(q); 750 + else { 751 + spin_lock_irq(q->queue_lock); 752 + blk_stop_queue(q); 753 + spin_unlock_irq(q->queue_lock); 754 + } 755 + } 756 + 757 + static void null_restart_queue_async(struct nullb *nullb) 758 + { 759 + struct request_queue *q = nullb->q; 760 + unsigned long flags; 761 + 762 + if (nullb->dev->queue_mode == NULL_Q_MQ) 763 + blk_mq_start_stopped_hw_queues(q, true); 764 + else { 765 + spin_lock_irqsave(q->queue_lock, flags); 766 + blk_start_queue_async(q); 767 + spin_unlock_irqrestore(q->queue_lock, flags); 768 + } 769 + } 770 + 771 + static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) 772 + { 773 + struct nullb_device *dev = cmd->nq->dev; 774 + struct nullb *nullb = dev->nullb; 775 + int err = 0; 776 + 777 + if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { 778 + struct request *rq = cmd->rq; 779 + 780 + if (!hrtimer_active(&nullb->bw_timer)) 781 + hrtimer_restart(&nullb->bw_timer); 782 + 783 + if (atomic_long_sub_return(blk_rq_bytes(rq), 784 + &nullb->cur_bytes) < 0) { 785 + null_stop_queue(nullb); 786 + /* race with timer */ 787 + if (atomic_long_read(&nullb->cur_bytes) > 0) 788 + null_restart_queue_async(nullb); 789 + if (dev->queue_mode == NULL_Q_RQ) { 790 + struct request_queue *q = nullb->q; 791 + 792 + spin_lock_irq(q->queue_lock); 793 + rq->rq_flags |= RQF_DONTPREP; 794 + blk_requeue_request(q, rq); 795 + 
spin_unlock_irq(q->queue_lock); 796 + return BLK_STS_OK; 797 + } else 798 + /* requeue request */ 799 + return BLK_STS_RESOURCE; 800 + } 801 + } 802 + 803 + if (nullb->dev->badblocks.shift != -1) { 804 + int bad_sectors; 805 + sector_t sector, size, first_bad; 806 + bool is_flush = true; 807 + 808 + if (dev->queue_mode == NULL_Q_BIO && 809 + bio_op(cmd->bio) != REQ_OP_FLUSH) { 810 + is_flush = false; 811 + sector = cmd->bio->bi_iter.bi_sector; 812 + size = bio_sectors(cmd->bio); 813 + } 814 + if (dev->queue_mode != NULL_Q_BIO && 815 + req_op(cmd->rq) != REQ_OP_FLUSH) { 816 + is_flush = false; 817 + sector = blk_rq_pos(cmd->rq); 818 + size = blk_rq_sectors(cmd->rq); 819 + } 820 + if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector, 821 + size, &first_bad, &bad_sectors)) { 822 + cmd->error = BLK_STS_IOERR; 823 + goto out; 824 + } 825 + } 826 + 827 + if (dev->memory_backed) { 828 + if (dev->queue_mode == NULL_Q_BIO) { 829 + if (bio_op(cmd->bio) == REQ_OP_FLUSH) 830 + err = null_handle_flush(nullb); 831 + else 832 + err = null_handle_bio(cmd); 833 + } else { 834 + if (req_op(cmd->rq) == REQ_OP_FLUSH) 835 + err = null_handle_flush(nullb); 836 + else 837 + err = null_handle_rq(cmd); 838 + } 839 + } 840 + cmd->error = errno_to_blk_status(err); 841 + out: 713 842 /* Complete IO by inline, softirq or timer */ 714 - switch (irqmode) { 843 + switch (dev->irqmode) { 715 844 case NULL_IRQ_SOFTIRQ: 716 - switch (queue_mode) { 845 + switch (dev->queue_mode) { 717 846 case NULL_Q_MQ: 718 847 blk_mq_complete_request(cmd->rq); 719 848 break; ··· 1292 307 null_cmd_end_timer(cmd); 1293 308 break; 1294 309 } 310 + return BLK_STS_OK; 311 + } 312 + 313 + static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) 314 + { 315 + struct nullb *nullb = container_of(timer, struct nullb, bw_timer); 316 + ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); 317 + unsigned int mbps = nullb->dev->mbps; 318 + 319 + if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) 
320 + return HRTIMER_NORESTART; 321 + 322 + atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); 323 + null_restart_queue_async(nullb); 324 + 325 + hrtimer_forward_now(&nullb->bw_timer, timer_interval); 326 + 327 + return HRTIMER_RESTART; 328 + } 329 + 330 + static void nullb_setup_bwtimer(struct nullb *nullb) 331 + { 332 + ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); 333 + 334 + hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 335 + nullb->bw_timer.function = nullb_bwtimer_fn; 336 + atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); 337 + hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); 1295 338 } 1296 339 1297 340 static struct nullb_queue *nullb_to_queue(struct nullb *nullb) ··· 1379 366 const struct blk_mq_queue_data *bd) 1380 367 { 1381 368 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 369 + struct nullb_queue *nq = hctx->driver_data; 1382 370 1383 371 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); 1384 372 1385 - if (irqmode == NULL_IRQ_TIMER) { 373 + if (nq->dev->irqmode == NULL_IRQ_TIMER) { 1386 374 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1387 375 cmd->timer.function = null_cmd_timer_expired; 1388 376 } 1389 377 cmd->rq = bd->rq; 1390 - cmd->nq = hctx->driver_data; 378 + cmd->nq = nq; 1391 379 1392 380 blk_mq_start_request(bd->rq); 1393 381 1394 - null_handle_cmd(cmd); 1395 - return BLK_STS_OK; 382 + return null_handle_cmd(cmd); 1396 383 } 1397 384 1398 385 static const struct blk_mq_ops null_mq_ops = { ··· 1451 438 1452 439 static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id) 1453 440 { 1454 - sector_t size = gb * 1024 * 1024 * 1024ULL; 441 + struct nullb *nullb = dev->q->queuedata; 442 + sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL; 1455 443 sector_t blksize; 1456 444 struct nvm_id_group *grp; 1457 445 ··· 1474 460 id->ppaf.ch_offset = 56; 1475 461 id->ppaf.ch_len = 8; 1476 462 1477 - sector_div(size, bs); /* convert size to pages */ 463 + 
sector_div(size, nullb->dev->blocksize); /* convert size to pages */ 1478 464 size >>= 8; /* concert size to pgs pr blk */ 1479 465 grp = &id->grp; 1480 466 grp->mtype = 0; ··· 1488 474 grp->num_blk = blksize; 1489 475 grp->num_pln = 1; 1490 476 1491 - grp->fpg_sz = bs; 1492 - grp->csecs = bs; 477 + grp->fpg_sz = nullb->dev->blocksize; 478 + grp->csecs = nullb->dev->blocksize; 1493 479 grp->trdt = 25000; 1494 480 grp->trdm = 25000; 1495 481 grp->tprt = 500000; ··· 1497 483 grp->tbet = 1500000; 1498 484 grp->tbem = 1500000; 1499 485 grp->mpos = 0x010101; /* single plane rwe */ 1500 - grp->cpar = hw_queue_depth; 486 + grp->cpar = nullb->dev->hw_queue_depth; 1501 487 1502 488 return 0; 1503 489 } ··· 1582 568 1583 569 static void null_del_dev(struct nullb *nullb) 1584 570 { 571 + struct nullb_device *dev = nullb->dev; 572 + 573 + ida_simple_remove(&nullb_indexes, nullb->index); 574 + 1585 575 list_del_init(&nullb->list); 1586 576 1587 - if (use_lightnvm) 577 + if (dev->use_lightnvm) 1588 578 null_nvm_unregister(nullb); 1589 579 else 1590 580 del_gendisk(nullb->disk); 581 + 582 + if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { 583 + hrtimer_cancel(&nullb->bw_timer); 584 + atomic_long_set(&nullb->cur_bytes, LONG_MAX); 585 + null_restart_queue_async(nullb); 586 + } 587 + 1591 588 blk_cleanup_queue(nullb->q); 1592 - if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) 589 + if (dev->queue_mode == NULL_Q_MQ && 590 + nullb->tag_set == &nullb->__tag_set) 1593 591 blk_mq_free_tag_set(nullb->tag_set); 1594 - if (!use_lightnvm) 592 + if (!dev->use_lightnvm) 1595 593 put_disk(nullb->disk); 1596 594 cleanup_queues(nullb); 595 + if (null_cache_active(nullb)) 596 + null_free_device_storage(nullb->dev, true); 1597 597 kfree(nullb); 598 + dev->nullb = NULL; 599 + } 600 + 601 + static void null_config_discard(struct nullb *nullb) 602 + { 603 + if (nullb->dev->discard == false) 604 + return; 605 + nullb->q->limits.discard_granularity = nullb->dev->blocksize; 
606 + nullb->q->limits.discard_alignment = nullb->dev->blocksize; 607 + blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); 608 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q); 1598 609 } 1599 610 1600 611 static int null_open(struct block_device *bdev, fmode_t mode) ··· 1644 605 1645 606 init_waitqueue_head(&nq->wait); 1646 607 nq->queue_depth = nullb->queue_depth; 608 + nq->dev = nullb->dev; 1647 609 } 1648 610 1649 611 static void null_init_queues(struct nullb *nullb) ··· 1692 652 1693 653 static int setup_queues(struct nullb *nullb) 1694 654 { 1695 - nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), 1696 - GFP_KERNEL); 655 + nullb->queues = kzalloc(nullb->dev->submit_queues * 656 + sizeof(struct nullb_queue), GFP_KERNEL); 1697 657 if (!nullb->queues) 1698 658 return -ENOMEM; 1699 659 1700 660 nullb->nr_queues = 0; 1701 - nullb->queue_depth = hw_queue_depth; 661 + nullb->queue_depth = nullb->dev->hw_queue_depth; 1702 662 1703 663 return 0; 1704 664 } ··· 1708 668 struct nullb_queue *nq; 1709 669 int i, ret = 0; 1710 670 1711 - for (i = 0; i < submit_queues; i++) { 671 + for (i = 0; i < nullb->dev->submit_queues; i++) { 1712 672 nq = &nullb->queues[i]; 1713 673 1714 674 null_init_queue(nullb, nq); ··· 1726 686 struct gendisk *disk; 1727 687 sector_t size; 1728 688 1729 - disk = nullb->disk = alloc_disk_node(1, home_node); 689 + disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); 1730 690 if (!disk) 1731 691 return -ENOMEM; 1732 - size = gb * 1024 * 1024 * 1024ULL; 692 + size = (sector_t)nullb->dev->size * 1024 * 1024ULL; 1733 693 set_capacity(disk, size >> 9); 1734 694 1735 695 disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; ··· 1744 704 return 0; 1745 705 } 1746 706 1747 - static int null_init_tag_set(struct blk_mq_tag_set *set) 707 + static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) 1748 708 { 1749 709 set->ops = &null_mq_ops; 1750 - set->nr_hw_queues = submit_queues; 
1751 - set->queue_depth = hw_queue_depth; 1752 - set->numa_node = home_node; 710 + set->nr_hw_queues = nullb ? nullb->dev->submit_queues : 711 + g_submit_queues; 712 + set->queue_depth = nullb ? nullb->dev->hw_queue_depth : 713 + g_hw_queue_depth; 714 + set->numa_node = nullb ? nullb->dev->home_node : g_home_node; 1753 715 set->cmd_size = sizeof(struct nullb_cmd); 1754 716 set->flags = BLK_MQ_F_SHOULD_MERGE; 1755 717 set->driver_data = NULL; 1756 718 1757 - if (blocking) 719 + if ((nullb && nullb->dev->blocking) || g_blocking) 1758 720 set->flags |= BLK_MQ_F_BLOCKING; 1759 721 1760 722 return blk_mq_alloc_tag_set(set); 1761 723 } 1762 724 1763 - static int null_add_dev(void) 725 + static void null_validate_conf(struct nullb_device *dev) 726 + { 727 + dev->blocksize = round_down(dev->blocksize, 512); 728 + dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); 729 + if (dev->use_lightnvm && dev->blocksize != 4096) 730 + dev->blocksize = 4096; 731 + 732 + if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ) 733 + dev->queue_mode = NULL_Q_MQ; 734 + 735 + if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { 736 + if (dev->submit_queues != nr_online_nodes) 737 + dev->submit_queues = nr_online_nodes; 738 + } else if (dev->submit_queues > nr_cpu_ids) 739 + dev->submit_queues = nr_cpu_ids; 740 + else if (dev->submit_queues == 0) 741 + dev->submit_queues = 1; 742 + 743 + dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); 744 + dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); 745 + 746 + /* Do memory allocation, so set blocking */ 747 + if (dev->memory_backed) 748 + dev->blocking = true; 749 + else /* cache is meaningless */ 750 + dev->cache_size = 0; 751 + dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, 752 + dev->cache_size); 753 + dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); 754 + /* can not stop a queue */ 755 + if (dev->queue_mode == NULL_Q_BIO) 756 + dev->mbps = 0; 757 + } 758 + 759 + 
static int null_add_dev(struct nullb_device *dev) 1764 760 { 1765 761 struct nullb *nullb; 1766 762 int rv; 1767 763 1768 - nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); 764 + null_validate_conf(dev); 765 + 766 + nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); 1769 767 if (!nullb) { 1770 768 rv = -ENOMEM; 1771 769 goto out; 1772 770 } 771 + nullb->dev = dev; 772 + dev->nullb = nullb; 1773 773 1774 774 spin_lock_init(&nullb->lock); 1775 - 1776 - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) 1777 - submit_queues = nr_online_nodes; 1778 775 1779 776 rv = setup_queues(nullb); 1780 777 if (rv) 1781 778 goto out_free_nullb; 1782 779 1783 - if (queue_mode == NULL_Q_MQ) { 780 + if (dev->queue_mode == NULL_Q_MQ) { 1784 781 if (shared_tags) { 1785 782 nullb->tag_set = &tag_set; 1786 783 rv = 0; 1787 784 } else { 1788 785 nullb->tag_set = &nullb->__tag_set; 1789 - rv = null_init_tag_set(nullb->tag_set); 786 + rv = null_init_tag_set(nullb, nullb->tag_set); 1790 787 } 1791 788 1792 789 if (rv) ··· 1835 758 goto out_cleanup_tags; 1836 759 } 1837 760 null_init_queues(nullb); 1838 - } else if (queue_mode == NULL_Q_BIO) { 1839 - nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); 761 + } else if (dev->queue_mode == NULL_Q_BIO) { 762 + nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); 1840 763 if (!nullb->q) { 1841 764 rv = -ENOMEM; 1842 765 goto out_cleanup_queues; ··· 1846 769 if (rv) 1847 770 goto out_cleanup_blk_queue; 1848 771 } else { 1849 - nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); 772 + nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, 773 + dev->home_node); 1850 774 if (!nullb->q) { 1851 775 rv = -ENOMEM; 1852 776 goto out_cleanup_queues; ··· 1859 781 goto out_cleanup_blk_queue; 1860 782 } 1861 783 784 + if (dev->mbps) { 785 + set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); 786 + nullb_setup_bwtimer(nullb); 787 + } 788 + 789 + if (dev->cache_size > 0) { 790 + 
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); 791 + blk_queue_write_cache(nullb->q, true, true); 792 + blk_queue_flush_queueable(nullb->q, true); 793 + } 794 + 1862 795 nullb->q->queuedata = nullb; 1863 796 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 1864 797 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 1865 798 1866 799 mutex_lock(&lock); 1867 - nullb->index = nullb_indexes++; 800 + nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); 801 + dev->index = nullb->index; 1868 802 mutex_unlock(&lock); 1869 803 1870 - blk_queue_logical_block_size(nullb->q, bs); 1871 - blk_queue_physical_block_size(nullb->q, bs); 804 + blk_queue_logical_block_size(nullb->q, dev->blocksize); 805 + blk_queue_physical_block_size(nullb->q, dev->blocksize); 806 + 807 + null_config_discard(nullb); 1872 808 1873 809 sprintf(nullb->disk_name, "nullb%d", nullb->index); 1874 810 1875 - if (use_lightnvm) 811 + if (dev->use_lightnvm) 1876 812 rv = null_nvm_register(nullb); 1877 813 else 1878 814 rv = null_gendisk_register(nullb); ··· 1902 810 out_cleanup_blk_queue: 1903 811 blk_cleanup_queue(nullb->q); 1904 812 out_cleanup_tags: 1905 - if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) 813 + if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) 1906 814 blk_mq_free_tag_set(nullb->tag_set); 1907 815 out_cleanup_queues: 1908 816 cleanup_queues(nullb); ··· 1917 825 int ret = 0; 1918 826 unsigned int i; 1919 827 struct nullb *nullb; 828 + struct nullb_device *dev; 1920 829 1921 - if (bs > PAGE_SIZE) { 830 + /* check for nullb_page.bitmap */ 831 + if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT)) 832 + return -EINVAL; 833 + 834 + if (g_bs > PAGE_SIZE) { 1922 835 pr_warn("null_blk: invalid block size\n"); 1923 836 pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); 1924 - bs = PAGE_SIZE; 837 + g_bs = PAGE_SIZE; 1925 838 } 1926 839 1927 - if (use_lightnvm && bs != 4096) { 840 + if (g_use_lightnvm && g_bs != 
4096) { 1928 841 pr_warn("null_blk: LightNVM only supports 4k block size\n"); 1929 842 pr_warn("null_blk: defaults block size to 4k\n"); 1930 - bs = 4096; 843 + g_bs = 4096; 1931 844 } 1932 845 1933 - if (use_lightnvm && queue_mode != NULL_Q_MQ) { 846 + if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) { 1934 847 pr_warn("null_blk: LightNVM only supported for blk-mq\n"); 1935 848 pr_warn("null_blk: defaults queue mode to blk-mq\n"); 1936 - queue_mode = NULL_Q_MQ; 849 + g_queue_mode = NULL_Q_MQ; 1937 850 } 1938 851 1939 - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { 1940 - if (submit_queues < nr_online_nodes) { 1941 - pr_warn("null_blk: submit_queues param is set to %u.", 852 + if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { 853 + if (g_submit_queues != nr_online_nodes) { 854 + pr_warn("null_blk: submit_queues param is set to %u.\n", 1942 855 nr_online_nodes); 1943 - submit_queues = nr_online_nodes; 856 + g_submit_queues = nr_online_nodes; 1944 857 } 1945 - } else if (submit_queues > nr_cpu_ids) 1946 - submit_queues = nr_cpu_ids; 1947 - else if (!submit_queues) 1948 - submit_queues = 1; 858 + } else if (g_submit_queues > nr_cpu_ids) 859 + g_submit_queues = nr_cpu_ids; 860 + else if (g_submit_queues <= 0) 861 + g_submit_queues = 1; 1949 862 1950 - if (queue_mode == NULL_Q_MQ && shared_tags) { 1951 - ret = null_init_tag_set(&tag_set); 863 + if (g_queue_mode == NULL_Q_MQ && shared_tags) { 864 + ret = null_init_tag_set(NULL, &tag_set); 1952 865 if (ret) 1953 866 return ret; 1954 867 } 868 + 869 + config_group_init(&nullb_subsys.su_group); 870 + mutex_init(&nullb_subsys.su_mutex); 871 + 872 + ret = configfs_register_subsystem(&nullb_subsys); 873 + if (ret) 874 + goto err_tagset; 1955 875 1956 876 mutex_init(&lock); 1957 877 1958 878 null_major = register_blkdev(0, "nullb"); 1959 879 if (null_major < 0) { 1960 880 ret = null_major; 1961 - goto err_tagset; 881 + goto err_conf; 1962 882 } 1963 883 1964 - if (use_lightnvm) { 884 + if (g_use_lightnvm) { 1965 885 
ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), 1966 886 0, 0, NULL); 1967 887 if (!ppa_cache) { ··· 1984 880 } 1985 881 1986 882 for (i = 0; i < nr_devices; i++) { 1987 - ret = null_add_dev(); 1988 - if (ret) 883 + dev = null_alloc_dev(); 884 + if (!dev) 1989 885 goto err_dev; 886 + ret = null_add_dev(dev); 887 + if (ret) { 888 + null_free_dev(dev); 889 + goto err_dev; 890 + } 1990 891 } 1991 892 1992 893 pr_info("null: module loaded\n"); ··· 2000 891 err_dev: 2001 892 while (!list_empty(&nullb_list)) { 2002 893 nullb = list_entry(nullb_list.next, struct nullb, list); 894 + dev = nullb->dev; 2003 895 null_del_dev(nullb); 896 + null_free_dev(dev); 2004 897 } 2005 898 kmem_cache_destroy(ppa_cache); 2006 899 err_ppa: 2007 900 unregister_blkdev(null_major, "nullb"); 901 + err_conf: 902 + configfs_unregister_subsystem(&nullb_subsys); 2008 903 err_tagset: 2009 - if (queue_mode == NULL_Q_MQ && shared_tags) 904 + if (g_queue_mode == NULL_Q_MQ && shared_tags) 2010 905 blk_mq_free_tag_set(&tag_set); 2011 906 return ret; 2012 907 } ··· 2019 906 { 2020 907 struct nullb *nullb; 2021 908 909 + configfs_unregister_subsystem(&nullb_subsys); 910 + 2022 911 unregister_blkdev(null_major, "nullb"); 2023 912 2024 913 mutex_lock(&lock); 2025 914 while (!list_empty(&nullb_list)) { 915 + struct nullb_device *dev; 916 + 2026 917 nullb = list_entry(nullb_list.next, struct nullb, list); 918 + dev = nullb->dev; 2027 919 null_del_dev(nullb); 920 + null_free_dev(dev); 2028 921 } 2029 922 mutex_unlock(&lock); 2030 923 2031 - if (queue_mode == NULL_Q_MQ && shared_tags) 924 + if (g_queue_mode == NULL_Q_MQ && shared_tags) 2032 925 blk_mq_free_tag_set(&tag_set); 2033 926 2034 927 kmem_cache_destroy(ppa_cache); ··· 2043 924 module_init(null_init); 2044 925 module_exit(null_exit); 2045 926 2046 - MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>"); 927 + MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>"); 2047 928 MODULE_LICENSE("GPL");
+5 -6
drivers/block/pktcdvd.c
··· 1028 1028 bio = pkt->r_bios[f]; 1029 1029 bio_reset(bio); 1030 1030 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1031 - bio->bi_bdev = pd->bdev; 1031 + bio_set_dev(bio, pd->bdev); 1032 1032 bio->bi_end_io = pkt_end_io_read; 1033 1033 bio->bi_private = pkt; 1034 1034 ··· 1122 1122 pkt->sector = new_sector; 1123 1123 1124 1124 bio_reset(pkt->bio); 1125 - pkt->bio->bi_bdev = pd->bdev; 1125 + bio_set_dev(pkt->bio, pd->bdev); 1126 1126 bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); 1127 1127 pkt->bio->bi_iter.bi_sector = new_sector; 1128 1128 pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; ··· 1267 1267 1268 1268 bio_reset(pkt->w_bio); 1269 1269 pkt->w_bio->bi_iter.bi_sector = pkt->sector; 1270 - pkt->w_bio->bi_bdev = pd->bdev; 1270 + bio_set_dev(pkt->w_bio, pd->bdev); 1271 1271 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; 1272 1272 pkt->w_bio->bi_private = pkt; 1273 1273 ··· 2314 2314 2315 2315 psd->pd = pd; 2316 2316 psd->bio = bio; 2317 - cloned_bio->bi_bdev = pd->bdev; 2317 + bio_set_dev(cloned_bio, pd->bdev); 2318 2318 cloned_bio->bi_private = psd; 2319 2319 cloned_bio->bi_end_io = pkt_end_io_read_cloned; 2320 2320 pd->stats.secs_r += bio_sectors(bio); ··· 2415 2415 2416 2416 pd = q->queuedata; 2417 2417 if (!pd) { 2418 - pr_err("%s incorrect request queue\n", 2419 - bdevname(bio->bi_bdev, b)); 2418 + pr_err("%s incorrect request queue\n", bio_devname(bio, b)); 2420 2419 goto end_io; 2421 2420 } 2422 2421
+3 -3
drivers/block/rsxx/dev.c
··· 112 112 113 113 static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) 114 114 { 115 - generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), 115 + generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio), 116 116 &card->gendisk->part0); 117 117 } 118 118 ··· 120 120 struct bio *bio, 121 121 unsigned long start_time) 122 122 { 123 - generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0, 124 - start_time); 123 + generic_end_io_acct(card->queue, bio_data_dir(bio), 124 + &card->gendisk->part0, start_time); 125 125 } 126 126 127 127 static void bio_dma_done_cb(struct rsxx_cardinfo *card,
+847 -2441
drivers/block/skd_main.c
··· 1 - /* Copyright 2012 STEC, Inc. 1 + /* 2 + * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST 3 + * was acquired by Western Digital in 2012. 2 4 * 3 - * This file is licensed under the terms of the 3-clause 4 - * BSD License (http://opensource.org/licenses/BSD-3-Clause) 5 - * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), 6 - * at your option. Both licenses are also available in the LICENSE file 7 - * distributed with this project. This file may not be copied, modified, 8 - * or distributed except in accordance with those terms. 9 - * Gordoni Waidhofer <gwaidhofer@stec-inc.com> 10 - * Initial Driver Design! 11 - * Thomas Swann <tswann@stec-inc.com> 12 - * Interrupt handling. 13 - * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com> 14 - * biomode implementation. 15 - * Akhil Bhansali <abhansali@stec-inc.com> 16 - * Added support for DISCARD / FLUSH and FUA. 5 + * Copyright 2012 sTec, Inc. 6 + * Copyright (c) 2017 Western Digital Corporation or its affiliates. 7 + * 8 + * This file is part of the Linux kernel, and is made available under 9 + * the terms of the GNU General Public License version 2. 
17 10 */ 18 11 19 12 #include <linux/kernel.h> ··· 16 23 #include <linux/slab.h> 17 24 #include <linux/spinlock.h> 18 25 #include <linux/blkdev.h> 26 + #include <linux/blk-mq.h> 19 27 #include <linux/sched.h> 20 28 #include <linux/interrupt.h> 21 29 #include <linux/compiler.h> 22 30 #include <linux/workqueue.h> 23 - #include <linux/bitops.h> 24 31 #include <linux/delay.h> 25 32 #include <linux/time.h> 26 33 #include <linux/hdreg.h> ··· 30 37 #include <linux/version.h> 31 38 #include <linux/err.h> 32 39 #include <linux/aer.h> 33 - #include <linux/ctype.h> 34 40 #include <linux/wait.h> 35 - #include <linux/uio.h> 41 + #include <linux/stringify.h> 42 + #include <linux/slab_def.h> 36 43 #include <scsi/scsi.h> 37 44 #include <scsi/sg.h> 38 45 #include <linux/io.h> ··· 44 51 static int skd_dbg_level; 45 52 static int skd_isr_comp_limit = 4; 46 53 47 - enum { 48 - STEC_LINK_2_5GTS = 0, 49 - STEC_LINK_5GTS = 1, 50 - STEC_LINK_8GTS = 2, 51 - STEC_LINK_UNKNOWN = 0xFF 52 - }; 53 - 54 - enum { 55 - SKD_FLUSH_INITIALIZER, 56 - SKD_FLUSH_ZERO_SIZE_FIRST, 57 - SKD_FLUSH_DATA_SECOND, 58 - }; 59 - 60 54 #define SKD_ASSERT(expr) \ 61 55 do { \ 62 56 if (unlikely(!(expr))) { \ ··· 53 73 } while (0) 54 74 55 75 #define DRV_NAME "skd" 56 - #define DRV_VERSION "2.2.1" 57 - #define DRV_BUILD_ID "0260" 58 76 #define PFX DRV_NAME ": " 59 - #define DRV_BIN_VERSION 0x100 60 - #define DRV_VER_COMPL "2.2.1." 
DRV_BUILD_ID 61 77 62 - MODULE_AUTHOR("bug-reports: support@stec-inc.com"); 63 - MODULE_LICENSE("Dual BSD/GPL"); 78 + MODULE_LICENSE("GPL"); 64 79 65 - MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")"); 66 - MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); 80 + MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver"); 67 81 68 82 #define PCI_VENDOR_ID_STEC 0x1B39 69 83 #define PCI_DEVICE_ID_S1120 0x0001 ··· 70 96 #define SKD_PAUSE_TIMEOUT (5 * 1000) 71 97 72 98 #define SKD_N_FITMSG_BYTES (512u) 99 + #define SKD_MAX_REQ_PER_MSG 14 73 100 74 - #define SKD_N_SPECIAL_CONTEXT 32u 75 101 #define SKD_N_SPECIAL_FITMSG_BYTES (128u) 76 102 77 103 /* SG elements are 32 bytes, so we can make this 4096 and still be under the 78 104 * 128KB limit. That allows 4096*4K = 16M xfer size 79 105 */ 80 106 #define SKD_N_SG_PER_REQ_DEFAULT 256u 81 - #define SKD_N_SG_PER_SPECIAL 256u 82 107 83 108 #define SKD_N_COMPLETION_ENTRY 256u 84 109 #define SKD_N_READ_CAP_BYTES (8u) 85 110 86 111 #define SKD_N_INTERNAL_BYTES (512u) 87 112 113 + #define SKD_SKCOMP_SIZE \ 114 + ((sizeof(struct fit_completion_entry_v1) + \ 115 + sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY) 116 + 88 117 /* 5 bits of uniqifier, 0xF800 */ 89 - #define SKD_ID_INCR (0x400) 90 118 #define SKD_ID_TABLE_MASK (3u << 8u) 91 119 #define SKD_ID_RW_REQUEST (0u << 8u) 92 120 #define SKD_ID_INTERNAL (1u << 8u) 93 - #define SKD_ID_SPECIAL_REQUEST (2u << 8u) 94 121 #define SKD_ID_FIT_MSG (3u << 8u) 95 122 #define SKD_ID_SLOT_MASK 0x00FFu 96 123 #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu 97 - 98 - #define SKD_N_TIMEOUT_SLOT 4u 99 - #define SKD_TIMEOUT_SLOT_MASK 3u 100 124 101 125 #define SKD_N_MAX_SECTORS 2048u 102 126 ··· 113 141 SKD_DRVR_STATE_ONLINE, 114 142 SKD_DRVR_STATE_PAUSING, 115 143 SKD_DRVR_STATE_PAUSED, 116 - SKD_DRVR_STATE_DRAINING_TIMEOUT, 117 144 SKD_DRVR_STATE_RESTARTING, 118 145 SKD_DRVR_STATE_RESUMING, 119 146 SKD_DRVR_STATE_STOPPING, ··· 129 158 #define SKD_WAIT_BOOT_TIMO 
SKD_TIMER_SECONDS(90u) 130 159 #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) 131 160 #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) 132 - #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) 133 161 #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) 134 162 #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) 135 163 #define SKD_START_WAIT_SECONDS 90u ··· 139 169 SKD_REQ_STATE_BUSY, 140 170 SKD_REQ_STATE_COMPLETED, 141 171 SKD_REQ_STATE_TIMEOUT, 142 - SKD_REQ_STATE_ABORTED, 143 - }; 144 - 145 - enum skd_fit_msg_state { 146 - SKD_MSG_STATE_IDLE, 147 - SKD_MSG_STATE_BUSY, 148 172 }; 149 173 150 174 enum skd_check_status_action { ··· 149 185 SKD_CHECK_STATUS_BUSY_IMMINENT, 150 186 }; 151 187 188 + struct skd_msg_buf { 189 + struct fit_msg_hdr fmh; 190 + struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG]; 191 + }; 192 + 152 193 struct skd_fitmsg_context { 153 - enum skd_fit_msg_state state; 154 - 155 - struct skd_fitmsg_context *next; 156 - 157 194 u32 id; 158 - u16 outstanding; 159 195 160 196 u32 length; 161 - u32 offset; 162 197 163 - u8 *msg_buf; 198 + struct skd_msg_buf *msg_buf; 164 199 dma_addr_t mb_dma_address; 165 200 }; 166 201 167 202 struct skd_request_context { 168 203 enum skd_req_state state; 169 204 170 - struct skd_request_context *next; 171 - 172 205 u16 id; 173 206 u32 fitmsg_id; 174 207 175 - struct request *req; 176 208 u8 flush_cmd; 177 209 178 - u32 timeout_stamp; 179 - u8 sg_data_dir; 210 + enum dma_data_direction data_dir; 180 211 struct scatterlist *sg; 181 212 u32 n_sg; 182 213 u32 sg_byte_count; ··· 183 224 184 225 struct fit_comp_error_info err_info; 185 226 227 + blk_status_t status; 186 228 }; 187 - #define SKD_DATA_DIR_HOST_TO_CARD 1 188 - #define SKD_DATA_DIR_CARD_TO_HOST 2 189 229 190 230 struct skd_special_context { 191 231 struct skd_request_context req; 192 232 193 - u8 orphaned; 194 - 195 233 void *data_buf; 196 234 dma_addr_t db_dma_address; 197 235 198 - u8 *msg_buf; 236 + struct skd_msg_buf *msg_buf; 199 237 dma_addr_t mb_dma_address; 
200 - }; 201 - 202 - struct skd_sg_io { 203 - fmode_t mode; 204 - void __user *argp; 205 - 206 - struct sg_io_hdr sg; 207 - 208 - u8 cdb[16]; 209 - 210 - u32 dxfer_len; 211 - u32 iovcnt; 212 - struct sg_iovec *iov; 213 - struct sg_iovec no_iov_iov; 214 - 215 - struct skd_special_context *skspcl; 216 238 }; 217 239 218 240 typedef enum skd_irq_type { ··· 205 265 #define SKD_MAX_BARS 2 206 266 207 267 struct skd_device { 208 - volatile void __iomem *mem_map[SKD_MAX_BARS]; 268 + void __iomem *mem_map[SKD_MAX_BARS]; 209 269 resource_size_t mem_phys[SKD_MAX_BARS]; 210 270 u32 mem_size[SKD_MAX_BARS]; 211 271 ··· 216 276 217 277 spinlock_t lock; 218 278 struct gendisk *disk; 279 + struct blk_mq_tag_set tag_set; 219 280 struct request_queue *queue; 281 + struct skd_fitmsg_context *skmsg; 220 282 struct device *class_dev; 221 283 int gendisk_on; 222 284 int sync_done; 223 285 224 - atomic_t device_count; 225 286 u32 devno; 226 287 u32 major; 227 - char name[32]; 228 288 char isr_name[30]; 229 289 230 290 enum skd_drvr_state state; 231 291 u32 drive_state; 232 292 233 - u32 in_flight; 234 293 u32 cur_max_queue_depth; 235 294 u32 queue_low_water_mark; 236 295 u32 dev_max_queue_depth; ··· 237 298 u32 num_fitmsg_context; 238 299 u32 num_req_context; 239 300 240 - u32 timeout_slot[SKD_N_TIMEOUT_SLOT]; 241 - u32 timeout_stamp; 242 - struct skd_fitmsg_context *skmsg_free_list; 243 301 struct skd_fitmsg_context *skmsg_table; 244 - 245 - struct skd_request_context *skreq_free_list; 246 - struct skd_request_context *skreq_table; 247 - 248 - struct skd_special_context *skspcl_free_list; 249 - struct skd_special_context *skspcl_table; 250 302 251 303 struct skd_special_context internal_skspcl; 252 304 u32 read_cap_blocksize; ··· 245 315 int read_cap_is_valid; 246 316 int inquiry_is_valid; 247 317 u8 inq_serial_num[13]; /*12 chars plus null term */ 248 - u8 id_str[80]; /* holds a composite name (pci + sernum) */ 249 318 250 319 u8 skcomp_cycle; 251 320 u32 skcomp_ix; 321 + struct 
kmem_cache *msgbuf_cache; 322 + struct kmem_cache *sglist_cache; 323 + struct kmem_cache *databuf_cache; 252 324 struct fit_completion_entry_v1 *skcomp_table; 253 325 struct fit_comp_error_info *skerr_table; 254 326 dma_addr_t cq_dma_address; ··· 261 329 u32 timer_countdown; 262 330 u32 timer_substate; 263 331 264 - int n_special; 265 332 int sgs_per_request; 266 333 u32 last_mtd; 267 334 ··· 274 343 275 344 u32 timo_slot; 276 345 277 - 346 + struct work_struct start_queue; 278 347 struct work_struct completion_worker; 279 348 }; 280 349 ··· 284 353 285 354 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) 286 355 { 287 - u32 val; 356 + u32 val = readl(skdev->mem_map[1] + offset); 288 357 289 - if (likely(skdev->dbg_level < 2)) 290 - return readl(skdev->mem_map[1] + offset); 291 - else { 292 - barrier(); 293 - val = readl(skdev->mem_map[1] + offset); 294 - barrier(); 295 - pr_debug("%s:%s:%d offset %x = %x\n", 296 - skdev->name, __func__, __LINE__, offset, val); 297 - return val; 298 - } 299 - 358 + if (unlikely(skdev->dbg_level >= 2)) 359 + dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); 360 + return val; 300 361 } 301 362 302 363 static inline void skd_reg_write32(struct skd_device *skdev, u32 val, 303 364 u32 offset) 304 365 { 305 - if (likely(skdev->dbg_level < 2)) { 306 - writel(val, skdev->mem_map[1] + offset); 307 - barrier(); 308 - } else { 309 - barrier(); 310 - writel(val, skdev->mem_map[1] + offset); 311 - barrier(); 312 - pr_debug("%s:%s:%d offset %x = %x\n", 313 - skdev->name, __func__, __LINE__, offset, val); 314 - } 366 + writel(val, skdev->mem_map[1] + offset); 367 + if (unlikely(skdev->dbg_level >= 2)) 368 + dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); 315 369 } 316 370 317 371 static inline void skd_reg_write64(struct skd_device *skdev, u64 val, 318 372 u32 offset) 319 373 { 320 - if (likely(skdev->dbg_level < 2)) { 321 - writeq(val, skdev->mem_map[1] + offset); 322 - barrier(); 323 - } else { 324 
- barrier(); 325 - writeq(val, skdev->mem_map[1] + offset); 326 - barrier(); 327 - pr_debug("%s:%s:%d offset %x = %016llx\n", 328 - skdev->name, __func__, __LINE__, offset, val); 329 - } 374 + writeq(val, skdev->mem_map[1] + offset); 375 + if (unlikely(skdev->dbg_level >= 2)) 376 + dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset, 377 + val); 330 378 } 331 379 332 380 333 - #define SKD_IRQ_DEFAULT SKD_IRQ_MSI 381 + #define SKD_IRQ_DEFAULT SKD_IRQ_MSIX 334 382 static int skd_isr_type = SKD_IRQ_DEFAULT; 335 383 336 384 module_param(skd_isr_type, int, 0444); ··· 322 412 module_param(skd_max_req_per_msg, int, 0444); 323 413 MODULE_PARM_DESC(skd_max_req_per_msg, 324 414 "Maximum SCSI requests packed in a single message." 325 - " (1-14, default==1)"); 415 + " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)"); 326 416 327 417 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64 328 418 #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64" ··· 339 429 "Maximum SG elements per block request." 340 430 " (1-4096, default==256)"); 341 431 342 - static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; 432 + static int skd_max_pass_thru = 1; 343 433 module_param(skd_max_pass_thru, int, 0444); 344 434 MODULE_PARM_DESC(skd_max_pass_thru, 345 - "Maximum SCSI pass-thru at a time." " (1-50, default==32)"); 435 + "Maximum SCSI pass-thru at a time. 
IGNORED"); 346 436 347 437 module_param(skd_dbg_level, int, 0444); 348 438 MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)"); ··· 359 449 struct skd_fitmsg_context *skmsg); 360 450 static void skd_send_special_fitmsg(struct skd_device *skdev, 361 451 struct skd_special_context *skspcl); 362 - static void skd_request_fn(struct request_queue *rq); 363 - static void skd_end_request(struct skd_device *skdev, 364 - struct skd_request_context *skreq, blk_status_t status); 365 452 static bool skd_preop_sg_list(struct skd_device *skdev, 366 453 struct skd_request_context *skreq); 367 454 static void skd_postop_sg_list(struct skd_device *skdev, ··· 367 460 static void skd_restart_device(struct skd_device *skdev); 368 461 static int skd_quiesce_dev(struct skd_device *skdev); 369 462 static int skd_unquiesce_dev(struct skd_device *skdev); 370 - static void skd_release_special(struct skd_device *skdev, 371 - struct skd_special_context *skspcl); 372 463 static void skd_disable_interrupts(struct skd_device *skdev); 373 464 static void skd_isr_fwstate(struct skd_device *skdev); 374 - static void skd_recover_requests(struct skd_device *skdev, int requeue); 465 + static void skd_recover_requests(struct skd_device *skdev); 375 466 static void skd_soft_reset(struct skd_device *skdev); 376 467 377 - static const char *skd_name(struct skd_device *skdev); 378 468 const char *skd_drive_state_to_str(int state); 379 469 const char *skd_skdev_state_to_str(enum skd_drvr_state state); 380 470 static void skd_log_skdev(struct skd_device *skdev, const char *event); 381 - static void skd_log_skmsg(struct skd_device *skdev, 382 - struct skd_fitmsg_context *skmsg, const char *event); 383 471 static void skd_log_skreq(struct skd_device *skdev, 384 472 struct skd_request_context *skreq, const char *event); 385 473 ··· 383 481 * READ/WRITE REQUESTS 384 482 ***************************************************************************** 385 483 */ 386 - static void skd_fail_all_pending(struct 
skd_device *skdev) 484 + static void skd_inc_in_flight(struct request *rq, void *data, bool reserved) 387 485 { 388 - struct request_queue *q = skdev->queue; 389 - struct request *req; 486 + int *count = data; 390 487 391 - for (;; ) { 392 - req = blk_peek_request(q); 393 - if (req == NULL) 394 - break; 395 - blk_start_request(req); 396 - __blk_end_request_all(req, BLK_STS_IOERR); 397 - } 488 + count++; 489 + } 490 + 491 + static int skd_in_flight(struct skd_device *skdev) 492 + { 493 + int count = 0; 494 + 495 + blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count); 496 + 497 + return count; 398 498 } 399 499 400 500 static void ··· 405 501 unsigned count) 406 502 { 407 503 if (data_dir == READ) 408 - scsi_req->cdb[0] = 0x28; 504 + scsi_req->cdb[0] = READ_10; 409 505 else 410 - scsi_req->cdb[0] = 0x2a; 506 + scsi_req->cdb[0] = WRITE_10; 411 507 412 508 scsi_req->cdb[1] = 0; 413 509 scsi_req->cdb[2] = (lba & 0xff000000) >> 24; ··· 426 522 { 427 523 skreq->flush_cmd = 1; 428 524 429 - scsi_req->cdb[0] = 0x35; 525 + scsi_req->cdb[0] = SYNCHRONIZE_CACHE; 430 526 scsi_req->cdb[1] = 0; 431 527 scsi_req->cdb[2] = 0; 432 528 scsi_req->cdb[3] = 0; ··· 438 534 scsi_req->cdb[9] = 0; 439 535 } 440 536 441 - static void skd_request_fn_not_online(struct request_queue *q); 442 - 443 - static void skd_request_fn(struct request_queue *q) 537 + /* 538 + * Return true if and only if all pending requests should be failed. 
539 + */ 540 + static bool skd_fail_all(struct request_queue *q) 444 541 { 445 542 struct skd_device *skdev = q->queuedata; 446 - struct skd_fitmsg_context *skmsg = NULL; 447 - struct fit_msg_hdr *fmh = NULL; 448 - struct skd_request_context *skreq; 449 - struct request *req = NULL; 450 - struct skd_scsi_request *scsi_req; 451 - unsigned long io_flags; 452 - u32 lba; 453 - u32 count; 454 - int data_dir; 455 - u32 be_lba; 456 - u32 be_count; 457 - u64 be_dmaa; 458 - u64 cmdctxt; 459 - u32 timo_slot; 460 - void *cmd_ptr; 461 - int flush, fua; 462 - 463 - if (skdev->state != SKD_DRVR_STATE_ONLINE) { 464 - skd_request_fn_not_online(q); 465 - return; 466 - } 467 - 468 - if (blk_queue_stopped(skdev->queue)) { 469 - if (skdev->skmsg_free_list == NULL || 470 - skdev->skreq_free_list == NULL || 471 - skdev->in_flight >= skdev->queue_low_water_mark) 472 - /* There is still some kind of shortage */ 473 - return; 474 - 475 - queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); 476 - } 477 - 478 - /* 479 - * Stop conditions: 480 - * - There are no more native requests 481 - * - There are already the maximum number of requests in progress 482 - * - There are no more skd_request_context entries 483 - * - There are no more FIT msg buffers 484 - */ 485 - for (;; ) { 486 - 487 - flush = fua = 0; 488 - 489 - req = blk_peek_request(q); 490 - 491 - /* Are there any native requests to start? */ 492 - if (req == NULL) 493 - break; 494 - 495 - lba = (u32)blk_rq_pos(req); 496 - count = blk_rq_sectors(req); 497 - data_dir = rq_data_dir(req); 498 - io_flags = req->cmd_flags; 499 - 500 - if (req_op(req) == REQ_OP_FLUSH) 501 - flush++; 502 - 503 - if (io_flags & REQ_FUA) 504 - fua++; 505 - 506 - pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) " 507 - "count=%u(0x%x) dir=%d\n", 508 - skdev->name, __func__, __LINE__, 509 - req, lba, lba, count, count, data_dir); 510 - 511 - /* At this point we know there is a request */ 512 - 513 - /* Are too many requets already in progress? 
*/ 514 - if (skdev->in_flight >= skdev->cur_max_queue_depth) { 515 - pr_debug("%s:%s:%d qdepth %d, limit %d\n", 516 - skdev->name, __func__, __LINE__, 517 - skdev->in_flight, skdev->cur_max_queue_depth); 518 - break; 519 - } 520 - 521 - /* Is a skd_request_context available? */ 522 - skreq = skdev->skreq_free_list; 523 - if (skreq == NULL) { 524 - pr_debug("%s:%s:%d Out of req=%p\n", 525 - skdev->name, __func__, __LINE__, q); 526 - break; 527 - } 528 - SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); 529 - SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0); 530 - 531 - /* Now we check to see if we can get a fit msg */ 532 - if (skmsg == NULL) { 533 - if (skdev->skmsg_free_list == NULL) { 534 - pr_debug("%s:%s:%d Out of msg\n", 535 - skdev->name, __func__, __LINE__); 536 - break; 537 - } 538 - } 539 - 540 - skreq->flush_cmd = 0; 541 - skreq->n_sg = 0; 542 - skreq->sg_byte_count = 0; 543 - 544 - /* 545 - * OK to now dequeue request from q. 546 - * 547 - * At this point we are comitted to either start or reject 548 - * the native request. Note that skd_request_context is 549 - * available but is still at the head of the free list. 550 - */ 551 - blk_start_request(req); 552 - skreq->req = req; 553 - skreq->fitmsg_id = 0; 554 - 555 - /* Either a FIT msg is in progress or we have to start one. */ 556 - if (skmsg == NULL) { 557 - /* Are there any FIT msg buffers available? 
*/ 558 - skmsg = skdev->skmsg_free_list; 559 - if (skmsg == NULL) { 560 - pr_debug("%s:%s:%d Out of msg skdev=%p\n", 561 - skdev->name, __func__, __LINE__, 562 - skdev); 563 - break; 564 - } 565 - SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); 566 - SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); 567 - 568 - skdev->skmsg_free_list = skmsg->next; 569 - 570 - skmsg->state = SKD_MSG_STATE_BUSY; 571 - skmsg->id += SKD_ID_INCR; 572 - 573 - /* Initialize the FIT msg header */ 574 - fmh = (struct fit_msg_hdr *)skmsg->msg_buf; 575 - memset(fmh, 0, sizeof(*fmh)); 576 - fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 577 - skmsg->length = sizeof(*fmh); 578 - } 579 - 580 - skreq->fitmsg_id = skmsg->id; 581 - 582 - /* 583 - * Note that a FIT msg may have just been started 584 - * but contains no SoFIT requests yet. 585 - */ 586 - 587 - /* 588 - * Transcode the request, checking as we go. The outcome of 589 - * the transcoding is represented by the error variable. 590 - */ 591 - cmd_ptr = &skmsg->msg_buf[skmsg->length]; 592 - memset(cmd_ptr, 0, 32); 593 - 594 - be_lba = cpu_to_be32(lba); 595 - be_count = cpu_to_be32(count); 596 - be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); 597 - cmdctxt = skreq->id + SKD_ID_INCR; 598 - 599 - scsi_req = cmd_ptr; 600 - scsi_req->hdr.tag = cmdctxt; 601 - scsi_req->hdr.sg_list_dma_address = be_dmaa; 602 - 603 - if (data_dir == READ) 604 - skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; 605 - else 606 - skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; 607 - 608 - if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { 609 - skd_prep_zerosize_flush_cdb(scsi_req, skreq); 610 - SKD_ASSERT(skreq->flush_cmd == 1); 611 - 612 - } else { 613 - skd_prep_rw_cdb(scsi_req, data_dir, lba, count); 614 - } 615 - 616 - if (fua) 617 - scsi_req->cdb[1] |= SKD_FUA_NV; 618 - 619 - if (!req->bio) 620 - goto skip_sg; 621 - 622 - if (!skd_preop_sg_list(skdev, skreq)) { 623 - /* 624 - * Complete the native request with error. 
625 - * Note that the request context is still at the 626 - * head of the free list, and that the SoFIT request 627 - * was encoded into the FIT msg buffer but the FIT 628 - * msg length has not been updated. In short, the 629 - * only resource that has been allocated but might 630 - * not be used is that the FIT msg could be empty. 631 - */ 632 - pr_debug("%s:%s:%d error Out\n", 633 - skdev->name, __func__, __LINE__); 634 - skd_end_request(skdev, skreq, BLK_STS_RESOURCE); 635 - continue; 636 - } 637 - 638 - skip_sg: 639 - scsi_req->hdr.sg_list_len_bytes = 640 - cpu_to_be32(skreq->sg_byte_count); 641 - 642 - /* Complete resource allocations. */ 643 - skdev->skreq_free_list = skreq->next; 644 - skreq->state = SKD_REQ_STATE_BUSY; 645 - skreq->id += SKD_ID_INCR; 646 - 647 - skmsg->length += sizeof(struct skd_scsi_request); 648 - fmh->num_protocol_cmds_coalesced++; 649 - 650 - /* 651 - * Update the active request counts. 652 - * Capture the timeout timestamp. 653 - */ 654 - skreq->timeout_stamp = skdev->timeout_stamp; 655 - timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 656 - skdev->timeout_slot[timo_slot]++; 657 - skdev->in_flight++; 658 - pr_debug("%s:%s:%d req=0x%x busy=%d\n", 659 - skdev->name, __func__, __LINE__, 660 - skreq->id, skdev->in_flight); 661 - 662 - /* 663 - * If the FIT msg buffer is full send it. 664 - */ 665 - if (skmsg->length >= SKD_N_FITMSG_BYTES || 666 - fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { 667 - skd_send_fitmsg(skdev, skmsg); 668 - skmsg = NULL; 669 - fmh = NULL; 670 - } 671 - } 672 - 673 - /* 674 - * Is a FIT msg in progress? If it is empty put the buffer back 675 - * on the free list. If it is non-empty send what we got. 676 - * This minimizes latency when there are fewer requests than 677 - * what fits in a FIT msg. 678 - */ 679 - if (skmsg != NULL) { 680 - /* Bigger than just a FIT msg header? 
*/ 681 - if (skmsg->length > sizeof(struct fit_msg_hdr)) { 682 - pr_debug("%s:%s:%d sending msg=%p, len %d\n", 683 - skdev->name, __func__, __LINE__, 684 - skmsg, skmsg->length); 685 - skd_send_fitmsg(skdev, skmsg); 686 - } else { 687 - /* 688 - * The FIT msg is empty. It means we got started 689 - * on the msg, but the requests were rejected. 690 - */ 691 - skmsg->state = SKD_MSG_STATE_IDLE; 692 - skmsg->id += SKD_ID_INCR; 693 - skmsg->next = skdev->skmsg_free_list; 694 - skdev->skmsg_free_list = skmsg; 695 - } 696 - skmsg = NULL; 697 - fmh = NULL; 698 - } 699 - 700 - /* 701 - * If req is non-NULL it means there is something to do but 702 - * we are out of a resource. 703 - */ 704 - if (req) 705 - blk_stop_queue(skdev->queue); 706 - } 707 - 708 - static void skd_end_request(struct skd_device *skdev, 709 - struct skd_request_context *skreq, blk_status_t error) 710 - { 711 - if (unlikely(error)) { 712 - struct request *req = skreq->req; 713 - char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; 714 - u32 lba = (u32)blk_rq_pos(req); 715 - u32 count = blk_rq_sectors(req); 716 - 717 - pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", 718 - skd_name(skdev), cmd, lba, count, skreq->id); 719 - } else 720 - pr_debug("%s:%s:%d id=0x%x error=%d\n", 721 - skdev->name, __func__, __LINE__, skreq->id, error); 722 - 723 - __blk_end_request_all(skreq->req, error); 724 - } 725 - 726 - static bool skd_preop_sg_list(struct skd_device *skdev, 727 - struct skd_request_context *skreq) 728 - { 729 - struct request *req = skreq->req; 730 - int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; 731 - int pci_dir = writing ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; 732 - struct scatterlist *sg = &skreq->sg[0]; 733 - int n_sg; 734 - int i; 735 - 736 - skreq->sg_byte_count = 0; 737 - 738 - /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || 739 - skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ 740 - 741 - n_sg = blk_rq_map_sg(skdev->queue, req, sg); 742 - if (n_sg <= 0) 743 - return false; 744 - 745 - /* 746 - * Map scatterlist to PCI bus addresses. 747 - * Note PCI might change the number of entries. 748 - */ 749 - n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); 750 - if (n_sg <= 0) 751 - return false; 752 - 753 - SKD_ASSERT(n_sg <= skdev->sgs_per_request); 754 - 755 - skreq->n_sg = n_sg; 756 - 757 - for (i = 0; i < n_sg; i++) { 758 - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 759 - u32 cnt = sg_dma_len(&sg[i]); 760 - uint64_t dma_addr = sg_dma_address(&sg[i]); 761 - 762 - sgd->control = FIT_SGD_CONTROL_NOT_LAST; 763 - sgd->byte_count = cnt; 764 - skreq->sg_byte_count += cnt; 765 - sgd->host_side_addr = dma_addr; 766 - sgd->dev_side_addr = 0; 767 - } 768 - 769 - skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; 770 - skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; 771 - 772 - if (unlikely(skdev->dbg_level > 1)) { 773 - pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", 774 - skdev->name, __func__, __LINE__, 775 - skreq->id, skreq->sksg_list, skreq->sksg_dma_address); 776 - for (i = 0; i < n_sg; i++) { 777 - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 778 - pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " 779 - "addr=0x%llx next=0x%llx\n", 780 - skdev->name, __func__, __LINE__, 781 - i, sgd->byte_count, sgd->control, 782 - sgd->host_side_addr, sgd->next_desc_ptr); 783 - } 784 - } 785 - 786 - return true; 787 - } 788 - 789 - static void skd_postop_sg_list(struct skd_device *skdev, 790 - struct skd_request_context *skreq) 791 - { 792 - int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; 793 - int pci_dir = writing ? 
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; 794 - 795 - /* 796 - * restore the next ptr for next IO request so we 797 - * don't have to set it every time. 798 - */ 799 - skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = 800 - skreq->sksg_dma_address + 801 - ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); 802 - pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); 803 - } 804 - 805 - static void skd_request_fn_not_online(struct request_queue *q) 806 - { 807 - struct skd_device *skdev = q->queuedata; 808 - int error; 809 543 810 544 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); 811 545 ··· 464 922 case SKD_DRVR_STATE_BUSY: 465 923 case SKD_DRVR_STATE_BUSY_IMMINENT: 466 924 case SKD_DRVR_STATE_BUSY_ERASE: 467 - case SKD_DRVR_STATE_DRAINING_TIMEOUT: 468 - return; 925 + return false; 469 926 470 927 case SKD_DRVR_STATE_BUSY_SANITIZE: 471 928 case SKD_DRVR_STATE_STOPPING: ··· 472 931 case SKD_DRVR_STATE_FAULT: 473 932 case SKD_DRVR_STATE_DISAPPEARED: 474 933 default: 475 - error = -EIO; 476 - break; 934 + return true; 935 + } 936 + } 937 + 938 + static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 939 + const struct blk_mq_queue_data *mqd) 940 + { 941 + struct request *const req = mqd->rq; 942 + struct request_queue *const q = req->q; 943 + struct skd_device *skdev = q->queuedata; 944 + struct skd_fitmsg_context *skmsg; 945 + struct fit_msg_hdr *fmh; 946 + const u32 tag = blk_mq_unique_tag(req); 947 + struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req); 948 + struct skd_scsi_request *scsi_req; 949 + unsigned long flags = 0; 950 + const u32 lba = blk_rq_pos(req); 951 + const u32 count = blk_rq_sectors(req); 952 + const int data_dir = rq_data_dir(req); 953 + 954 + if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE)) 955 + return skd_fail_all(q) ? 
BLK_STS_IOERR : BLK_STS_RESOURCE; 956 + 957 + blk_mq_start_request(req); 958 + 959 + WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n", 960 + tag, skd_max_queue_depth, q->nr_requests); 961 + 962 + SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE); 963 + 964 + dev_dbg(&skdev->pdev->dev, 965 + "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, 966 + lba, count, count, data_dir); 967 + 968 + skreq->id = tag + SKD_ID_RW_REQUEST; 969 + skreq->flush_cmd = 0; 970 + skreq->n_sg = 0; 971 + skreq->sg_byte_count = 0; 972 + 973 + skreq->fitmsg_id = 0; 974 + 975 + skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 976 + 977 + if (req->bio && !skd_preop_sg_list(skdev, skreq)) { 978 + dev_dbg(&skdev->pdev->dev, "error Out\n"); 979 + skreq->status = BLK_STS_RESOURCE; 980 + blk_mq_complete_request(req); 981 + return BLK_STS_OK; 477 982 } 478 983 479 - /* If we get here, terminate all pending block requeusts 480 - * with EIO and any scsi pass thru with appropriate sense 481 - */ 984 + dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address, 985 + skreq->n_sg * 986 + sizeof(struct fit_sg_descriptor), 987 + DMA_TO_DEVICE); 482 988 483 - skd_fail_all_pending(skdev); 989 + /* Either a FIT msg is in progress or we have to start one. 
*/ 990 + if (skd_max_req_per_msg == 1) { 991 + skmsg = NULL; 992 + } else { 993 + spin_lock_irqsave(&skdev->lock, flags); 994 + skmsg = skdev->skmsg; 995 + } 996 + if (!skmsg) { 997 + skmsg = &skdev->skmsg_table[tag]; 998 + skdev->skmsg = skmsg; 999 + 1000 + /* Initialize the FIT msg header */ 1001 + fmh = &skmsg->msg_buf->fmh; 1002 + memset(fmh, 0, sizeof(*fmh)); 1003 + fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 1004 + skmsg->length = sizeof(*fmh); 1005 + } else { 1006 + fmh = &skmsg->msg_buf->fmh; 1007 + } 1008 + 1009 + skreq->fitmsg_id = skmsg->id; 1010 + 1011 + scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced]; 1012 + memset(scsi_req, 0, sizeof(*scsi_req)); 1013 + 1014 + scsi_req->hdr.tag = skreq->id; 1015 + scsi_req->hdr.sg_list_dma_address = 1016 + cpu_to_be64(skreq->sksg_dma_address); 1017 + 1018 + if (req_op(req) == REQ_OP_FLUSH) { 1019 + skd_prep_zerosize_flush_cdb(scsi_req, skreq); 1020 + SKD_ASSERT(skreq->flush_cmd == 1); 1021 + } else { 1022 + skd_prep_rw_cdb(scsi_req, data_dir, lba, count); 1023 + } 1024 + 1025 + if (req->cmd_flags & REQ_FUA) 1026 + scsi_req->cdb[1] |= SKD_FUA_NV; 1027 + 1028 + scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count); 1029 + 1030 + /* Complete resource allocations. */ 1031 + skreq->state = SKD_REQ_STATE_BUSY; 1032 + 1033 + skmsg->length += sizeof(struct skd_scsi_request); 1034 + fmh->num_protocol_cmds_coalesced++; 1035 + 1036 + dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id, 1037 + skd_in_flight(skdev)); 1038 + 1039 + /* 1040 + * If the FIT msg buffer is full send it. 
1041 + */ 1042 + if (skd_max_req_per_msg == 1) { 1043 + skd_send_fitmsg(skdev, skmsg); 1044 + } else { 1045 + if (mqd->last || 1046 + fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { 1047 + skd_send_fitmsg(skdev, skmsg); 1048 + skdev->skmsg = NULL; 1049 + } 1050 + spin_unlock_irqrestore(&skdev->lock, flags); 1051 + } 1052 + 1053 + return BLK_STS_OK; 1054 + } 1055 + 1056 + static enum blk_eh_timer_return skd_timed_out(struct request *req, 1057 + bool reserved) 1058 + { 1059 + struct skd_device *skdev = req->q->queuedata; 1060 + 1061 + dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n", 1062 + blk_mq_unique_tag(req)); 1063 + 1064 + return BLK_EH_RESET_TIMER; 1065 + } 1066 + 1067 + static void skd_complete_rq(struct request *req) 1068 + { 1069 + struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); 1070 + 1071 + blk_mq_end_request(req, skreq->status); 1072 + } 1073 + 1074 + static bool skd_preop_sg_list(struct skd_device *skdev, 1075 + struct skd_request_context *skreq) 1076 + { 1077 + struct request *req = blk_mq_rq_from_pdu(skreq); 1078 + struct scatterlist *sgl = &skreq->sg[0], *sg; 1079 + int n_sg; 1080 + int i; 1081 + 1082 + skreq->sg_byte_count = 0; 1083 + 1084 + WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE && 1085 + skreq->data_dir != DMA_FROM_DEVICE); 1086 + 1087 + n_sg = blk_rq_map_sg(skdev->queue, req, sgl); 1088 + if (n_sg <= 0) 1089 + return false; 1090 + 1091 + /* 1092 + * Map scatterlist to PCI bus addresses. 1093 + * Note PCI might change the number of entries. 
1094 + */ 1095 + n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir); 1096 + if (n_sg <= 0) 1097 + return false; 1098 + 1099 + SKD_ASSERT(n_sg <= skdev->sgs_per_request); 1100 + 1101 + skreq->n_sg = n_sg; 1102 + 1103 + for_each_sg(sgl, sg, n_sg, i) { 1104 + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 1105 + u32 cnt = sg_dma_len(sg); 1106 + uint64_t dma_addr = sg_dma_address(sg); 1107 + 1108 + sgd->control = FIT_SGD_CONTROL_NOT_LAST; 1109 + sgd->byte_count = cnt; 1110 + skreq->sg_byte_count += cnt; 1111 + sgd->host_side_addr = dma_addr; 1112 + sgd->dev_side_addr = 0; 1113 + } 1114 + 1115 + skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; 1116 + skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; 1117 + 1118 + if (unlikely(skdev->dbg_level > 1)) { 1119 + dev_dbg(&skdev->pdev->dev, 1120 + "skreq=%x sksg_list=%p sksg_dma=%llx\n", 1121 + skreq->id, skreq->sksg_list, skreq->sksg_dma_address); 1122 + for (i = 0; i < n_sg; i++) { 1123 + struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 1124 + 1125 + dev_dbg(&skdev->pdev->dev, 1126 + " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", 1127 + i, sgd->byte_count, sgd->control, 1128 + sgd->host_side_addr, sgd->next_desc_ptr); 1129 + } 1130 + } 1131 + 1132 + return true; 1133 + } 1134 + 1135 + static void skd_postop_sg_list(struct skd_device *skdev, 1136 + struct skd_request_context *skreq) 1137 + { 1138 + /* 1139 + * restore the next ptr for next IO request so we 1140 + * don't have to set it every time. 
1141 + */ 1142 + skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = 1143 + skreq->sksg_dma_address + 1144 + ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); 1145 + pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir); 484 1146 } 485 1147 486 1148 /* ··· 694 950 695 951 static void skd_timer_tick_not_online(struct skd_device *skdev); 696 952 953 + static void skd_start_queue(struct work_struct *work) 954 + { 955 + struct skd_device *skdev = container_of(work, typeof(*skdev), 956 + start_queue); 957 + 958 + /* 959 + * Although it is safe to call blk_start_queue() from interrupt 960 + * context, blk_mq_start_hw_queues() must not be called from 961 + * interrupt context. 962 + */ 963 + blk_mq_start_hw_queues(skdev->queue); 964 + } 965 + 697 966 static void skd_timer_tick(ulong arg) 698 967 { 699 968 struct skd_device *skdev = (struct skd_device *)arg; 700 - 701 - u32 timo_slot; 702 - u32 overdue_timestamp; 703 969 unsigned long reqflags; 704 970 u32 state; 705 971 ··· 726 972 if (state != skdev->drive_state) 727 973 skd_isr_fwstate(skdev); 728 974 729 - if (skdev->state != SKD_DRVR_STATE_ONLINE) { 975 + if (skdev->state != SKD_DRVR_STATE_ONLINE) 730 976 skd_timer_tick_not_online(skdev); 731 - goto timer_func_out; 732 - } 733 - skdev->timeout_stamp++; 734 - timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 735 977 736 - /* 737 - * All requests that happened during the previous use of 738 - * this slot should be done by now. The previous use was 739 - * over 7 seconds ago. 
740 - */ 741 - if (skdev->timeout_slot[timo_slot] == 0) 742 - goto timer_func_out; 743 - 744 - /* Something is overdue */ 745 - overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; 746 - 747 - pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n", 748 - skdev->name, __func__, __LINE__, 749 - skdev->timeout_slot[timo_slot], skdev->in_flight); 750 - pr_err("(%s): Overdue IOs (%d), busy %d\n", 751 - skd_name(skdev), skdev->timeout_slot[timo_slot], 752 - skdev->in_flight); 753 - 754 - skdev->timer_countdown = SKD_DRAINING_TIMO; 755 - skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; 756 - skdev->timo_slot = timo_slot; 757 - blk_stop_queue(skdev->queue); 758 - 759 - timer_func_out: 760 978 mod_timer(&skdev->timer, (jiffies + HZ)); 761 979 762 980 spin_unlock_irqrestore(&skdev->lock, reqflags); ··· 741 1015 case SKD_DRVR_STATE_LOAD: 742 1016 break; 743 1017 case SKD_DRVR_STATE_BUSY_SANITIZE: 744 - pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n", 745 - skdev->name, __func__, __LINE__, 746 - skdev->drive_state, skdev->state); 1018 + dev_dbg(&skdev->pdev->dev, 1019 + "drive busy sanitize[%x], driver[%x]\n", 1020 + skdev->drive_state, skdev->state); 747 1021 /* If we've been in sanitize for 3 seconds, we figure we're not 748 1022 * going to get anymore completions, so recover requests now 749 1023 */ ··· 751 1025 skdev->timer_countdown--; 752 1026 return; 753 1027 } 754 - skd_recover_requests(skdev, 0); 1028 + skd_recover_requests(skdev); 755 1029 break; 756 1030 757 1031 case SKD_DRVR_STATE_BUSY: 758 1032 case SKD_DRVR_STATE_BUSY_IMMINENT: 759 1033 case SKD_DRVR_STATE_BUSY_ERASE: 760 - pr_debug("%s:%s:%d busy[%x], countdown=%d\n", 761 - skdev->name, __func__, __LINE__, 762 - skdev->state, skdev->timer_countdown); 1034 + dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n", 1035 + skdev->state, skdev->timer_countdown); 763 1036 if (skdev->timer_countdown > 0) { 764 1037 skdev->timer_countdown--; 765 1038 return; 766 1039 } 767 - pr_debug("%s:%s:%d 
busy[%x], timedout=%d, restarting device.", 768 - skdev->name, __func__, __LINE__, 769 - skdev->state, skdev->timer_countdown); 1040 + dev_dbg(&skdev->pdev->dev, 1041 + "busy[%x], timedout=%d, restarting device.", 1042 + skdev->state, skdev->timer_countdown); 770 1043 skd_restart_device(skdev); 771 1044 break; 772 1045 ··· 779 1054 * revcover at some point. */ 780 1055 skdev->state = SKD_DRVR_STATE_FAULT; 781 1056 782 - pr_err("(%s): DriveFault Connect Timeout (%x)\n", 783 - skd_name(skdev), skdev->drive_state); 1057 + dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n", 1058 + skdev->drive_state); 784 1059 785 1060 /*start the queue so we can respond with error to requests */ 786 1061 /* wakeup anyone waiting for startup complete */ 787 - blk_start_queue(skdev->queue); 1062 + schedule_work(&skdev->start_queue); 788 1063 skdev->gendisk_on = -1; 789 1064 wake_up_interruptible(&skdev->waitq); 790 1065 break; ··· 797 1072 case SKD_DRVR_STATE_PAUSED: 798 1073 break; 799 1074 800 - case SKD_DRVR_STATE_DRAINING_TIMEOUT: 801 - pr_debug("%s:%s:%d " 802 - "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", 803 - skdev->name, __func__, __LINE__, 804 - skdev->timo_slot, 805 - skdev->timer_countdown, 806 - skdev->in_flight, 807 - skdev->timeout_slot[skdev->timo_slot]); 808 - /* if the slot has cleared we can let the I/O continue */ 809 - if (skdev->timeout_slot[skdev->timo_slot] == 0) { 810 - pr_debug("%s:%s:%d Slot drained, starting queue.\n", 811 - skdev->name, __func__, __LINE__); 812 - skdev->state = SKD_DRVR_STATE_ONLINE; 813 - blk_start_queue(skdev->queue); 814 - return; 815 - } 816 - if (skdev->timer_countdown > 0) { 817 - skdev->timer_countdown--; 818 - return; 819 - } 820 - skd_restart_device(skdev); 821 - break; 822 - 823 1075 case SKD_DRVR_STATE_RESTARTING: 824 1076 if (skdev->timer_countdown > 0) { 825 1077 skdev->timer_countdown--; ··· 805 1103 /* For now, we fault the drive. Could attempt resets to 806 1104 * revcover at some point. 
*/ 807 1105 skdev->state = SKD_DRVR_STATE_FAULT; 808 - pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", 809 - skd_name(skdev), skdev->drive_state); 1106 + dev_err(&skdev->pdev->dev, 1107 + "DriveFault Reconnect Timeout (%x)\n", 1108 + skdev->drive_state); 810 1109 811 1110 /* 812 1111 * Recovering does two things: ··· 827 1124 /* It never came out of soft reset. Try to 828 1125 * recover the requests and then let them 829 1126 * fail. This is to mitigate hung processes. */ 830 - skd_recover_requests(skdev, 0); 1127 + skd_recover_requests(skdev); 831 1128 else { 832 - pr_err("(%s): Disable BusMaster (%x)\n", 833 - skd_name(skdev), skdev->drive_state); 1129 + dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n", 1130 + skdev->drive_state); 834 1131 pci_disable_device(skdev->pdev); 835 1132 skd_disable_interrupts(skdev); 836 - skd_recover_requests(skdev, 0); 1133 + skd_recover_requests(skdev); 837 1134 } 838 1135 839 1136 /*start the queue so we can respond with error to requests */ 840 1137 /* wakeup anyone waiting for startup complete */ 841 - blk_start_queue(skdev->queue); 1138 + schedule_work(&skdev->start_queue); 842 1139 skdev->gendisk_on = -1; 843 1140 wake_up_interruptible(&skdev->waitq); 844 1141 break; ··· 857 1154 { 858 1155 int rc; 859 1156 860 - init_timer(&skdev->timer); 861 1157 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); 862 1158 863 1159 rc = mod_timer(&skdev->timer, (jiffies + HZ)); 864 1160 if (rc) 865 - pr_err("%s: failed to start timer %d\n", 866 - __func__, rc); 1161 + dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc); 867 1162 return rc; 868 1163 } 869 1164 870 1165 static void skd_kill_timer(struct skd_device *skdev) 871 1166 { 872 1167 del_timer_sync(&skdev->timer); 873 - } 874 - 875 - /* 876 - ***************************************************************************** 877 - * IOCTL 878 - ***************************************************************************** 879 - */ 880 - static int skd_ioctl_sg_io(struct 
skd_device *skdev, 881 - fmode_t mode, void __user *argp); 882 - static int skd_sg_io_get_and_check_args(struct skd_device *skdev, 883 - struct skd_sg_io *sksgio); 884 - static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, 885 - struct skd_sg_io *sksgio); 886 - static int skd_sg_io_prep_buffering(struct skd_device *skdev, 887 - struct skd_sg_io *sksgio); 888 - static int skd_sg_io_copy_buffer(struct skd_device *skdev, 889 - struct skd_sg_io *sksgio, int dxfer_dir); 890 - static int skd_sg_io_send_fitmsg(struct skd_device *skdev, 891 - struct skd_sg_io *sksgio); 892 - static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); 893 - static int skd_sg_io_release_skspcl(struct skd_device *skdev, 894 - struct skd_sg_io *sksgio); 895 - static int skd_sg_io_put_status(struct skd_device *skdev, 896 - struct skd_sg_io *sksgio); 897 - 898 - static void skd_complete_special(struct skd_device *skdev, 899 - volatile struct fit_completion_entry_v1 900 - *skcomp, 901 - volatile struct fit_comp_error_info *skerr, 902 - struct skd_special_context *skspcl); 903 - 904 - static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, 905 - uint cmd_in, ulong arg) 906 - { 907 - static const int sg_version_num = 30527; 908 - int rc = 0, timeout; 909 - struct gendisk *disk = bdev->bd_disk; 910 - struct skd_device *skdev = disk->private_data; 911 - int __user *p = (int __user *)arg; 912 - 913 - pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", 914 - skdev->name, __func__, __LINE__, 915 - disk->disk_name, current->comm, mode, cmd_in, arg); 916 - 917 - if (!capable(CAP_SYS_ADMIN)) 918 - return -EPERM; 919 - 920 - switch (cmd_in) { 921 - case SG_SET_TIMEOUT: 922 - rc = get_user(timeout, p); 923 - if (!rc) 924 - disk->queue->sg_timeout = clock_t_to_jiffies(timeout); 925 - break; 926 - case SG_GET_TIMEOUT: 927 - rc = jiffies_to_clock_t(disk->queue->sg_timeout); 928 - break; 929 - case SG_GET_VERSION_NUM: 930 - rc = put_user(sg_version_num, 
p); 931 - break; 932 - case SG_IO: 933 - rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg); 934 - break; 935 - 936 - default: 937 - rc = -ENOTTY; 938 - break; 939 - } 940 - 941 - pr_debug("%s:%s:%d %s: completion rc %d\n", 942 - skdev->name, __func__, __LINE__, disk->disk_name, rc); 943 - return rc; 944 - } 945 - 946 - static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, 947 - void __user *argp) 948 - { 949 - int rc; 950 - struct skd_sg_io sksgio; 951 - 952 - memset(&sksgio, 0, sizeof(sksgio)); 953 - sksgio.mode = mode; 954 - sksgio.argp = argp; 955 - sksgio.iov = &sksgio.no_iov_iov; 956 - 957 - switch (skdev->state) { 958 - case SKD_DRVR_STATE_ONLINE: 959 - case SKD_DRVR_STATE_BUSY_IMMINENT: 960 - break; 961 - 962 - default: 963 - pr_debug("%s:%s:%d drive not online\n", 964 - skdev->name, __func__, __LINE__); 965 - rc = -ENXIO; 966 - goto out; 967 - } 968 - 969 - rc = skd_sg_io_get_and_check_args(skdev, &sksgio); 970 - if (rc) 971 - goto out; 972 - 973 - rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); 974 - if (rc) 975 - goto out; 976 - 977 - rc = skd_sg_io_prep_buffering(skdev, &sksgio); 978 - if (rc) 979 - goto out; 980 - 981 - rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); 982 - if (rc) 983 - goto out; 984 - 985 - rc = skd_sg_io_send_fitmsg(skdev, &sksgio); 986 - if (rc) 987 - goto out; 988 - 989 - rc = skd_sg_io_await(skdev, &sksgio); 990 - if (rc) 991 - goto out; 992 - 993 - rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); 994 - if (rc) 995 - goto out; 996 - 997 - rc = skd_sg_io_put_status(skdev, &sksgio); 998 - if (rc) 999 - goto out; 1000 - 1001 - rc = 0; 1002 - 1003 - out: 1004 - skd_sg_io_release_skspcl(skdev, &sksgio); 1005 - 1006 - if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) 1007 - kfree(sksgio.iov); 1008 - return rc; 1009 - } 1010 - 1011 - static int skd_sg_io_get_and_check_args(struct skd_device *skdev, 1012 - struct skd_sg_io *sksgio) 1013 - { 1014 - struct sg_io_hdr *sgp = &sksgio->sg; 1015 
- int i, acc; 1016 - 1017 - if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { 1018 - pr_debug("%s:%s:%d access sg failed %p\n", 1019 - skdev->name, __func__, __LINE__, sksgio->argp); 1020 - return -EFAULT; 1021 - } 1022 - 1023 - if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { 1024 - pr_debug("%s:%s:%d copy_from_user sg failed %p\n", 1025 - skdev->name, __func__, __LINE__, sksgio->argp); 1026 - return -EFAULT; 1027 - } 1028 - 1029 - if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { 1030 - pr_debug("%s:%s:%d interface_id invalid 0x%x\n", 1031 - skdev->name, __func__, __LINE__, sgp->interface_id); 1032 - return -EINVAL; 1033 - } 1034 - 1035 - if (sgp->cmd_len > sizeof(sksgio->cdb)) { 1036 - pr_debug("%s:%s:%d cmd_len invalid %d\n", 1037 - skdev->name, __func__, __LINE__, sgp->cmd_len); 1038 - return -EINVAL; 1039 - } 1040 - 1041 - if (sgp->iovec_count > 256) { 1042 - pr_debug("%s:%s:%d iovec_count invalid %d\n", 1043 - skdev->name, __func__, __LINE__, sgp->iovec_count); 1044 - return -EINVAL; 1045 - } 1046 - 1047 - if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { 1048 - pr_debug("%s:%s:%d dxfer_len invalid %d\n", 1049 - skdev->name, __func__, __LINE__, sgp->dxfer_len); 1050 - return -EINVAL; 1051 - } 1052 - 1053 - switch (sgp->dxfer_direction) { 1054 - case SG_DXFER_NONE: 1055 - acc = -1; 1056 - break; 1057 - 1058 - case SG_DXFER_TO_DEV: 1059 - acc = VERIFY_READ; 1060 - break; 1061 - 1062 - case SG_DXFER_FROM_DEV: 1063 - case SG_DXFER_TO_FROM_DEV: 1064 - acc = VERIFY_WRITE; 1065 - break; 1066 - 1067 - default: 1068 - pr_debug("%s:%s:%d dxfer_dir invalid %d\n", 1069 - skdev->name, __func__, __LINE__, sgp->dxfer_direction); 1070 - return -EINVAL; 1071 - } 1072 - 1073 - if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { 1074 - pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n", 1075 - skdev->name, __func__, __LINE__, sgp->cmdp); 1076 - return -EFAULT; 1077 - } 1078 - 1079 - if (sgp->mx_sb_len != 0) { 1080 - if 
(!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { 1081 - pr_debug("%s:%s:%d access sbp failed %p\n", 1082 - skdev->name, __func__, __LINE__, sgp->sbp); 1083 - return -EFAULT; 1084 - } 1085 - } 1086 - 1087 - if (sgp->iovec_count == 0) { 1088 - sksgio->iov[0].iov_base = sgp->dxferp; 1089 - sksgio->iov[0].iov_len = sgp->dxfer_len; 1090 - sksgio->iovcnt = 1; 1091 - sksgio->dxfer_len = sgp->dxfer_len; 1092 - } else { 1093 - struct sg_iovec *iov; 1094 - uint nbytes = sizeof(*iov) * sgp->iovec_count; 1095 - size_t iov_data_len; 1096 - 1097 - iov = kmalloc(nbytes, GFP_KERNEL); 1098 - if (iov == NULL) { 1099 - pr_debug("%s:%s:%d alloc iovec failed %d\n", 1100 - skdev->name, __func__, __LINE__, 1101 - sgp->iovec_count); 1102 - return -ENOMEM; 1103 - } 1104 - sksgio->iov = iov; 1105 - sksgio->iovcnt = sgp->iovec_count; 1106 - 1107 - if (copy_from_user(iov, sgp->dxferp, nbytes)) { 1108 - pr_debug("%s:%s:%d copy_from_user iovec failed %p\n", 1109 - skdev->name, __func__, __LINE__, sgp->dxferp); 1110 - return -EFAULT; 1111 - } 1112 - 1113 - /* 1114 - * Sum up the vecs, making sure they don't overflow 1115 - */ 1116 - iov_data_len = 0; 1117 - for (i = 0; i < sgp->iovec_count; i++) { 1118 - if (iov_data_len + iov[i].iov_len < iov_data_len) 1119 - return -EINVAL; 1120 - iov_data_len += iov[i].iov_len; 1121 - } 1122 - 1123 - /* SG_IO howto says that the shorter of the two wins */ 1124 - if (sgp->dxfer_len < iov_data_len) { 1125 - sksgio->iovcnt = iov_shorten((struct iovec *)iov, 1126 - sgp->iovec_count, 1127 - sgp->dxfer_len); 1128 - sksgio->dxfer_len = sgp->dxfer_len; 1129 - } else 1130 - sksgio->dxfer_len = iov_data_len; 1131 - } 1132 - 1133 - if (sgp->dxfer_direction != SG_DXFER_NONE) { 1134 - struct sg_iovec *iov = sksgio->iov; 1135 - for (i = 0; i < sksgio->iovcnt; i++, iov++) { 1136 - if (!access_ok(acc, iov->iov_base, iov->iov_len)) { 1137 - pr_debug("%s:%s:%d access data failed %p/%d\n", 1138 - skdev->name, __func__, __LINE__, 1139 - iov->iov_base, (int)iov->iov_len); 
1140 - return -EFAULT; 1141 - } 1142 - } 1143 - } 1144 - 1145 - return 0; 1146 - } 1147 - 1148 - static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, 1149 - struct skd_sg_io *sksgio) 1150 - { 1151 - struct skd_special_context *skspcl = NULL; 1152 - int rc; 1153 - 1154 - for (;;) { 1155 - ulong flags; 1156 - 1157 - spin_lock_irqsave(&skdev->lock, flags); 1158 - skspcl = skdev->skspcl_free_list; 1159 - if (skspcl != NULL) { 1160 - skdev->skspcl_free_list = 1161 - (struct skd_special_context *)skspcl->req.next; 1162 - skspcl->req.id += SKD_ID_INCR; 1163 - skspcl->req.state = SKD_REQ_STATE_SETUP; 1164 - skspcl->orphaned = 0; 1165 - skspcl->req.n_sg = 0; 1166 - } 1167 - spin_unlock_irqrestore(&skdev->lock, flags); 1168 - 1169 - if (skspcl != NULL) { 1170 - rc = 0; 1171 - break; 1172 - } 1173 - 1174 - pr_debug("%s:%s:%d blocking\n", 1175 - skdev->name, __func__, __LINE__); 1176 - 1177 - rc = wait_event_interruptible_timeout( 1178 - skdev->waitq, 1179 - (skdev->skspcl_free_list != NULL), 1180 - msecs_to_jiffies(sksgio->sg.timeout)); 1181 - 1182 - pr_debug("%s:%s:%d unblocking, rc=%d\n", 1183 - skdev->name, __func__, __LINE__, rc); 1184 - 1185 - if (rc <= 0) { 1186 - if (rc == 0) 1187 - rc = -ETIMEDOUT; 1188 - else 1189 - rc = -EINTR; 1190 - break; 1191 - } 1192 - /* 1193 - * If we get here rc > 0 meaning the timeout to 1194 - * wait_event_interruptible_timeout() had time left, hence the 1195 - * sought event -- non-empty free list -- happened. 1196 - * Retry the allocation. 1197 - */ 1198 - } 1199 - sksgio->skspcl = skspcl; 1200 - 1201 - return rc; 1202 - } 1203 - 1204 - static int skd_skreq_prep_buffering(struct skd_device *skdev, 1205 - struct skd_request_context *skreq, 1206 - u32 dxfer_len) 1207 - { 1208 - u32 resid = dxfer_len; 1209 - 1210 - /* 1211 - * The DMA engine must have aligned addresses and byte counts. 
1212 - */ 1213 - resid += (-resid) & 3; 1214 - skreq->sg_byte_count = resid; 1215 - 1216 - skreq->n_sg = 0; 1217 - 1218 - while (resid > 0) { 1219 - u32 nbytes = PAGE_SIZE; 1220 - u32 ix = skreq->n_sg; 1221 - struct scatterlist *sg = &skreq->sg[ix]; 1222 - struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; 1223 - struct page *page; 1224 - 1225 - if (nbytes > resid) 1226 - nbytes = resid; 1227 - 1228 - page = alloc_page(GFP_KERNEL); 1229 - if (page == NULL) 1230 - return -ENOMEM; 1231 - 1232 - sg_set_page(sg, page, nbytes, 0); 1233 - 1234 - /* TODO: This should be going through a pci_???() 1235 - * routine to do proper mapping. */ 1236 - sksg->control = FIT_SGD_CONTROL_NOT_LAST; 1237 - sksg->byte_count = nbytes; 1238 - 1239 - sksg->host_side_addr = sg_phys(sg); 1240 - 1241 - sksg->dev_side_addr = 0; 1242 - sksg->next_desc_ptr = skreq->sksg_dma_address + 1243 - (ix + 1) * sizeof(*sksg); 1244 - 1245 - skreq->n_sg++; 1246 - resid -= nbytes; 1247 - } 1248 - 1249 - if (skreq->n_sg > 0) { 1250 - u32 ix = skreq->n_sg - 1; 1251 - struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; 1252 - 1253 - sksg->control = FIT_SGD_CONTROL_LAST; 1254 - sksg->next_desc_ptr = 0; 1255 - } 1256 - 1257 - if (unlikely(skdev->dbg_level > 1)) { 1258 - u32 i; 1259 - 1260 - pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n", 1261 - skdev->name, __func__, __LINE__, 1262 - skreq->id, skreq->sksg_list, skreq->sksg_dma_address); 1263 - for (i = 0; i < skreq->n_sg; i++) { 1264 - struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; 1265 - 1266 - pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " 1267 - "addr=0x%llx next=0x%llx\n", 1268 - skdev->name, __func__, __LINE__, 1269 - i, sgd->byte_count, sgd->control, 1270 - sgd->host_side_addr, sgd->next_desc_ptr); 1271 - } 1272 - } 1273 - 1274 - return 0; 1275 - } 1276 - 1277 - static int skd_sg_io_prep_buffering(struct skd_device *skdev, 1278 - struct skd_sg_io *sksgio) 1279 - { 1280 - struct skd_special_context *skspcl = sksgio->skspcl; 1281 - 
struct skd_request_context *skreq = &skspcl->req; 1282 - u32 dxfer_len = sksgio->dxfer_len; 1283 - int rc; 1284 - 1285 - rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); 1286 - /* 1287 - * Eventually, errors or not, skd_release_special() is called 1288 - * to recover allocations including partial allocations. 1289 - */ 1290 - return rc; 1291 - } 1292 - 1293 - static int skd_sg_io_copy_buffer(struct skd_device *skdev, 1294 - struct skd_sg_io *sksgio, int dxfer_dir) 1295 - { 1296 - struct skd_special_context *skspcl = sksgio->skspcl; 1297 - u32 iov_ix = 0; 1298 - struct sg_iovec curiov; 1299 - u32 sksg_ix = 0; 1300 - u8 *bufp = NULL; 1301 - u32 buf_len = 0; 1302 - u32 resid = sksgio->dxfer_len; 1303 - int rc; 1304 - 1305 - curiov.iov_len = 0; 1306 - curiov.iov_base = NULL; 1307 - 1308 - if (dxfer_dir != sksgio->sg.dxfer_direction) { 1309 - if (dxfer_dir != SG_DXFER_TO_DEV || 1310 - sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) 1311 - return 0; 1312 - } 1313 - 1314 - while (resid > 0) { 1315 - u32 nbytes = PAGE_SIZE; 1316 - 1317 - if (curiov.iov_len == 0) { 1318 - curiov = sksgio->iov[iov_ix++]; 1319 - continue; 1320 - } 1321 - 1322 - if (buf_len == 0) { 1323 - struct page *page; 1324 - page = sg_page(&skspcl->req.sg[sksg_ix++]); 1325 - bufp = page_address(page); 1326 - buf_len = PAGE_SIZE; 1327 - } 1328 - 1329 - nbytes = min_t(u32, nbytes, resid); 1330 - nbytes = min_t(u32, nbytes, curiov.iov_len); 1331 - nbytes = min_t(u32, nbytes, buf_len); 1332 - 1333 - if (dxfer_dir == SG_DXFER_TO_DEV) 1334 - rc = __copy_from_user(bufp, curiov.iov_base, nbytes); 1335 - else 1336 - rc = __copy_to_user(curiov.iov_base, bufp, nbytes); 1337 - 1338 - if (rc) 1339 - return -EFAULT; 1340 - 1341 - resid -= nbytes; 1342 - curiov.iov_len -= nbytes; 1343 - curiov.iov_base += nbytes; 1344 - buf_len -= nbytes; 1345 - } 1346 - 1347 - return 0; 1348 - } 1349 - 1350 - static int skd_sg_io_send_fitmsg(struct skd_device *skdev, 1351 - struct skd_sg_io *sksgio) 1352 - { 1353 - struct 
skd_special_context *skspcl = sksgio->skspcl; 1354 - struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; 1355 - struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; 1356 - 1357 - memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); 1358 - 1359 - /* Initialize the FIT msg header */ 1360 - fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 1361 - fmh->num_protocol_cmds_coalesced = 1; 1362 - 1363 - /* Initialize the SCSI request */ 1364 - if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) 1365 - scsi_req->hdr.sg_list_dma_address = 1366 - cpu_to_be64(skspcl->req.sksg_dma_address); 1367 - scsi_req->hdr.tag = skspcl->req.id; 1368 - scsi_req->hdr.sg_list_len_bytes = 1369 - cpu_to_be32(skspcl->req.sg_byte_count); 1370 - memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); 1371 - 1372 - skspcl->req.state = SKD_REQ_STATE_BUSY; 1373 - skd_send_special_fitmsg(skdev, skspcl); 1374 - 1375 - return 0; 1376 - } 1377 - 1378 - static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) 1379 - { 1380 - unsigned long flags; 1381 - int rc; 1382 - 1383 - rc = wait_event_interruptible_timeout(skdev->waitq, 1384 - (sksgio->skspcl->req.state != 1385 - SKD_REQ_STATE_BUSY), 1386 - msecs_to_jiffies(sksgio->sg. 1387 - timeout)); 1388 - 1389 - spin_lock_irqsave(&skdev->lock, flags); 1390 - 1391 - if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { 1392 - pr_debug("%s:%s:%d skspcl %p aborted\n", 1393 - skdev->name, __func__, __LINE__, sksgio->skspcl); 1394 - 1395 - /* Build check cond, sense and let command finish. 
*/ 1396 - /* For a timeout, we must fabricate completion and sense 1397 - * data to complete the command */ 1398 - sksgio->skspcl->req.completion.status = 1399 - SAM_STAT_CHECK_CONDITION; 1400 - 1401 - memset(&sksgio->skspcl->req.err_info, 0, 1402 - sizeof(sksgio->skspcl->req.err_info)); 1403 - sksgio->skspcl->req.err_info.type = 0x70; 1404 - sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; 1405 - sksgio->skspcl->req.err_info.code = 0x44; 1406 - sksgio->skspcl->req.err_info.qual = 0; 1407 - rc = 0; 1408 - } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) 1409 - /* No longer on the adapter. We finish. */ 1410 - rc = 0; 1411 - else { 1412 - /* Something's gone wrong. Still busy. Timeout or 1413 - * user interrupted (control-C). Mark as an orphan 1414 - * so it will be disposed when completed. */ 1415 - sksgio->skspcl->orphaned = 1; 1416 - sksgio->skspcl = NULL; 1417 - if (rc == 0) { 1418 - pr_debug("%s:%s:%d timed out %p (%u ms)\n", 1419 - skdev->name, __func__, __LINE__, 1420 - sksgio, sksgio->sg.timeout); 1421 - rc = -ETIMEDOUT; 1422 - } else { 1423 - pr_debug("%s:%s:%d cntlc %p\n", 1424 - skdev->name, __func__, __LINE__, sksgio); 1425 - rc = -EINTR; 1426 - } 1427 - } 1428 - 1429 - spin_unlock_irqrestore(&skdev->lock, flags); 1430 - 1431 - return rc; 1432 - } 1433 - 1434 - static int skd_sg_io_put_status(struct skd_device *skdev, 1435 - struct skd_sg_io *sksgio) 1436 - { 1437 - struct sg_io_hdr *sgp = &sksgio->sg; 1438 - struct skd_special_context *skspcl = sksgio->skspcl; 1439 - int resid = 0; 1440 - 1441 - u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); 1442 - 1443 - sgp->status = skspcl->req.completion.status; 1444 - resid = sksgio->dxfer_len - nb; 1445 - 1446 - sgp->masked_status = sgp->status & STATUS_MASK; 1447 - sgp->msg_status = 0; 1448 - sgp->host_status = 0; 1449 - sgp->driver_status = 0; 1450 - sgp->resid = resid; 1451 - if (sgp->masked_status || sgp->host_status || sgp->driver_status) 1452 - sgp->info |= SG_INFO_CHECK; 1453 
- 1454 - pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n", 1455 - skdev->name, __func__, __LINE__, 1456 - sgp->status, sgp->masked_status, sgp->resid); 1457 - 1458 - if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { 1459 - if (sgp->mx_sb_len > 0) { 1460 - struct fit_comp_error_info *ei = &skspcl->req.err_info; 1461 - u32 nbytes = sizeof(*ei); 1462 - 1463 - nbytes = min_t(u32, nbytes, sgp->mx_sb_len); 1464 - 1465 - sgp->sb_len_wr = nbytes; 1466 - 1467 - if (__copy_to_user(sgp->sbp, ei, nbytes)) { 1468 - pr_debug("%s:%s:%d copy_to_user sense failed %p\n", 1469 - skdev->name, __func__, __LINE__, 1470 - sgp->sbp); 1471 - return -EFAULT; 1472 - } 1473 - } 1474 - } 1475 - 1476 - if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { 1477 - pr_debug("%s:%s:%d copy_to_user sg failed %p\n", 1478 - skdev->name, __func__, __LINE__, sksgio->argp); 1479 - return -EFAULT; 1480 - } 1481 - 1482 - return 0; 1483 - } 1484 - 1485 - static int skd_sg_io_release_skspcl(struct skd_device *skdev, 1486 - struct skd_sg_io *sksgio) 1487 - { 1488 - struct skd_special_context *skspcl = sksgio->skspcl; 1489 - 1490 - if (skspcl != NULL) { 1491 - ulong flags; 1492 - 1493 - sksgio->skspcl = NULL; 1494 - 1495 - spin_lock_irqsave(&skdev->lock, flags); 1496 - skd_release_special(skdev, skspcl); 1497 - spin_unlock_irqrestore(&skdev->lock, flags); 1498 - } 1499 - 1500 - return 0; 1501 1168 } 1502 1169 1503 1170 /* ··· 884 1811 uint64_t dma_address; 885 1812 struct skd_scsi_request *scsi; 886 1813 887 - fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; 1814 + fmh = &skspcl->msg_buf->fmh; 888 1815 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; 889 1816 fmh->num_protocol_cmds_coalesced = 1; 890 1817 891 - scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; 1818 + scsi = &skspcl->msg_buf->scsi[0]; 892 1819 memset(scsi, 0, sizeof(*scsi)); 893 1820 dma_address = skspcl->req.sksg_dma_address; 894 1821 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); 1822 + skspcl->req.n_sg = 1; 895 1823 
sgd->control = FIT_SGD_CONTROL_LAST; 896 1824 sgd->byte_count = 0; 897 1825 sgd->host_side_addr = skspcl->db_dma_address; ··· 920 1846 */ 921 1847 return; 922 1848 923 - SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); 924 1849 skspcl->req.state = SKD_REQ_STATE_BUSY; 925 - skspcl->req.id += SKD_ID_INCR; 926 1850 927 - scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; 1851 + scsi = &skspcl->msg_buf->scsi[0]; 928 1852 scsi->hdr.tag = skspcl->req.id; 929 1853 930 1854 memset(scsi->cdb, 0, sizeof(scsi->cdb)); ··· 1012 1940 /* If the check condition is of special interest, log a message */ 1013 1941 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) 1014 1942 && (code == 0x04) && (qual == 0x06)) { 1015 - pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" 1016 - "ascq/fruc %02x/%02x/%02x/%02x\n", 1017 - skd_name(skdev), key, code, qual, fruc); 1943 + dev_err(&skdev->pdev->dev, 1944 + "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 1945 + key, code, qual, fruc); 1018 1946 } 1019 1947 } 1020 1948 1021 1949 static void skd_complete_internal(struct skd_device *skdev, 1022 - volatile struct fit_completion_entry_v1 1023 - *skcomp, 1024 - volatile struct fit_comp_error_info *skerr, 1950 + struct fit_completion_entry_v1 *skcomp, 1951 + struct fit_comp_error_info *skerr, 1025 1952 struct skd_special_context *skspcl) 1026 1953 { 1027 1954 u8 *buf = skspcl->data_buf; 1028 1955 u8 status; 1029 1956 int i; 1030 - struct skd_scsi_request *scsi = 1031 - (struct skd_scsi_request *)&skspcl->msg_buf[64]; 1957 + struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0]; 1958 + 1959 + lockdep_assert_held(&skdev->lock); 1032 1960 1033 1961 SKD_ASSERT(skspcl == &skdev->internal_skspcl); 1034 1962 1035 - pr_debug("%s:%s:%d complete internal %x\n", 1036 - skdev->name, __func__, __LINE__, scsi->cdb[0]); 1963 + dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]); 1964 + 1965 + dma_sync_single_for_cpu(&skdev->pdev->dev, 1966 + 
skspcl->db_dma_address, 1967 + skspcl->req.sksg_list[0].byte_count, 1968 + DMA_BIDIRECTIONAL); 1037 1969 1038 1970 skspcl->req.completion = *skcomp; 1039 1971 skspcl->req.state = SKD_REQ_STATE_IDLE; 1040 - skspcl->req.id += SKD_ID_INCR; 1041 1972 1042 1973 status = skspcl->req.completion.status; 1043 1974 ··· 1056 1981 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); 1057 1982 else { 1058 1983 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1059 - pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n", 1060 - skdev->name, __func__, __LINE__, 1061 - skdev->state); 1984 + dev_dbg(&skdev->pdev->dev, 1985 + "TUR failed, don't send anymore state 0x%x\n", 1986 + skdev->state); 1062 1987 return; 1063 1988 } 1064 - pr_debug("%s:%s:%d **** TUR failed, retry skerr\n", 1065 - skdev->name, __func__, __LINE__); 1066 - skd_send_internal_skspcl(skdev, skspcl, 0x00); 1989 + dev_dbg(&skdev->pdev->dev, 1990 + "**** TUR failed, retry skerr\n"); 1991 + skd_send_internal_skspcl(skdev, skspcl, 1992 + TEST_UNIT_READY); 1067 1993 } 1068 1994 break; 1069 1995 ··· 1073 1997 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); 1074 1998 else { 1075 1999 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1076 - pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n", 1077 - skdev->name, __func__, __LINE__, 1078 - skdev->state); 2000 + dev_dbg(&skdev->pdev->dev, 2001 + "write buffer failed, don't send anymore state 0x%x\n", 2002 + skdev->state); 1079 2003 return; 1080 2004 } 1081 - pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n", 1082 - skdev->name, __func__, __LINE__); 1083 - skd_send_internal_skspcl(skdev, skspcl, 0x00); 2005 + dev_dbg(&skdev->pdev->dev, 2006 + "**** write buffer failed, retry skerr\n"); 2007 + skd_send_internal_skspcl(skdev, skspcl, 2008 + TEST_UNIT_READY); 1084 2009 } 1085 2010 break; 1086 2011 ··· 1091 2014 skd_send_internal_skspcl(skdev, skspcl, 1092 2015 READ_CAPACITY); 1093 2016 else { 1094 - pr_err( 1095 - "(%s):*** W/R Buffer 
mismatch %d ***\n", 1096 - skd_name(skdev), skdev->connect_retries); 2017 + dev_err(&skdev->pdev->dev, 2018 + "*** W/R Buffer mismatch %d ***\n", 2019 + skdev->connect_retries); 1097 2020 if (skdev->connect_retries < 1098 2021 SKD_MAX_CONNECT_RETRIES) { 1099 2022 skdev->connect_retries++; 1100 2023 skd_soft_reset(skdev); 1101 2024 } else { 1102 - pr_err( 1103 - "(%s): W/R Buffer Connect Error\n", 1104 - skd_name(skdev)); 2025 + dev_err(&skdev->pdev->dev, 2026 + "W/R Buffer Connect Error\n"); 1105 2027 return; 1106 2028 } 1107 2029 } 1108 2030 1109 2031 } else { 1110 2032 if (skdev->state == SKD_DRVR_STATE_STOPPING) { 1111 - pr_debug("%s:%s:%d " 1112 - "read buffer failed, don't send anymore state 0x%x\n", 1113 - skdev->name, __func__, __LINE__, 1114 - skdev->state); 2033 + dev_dbg(&skdev->pdev->dev, 2034 + "read buffer failed, don't send anymore state 0x%x\n", 2035 + skdev->state); 1115 2036 return; 1116 2037 } 1117 - pr_debug("%s:%s:%d " 1118 - "**** read buffer failed, retry skerr\n", 1119 - skdev->name, __func__, __LINE__); 1120 - skd_send_internal_skspcl(skdev, skspcl, 0x00); 2038 + dev_dbg(&skdev->pdev->dev, 2039 + "**** read buffer failed, retry skerr\n"); 2040 + skd_send_internal_skspcl(skdev, skspcl, 2041 + TEST_UNIT_READY); 1121 2042 } 1122 2043 break; 1123 2044 ··· 1129 2054 (buf[4] << 24) | (buf[5] << 16) | 1130 2055 (buf[6] << 8) | buf[7]; 1131 2056 1132 - pr_debug("%s:%s:%d last lba %d, bs %d\n", 1133 - skdev->name, __func__, __LINE__, 1134 - skdev->read_cap_last_lba, 1135 - skdev->read_cap_blocksize); 2057 + dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n", 2058 + skdev->read_cap_last_lba, 2059 + skdev->read_cap_blocksize); 1136 2060 1137 2061 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 1138 2062 ··· 1142 2068 (skerr->key == MEDIUM_ERROR)) { 1143 2069 skdev->read_cap_last_lba = ~0; 1144 2070 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); 1145 - pr_debug("%s:%s:%d " 1146 - "**** MEDIUM ERROR caused READCAP to fail, ignore 
failure and continue to inquiry\n", 1147 - skdev->name, __func__, __LINE__); 2071 + dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n"); 1148 2072 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); 1149 2073 } else { 1150 - pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n", 1151 - skdev->name, __func__, __LINE__); 2074 + dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n"); 1152 2075 skd_send_internal_skspcl(skdev, skspcl, 1153 2076 TEST_UNIT_READY); 1154 2077 } ··· 1162 2091 } 1163 2092 1164 2093 if (skd_unquiesce_dev(skdev) < 0) 1165 - pr_debug("%s:%s:%d **** failed, to ONLINE device\n", 1166 - skdev->name, __func__, __LINE__); 2094 + dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n"); 1167 2095 /* connection is complete */ 1168 2096 skdev->connect_retries = 0; 1169 2097 break; ··· 1190 2120 struct skd_fitmsg_context *skmsg) 1191 2121 { 1192 2122 u64 qcmd; 1193 - struct fit_msg_hdr *fmh; 1194 2123 1195 - pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n", 1196 - skdev->name, __func__, __LINE__, 1197 - skmsg->mb_dma_address, skdev->in_flight); 1198 - pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n", 1199 - skdev->name, __func__, __LINE__, 1200 - skmsg->msg_buf, skmsg->offset); 2124 + dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n", 2125 + skmsg->mb_dma_address, skd_in_flight(skdev)); 2126 + dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf); 1201 2127 1202 2128 qcmd = skmsg->mb_dma_address; 1203 2129 qcmd |= FIT_QCMD_QID_NORMAL; 1204 - 1205 - fmh = (struct fit_msg_hdr *)skmsg->msg_buf; 1206 - skmsg->outstanding = fmh->num_protocol_cmds_coalesced; 1207 2130 1208 2131 if (unlikely(skdev->dbg_level > 1)) { 1209 2132 u8 *bp = (u8 *)skmsg->msg_buf; 1210 2133 int i; 1211 2134 for (i = 0; i < skmsg->length; i += 8) { 1212 - pr_debug("%s:%s:%d msg[%2d] %8ph\n", 1213 - skdev->name, __func__, __LINE__, i, &bp[i]); 2135 + dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i, 2136 + 
&bp[i]); 1214 2137 if (i == 0) 1215 2138 i = 64 - 8; 1216 2139 } ··· 1223 2160 */ 1224 2161 qcmd |= FIT_QCMD_MSGSIZE_64; 1225 2162 2163 + dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address, 2164 + skmsg->length, DMA_TO_DEVICE); 2165 + 2166 + /* Make sure skd_msg_buf is written before the doorbell is triggered. */ 2167 + smp_wmb(); 2168 + 1226 2169 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1227 2170 } 1228 2171 ··· 1237 2168 { 1238 2169 u64 qcmd; 1239 2170 2171 + WARN_ON_ONCE(skspcl->req.n_sg != 1); 2172 + 1240 2173 if (unlikely(skdev->dbg_level > 1)) { 1241 2174 u8 *bp = (u8 *)skspcl->msg_buf; 1242 2175 int i; 1243 2176 1244 2177 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { 1245 - pr_debug("%s:%s:%d spcl[%2d] %8ph\n", 1246 - skdev->name, __func__, __LINE__, i, &bp[i]); 2178 + dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i, 2179 + &bp[i]); 1247 2180 if (i == 0) 1248 2181 i = 64 - 8; 1249 2182 } 1250 2183 1251 - pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", 1252 - skdev->name, __func__, __LINE__, 1253 - skspcl, skspcl->req.id, skspcl->req.sksg_list, 1254 - skspcl->req.sksg_dma_address); 2184 + dev_dbg(&skdev->pdev->dev, 2185 + "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", 2186 + skspcl, skspcl->req.id, skspcl->req.sksg_list, 2187 + skspcl->req.sksg_dma_address); 1255 2188 for (i = 0; i < skspcl->req.n_sg; i++) { 1256 2189 struct fit_sg_descriptor *sgd = 1257 2190 &skspcl->req.sksg_list[i]; 1258 2191 1259 - pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x " 1260 - "addr=0x%llx next=0x%llx\n", 1261 - skdev->name, __func__, __LINE__, 1262 - i, sgd->byte_count, sgd->control, 1263 - sgd->host_side_addr, sgd->next_desc_ptr); 2192 + dev_dbg(&skdev->pdev->dev, 2193 + " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n", 2194 + i, sgd->byte_count, sgd->control, 2195 + sgd->host_side_addr, sgd->next_desc_ptr); 1264 2196 } 1265 2197 } 1266 2198 ··· 1271 2201 */ 1272 2202 qcmd = skspcl->mb_dma_address; 1273 2203 qcmd |= 
FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; 2204 + 2205 + dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address, 2206 + SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE); 2207 + dma_sync_single_for_device(&skdev->pdev->dev, 2208 + skspcl->req.sksg_dma_address, 2209 + 1 * sizeof(struct fit_sg_descriptor), 2210 + DMA_TO_DEVICE); 2211 + dma_sync_single_for_device(&skdev->pdev->dev, 2212 + skspcl->db_dma_address, 2213 + skspcl->req.sksg_list[0].byte_count, 2214 + DMA_BIDIRECTIONAL); 2215 + 2216 + /* Make sure skd_msg_buf is written before the doorbell is triggered. */ 2217 + smp_wmb(); 1274 2218 1275 2219 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); 1276 2220 } ··· 1296 2212 */ 1297 2213 1298 2214 static void skd_complete_other(struct skd_device *skdev, 1299 - volatile struct fit_completion_entry_v1 *skcomp, 1300 - volatile struct fit_comp_error_info *skerr); 2215 + struct fit_completion_entry_v1 *skcomp, 2216 + struct fit_comp_error_info *skerr); 1301 2217 1302 2218 struct sns_info { 1303 2219 u8 type; ··· 1346 2262 1347 2263 static enum skd_check_status_action 1348 2264 skd_check_status(struct skd_device *skdev, 1349 - u8 cmp_status, volatile struct fit_comp_error_info *skerr) 2265 + u8 cmp_status, struct fit_comp_error_info *skerr) 1350 2266 { 1351 - int i, n; 2267 + int i; 1352 2268 1353 - pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 1354 - skd_name(skdev), skerr->key, skerr->code, skerr->qual, 1355 - skerr->fruc); 2269 + dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", 2270 + skerr->key, skerr->code, skerr->qual, skerr->fruc); 1356 2271 1357 - pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", 1358 - skdev->name, __func__, __LINE__, skerr->type, cmp_status, 1359 - skerr->key, skerr->code, skerr->qual, skerr->fruc); 2272 + dev_dbg(&skdev->pdev->dev, 2273 + "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", 2274 + skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual, 2275 + skerr->fruc); 
1360 2276 1361 2277 /* Does the info match an entry in the good category? */ 1362 - n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); 1363 - for (i = 0; i < n; i++) { 2278 + for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) { 1364 2279 struct sns_info *sns = &skd_chkstat_table[i]; 1365 2280 1366 2281 if (sns->mask & 0x10) ··· 1383 2300 continue; 1384 2301 1385 2302 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { 1386 - pr_err("(%s): SMART Alert: sense key/asc/ascq " 1387 - "%02x/%02x/%02x\n", 1388 - skd_name(skdev), skerr->key, 1389 - skerr->code, skerr->qual); 2303 + dev_err(&skdev->pdev->dev, 2304 + "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n", 2305 + skerr->key, skerr->code, skerr->qual); 1390 2306 } 1391 2307 return sns->action; 1392 2308 } ··· 1394 2312 * zero status means good 1395 2313 */ 1396 2314 if (cmp_status) { 1397 - pr_debug("%s:%s:%d status check: error\n", 1398 - skdev->name, __func__, __LINE__); 2315 + dev_dbg(&skdev->pdev->dev, "status check: error\n"); 1399 2316 return SKD_CHECK_STATUS_REPORT_ERROR; 1400 2317 } 1401 2318 1402 - pr_debug("%s:%s:%d status check good default\n", 1403 - skdev->name, __func__, __LINE__); 2319 + dev_dbg(&skdev->pdev->dev, "status check good default\n"); 1404 2320 return SKD_CHECK_STATUS_REPORT_GOOD; 1405 2321 } 1406 2322 1407 2323 static void skd_resolve_req_exception(struct skd_device *skdev, 1408 - struct skd_request_context *skreq) 2324 + struct skd_request_context *skreq, 2325 + struct request *req) 1409 2326 { 1410 2327 u8 cmp_status = skreq->completion.status; 1411 2328 1412 2329 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { 1413 2330 case SKD_CHECK_STATUS_REPORT_GOOD: 1414 2331 case SKD_CHECK_STATUS_REPORT_SMART_ALERT: 1415 - skd_end_request(skdev, skreq, BLK_STS_OK); 2332 + skreq->status = BLK_STS_OK; 2333 + blk_mq_complete_request(req); 1416 2334 break; 1417 2335 1418 2336 case SKD_CHECK_STATUS_BUSY_IMMINENT: 1419 2337 skd_log_skreq(skdev, skreq, "retry(busy)"); 
1420 - blk_requeue_request(skdev->queue, skreq->req); 1421 - pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); 2338 + blk_requeue_request(skdev->queue, req); 2339 + dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); 1422 2340 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; 1423 2341 skdev->timer_countdown = SKD_TIMER_MINUTES(20); 1424 2342 skd_quiesce_dev(skdev); 1425 2343 break; 1426 2344 1427 2345 case SKD_CHECK_STATUS_REQUEUE_REQUEST: 1428 - if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) { 2346 + if ((unsigned long) ++req->special < SKD_MAX_RETRIES) { 1429 2347 skd_log_skreq(skdev, skreq, "retry"); 1430 - blk_requeue_request(skdev->queue, skreq->req); 2348 + blk_requeue_request(skdev->queue, req); 1431 2349 break; 1432 2350 } 1433 - /* fall through to report error */ 2351 + /* fall through */ 1434 2352 1435 2353 case SKD_CHECK_STATUS_REPORT_ERROR: 1436 2354 default: 1437 - skd_end_request(skdev, skreq, BLK_STS_IOERR); 2355 + skreq->status = BLK_STS_IOERR; 2356 + blk_mq_complete_request(req); 1438 2357 break; 1439 2358 } 1440 2359 } 1441 2360 1442 - /* assume spinlock is already held */ 1443 2361 static void skd_release_skreq(struct skd_device *skdev, 1444 2362 struct skd_request_context *skreq) 1445 2363 { 1446 - u32 msg_slot; 1447 - struct skd_fitmsg_context *skmsg; 1448 - 1449 - u32 timo_slot; 1450 - 1451 - /* 1452 - * Reclaim the FIT msg buffer if this is 1453 - * the first of the requests it carried to 1454 - * be completed. The FIT msg buffer used to 1455 - * send this request cannot be reused until 1456 - * we are sure the s1120 card has copied 1457 - * it to its memory. The FIT msg might have 1458 - * contained several requests. As soon as 1459 - * any of them are completed we know that 1460 - * the entire FIT msg was transferred. 1461 - * Only the first completed request will 1462 - * match the FIT msg buffer id. The FIT 1463 - * msg buffer id is immediately updated. 
1464 - * When subsequent requests complete the FIT 1465 - * msg buffer id won't match, so we know 1466 - * quite cheaply that it is already done. 1467 - */ 1468 - msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; 1469 - SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); 1470 - 1471 - skmsg = &skdev->skmsg_table[msg_slot]; 1472 - if (skmsg->id == skreq->fitmsg_id) { 1473 - SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); 1474 - SKD_ASSERT(skmsg->outstanding > 0); 1475 - skmsg->outstanding--; 1476 - if (skmsg->outstanding == 0) { 1477 - skmsg->state = SKD_MSG_STATE_IDLE; 1478 - skmsg->id += SKD_ID_INCR; 1479 - skmsg->next = skdev->skmsg_free_list; 1480 - skdev->skmsg_free_list = skmsg; 1481 - } 1482 - } 1483 - 1484 - /* 1485 - * Decrease the number of active requests. 1486 - * Also decrements the count in the timeout slot. 1487 - */ 1488 - SKD_ASSERT(skdev->in_flight > 0); 1489 - skdev->in_flight -= 1; 1490 - 1491 - timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; 1492 - SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); 1493 - skdev->timeout_slot[timo_slot] -= 1; 1494 - 1495 - /* 1496 - * Reset backpointer 1497 - */ 1498 - skreq->req = NULL; 1499 - 1500 2364 /* 1501 2365 * Reclaim the skd_request_context 1502 2366 */ 1503 2367 skreq->state = SKD_REQ_STATE_IDLE; 1504 - skreq->id += SKD_ID_INCR; 1505 - skreq->next = skdev->skreq_free_list; 1506 - skdev->skreq_free_list = skreq; 1507 2368 } 1508 - 1509 - #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA 1510 - 1511 - static void skd_do_inq_page_00(struct skd_device *skdev, 1512 - volatile struct fit_completion_entry_v1 *skcomp, 1513 - volatile struct fit_comp_error_info *skerr, 1514 - uint8_t *cdb, uint8_t *buf) 1515 - { 1516 - uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; 1517 - 1518 - /* Caller requested "supported pages". The driver needs to insert 1519 - * its page. 
1520 - */ 1521 - pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n", 1522 - skdev->name, __func__, __LINE__); 1523 - 1524 - /* If the device rejected the request because the CDB was 1525 - * improperly formed, then just leave. 1526 - */ 1527 - if (skcomp->status == SAM_STAT_CHECK_CONDITION && 1528 - skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) 1529 - return; 1530 - 1531 - /* Get the amount of space the caller allocated */ 1532 - max_bytes = (cdb[3] << 8) | cdb[4]; 1533 - 1534 - /* Get the number of pages actually returned by the device */ 1535 - drive_pages = (buf[2] << 8) | buf[3]; 1536 - drive_bytes = drive_pages + 4; 1537 - new_size = drive_pages + 1; 1538 - 1539 - /* Supported pages must be in numerical order, so find where 1540 - * the driver page needs to be inserted into the list of 1541 - * pages returned by the device. 1542 - */ 1543 - for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { 1544 - if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) 1545 - return; /* Device using this page code. abort */ 1546 - else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) 1547 - break; 1548 - } 1549 - 1550 - if (insert_pt < max_bytes) { 1551 - uint16_t u; 1552 - 1553 - /* Shift everything up one byte to make room. 
*/ 1554 - for (u = new_size + 3; u > insert_pt; u--) 1555 - buf[u] = buf[u - 1]; 1556 - buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; 1557 - 1558 - /* SCSI byte order increment of num_returned_bytes by 1 */ 1559 - skcomp->num_returned_bytes = 1560 - be32_to_cpu(skcomp->num_returned_bytes) + 1; 1561 - skcomp->num_returned_bytes = 1562 - be32_to_cpu(skcomp->num_returned_bytes); 1563 - } 1564 - 1565 - /* update page length field to reflect the driver's page too */ 1566 - buf[2] = (uint8_t)((new_size >> 8) & 0xFF); 1567 - buf[3] = (uint8_t)((new_size >> 0) & 0xFF); 1568 - } 1569 - 1570 - static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) 1571 - { 1572 - int pcie_reg; 1573 - u16 pci_bus_speed; 1574 - u8 pci_lanes; 1575 - 1576 - pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1577 - if (pcie_reg) { 1578 - u16 linksta; 1579 - pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); 1580 - 1581 - pci_bus_speed = linksta & 0xF; 1582 - pci_lanes = (linksta & 0x3F0) >> 4; 1583 - } else { 1584 - *speed = STEC_LINK_UNKNOWN; 1585 - *width = 0xFF; 1586 - return; 1587 - } 1588 - 1589 - switch (pci_bus_speed) { 1590 - case 1: 1591 - *speed = STEC_LINK_2_5GTS; 1592 - break; 1593 - case 2: 1594 - *speed = STEC_LINK_5GTS; 1595 - break; 1596 - case 3: 1597 - *speed = STEC_LINK_8GTS; 1598 - break; 1599 - default: 1600 - *speed = STEC_LINK_UNKNOWN; 1601 - break; 1602 - } 1603 - 1604 - if (pci_lanes <= 0x20) 1605 - *width = pci_lanes; 1606 - else 1607 - *width = 0xFF; 1608 - } 1609 - 1610 - static void skd_do_inq_page_da(struct skd_device *skdev, 1611 - volatile struct fit_completion_entry_v1 *skcomp, 1612 - volatile struct fit_comp_error_info *skerr, 1613 - uint8_t *cdb, uint8_t *buf) 1614 - { 1615 - struct pci_dev *pdev = skdev->pdev; 1616 - unsigned max_bytes; 1617 - struct driver_inquiry_data inq; 1618 - u16 val; 1619 - 1620 - pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n", 1621 - skdev->name, __func__, __LINE__); 1622 - 1623 - 
memset(&inq, 0, sizeof(inq)); 1624 - 1625 - inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; 1626 - 1627 - skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes); 1628 - inq.pcie_bus_number = cpu_to_be16(pdev->bus->number); 1629 - inq.pcie_device_number = PCI_SLOT(pdev->devfn); 1630 - inq.pcie_function_number = PCI_FUNC(pdev->devfn); 1631 - 1632 - pci_read_config_word(pdev, PCI_VENDOR_ID, &val); 1633 - inq.pcie_vendor_id = cpu_to_be16(val); 1634 - 1635 - pci_read_config_word(pdev, PCI_DEVICE_ID, &val); 1636 - inq.pcie_device_id = cpu_to_be16(val); 1637 - 1638 - pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val); 1639 - inq.pcie_subsystem_vendor_id = cpu_to_be16(val); 1640 - 1641 - pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val); 1642 - inq.pcie_subsystem_device_id = cpu_to_be16(val); 1643 - 1644 - /* Driver version, fixed lenth, padded with spaces on the right */ 1645 - inq.driver_version_length = sizeof(inq.driver_version); 1646 - memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); 1647 - memcpy(inq.driver_version, DRV_VER_COMPL, 1648 - min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); 1649 - 1650 - inq.page_length = cpu_to_be16((sizeof(inq) - 4)); 1651 - 1652 - /* Clear the error set by the device */ 1653 - skcomp->status = SAM_STAT_GOOD; 1654 - memset((void *)skerr, 0, sizeof(*skerr)); 1655 - 1656 - /* copy response into output buffer */ 1657 - max_bytes = (cdb[3] << 8) | cdb[4]; 1658 - memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); 1659 - 1660 - skcomp->num_returned_bytes = 1661 - be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq))); 1662 - } 1663 - 1664 - static void skd_do_driver_inq(struct skd_device *skdev, 1665 - volatile struct fit_completion_entry_v1 *skcomp, 1666 - volatile struct fit_comp_error_info *skerr, 1667 - uint8_t *cdb, uint8_t *buf) 1668 - { 1669 - if (!buf) 1670 - return; 1671 - else if (cdb[0] != INQUIRY) 1672 - return; /* Not an INQUIRY */ 1673 - else if ((cdb[1] & 1) == 0) 1674 - return; /* 
EVPD not set */ 1675 - else if (cdb[2] == 0) 1676 - /* Need to add driver's page to supported pages list */ 1677 - skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); 1678 - else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) 1679 - /* Caller requested driver's page */ 1680 - skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); 1681 - } 1682 - 1683 - static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) 1684 - { 1685 - if (!sg) 1686 - return NULL; 1687 - if (!sg_page(sg)) 1688 - return NULL; 1689 - return sg_virt(sg); 1690 - } 1691 - 1692 - static void skd_process_scsi_inq(struct skd_device *skdev, 1693 - volatile struct fit_completion_entry_v1 1694 - *skcomp, 1695 - volatile struct fit_comp_error_info *skerr, 1696 - struct skd_special_context *skspcl) 1697 - { 1698 - uint8_t *buf; 1699 - struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; 1700 - struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; 1701 - 1702 - dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, 1703 - skspcl->req.sg_data_dir); 1704 - buf = skd_sg_1st_page_ptr(skspcl->req.sg); 1705 - 1706 - if (buf) 1707 - skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); 1708 - } 1709 - 1710 2369 1711 2370 static int skd_isr_completion_posted(struct skd_device *skdev, 1712 2371 int limit, int *enqueued) 1713 2372 { 1714 - volatile struct fit_completion_entry_v1 *skcmp = NULL; 1715 - volatile struct fit_comp_error_info *skerr; 2373 + struct fit_completion_entry_v1 *skcmp; 2374 + struct fit_comp_error_info *skerr; 1716 2375 u16 req_id; 1717 - u32 req_slot; 2376 + u32 tag; 2377 + u16 hwq = 0; 2378 + struct request *rq; 1718 2379 struct skd_request_context *skreq; 1719 - u16 cmp_cntxt = 0; 1720 - u8 cmp_status = 0; 1721 - u8 cmp_cycle = 0; 1722 - u32 cmp_bytes = 0; 2380 + u16 cmp_cntxt; 2381 + u8 cmp_status; 2382 + u8 cmp_cycle; 2383 + u32 cmp_bytes; 1723 2384 int rc = 0; 1724 2385 int processed = 0; 2386 + 2387 + lockdep_assert_held(&skdev->lock); 
1725 2388 1726 2389 for (;; ) { 1727 2390 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); ··· 1479 2652 1480 2653 skerr = &skdev->skerr_table[skdev->skcomp_ix]; 1481 2654 1482 - pr_debug("%s:%s:%d " 1483 - "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " 1484 - "busy=%d rbytes=0x%x proto=%d\n", 1485 - skdev->name, __func__, __LINE__, skdev->skcomp_cycle, 1486 - skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, 1487 - skdev->in_flight, cmp_bytes, skdev->proto_ver); 2655 + dev_dbg(&skdev->pdev->dev, 2656 + "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n", 2657 + skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle, 2658 + cmp_cntxt, cmp_status, skd_in_flight(skdev), 2659 + cmp_bytes, skdev->proto_ver); 1488 2660 1489 2661 if (cmp_cycle != skdev->skcomp_cycle) { 1490 - pr_debug("%s:%s:%d end of completions\n", 1491 - skdev->name, __func__, __LINE__); 2662 + dev_dbg(&skdev->pdev->dev, "end of completions\n"); 1492 2663 break; 1493 2664 } 1494 2665 /* ··· 1505 2680 * r/w request (see skd_start() above) or a special request. 1506 2681 */ 1507 2682 req_id = cmp_cntxt; 1508 - req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 2683 + tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK; 1509 2684 1510 2685 /* Is this other than a r/w request? */ 1511 - if (req_slot >= skdev->num_req_context) { 2686 + if (tag >= skdev->num_req_context) { 1512 2687 /* 1513 2688 * This is not a completion for a r/w request. 1514 2689 */ 2690 + WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], 2691 + tag)); 1515 2692 skd_complete_other(skdev, skcmp, skerr); 1516 2693 continue; 1517 2694 } 1518 2695 1519 - skreq = &skdev->skreq_table[req_slot]; 2696 + rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag); 2697 + if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt, 2698 + tag)) 2699 + continue; 2700 + skreq = blk_mq_rq_to_pdu(rq); 1520 2701 1521 2702 /* 1522 2703 * Make sure the request ID for the slot matches. 
1523 2704 */ 1524 2705 if (skreq->id != req_id) { 1525 - pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n", 1526 - skdev->name, __func__, __LINE__, 1527 - req_id, skreq->id); 1528 - { 1529 - u16 new_id = cmp_cntxt; 1530 - pr_err("(%s): Completion mismatch " 1531 - "comp_id=0x%04x skreq=0x%04x new=0x%04x\n", 1532 - skd_name(skdev), req_id, 1533 - skreq->id, new_id); 2706 + dev_err(&skdev->pdev->dev, 2707 + "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n", 2708 + req_id, skreq->id, cmp_cntxt); 1534 2709 1535 - continue; 1536 - } 2710 + continue; 1537 2711 } 1538 2712 1539 2713 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); 1540 - 1541 - if (skreq->state == SKD_REQ_STATE_ABORTED) { 1542 - pr_debug("%s:%s:%d reclaim req %p id=%04x\n", 1543 - skdev->name, __func__, __LINE__, 1544 - skreq, skreq->id); 1545 - /* a previously timed out command can 1546 - * now be cleaned up */ 1547 - skd_release_skreq(skdev, skreq); 1548 - continue; 1549 - } 1550 2714 1551 2715 skreq->completion = *skcmp; 1552 2716 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { ··· 1548 2734 if (skreq->n_sg > 0) 1549 2735 skd_postop_sg_list(skdev, skreq); 1550 2736 1551 - if (!skreq->req) { 1552 - pr_debug("%s:%s:%d NULL backptr skdreq %p, " 1553 - "req=0x%x req_id=0x%x\n", 1554 - skdev->name, __func__, __LINE__, 1555 - skreq, skreq->id, req_id); 1556 - } else { 1557 - /* 1558 - * Capture the outcome and post it back to the 1559 - * native request. 1560 - */ 1561 - if (likely(cmp_status == SAM_STAT_GOOD)) 1562 - skd_end_request(skdev, skreq, BLK_STS_OK); 1563 - else 1564 - skd_resolve_req_exception(skdev, skreq); 1565 - } 2737 + skd_release_skreq(skdev, skreq); 1566 2738 1567 2739 /* 1568 - * Release the skreq, its FIT msg (if one), timeout slot, 1569 - * and queue depth. 2740 + * Capture the outcome and post it back to the native request. 
1570 2741 */ 1571 - skd_release_skreq(skdev, skreq); 2742 + if (likely(cmp_status == SAM_STAT_GOOD)) { 2743 + skreq->status = BLK_STS_OK; 2744 + blk_mq_complete_request(rq); 2745 + } else { 2746 + skd_resolve_req_exception(skdev, skreq, rq); 2747 + } 1572 2748 1573 2749 /* skd_isr_comp_limit equal zero means no limit */ 1574 2750 if (limit) { ··· 1569 2765 } 1570 2766 } 1571 2767 1572 - if ((skdev->state == SKD_DRVR_STATE_PAUSING) 1573 - && (skdev->in_flight) == 0) { 2768 + if (skdev->state == SKD_DRVR_STATE_PAUSING && 2769 + skd_in_flight(skdev) == 0) { 1574 2770 skdev->state = SKD_DRVR_STATE_PAUSED; 1575 2771 wake_up_interruptible(&skdev->waitq); 1576 2772 } ··· 1579 2775 } 1580 2776 1581 2777 static void skd_complete_other(struct skd_device *skdev, 1582 - volatile struct fit_completion_entry_v1 *skcomp, 1583 - volatile struct fit_comp_error_info *skerr) 2778 + struct fit_completion_entry_v1 *skcomp, 2779 + struct fit_comp_error_info *skerr) 1584 2780 { 1585 2781 u32 req_id = 0; 1586 2782 u32 req_table; 1587 2783 u32 req_slot; 1588 2784 struct skd_special_context *skspcl; 1589 2785 2786 + lockdep_assert_held(&skdev->lock); 2787 + 1590 2788 req_id = skcomp->tag; 1591 2789 req_table = req_id & SKD_ID_TABLE_MASK; 1592 2790 req_slot = req_id & SKD_ID_SLOT_MASK; 1593 2791 1594 - pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n", 1595 - skdev->name, __func__, __LINE__, 1596 - req_table, req_id, req_slot); 2792 + dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table, 2793 + req_id, req_slot); 1597 2794 1598 2795 /* 1599 2796 * Based on the request id, determine how to dispatch this completion. ··· 1604 2799 switch (req_table) { 1605 2800 case SKD_ID_RW_REQUEST: 1606 2801 /* 1607 - * The caller, skd_completion_posted_isr() above, 2802 + * The caller, skd_isr_completion_posted() above, 1608 2803 * handles r/w requests. The only way we get here 1609 2804 * is if the req_slot is out of bounds. 
1610 2805 */ 1611 - break; 1612 - 1613 - case SKD_ID_SPECIAL_REQUEST: 1614 - /* 1615 - * Make sure the req_slot is in bounds and that the id 1616 - * matches. 1617 - */ 1618 - if (req_slot < skdev->n_special) { 1619 - skspcl = &skdev->skspcl_table[req_slot]; 1620 - if (skspcl->req.id == req_id && 1621 - skspcl->req.state == SKD_REQ_STATE_BUSY) { 1622 - skd_complete_special(skdev, 1623 - skcomp, skerr, skspcl); 1624 - return; 1625 - } 1626 - } 1627 2806 break; 1628 2807 1629 2808 case SKD_ID_INTERNAL: ··· 1640 2851 */ 1641 2852 } 1642 2853 1643 - static void skd_complete_special(struct skd_device *skdev, 1644 - volatile struct fit_completion_entry_v1 1645 - *skcomp, 1646 - volatile struct fit_comp_error_info *skerr, 1647 - struct skd_special_context *skspcl) 1648 - { 1649 - pr_debug("%s:%s:%d completing special request %p\n", 1650 - skdev->name, __func__, __LINE__, skspcl); 1651 - if (skspcl->orphaned) { 1652 - /* Discard orphaned request */ 1653 - /* ?: Can this release directly or does it need 1654 - * to use a worker? 
*/ 1655 - pr_debug("%s:%s:%d release orphaned %p\n", 1656 - skdev->name, __func__, __LINE__, skspcl); 1657 - skd_release_special(skdev, skspcl); 1658 - return; 1659 - } 1660 - 1661 - skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); 1662 - 1663 - skspcl->req.state = SKD_REQ_STATE_COMPLETED; 1664 - skspcl->req.completion = *skcomp; 1665 - skspcl->req.err_info = *skerr; 1666 - 1667 - skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, 1668 - skerr->code, skerr->qual, skerr->fruc); 1669 - 1670 - wake_up_interruptible(&skdev->waitq); 1671 - } 1672 - 1673 - /* assume spinlock is already held */ 1674 - static void skd_release_special(struct skd_device *skdev, 1675 - struct skd_special_context *skspcl) 1676 - { 1677 - int i, was_depleted; 1678 - 1679 - for (i = 0; i < skspcl->req.n_sg; i++) { 1680 - struct page *page = sg_page(&skspcl->req.sg[i]); 1681 - __free_page(page); 1682 - } 1683 - 1684 - was_depleted = (skdev->skspcl_free_list == NULL); 1685 - 1686 - skspcl->req.state = SKD_REQ_STATE_IDLE; 1687 - skspcl->req.id += SKD_ID_INCR; 1688 - skspcl->req.next = 1689 - (struct skd_request_context *)skdev->skspcl_free_list; 1690 - skdev->skspcl_free_list = (struct skd_special_context *)skspcl; 1691 - 1692 - if (was_depleted) { 1693 - pr_debug("%s:%s:%d skspcl was depleted\n", 1694 - skdev->name, __func__, __LINE__); 1695 - /* Free list was depleted. Their might be waiters. 
*/ 1696 - wake_up_interruptible(&skdev->waitq); 1697 - } 1698 - } 1699 - 1700 2854 static void skd_reset_skcomp(struct skd_device *skdev) 1701 2855 { 1702 - u32 nbytes; 1703 - struct fit_completion_entry_v1 *skcomp; 1704 - 1705 - nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; 1706 - nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 1707 - 1708 - memset(skdev->skcomp_table, 0, nbytes); 2856 + memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE); 1709 2857 1710 2858 skdev->skcomp_ix = 0; 1711 2859 skdev->skcomp_cycle = 1; ··· 1667 2941 * process everything in compq 1668 2942 */ 1669 2943 skd_isr_completion_posted(skdev, 0, &flush_enqueued); 1670 - skd_request_fn(skdev->queue); 2944 + schedule_work(&skdev->start_queue); 1671 2945 1672 2946 spin_unlock_irqrestore(&skdev->lock, flags); 1673 2947 } ··· 1677 2951 static irqreturn_t 1678 2952 skd_isr(int irq, void *ptr) 1679 2953 { 1680 - struct skd_device *skdev; 2954 + struct skd_device *skdev = ptr; 1681 2955 u32 intstat; 1682 2956 u32 ack; 1683 2957 int rc = 0; 1684 2958 int deferred = 0; 1685 2959 int flush_enqueued = 0; 1686 2960 1687 - skdev = (struct skd_device *)ptr; 1688 2961 spin_lock(&skdev->lock); 1689 2962 1690 2963 for (;; ) { ··· 1692 2967 ack = FIT_INT_DEF_MASK; 1693 2968 ack &= intstat; 1694 2969 1695 - pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n", 1696 - skdev->name, __func__, __LINE__, intstat, ack); 2970 + dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat, 2971 + ack); 1697 2972 1698 2973 /* As long as there is an int pending on device, keep 1699 2974 * running loop. 
When none, get out, but if we've never ··· 1743 3018 } 1744 3019 1745 3020 if (unlikely(flush_enqueued)) 1746 - skd_request_fn(skdev->queue); 3021 + schedule_work(&skdev->start_queue); 1747 3022 1748 3023 if (deferred) 1749 3024 schedule_work(&skdev->completion_worker); 1750 3025 else if (!flush_enqueued) 1751 - skd_request_fn(skdev->queue); 3026 + schedule_work(&skdev->start_queue); 1752 3027 1753 3028 spin_unlock(&skdev->lock); 1754 3029 ··· 1758 3033 static void skd_drive_fault(struct skd_device *skdev) 1759 3034 { 1760 3035 skdev->state = SKD_DRVR_STATE_FAULT; 1761 - pr_err("(%s): Drive FAULT\n", skd_name(skdev)); 3036 + dev_err(&skdev->pdev->dev, "Drive FAULT\n"); 1762 3037 } 1763 3038 1764 3039 static void skd_drive_disappeared(struct skd_device *skdev) 1765 3040 { 1766 3041 skdev->state = SKD_DRVR_STATE_DISAPPEARED; 1767 - pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); 3042 + dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n"); 1768 3043 } 1769 3044 1770 3045 static void skd_isr_fwstate(struct skd_device *skdev) ··· 1777 3052 sense = SKD_READL(skdev, FIT_STATUS); 1778 3053 state = sense & FIT_SR_DRIVE_STATE_MASK; 1779 3054 1780 - pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", 1781 - skd_name(skdev), 1782 - skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 1783 - skd_drive_state_to_str(state), state); 3055 + dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n", 3056 + skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3057 + skd_drive_state_to_str(state), state); 1784 3058 1785 3059 skdev->drive_state = state; 1786 3060 ··· 1790 3066 break; 1791 3067 } 1792 3068 if (skdev->state == SKD_DRVR_STATE_RESTARTING) 1793 - skd_recover_requests(skdev, 0); 3069 + skd_recover_requests(skdev); 1794 3070 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { 1795 3071 skdev->timer_countdown = SKD_STARTING_TIMO; 1796 3072 skdev->state = SKD_DRVR_STATE_STARTING; ··· 1811 3087 skdev->cur_max_queue_depth * 2 / 3 + 1; 1812 3088 if 
(skdev->queue_low_water_mark < 1) 1813 3089 skdev->queue_low_water_mark = 1; 1814 - pr_info( 1815 - "(%s): Queue depth limit=%d dev=%d lowat=%d\n", 1816 - skd_name(skdev), 1817 - skdev->cur_max_queue_depth, 1818 - skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 3090 + dev_info(&skdev->pdev->dev, 3091 + "Queue depth limit=%d dev=%d lowat=%d\n", 3092 + skdev->cur_max_queue_depth, 3093 + skdev->dev_max_queue_depth, 3094 + skdev->queue_low_water_mark); 1819 3095 1820 3096 skd_refresh_device_data(skdev); 1821 3097 break; ··· 1831 3107 */ 1832 3108 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 1833 3109 skdev->timer_countdown = SKD_TIMER_SECONDS(3); 1834 - blk_start_queue(skdev->queue); 3110 + schedule_work(&skdev->start_queue); 1835 3111 break; 1836 3112 case FIT_SR_DRIVE_BUSY_ERASE: 1837 3113 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; ··· 1852 3128 } 1853 3129 break; 1854 3130 case FIT_SR_DRIVE_FW_BOOTING: 1855 - pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n", 1856 - skdev->name, __func__, __LINE__, skdev->name); 3131 + dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n"); 1857 3132 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 1858 3133 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 1859 3134 break; ··· 1864 3141 1865 3142 case FIT_SR_DRIVE_FAULT: 1866 3143 skd_drive_fault(skdev); 1867 - skd_recover_requests(skdev, 0); 1868 - blk_start_queue(skdev->queue); 3144 + skd_recover_requests(skdev); 3145 + schedule_work(&skdev->start_queue); 1869 3146 break; 1870 3147 1871 3148 /* PCIe bus returned all Fs? 
*/ 1872 3149 case 0xFF: 1873 - pr_info("(%s): state=0x%x sense=0x%x\n", 1874 - skd_name(skdev), state, sense); 3150 + dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state, 3151 + sense); 1875 3152 skd_drive_disappeared(skdev); 1876 - skd_recover_requests(skdev, 0); 1877 - blk_start_queue(skdev->queue); 3153 + skd_recover_requests(skdev); 3154 + schedule_work(&skdev->start_queue); 1878 3155 break; 1879 3156 default: 1880 3157 /* ··· 1882 3159 */ 1883 3160 break; 1884 3161 } 1885 - pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", 1886 - skd_name(skdev), 1887 - skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 1888 - skd_skdev_state_to_str(skdev->state), skdev->state); 3162 + dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", 3163 + skd_skdev_state_to_str(prev_driver_state), prev_driver_state, 3164 + skd_skdev_state_to_str(skdev->state), skdev->state); 1889 3165 } 1890 3166 1891 - static void skd_recover_requests(struct skd_device *skdev, int requeue) 3167 + static void skd_recover_request(struct request *req, void *data, bool reserved) 1892 3168 { 1893 - int i; 3169 + struct skd_device *const skdev = data; 3170 + struct skd_request_context *skreq = blk_mq_rq_to_pdu(req); 1894 3171 1895 - for (i = 0; i < skdev->num_req_context; i++) { 1896 - struct skd_request_context *skreq = &skdev->skreq_table[i]; 3172 + if (skreq->state != SKD_REQ_STATE_BUSY) 3173 + return; 1897 3174 1898 - if (skreq->state == SKD_REQ_STATE_BUSY) { 1899 - skd_log_skreq(skdev, skreq, "recover"); 3175 + skd_log_skreq(skdev, skreq, "recover"); 1900 3176 1901 - SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); 1902 - SKD_ASSERT(skreq->req != NULL); 3177 + /* Release DMA resources for the request. */ 3178 + if (skreq->n_sg > 0) 3179 + skd_postop_sg_list(skdev, skreq); 1903 3180 1904 - /* Release DMA resources for the request. 
*/ 1905 - if (skreq->n_sg > 0) 1906 - skd_postop_sg_list(skdev, skreq); 3181 + skreq->state = SKD_REQ_STATE_IDLE; 3182 + skreq->status = BLK_STS_IOERR; 3183 + blk_mq_complete_request(req); 3184 + } 1907 3185 1908 - if (requeue && 1909 - (unsigned long) ++skreq->req->special < 1910 - SKD_MAX_RETRIES) 1911 - blk_requeue_request(skdev->queue, skreq->req); 1912 - else 1913 - skd_end_request(skdev, skreq, BLK_STS_IOERR); 1914 - 1915 - skreq->req = NULL; 1916 - 1917 - skreq->state = SKD_REQ_STATE_IDLE; 1918 - skreq->id += SKD_ID_INCR; 1919 - } 1920 - if (i > 0) 1921 - skreq[-1].next = skreq; 1922 - skreq->next = NULL; 1923 - } 1924 - skdev->skreq_free_list = skdev->skreq_table; 1925 - 1926 - for (i = 0; i < skdev->num_fitmsg_context; i++) { 1927 - struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; 1928 - 1929 - if (skmsg->state == SKD_MSG_STATE_BUSY) { 1930 - skd_log_skmsg(skdev, skmsg, "salvaged"); 1931 - SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); 1932 - skmsg->state = SKD_MSG_STATE_IDLE; 1933 - skmsg->id += SKD_ID_INCR; 1934 - } 1935 - if (i > 0) 1936 - skmsg[-1].next = skmsg; 1937 - skmsg->next = NULL; 1938 - } 1939 - skdev->skmsg_free_list = skdev->skmsg_table; 1940 - 1941 - for (i = 0; i < skdev->n_special; i++) { 1942 - struct skd_special_context *skspcl = &skdev->skspcl_table[i]; 1943 - 1944 - /* If orphaned, reclaim it because it has already been reported 1945 - * to the process as an error (it was just waiting for 1946 - * a completion that didn't come, and now it will never come) 1947 - * If busy, change to a state that will cause it to error 1948 - * out in the wait routine and let it do the normal 1949 - * reporting and reclaiming 1950 - */ 1951 - if (skspcl->req.state == SKD_REQ_STATE_BUSY) { 1952 - if (skspcl->orphaned) { 1953 - pr_debug("%s:%s:%d orphaned %p\n", 1954 - skdev->name, __func__, __LINE__, 1955 - skspcl); 1956 - skd_release_special(skdev, skspcl); 1957 - } else { 1958 - pr_debug("%s:%s:%d not orphaned %p\n", 1959 - skdev->name, 
__func__, __LINE__, 1960 - skspcl); 1961 - skspcl->req.state = SKD_REQ_STATE_ABORTED; 1962 - } 1963 - } 1964 - } 1965 - skdev->skspcl_free_list = skdev->skspcl_table; 1966 - 1967 - for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) 1968 - skdev->timeout_slot[i] = 0; 1969 - 1970 - skdev->in_flight = 0; 3186 + static void skd_recover_requests(struct skd_device *skdev) 3187 + { 3188 + blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev); 1971 3189 } 1972 3190 1973 3191 static void skd_isr_msg_from_dev(struct skd_device *skdev) ··· 1919 3255 1920 3256 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 1921 3257 1922 - pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n", 1923 - skdev->name, __func__, __LINE__, mfd, skdev->last_mtd); 3258 + dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd, 3259 + skdev->last_mtd); 1924 3260 1925 3261 /* ignore any mtd that is an ack for something we didn't send */ 1926 3262 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) ··· 1931 3267 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); 1932 3268 1933 3269 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { 1934 - pr_err("(%s): protocol mismatch\n", 1935 - skdev->name); 1936 - pr_err("(%s): got=%d support=%d\n", 1937 - skdev->name, skdev->proto_ver, 1938 - FIT_PROTOCOL_VERSION_1); 1939 - pr_err("(%s): please upgrade driver\n", 1940 - skdev->name); 3270 + dev_err(&skdev->pdev->dev, "protocol mismatch\n"); 3271 + dev_err(&skdev->pdev->dev, " got=%d support=%d\n", 3272 + skdev->proto_ver, FIT_PROTOCOL_VERSION_1); 3273 + dev_err(&skdev->pdev->dev, " please upgrade driver\n"); 1941 3274 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; 1942 3275 skd_soft_reset(skdev); 1943 3276 break; ··· 1988 3327 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); 1989 3328 skdev->last_mtd = mtd; 1990 3329 1991 - pr_err("(%s): Time sync driver=0x%x device=0x%x\n", 1992 - skd_name(skdev), 1993 - skdev->connect_time_stamp, skdev->drive_jiffies); 3330 + dev_err(&skdev->pdev->dev, "Time sync driver=0x%x 
device=0x%x\n", 3331 + skdev->connect_time_stamp, skdev->drive_jiffies); 1994 3332 break; 1995 3333 1996 3334 case FIT_MTD_ARM_QUEUE: ··· 2011 3351 sense = SKD_READL(skdev, FIT_CONTROL); 2012 3352 sense &= ~FIT_CR_ENABLE_INTERRUPTS; 2013 3353 SKD_WRITEL(skdev, sense, FIT_CONTROL); 2014 - pr_debug("%s:%s:%d sense 0x%x\n", 2015 - skdev->name, __func__, __LINE__, sense); 3354 + dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense); 2016 3355 2017 3356 /* Note that the 1s is written. A 1-bit means 2018 3357 * disable, a 0 means enable. ··· 2030 3371 /* Note that the compliment of mask is written. A 1-bit means 2031 3372 * disable, a 0 means enable. */ 2032 3373 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); 2033 - pr_debug("%s:%s:%d interrupt mask=0x%x\n", 2034 - skdev->name, __func__, __LINE__, ~val); 3374 + dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val); 2035 3375 2036 3376 val = SKD_READL(skdev, FIT_CONTROL); 2037 3377 val |= FIT_CR_ENABLE_INTERRUPTS; 2038 - pr_debug("%s:%s:%d control=0x%x\n", 2039 - skdev->name, __func__, __LINE__, val); 3378 + dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); 2040 3379 SKD_WRITEL(skdev, val, FIT_CONTROL); 2041 3380 } 2042 3381 ··· 2050 3393 2051 3394 val = SKD_READL(skdev, FIT_CONTROL); 2052 3395 val |= (FIT_CR_SOFT_RESET); 2053 - pr_debug("%s:%s:%d control=0x%x\n", 2054 - skdev->name, __func__, __LINE__, val); 3396 + dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); 2055 3397 SKD_WRITEL(skdev, val, FIT_CONTROL); 2056 3398 } 2057 3399 ··· 2067 3411 2068 3412 sense = SKD_READL(skdev, FIT_STATUS); 2069 3413 2070 - pr_debug("%s:%s:%d initial status=0x%x\n", 2071 - skdev->name, __func__, __LINE__, sense); 3414 + dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense); 2072 3415 2073 3416 state = sense & FIT_SR_DRIVE_STATE_MASK; 2074 3417 skdev->drive_state = state; ··· 2080 3425 2081 3426 switch (skdev->drive_state) { 2082 3427 case FIT_SR_DRIVE_OFFLINE: 2083 - pr_err("(%s): Drive offline...\n", skd_name(skdev)); 3428 + 
dev_err(&skdev->pdev->dev, "Drive offline...\n"); 2084 3429 break; 2085 3430 2086 3431 case FIT_SR_DRIVE_FW_BOOTING: 2087 - pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n", 2088 - skdev->name, __func__, __LINE__, skdev->name); 3432 + dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n"); 2089 3433 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; 2090 3434 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; 2091 3435 break; 2092 3436 2093 3437 case FIT_SR_DRIVE_BUSY_SANITIZE: 2094 - pr_info("(%s): Start: BUSY_SANITIZE\n", 2095 - skd_name(skdev)); 3438 + dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n"); 2096 3439 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; 2097 3440 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2098 3441 break; 2099 3442 2100 3443 case FIT_SR_DRIVE_BUSY_ERASE: 2101 - pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); 3444 + dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n"); 2102 3445 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; 2103 3446 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2104 3447 break; ··· 2107 3454 break; 2108 3455 2109 3456 case FIT_SR_DRIVE_BUSY: 2110 - pr_err("(%s): Drive Busy...\n", skd_name(skdev)); 3457 + dev_err(&skdev->pdev->dev, "Drive Busy...\n"); 2111 3458 skdev->state = SKD_DRVR_STATE_BUSY; 2112 3459 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; 2113 3460 break; 2114 3461 2115 3462 case FIT_SR_DRIVE_SOFT_RESET: 2116 - pr_err("(%s) drive soft reset in prog\n", 2117 - skd_name(skdev)); 3463 + dev_err(&skdev->pdev->dev, "drive soft reset in prog\n"); 2118 3464 break; 2119 3465 2120 3466 case FIT_SR_DRIVE_FAULT: ··· 2123 3471 */ 2124 3472 skd_drive_fault(skdev); 2125 3473 /*start the queue so we can respond with error to requests */ 2126 - pr_debug("%s:%s:%d starting %s queue\n", 2127 - skdev->name, __func__, __LINE__, skdev->name); 2128 - blk_start_queue(skdev->queue); 3474 + dev_dbg(&skdev->pdev->dev, "starting queue\n"); 3475 + schedule_work(&skdev->start_queue); 2129 3476 skdev->gendisk_on = -1; 2130 3477 
wake_up_interruptible(&skdev->waitq); 2131 3478 break; ··· 2134 3483 * to the BAR1 addresses. */ 2135 3484 skd_drive_disappeared(skdev); 2136 3485 /*start the queue so we can respond with error to requests */ 2137 - pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", 2138 - skdev->name, __func__, __LINE__, skdev->name); 2139 - blk_start_queue(skdev->queue); 3486 + dev_dbg(&skdev->pdev->dev, 3487 + "starting queue to error-out reqs\n"); 3488 + schedule_work(&skdev->start_queue); 2140 3489 skdev->gendisk_on = -1; 2141 3490 wake_up_interruptible(&skdev->waitq); 2142 3491 break; 2143 3492 2144 3493 default: 2145 - pr_err("(%s) Start: unknown state %x\n", 2146 - skd_name(skdev), skdev->drive_state); 3494 + dev_err(&skdev->pdev->dev, "Start: unknown state %x\n", 3495 + skdev->drive_state); 2147 3496 break; 2148 3497 } 2149 3498 2150 3499 state = SKD_READL(skdev, FIT_CONTROL); 2151 - pr_debug("%s:%s:%d FIT Control Status=0x%x\n", 2152 - skdev->name, __func__, __LINE__, state); 3500 + dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state); 2153 3501 2154 3502 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); 2155 - pr_debug("%s:%s:%d Intr Status=0x%x\n", 2156 - skdev->name, __func__, __LINE__, state); 3503 + dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state); 2157 3504 2158 3505 state = SKD_READL(skdev, FIT_INT_MASK_HOST); 2159 - pr_debug("%s:%s:%d Intr Mask=0x%x\n", 2160 - skdev->name, __func__, __LINE__, state); 3506 + dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state); 2161 3507 2162 3508 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); 2163 - pr_debug("%s:%s:%d Msg from Dev=0x%x\n", 2164 - skdev->name, __func__, __LINE__, state); 3509 + dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state); 2165 3510 2166 3511 state = SKD_READL(skdev, FIT_HW_VERSION); 2167 - pr_debug("%s:%s:%d HW version=0x%x\n", 2168 - skdev->name, __func__, __LINE__, state); 3512 + dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state); 2169 3513 2170 3514 
spin_unlock_irqrestore(&skdev->lock, flags); 2171 3515 } ··· 2175 3529 spin_lock_irqsave(&skdev->lock, flags); 2176 3530 2177 3531 if (skdev->state != SKD_DRVR_STATE_ONLINE) { 2178 - pr_err("(%s): skd_stop_device not online no sync\n", 2179 - skd_name(skdev)); 3532 + dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__); 2180 3533 goto stop_out; 2181 3534 } 2182 3535 2183 3536 if (skspcl->req.state != SKD_REQ_STATE_IDLE) { 2184 - pr_err("(%s): skd_stop_device no special\n", 2185 - skd_name(skdev)); 3537 + dev_err(&skdev->pdev->dev, "%s no special\n", __func__); 2186 3538 goto stop_out; 2187 3539 } 2188 3540 ··· 2198 3554 2199 3555 switch (skdev->sync_done) { 2200 3556 case 0: 2201 - pr_err("(%s): skd_stop_device no sync\n", 2202 - skd_name(skdev)); 3557 + dev_err(&skdev->pdev->dev, "%s no sync\n", __func__); 2203 3558 break; 2204 3559 case 1: 2205 - pr_err("(%s): skd_stop_device sync done\n", 2206 - skd_name(skdev)); 3560 + dev_err(&skdev->pdev->dev, "%s sync done\n", __func__); 2207 3561 break; 2208 3562 default: 2209 - pr_err("(%s): skd_stop_device sync error\n", 2210 - skd_name(skdev)); 3563 + dev_err(&skdev->pdev->dev, "%s sync error\n", __func__); 2211 3564 } 2212 3565 2213 3566 stop_out: ··· 2234 3593 } 2235 3594 2236 3595 if (dev_state != FIT_SR_DRIVE_INIT) 2237 - pr_err("(%s): skd_stop_device state error 0x%02x\n", 2238 - skd_name(skdev), dev_state); 3596 + dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__, 3597 + dev_state); 2239 3598 } 2240 3599 2241 3600 /* assume spinlock is held */ ··· 2248 3607 2249 3608 state = SKD_READL(skdev, FIT_STATUS); 2250 3609 2251 - pr_debug("%s:%s:%d drive status=0x%x\n", 2252 - skdev->name, __func__, __LINE__, state); 3610 + dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state); 2253 3611 2254 3612 state &= FIT_SR_DRIVE_STATE_MASK; 2255 3613 skdev->drive_state = state; ··· 2268 3628 switch (skdev->state) { 2269 3629 case SKD_DRVR_STATE_BUSY: 2270 3630 case SKD_DRVR_STATE_BUSY_IMMINENT: 2271 - 
pr_debug("%s:%s:%d stopping %s queue\n", 2272 - skdev->name, __func__, __LINE__, skdev->name); 2273 - blk_stop_queue(skdev->queue); 3631 + dev_dbg(&skdev->pdev->dev, "stopping queue\n"); 3632 + blk_mq_stop_hw_queues(skdev->queue); 2274 3633 break; 2275 3634 case SKD_DRVR_STATE_ONLINE: 2276 3635 case SKD_DRVR_STATE_STOPPING: ··· 2281 3642 case SKD_DRVR_STATE_RESUMING: 2282 3643 default: 2283 3644 rc = -EINVAL; 2284 - pr_debug("%s:%s:%d state [%d] not implemented\n", 2285 - skdev->name, __func__, __LINE__, skdev->state); 3645 + dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n", 3646 + skdev->state); 2286 3647 } 2287 3648 return rc; 2288 3649 } ··· 2294 3655 2295 3656 skd_log_skdev(skdev, "unquiesce"); 2296 3657 if (skdev->state == SKD_DRVR_STATE_ONLINE) { 2297 - pr_debug("%s:%s:%d **** device already ONLINE\n", 2298 - skdev->name, __func__, __LINE__); 3658 + dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n"); 2299 3659 return 0; 2300 3660 } 2301 3661 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { ··· 2307 3669 * to become available. 
2308 3670 */ 2309 3671 skdev->state = SKD_DRVR_STATE_BUSY; 2310 - pr_debug("%s:%s:%d drive BUSY state\n", 2311 - skdev->name, __func__, __LINE__); 3672 + dev_dbg(&skdev->pdev->dev, "drive BUSY state\n"); 2312 3673 return 0; 2313 3674 } 2314 3675 ··· 2326 3689 case SKD_DRVR_STATE_IDLE: 2327 3690 case SKD_DRVR_STATE_LOAD: 2328 3691 skdev->state = SKD_DRVR_STATE_ONLINE; 2329 - pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", 2330 - skd_name(skdev), 2331 - skd_skdev_state_to_str(prev_driver_state), 2332 - prev_driver_state, skd_skdev_state_to_str(skdev->state), 2333 - skdev->state); 2334 - pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n", 2335 - skdev->name, __func__, __LINE__); 2336 - pr_debug("%s:%s:%d starting %s queue\n", 2337 - skdev->name, __func__, __LINE__, skdev->name); 2338 - pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); 2339 - blk_start_queue(skdev->queue); 3692 + dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", 3693 + skd_skdev_state_to_str(prev_driver_state), 3694 + prev_driver_state, skd_skdev_state_to_str(skdev->state), 3695 + skdev->state); 3696 + dev_dbg(&skdev->pdev->dev, 3697 + "**** device ONLINE...starting block queue\n"); 3698 + dev_dbg(&skdev->pdev->dev, "starting queue\n"); 3699 + dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n"); 3700 + schedule_work(&skdev->start_queue); 2340 3701 skdev->gendisk_on = 1; 2341 3702 wake_up_interruptible(&skdev->waitq); 2342 3703 break; 2343 3704 2344 3705 case SKD_DRVR_STATE_DISAPPEARED: 2345 3706 default: 2346 - pr_debug("%s:%s:%d **** driver state %d, not implemented \n", 2347 - skdev->name, __func__, __LINE__, 2348 - skdev->state); 3707 + dev_dbg(&skdev->pdev->dev, 3708 + "**** driver state %d, not implemented\n", 3709 + skdev->state); 2349 3710 return -EBUSY; 2350 3711 } 2351 3712 return 0; ··· 2361 3726 unsigned long flags; 2362 3727 2363 3728 spin_lock_irqsave(&skdev->lock, flags); 2364 - pr_debug("%s:%s:%d MSIX = 0x%x\n", 2365 - skdev->name, __func__, __LINE__, 2366 - 
SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2367 - pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), 2368 - irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3729 + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 3730 + SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3731 + dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq, 3732 + SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2369 3733 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); 2370 3734 spin_unlock_irqrestore(&skdev->lock, flags); 2371 3735 return IRQ_HANDLED; ··· 2376 3742 unsigned long flags; 2377 3743 2378 3744 spin_lock_irqsave(&skdev->lock, flags); 2379 - pr_debug("%s:%s:%d MSIX = 0x%x\n", 2380 - skdev->name, __func__, __LINE__, 2381 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3745 + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 3746 + SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2382 3747 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); 2383 3748 skd_isr_fwstate(skdev); 2384 3749 spin_unlock_irqrestore(&skdev->lock, flags); ··· 2392 3759 int deferred; 2393 3760 2394 3761 spin_lock_irqsave(&skdev->lock, flags); 2395 - pr_debug("%s:%s:%d MSIX = 0x%x\n", 2396 - skdev->name, __func__, __LINE__, 2397 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3762 + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 3763 + SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2398 3764 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); 2399 3765 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, 2400 3766 &flush_enqueued); 2401 3767 if (flush_enqueued) 2402 - skd_request_fn(skdev->queue); 3768 + schedule_work(&skdev->start_queue); 2403 3769 2404 3770 if (deferred) 2405 3771 schedule_work(&skdev->completion_worker); 2406 3772 else if (!flush_enqueued) 2407 - skd_request_fn(skdev->queue); 3773 + schedule_work(&skdev->start_queue); 2408 3774 2409 3775 spin_unlock_irqrestore(&skdev->lock, flags); 2410 3776 ··· 2416 3784 unsigned long flags; 2417 3785 2418 3786 spin_lock_irqsave(&skdev->lock, flags); 
2419 - pr_debug("%s:%s:%d MSIX = 0x%x\n", 2420 - skdev->name, __func__, __LINE__, 2421 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3787 + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 3788 + SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2422 3789 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); 2423 3790 skd_isr_msg_from_dev(skdev); 2424 3791 spin_unlock_irqrestore(&skdev->lock, flags); ··· 2430 3799 unsigned long flags; 2431 3800 2432 3801 spin_lock_irqsave(&skdev->lock, flags); 2433 - pr_debug("%s:%s:%d MSIX = 0x%x\n", 2434 - skdev->name, __func__, __LINE__, 2435 - SKD_READL(skdev, FIT_INT_STATUS_HOST)); 3802 + dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", 3803 + SKD_READL(skdev, FIT_INT_STATUS_HOST)); 2436 3804 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); 2437 3805 spin_unlock_irqrestore(&skdev->lock, flags); 2438 3806 return IRQ_HANDLED; ··· 2480 3850 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT, 2481 3851 PCI_IRQ_MSIX); 2482 3852 if (rc < 0) { 2483 - pr_err("(%s): failed to enable MSI-X %d\n", 2484 - skd_name(skdev), rc); 3853 + dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc); 2485 3854 goto out; 2486 3855 } 2487 3856 ··· 2488 3859 sizeof(struct skd_msix_entry), GFP_KERNEL); 2489 3860 if (!skdev->msix_entries) { 2490 3861 rc = -ENOMEM; 2491 - pr_err("(%s): msix table allocation error\n", 2492 - skd_name(skdev)); 3862 + dev_err(&skdev->pdev->dev, "msix table allocation error\n"); 2493 3863 goto out; 2494 3864 } 2495 3865 ··· 2505 3877 msix_entries[i].handler, 0, 2506 3878 qentry->isr_name, skdev); 2507 3879 if (rc) { 2508 - pr_err("(%s): Unable to register(%d) MSI-X " 2509 - "handler %d: %s\n", 2510 - skd_name(skdev), rc, i, qentry->isr_name); 3880 + dev_err(&skdev->pdev->dev, 3881 + "Unable to register(%d) MSI-X handler %d: %s\n", 3882 + rc, i, qentry->isr_name); 2511 3883 goto msix_out; 2512 3884 } 2513 3885 } 2514 3886 2515 - pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n", 2516 - skdev->name, 
__func__, __LINE__, 2517 - pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT); 3887 + dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n", 3888 + SKD_MAX_MSIX_COUNT); 2518 3889 return 0; 2519 3890 2520 3891 msix_out: ··· 2536 3909 if (!rc) 2537 3910 return 0; 2538 3911 2539 - pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n", 2540 - skd_name(skdev), rc); 3912 + dev_err(&skdev->pdev->dev, 3913 + "failed to enable MSI-X, re-trying with MSI %d\n", rc); 2541 3914 } 2542 3915 2543 3916 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, ··· 2547 3920 irq_flag |= PCI_IRQ_MSI; 2548 3921 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag); 2549 3922 if (rc < 0) { 2550 - pr_err("(%s): failed to allocate the MSI interrupt %d\n", 2551 - skd_name(skdev), rc); 3923 + dev_err(&skdev->pdev->dev, 3924 + "failed to allocate the MSI interrupt %d\n", rc); 2552 3925 return rc; 2553 3926 } 2554 3927 ··· 2557 3930 skdev->isr_name, skdev); 2558 3931 if (rc) { 2559 3932 pci_free_irq_vectors(pdev); 2560 - pr_err("(%s): failed to allocate interrupt %d\n", 2561 - skd_name(skdev), rc); 3933 + dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n", 3934 + rc); 2562 3935 return rc; 2563 3936 } 2564 3937 ··· 2592 3965 ***************************************************************************** 2593 3966 */ 2594 3967 3968 + static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, 3969 + dma_addr_t *dma_handle, gfp_t gfp, 3970 + enum dma_data_direction dir) 3971 + { 3972 + struct device *dev = &skdev->pdev->dev; 3973 + void *buf; 3974 + 3975 + buf = kmem_cache_alloc(s, gfp); 3976 + if (!buf) 3977 + return NULL; 3978 + *dma_handle = dma_map_single(dev, buf, s->size, dir); 3979 + if (dma_mapping_error(dev, *dma_handle)) { 3980 + kfree(buf); 3981 + buf = NULL; 3982 + } 3983 + return buf; 3984 + } 3985 + 3986 + static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s, 3987 + void *vaddr, dma_addr_t dma_handle, 3988 + enum 
dma_data_direction dir) 3989 + { 3990 + if (!vaddr) 3991 + return; 3992 + 3993 + dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir); 3994 + kmem_cache_free(s, vaddr); 3995 + } 3996 + 2595 3997 static int skd_cons_skcomp(struct skd_device *skdev) 2596 3998 { 2597 3999 int rc = 0; 2598 4000 struct fit_completion_entry_v1 *skcomp; 2599 - u32 nbytes; 2600 4001 2601 - nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; 2602 - nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; 4002 + dev_dbg(&skdev->pdev->dev, 4003 + "comp pci_alloc, total bytes %zd entries %d\n", 4004 + SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2603 4005 2604 - pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n", 2605 - skdev->name, __func__, __LINE__, 2606 - nbytes, SKD_N_COMPLETION_ENTRY); 2607 - 2608 - skcomp = pci_zalloc_consistent(skdev->pdev, nbytes, 4006 + skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE, 2609 4007 &skdev->cq_dma_address); 2610 4008 2611 4009 if (skcomp == NULL) { ··· 2652 4000 int rc = 0; 2653 4001 u32 i; 2654 4002 2655 - pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n", 2656 - skdev->name, __func__, __LINE__, 2657 - sizeof(struct skd_fitmsg_context), 2658 - skdev->num_fitmsg_context, 2659 - sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); 4003 + dev_dbg(&skdev->pdev->dev, 4004 + "skmsg_table kcalloc, struct %lu, count %u total %lu\n", 4005 + sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context, 4006 + sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); 2660 4007 2661 - skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) 2662 - *skdev->num_fitmsg_context, GFP_KERNEL); 4008 + skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context, 4009 + sizeof(struct skd_fitmsg_context), 4010 + GFP_KERNEL); 2663 4011 if (skdev->skmsg_table == NULL) { 2664 4012 rc = -ENOMEM; 2665 4013 goto err_out; ··· 2672 4020 2673 4021 skmsg->id = i + SKD_ID_FIT_MSG; 2674 4022 2675 - 
skmsg->state = SKD_MSG_STATE_IDLE; 2676 4023 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, 2677 - SKD_N_FITMSG_BYTES + 64, 4024 + SKD_N_FITMSG_BYTES, 2678 4025 &skmsg->mb_dma_address); 2679 4026 2680 4027 if (skmsg->msg_buf == NULL) { ··· 2681 4030 goto err_out; 2682 4031 } 2683 4032 2684 - skmsg->offset = (u32)((u64)skmsg->msg_buf & 2685 - (~FIT_QCMD_BASE_ADDRESS_MASK)); 2686 - skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; 2687 - skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & 2688 - FIT_QCMD_BASE_ADDRESS_MASK); 2689 - skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; 2690 - skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; 4033 + WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) & 4034 + (FIT_QCMD_ALIGN - 1), 4035 + "not aligned: msg_buf %p mb_dma_address %#llx\n", 4036 + skmsg->msg_buf, skmsg->mb_dma_address); 2691 4037 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); 2692 - 2693 - skmsg->next = &skmsg[1]; 2694 4038 } 2695 - 2696 - /* Free list is in order starting with the 0th entry. 
*/ 2697 - skdev->skmsg_table[i - 1].next = NULL; 2698 - skdev->skmsg_free_list = skdev->skmsg_table; 2699 4039 2700 4040 err_out: 2701 4041 return rc; ··· 2697 4055 dma_addr_t *ret_dma_addr) 2698 4056 { 2699 4057 struct fit_sg_descriptor *sg_list; 2700 - u32 nbytes; 2701 4058 2702 - nbytes = sizeof(*sg_list) * n_sg; 2703 - 2704 - sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); 4059 + sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr, 4060 + GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); 2705 4061 2706 4062 if (sg_list != NULL) { 2707 4063 uint64_t dma_address = *ret_dma_addr; 2708 4064 u32 i; 2709 - 2710 - memset(sg_list, 0, nbytes); 2711 4065 2712 4066 for (i = 0; i < n_sg - 1; i++) { 2713 4067 uint64_t ndp_off; ··· 2717 4079 return sg_list; 2718 4080 } 2719 4081 2720 - static int skd_cons_skreq(struct skd_device *skdev) 4082 + static void skd_free_sg_list(struct skd_device *skdev, 4083 + struct fit_sg_descriptor *sg_list, 4084 + dma_addr_t dma_addr) 2721 4085 { 2722 - int rc = 0; 2723 - u32 i; 4086 + if (WARN_ON_ONCE(!sg_list)) 4087 + return; 2724 4088 2725 - pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n", 2726 - skdev->name, __func__, __LINE__, 2727 - sizeof(struct skd_request_context), 2728 - skdev->num_req_context, 2729 - sizeof(struct skd_request_context) * skdev->num_req_context); 2730 - 2731 - skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) 2732 - * skdev->num_req_context, GFP_KERNEL); 2733 - if (skdev->skreq_table == NULL) { 2734 - rc = -ENOMEM; 2735 - goto err_out; 2736 - } 2737 - 2738 - pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n", 2739 - skdev->name, __func__, __LINE__, 2740 - skdev->sgs_per_request, sizeof(struct scatterlist), 2741 - skdev->sgs_per_request * sizeof(struct scatterlist)); 2742 - 2743 - for (i = 0; i < skdev->num_req_context; i++) { 2744 - struct skd_request_context *skreq; 2745 - 2746 - skreq = &skdev->skreq_table[i]; 2747 - 2748 - skreq->id 
= i + SKD_ID_RW_REQUEST; 2749 - skreq->state = SKD_REQ_STATE_IDLE; 2750 - 2751 - skreq->sg = kzalloc(sizeof(struct scatterlist) * 2752 - skdev->sgs_per_request, GFP_KERNEL); 2753 - if (skreq->sg == NULL) { 2754 - rc = -ENOMEM; 2755 - goto err_out; 2756 - } 2757 - sg_init_table(skreq->sg, skdev->sgs_per_request); 2758 - 2759 - skreq->sksg_list = skd_cons_sg_list(skdev, 2760 - skdev->sgs_per_request, 2761 - &skreq->sksg_dma_address); 2762 - 2763 - if (skreq->sksg_list == NULL) { 2764 - rc = -ENOMEM; 2765 - goto err_out; 2766 - } 2767 - 2768 - skreq->next = &skreq[1]; 2769 - } 2770 - 2771 - /* Free list is in order starting with the 0th entry. */ 2772 - skdev->skreq_table[i - 1].next = NULL; 2773 - skdev->skreq_free_list = skdev->skreq_table; 2774 - 2775 - err_out: 2776 - return rc; 4089 + skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr, 4090 + DMA_TO_DEVICE); 2777 4091 } 2778 4092 2779 - static int skd_cons_skspcl(struct skd_device *skdev) 4093 + static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq, 4094 + unsigned int hctx_idx, unsigned int numa_node) 2780 4095 { 2781 - int rc = 0; 2782 - u32 i, nbytes; 4096 + struct skd_device *skdev = set->driver_data; 4097 + struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); 2783 4098 2784 - pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n", 2785 - skdev->name, __func__, __LINE__, 2786 - sizeof(struct skd_special_context), 2787 - skdev->n_special, 2788 - sizeof(struct skd_special_context) * skdev->n_special); 4099 + skreq->state = SKD_REQ_STATE_IDLE; 4100 + skreq->sg = (void *)(skreq + 1); 4101 + sg_init_table(skreq->sg, skd_sgs_per_request); 4102 + skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request, 4103 + &skreq->sksg_dma_address); 2789 4104 2790 - skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) 2791 - * skdev->n_special, GFP_KERNEL); 2792 - if (skdev->skspcl_table == NULL) { 2793 - rc = -ENOMEM; 2794 - goto err_out; 2795 - } 4105 + 
return skreq->sksg_list ? 0 : -ENOMEM; 4106 + } 2796 4107 2797 - for (i = 0; i < skdev->n_special; i++) { 2798 - struct skd_special_context *skspcl; 4108 + static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq, 4109 + unsigned int hctx_idx) 4110 + { 4111 + struct skd_device *skdev = set->driver_data; 4112 + struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq); 2799 4113 2800 - skspcl = &skdev->skspcl_table[i]; 2801 - 2802 - skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; 2803 - skspcl->req.state = SKD_REQ_STATE_IDLE; 2804 - 2805 - skspcl->req.next = &skspcl[1].req; 2806 - 2807 - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 2808 - 2809 - skspcl->msg_buf = 2810 - pci_zalloc_consistent(skdev->pdev, nbytes, 2811 - &skspcl->mb_dma_address); 2812 - if (skspcl->msg_buf == NULL) { 2813 - rc = -ENOMEM; 2814 - goto err_out; 2815 - } 2816 - 2817 - skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * 2818 - SKD_N_SG_PER_SPECIAL, GFP_KERNEL); 2819 - if (skspcl->req.sg == NULL) { 2820 - rc = -ENOMEM; 2821 - goto err_out; 2822 - } 2823 - 2824 - skspcl->req.sksg_list = skd_cons_sg_list(skdev, 2825 - SKD_N_SG_PER_SPECIAL, 2826 - &skspcl->req. 2827 - sksg_dma_address); 2828 - if (skspcl->req.sksg_list == NULL) { 2829 - rc = -ENOMEM; 2830 - goto err_out; 2831 - } 2832 - } 2833 - 2834 - /* Free list is in order starting with the 0th entry. 
*/ 2835 - skdev->skspcl_table[i - 1].req.next = NULL; 2836 - skdev->skspcl_free_list = skdev->skspcl_table; 2837 - 2838 - return rc; 2839 - 2840 - err_out: 2841 - return rc; 4114 + skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address); 2842 4115 } 2843 4116 2844 4117 static int skd_cons_sksb(struct skd_device *skdev) 2845 4118 { 2846 4119 int rc = 0; 2847 4120 struct skd_special_context *skspcl; 2848 - u32 nbytes; 2849 4121 2850 4122 skspcl = &skdev->internal_skspcl; 2851 4123 2852 4124 skspcl->req.id = 0 + SKD_ID_INTERNAL; 2853 4125 skspcl->req.state = SKD_REQ_STATE_IDLE; 2854 4126 2855 - nbytes = SKD_N_INTERNAL_BYTES; 2856 - 2857 - skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes, 2858 - &skspcl->db_dma_address); 4127 + skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache, 4128 + &skspcl->db_dma_address, 4129 + GFP_DMA | __GFP_ZERO, 4130 + DMA_BIDIRECTIONAL); 2859 4131 if (skspcl->data_buf == NULL) { 2860 4132 rc = -ENOMEM; 2861 4133 goto err_out; 2862 4134 } 2863 4135 2864 - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 2865 - skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes, 2866 - &skspcl->mb_dma_address); 4136 + skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache, 4137 + &skspcl->mb_dma_address, 4138 + GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE); 2867 4139 if (skspcl->msg_buf == NULL) { 2868 4140 rc = -ENOMEM; 2869 4141 goto err_out; ··· 2794 4246 err_out: 2795 4247 return rc; 2796 4248 } 4249 + 4250 + static const struct blk_mq_ops skd_mq_ops = { 4251 + .queue_rq = skd_mq_queue_rq, 4252 + .complete = skd_complete_rq, 4253 + .timeout = skd_timed_out, 4254 + .init_request = skd_init_request, 4255 + .exit_request = skd_exit_request, 4256 + }; 2797 4257 2798 4258 static int skd_cons_disk(struct skd_device *skdev) 2799 4259 { ··· 2824 4268 disk->fops = &skd_blockdev_ops; 2825 4269 disk->private_data = skdev; 2826 4270 2827 - q = blk_init_queue(skd_request_fn, &skdev->lock); 2828 - if (!q) { 2829 - rc = -ENOMEM; 4271 + 
memset(&skdev->tag_set, 0, sizeof(skdev->tag_set)); 4272 + skdev->tag_set.ops = &skd_mq_ops; 4273 + skdev->tag_set.nr_hw_queues = 1; 4274 + skdev->tag_set.queue_depth = skd_max_queue_depth; 4275 + skdev->tag_set.cmd_size = sizeof(struct skd_request_context) + 4276 + skdev->sgs_per_request * sizeof(struct scatterlist); 4277 + skdev->tag_set.numa_node = NUMA_NO_NODE; 4278 + skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 4279 + BLK_MQ_F_SG_MERGE | 4280 + BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO); 4281 + skdev->tag_set.driver_data = skdev; 4282 + rc = blk_mq_alloc_tag_set(&skdev->tag_set); 4283 + if (rc) 4284 + goto err_out; 4285 + q = blk_mq_init_queue(&skdev->tag_set); 4286 + if (IS_ERR(q)) { 4287 + blk_mq_free_tag_set(&skdev->tag_set); 4288 + rc = PTR_ERR(q); 2830 4289 goto err_out; 2831 4290 } 2832 - blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 4291 + q->queuedata = skdev; 2833 4292 2834 4293 skdev->queue = q; 2835 4294 disk->queue = q; 2836 - q->queuedata = skdev; 2837 4295 2838 4296 blk_queue_write_cache(q, true, true); 2839 4297 blk_queue_max_segments(q, skdev->sgs_per_request); 2840 4298 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); 2841 4299 2842 - /* set sysfs ptimal_io_size to 8K */ 4300 + /* set optimal I/O size to 8KB */ 2843 4301 blk_queue_io_opt(q, 8192); 2844 4302 2845 4303 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 2846 4304 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 2847 4305 4306 + blk_queue_rq_timeout(q, 8 * HZ); 4307 + 2848 4308 spin_lock_irqsave(&skdev->lock, flags); 2849 - pr_debug("%s:%s:%d stopping %s queue\n", 2850 - skdev->name, __func__, __LINE__, skdev->name); 2851 - blk_stop_queue(skdev->queue); 4309 + dev_dbg(&skdev->pdev->dev, "stopping queue\n"); 4310 + blk_mq_stop_hw_queues(skdev->queue); 2852 4311 spin_unlock_irqrestore(&skdev->lock, flags); 2853 4312 2854 4313 err_out: ··· 2877 4306 { 2878 4307 struct skd_device *skdev; 2879 4308 int blk_major = skd_major; 4309 + size_t size; 2880 4310 int rc; 2881 4311 2882 4312 
skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); 2883 4313 2884 4314 if (!skdev) { 2885 - pr_err(PFX "(%s): memory alloc failure\n", 2886 - pci_name(pdev)); 4315 + dev_err(&pdev->dev, "memory alloc failure\n"); 2887 4316 return NULL; 2888 4317 } 2889 4318 ··· 2891 4320 skdev->pdev = pdev; 2892 4321 skdev->devno = skd_next_devno++; 2893 4322 skdev->major = blk_major; 2894 - sprintf(skdev->name, DRV_NAME "%d", skdev->devno); 2895 4323 skdev->dev_max_queue_depth = 0; 2896 4324 2897 4325 skdev->num_req_context = skd_max_queue_depth; 2898 4326 skdev->num_fitmsg_context = skd_max_queue_depth; 2899 - skdev->n_special = skd_max_pass_thru; 2900 4327 skdev->cur_max_queue_depth = 1; 2901 4328 skdev->queue_low_water_mark = 1; 2902 4329 skdev->proto_ver = 99; 2903 4330 skdev->sgs_per_request = skd_sgs_per_request; 2904 4331 skdev->dbg_level = skd_dbg_level; 2905 4332 2906 - atomic_set(&skdev->device_count, 0); 2907 - 2908 4333 spin_lock_init(&skdev->lock); 2909 4334 4335 + INIT_WORK(&skdev->start_queue, skd_start_queue); 2910 4336 INIT_WORK(&skdev->completion_worker, skd_completion_worker); 2911 4337 2912 - pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); 4338 + size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES); 4339 + skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0, 4340 + SLAB_HWCACHE_ALIGN, NULL); 4341 + if (!skdev->msgbuf_cache) 4342 + goto err_out; 4343 + WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size, 4344 + "skd-msgbuf: %d < %zd\n", 4345 + kmem_cache_size(skdev->msgbuf_cache), size); 4346 + size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor); 4347 + skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0, 4348 + SLAB_HWCACHE_ALIGN, NULL); 4349 + if (!skdev->sglist_cache) 4350 + goto err_out; 4351 + WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size, 4352 + "skd-sglist: %d < %zd\n", 4353 + kmem_cache_size(skdev->sglist_cache), size); 4354 + size = SKD_N_INTERNAL_BYTES; 4355 + skdev->databuf_cache = 
kmem_cache_create("skd-databuf", size, 0, 4356 + SLAB_HWCACHE_ALIGN, NULL); 4357 + if (!skdev->databuf_cache) 4358 + goto err_out; 4359 + WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size, 4360 + "skd-databuf: %d < %zd\n", 4361 + kmem_cache_size(skdev->databuf_cache), size); 4362 + 4363 + dev_dbg(&skdev->pdev->dev, "skcomp\n"); 2913 4364 rc = skd_cons_skcomp(skdev); 2914 4365 if (rc < 0) 2915 4366 goto err_out; 2916 4367 2917 - pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); 4368 + dev_dbg(&skdev->pdev->dev, "skmsg\n"); 2918 4369 rc = skd_cons_skmsg(skdev); 2919 4370 if (rc < 0) 2920 4371 goto err_out; 2921 4372 2922 - pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); 2923 - rc = skd_cons_skreq(skdev); 2924 - if (rc < 0) 2925 - goto err_out; 2926 - 2927 - pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); 2928 - rc = skd_cons_skspcl(skdev); 2929 - if (rc < 0) 2930 - goto err_out; 2931 - 2932 - pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); 4373 + dev_dbg(&skdev->pdev->dev, "sksb\n"); 2933 4374 rc = skd_cons_sksb(skdev); 2934 4375 if (rc < 0) 2935 4376 goto err_out; 2936 4377 2937 - pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); 4378 + dev_dbg(&skdev->pdev->dev, "disk\n"); 2938 4379 rc = skd_cons_disk(skdev); 2939 4380 if (rc < 0) 2940 4381 goto err_out; 2941 4382 2942 - pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__); 4383 + dev_dbg(&skdev->pdev->dev, "VICTORY\n"); 2943 4384 return skdev; 2944 4385 2945 4386 err_out: 2946 - pr_debug("%s:%s:%d construct failed\n", 2947 - skdev->name, __func__, __LINE__); 4387 + dev_dbg(&skdev->pdev->dev, "construct failed\n"); 2948 4388 skd_destruct(skdev); 2949 4389 return NULL; 2950 4390 } ··· 2968 4386 2969 4387 static void skd_free_skcomp(struct skd_device *skdev) 2970 4388 { 2971 - if (skdev->skcomp_table != NULL) { 2972 - u32 nbytes; 2973 - 2974 - nbytes = sizeof(skdev->skcomp_table[0]) * 2975 - SKD_N_COMPLETION_ENTRY; 2976 - 
pci_free_consistent(skdev->pdev, nbytes, 4389 + if (skdev->skcomp_table) 4390 + pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE, 2977 4391 skdev->skcomp_table, skdev->cq_dma_address); 2978 - } 2979 4392 2980 4393 skdev->skcomp_table = NULL; 2981 4394 skdev->cq_dma_address = 0; ··· 2989 4412 skmsg = &skdev->skmsg_table[i]; 2990 4413 2991 4414 if (skmsg->msg_buf != NULL) { 2992 - skmsg->msg_buf += skmsg->offset; 2993 - skmsg->mb_dma_address += skmsg->offset; 2994 4415 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, 2995 4416 skmsg->msg_buf, 2996 4417 skmsg->mb_dma_address); ··· 3001 4426 skdev->skmsg_table = NULL; 3002 4427 } 3003 4428 3004 - static void skd_free_sg_list(struct skd_device *skdev, 3005 - struct fit_sg_descriptor *sg_list, 3006 - u32 n_sg, dma_addr_t dma_addr) 3007 - { 3008 - if (sg_list != NULL) { 3009 - u32 nbytes; 3010 - 3011 - nbytes = sizeof(*sg_list) * n_sg; 3012 - 3013 - pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); 3014 - } 3015 - } 3016 - 3017 - static void skd_free_skreq(struct skd_device *skdev) 3018 - { 3019 - u32 i; 3020 - 3021 - if (skdev->skreq_table == NULL) 3022 - return; 3023 - 3024 - for (i = 0; i < skdev->num_req_context; i++) { 3025 - struct skd_request_context *skreq; 3026 - 3027 - skreq = &skdev->skreq_table[i]; 3028 - 3029 - skd_free_sg_list(skdev, skreq->sksg_list, 3030 - skdev->sgs_per_request, 3031 - skreq->sksg_dma_address); 3032 - 3033 - skreq->sksg_list = NULL; 3034 - skreq->sksg_dma_address = 0; 3035 - 3036 - kfree(skreq->sg); 3037 - } 3038 - 3039 - kfree(skdev->skreq_table); 3040 - skdev->skreq_table = NULL; 3041 - } 3042 - 3043 - static void skd_free_skspcl(struct skd_device *skdev) 3044 - { 3045 - u32 i; 3046 - u32 nbytes; 3047 - 3048 - if (skdev->skspcl_table == NULL) 3049 - return; 3050 - 3051 - for (i = 0; i < skdev->n_special; i++) { 3052 - struct skd_special_context *skspcl; 3053 - 3054 - skspcl = &skdev->skspcl_table[i]; 3055 - 3056 - if (skspcl->msg_buf != NULL) { 3057 - nbytes = 
SKD_N_SPECIAL_FITMSG_BYTES; 3058 - pci_free_consistent(skdev->pdev, nbytes, 3059 - skspcl->msg_buf, 3060 - skspcl->mb_dma_address); 3061 - } 3062 - 3063 - skspcl->msg_buf = NULL; 3064 - skspcl->mb_dma_address = 0; 3065 - 3066 - skd_free_sg_list(skdev, skspcl->req.sksg_list, 3067 - SKD_N_SG_PER_SPECIAL, 3068 - skspcl->req.sksg_dma_address); 3069 - 3070 - skspcl->req.sksg_list = NULL; 3071 - skspcl->req.sksg_dma_address = 0; 3072 - 3073 - kfree(skspcl->req.sg); 3074 - } 3075 - 3076 - kfree(skdev->skspcl_table); 3077 - skdev->skspcl_table = NULL; 3078 - } 3079 - 3080 4429 static void skd_free_sksb(struct skd_device *skdev) 3081 4430 { 3082 - struct skd_special_context *skspcl; 3083 - u32 nbytes; 4431 + struct skd_special_context *skspcl = &skdev->internal_skspcl; 3084 4432 3085 - skspcl = &skdev->internal_skspcl; 3086 - 3087 - if (skspcl->data_buf != NULL) { 3088 - nbytes = SKD_N_INTERNAL_BYTES; 3089 - 3090 - pci_free_consistent(skdev->pdev, nbytes, 3091 - skspcl->data_buf, skspcl->db_dma_address); 3092 - } 4433 + skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf, 4434 + skspcl->db_dma_address, DMA_BIDIRECTIONAL); 3093 4435 3094 4436 skspcl->data_buf = NULL; 3095 4437 skspcl->db_dma_address = 0; 3096 4438 3097 - if (skspcl->msg_buf != NULL) { 3098 - nbytes = SKD_N_SPECIAL_FITMSG_BYTES; 3099 - pci_free_consistent(skdev->pdev, nbytes, 3100 - skspcl->msg_buf, skspcl->mb_dma_address); 3101 - } 4439 + skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf, 4440 + skspcl->mb_dma_address, DMA_TO_DEVICE); 3102 4441 3103 4442 skspcl->msg_buf = NULL; 3104 4443 skspcl->mb_dma_address = 0; 3105 4444 3106 - skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, 4445 + skd_free_sg_list(skdev, skspcl->req.sksg_list, 3107 4446 skspcl->req.sksg_dma_address); 3108 4447 3109 4448 skspcl->req.sksg_list = NULL; ··· 3028 4539 { 3029 4540 struct gendisk *disk = skdev->disk; 3030 4541 3031 - if (disk != NULL) { 3032 - struct request_queue *q = disk->queue; 4542 + if (disk && 
(disk->flags & GENHD_FL_UP)) 4543 + del_gendisk(disk); 3033 4544 3034 - if (disk->flags & GENHD_FL_UP) 3035 - del_gendisk(disk); 3036 - if (q) 3037 - blk_cleanup_queue(q); 3038 - put_disk(disk); 4545 + if (skdev->queue) { 4546 + blk_cleanup_queue(skdev->queue); 4547 + skdev->queue = NULL; 4548 + if (disk) 4549 + disk->queue = NULL; 3039 4550 } 4551 + 4552 + if (skdev->tag_set.tags) 4553 + blk_mq_free_tag_set(&skdev->tag_set); 4554 + 4555 + put_disk(disk); 3040 4556 skdev->disk = NULL; 3041 4557 } 3042 4558 ··· 3050 4556 if (skdev == NULL) 3051 4557 return; 3052 4558 4559 + cancel_work_sync(&skdev->start_queue); 3053 4560 3054 - pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__); 4561 + dev_dbg(&skdev->pdev->dev, "disk\n"); 3055 4562 skd_free_disk(skdev); 3056 4563 3057 - pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__); 4564 + dev_dbg(&skdev->pdev->dev, "sksb\n"); 3058 4565 skd_free_sksb(skdev); 3059 4566 3060 - pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__); 3061 - skd_free_skspcl(skdev); 3062 - 3063 - pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__); 3064 - skd_free_skreq(skdev); 3065 - 3066 - pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__); 4567 + dev_dbg(&skdev->pdev->dev, "skmsg\n"); 3067 4568 skd_free_skmsg(skdev); 3068 4569 3069 - pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__); 4570 + dev_dbg(&skdev->pdev->dev, "skcomp\n"); 3070 4571 skd_free_skcomp(skdev); 3071 4572 3072 - pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__); 4573 + kmem_cache_destroy(skdev->databuf_cache); 4574 + kmem_cache_destroy(skdev->sglist_cache); 4575 + kmem_cache_destroy(skdev->msgbuf_cache); 4576 + 4577 + dev_dbg(&skdev->pdev->dev, "skdev\n"); 3073 4578 kfree(skdev); 3074 4579 } 3075 4580 ··· 3085 4592 3086 4593 skdev = bdev->bd_disk->private_data; 3087 4594 3088 - pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n", 3089 - skdev->name, __func__, __LINE__, 3090 - bdev->bd_disk->disk_name, 
current->comm); 4595 + dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n", 4596 + bdev->bd_disk->disk_name, current->comm); 3091 4597 3092 4598 if (skdev->read_cap_is_valid) { 3093 4599 capacity = get_capacity(skdev->disk); ··· 3101 4609 3102 4610 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) 3103 4611 { 3104 - pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__); 4612 + dev_dbg(&skdev->pdev->dev, "add_disk\n"); 3105 4613 device_add_disk(parent, skdev->disk); 3106 4614 return 0; 3107 4615 } 3108 4616 3109 4617 static const struct block_device_operations skd_blockdev_ops = { 3110 4618 .owner = THIS_MODULE, 3111 - .ioctl = skd_bdev_ioctl, 3112 4619 .getgeo = skd_bdev_getgeo, 3113 4620 }; 3114 - 3115 4621 3116 4622 /* 3117 4623 ***************************************************************************** ··· 3161 4671 char pci_str[32]; 3162 4672 struct skd_device *skdev; 3163 4673 3164 - pr_info("STEC s1120 Driver(%s) version %s-b%s\n", 3165 - DRV_NAME, DRV_VERSION, DRV_BUILD_ID); 3166 - pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n", 3167 - pci_name(pdev), pdev->vendor, pdev->device); 4674 + dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor, 4675 + pdev->device); 3168 4676 3169 4677 rc = pci_enable_device(pdev); 3170 4678 if (rc) ··· 3173 4685 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3174 4686 if (!rc) { 3175 4687 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3176 - 3177 - pr_err("(%s): consistent DMA mask error %d\n", 3178 - pci_name(pdev), rc); 4688 + dev_err(&pdev->dev, "consistent DMA mask error %d\n", 4689 + rc); 3179 4690 } 3180 4691 } else { 3181 - (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))); 4692 + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3182 4693 if (rc) { 3183 - 3184 - pr_err("(%s): DMA mask error %d\n", 3185 - pci_name(pdev), rc); 4694 + dev_err(&pdev->dev, "DMA mask error %d\n", rc); 3186 4695 goto err_out_regions; 3187 4696 } 3188 4697 } ··· 3199 4714 } 3200 
4715 3201 4716 skd_pci_info(skdev, pci_str); 3202 - pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str); 4717 + dev_info(&pdev->dev, "%s 64bit\n", pci_str); 3203 4718 3204 4719 pci_set_master(pdev); 3205 4720 rc = pci_enable_pcie_error_reporting(pdev); 3206 4721 if (rc) { 3207 - pr_err( 3208 - "(%s): bad enable of PCIe error reporting rc=%d\n", 3209 - skd_name(skdev), rc); 4722 + dev_err(&pdev->dev, 4723 + "bad enable of PCIe error reporting rc=%d\n", rc); 3210 4724 skdev->pcie_error_reporting_is_enabled = 0; 3211 4725 } else 3212 4726 skdev->pcie_error_reporting_is_enabled = 1; 3213 - 3214 4727 3215 4728 pci_set_drvdata(pdev, skdev); 3216 4729 ··· 3218 4735 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 3219 4736 skdev->mem_size[i]); 3220 4737 if (!skdev->mem_map[i]) { 3221 - pr_err("(%s): Unable to map adapter memory!\n", 3222 - skd_name(skdev)); 4738 + dev_err(&pdev->dev, 4739 + "Unable to map adapter memory!\n"); 3223 4740 rc = -ENODEV; 3224 4741 goto err_out_iounmap; 3225 4742 } 3226 - pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", 3227 - skdev->name, __func__, __LINE__, 3228 - skdev->mem_map[i], 3229 - (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); 4743 + dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", 4744 + skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], 4745 + skdev->mem_size[i]); 3230 4746 } 3231 4747 3232 4748 rc = skd_acquire_irq(skdev); 3233 4749 if (rc) { 3234 - pr_err("(%s): interrupt resource error %d\n", 3235 - skd_name(skdev), rc); 4750 + dev_err(&pdev->dev, "interrupt resource error %d\n", rc); 3236 4751 goto err_out_iounmap; 3237 4752 } 3238 4753 ··· 3252 4771 } else { 3253 4772 /* we timed out, something is wrong with the device, 3254 4773 don't add the disk structure */ 3255 - pr_err( 3256 - "(%s): error: waiting for s1120 timed out %d!\n", 3257 - skd_name(skdev), rc); 4774 + dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n", 4775 + rc); 3258 4776 /* in case of no error; we timeout with ENXIO */ 3259 
4777 if (!rc) 3260 4778 rc = -ENXIO; 3261 4779 goto err_out_timer; 3262 4780 } 3263 - 3264 - 3265 - #ifdef SKD_VMK_POLL_HANDLER 3266 - if (skdev->irq_type == SKD_IRQ_MSIX) { 3267 - /* MSIX completion handler is being used for coredump */ 3268 - vmklnx_scsi_register_poll_handler(skdev->scsi_host, 3269 - skdev->msix_entries[5].vector, 3270 - skd_comp_q, skdev); 3271 - } else { 3272 - vmklnx_scsi_register_poll_handler(skdev->scsi_host, 3273 - skdev->pdev->irq, skd_isr, 3274 - skdev); 3275 - } 3276 - #endif /* SKD_VMK_POLL_HANDLER */ 3277 4781 3278 4782 return rc; 3279 4783 ··· 3292 4826 3293 4827 skdev = pci_get_drvdata(pdev); 3294 4828 if (!skdev) { 3295 - pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4829 + dev_err(&pdev->dev, "no device data for PCI\n"); 3296 4830 return; 3297 4831 } 3298 4832 skd_stop_device(skdev); ··· 3300 4834 3301 4835 for (i = 0; i < SKD_MAX_BARS; i++) 3302 4836 if (skdev->mem_map[i]) 3303 - iounmap((u32 *)skdev->mem_map[i]); 4837 + iounmap(skdev->mem_map[i]); 3304 4838 3305 4839 if (skdev->pcie_error_reporting_is_enabled) 3306 4840 pci_disable_pcie_error_reporting(pdev); ··· 3321 4855 3322 4856 skdev = pci_get_drvdata(pdev); 3323 4857 if (!skdev) { 3324 - pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4858 + dev_err(&pdev->dev, "no device data for PCI\n"); 3325 4859 return -EIO; 3326 4860 } 3327 4861 ··· 3331 4865 3332 4866 for (i = 0; i < SKD_MAX_BARS; i++) 3333 4867 if (skdev->mem_map[i]) 3334 - iounmap((u32 *)skdev->mem_map[i]); 4868 + iounmap(skdev->mem_map[i]); 3335 4869 3336 4870 if (skdev->pcie_error_reporting_is_enabled) 3337 4871 pci_disable_pcie_error_reporting(pdev); ··· 3351 4885 3352 4886 skdev = pci_get_drvdata(pdev); 3353 4887 if (!skdev) { 3354 - pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4888 + dev_err(&pdev->dev, "no device data for PCI\n"); 3355 4889 return -1; 3356 4890 } 3357 4891 ··· 3369 4903 if (!rc) { 3370 4904 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3371 4905 3372 - 
pr_err("(%s): consistent DMA mask error %d\n", 3373 - pci_name(pdev), rc); 4906 + dev_err(&pdev->dev, "consistent DMA mask error %d\n", 4907 + rc); 3374 4908 } 3375 4909 } else { 3376 4910 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3377 4911 if (rc) { 3378 4912 3379 - pr_err("(%s): DMA mask error %d\n", 3380 - pci_name(pdev), rc); 4913 + dev_err(&pdev->dev, "DMA mask error %d\n", rc); 3381 4914 goto err_out_regions; 3382 4915 } 3383 4916 } ··· 3384 4919 pci_set_master(pdev); 3385 4920 rc = pci_enable_pcie_error_reporting(pdev); 3386 4921 if (rc) { 3387 - pr_err("(%s): bad enable of PCIe error reporting rc=%d\n", 3388 - skdev->name, rc); 4922 + dev_err(&pdev->dev, 4923 + "bad enable of PCIe error reporting rc=%d\n", rc); 3389 4924 skdev->pcie_error_reporting_is_enabled = 0; 3390 4925 } else 3391 4926 skdev->pcie_error_reporting_is_enabled = 1; ··· 3397 4932 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], 3398 4933 skdev->mem_size[i]); 3399 4934 if (!skdev->mem_map[i]) { 3400 - pr_err("(%s): Unable to map adapter memory!\n", 3401 - skd_name(skdev)); 4935 + dev_err(&pdev->dev, "Unable to map adapter memory!\n"); 3402 4936 rc = -ENODEV; 3403 4937 goto err_out_iounmap; 3404 4938 } 3405 - pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n", 3406 - skdev->name, __func__, __LINE__, 3407 - skdev->mem_map[i], 3408 - (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]); 4939 + dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", 4940 + skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], 4941 + skdev->mem_size[i]); 3409 4942 } 3410 4943 rc = skd_acquire_irq(skdev); 3411 4944 if (rc) { 3412 - 3413 - pr_err("(%s): interrupt resource error %d\n", 3414 - pci_name(pdev), rc); 4945 + dev_err(&pdev->dev, "interrupt resource error %d\n", rc); 3415 4946 goto err_out_iounmap; 3416 4947 } 3417 4948 ··· 3445 4984 { 3446 4985 struct skd_device *skdev; 3447 4986 3448 - pr_err("skd_pci_shutdown called\n"); 4987 + dev_err(&pdev->dev, "%s called\n", __func__); 3449 4988 3450 4989 skdev 
= pci_get_drvdata(pdev); 3451 4990 if (!skdev) { 3452 - pr_err("%s: no device data for PCI\n", pci_name(pdev)); 4991 + dev_err(&pdev->dev, "no device data for PCI\n"); 3453 4992 return; 3454 4993 } 3455 4994 3456 - pr_err("%s: calling stop\n", skd_name(skdev)); 4995 + dev_err(&pdev->dev, "calling stop\n"); 3457 4996 skd_stop_device(skdev); 3458 4997 } 3459 4998 ··· 3472 5011 * LOGGING SUPPORT 3473 5012 ***************************************************************************** 3474 5013 */ 3475 - 3476 - static const char *skd_name(struct skd_device *skdev) 3477 - { 3478 - memset(skdev->id_str, 0, sizeof(skdev->id_str)); 3479 - 3480 - if (skdev->inquiry_is_valid) 3481 - snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]", 3482 - skdev->name, skdev->inq_serial_num, 3483 - pci_name(skdev->pdev)); 3484 - else 3485 - snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]", 3486 - skdev->name, pci_name(skdev->pdev)); 3487 - 3488 - return skdev->id_str; 3489 - } 3490 5014 3491 5015 const char *skd_drive_state_to_str(int state) 3492 5016 { ··· 3524 5078 return "PAUSING"; 3525 5079 case SKD_DRVR_STATE_PAUSED: 3526 5080 return "PAUSED"; 3527 - case SKD_DRVR_STATE_DRAINING_TIMEOUT: 3528 - return "DRAINING_TIMEOUT"; 3529 5081 case SKD_DRVR_STATE_RESTARTING: 3530 5082 return "RESTARTING"; 3531 5083 case SKD_DRVR_STATE_RESUMING: ··· 3550 5106 } 3551 5107 } 3552 5108 3553 - static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state) 3554 - { 3555 - switch (state) { 3556 - case SKD_MSG_STATE_IDLE: 3557 - return "IDLE"; 3558 - case SKD_MSG_STATE_BUSY: 3559 - return "BUSY"; 3560 - default: 3561 - return "???"; 3562 - } 3563 - } 3564 - 3565 5109 static const char *skd_skreq_state_to_str(enum skd_req_state state) 3566 5110 { 3567 5111 switch (state) { ··· 3563 5131 return "COMPLETED"; 3564 5132 case SKD_REQ_STATE_TIMEOUT: 3565 5133 return "TIMEOUT"; 3566 - case SKD_REQ_STATE_ABORTED: 3567 - return "ABORTED"; 3568 5134 default: 3569 5135 return "???"; 3570 
5136 } ··· 3570 5140 3571 5141 static void skd_log_skdev(struct skd_device *skdev, const char *event) 3572 5142 { 3573 - pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n", 3574 - skdev->name, __func__, __LINE__, skdev->name, skdev, event); 3575 - pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n", 3576 - skdev->name, __func__, __LINE__, 3577 - skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 3578 - skd_skdev_state_to_str(skdev->state), skdev->state); 3579 - pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n", 3580 - skdev->name, __func__, __LINE__, 3581 - skdev->in_flight, skdev->cur_max_queue_depth, 3582 - skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 3583 - pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n", 3584 - skdev->name, __func__, __LINE__, 3585 - skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix); 3586 - } 3587 - 3588 - static void skd_log_skmsg(struct skd_device *skdev, 3589 - struct skd_fitmsg_context *skmsg, const char *event) 3590 - { 3591 - pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n", 3592 - skdev->name, __func__, __LINE__, skdev->name, skmsg, event); 3593 - pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n", 3594 - skdev->name, __func__, __LINE__, 3595 - skd_skmsg_state_to_str(skmsg->state), skmsg->state, 3596 - skmsg->id, skmsg->length); 5143 + dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); 5144 + dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n", 5145 + skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, 5146 + skd_skdev_state_to_str(skdev->state), skdev->state); 5147 + dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n", 5148 + skd_in_flight(skdev), skdev->cur_max_queue_depth, 5149 + skdev->dev_max_queue_depth, skdev->queue_low_water_mark); 5150 + dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n", 5151 + skdev->skcomp_cycle, skdev->skcomp_ix); 3597 5152 } 3598 5153 3599 5154 static void skd_log_skreq(struct skd_device 
*skdev, 3600 5155 struct skd_request_context *skreq, const char *event) 3601 5156 { 3602 - pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n", 3603 - skdev->name, __func__, __LINE__, skdev->name, skreq, event); 3604 - pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n", 3605 - skdev->name, __func__, __LINE__, 3606 - skd_skreq_state_to_str(skreq->state), skreq->state, 3607 - skreq->id, skreq->fitmsg_id); 3608 - pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n", 3609 - skdev->name, __func__, __LINE__, 3610 - skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg); 5157 + struct request *req = blk_mq_rq_from_pdu(skreq); 5158 + u32 lba = blk_rq_pos(req); 5159 + u32 count = blk_rq_sectors(req); 3611 5160 3612 - if (skreq->req != NULL) { 3613 - struct request *req = skreq->req; 3614 - u32 lba = (u32)blk_rq_pos(req); 3615 - u32 count = blk_rq_sectors(req); 5161 + dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); 5162 + dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n", 5163 + skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id, 5164 + skreq->fitmsg_id); 5165 + dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n", 5166 + skreq->data_dir, skreq->n_sg); 3616 5167 3617 - pr_debug("%s:%s:%d " 3618 - "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", 3619 - skdev->name, __func__, __LINE__, 3620 - req, lba, lba, count, count, 3621 - (int)rq_data_dir(req)); 3622 - } else 3623 - pr_debug("%s:%s:%d req=NULL\n", 3624 - skdev->name, __func__, __LINE__); 5168 + dev_dbg(&skdev->pdev->dev, 5169 + "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba, 5170 + count, count, (int)rq_data_dir(req)); 3625 5171 } 3626 5172 3627 5173 /* ··· 3608 5202 3609 5203 static int __init skd_init(void) 3610 5204 { 3611 - pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID); 5205 + BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8); 5206 + BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32); 5207 + BUILD_BUG_ON(sizeof(struct 
skd_command_header) != 16); 5208 + BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32); 5209 + BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44); 5210 + BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0); 5211 + BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64); 5212 + BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES); 3612 5213 3613 5214 switch (skd_isr_type) { 3614 5215 case SKD_IRQ_LEGACY: ··· 3635 5222 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; 3636 5223 } 3637 5224 3638 - if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) { 5225 + if (skd_max_req_per_msg < 1 || 5226 + skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { 3639 5227 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", 3640 5228 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); 3641 5229 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; ··· 3660 5246 skd_isr_comp_limit = 0; 3661 5247 } 3662 5248 3663 - if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) { 3664 - pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n", 3665 - skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT); 3666 - skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT; 3667 - } 3668 - 3669 5249 return pci_register_driver(&skd_driver); 3670 5250 } 3671 5251 3672 5252 static void __exit skd_exit(void) 3673 5253 { 3674 - pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); 3675 - 3676 5254 pci_unregister_driver(&skd_driver); 3677 5255 3678 5256 if (skd_major)
+16 -22
drivers/block/skd_s1120.h
··· 1 - /* Copyright 2012 STEC, Inc. 1 + /* 2 + * Copyright 2012 STEC, Inc. 3 + * Copyright (c) 2017 Western Digital Corporation or its affiliates. 2 4 * 3 - * This file is licensed under the terms of the 3-clause 4 - * BSD License (http://opensource.org/licenses/BSD-3-Clause) 5 - * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html), 6 - * at your option. Both licenses are also available in the LICENSE file 7 - * distributed with this project. This file may not be copied, modified, 8 - * or distributed except in accordance with those terms. 5 + * This file is part of the Linux kernel, and is made available under 6 + * the terms of the GNU General Public License version 2. 9 7 */ 10 8 11 9 12 10 #ifndef SKD_S1120_H 13 11 #define SKD_S1120_H 14 - 15 - #pragma pack(push, s1120_h, 1) 16 12 17 13 /* 18 14 * Q-channel, 64-bit r/w ··· 26 30 #define FIT_QCMD_MSGSIZE_128 (0x1 << 4) 27 31 #define FIT_QCMD_MSGSIZE_256 (0x2 << 4) 28 32 #define FIT_QCMD_MSGSIZE_512 (0x3 << 4) 29 - #define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull) 33 + #define FIT_QCMD_ALIGN L1_CACHE_BYTES 30 34 31 35 /* 32 36 * Control, 32-bit r/w ··· 246 250 * 20-23 of the FIT_MTD_FITFW_INIT response. 
247 251 */ 248 252 struct fit_completion_entry_v1 { 249 - uint32_t num_returned_bytes; 253 + __be32 num_returned_bytes; 250 254 uint16_t tag; 251 255 uint8_t status; /* SCSI status */ 252 256 uint8_t cycle; ··· 274 278 uint16_t sks_low; /* 10: Sense Key Specific (LSW) */ 275 279 uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */ 276 280 uint16_t uec; /* 14: Additional Sense Bytes */ 277 - uint64_t per; /* 16: Additional Sense Bytes */ 281 + uint64_t per __packed; /* 16: Additional Sense Bytes */ 278 282 uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */ 279 283 }; 280 284 ··· 288 292 * Version one has the last 32 bits sg_list_len_bytes; 289 293 */ 290 294 struct skd_command_header { 291 - uint64_t sg_list_dma_address; 295 + __be64 sg_list_dma_address; 292 296 uint16_t tag; 293 297 uint8_t attribute; 294 298 uint8_t add_cdb_len; /* In 32 bit words */ 295 - uint32_t sg_list_len_bytes; 299 + __be32 sg_list_len_bytes; 296 300 }; 297 301 298 302 struct skd_scsi_request { ··· 305 309 uint8_t peripheral_device_type:5; 306 310 uint8_t qualifier:3; 307 311 uint8_t page_code; 308 - uint16_t page_length; 309 - uint16_t pcie_bus_number; 312 + __be16 page_length; 313 + __be16 pcie_bus_number; 310 314 uint8_t pcie_device_number; 311 315 uint8_t pcie_function_number; 312 316 uint8_t pcie_link_speed; 313 317 uint8_t pcie_link_lanes; 314 - uint16_t pcie_vendor_id; 315 - uint16_t pcie_device_id; 316 - uint16_t pcie_subsystem_vendor_id; 317 - uint16_t pcie_subsystem_device_id; 318 + __be16 pcie_vendor_id; 319 + __be16 pcie_device_id; 320 + __be16 pcie_subsystem_vendor_id; 321 + __be16 pcie_subsystem_device_id; 318 322 uint8_t reserved1[2]; 319 323 uint8_t reserved2[3]; 320 324 uint8_t driver_version_length; 321 325 uint8_t driver_version[0x14]; 322 326 }; 323 - 324 - #pragma pack(pop, s1120_h) 325 327 326 328 #endif /* SKD_S1120_H */
+1 -1
drivers/block/virtio_blk.c
··· 265 265 } 266 266 267 267 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 268 - if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT) 268 + if (blk_rq_is_scsi(req)) 269 269 err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num); 270 270 else 271 271 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+5 -4
drivers/block/xen-blkback/blkback.c
··· 705 705 GNTMAP_host_map, pages[i]->handle); 706 706 pages[i]->handle = BLKBACK_INVALID_HANDLE; 707 707 invcount++; 708 - } 708 + } 709 709 710 - return invcount; 710 + return invcount; 711 711 } 712 712 713 713 static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data) ··· 1251 1251 break; 1252 1252 case BLKIF_OP_WRITE_BARRIER: 1253 1253 drain = true; 1254 + /* fall through */ 1254 1255 case BLKIF_OP_FLUSH_DISKCACHE: 1255 1256 ring->st_f_req++; 1256 1257 operation = REQ_OP_WRITE; ··· 1363 1362 goto fail_put_bio; 1364 1363 1365 1364 biolist[nbio++] = bio; 1366 - bio->bi_bdev = preq.bdev; 1365 + bio_set_dev(bio, preq.bdev); 1367 1366 bio->bi_private = pending_req; 1368 1367 bio->bi_end_io = end_block_io_op; 1369 1368 bio->bi_iter.bi_sector = preq.sector_number; ··· 1382 1381 goto fail_put_bio; 1383 1382 1384 1383 biolist[nbio++] = bio; 1385 - bio->bi_bdev = preq.bdev; 1384 + bio_set_dev(bio, preq.bdev); 1386 1385 bio->bi_private = pending_req; 1387 1386 bio->bi_end_io = end_block_io_op; 1388 1387 bio_set_op_attrs(bio, operation, operation_flags);
+2 -1
drivers/block/xen-blkback/xenbus.c
··· 816 816 xenbus_switch_state(dev, XenbusStateClosed); 817 817 if (xenbus_dev_is_online(dev)) 818 818 break; 819 - /* fall through if not online */ 819 + /* fall through */ 820 + /* if not online */ 820 821 case XenbusStateUnknown: 821 822 /* implies xen_blkif_disconnect() via xen_blkbk_remove() */ 822 823 device_unregister(&dev->dev);
+1 -1
drivers/block/xen-blkfront.c
··· 2456 2456 case XenbusStateClosed: 2457 2457 if (dev->state == XenbusStateClosed) 2458 2458 break; 2459 - /* Missed the backend's Closing state -- fallthrough */ 2459 + /* fall through */ 2460 2460 case XenbusStateClosing: 2461 2461 if (info) 2462 2462 blkfront_closing(info);
+5 -4
drivers/block/zram/zram_drv.c
··· 467 467 return -ENOMEM; 468 468 469 469 bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); 470 - bio->bi_bdev = zram->bdev; 470 + bio_set_dev(bio, zram->bdev); 471 471 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { 472 472 bio_put(bio); 473 473 return -EIO; ··· 561 561 } 562 562 563 563 bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); 564 - bio->bi_bdev = zram->bdev; 564 + bio_set_dev(bio, zram->bdev); 565 565 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, 566 566 bvec->bv_offset)) { 567 567 bio_put(bio); ··· 1171 1171 { 1172 1172 unsigned long start_time = jiffies; 1173 1173 int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ; 1174 + struct request_queue *q = zram->disk->queue; 1174 1175 int ret; 1175 1176 1176 - generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT, 1177 + generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT, 1177 1178 &zram->disk->part0); 1178 1179 1179 1180 if (!is_write) { ··· 1186 1185 ret = zram_bvec_write(zram, bvec, index, offset, bio); 1187 1186 } 1188 1187 1189 - generic_end_io_acct(rw_acct, &zram->disk->part0, start_time); 1188 + generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time); 1190 1189 1191 1190 if (unlikely(ret < 0)) { 1192 1191 if (!is_write)
+1 -1
drivers/ide/ide-floppy.c
··· 72 72 drive->failed_pc = NULL; 73 73 74 74 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || 75 - (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT)) 75 + blk_rq_is_scsi(rq)) 76 76 uptodate = 1; /* FIXME */ 77 77 else if (pc->c[0] == GPCMD_REQUEST_SENSE) { 78 78
+1 -1
drivers/md/bcache/debug.c
··· 49 49 v->keys.ops = b->keys.ops; 50 50 51 51 bio = bch_bbio_alloc(b->c); 52 - bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; 52 + bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev); 53 53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); 54 54 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; 55 55 bio->bi_opf = REQ_OP_READ | REQ_META;
+1 -1
drivers/md/bcache/io.c
··· 34 34 struct bbio *b = container_of(bio, struct bbio, bio); 35 35 36 36 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); 37 - bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; 37 + bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); 38 38 39 39 b->submit_time_us = local_clock_us(); 40 40 closure_bio_submit(bio, bio->bi_private);
+3 -3
drivers/md/bcache/journal.c
··· 53 53 54 54 bio_reset(bio); 55 55 bio->bi_iter.bi_sector = bucket + offset; 56 - bio->bi_bdev = ca->bdev; 56 + bio_set_dev(bio, ca->bdev); 57 57 bio->bi_iter.bi_size = len << 9; 58 58 59 59 bio->bi_end_io = journal_read_endio; ··· 452 452 bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); 453 453 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, 454 454 ca->sb.d[ja->discard_idx]); 455 - bio->bi_bdev = ca->bdev; 455 + bio_set_dev(bio, ca->bdev); 456 456 bio->bi_iter.bi_size = bucket_bytes(ca); 457 457 bio->bi_end_io = journal_discard_endio; 458 458 ··· 623 623 624 624 bio_reset(bio); 625 625 bio->bi_iter.bi_sector = PTR_OFFSET(k, i); 626 - bio->bi_bdev = ca->bdev; 626 + bio_set_dev(bio, ca->bdev); 627 627 bio->bi_iter.bi_size = sectors << 9; 628 628 629 629 bio->bi_end_io = journal_write_endio;
+11 -10
drivers/md/bcache/request.c
··· 607 607 static void bio_complete(struct search *s) 608 608 { 609 609 if (s->orig_bio) { 610 - generic_end_io_acct(bio_data_dir(s->orig_bio), 610 + struct request_queue *q = s->orig_bio->bi_disk->queue; 611 + generic_end_io_acct(q, bio_data_dir(s->orig_bio), 611 612 &s->d->disk->part0, s->start_time); 612 613 613 614 trace_bcache_request_end(s->d, s->orig_bio); ··· 735 734 if (s->iop.bio) { 736 735 bio_reset(s->iop.bio); 737 736 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; 738 - s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; 737 + bio_copy_dev(s->iop.bio, s->cache_miss); 739 738 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; 740 739 bch_bio_map(s->iop.bio, NULL); 741 740 ··· 794 793 !(bio->bi_opf & REQ_META) && 795 794 s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) 796 795 reada = min_t(sector_t, dc->readahead >> 9, 797 - bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); 796 + get_capacity(bio->bi_disk) - bio_end_sector(bio)); 798 797 799 798 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); 800 799 ··· 820 819 goto out_submit; 821 820 822 821 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; 823 - cache_bio->bi_bdev = miss->bi_bdev; 822 + bio_copy_dev(cache_bio, miss); 824 823 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; 825 824 826 825 cache_bio->bi_end_io = request_endio; ··· 919 918 struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, 920 919 dc->disk.bio_split); 921 920 922 - flush->bi_bdev = bio->bi_bdev; 921 + bio_copy_dev(flush, bio); 923 922 flush->bi_end_io = request_endio; 924 923 flush->bi_private = cl; 925 924 flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; ··· 956 955 struct bio *bio) 957 956 { 958 957 struct search *s; 959 - struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; 958 + struct bcache_device *d = bio->bi_disk->private_data; 960 959 struct cached_dev *dc = container_of(d, struct cached_dev, disk); 961 960 int rw = bio_data_dir(bio); 962 961 963 - 
generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); 962 + generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); 964 963 965 - bio->bi_bdev = dc->bdev; 964 + bio_set_dev(bio, dc->bdev); 966 965 bio->bi_iter.bi_sector += dc->sb.data_offset; 967 966 968 967 if (cached_dev_get(dc)) { ··· 1072 1071 { 1073 1072 struct search *s; 1074 1073 struct closure *cl; 1075 - struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; 1074 + struct bcache_device *d = bio->bi_disk->private_data; 1076 1075 int rw = bio_data_dir(bio); 1077 1076 1078 - generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); 1077 + generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); 1079 1078 1080 1079 s = search_alloc(bio, d); 1081 1080 cl = &s->cl;
+3 -3
drivers/md/bcache/super.c
··· 257 257 closure_init(cl, parent); 258 258 259 259 bio_reset(bio); 260 - bio->bi_bdev = dc->bdev; 260 + bio_set_dev(bio, dc->bdev); 261 261 bio->bi_end_io = write_bdev_super_endio; 262 262 bio->bi_private = dc; 263 263 ··· 303 303 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); 304 304 305 305 bio_reset(bio); 306 - bio->bi_bdev = ca->bdev; 306 + bio_set_dev(bio, ca->bdev); 307 307 bio->bi_end_io = write_super_endio; 308 308 bio->bi_private = ca; 309 309 ··· 508 508 closure_init_stack(cl); 509 509 510 510 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; 511 - bio->bi_bdev = ca->bdev; 511 + bio_set_dev(bio, ca->bdev); 512 512 bio->bi_iter.bi_size = bucket_bytes(ca); 513 513 514 514 bio->bi_end_io = prio_endio;
+2 -3
drivers/md/bcache/writeback.c
··· 181 181 dirty_init(w); 182 182 bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); 183 183 io->bio.bi_iter.bi_sector = KEY_START(&w->key); 184 - io->bio.bi_bdev = io->dc->bdev; 184 + bio_set_dev(&io->bio, io->dc->bdev); 185 185 io->bio.bi_end_io = dirty_endio; 186 186 187 187 closure_bio_submit(&io->bio, cl); ··· 250 250 dirty_init(w); 251 251 bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); 252 252 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); 253 - io->bio.bi_bdev = PTR_CACHE(dc->disk.c, 254 - &w->key, 0)->bdev; 253 + bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); 255 254 io->bio.bi_end_io = read_dirty_endio; 256 255 257 256 if (bio_alloc_pages(&io->bio, GFP_KERNEL))
+6 -3
drivers/md/dm-bio-record.h
··· 18 18 */ 19 19 20 20 struct dm_bio_details { 21 - struct block_device *bi_bdev; 21 + struct gendisk *bi_disk; 22 + u8 bi_partno; 22 23 unsigned long bi_flags; 23 24 struct bvec_iter bi_iter; 24 25 }; 25 26 26 27 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) 27 28 { 28 - bd->bi_bdev = bio->bi_bdev; 29 + bd->bi_disk = bio->bi_disk; 30 + bd->bi_partno = bio->bi_partno; 29 31 bd->bi_flags = bio->bi_flags; 30 32 bd->bi_iter = bio->bi_iter; 31 33 } 32 34 33 35 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) 34 36 { 35 - bio->bi_bdev = bd->bi_bdev; 37 + bio->bi_disk = bd->bi_disk; 38 + bio->bi_partno = bd->bi_partno; 36 39 bio->bi_flags = bd->bi_flags; 37 40 bio->bi_iter = bd->bi_iter; 38 41 }
+1 -1
drivers/md/dm-bufio.c
··· 616 616 617 617 bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); 618 618 b->bio.bi_iter.bi_sector = sector; 619 - b->bio.bi_bdev = b->c->bdev; 619 + bio_set_dev(&b->bio, b->c->bdev); 620 620 b->bio.bi_end_io = inline_endio; 621 621 /* 622 622 * Use of .bi_private isn't a problem here because
+2 -2
drivers/md/dm-cache-target.c
··· 833 833 *--------------------------------------------------------------*/ 834 834 static void remap_to_origin(struct cache *cache, struct bio *bio) 835 835 { 836 - bio->bi_bdev = cache->origin_dev->bdev; 836 + bio_set_dev(bio, cache->origin_dev->bdev); 837 837 } 838 838 839 839 static void remap_to_cache(struct cache *cache, struct bio *bio, ··· 842 842 sector_t bi_sector = bio->bi_iter.bi_sector; 843 843 sector_t block = from_cblock(cblock); 844 844 845 - bio->bi_bdev = cache->cache_dev->bdev; 845 + bio_set_dev(bio, cache->cache_dev->bdev); 846 846 if (!block_size_is_power_of_two(cache)) 847 847 bio->bi_iter.bi_sector = 848 848 (block * cache->sectors_per_block) +
+2 -5
drivers/md/dm-crypt.c
··· 932 932 bip->bip_iter.bi_size = tag_len; 933 933 bip->bip_iter.bi_sector = io->cc->start + io->sector; 934 934 935 - /* We own the metadata, do not let bio_free to release it */ 936 - bip->bip_flags &= ~BIP_BLOCK_INTEGRITY; 937 - 938 935 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata), 939 936 tag_len, offset_in_page(io->integrity_metadata)); 940 937 if (unlikely(ret != tag_len)) ··· 1543 1546 1544 1547 clone->bi_private = io; 1545 1548 clone->bi_end_io = crypt_endio; 1546 - clone->bi_bdev = cc->dev->bdev; 1549 + bio_set_dev(clone, cc->dev->bdev); 1547 1550 clone->bi_opf = io->base_bio->bi_opf; 1548 1551 } 1549 1552 ··· 2792 2795 */ 2793 2796 if (unlikely(bio->bi_opf & REQ_PREFLUSH || 2794 2797 bio_op(bio) == REQ_OP_DISCARD)) { 2795 - bio->bi_bdev = cc->dev->bdev; 2798 + bio_set_dev(bio, cc->dev->bdev); 2796 2799 if (bio_sectors(bio)) 2797 2800 bio->bi_iter.bi_sector = cc->start + 2798 2801 dm_target_offset(ti, bio->bi_iter.bi_sector);
+2 -2
drivers/md/dm-delay.c
··· 282 282 struct delay_c *dc = ti->private; 283 283 284 284 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { 285 - bio->bi_bdev = dc->dev_write->bdev; 285 + bio_set_dev(bio, dc->dev_write->bdev); 286 286 if (bio_sectors(bio)) 287 287 bio->bi_iter.bi_sector = dc->start_write + 288 288 dm_target_offset(ti, bio->bi_iter.bi_sector); ··· 290 290 return delay_bio(dc, dc->write_delay, bio); 291 291 } 292 292 293 - bio->bi_bdev = dc->dev_read->bdev; 293 + bio_set_dev(bio, dc->dev_read->bdev); 294 294 bio->bi_iter.bi_sector = dc->start_read + 295 295 dm_target_offset(ti, bio->bi_iter.bi_sector); 296 296
+1 -1
drivers/md/dm-era-target.c
··· 1192 1192 1193 1193 static void remap_to_origin(struct era *era, struct bio *bio) 1194 1194 { 1195 - bio->bi_bdev = era->origin_dev->bdev; 1195 + bio_set_dev(bio, era->origin_dev->bdev); 1196 1196 } 1197 1197 1198 1198 /*----------------------------------------------------------------
+1 -1
drivers/md/dm-flakey.c
··· 274 274 { 275 275 struct flakey_c *fc = ti->private; 276 276 277 - bio->bi_bdev = fc->dev->bdev; 277 + bio_set_dev(bio, fc->dev->bdev); 278 278 if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) 279 279 bio->bi_iter.bi_sector = 280 280 flakey_map_sector(ti, bio->bi_iter.bi_sector);
+7 -4
drivers/md/dm-integrity.c
··· 250 250 251 251 struct completion *completion; 252 252 253 - struct block_device *orig_bi_bdev; 253 + struct gendisk *orig_bi_disk; 254 + u8 orig_bi_partno; 254 255 bio_end_io_t *orig_bi_end_io; 255 256 struct bio_integrity_payload *orig_bi_integrity; 256 257 struct bvec_iter orig_bi_iter; ··· 1165 1164 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 1166 1165 1167 1166 bio->bi_iter = dio->orig_bi_iter; 1168 - bio->bi_bdev = dio->orig_bi_bdev; 1167 + bio->bi_disk = dio->orig_bi_disk; 1168 + bio->bi_partno = dio->orig_bi_partno; 1169 1169 if (dio->orig_bi_integrity) { 1170 1170 bio->bi_integrity = dio->orig_bi_integrity; 1171 1171 bio->bi_opf |= REQ_INTEGRITY; ··· 1683 1681 1684 1682 dio->orig_bi_iter = bio->bi_iter; 1685 1683 1686 - dio->orig_bi_bdev = bio->bi_bdev; 1687 - bio->bi_bdev = ic->dev->bdev; 1684 + dio->orig_bi_disk = bio->bi_disk; 1685 + dio->orig_bi_partno = bio->bi_partno; 1686 + bio_set_dev(bio, ic->dev->bdev); 1688 1687 1689 1688 dio->orig_bi_integrity = bio_integrity(bio); 1690 1689 bio->bi_integrity = NULL;
+1 -1
drivers/md/dm-io.c
··· 347 347 348 348 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 349 349 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); 350 - bio->bi_bdev = where->bdev; 350 + bio_set_dev(bio, where->bdev); 351 351 bio->bi_end_io = endio; 352 352 bio_set_op_attrs(bio, op, op_flags); 353 353 store_io_and_region_in_bio(bio, io, region);
+1 -1
drivers/md/dm-linear.c
··· 88 88 { 89 89 struct linear_c *lc = ti->private; 90 90 91 - bio->bi_bdev = lc->dev->bdev; 91 + bio_set_dev(bio, lc->dev->bdev); 92 92 if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) 93 93 bio->bi_iter.bi_sector = 94 94 linear_map_sector(ti, bio->bi_iter.bi_sector);
+4 -4
drivers/md/dm-log-writes.c
··· 198 198 } 199 199 bio->bi_iter.bi_size = 0; 200 200 bio->bi_iter.bi_sector = sector; 201 - bio->bi_bdev = lc->logdev->bdev; 201 + bio_set_dev(bio, lc->logdev->bdev); 202 202 bio->bi_end_io = log_end_io; 203 203 bio->bi_private = lc; 204 204 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ··· 263 263 } 264 264 bio->bi_iter.bi_size = 0; 265 265 bio->bi_iter.bi_sector = sector; 266 - bio->bi_bdev = lc->logdev->bdev; 266 + bio_set_dev(bio, lc->logdev->bdev); 267 267 bio->bi_end_io = log_end_io; 268 268 bio->bi_private = lc; 269 269 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ··· 285 285 } 286 286 bio->bi_iter.bi_size = 0; 287 287 bio->bi_iter.bi_sector = sector; 288 - bio->bi_bdev = lc->logdev->bdev; 288 + bio_set_dev(bio, lc->logdev->bdev); 289 289 bio->bi_end_io = log_end_io; 290 290 bio->bi_private = lc; 291 291 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); ··· 539 539 { 540 540 struct log_writes_c *lc = ti->private; 541 541 542 - bio->bi_bdev = lc->dev->bdev; 542 + bio_set_dev(bio, lc->dev->bdev); 543 543 } 544 544 545 545 static int log_writes_map(struct dm_target *ti, struct bio *bio)
+1 -1
drivers/md/dm-mpath.c
··· 565 565 mpio->nr_bytes = nr_bytes; 566 566 567 567 bio->bi_status = 0; 568 - bio->bi_bdev = pgpath->path.dev->bdev; 568 + bio_set_dev(bio, pgpath->path.dev->bdev); 569 569 bio->bi_opf |= REQ_FAILFAST_TRANSPORT; 570 570 571 571 if (pgpath->pg->ps.type->start_io)
+6 -6
drivers/md/dm-raid1.c
··· 145 145 146 146 struct dm_raid1_bio_record { 147 147 struct mirror *m; 148 - /* if details->bi_bdev == NULL, details were not saved */ 148 + /* if details->bi_disk == NULL, details were not saved */ 149 149 struct dm_bio_details details; 150 150 region_t write_region; 151 151 }; ··· 464 464 465 465 static void map_bio(struct mirror *m, struct bio *bio) 466 466 { 467 - bio->bi_bdev = m->dev->bdev; 467 + bio_set_dev(bio, m->dev->bdev); 468 468 bio->bi_iter.bi_sector = map_sector(m, bio); 469 469 } 470 470 ··· 1199 1199 struct dm_raid1_bio_record *bio_record = 1200 1200 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1201 1201 1202 - bio_record->details.bi_bdev = NULL; 1202 + bio_record->details.bi_disk = NULL; 1203 1203 1204 1204 if (rw == WRITE) { 1205 1205 /* Save region for mirror_end_io() handler */ ··· 1266 1266 goto out; 1267 1267 1268 1268 if (unlikely(*error)) { 1269 - if (!bio_record->details.bi_bdev) { 1269 + if (!bio_record->details.bi_disk) { 1270 1270 /* 1271 1271 * There wasn't enough memory to record necessary 1272 1272 * information for a retry or there was no other ··· 1291 1291 bd = &bio_record->details; 1292 1292 1293 1293 dm_bio_restore(bd, bio); 1294 - bio_record->details.bi_bdev = NULL; 1294 + bio_record->details.bi_disk = NULL; 1295 1295 bio->bi_status = 0; 1296 1296 1297 1297 queue_bio(ms, bio, rw); ··· 1301 1301 } 1302 1302 1303 1303 out: 1304 - bio_record->details.bi_bdev = NULL; 1304 + bio_record->details.bi_disk = NULL; 1305 1305 1306 1306 return DM_ENDIO_DONE; 1307 1307 }
+8 -8
drivers/md/dm-snap.c
··· 1663 1663 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, 1664 1664 struct bio *bio, chunk_t chunk) 1665 1665 { 1666 - bio->bi_bdev = s->cow->bdev; 1666 + bio_set_dev(bio, s->cow->bdev); 1667 1667 bio->bi_iter.bi_sector = 1668 1668 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + 1669 1669 (chunk - e->old_chunk)) + ··· 1681 1681 init_tracked_chunk(bio); 1682 1682 1683 1683 if (bio->bi_opf & REQ_PREFLUSH) { 1684 - bio->bi_bdev = s->cow->bdev; 1684 + bio_set_dev(bio, s->cow->bdev); 1685 1685 return DM_MAPIO_REMAPPED; 1686 1686 } 1687 1687 ··· 1769 1769 goto out; 1770 1770 } 1771 1771 } else { 1772 - bio->bi_bdev = s->origin->bdev; 1772 + bio_set_dev(bio, s->origin->bdev); 1773 1773 track_chunk(s, bio, chunk); 1774 1774 } 1775 1775 ··· 1802 1802 1803 1803 if (bio->bi_opf & REQ_PREFLUSH) { 1804 1804 if (!dm_bio_get_target_bio_nr(bio)) 1805 - bio->bi_bdev = s->origin->bdev; 1805 + bio_set_dev(bio, s->origin->bdev); 1806 1806 else 1807 - bio->bi_bdev = s->cow->bdev; 1807 + bio_set_dev(bio, s->cow->bdev); 1808 1808 return DM_MAPIO_REMAPPED; 1809 1809 } 1810 1810 ··· 1824 1824 chunk >= s->first_merging_chunk && 1825 1825 chunk < (s->first_merging_chunk + 1826 1826 s->num_merging_chunks)) { 1827 - bio->bi_bdev = s->origin->bdev; 1827 + bio_set_dev(bio, s->origin->bdev); 1828 1828 bio_list_add(&s->bios_queued_during_merge, bio); 1829 1829 r = DM_MAPIO_SUBMITTED; 1830 1830 goto out_unlock; ··· 1838 1838 } 1839 1839 1840 1840 redirect_to_origin: 1841 - bio->bi_bdev = s->origin->bdev; 1841 + bio_set_dev(bio, s->origin->bdev); 1842 1842 1843 1843 if (bio_data_dir(bio) == WRITE) { 1844 1844 up_write(&s->lock); ··· 2285 2285 struct dm_origin *o = ti->private; 2286 2286 unsigned available_sectors; 2287 2287 2288 - bio->bi_bdev = o->dev->bdev; 2288 + bio_set_dev(bio, o->dev->bdev); 2289 2289 2290 2290 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) 2291 2291 return DM_MAPIO_REMAPPED;
+4 -6
drivers/md/dm-stripe.c
··· 270 270 stripe_map_range_sector(sc, bio_end_sector(bio), 271 271 target_stripe, &end); 272 272 if (begin < end) { 273 - bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; 273 + bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev); 274 274 bio->bi_iter.bi_sector = begin + 275 275 sc->stripe[target_stripe].physical_start; 276 276 bio->bi_iter.bi_size = to_bytes(end - begin); ··· 291 291 if (bio->bi_opf & REQ_PREFLUSH) { 292 292 target_bio_nr = dm_bio_get_target_bio_nr(bio); 293 293 BUG_ON(target_bio_nr >= sc->stripes); 294 - bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; 294 + bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev); 295 295 return DM_MAPIO_REMAPPED; 296 296 } 297 297 if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || ··· 306 306 &stripe, &bio->bi_iter.bi_sector); 307 307 308 308 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; 309 - bio->bi_bdev = sc->stripe[stripe].dev->bdev; 309 + bio_set_dev(bio, sc->stripe[stripe].dev->bdev); 310 310 311 311 return DM_MAPIO_REMAPPED; 312 312 } ··· 430 430 return DM_ENDIO_DONE; 431 431 432 432 memset(major_minor, 0, sizeof(major_minor)); 433 - sprintf(major_minor, "%d:%d", 434 - MAJOR(disk_devt(bio->bi_bdev->bd_disk)), 435 - MINOR(disk_devt(bio->bi_bdev->bd_disk))); 433 + sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio))); 436 434 437 435 /* 438 436 * Test to see which stripe drive triggered the event
+1 -1
drivers/md/dm-switch.c
··· 322 322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); 323 323 unsigned path_nr = switch_get_path_nr(sctx, offset); 324 324 325 - bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; 325 + bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev); 326 326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; 327 327 328 328 return DM_MAPIO_REMAPPED;
+3 -3
drivers/md/dm-thin.c
··· 679 679 struct pool *pool = tc->pool; 680 680 sector_t bi_sector = bio->bi_iter.bi_sector; 681 681 682 - bio->bi_bdev = tc->pool_dev->bdev; 682 + bio_set_dev(bio, tc->pool_dev->bdev); 683 683 if (block_size_is_power_of_two(pool)) 684 684 bio->bi_iter.bi_sector = 685 685 (block << pool->sectors_per_block_shift) | ··· 691 691 692 692 static void remap_to_origin(struct thin_c *tc, struct bio *bio) 693 693 { 694 - bio->bi_bdev = tc->origin_dev->bdev; 694 + bio_set_dev(bio, tc->origin_dev->bdev); 695 695 } 696 696 697 697 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) ··· 3313 3313 * As this is a singleton target, ti->begin is always zero. 3314 3314 */ 3315 3315 spin_lock_irqsave(&pool->lock, flags); 3316 - bio->bi_bdev = pt->data_dev->bdev; 3316 + bio_set_dev(bio, pt->data_dev->bdev); 3317 3317 r = DM_MAPIO_REMAPPED; 3318 3318 spin_unlock_irqrestore(&pool->lock, flags); 3319 3319
+1 -1
drivers/md/dm-verity-target.c
··· 637 637 struct dm_verity *v = ti->private; 638 638 struct dm_verity_io *io; 639 639 640 - bio->bi_bdev = v->data_dev->bdev; 640 + bio_set_dev(bio, v->data_dev->bdev); 641 641 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); 642 642 643 643 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+3 -3
drivers/md/dm-zoned-metadata.c
··· 409 409 } 410 410 411 411 bio->bi_iter.bi_sector = dmz_blk2sect(block); 412 - bio->bi_bdev = zmd->dev->bdev; 412 + bio_set_dev(bio, zmd->dev->bdev); 413 413 bio->bi_private = mblk; 414 414 bio->bi_end_io = dmz_mblock_bio_end_io; 415 415 bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); ··· 564 564 set_bit(DMZ_META_WRITING, &mblk->state); 565 565 566 566 bio->bi_iter.bi_sector = dmz_blk2sect(block); 567 - bio->bi_bdev = zmd->dev->bdev; 567 + bio_set_dev(bio, zmd->dev->bdev); 568 568 bio->bi_private = mblk; 569 569 bio->bi_end_io = dmz_mblock_bio_end_io; 570 570 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); ··· 586 586 return -ENOMEM; 587 587 588 588 bio->bi_iter.bi_sector = dmz_blk2sect(block); 589 - bio->bi_bdev = zmd->dev->bdev; 589 + bio_set_dev(bio, zmd->dev->bdev); 590 590 bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); 591 591 bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); 592 592 ret = submit_bio_wait(bio);
+2 -2
drivers/md/dm-zoned-target.c
··· 238 238 struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); 239 239 240 240 /* Setup and submit the BIO */ 241 - bio->bi_bdev = dmz->dev->bdev; 241 + bio_set_dev(bio, dmz->dev->bdev); 242 242 bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); 243 243 atomic_inc(&bioctx->ref); 244 244 generic_make_request(bio); ··· 586 586 (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)), 587 587 (unsigned int)dmz_bio_blocks(bio)); 588 588 589 - bio->bi_bdev = dev->bdev; 589 + bio_set_dev(bio, dev->bdev); 590 590 591 591 if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) 592 592 return DM_MAPIO_REMAPPED;
+8 -8
drivers/md/dm.c
··· 510 510 io->start_time = jiffies; 511 511 512 512 cpu = part_stat_lock(); 513 - part_round_stats(cpu, &dm_disk(md)->part0); 513 + part_round_stats(md->queue, cpu, &dm_disk(md)->part0); 514 514 part_stat_unlock(); 515 515 atomic_set(&dm_disk(md)->part0.in_flight[rw], 516 516 atomic_inc_return(&md->pending[rw])); ··· 529 529 int pending; 530 530 int rw = bio_data_dir(bio); 531 531 532 - generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time); 532 + generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); 533 533 534 534 if (unlikely(dm_stats_used(&md->stats))) 535 535 dm_stats_account_io(&md->stats, bio_data_dir(bio), ··· 841 841 842 842 if (unlikely(error == BLK_STS_TARGET)) { 843 843 if (bio_op(bio) == REQ_OP_WRITE_SAME && 844 - !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) 844 + !bio->bi_disk->queue->limits.max_write_same_sectors) 845 845 disable_write_same(md); 846 846 if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 847 - !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) 847 + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 848 848 disable_write_zeroes(md); 849 849 } 850 850 ··· 1205 1205 break; 1206 1206 case DM_MAPIO_REMAPPED: 1207 1207 /* the bio has been remapped so dispatch it */ 1208 - trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, 1209 - tio->io->bio->bi_bdev->bd_dev, sector); 1208 + trace_block_bio_remap(clone->bi_disk->queue, clone, 1209 + bio_dev(tio->io->bio), sector); 1210 1210 generic_make_request(clone); 1211 1211 break; 1212 1212 case DM_MAPIO_KILL: ··· 1532 1532 1533 1533 map = dm_get_live_table(md, &srcu_idx); 1534 1534 1535 - generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0); 1535 + generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0); 1536 1536 1537 1537 /* if we're suspended, we have to queue this io for later */ 1538 1538 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { ··· 1786 1786 goto bad; 1787 1787 1788 1788 
bio_init(&md->flush_bio, NULL, 0); 1789 - md->flush_bio.bi_bdev = md->bdev; 1789 + bio_set_dev(&md->flush_bio, md->bdev); 1790 1790 md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1791 1791 1792 1792 dm_stats_init(&md->stats);
+2 -2
drivers/md/faulty.c
··· 216 216 if (failit) { 217 217 struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); 218 218 219 - b->bi_bdev = conf->rdev->bdev; 219 + bio_set_dev(b, conf->rdev->bdev); 220 220 b->bi_private = bio; 221 221 b->bi_end_io = faulty_fail; 222 222 bio = b; 223 223 } else 224 - bio->bi_bdev = conf->rdev->bdev; 224 + bio_set_dev(bio, conf->rdev->bdev); 225 225 226 226 generic_make_request(bio); 227 227 return true;
+3 -3
drivers/md/linear.c
··· 275 275 bio = split; 276 276 } 277 277 278 - bio->bi_bdev = tmp_dev->rdev->bdev; 278 + bio_set_dev(bio, tmp_dev->rdev->bdev); 279 279 bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - 280 280 start_sector + data_offset; 281 281 282 282 if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 283 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { 283 + !blk_queue_discard(bio->bi_disk->queue))) { 284 284 /* Just ignore it */ 285 285 bio_endio(bio); 286 286 } else { 287 287 if (mddev->gendisk) 288 - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 288 + trace_block_bio_remap(bio->bi_disk->queue, 289 289 bio, disk_devt(mddev->gendisk), 290 290 bio_sector); 291 291 mddev_check_writesame(mddev, bio);
+6 -4
drivers/md/md.c
··· 422 422 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); 423 423 bi->bi_end_io = md_end_flush; 424 424 bi->bi_private = rdev; 425 - bi->bi_bdev = rdev->bdev; 425 + bio_set_dev(bi, rdev->bdev); 426 426 bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 427 427 atomic_inc(&mddev->flush_pending); 428 428 submit_bio(bi); ··· 772 772 773 773 atomic_inc(&rdev->nr_pending); 774 774 775 - bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 775 + bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev); 776 776 bio->bi_iter.bi_sector = sector; 777 777 bio_add_page(bio, page, size, 0); 778 778 bio->bi_private = rdev; ··· 803 803 struct bio *bio = md_bio_alloc_sync(rdev->mddev); 804 804 int ret; 805 805 806 - bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 807 - rdev->meta_bdev : rdev->bdev; 806 + if (metadata_op && rdev->meta_bdev) 807 + bio_set_dev(bio, rdev->meta_bdev); 808 + else 809 + bio_set_dev(bio, rdev->bdev); 808 810 bio_set_op_attrs(bio, op, op_flags); 809 811 if (metadata_op) 810 812 bio->bi_iter.bi_sector = sector + rdev->sb_start;
+7 -2
drivers/md/md.h
··· 509 509 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); 510 510 } 511 511 512 + static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) 513 + { 514 + atomic_add(nr_sectors, &bio->bi_disk->sync_io); 515 + } 516 + 512 517 struct md_personality 513 518 { 514 519 char *name; ··· 726 721 static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio) 727 722 { 728 723 if (bio_op(bio) == REQ_OP_WRITE_SAME && 729 - !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) 724 + !bio->bi_disk->queue->limits.max_write_same_sectors) 730 725 mddev->queue->limits.max_write_same_sectors = 0; 731 726 } 732 727 733 728 static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio) 734 729 { 735 730 if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 736 - !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) 731 + !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 737 732 mddev->queue->limits.max_write_zeroes_sectors = 0; 738 733 } 739 734 #endif /* _MD_MD_H */
+4 -4
drivers/md/multipath.c
··· 134 134 __bio_clone_fast(&mp_bh->bio, bio); 135 135 136 136 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; 137 - mp_bh->bio.bi_bdev = multipath->rdev->bdev; 137 + bio_set_dev(&mp_bh->bio, multipath->rdev->bdev); 138 138 mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT; 139 139 mp_bh->bio.bi_end_io = multipath_end_request; 140 140 mp_bh->bio.bi_private = mp_bh; ··· 345 345 346 346 if ((mp_bh->path = multipath_map (conf))<0) { 347 347 pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", 348 - bdevname(bio->bi_bdev,b), 348 + bio_devname(bio, b), 349 349 (unsigned long long)bio->bi_iter.bi_sector); 350 350 multipath_end_bh_io(mp_bh, BLK_STS_IOERR); 351 351 } else { 352 352 pr_err("multipath: %s: redirecting sector %llu to another IO path\n", 353 - bdevname(bio->bi_bdev,b), 353 + bio_devname(bio, b), 354 354 (unsigned long long)bio->bi_iter.bi_sector); 355 355 *bio = *(mp_bh->master_bio); 356 356 bio->bi_iter.bi_sector += 357 357 conf->multipaths[mp_bh->path].rdev->data_offset; 358 - bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 358 + bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev); 359 359 bio->bi_opf |= REQ_FAILFAST_TRANSPORT; 360 360 bio->bi_end_io = multipath_end_request; 361 361 bio->bi_private = mp_bh;
+3 -4
drivers/md/raid0.c
··· 588 588 589 589 zone = find_zone(mddev->private, &sector); 590 590 tmp_dev = map_sector(mddev, zone, sector, &sector); 591 - bio->bi_bdev = tmp_dev->bdev; 591 + bio_set_dev(bio, tmp_dev->bdev); 592 592 bio->bi_iter.bi_sector = sector + zone->dev_start + 593 593 tmp_dev->data_offset; 594 594 595 595 if (mddev->gendisk) 596 - trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), 597 - bio, disk_devt(mddev->gendisk), 598 - bio_sector); 596 + trace_block_bio_remap(bio->bi_disk->queue, bio, 597 + disk_devt(mddev->gendisk), bio_sector); 599 598 mddev_check_writesame(mddev, bio); 600 599 mddev_check_write_zeroes(mddev, bio); 601 600 generic_make_request(bio);
+15 -19
drivers/md/raid1.c
··· 786 786 787 787 while (bio) { /* submit pending writes */ 788 788 struct bio *next = bio->bi_next; 789 - struct md_rdev *rdev = (void*)bio->bi_bdev; 789 + struct md_rdev *rdev = (void *)bio->bi_disk; 790 790 bio->bi_next = NULL; 791 - bio->bi_bdev = rdev->bdev; 791 + bio_set_dev(bio, rdev->bdev); 792 792 if (test_bit(Faulty, &rdev->flags)) { 793 793 bio_io_error(bio); 794 794 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 795 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 795 + !blk_queue_discard(bio->bi_disk->queue))) 796 796 /* Just ignore it */ 797 797 bio_endio(bio); 798 798 else ··· 1273 1273 1274 1274 read_bio->bi_iter.bi_sector = r1_bio->sector + 1275 1275 mirror->rdev->data_offset; 1276 - read_bio->bi_bdev = mirror->rdev->bdev; 1276 + bio_set_dev(read_bio, mirror->rdev->bdev); 1277 1277 read_bio->bi_end_io = raid1_end_read_request; 1278 1278 bio_set_op_attrs(read_bio, op, do_sync); 1279 1279 if (test_bit(FailFast, &mirror->rdev->flags) && ··· 1282 1282 read_bio->bi_private = r1_bio; 1283 1283 1284 1284 if (mddev->gendisk) 1285 - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), 1286 - read_bio, disk_devt(mddev->gendisk), 1287 - r1_bio->sector); 1285 + trace_block_bio_remap(read_bio->bi_disk->queue, read_bio, 1286 + disk_devt(mddev->gendisk), r1_bio->sector); 1288 1287 1289 1288 generic_make_request(read_bio); 1290 1289 } ··· 1495 1496 1496 1497 mbio->bi_iter.bi_sector = (r1_bio->sector + 1497 1498 conf->mirrors[i].rdev->data_offset); 1498 - mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1499 + bio_set_dev(mbio, conf->mirrors[i].rdev->bdev); 1499 1500 mbio->bi_end_io = raid1_end_write_request; 1500 1501 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); 1501 1502 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && ··· 1507 1508 atomic_inc(&r1_bio->remaining); 1508 1509 1509 1510 if (mddev->gendisk) 1510 - trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1511 + trace_block_bio_remap(mbio->bi_disk->queue, 1511 
1512 mbio, disk_devt(mddev->gendisk), 1512 1513 r1_bio->sector); 1513 1514 /* flush_pending_writes() needs access to the rdev so...*/ 1514 - mbio->bi_bdev = (void*)conf->mirrors[i].rdev; 1515 + mbio->bi_disk = (void *)conf->mirrors[i].rdev; 1515 1516 1516 1517 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); 1517 1518 if (cb) ··· 1989 1990 * Don't fail devices as that won't really help. 1990 1991 */ 1991 1992 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", 1992 - mdname(mddev), 1993 - bdevname(bio->bi_bdev, b), 1993 + mdname(mddev), bio_devname(bio, b), 1994 1994 (unsigned long long)r1_bio->sector); 1995 1995 for (d = 0; d < conf->raid_disks * 2; d++) { 1996 1996 rdev = conf->mirrors[d].rdev; ··· 2080 2082 b->bi_status = status; 2081 2083 b->bi_iter.bi_sector = r1_bio->sector + 2082 2084 conf->mirrors[i].rdev->data_offset; 2083 - b->bi_bdev = conf->mirrors[i].rdev->bdev; 2085 + bio_set_dev(b, conf->mirrors[i].rdev->bdev); 2084 2086 b->bi_end_io = end_sync_read; 2085 2087 rp->raid_bio = r1_bio; 2086 2088 b->bi_private = rp; ··· 2348 2350 2349 2351 bio_trim(wbio, sector - r1_bio->sector, sectors); 2350 2352 wbio->bi_iter.bi_sector += rdev->data_offset; 2351 - wbio->bi_bdev = rdev->bdev; 2353 + bio_set_dev(wbio, rdev->bdev); 2352 2354 2353 2355 if (submit_bio_wait(wbio) < 0) 2354 2356 /* failure! 
*/ ··· 2438 2440 struct mddev *mddev = conf->mddev; 2439 2441 struct bio *bio; 2440 2442 struct md_rdev *rdev; 2441 - dev_t bio_dev; 2442 2443 sector_t bio_sector; 2443 2444 2444 2445 clear_bit(R1BIO_ReadError, &r1_bio->state); ··· 2451 2454 */ 2452 2455 2453 2456 bio = r1_bio->bios[r1_bio->read_disk]; 2454 - bio_dev = bio->bi_bdev->bd_dev; 2455 2457 bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector; 2456 2458 bio_put(bio); 2457 2459 r1_bio->bios[r1_bio->read_disk] = NULL; ··· 2723 2727 if (bio->bi_end_io) { 2724 2728 atomic_inc(&rdev->nr_pending); 2725 2729 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; 2726 - bio->bi_bdev = rdev->bdev; 2730 + bio_set_dev(bio, rdev->bdev); 2727 2731 if (test_bit(FailFast, &rdev->flags)) 2728 2732 bio->bi_opf |= MD_FAILFAST; 2729 2733 } ··· 2849 2853 bio = r1_bio->bios[i]; 2850 2854 if (bio->bi_end_io == end_sync_read) { 2851 2855 read_targets--; 2852 - md_sync_acct(bio->bi_bdev, nr_sectors); 2856 + md_sync_acct_bio(bio, nr_sectors); 2853 2857 if (read_targets == 1) 2854 2858 bio->bi_opf &= ~MD_FAILFAST; 2855 2859 generic_make_request(bio); ··· 2858 2862 } else { 2859 2863 atomic_set(&r1_bio->remaining, 1); 2860 2864 bio = r1_bio->bios[r1_bio->read_disk]; 2861 - md_sync_acct(bio->bi_bdev, nr_sectors); 2865 + md_sync_acct_bio(bio, nr_sectors); 2862 2866 if (read_targets == 1) 2863 2867 bio->bi_opf &= ~MD_FAILFAST; 2864 2868 generic_make_request(bio);
+24 -26
drivers/md/raid10.c
··· 901 901 902 902 while (bio) { /* submit pending writes */ 903 903 struct bio *next = bio->bi_next; 904 - struct md_rdev *rdev = (void*)bio->bi_bdev; 904 + struct md_rdev *rdev = (void*)bio->bi_disk; 905 905 bio->bi_next = NULL; 906 - bio->bi_bdev = rdev->bdev; 906 + bio_set_dev(bio, rdev->bdev); 907 907 if (test_bit(Faulty, &rdev->flags)) { 908 908 bio_io_error(bio); 909 909 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 910 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 910 + !blk_queue_discard(bio->bi_disk->queue))) 911 911 /* Just ignore it */ 912 912 bio_endio(bio); 913 913 else ··· 1085 1085 1086 1086 while (bio) { /* submit pending writes */ 1087 1087 struct bio *next = bio->bi_next; 1088 - struct md_rdev *rdev = (void*)bio->bi_bdev; 1088 + struct md_rdev *rdev = (void*)bio->bi_disk; 1089 1089 bio->bi_next = NULL; 1090 - bio->bi_bdev = rdev->bdev; 1090 + bio_set_dev(bio, rdev->bdev); 1091 1091 if (test_bit(Faulty, &rdev->flags)) { 1092 1092 bio_io_error(bio); 1093 1093 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1094 - !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1094 + !blk_queue_discard(bio->bi_disk->queue))) 1095 1095 /* Just ignore it */ 1096 1096 bio_endio(bio); 1097 1097 else ··· 1200 1200 1201 1201 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + 1202 1202 choose_data_offset(r10_bio, rdev); 1203 - read_bio->bi_bdev = rdev->bdev; 1203 + bio_set_dev(read_bio, rdev->bdev); 1204 1204 read_bio->bi_end_io = raid10_end_read_request; 1205 1205 bio_set_op_attrs(read_bio, op, do_sync); 1206 1206 if (test_bit(FailFast, &rdev->flags) && ··· 1209 1209 read_bio->bi_private = r10_bio; 1210 1210 1211 1211 if (mddev->gendisk) 1212 - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), 1212 + trace_block_bio_remap(read_bio->bi_disk->queue, 1213 1213 read_bio, disk_devt(mddev->gendisk), 1214 1214 r10_bio->sector); 1215 1215 generic_make_request(read_bio); ··· 1249 1249 1250 1250 mbio->bi_iter.bi_sector = 
(r10_bio->devs[n_copy].addr + 1251 1251 choose_data_offset(r10_bio, rdev)); 1252 - mbio->bi_bdev = rdev->bdev; 1252 + bio_set_dev(mbio, rdev->bdev); 1253 1253 mbio->bi_end_io = raid10_end_write_request; 1254 1254 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1255 1255 if (!replacement && test_bit(FailFast, ··· 1259 1259 mbio->bi_private = r10_bio; 1260 1260 1261 1261 if (conf->mddev->gendisk) 1262 - trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), 1262 + trace_block_bio_remap(mbio->bi_disk->queue, 1263 1263 mbio, disk_devt(conf->mddev->gendisk), 1264 1264 r10_bio->sector); 1265 1265 /* flush_pending_writes() needs access to the rdev so...*/ 1266 - mbio->bi_bdev = (void *)rdev; 1266 + mbio->bi_disk = (void *)rdev; 1267 1267 1268 1268 atomic_inc(&r10_bio->remaining); 1269 1269 ··· 2094 2094 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 2095 2095 tbio->bi_opf |= MD_FAILFAST; 2096 2096 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2097 - tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2097 + bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); 2098 2098 generic_make_request(tbio); 2099 2099 } 2100 2100 ··· 2552 2552 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); 2553 2553 wbio->bi_iter.bi_sector = wsector + 2554 2554 choose_data_offset(r10_bio, rdev); 2555 - wbio->bi_bdev = rdev->bdev; 2555 + bio_set_dev(wbio, rdev->bdev); 2556 2556 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2557 2557 2558 2558 if (submit_bio_wait(wbio) < 0) ··· 2575 2575 struct bio *bio; 2576 2576 struct r10conf *conf = mddev->private; 2577 2577 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2578 - dev_t bio_dev; 2579 2578 sector_t bio_last_sector; 2580 2579 2581 2580 /* we got a read error. Maybe the drive is bad. Maybe just ··· 2586 2587 * frozen. 
2587 2588 */ 2588 2589 bio = r10_bio->devs[slot].bio; 2589 - bio_dev = bio->bi_bdev->bd_dev; 2590 2590 bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; 2591 2591 bio_put(bio); 2592 2592 r10_bio->devs[slot].bio = NULL; ··· 2948 2950 2949 2951 /* Again, very different code for resync and recovery. 2950 2952 * Both must result in an r10bio with a list of bios that 2951 - * have bi_end_io, bi_sector, bi_bdev set, 2953 + * have bi_end_io, bi_sector, bi_disk set, 2952 2954 * and bi_private set to the r10bio. 2953 2955 * For recovery, we may actually create several r10bios 2954 2956 * with 2 bios in each, that correspond to the bios in the main one. ··· 3093 3095 from_addr = r10_bio->devs[j].addr; 3094 3096 bio->bi_iter.bi_sector = from_addr + 3095 3097 rdev->data_offset; 3096 - bio->bi_bdev = rdev->bdev; 3098 + bio_set_dev(bio, rdev->bdev); 3097 3099 atomic_inc(&rdev->nr_pending); 3098 3100 /* and we write to 'i' (if not in_sync) */ 3099 3101 ··· 3115 3117 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3116 3118 bio->bi_iter.bi_sector = to_addr 3117 3119 + mrdev->data_offset; 3118 - bio->bi_bdev = mrdev->bdev; 3120 + bio_set_dev(bio, mrdev->bdev); 3119 3121 atomic_inc(&r10_bio->remaining); 3120 3122 } else 3121 3123 r10_bio->devs[1].bio->bi_end_io = NULL; ··· 3141 3143 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3142 3144 bio->bi_iter.bi_sector = to_addr + 3143 3145 mreplace->data_offset; 3144 - bio->bi_bdev = mreplace->bdev; 3146 + bio_set_dev(bio, mreplace->bdev); 3145 3147 atomic_inc(&r10_bio->remaining); 3146 3148 break; 3147 3149 } ··· 3287 3289 if (test_bit(FailFast, &rdev->flags)) 3288 3290 bio->bi_opf |= MD_FAILFAST; 3289 3291 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3290 - bio->bi_bdev = rdev->bdev; 3292 + bio_set_dev(bio, rdev->bdev); 3291 3293 count++; 3292 3294 3293 3295 rdev = rcu_dereference(conf->mirrors[d].replacement); ··· 3309 3311 if (test_bit(FailFast, &rdev->flags)) 3310 3312 bio->bi_opf |= MD_FAILFAST; 3311 3313 
bio->bi_iter.bi_sector = sector + rdev->data_offset; 3312 - bio->bi_bdev = rdev->bdev; 3314 + bio_set_dev(bio, rdev->bdev); 3313 3315 count++; 3314 3316 rcu_read_unlock(); 3315 3317 } ··· 3365 3367 r10_bio->sectors = nr_sectors; 3366 3368 3367 3369 if (bio->bi_end_io == end_sync_read) { 3368 - md_sync_acct(bio->bi_bdev, nr_sectors); 3370 + md_sync_acct_bio(bio, nr_sectors); 3369 3371 bio->bi_status = 0; 3370 3372 generic_make_request(bio); 3371 3373 } ··· 4381 4383 4382 4384 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4383 4385 4384 - read_bio->bi_bdev = rdev->bdev; 4386 + bio_set_dev(read_bio, rdev->bdev); 4385 4387 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4386 4388 + rdev->data_offset); 4387 4389 read_bio->bi_private = r10_bio; ··· 4415 4417 if (!rdev2 || test_bit(Faulty, &rdev2->flags)) 4416 4418 continue; 4417 4419 4418 - b->bi_bdev = rdev2->bdev; 4420 + bio_set_dev(b, rdev2->bdev); 4419 4421 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + 4420 4422 rdev2->new_data_offset; 4421 4423 b->bi_end_io = end_reshape_write; ··· 4447 4449 r10_bio->sectors = nr_sectors; 4448 4450 4449 4451 /* Now submit the read */ 4450 - md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); 4452 + md_sync_acct_bio(read_bio, r10_bio->sectors); 4451 4453 atomic_inc(&r10_bio->remaining); 4452 4454 read_bio->bi_next = NULL; 4453 4455 generic_make_request(read_bio); ··· 4509 4511 } 4510 4512 atomic_inc(&rdev->nr_pending); 4511 4513 rcu_read_unlock(); 4512 - md_sync_acct(b->bi_bdev, r10_bio->sectors); 4514 + md_sync_acct_bio(b, r10_bio->sectors); 4513 4515 atomic_inc(&r10_bio->remaining); 4514 4516 b->bi_next = NULL; 4515 4517 generic_make_request(b);
+3 -3
drivers/md/raid5-cache.c
··· 745 745 struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); 746 746 747 747 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 748 - bio->bi_bdev = log->rdev->bdev; 748 + bio_set_dev(bio, log->rdev->bdev); 749 749 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; 750 750 751 751 return bio; ··· 1313 1313 if (!do_flush) 1314 1314 return; 1315 1315 bio_reset(&log->flush_bio); 1316 - log->flush_bio.bi_bdev = log->rdev->bdev; 1316 + bio_set_dev(&log->flush_bio, log->rdev->bdev); 1317 1317 log->flush_bio.bi_end_io = r5l_log_flush_endio; 1318 1318 log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 1319 1319 submit_bio(&log->flush_bio); ··· 1691 1691 sector_t offset) 1692 1692 { 1693 1693 bio_reset(ctx->ra_bio); 1694 - ctx->ra_bio->bi_bdev = log->rdev->bdev; 1694 + bio_set_dev(ctx->ra_bio, log->rdev->bdev); 1695 1695 bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); 1696 1696 ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; 1697 1697
+3 -3
drivers/md/raid5-ppl.c
··· 415 415 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n", 416 416 __func__, io->seq, bio->bi_iter.bi_size, 417 417 (unsigned long long)bio->bi_iter.bi_sector, 418 - bdevname(bio->bi_bdev, b)); 418 + bio_devname(bio, b)); 419 419 420 420 submit_bio(bio); 421 421 } ··· 453 453 454 454 bio->bi_end_io = ppl_log_endio; 455 455 bio->bi_opf = REQ_OP_WRITE | REQ_FUA; 456 - bio->bi_bdev = log->rdev->bdev; 456 + bio_set_dev(bio, log->rdev->bdev); 457 457 bio->bi_iter.bi_sector = log->rdev->ppl.sector; 458 458 bio_add_page(bio, io->header_page, PAGE_SIZE, 0); 459 459 ··· 468 468 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, 469 469 ppl_conf->bs); 470 470 bio->bi_opf = prev->bi_opf; 471 - bio->bi_bdev = prev->bi_bdev; 471 + bio_copy_dev(bio, prev); 472 472 bio->bi_iter.bi_sector = bio_end_sector(prev); 473 473 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); 474 474
+9 -7
drivers/md/raid5.c
··· 1096 1096 1097 1097 set_bit(STRIPE_IO_STARTED, &sh->state); 1098 1098 1099 - bi->bi_bdev = rdev->bdev; 1099 + bio_set_dev(bi, rdev->bdev); 1100 1100 bio_set_op_attrs(bi, op, op_flags); 1101 1101 bi->bi_end_io = op_is_write(op) 1102 1102 ? raid5_end_write_request ··· 1145 1145 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1146 1146 1147 1147 if (conf->mddev->gendisk) 1148 - trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), 1148 + trace_block_bio_remap(bi->bi_disk->queue, 1149 1149 bi, disk_devt(conf->mddev->gendisk), 1150 1150 sh->dev[i].sector); 1151 1151 if (should_defer && op_is_write(op)) ··· 1160 1160 1161 1161 set_bit(STRIPE_IO_STARTED, &sh->state); 1162 1162 1163 - rbi->bi_bdev = rrdev->bdev; 1163 + bio_set_dev(rbi, rrdev->bdev); 1164 1164 bio_set_op_attrs(rbi, op, op_flags); 1165 1165 BUG_ON(!op_is_write(op)); 1166 1166 rbi->bi_end_io = raid5_end_write_request; ··· 1193 1193 if (op == REQ_OP_DISCARD) 1194 1194 rbi->bi_vcnt = 0; 1195 1195 if (conf->mddev->gendisk) 1196 - trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 1196 + trace_block_bio_remap(rbi->bi_disk->queue, 1197 1197 rbi, disk_devt(conf->mddev->gendisk), 1198 1198 sh->dev[i].sector); 1199 1199 if (should_defer && op_is_write(op)) ··· 5092 5092 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 5093 5093 { 5094 5094 struct r5conf *conf = mddev->private; 5095 - sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 5095 + sector_t sector = bio->bi_iter.bi_sector; 5096 5096 unsigned int chunk_sectors; 5097 5097 unsigned int bio_sectors = bio_sectors(bio); 5098 + 5099 + WARN_ON_ONCE(bio->bi_partno); 5098 5100 5099 5101 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); 5100 5102 return chunk_sectors >= ··· 5233 5231 atomic_inc(&rdev->nr_pending); 5234 5232 rcu_read_unlock(); 5235 5233 raid_bio->bi_next = (void*)rdev; 5236 - align_bi->bi_bdev = rdev->bdev; 5234 + bio_set_dev(align_bi, rdev->bdev); 5237 5235 bio_clear_flag(align_bi, 
BIO_SEG_VALID); 5238 5236 5239 5237 if (is_badblock(rdev, align_bi->bi_iter.bi_sector, ··· 5255 5253 spin_unlock_irq(&conf->device_lock); 5256 5254 5257 5255 if (mddev->gendisk) 5258 - trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 5256 + trace_block_bio_remap(align_bi->bi_disk->queue, 5259 5257 align_bi, disk_devt(mddev->gendisk), 5260 5258 raid_bio->bi_iter.bi_sector); 5261 5259 generic_make_request(align_bi);
+5 -4
drivers/nvdimm/nd.h
··· 390 390 void __nd_iostat_start(struct bio *bio, unsigned long *start); 391 391 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) 392 392 { 393 - struct gendisk *disk = bio->bi_bdev->bd_disk; 393 + struct gendisk *disk = bio->bi_disk; 394 394 395 395 if (!blk_queue_io_stat(disk->queue)) 396 396 return false; 397 397 398 398 *start = jiffies; 399 - generic_start_io_acct(bio_data_dir(bio), 399 + generic_start_io_acct(disk->queue, bio_data_dir(bio), 400 400 bio_sectors(bio), &disk->part0); 401 401 return true; 402 402 } 403 403 static inline void nd_iostat_end(struct bio *bio, unsigned long start) 404 404 { 405 - struct gendisk *disk = bio->bi_bdev->bd_disk; 405 + struct gendisk *disk = bio->bi_disk; 406 406 407 - generic_end_io_acct(bio_data_dir(bio), &disk->part0, start); 407 + generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0, 408 + start); 408 409 } 409 410 static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, 410 411 unsigned int len)
+2 -9
drivers/nvme/host/core.c
··· 613 613 614 614 if (!disk) 615 615 goto submit; 616 - bio->bi_bdev = bdget_disk(disk, 0); 617 - if (!bio->bi_bdev) { 618 - ret = -ENODEV; 619 - goto out_unmap; 620 - } 616 + bio->bi_disk = disk; 621 617 622 618 if (meta_buffer && meta_len) { 623 619 struct bio_integrity_payload *bip; ··· 664 668 out_free_meta: 665 669 kfree(meta); 666 670 out_unmap: 667 - if (bio) { 668 - if (disk && bio->bi_bdev) 669 - bdput(bio->bi_bdev); 671 + if (bio) 670 672 blk_rq_unmap_user(bio); 671 - } 672 673 out: 673 674 blk_mq_free_request(req); 674 675 return ret;
+1 -3
drivers/nvme/host/fc.c
··· 2168 2168 .complete = nvme_fc_complete_rq, 2169 2169 .init_request = nvme_fc_init_request, 2170 2170 .exit_request = nvme_fc_exit_request, 2171 - .reinit_request = nvme_fc_reinit_request, 2172 2171 .init_hctx = nvme_fc_init_hctx, 2173 2172 .poll = nvme_fc_poll, 2174 2173 .timeout = nvme_fc_timeout, ··· 2268 2269 2269 2270 nvme_fc_init_io_queues(ctrl); 2270 2271 2271 - ret = blk_mq_reinit_tagset(&ctrl->tag_set); 2272 + ret = blk_mq_reinit_tagset(&ctrl->tag_set, nvme_fc_reinit_request); 2272 2273 if (ret) 2273 2274 goto out_free_io_queues; 2274 2275 ··· 2654 2655 .complete = nvme_fc_complete_rq, 2655 2656 .init_request = nvme_fc_init_request, 2656 2657 .exit_request = nvme_fc_exit_request, 2657 - .reinit_request = nvme_fc_reinit_request, 2658 2658 .init_hctx = nvme_fc_init_admin_hctx, 2659 2659 .timeout = nvme_fc_timeout, 2660 2660 };
+2 -13
drivers/nvme/host/lightnvm.c
··· 643 643 vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma); 644 644 } 645 645 646 - if (!disk) 647 - goto submit; 648 - 649 - bio->bi_bdev = bdget_disk(disk, 0); 650 - if (!bio->bi_bdev) { 651 - ret = -ENODEV; 652 - goto err_meta; 653 - } 646 + bio->bi_disk = disk; 654 647 } 655 648 656 - submit: 657 649 blk_execute_rq(q, NULL, rq, 0); 658 650 659 651 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) ··· 665 673 if (meta_buf && meta_len) 666 674 dma_pool_free(dev->dma_pool, metadata, metadata_dma); 667 675 err_map: 668 - if (bio) { 669 - if (disk && bio->bi_bdev) 670 - bdput(bio->bi_bdev); 676 + if (bio) 671 677 blk_rq_unmap_user(bio); 672 - } 673 678 err_ppa: 674 679 if (ppa_buf && ppa_len) 675 680 dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
+6 -5
drivers/nvme/host/rdma.c
··· 711 711 if (ctrl->ctrl.queue_count > 1) { 712 712 nvme_rdma_free_io_queues(ctrl); 713 713 714 - ret = blk_mq_reinit_tagset(&ctrl->tag_set); 714 + ret = blk_mq_reinit_tagset(&ctrl->tag_set, 715 + nvme_rdma_reinit_request); 715 716 if (ret) 716 717 goto requeue; 717 718 } 718 719 719 720 nvme_rdma_stop_and_free_queue(&ctrl->queues[0]); 720 721 721 - ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set); 722 + ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set, 723 + nvme_rdma_reinit_request); 722 724 if (ret) 723 725 goto requeue; 724 726 ··· 1523 1521 .complete = nvme_rdma_complete_rq, 1524 1522 .init_request = nvme_rdma_init_request, 1525 1523 .exit_request = nvme_rdma_exit_request, 1526 - .reinit_request = nvme_rdma_reinit_request, 1527 1524 .init_hctx = nvme_rdma_init_hctx, 1528 1525 .poll = nvme_rdma_poll, 1529 1526 .timeout = nvme_rdma_timeout, ··· 1534 1533 .complete = nvme_rdma_complete_rq, 1535 1534 .init_request = nvme_rdma_init_request, 1536 1535 .exit_request = nvme_rdma_exit_request, 1537 - .reinit_request = nvme_rdma_reinit_request, 1538 1536 .init_hctx = nvme_rdma_init_admin_hctx, 1539 1537 .timeout = nvme_rdma_timeout, 1540 1538 }; ··· 1731 1731 } 1732 1732 1733 1733 if (ctrl->ctrl.queue_count > 1) { 1734 - ret = blk_mq_reinit_tagset(&ctrl->tag_set); 1734 + ret = blk_mq_reinit_tagset(&ctrl->tag_set, 1735 + nvme_rdma_reinit_request); 1735 1736 if (ret) 1736 1737 goto del_dead_ctrl; 1737 1738
+3 -3
drivers/nvme/target/io-cmd.c
··· 68 68 69 69 nvmet_inline_bio_init(req); 70 70 bio = &req->inline_bio; 71 - bio->bi_bdev = req->ns->bdev; 71 + bio_set_dev(bio, req->ns->bdev); 72 72 bio->bi_iter.bi_sector = sector; 73 73 bio->bi_private = req; 74 74 bio->bi_end_io = nvmet_bio_done; ··· 80 80 struct bio *prev = bio; 81 81 82 82 bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); 83 - bio->bi_bdev = req->ns->bdev; 83 + bio_set_dev(bio, req->ns->bdev); 84 84 bio->bi_iter.bi_sector = sector; 85 85 bio_set_op_attrs(bio, op, op_flags); 86 86 ··· 104 104 nvmet_inline_bio_init(req); 105 105 bio = &req->inline_bio; 106 106 107 - bio->bi_bdev = req->ns->bdev; 107 + bio_set_dev(bio, req->ns->bdev); 108 108 bio->bi_private = req; 109 109 bio->bi_end_io = nvmet_bio_done; 110 110 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+2 -2
drivers/s390/block/dcssblk.c
··· 856 856 blk_queue_split(q, &bio); 857 857 858 858 bytes_done = 0; 859 - dev_info = bio->bi_bdev->bd_disk->private_data; 859 + dev_info = bio->bi_disk->private_data; 860 860 if (dev_info == NULL) 861 861 goto fail; 862 862 if ((bio->bi_iter.bi_sector & 7) != 0 || 863 863 (bio->bi_iter.bi_size & 4095) != 0) 864 864 /* Request is not page-aligned. */ 865 865 goto fail; 866 - if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { 866 + if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) { 867 867 /* Request beyond end of DCSS segment. */ 868 868 goto fail; 869 869 }
+1 -1
drivers/s390/block/xpram.c
··· 183 183 */ 184 184 static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) 185 185 { 186 - xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 186 + xpram_device_t *xdev = bio->bi_disk->private_data; 187 187 struct bio_vec bvec; 188 188 struct bvec_iter iter; 189 189 unsigned int index;
+2 -2
drivers/target/target_core_iblock.c
··· 338 338 return NULL; 339 339 } 340 340 341 - bio->bi_bdev = ib_dev->ibd_bd; 341 + bio_set_dev(bio, ib_dev->ibd_bd); 342 342 bio->bi_private = cmd; 343 343 bio->bi_end_io = &iblock_bio_done; 344 344 bio->bi_iter.bi_sector = lba; ··· 395 395 396 396 bio = bio_alloc(GFP_KERNEL, 0); 397 397 bio->bi_end_io = iblock_end_io_flush; 398 - bio->bi_bdev = ib_dev->ibd_bd; 398 + bio_set_dev(bio, ib_dev->ibd_bd); 399 399 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 400 400 if (!immed) 401 401 bio->bi_private = cmd;
+3 -2
fs/block_dev.c
··· 223 223 } 224 224 225 225 bio_init(&bio, vecs, nr_pages); 226 - bio.bi_bdev = bdev; 226 + bio_set_dev(&bio, bdev); 227 227 bio.bi_iter.bi_sector = pos >> 9; 228 228 bio.bi_write_hint = iocb->ki_hint; 229 229 bio.bi_private = current; ··· 362 362 363 363 blk_start_plug(&plug); 364 364 for (;;) { 365 - bio->bi_bdev = bdev; 365 + bio_set_dev(bio, bdev); 366 366 bio->bi_iter.bi_sector = pos >> 9; 367 367 bio->bi_write_hint = iocb->ki_hint; 368 368 bio->bi_private = dio; ··· 1451 1451 bdev->bd_disk = disk; 1452 1452 bdev->bd_queue = disk->queue; 1453 1453 bdev->bd_contains = bdev; 1454 + bdev->bd_partno = partno; 1454 1455 1455 1456 if (!partno) { 1456 1457 ret = -ENXIO;
+18 -23
fs/btrfs/check-integrity.c
··· 296 296 struct btrfsic_dev_state *ds, 297 297 struct btrfsic_dev_state_hashtable *h); 298 298 static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds); 299 - static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup( 300 - struct block_device *bdev, 299 + static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, 301 300 struct btrfsic_dev_state_hashtable *h); 302 301 static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void); 303 302 static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf); ··· 384 385 int superblock_mirror_num, 385 386 struct btrfsic_dev_state **selected_dev_state, 386 387 struct btrfs_super_block *selected_super); 387 - static struct btrfsic_dev_state *btrfsic_dev_state_lookup( 388 - struct block_device *bdev); 388 + static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev); 389 389 static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 390 390 u64 bytenr, 391 391 struct btrfsic_dev_state *dev_state, ··· 624 626 list_del(&ds->collision_resolving_node); 625 627 } 626 628 627 - static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup( 628 - struct block_device *bdev, 629 + static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev, 629 630 struct btrfsic_dev_state_hashtable *h) 630 631 { 631 632 const unsigned int hashval = 632 - (((unsigned int)((uintptr_t)bdev)) & 633 - (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1)); 633 + dev & (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1); 634 634 struct btrfsic_dev_state *ds; 635 635 636 636 list_for_each_entry(ds, h->table + hashval, collision_resolving_node) { 637 - if (ds->bdev == bdev) 637 + if (ds->bdev->bd_dev == dev) 638 638 return ds; 639 639 } 640 640 ··· 664 668 if (!device->bdev || !device->name) 665 669 continue; 666 670 667 - dev_state = btrfsic_dev_state_lookup(device->bdev); 671 + dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev); 668 672 BUG_ON(NULL == dev_state); 669 
673 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 670 674 ret = btrfsic_process_superblock_dev_mirror( ··· 1552 1556 } 1553 1557 1554 1558 device = multi->stripes[0].dev; 1555 - block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev); 1559 + block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev); 1556 1560 block_ctx_out->dev_bytenr = multi->stripes[0].physical; 1557 1561 block_ctx_out->start = bytenr; 1558 1562 block_ctx_out->len = len; ··· 1635 1639 unsigned int j; 1636 1640 1637 1641 bio = btrfs_io_bio_alloc(num_pages - i); 1638 - bio->bi_bdev = block_ctx->dev->bdev; 1642 + bio_set_dev(bio, block_ctx->dev->bdev); 1639 1643 bio->bi_iter.bi_sector = dev_bytenr >> 9; 1640 1644 bio_set_op_attrs(bio, REQ_OP_READ, 0); 1641 1645 ··· 2650 2654 pr_info("btrfsic: error, kmalloc failed!\n"); 2651 2655 return NULL; 2652 2656 } 2653 - dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev); 2657 + dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev); 2654 2658 if (NULL == dev_state) { 2655 2659 pr_info("btrfsic: error, lookup dev_state failed!\n"); 2656 2660 btrfsic_block_free(block); ··· 2730 2734 } 2731 2735 } 2732 2736 2733 - static struct btrfsic_dev_state *btrfsic_dev_state_lookup( 2734 - struct block_device *bdev) 2737 + static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev) 2735 2738 { 2736 - return btrfsic_dev_state_hashtable_lookup(bdev, 2739 + return btrfsic_dev_state_hashtable_lookup(dev, 2737 2740 &btrfsic_dev_state_hashtable); 2738 2741 } 2739 2742 ··· 2746 2751 mutex_lock(&btrfsic_mutex); 2747 2752 /* since btrfsic_submit_bh() might also be called before 2748 2753 * btrfsic_mount(), this might return NULL */ 2749 - dev_state = btrfsic_dev_state_lookup(bh->b_bdev); 2754 + dev_state = btrfsic_dev_state_lookup(bh->b_bdev->bd_dev); 2750 2755 2751 2756 /* Only called to write the superblock (incl. 
FLUSH/FUA) */ 2752 2757 if (NULL != dev_state && ··· 2803 2808 mutex_lock(&btrfsic_mutex); 2804 2809 /* since btrfsic_submit_bio() is also called before 2805 2810 * btrfsic_mount(), this might return NULL */ 2806 - dev_state = btrfsic_dev_state_lookup(bio->bi_bdev); 2811 + dev_state = btrfsic_dev_state_lookup(bio_dev(bio)); 2807 2812 if (NULL != dev_state && 2808 2813 (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) { 2809 2814 unsigned int i = 0; ··· 2819 2824 bio_is_patched = 0; 2820 2825 if (dev_state->state->print_mask & 2821 2826 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2822 - pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", 2827 + pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n", 2823 2828 bio_op(bio), bio->bi_opf, segs, 2824 2829 (unsigned long long)bio->bi_iter.bi_sector, 2825 - dev_bytenr, bio->bi_bdev); 2830 + dev_bytenr, bio->bi_disk); 2826 2831 2827 2832 mapped_datav = kmalloc_array(segs, 2828 2833 sizeof(*mapped_datav), GFP_NOFS); ··· 2851 2856 } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { 2852 2857 if (dev_state->state->print_mask & 2853 2858 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 2854 - pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", 2855 - bio_op(bio), bio->bi_opf, bio->bi_bdev); 2859 + pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n", 2860 + bio_op(bio), bio->bi_opf, bio->bi_disk); 2856 2861 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 2857 2862 if ((dev_state->state->print_mask & 2858 2863 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | ··· 2993 2998 continue; 2994 2999 2995 3000 ds = btrfsic_dev_state_hashtable_lookup( 2996 - device->bdev, 3001 + device->bdev->bd_dev, 2997 3002 &btrfsic_dev_state_hashtable); 2998 3003 if (NULL != ds) { 2999 3004 state = ds->state;
+1 -1
fs/btrfs/disk-io.c
··· 3499 3499 3500 3500 bio_reset(bio); 3501 3501 bio->bi_end_io = btrfs_end_empty_barrier; 3502 - bio->bi_bdev = device->bdev; 3502 + bio_set_dev(bio, device->bdev); 3503 3503 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 3504 3504 init_completion(&device->flush_wait); 3505 3505 bio->bi_private = &device->flush_wait;
+3 -3
fs/btrfs/extent_io.c
··· 2033 2033 bio_put(bio); 2034 2034 return -EIO; 2035 2035 } 2036 - bio->bi_bdev = dev->bdev; 2036 + bio_set_dev(bio, dev->bdev); 2037 2037 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 2038 2038 bio_add_page(bio, page, length, pg_offset); 2039 2039 ··· 2335 2335 bio = btrfs_io_bio_alloc(1); 2336 2336 bio->bi_end_io = endio_func; 2337 2337 bio->bi_iter.bi_sector = failrec->logical >> 9; 2338 - bio->bi_bdev = fs_info->fs_devices->latest_bdev; 2338 + bio_set_dev(bio, fs_info->fs_devices->latest_bdev); 2339 2339 bio->bi_iter.bi_size = 0; 2340 2340 bio->bi_private = data; 2341 2341 ··· 2675 2675 struct bio *bio; 2676 2676 2677 2677 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset); 2678 - bio->bi_bdev = bdev; 2678 + bio_set_dev(bio, bdev); 2679 2679 bio->bi_iter.bi_sector = first_byte >> 9; 2680 2680 btrfs_io_bio_init(btrfs_io_bio(bio)); 2681 2681 return bio;
+5 -3
fs/btrfs/raid56.c
··· 1090 1090 */ 1091 1091 if (last_end == disk_start && stripe->dev->bdev && 1092 1092 !last->bi_status && 1093 - last->bi_bdev == stripe->dev->bdev) { 1093 + last->bi_disk == stripe->dev->bdev->bd_disk && 1094 + last->bi_partno == stripe->dev->bdev->bd_partno) { 1094 1095 ret = bio_add_page(last, page, PAGE_SIZE, 0); 1095 1096 if (ret == PAGE_SIZE) 1096 1097 return 0; ··· 1101 1100 /* put a new bio on the list */ 1102 1101 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1); 1103 1102 bio->bi_iter.bi_size = 0; 1104 - bio->bi_bdev = stripe->dev->bdev; 1103 + bio_set_dev(bio, stripe->dev->bdev); 1105 1104 bio->bi_iter.bi_sector = disk_start >> 9; 1106 1105 1107 1106 bio_add_page(bio, page, PAGE_SIZE, 0); ··· 1348 1347 stripe_start = stripe->physical; 1349 1348 if (physical >= stripe_start && 1350 1349 physical < stripe_start + rbio->stripe_len && 1351 - bio->bi_bdev == stripe->dev->bdev) { 1350 + bio->bi_disk == stripe->dev->bdev->bd_disk && 1351 + bio->bi_partno == stripe->dev->bdev->bd_partno) { 1352 1352 return i; 1353 1353 } 1354 1354 }
+6 -6
fs/btrfs/scrub.c
··· 1738 1738 1739 1739 WARN_ON(!page->page); 1740 1740 bio = btrfs_io_bio_alloc(1); 1741 - bio->bi_bdev = page->dev->bdev; 1741 + bio_set_dev(bio, page->dev->bdev); 1742 1742 1743 1743 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1744 1744 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) { ··· 1826 1826 } 1827 1827 1828 1828 bio = btrfs_io_bio_alloc(1); 1829 - bio->bi_bdev = page_bad->dev->bdev; 1829 + bio_set_dev(bio, page_bad->dev->bdev); 1830 1830 bio->bi_iter.bi_sector = page_bad->physical >> 9; 1831 1831 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1832 1832 ··· 1921 1921 1922 1922 bio->bi_private = sbio; 1923 1923 bio->bi_end_io = scrub_wr_bio_end_io; 1924 - bio->bi_bdev = sbio->dev->bdev; 1924 + bio_set_dev(bio, sbio->dev->bdev); 1925 1925 bio->bi_iter.bi_sector = sbio->physical >> 9; 1926 1926 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1927 1927 sbio->status = 0; ··· 1964 1964 1965 1965 sbio = sctx->wr_curr_bio; 1966 1966 sctx->wr_curr_bio = NULL; 1967 - WARN_ON(!sbio->bio->bi_bdev); 1967 + WARN_ON(!sbio->bio->bi_disk); 1968 1968 scrub_pending_bio_inc(sctx); 1969 1969 /* process all writes in a single worker thread. Then the block layer 1970 1970 * orders the requests before sending them to the driver which ··· 2321 2321 2322 2322 bio->bi_private = sbio; 2323 2323 bio->bi_end_io = scrub_bio_end_io; 2324 - bio->bi_bdev = sbio->dev->bdev; 2324 + bio_set_dev(bio, sbio->dev->bdev); 2325 2325 bio->bi_iter.bi_sector = sbio->physical >> 9; 2326 2326 bio_set_op_attrs(bio, REQ_OP_READ, 0); 2327 2327 sbio->status = 0; ··· 4627 4627 bio = btrfs_io_bio_alloc(1); 4628 4628 bio->bi_iter.bi_size = 0; 4629 4629 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; 4630 - bio->bi_bdev = dev->bdev; 4630 + bio_set_dev(bio, dev->bdev); 4631 4631 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 4632 4632 ret = bio_add_page(bio, page, PAGE_SIZE, 0); 4633 4633 if (ret != PAGE_SIZE) {
+1 -1
fs/btrfs/volumes.c
··· 6188 6188 rcu_read_unlock(); 6189 6189 } 6190 6190 #endif 6191 - bio->bi_bdev = dev->bdev; 6191 + bio_set_dev(bio, dev->bdev); 6192 6192 6193 6193 btrfs_bio_counter_inc_noblocked(fs_info); 6194 6194
+2 -2
fs/buffer.c
··· 3056 3056 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; 3057 3057 unsigned truncated_bytes; 3058 3058 3059 - maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 3059 + maxsector = get_capacity(bio->bi_disk); 3060 3060 if (!maxsector) 3061 3061 return; 3062 3062 ··· 3115 3115 } 3116 3116 3117 3117 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 3118 - bio->bi_bdev = bh->b_bdev; 3118 + bio_set_dev(bio, bh->b_bdev); 3119 3119 bio->bi_write_hint = write_hint; 3120 3120 3121 3121 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+1 -1
fs/crypto/bio.c
··· 115 115 err = -ENOMEM; 116 116 goto errout; 117 117 } 118 - bio->bi_bdev = inode->i_sb->s_bdev; 118 + bio_set_dev(bio, inode->i_sb->s_bdev); 119 119 bio->bi_iter.bi_sector = 120 120 pblk << (inode->i_sb->s_blocksize_bits - 9); 121 121 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+4 -4
fs/direct-io.c
··· 111 111 int op; 112 112 int op_flags; 113 113 blk_qc_t bio_cookie; 114 - struct block_device *bio_bdev; 114 + struct gendisk *bio_disk; 115 115 struct inode *inode; 116 116 loff_t i_size; /* i_size when submitted */ 117 117 dio_iodone_t *end_io; /* IO completion function */ ··· 377 377 */ 378 378 bio = bio_alloc(GFP_KERNEL, nr_vecs); 379 379 380 - bio->bi_bdev = bdev; 380 + bio_set_dev(bio, bdev); 381 381 bio->bi_iter.bi_sector = first_sector; 382 382 bio_set_op_attrs(bio, dio->op, dio->op_flags); 383 383 if (dio->is_async) ··· 412 412 if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) 413 413 bio_set_pages_dirty(bio); 414 414 415 - dio->bio_bdev = bio->bi_bdev; 415 + dio->bio_disk = bio->bi_disk; 416 416 417 417 if (sdio->submit_io) { 418 418 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); ··· 458 458 dio->waiter = current; 459 459 spin_unlock_irqrestore(&dio->bio_lock, flags); 460 460 if (!(dio->iocb->ki_flags & IOCB_HIPRI) || 461 - !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) 461 + !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie)) 462 462 io_schedule(); 463 463 /* wake up sets us TASK_RUNNING */ 464 464 spin_lock_irqsave(&dio->bio_lock, flags);
+1 -1
fs/exofs/ore.c
··· 869 869 goto out; 870 870 } 871 871 872 - bio->bi_bdev = NULL; 872 + bio->bi_disk = NULL; 873 873 bio->bi_next = NULL; 874 874 per_dev->offset = master_dev->offset; 875 875 per_dev->length = master_dev->length;
+2 -2
fs/ext4/page-io.c
··· 300 300 char b[BDEVNAME_SIZE]; 301 301 302 302 if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n", 303 - bdevname(bio->bi_bdev, b), 303 + bio_devname(bio, b), 304 304 (long long) bio->bi_iter.bi_sector, 305 305 (unsigned) bio_sectors(bio), 306 306 bio->bi_status)) { ··· 375 375 return -ENOMEM; 376 376 wbc_init_bio(io->io_wbc, bio); 377 377 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 378 - bio->bi_bdev = bh->b_bdev; 378 + bio_set_dev(bio, bh->b_bdev); 379 379 bio->bi_end_io = ext4_end_bio; 380 380 bio->bi_private = ext4_get_io_end(io->io_end); 381 381 io->io_bio = bio;
+1 -1
fs/ext4/readpage.c
··· 254 254 fscrypt_release_ctx(ctx); 255 255 goto set_error_page; 256 256 } 257 - bio->bi_bdev = bdev; 257 + bio_set_dev(bio, bdev); 258 258 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); 259 259 bio->bi_end_io = mpage_end_io; 260 260 bio->bi_private = ctx;
+3 -2
fs/f2fs/data.c
··· 142 142 } 143 143 } 144 144 if (bio) { 145 - bio->bi_bdev = bdev; 145 + bio_set_dev(bio, bdev); 146 146 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); 147 147 } 148 148 return bdev; ··· 161 161 static bool __same_bdev(struct f2fs_sb_info *sbi, 162 162 block_t blk_addr, struct bio *bio) 163 163 { 164 - return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev; 164 + struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL); 165 + return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; 165 166 } 166 167 167 168 /*
+1 -1
fs/f2fs/segment.c
··· 447 447 int ret; 448 448 449 449 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 450 - bio->bi_bdev = bdev; 450 + bio_set_dev(bio, bdev); 451 451 ret = submit_bio_wait(bio); 452 452 bio_put(bio); 453 453
+1 -1
fs/gfs2/lops.c
··· 268 268 269 269 bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); 270 270 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); 271 - bio->bi_bdev = sb->s_bdev; 271 + bio_set_dev(bio, sb->s_bdev); 272 272 bio->bi_end_io = gfs2_end_log_write; 273 273 bio->bi_private = sdp; 274 274
+1 -1
fs/gfs2/meta_io.c
··· 221 221 222 222 bio = bio_alloc(GFP_NOIO, num); 223 223 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 224 - bio->bi_bdev = bh->b_bdev; 224 + bio_set_dev(bio, bh->b_bdev); 225 225 while (num > 0) { 226 226 bh = *bhs; 227 227 if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
+1 -1
fs/gfs2/ops_fstype.c
··· 242 242 243 243 bio = bio_alloc(GFP_NOFS, 1); 244 244 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); 245 - bio->bi_bdev = sb->s_bdev; 245 + bio_set_dev(bio, sb->s_bdev); 246 246 bio_add_page(bio, page, PAGE_SIZE, 0); 247 247 248 248 bio->bi_end_io = end_bio_io_page;
+1 -1
fs/hfsplus/wrapper.c
··· 65 65 66 66 bio = bio_alloc(GFP_NOIO, 1); 67 67 bio->bi_iter.bi_sector = sector; 68 - bio->bi_bdev = sb->s_bdev; 68 + bio_set_dev(bio, sb->s_bdev); 69 69 bio_set_op_attrs(bio, op, op_flags); 70 70 71 71 if (op != WRITE && data)
+2 -2
fs/iomap.c
··· 805 805 struct bio *bio; 806 806 807 807 bio = bio_alloc(GFP_KERNEL, 1); 808 - bio->bi_bdev = iomap->bdev; 808 + bio_set_dev(bio, iomap->bdev); 809 809 bio->bi_iter.bi_sector = 810 810 iomap->blkno + ((pos - iomap->offset) >> 9); 811 811 bio->bi_private = dio; ··· 884 884 return 0; 885 885 886 886 bio = bio_alloc(GFP_KERNEL, nr_pages); 887 - bio->bi_bdev = iomap->bdev; 887 + bio_set_dev(bio, iomap->bdev); 888 888 bio->bi_iter.bi_sector = 889 889 iomap->blkno + ((pos - iomap->offset) >> 9); 890 890 bio->bi_write_hint = dio->iocb->ki_hint;
+2 -2
fs/jfs/jfs_logmgr.c
··· 1995 1995 bio = bio_alloc(GFP_NOFS, 1); 1996 1996 1997 1997 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); 1998 - bio->bi_bdev = log->bdev; 1998 + bio_set_dev(bio, log->bdev); 1999 1999 2000 2000 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); 2001 2001 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); ··· 2139 2139 2140 2140 bio = bio_alloc(GFP_NOFS, 1); 2141 2141 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); 2142 - bio->bi_bdev = log->bdev; 2142 + bio_set_dev(bio, log->bdev); 2143 2143 2144 2144 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); 2145 2145 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
+2 -2
fs/jfs/jfs_metapage.c
··· 430 430 len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage); 431 431 432 432 bio = bio_alloc(GFP_NOFS, 1); 433 - bio->bi_bdev = inode->i_sb->s_bdev; 433 + bio_set_dev(bio, inode->i_sb->s_bdev); 434 434 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); 435 435 bio->bi_end_io = metapage_write_end_io; 436 436 bio->bi_private = page; ··· 510 510 submit_bio(bio); 511 511 512 512 bio = bio_alloc(GFP_NOFS, 1); 513 - bio->bi_bdev = inode->i_sb->s_bdev; 513 + bio_set_dev(bio, inode->i_sb->s_bdev); 514 514 bio->bi_iter.bi_sector = 515 515 pblock << (inode->i_blkbits - 9); 516 516 bio->bi_end_io = metapage_read_end_io;
+88 -23
fs/kernfs/dir.c
··· 21 21 DEFINE_MUTEX(kernfs_mutex); 22 22 static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */ 23 23 static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */ 24 + static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */ 24 25 25 26 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) 26 27 ··· 508 507 struct kernfs_node *parent; 509 508 struct kernfs_root *root; 510 509 510 + /* 511 + * kernfs_node is freed with ->count 0, kernfs_find_and_get_node_by_ino 512 + * depends on this to filter reused stale node 513 + */ 511 514 if (!kn || !atomic_dec_and_test(&kn->count)) 512 515 return; 513 516 root = kernfs_root(kn); ··· 538 533 simple_xattrs_free(&kn->iattr->xattrs); 539 534 } 540 535 kfree(kn->iattr); 541 - ida_simple_remove(&root->ino_ida, kn->ino); 536 + spin_lock(&kernfs_idr_lock); 537 + idr_remove(&root->ino_idr, kn->id.ino); 538 + spin_unlock(&kernfs_idr_lock); 542 539 kmem_cache_free(kernfs_node_cache, kn); 543 540 544 541 kn = parent; ··· 549 542 goto repeat; 550 543 } else { 551 544 /* just released the root kn, free @root too */ 552 - ida_destroy(&root->ino_ida); 545 + idr_destroy(&root->ino_idr); 553 546 kfree(root); 554 547 } 555 548 } ··· 566 559 if (d_really_is_negative(dentry)) 567 560 goto out_bad_unlocked; 568 561 569 - kn = dentry->d_fsdata; 562 + kn = kernfs_dentry_node(dentry); 570 563 mutex_lock(&kernfs_mutex); 571 564 572 565 /* The kernfs node has been deactivated */ ··· 574 567 goto out_bad; 575 568 576 569 /* The kernfs node has been moved? 
*/ 577 - if (dentry->d_parent->d_fsdata != kn->parent) 570 + if (kernfs_dentry_node(dentry->d_parent) != kn->parent) 578 571 goto out_bad; 579 572 580 573 /* The kernfs node has been renamed */ ··· 594 587 return 0; 595 588 } 596 589 597 - static void kernfs_dop_release(struct dentry *dentry) 598 - { 599 - kernfs_put(dentry->d_fsdata); 600 - } 601 - 602 590 const struct dentry_operations kernfs_dops = { 603 591 .d_revalidate = kernfs_dop_revalidate, 604 - .d_release = kernfs_dop_release, 605 592 }; 606 593 607 594 /** ··· 611 610 */ 612 611 struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) 613 612 { 614 - if (dentry->d_sb->s_op == &kernfs_sops) 615 - return dentry->d_fsdata; 613 + if (dentry->d_sb->s_op == &kernfs_sops && 614 + !d_really_is_negative(dentry)) 615 + return kernfs_dentry_node(dentry); 616 616 return NULL; 617 617 } 618 618 ··· 622 620 unsigned flags) 623 621 { 624 622 struct kernfs_node *kn; 623 + u32 gen; 624 + int cursor; 625 625 int ret; 626 626 627 627 name = kstrdup_const(name, GFP_KERNEL); ··· 634 630 if (!kn) 635 631 goto err_out1; 636 632 637 - ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL); 633 + idr_preload(GFP_KERNEL); 634 + spin_lock(&kernfs_idr_lock); 635 + cursor = idr_get_cursor(&root->ino_idr); 636 + ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC); 637 + if (ret >= 0 && ret < cursor) 638 + root->next_generation++; 639 + gen = root->next_generation; 640 + spin_unlock(&kernfs_idr_lock); 641 + idr_preload_end(); 638 642 if (ret < 0) 639 643 goto err_out2; 640 - kn->ino = ret; 644 + kn->id.ino = ret; 645 + kn->id.generation = gen; 641 646 647 + /* 648 + * set ino first. 
This barrier is paired with atomic_inc_not_zero in 649 + * kernfs_find_and_get_node_by_ino 650 + */ 651 + smp_mb__before_atomic(); 642 652 atomic_set(&kn->count, 1); 643 653 atomic_set(&kn->active, KN_DEACTIVATED_BIAS); 644 654 RB_CLEAR_NODE(&kn->rb); ··· 682 664 kn->parent = parent; 683 665 } 684 666 return kn; 667 + } 668 + 669 + /* 670 + * kernfs_find_and_get_node_by_ino - get kernfs_node from inode number 671 + * @root: the kernfs root 672 + * @ino: inode number 673 + * 674 + * RETURNS: 675 + * NULL on failure. Return a kernfs node with reference counter incremented 676 + */ 677 + struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, 678 + unsigned int ino) 679 + { 680 + struct kernfs_node *kn; 681 + 682 + rcu_read_lock(); 683 + kn = idr_find(&root->ino_idr, ino); 684 + if (!kn) 685 + goto out; 686 + 687 + /* 688 + * Since kernfs_node is freed in RCU, it's possible an old node for ino 689 + * is freed, but reused before RCU grace period. But a freed node (see 690 + * kernfs_put) or an incompletedly initialized node (see 691 + * __kernfs_new_node) should have 'count' 0. We can use this fact to 692 + * filter out such node. 693 + */ 694 + if (!atomic_inc_not_zero(&kn->count)) { 695 + kn = NULL; 696 + goto out; 697 + } 698 + 699 + /* 700 + * The node could be a new node or a reused node. If it's a new node, 701 + * we are ok. If it's reused because of RCU (because of 702 + * SLAB_TYPESAFE_BY_RCU), the __kernfs_new_node always sets its 'ino' 703 + * before 'count'. So if 'count' is uptodate, 'ino' should be uptodate, 704 + * hence we can use 'ino' to filter stale node. 
705 + */ 706 + if (kn->id.ino != ino) 707 + goto out; 708 + rcu_read_unlock(); 709 + 710 + return kn; 711 + out: 712 + rcu_read_unlock(); 713 + kernfs_put(kn); 714 + return NULL; 685 715 } 686 716 687 717 /** ··· 941 875 if (!root) 942 876 return ERR_PTR(-ENOMEM); 943 877 944 - ida_init(&root->ino_ida); 878 + idr_init(&root->ino_idr); 945 879 INIT_LIST_HEAD(&root->supers); 880 + root->next_generation = 1; 946 881 947 882 kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO, 948 883 KERNFS_DIR); 949 884 if (!kn) { 950 - ida_destroy(&root->ino_ida); 885 + idr_destroy(&root->ino_idr); 951 886 kfree(root); 952 887 return ERR_PTR(-ENOMEM); 953 888 } ··· 1051 984 unsigned int flags) 1052 985 { 1053 986 struct dentry *ret; 1054 - struct kernfs_node *parent = dentry->d_parent->d_fsdata; 987 + struct kernfs_node *parent = dir->i_private; 1055 988 struct kernfs_node *kn; 1056 989 struct inode *inode; 1057 990 const void *ns = NULL; ··· 1068 1001 ret = NULL; 1069 1002 goto out_unlock; 1070 1003 } 1071 - kernfs_get(kn); 1072 - dentry->d_fsdata = kn; 1073 1004 1074 1005 /* attach dentry and inode */ 1075 1006 inode = kernfs_get_inode(dir->i_sb, kn); ··· 1104 1039 1105 1040 static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) 1106 1041 { 1107 - struct kernfs_node *kn = dentry->d_fsdata; 1042 + struct kernfs_node *kn = kernfs_dentry_node(dentry); 1108 1043 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; 1109 1044 int ret; 1110 1045 ··· 1124 1059 struct inode *new_dir, struct dentry *new_dentry, 1125 1060 unsigned int flags) 1126 1061 { 1127 - struct kernfs_node *kn = old_dentry->d_fsdata; 1062 + struct kernfs_node *kn = kernfs_dentry_node(old_dentry); 1128 1063 struct kernfs_node *new_parent = new_dir->i_private; 1129 1064 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; 1130 1065 int ret; ··· 1637 1572 static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx) 1638 1573 { 1639 1574 struct dentry *dentry = 
file->f_path.dentry; 1640 - struct kernfs_node *parent = dentry->d_fsdata; 1575 + struct kernfs_node *parent = kernfs_dentry_node(dentry); 1641 1576 struct kernfs_node *pos = file->private_data; 1642 1577 const void *ns = NULL; 1643 1578 ··· 1654 1589 const char *name = pos->name; 1655 1590 unsigned int type = dt_type(pos); 1656 1591 int len = strlen(name); 1657 - ino_t ino = pos->ino; 1592 + ino_t ino = pos->id.ino; 1658 1593 1659 1594 ctx->pos = pos->hash; 1660 1595 file->private_data = pos;
+5 -5
fs/kernfs/file.c
··· 616 616 617 617 static int kernfs_fop_open(struct inode *inode, struct file *file) 618 618 { 619 - struct kernfs_node *kn = file->f_path.dentry->d_fsdata; 619 + struct kernfs_node *kn = inode->i_private; 620 620 struct kernfs_root *root = kernfs_root(kn); 621 621 const struct kernfs_ops *ops; 622 622 struct kernfs_open_file *of; ··· 768 768 769 769 static int kernfs_fop_release(struct inode *inode, struct file *filp) 770 770 { 771 - struct kernfs_node *kn = filp->f_path.dentry->d_fsdata; 771 + struct kernfs_node *kn = inode->i_private; 772 772 struct kernfs_open_file *of = kernfs_of(filp); 773 773 774 774 if (kn->flags & KERNFS_HAS_RELEASE) { ··· 835 835 static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait) 836 836 { 837 837 struct kernfs_open_file *of = kernfs_of(filp); 838 - struct kernfs_node *kn = filp->f_path.dentry->d_fsdata; 838 + struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); 839 839 struct kernfs_open_node *on = kn->attr.open; 840 840 841 841 if (!kernfs_get_active(kn)) ··· 895 895 * have the matching @file available. Look up the inodes 896 896 * and generate the events manually. 897 897 */ 898 - inode = ilookup(info->sb, kn->ino); 898 + inode = ilookup(info->sb, kn->id.ino); 899 899 if (!inode) 900 900 continue; 901 901 ··· 903 903 if (parent) { 904 904 struct inode *p_inode; 905 905 906 - p_inode = ilookup(info->sb, parent->ino); 906 + p_inode = ilookup(info->sb, parent->id.ino); 907 907 if (p_inode) { 908 908 fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD, 909 909 inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
+5 -4
fs/kernfs/inode.c
··· 112 112 int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr) 113 113 { 114 114 struct inode *inode = d_inode(dentry); 115 - struct kernfs_node *kn = dentry->d_fsdata; 115 + struct kernfs_node *kn = inode->i_private; 116 116 int error; 117 117 118 118 if (!kn) ··· 154 154 155 155 ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size) 156 156 { 157 - struct kernfs_node *kn = dentry->d_fsdata; 157 + struct kernfs_node *kn = kernfs_dentry_node(dentry); 158 158 struct kernfs_iattrs *attrs; 159 159 160 160 attrs = kernfs_iattrs(kn); ··· 203 203 int kernfs_iop_getattr(const struct path *path, struct kstat *stat, 204 204 u32 request_mask, unsigned int query_flags) 205 205 { 206 - struct kernfs_node *kn = path->dentry->d_fsdata; 207 206 struct inode *inode = d_inode(path->dentry); 207 + struct kernfs_node *kn = inode->i_private; 208 208 209 209 mutex_lock(&kernfs_mutex); 210 210 kernfs_refresh_inode(kn, inode); ··· 220 220 inode->i_private = kn; 221 221 inode->i_mapping->a_ops = &kernfs_aops; 222 222 inode->i_op = &kernfs_iops; 223 + inode->i_generation = kn->id.generation; 223 224 224 225 set_default_inode_attr(inode, kn->mode); 225 226 kernfs_refresh_inode(kn, inode); ··· 266 265 { 267 266 struct inode *inode; 268 267 269 - inode = iget_locked(sb, kn->ino); 268 + inode = iget_locked(sb, kn->id.ino); 270 269 if (inode && (inode->i_state & I_NEW)) 271 270 kernfs_init_inode(kn, inode); 272 271
+9
fs/kernfs/kernfs-internal.h
··· 70 70 }; 71 71 #define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info)) 72 72 73 + static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry) 74 + { 75 + if (d_really_is_negative(dentry)) 76 + return NULL; 77 + return d_inode(dentry)->i_private; 78 + } 79 + 73 80 extern const struct super_operations kernfs_sops; 74 81 extern struct kmem_cache *kernfs_node_cache; 75 82 ··· 105 98 struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, 106 99 const char *name, umode_t mode, 107 100 unsigned flags); 101 + struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root, 102 + unsigned int ino); 108 103 109 104 /* 110 105 * file.c
+87 -7
fs/kernfs/mount.c
··· 16 16 #include <linux/pagemap.h> 17 17 #include <linux/namei.h> 18 18 #include <linux/seq_file.h> 19 + #include <linux/exportfs.h> 19 20 20 21 #include "kernfs-internal.h" 21 22 ··· 34 33 35 34 static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry) 36 35 { 37 - struct kernfs_root *root = kernfs_root(dentry->d_fsdata); 36 + struct kernfs_root *root = kernfs_root(kernfs_dentry_node(dentry)); 38 37 struct kernfs_syscall_ops *scops = root->syscall_ops; 39 38 40 39 if (scops && scops->show_options) ··· 44 43 45 44 static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry) 46 45 { 47 - struct kernfs_node *node = dentry->d_fsdata; 46 + struct kernfs_node *node = kernfs_dentry_node(dentry); 48 47 struct kernfs_root *root = kernfs_root(node); 49 48 struct kernfs_syscall_ops *scops = root->syscall_ops; 50 49 ··· 63 62 .remount_fs = kernfs_sop_remount_fs, 64 63 .show_options = kernfs_sop_show_options, 65 64 .show_path = kernfs_sop_show_path, 65 + }; 66 + 67 + /* 68 + * Similar to kernfs_fh_get_inode, this one gets kernfs node from inode 69 + * number and generation 70 + */ 71 + struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root, 72 + const union kernfs_node_id *id) 73 + { 74 + struct kernfs_node *kn; 75 + 76 + kn = kernfs_find_and_get_node_by_ino(root, id->ino); 77 + if (!kn) 78 + return NULL; 79 + if (kn->id.generation != id->generation) { 80 + kernfs_put(kn); 81 + return NULL; 82 + } 83 + return kn; 84 + } 85 + 86 + static struct inode *kernfs_fh_get_inode(struct super_block *sb, 87 + u64 ino, u32 generation) 88 + { 89 + struct kernfs_super_info *info = kernfs_info(sb); 90 + struct inode *inode; 91 + struct kernfs_node *kn; 92 + 93 + if (ino == 0) 94 + return ERR_PTR(-ESTALE); 95 + 96 + kn = kernfs_find_and_get_node_by_ino(info->root, ino); 97 + if (!kn) 98 + return ERR_PTR(-ESTALE); 99 + inode = kernfs_get_inode(sb, kn); 100 + kernfs_put(kn); 101 + if (!inode) 102 + return ERR_PTR(-ESTALE); 103 + 104 + if 
(generation && inode->i_generation != generation) { 105 + /* we didn't find the right inode.. */ 106 + iput(inode); 107 + return ERR_PTR(-ESTALE); 108 + } 109 + return inode; 110 + } 111 + 112 + static struct dentry *kernfs_fh_to_dentry(struct super_block *sb, struct fid *fid, 113 + int fh_len, int fh_type) 114 + { 115 + return generic_fh_to_dentry(sb, fid, fh_len, fh_type, 116 + kernfs_fh_get_inode); 117 + } 118 + 119 + static struct dentry *kernfs_fh_to_parent(struct super_block *sb, struct fid *fid, 120 + int fh_len, int fh_type) 121 + { 122 + return generic_fh_to_parent(sb, fid, fh_len, fh_type, 123 + kernfs_fh_get_inode); 124 + } 125 + 126 + static struct dentry *kernfs_get_parent_dentry(struct dentry *child) 127 + { 128 + struct kernfs_node *kn = kernfs_dentry_node(child); 129 + 130 + return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent)); 131 + } 132 + 133 + static const struct export_operations kernfs_export_ops = { 134 + .fh_to_dentry = kernfs_fh_to_dentry, 135 + .fh_to_parent = kernfs_fh_to_parent, 136 + .get_parent = kernfs_get_parent_dentry, 66 137 }; 67 138 68 139 /** ··· 232 159 sb->s_magic = magic; 233 160 sb->s_op = &kernfs_sops; 234 161 sb->s_xattr = kernfs_xattr_handlers; 162 + if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP) 163 + sb->s_export_op = &kernfs_export_ops; 235 164 sb->s_time_gran = 1; 236 165 237 166 /* get root inode, initialize and unlock it */ ··· 251 176 pr_debug("%s: could not get root dentry!\n", __func__); 252 177 return -ENOMEM; 253 178 } 254 - kernfs_get(info->root->kn); 255 - root->d_fsdata = info->root->kn; 256 179 sb->s_root = root; 257 180 sb->s_d_op = &kernfs_dops; 258 181 return 0; ··· 356 283 void kernfs_kill_sb(struct super_block *sb) 357 284 { 358 285 struct kernfs_super_info *info = kernfs_info(sb); 359 - struct kernfs_node *root_kn = sb->s_root->d_fsdata; 360 286 361 287 mutex_lock(&kernfs_mutex); 362 288 list_del(&info->node); ··· 367 295 */ 368 296 kill_anon_super(sb); 369 297 kfree(info); 370 - 
kernfs_put(root_kn); 371 298 } 372 299 373 300 /** ··· 401 330 402 331 void __init kernfs_init(void) 403 332 { 333 + 334 + /* 335 + * the slab is freed in RCU context, so kernfs_find_and_get_node_by_ino 336 + * can access the slab lock free. This could introduce stale nodes, 337 + * please see how kernfs_find_and_get_node_by_ino filters out stale 338 + * nodes. 339 + */ 404 340 kernfs_node_cache = kmem_cache_create("kernfs_node_cache", 405 341 sizeof(struct kernfs_node), 406 - 0, SLAB_PANIC, NULL); 342 + 0, 343 + SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, 344 + NULL); 407 345 }
+3 -3
fs/kernfs/symlink.c
··· 98 98 return 0; 99 99 } 100 100 101 - static int kernfs_getlink(struct dentry *dentry, char *path) 101 + static int kernfs_getlink(struct inode *inode, char *path) 102 102 { 103 - struct kernfs_node *kn = dentry->d_fsdata; 103 + struct kernfs_node *kn = inode->i_private; 104 104 struct kernfs_node *parent = kn->parent; 105 105 struct kernfs_node *target = kn->symlink.target_kn; 106 106 int error; ··· 124 124 body = kzalloc(PAGE_SIZE, GFP_KERNEL); 125 125 if (!body) 126 126 return ERR_PTR(-ENOMEM); 127 - error = kernfs_getlink(dentry, body); 127 + error = kernfs_getlink(inode, body); 128 128 if (unlikely(error < 0)) { 129 129 kfree(body); 130 130 return ERR_PTR(error);
+1 -1
fs/mpage.c
··· 83 83 } 84 84 85 85 if (bio) { 86 - bio->bi_bdev = bdev; 86 + bio_set_dev(bio, bdev); 87 87 bio->bi_iter.bi_sector = first_sector; 88 88 } 89 89 return bio;
+1 -1
fs/nfs/blocklayout/blocklayout.c
··· 130 130 131 131 if (bio) { 132 132 bio->bi_iter.bi_sector = disk_sector; 133 - bio->bi_bdev = bdev; 133 + bio_set_dev(bio, bdev); 134 134 bio->bi_end_io = end_io; 135 135 bio->bi_private = par; 136 136 }
+1 -1
fs/nilfs2/segbuf.c
··· 400 400 bio = bio_alloc(GFP_NOIO, nr_vecs); 401 401 } 402 402 if (likely(bio)) { 403 - bio->bi_bdev = nilfs->ns_bdev; 403 + bio_set_dev(bio, nilfs->ns_bdev); 404 404 bio->bi_iter.bi_sector = 405 405 start << (nilfs->ns_blocksize_bits - 9); 406 406 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 553 553 554 554 /* Must put everything in 512 byte sectors for the bio... */ 555 555 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); 556 - bio->bi_bdev = reg->hr_bdev; 556 + bio_set_dev(bio, reg->hr_bdev); 557 557 bio->bi_private = wc; 558 558 bio->bi_end_io = o2hb_bio_end_io; 559 559 bio_set_op_attrs(bio, op, op_flags);
+1 -1
fs/xfs/xfs_aops.c
··· 540 540 struct buffer_head *bh) 541 541 { 542 542 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 543 - bio->bi_bdev = bh->b_bdev; 543 + bio_set_dev(bio, bh->b_bdev); 544 544 } 545 545 546 546 static struct xfs_ioend *
+1 -1
fs/xfs/xfs_buf.c
··· 1281 1281 nr_pages = min(total_nr_pages, BIO_MAX_PAGES); 1282 1282 1283 1283 bio = bio_alloc(GFP_NOIO, nr_pages); 1284 - bio->bi_bdev = bp->b_target->bt_bdev; 1284 + bio_set_dev(bio, bp->b_target->bt_bdev); 1285 1285 bio->bi_iter.bi_sector = sector; 1286 1286 bio->bi_end_io = xfs_buf_bio_end_io; 1287 1287 bio->bi_private = bp;
+23 -4
include/linux/bio.h
··· 471 471 extern void bio_set_pages_dirty(struct bio *bio); 472 472 extern void bio_check_pages_dirty(struct bio *bio); 473 473 474 - void generic_start_io_acct(int rw, unsigned long sectors, 475 - struct hd_struct *part); 476 - void generic_end_io_acct(int rw, struct hd_struct *part, 477 - unsigned long start_time); 474 + void generic_start_io_acct(struct request_queue *q, int rw, 475 + unsigned long sectors, struct hd_struct *part); 476 + void generic_end_io_acct(struct request_queue *q, int rw, 477 + struct hd_struct *part, 478 + unsigned long start_time); 478 479 479 480 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 480 481 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" ··· 501 500 extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); 502 501 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); 503 502 extern unsigned int bvec_nr_vecs(unsigned short idx); 503 + 504 + #define bio_set_dev(bio, bdev) \ 505 + do { \ 506 + (bio)->bi_disk = (bdev)->bd_disk; \ 507 + (bio)->bi_partno = (bdev)->bd_partno; \ 508 + } while (0) 509 + 510 + #define bio_copy_dev(dst, src) \ 511 + do { \ 512 + (dst)->bi_disk = (src)->bi_disk; \ 513 + (dst)->bi_partno = (src)->bi_partno; \ 514 + } while (0) 515 + 516 + #define bio_dev(bio) \ 517 + disk_devt((bio)->bi_disk) 518 + 519 + #define bio_devname(bio, buf) \ 520 + __bdevname(bio_dev(bio), (buf)) 504 521 505 522 #ifdef CONFIG_BLK_CGROUP 506 523 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
+3
include/linux/blk-cgroup.h
··· 691 691 rcu_read_lock(); 692 692 blkcg = bio_blkcg(bio); 693 693 694 + /* associate blkcg if bio hasn't attached one */ 695 + bio_associate_blkcg(bio, &blkcg->css); 696 + 694 697 blkg = blkg_lookup(blkcg, q); 695 698 if (unlikely(!blkg)) { 696 699 spin_lock_irq(q->queue_lock);
+2 -3
include/linux/blk-mq.h
··· 97 97 unsigned int, unsigned int); 98 98 typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, 99 99 unsigned int); 100 - typedef int (reinit_request_fn)(void *, struct request *); 101 100 102 101 typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, 103 102 bool); ··· 142 143 */ 143 144 init_request_fn *init_request; 144 145 exit_request_fn *exit_request; 145 - reinit_request_fn *reinit_request; 146 146 /* Called from inside blk_get_request() */ 147 147 void (*initialize_rq_fn)(struct request *rq); 148 148 ··· 259 261 void blk_mq_freeze_queue_wait(struct request_queue *q); 260 262 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, 261 263 unsigned long timeout); 262 - int blk_mq_reinit_tagset(struct blk_mq_tag_set *set); 264 + int blk_mq_reinit_tagset(struct blk_mq_tag_set *set, 265 + int (reinit_request)(void *, struct request *)); 263 266 264 267 int blk_mq_map_queues(struct blk_mq_tag_set *set); 265 268 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
+2 -1
include/linux/blk_types.h
··· 48 48 */ 49 49 struct bio { 50 50 struct bio *bi_next; /* request queue link */ 51 - struct block_device *bi_bdev; 51 + struct gendisk *bi_disk; 52 + u8 bi_partno; 52 53 blk_status_t bi_status; 53 54 unsigned int bi_opf; /* bottom bits req flags, 54 55 * top bits REQ_OP. Use
+29 -31
include/linux/blkdev.h
··· 600 600 u64 write_hints[BLK_MAX_WRITE_HINTS]; 601 601 }; 602 602 603 - #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 604 - #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 605 - #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 606 - #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 607 - #define QUEUE_FLAG_DYING 5 /* queue being torn down */ 608 - #define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ 609 - #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ 610 - #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ 611 - #define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ 612 - #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ 613 - #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ 614 - #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ 603 + #define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */ 604 + #define QUEUE_FLAG_STOPPED 1 /* queue is stopped */ 605 + #define QUEUE_FLAG_DYING 2 /* queue being torn down */ 606 + #define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */ 607 + #define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */ 608 + #define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ 609 + #define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ 610 + #define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ 611 + #define QUEUE_FLAG_STACKABLE 8 /* supports request stacking */ 612 + #define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ 615 613 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 616 - #define QUEUE_FLAG_IO_STAT 13 /* do IO stats */ 617 - #define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */ 618 - #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ 619 - #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ 620 - #define QUEUE_FLAG_SECERASE 17 /* supports secure erase */ 621 - #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ 622 - #define QUEUE_FLAG_DEAD 19 /* queue 
tear-down finished */ 623 - #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 624 - #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 625 - #define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ 626 - #define QUEUE_FLAG_WC 23 /* Write back caching */ 627 - #define QUEUE_FLAG_FUA 24 /* device supports FUA writes */ 628 - #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ 629 - #define QUEUE_FLAG_DAX 26 /* device supports DAX */ 630 - #define QUEUE_FLAG_STATS 27 /* track rq completion times */ 631 - #define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */ 632 - #define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */ 633 - #define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */ 634 - #define QUEUE_FLAG_QUIESCED 31 /* queue has been quiesced */ 614 + #define QUEUE_FLAG_IO_STAT 10 /* do IO stats */ 615 + #define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */ 616 + #define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */ 617 + #define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */ 618 + #define QUEUE_FLAG_SECERASE 14 /* supports secure erase */ 619 + #define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */ 620 + #define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */ 621 + #define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */ 622 + #define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/ 623 + #define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */ 624 + #define QUEUE_FLAG_WC 20 /* Write back caching */ 625 + #define QUEUE_FLAG_FUA 21 /* device supports FUA writes */ 626 + #define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */ 627 + #define QUEUE_FLAG_DAX 23 /* device supports DAX */ 628 + #define QUEUE_FLAG_STATS 24 /* track rq completion times */ 629 + #define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */ 630 + #define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ 631 + #define 
QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ 632 + #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ 635 633 636 634 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 637 635 (1 << QUEUE_FLAG_STACKABLE) | \
+9 -4
include/linux/blktrace_api.h
··· 28 28 atomic_t dropped; 29 29 }; 30 30 31 + struct blkcg; 32 + 31 33 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 32 34 extern void blk_trace_shutdown(struct request_queue *); 33 - extern __printf(2, 3) 34 - void __trace_note_message(struct blk_trace *, const char *fmt, ...); 35 + extern __printf(3, 4) 36 + void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...); 35 37 36 38 /** 37 39 * blk_add_trace_msg - Add a (simple) message to the blktrace stream ··· 48 46 * NOTE: Can not use 'static inline' due to presence of var args... 49 47 * 50 48 **/ 51 - #define blk_add_trace_msg(q, fmt, ...) \ 49 + #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ 52 50 do { \ 53 51 struct blk_trace *bt = (q)->blk_trace; \ 54 52 if (unlikely(bt)) \ 55 - __trace_note_message(bt, fmt, ##__VA_ARGS__); \ 53 + __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\ 56 54 } while (0) 55 + #define blk_add_trace_msg(q, fmt, ...) \ 56 + blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) 57 57 #define BLK_TN_MAX_MSG 128 58 58 59 59 static inline bool blk_trace_note_message_enabled(struct request_queue *q) ··· 86 82 # define blk_trace_startstop(q, start) (-ENOTTY) 87 83 # define blk_trace_remove(q) (-ENOTTY) 88 84 # define blk_add_trace_msg(q, fmt, ...) do { } while (0) 85 + # define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0) 89 86 # define blk_trace_remove_sysfs(dev) do { } while (0) 90 87 # define blk_trace_note_message_enabled(q) (false) 91 88 static inline int blk_trace_init_sysfs(struct device *dev)
+15 -1
include/linux/cgroup.h
··· 578 578 /* returns ino associated with a cgroup */ 579 579 static inline ino_t cgroup_ino(struct cgroup *cgrp) 580 580 { 581 - return cgrp->kn->ino; 581 + return cgrp->kn->id.ino; 582 582 } 583 583 584 584 /* cft/css accessors for cftype->write() operation */ ··· 644 644 current->no_cgroup_migration = 0; 645 645 } 646 646 647 + static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) 648 + { 649 + return &cgrp->kn->id; 650 + } 651 + 652 + void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, 653 + char *buf, size_t buflen); 647 654 #else /* !CONFIG_CGROUPS */ 648 655 649 656 struct cgroup_subsys_state; ··· 673 666 static inline int cgroup_init(void) { return 0; } 674 667 static inline void cgroup_init_kthreadd(void) {} 675 668 static inline void cgroup_kthread_ready(void) {} 669 + static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp) 670 + { 671 + return NULL; 672 + } 676 673 677 674 static inline bool task_under_cgroup_hierarchy(struct task_struct *task, 678 675 struct cgroup *ancestor) 679 676 { 680 677 return true; 681 678 } 679 + 680 + static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, 681 + char *buf, size_t buflen) {} 682 682 #endif /* !CONFIG_CGROUPS */ 683 683 684 684 /*
+1 -1
include/linux/drbd.h
··· 51 51 #endif 52 52 53 53 extern const char *drbd_buildtag(void); 54 - #define REL_VERSION "8.4.7" 54 + #define REL_VERSION "8.4.10" 55 55 #define API_VERSION 1 56 56 #define PRO_VERSION_MIN 86 57 57 #define PRO_VERSION_MAX 101
+2 -1
include/linux/drbd_genl.h
··· 132 132 __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF) 133 133 __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF) 134 134 __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF) 135 - __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED) 135 + __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF) 136 + __flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF) 136 137 ) 137 138 138 139 GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
+7 -1
include/linux/drbd_limits.h
··· 209 209 #define DRBD_MD_FLUSHES_DEF 1 210 210 #define DRBD_TCP_CORK_DEF 1 211 211 #define DRBD_AL_UPDATES_DEF 1 212 + 212 213 /* We used to ignore the discard_zeroes_data setting. 213 214 * To not change established (and expected) behaviour, 214 215 * by default assume that, for discard_zeroes_data=0, 215 216 * we can make that an effective discard_zeroes_data=1, 216 217 * if we only explicitly zero-out unaligned partial chunks. */ 217 - #define DRBD_DISCARD_ZEROES_IF_ALIGNED 1 218 + #define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1 219 + 220 + /* Some backends pretend to support WRITE SAME, 221 + * but fail such requests when they are actually submitted. 222 + * This is to tell DRBD to not even try. */ 223 + #define DRBD_DISABLE_WRITE_SAME_DEF 0 218 224 219 225 #define DRBD_ALLOW_TWO_PRIMARIES_DEF 0 220 226 #define DRBD_ALWAYS_ASBP_DEF 0
+1
include/linux/fs.h
··· 429 429 #endif 430 430 struct block_device * bd_contains; 431 431 unsigned bd_block_size; 432 + u8 bd_partno; 432 433 struct hd_struct * bd_part; 433 434 /* number of times partitions within this device have been opened. */ 434 435 unsigned bd_part_count;
+7 -19
include/linux/genhd.h
··· 362 362 #define part_stat_sub(cpu, gendiskp, field, subnd) \ 363 363 part_stat_add(cpu, gendiskp, field, -subnd) 364 364 365 - static inline void part_inc_in_flight(struct hd_struct *part, int rw) 366 - { 367 - atomic_inc(&part->in_flight[rw]); 368 - if (part->partno) 369 - atomic_inc(&part_to_disk(part)->part0.in_flight[rw]); 370 - } 371 - 372 - static inline void part_dec_in_flight(struct hd_struct *part, int rw) 373 - { 374 - atomic_dec(&part->in_flight[rw]); 375 - if (part->partno) 376 - atomic_dec(&part_to_disk(part)->part0.in_flight[rw]); 377 - } 378 - 379 - static inline int part_in_flight(struct hd_struct *part) 380 - { 381 - return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]); 382 - } 365 + void part_in_flight(struct request_queue *q, struct hd_struct *part, 366 + unsigned int inflight[2]); 367 + void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, 368 + int rw); 369 + void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, 370 + int rw); 383 371 384 372 static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) 385 373 { ··· 383 395 } 384 396 385 397 /* block/blk-core.c */ 386 - extern void part_round_stats(int cpu, struct hd_struct *part); 398 + extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part); 387 399 388 400 /* block/genhd.c */ 389 401 extern void device_add_disk(struct device *parent, struct gendisk *disk);
+26 -2
include/linux/kernfs.h
··· 69 69 * following flag enables that behavior. 70 70 */ 71 71 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002, 72 + 73 + /* 74 + * The filesystem supports exportfs operation, so userspace can use 75 + * fhandle to access nodes of the fs. 76 + */ 77 + KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004, 72 78 }; 73 79 74 80 /* type-specific structures for kernfs_node union members */ ··· 99 93 struct kernfs_open_node *open; 100 94 loff_t size; 101 95 struct kernfs_node *notify_next; /* for kernfs_notify() */ 96 + }; 97 + 98 + /* represent a kernfs node */ 99 + union kernfs_node_id { 100 + struct { 101 + /* 102 + * blktrace will export this struct as a simplified 'struct 103 + * fid' (which is a big data struction), so userspace can use 104 + * it to find kernfs node. The layout must match the first two 105 + * fields of 'struct fid' exactly. 106 + */ 107 + u32 ino; 108 + u32 generation; 109 + }; 110 + u64 id; 102 111 }; 103 112 104 113 /* ··· 152 131 153 132 void *priv; 154 133 134 + union kernfs_node_id id; 155 135 unsigned short flags; 156 136 umode_t mode; 157 - unsigned int ino; 158 137 struct kernfs_iattrs *iattr; 159 138 }; 160 139 ··· 184 163 unsigned int flags; /* KERNFS_ROOT_* flags */ 185 164 186 165 /* private fields, do not use outside kernfs proper */ 187 - struct ida ino_ida; 166 + struct idr ino_idr; 167 + u32 next_generation; 188 168 struct kernfs_syscall_ops *syscall_ops; 189 169 190 170 /* list of kernfs_super_info of this root, protected by kernfs_mutex */ ··· 358 336 359 337 void kernfs_init(void); 360 338 339 + struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root, 340 + const union kernfs_node_id *id); 361 341 #else /* CONFIG_KERNFS */ 362 342 363 343 static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
+3 -3
include/trace/events/bcache.h
··· 21 21 ), 22 22 23 23 TP_fast_assign( 24 - __entry->dev = bio->bi_bdev->bd_dev; 24 + __entry->dev = bio_dev(bio); 25 25 __entry->orig_major = d->disk->major; 26 26 __entry->orig_minor = d->disk->first_minor; 27 27 __entry->sector = bio->bi_iter.bi_sector; ··· 98 98 ), 99 99 100 100 TP_fast_assign( 101 - __entry->dev = bio->bi_bdev->bd_dev; 101 + __entry->dev = bio_dev(bio); 102 102 __entry->sector = bio->bi_iter.bi_sector; 103 103 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 104 104 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 133 133 ), 134 134 135 135 TP_fast_assign( 136 - __entry->dev = bio->bi_bdev->bd_dev; 136 + __entry->dev = bio_dev(bio); 137 137 __entry->sector = bio->bi_iter.bi_sector; 138 138 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 139 139 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+8 -8
include/trace/events/block.h
··· 236 236 ), 237 237 238 238 TP_fast_assign( 239 - __entry->dev = bio->bi_bdev ? 240 - bio->bi_bdev->bd_dev : 0; 239 + __entry->dev = bio_dev(bio); 241 240 __entry->sector = bio->bi_iter.bi_sector; 242 241 __entry->nr_sector = bio_sectors(bio); 243 242 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 273 274 ), 274 275 275 276 TP_fast_assign( 276 - __entry->dev = bio->bi_bdev->bd_dev; 277 + __entry->dev = bio_dev(bio); 277 278 __entry->sector = bio->bi_iter.bi_sector; 278 279 __entry->nr_sector = bio_sectors(bio); 279 280 __entry->error = error; ··· 301 302 ), 302 303 303 304 TP_fast_assign( 304 - __entry->dev = bio->bi_bdev->bd_dev; 305 + __entry->dev = bio_dev(bio); 305 306 __entry->sector = bio->bi_iter.bi_sector; 306 307 __entry->nr_sector = bio_sectors(bio); 307 308 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 368 369 ), 369 370 370 371 TP_fast_assign( 371 - __entry->dev = bio->bi_bdev->bd_dev; 372 + __entry->dev = bio_dev(bio); 372 373 __entry->sector = bio->bi_iter.bi_sector; 373 374 __entry->nr_sector = bio_sectors(bio); 374 375 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 396 397 ), 397 398 398 399 TP_fast_assign( 399 - __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; 400 + __entry->dev = bio ? bio_dev(bio) : 0; 401 + __entry->dev = bio_dev(bio); 400 402 __entry->sector = bio ? bio->bi_iter.bi_sector : 0; 401 403 __entry->nr_sector = bio ? 
bio_sectors(bio) : 0; 402 404 blk_fill_rwbs(__entry->rwbs, ··· 532 532 ), 533 533 534 534 TP_fast_assign( 535 - __entry->dev = bio->bi_bdev->bd_dev; 535 + __entry->dev = bio_dev(bio); 536 536 __entry->sector = bio->bi_iter.bi_sector; 537 537 __entry->new_sector = new_sector; 538 538 blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ··· 573 573 ), 574 574 575 575 TP_fast_assign( 576 - __entry->dev = bio->bi_bdev->bd_dev; 576 + __entry->dev = bio_dev(bio); 577 577 __entry->sector = bio->bi_iter.bi_sector; 578 578 __entry->nr_sector = bio_sectors(bio); 579 579 __entry->old_dev = dev;
+1 -1
include/trace/events/f2fs.h
··· 829 829 830 830 TP_fast_assign( 831 831 __entry->dev = sb->s_dev; 832 - __entry->target = bio->bi_bdev->bd_dev; 832 + __entry->target = bio_dev(bio); 833 833 __entry->op = bio_op(bio); 834 834 __entry->op_flags = bio->bi_opf; 835 835 __entry->type = type;
+1 -1
include/trace/events/writeback.h
··· 136 136 137 137 static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) 138 138 { 139 - return wb->memcg_css->cgroup->kn->ino; 139 + return wb->memcg_css->cgroup->kn->id.ino; 140 140 } 141 141 142 142 static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+3
include/uapi/linux/blktrace_api.h
··· 52 52 __BLK_TA_REMAP, /* bio was remapped */ 53 53 __BLK_TA_ABORT, /* request aborted */ 54 54 __BLK_TA_DRV_DATA, /* driver-specific binary data */ 55 + __BLK_TA_CGROUP = 1 << 8, /* from a cgroup*/ 55 56 }; 56 57 57 58 /* ··· 62 61 __BLK_TN_PROCESS = 0, /* establish pid/name mapping */ 63 62 __BLK_TN_TIMESTAMP, /* include system clock */ 64 63 __BLK_TN_MESSAGE, /* Character string message */ 64 + __BLK_TN_CGROUP = __BLK_TA_CGROUP, /* from a cgroup */ 65 65 }; 66 66 67 67 ··· 109 107 __u32 cpu; /* on what cpu did it happen */ 110 108 __u16 error; /* completion error */ 111 109 __u16 pdu_len; /* length of data after this trace */ 110 + /* cgroup id will be stored here if exists */ 112 111 }; 113 112 114 113 /*
+14 -1
kernel/cgroup/cgroup.c
··· 1879 1879 &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops; 1880 1880 1881 1881 root->kf_root = kernfs_create_root(kf_sops, 1882 - KERNFS_ROOT_CREATE_DEACTIVATED, 1882 + KERNFS_ROOT_CREATE_DEACTIVATED | 1883 + KERNFS_ROOT_SUPPORT_EXPORTOP, 1883 1884 root_cgrp); 1884 1885 if (IS_ERR(root->kf_root)) { 1885 1886 ret = PTR_ERR(root->kf_root); ··· 5256 5255 return 0; 5257 5256 } 5258 5257 core_initcall(cgroup_wq_init); 5258 + 5259 + void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, 5260 + char *buf, size_t buflen) 5261 + { 5262 + struct kernfs_node *kn; 5263 + 5264 + kn = kernfs_get_node_by_id(cgrp_dfl_root.kf_root, id); 5265 + if (!kn) 5266 + return; 5267 + kernfs_path(kn, buf, buflen); 5268 + kernfs_put(kn); 5269 + } 5259 5270 5260 5271 /* 5261 5272 * proc_cgroup_show()
+2 -3
kernel/power/swap.c
··· 242 242 243 243 if (bio->bi_status) { 244 244 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", 245 - imajor(bio->bi_bdev->bd_inode), 246 - iminor(bio->bi_bdev->bd_inode), 245 + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), 247 246 (unsigned long long)bio->bi_iter.bi_sector); 248 247 } 249 248 ··· 269 270 270 271 bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); 271 272 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); 272 - bio->bi_bdev = hib_resume_bdev; 273 + bio_set_dev(bio, hib_resume_bdev); 273 274 bio_set_op_attrs(bio, op, op_flags); 274 275 275 276 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+181 -76
kernel/trace/blktrace.c
··· 27 27 #include <linux/time.h> 28 28 #include <linux/uaccess.h> 29 29 #include <linux/list.h> 30 + #include <linux/blk-cgroup.h> 30 31 31 32 #include "../../block/blk.h" 32 33 ··· 47 46 48 47 /* Select an alternative, minimalistic output than the original one */ 49 48 #define TRACE_BLK_OPT_CLASSIC 0x1 49 + #define TRACE_BLK_OPT_CGROUP 0x2 50 + #define TRACE_BLK_OPT_CGNAME 0x4 50 51 51 52 static struct tracer_opt blk_tracer_opts[] = { 52 53 /* Default disable the minimalistic output */ 53 54 { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, 55 + #ifdef CONFIG_BLK_CGROUP 56 + { TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) }, 57 + { TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) }, 58 + #endif 54 59 { } 55 60 }; 56 61 ··· 75 68 * Send out a notify message. 76 69 */ 77 70 static void trace_note(struct blk_trace *bt, pid_t pid, int action, 78 - const void *data, size_t len) 71 + const void *data, size_t len, 72 + union kernfs_node_id *cgid) 79 73 { 80 74 struct blk_io_trace *t; 81 75 struct ring_buffer_event *event = NULL; ··· 84 76 int pc = 0; 85 77 int cpu = smp_processor_id(); 86 78 bool blk_tracer = blk_tracer_enabled; 79 + ssize_t cgid_len = cgid ? sizeof(*cgid) : 0; 87 80 88 81 if (blk_tracer) { 89 82 buffer = blk_tr->trace_buffer.buffer; 90 83 pc = preempt_count(); 91 84 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 92 - sizeof(*t) + len, 85 + sizeof(*t) + len + cgid_len, 93 86 0, pc); 94 87 if (!event) 95 88 return; ··· 101 92 if (!bt->rchan) 102 93 return; 103 94 104 - t = relay_reserve(bt->rchan, sizeof(*t) + len); 95 + t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len); 105 96 if (t) { 106 97 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; 107 98 t->time = ktime_to_ns(ktime_get()); 108 99 record_it: 109 100 t->device = bt->dev; 110 - t->action = action; 101 + t->action = action | (cgid ? 
__BLK_TN_CGROUP : 0); 111 102 t->pid = pid; 112 103 t->cpu = cpu; 113 - t->pdu_len = len; 114 - memcpy((void *) t + sizeof(*t), data, len); 104 + t->pdu_len = len + cgid_len; 105 + if (cgid) 106 + memcpy((void *)t + sizeof(*t), cgid, cgid_len); 107 + memcpy((void *) t + sizeof(*t) + cgid_len, data, len); 115 108 116 109 if (blk_tracer) 117 110 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); ··· 133 122 spin_lock_irqsave(&running_trace_lock, flags); 134 123 list_for_each_entry(bt, &running_trace_list, running_list) { 135 124 trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, 136 - sizeof(tsk->comm)); 125 + sizeof(tsk->comm), NULL); 137 126 } 138 127 spin_unlock_irqrestore(&running_trace_lock, flags); 139 128 } ··· 150 139 words[1] = now.tv_nsec; 151 140 152 141 local_irq_save(flags); 153 - trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words)); 142 + trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL); 154 143 local_irq_restore(flags); 155 144 } 156 145 157 - void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) 146 + void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg, 147 + const char *fmt, ...) 158 148 { 159 149 int n; 160 150 va_list args; ··· 179 167 n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); 180 168 va_end(args); 181 169 182 - trace_note(bt, 0, BLK_TN_MESSAGE, buf, n); 170 + if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 171 + blkcg = NULL; 172 + #ifdef CONFIG_BLK_CGROUP 173 + trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 174 + blkcg ? 
cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL); 175 + #else 176 + trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL); 177 + #endif 183 178 local_irq_restore(flags); 184 179 } 185 180 EXPORT_SYMBOL_GPL(__trace_note_message); ··· 223 204 */ 224 205 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, 225 206 int op, int op_flags, u32 what, int error, int pdu_len, 226 - void *pdu_data) 207 + void *pdu_data, union kernfs_node_id *cgid) 227 208 { 228 209 struct task_struct *tsk = current; 229 210 struct ring_buffer_event *event = NULL; ··· 234 215 pid_t pid; 235 216 int cpu, pc = 0; 236 217 bool blk_tracer = blk_tracer_enabled; 218 + ssize_t cgid_len = cgid ? sizeof(*cgid) : 0; 237 219 238 220 if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer)) 239 221 return; ··· 249 229 what |= BLK_TC_ACT(BLK_TC_DISCARD); 250 230 if (op == REQ_OP_FLUSH) 251 231 what |= BLK_TC_ACT(BLK_TC_FLUSH); 232 + if (cgid) 233 + what |= __BLK_TA_CGROUP; 252 234 253 235 pid = tsk->pid; 254 236 if (act_log_check(bt, what, sector, pid)) ··· 263 241 buffer = blk_tr->trace_buffer.buffer; 264 242 pc = preempt_count(); 265 243 event = trace_buffer_lock_reserve(buffer, TRACE_BLK, 266 - sizeof(*t) + pdu_len, 244 + sizeof(*t) + pdu_len + cgid_len, 267 245 0, pc); 268 246 if (!event) 269 247 return; ··· 280 258 * from coming in and stepping on our toes. 
281 259 */ 282 260 local_irq_save(flags); 283 - t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len); 261 + t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len); 284 262 if (t) { 285 263 sequence = per_cpu_ptr(bt->sequence, cpu); 286 264 ··· 302 280 t->action = what; 303 281 t->device = bt->dev; 304 282 t->error = error; 305 - t->pdu_len = pdu_len; 283 + t->pdu_len = pdu_len + cgid_len; 306 284 285 + if (cgid_len) 286 + memcpy((void *)t + sizeof(*t), cgid, cgid_len); 307 287 if (pdu_len) 308 - memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); 288 + memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len); 309 289 310 290 if (blk_tracer) { 311 291 trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); ··· 383 359 return PTR_ERR(msg); 384 360 385 361 bt = filp->private_data; 386 - __trace_note_message(bt, "%s", msg); 362 + __trace_note_message(bt, NULL, "%s", msg); 387 363 kfree(msg); 388 364 389 365 return count; ··· 708 684 } 709 685 } 710 686 687 + #ifdef CONFIG_BLK_CGROUP 688 + static union kernfs_node_id * 689 + blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 690 + { 691 + struct blk_trace *bt = q->blk_trace; 692 + 693 + if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP)) 694 + return NULL; 695 + 696 + if (!bio->bi_css) 697 + return NULL; 698 + return cgroup_get_kernfs_id(bio->bi_css->cgroup); 699 + } 700 + #else 701 + static union kernfs_node_id * 702 + blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) 703 + { 704 + return NULL; 705 + } 706 + #endif 707 + 708 + static union kernfs_node_id * 709 + blk_trace_request_get_cgid(struct request_queue *q, struct request *rq) 710 + { 711 + if (!rq->bio) 712 + return NULL; 713 + /* Use the first bio */ 714 + return blk_trace_bio_get_cgid(q, rq->bio); 715 + } 716 + 711 717 /* 712 718 * blktrace probes 713 719 */ ··· 748 694 * @error: return status to log 749 695 * @nr_bytes: number of completed bytes 750 696 * @what: the action 697 + * @cgid: the cgroup info 751 698 * 
752 699 * Description: 753 700 * Records an action against a request. Will log the bio offset + size. 754 701 * 755 702 **/ 756 703 static void blk_add_trace_rq(struct request *rq, int error, 757 - unsigned int nr_bytes, u32 what) 704 + unsigned int nr_bytes, u32 what, 705 + union kernfs_node_id *cgid) 758 706 { 759 707 struct blk_trace *bt = rq->q->blk_trace; 760 708 ··· 769 713 what |= BLK_TC_ACT(BLK_TC_FS); 770 714 771 715 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq), 772 - rq->cmd_flags, what, error, 0, NULL); 716 + rq->cmd_flags, what, error, 0, NULL, cgid); 773 717 } 774 718 775 719 static void blk_add_trace_rq_insert(void *ignore, 776 720 struct request_queue *q, struct request *rq) 777 721 { 778 - blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT); 722 + blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, 723 + blk_trace_request_get_cgid(q, rq)); 779 724 } 780 725 781 726 static void blk_add_trace_rq_issue(void *ignore, 782 727 struct request_queue *q, struct request *rq) 783 728 { 784 - blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE); 729 + blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, 730 + blk_trace_request_get_cgid(q, rq)); 785 731 } 786 732 787 733 static void blk_add_trace_rq_requeue(void *ignore, 788 734 struct request_queue *q, 789 735 struct request *rq) 790 736 { 791 - blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE); 737 + blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, 738 + blk_trace_request_get_cgid(q, rq)); 792 739 } 793 740 794 741 static void blk_add_trace_rq_complete(void *ignore, struct request *rq, 795 742 int error, unsigned int nr_bytes) 796 743 { 797 - blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE); 744 + blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, 745 + blk_trace_request_get_cgid(rq->q, rq)); 798 746 } 799 747 800 748 /** ··· 813 753 * 814 754 **/ 815 755 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, 816 - u32 what, int 
error) 756 + u32 what, int error, union kernfs_node_id *cgid) 817 757 { 818 758 struct blk_trace *bt = q->blk_trace; 819 759 ··· 821 761 return; 822 762 823 763 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 824 - bio_op(bio), bio->bi_opf, what, error, 0, NULL); 764 + bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid); 825 765 } 826 766 827 767 static void blk_add_trace_bio_bounce(void *ignore, 828 768 struct request_queue *q, struct bio *bio) 829 769 { 830 - blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); 770 + blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0, 771 + blk_trace_bio_get_cgid(q, bio)); 831 772 } 832 773 833 774 static void blk_add_trace_bio_complete(void *ignore, 834 775 struct request_queue *q, struct bio *bio, 835 776 int error) 836 777 { 837 - blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); 778 + blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error, 779 + blk_trace_bio_get_cgid(q, bio)); 838 780 } 839 781 840 782 static void blk_add_trace_bio_backmerge(void *ignore, ··· 844 782 struct request *rq, 845 783 struct bio *bio) 846 784 { 847 - blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); 785 + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0, 786 + blk_trace_bio_get_cgid(q, bio)); 848 787 } 849 788 850 789 static void blk_add_trace_bio_frontmerge(void *ignore, ··· 853 790 struct request *rq, 854 791 struct bio *bio) 855 792 { 856 - blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); 793 + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0, 794 + blk_trace_bio_get_cgid(q, bio)); 857 795 } 858 796 859 797 static void blk_add_trace_bio_queue(void *ignore, 860 798 struct request_queue *q, struct bio *bio) 861 799 { 862 - blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); 800 + blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0, 801 + blk_trace_bio_get_cgid(q, bio)); 863 802 } 864 803 865 804 static void blk_add_trace_getrq(void *ignore, ··· 869 804 struct bio *bio, int rw) 870 805 { 871 806 if (bio) 872 - blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); 807 + 
blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0, 808 + blk_trace_bio_get_cgid(q, bio)); 873 809 else { 874 810 struct blk_trace *bt = q->blk_trace; 875 811 876 812 if (bt) 877 813 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0, 878 - NULL); 814 + NULL, NULL); 879 815 } 880 816 } 881 817 ··· 886 820 struct bio *bio, int rw) 887 821 { 888 822 if (bio) 889 - blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); 823 + blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0, 824 + blk_trace_bio_get_cgid(q, bio)); 890 825 else { 891 826 struct blk_trace *bt = q->blk_trace; 892 827 893 828 if (bt) 894 829 __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ, 895 - 0, 0, NULL); 830 + 0, 0, NULL, NULL); 896 831 } 897 832 } 898 833 ··· 902 835 struct blk_trace *bt = q->blk_trace; 903 836 904 837 if (bt) 905 - __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); 838 + __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL); 906 839 } 907 840 908 841 static void blk_add_trace_unplug(void *ignore, struct request_queue *q, ··· 919 852 else 920 853 what = BLK_TA_UNPLUG_TIMER; 921 854 922 - __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); 855 + __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL); 923 856 } 924 857 } 925 858 ··· 935 868 __blk_add_trace(bt, bio->bi_iter.bi_sector, 936 869 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf, 937 870 BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu), 938 - &rpdu); 871 + &rpdu, blk_trace_bio_get_cgid(q, bio)); 939 872 } 940 873 } 941 874 ··· 963 896 return; 964 897 965 898 r.device_from = cpu_to_be32(dev); 966 - r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); 899 + r.device_to = cpu_to_be32(bio_dev(bio)); 967 900 r.sector_from = cpu_to_be64(from); 968 901 969 902 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, 970 903 bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status, 971 - sizeof(r), &r); 904 + sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); 972 905 } 973 906 974 907 /** ··· 1001 934 1002 935 
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 1003 936 rq_data_dir(rq), 0, BLK_TA_REMAP, 0, 1004 - sizeof(r), &r); 937 + sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); 1005 938 } 1006 939 1007 940 /** ··· 1025 958 return; 1026 959 1027 960 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, 1028 - BLK_TA_DRV_DATA, 0, len, data); 961 + BLK_TA_DRV_DATA, 0, len, data, 962 + blk_trace_request_get_cgid(q, rq)); 1029 963 } 1030 964 EXPORT_SYMBOL_GPL(blk_add_driver_data); 1031 965 ··· 1099 1031 int i = 0; 1100 1032 int tc = t->action >> BLK_TC_SHIFT; 1101 1033 1102 - if (t->action == BLK_TN_MESSAGE) { 1034 + if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1103 1035 rwbs[i++] = 'N'; 1104 1036 goto out; 1105 1037 } ··· 1134 1066 return (const struct blk_io_trace *)ent; 1135 1067 } 1136 1068 1137 - static inline const void *pdu_start(const struct trace_entry *ent) 1069 + static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg) 1138 1070 { 1139 - return te_blk_io_trace(ent) + 1; 1071 + return (void *)(te_blk_io_trace(ent) + 1) + 1072 + (has_cg ? sizeof(union kernfs_node_id) : 0); 1073 + } 1074 + 1075 + static inline const void *cgid_start(const struct trace_entry *ent) 1076 + { 1077 + return (void *)(te_blk_io_trace(ent) + 1); 1078 + } 1079 + 1080 + static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg) 1081 + { 1082 + return te_blk_io_trace(ent)->pdu_len - 1083 + (has_cg ? 
sizeof(union kernfs_node_id) : 0); 1140 1084 } 1141 1085 1142 1086 static inline u32 t_action(const struct trace_entry *ent) ··· 1176 1096 return te_blk_io_trace(ent)->error; 1177 1097 } 1178 1098 1179 - static __u64 get_pdu_int(const struct trace_entry *ent) 1099 + static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) 1180 1100 { 1181 - const __u64 *val = pdu_start(ent); 1101 + const __u64 *val = pdu_start(ent, has_cg); 1182 1102 return be64_to_cpu(*val); 1183 1103 } 1184 1104 1185 1105 static void get_pdu_remap(const struct trace_entry *ent, 1186 - struct blk_io_trace_remap *r) 1106 + struct blk_io_trace_remap *r, bool has_cg) 1187 1107 { 1188 - const struct blk_io_trace_remap *__r = pdu_start(ent); 1108 + const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); 1189 1109 __u64 sector_from = __r->sector_from; 1190 1110 1191 1111 r->device_from = be32_to_cpu(__r->device_from); ··· 1193 1113 r->sector_from = be64_to_cpu(sector_from); 1194 1114 } 1195 1115 1196 - typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act); 1116 + typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act, 1117 + bool has_cg); 1197 1118 1198 - static void blk_log_action_classic(struct trace_iterator *iter, const char *act) 1119 + static void blk_log_action_classic(struct trace_iterator *iter, const char *act, 1120 + bool has_cg) 1199 1121 { 1200 1122 char rwbs[RWBS_LEN]; 1201 1123 unsigned long long ts = iter->ts; ··· 1213 1131 secs, nsec_rem, iter->ent->pid, act, rwbs); 1214 1132 } 1215 1133 1216 - static void blk_log_action(struct trace_iterator *iter, const char *act) 1134 + static void blk_log_action(struct trace_iterator *iter, const char *act, 1135 + bool has_cg) 1217 1136 { 1218 1137 char rwbs[RWBS_LEN]; 1219 1138 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); 1220 1139 1221 1140 fill_rwbs(rwbs, t); 1222 - trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", 1223 - MAJOR(t->device), MINOR(t->device), act, 
rwbs); 1141 + if (has_cg) { 1142 + const union kernfs_node_id *id = cgid_start(iter->ent); 1143 + 1144 + if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) { 1145 + char blkcg_name_buf[NAME_MAX + 1] = "<...>"; 1146 + 1147 + cgroup_path_from_kernfs_id(id, blkcg_name_buf, 1148 + sizeof(blkcg_name_buf)); 1149 + trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ", 1150 + MAJOR(t->device), MINOR(t->device), 1151 + blkcg_name_buf, act, rwbs); 1152 + } else 1153 + trace_seq_printf(&iter->seq, 1154 + "%3d,%-3d %x,%-x %2s %3s ", 1155 + MAJOR(t->device), MINOR(t->device), 1156 + id->ino, id->generation, act, rwbs); 1157 + } else 1158 + trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", 1159 + MAJOR(t->device), MINOR(t->device), act, rwbs); 1224 1160 } 1225 1161 1226 - static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) 1162 + static void blk_log_dump_pdu(struct trace_seq *s, 1163 + const struct trace_entry *ent, bool has_cg) 1227 1164 { 1228 1165 const unsigned char *pdu_buf; 1229 1166 int pdu_len; 1230 1167 int i, end; 1231 1168 1232 - pdu_buf = pdu_start(ent); 1233 - pdu_len = te_blk_io_trace(ent)->pdu_len; 1169 + pdu_buf = pdu_start(ent, has_cg); 1170 + pdu_len = pdu_real_len(ent, has_cg); 1234 1171 1235 1172 if (!pdu_len) 1236 1173 return; ··· 1280 1179 trace_seq_puts(s, ") "); 1281 1180 } 1282 1181 1283 - static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) 1182 + static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1284 1183 { 1285 1184 char cmd[TASK_COMM_LEN]; 1286 1185 ··· 1288 1187 1289 1188 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1290 1189 trace_seq_printf(s, "%u ", t_bytes(ent)); 1291 - blk_log_dump_pdu(s, ent); 1190 + blk_log_dump_pdu(s, ent, has_cg); 1292 1191 trace_seq_printf(s, "[%s]\n", cmd); 1293 1192 } else { 1294 1193 if (t_sec(ent)) ··· 1300 1199 } 1301 1200 1302 1201 static void blk_log_with_error(struct trace_seq *s, 1303 - const struct trace_entry *ent) 
1202 + const struct trace_entry *ent, bool has_cg) 1304 1203 { 1305 1204 if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { 1306 - blk_log_dump_pdu(s, ent); 1205 + blk_log_dump_pdu(s, ent, has_cg); 1307 1206 trace_seq_printf(s, "[%d]\n", t_error(ent)); 1308 1207 } else { 1309 1208 if (t_sec(ent)) ··· 1316 1215 } 1317 1216 } 1318 1217 1319 - static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) 1218 + static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1320 1219 { 1321 1220 struct blk_io_trace_remap r = { .device_from = 0, }; 1322 1221 1323 - get_pdu_remap(ent, &r); 1222 + get_pdu_remap(ent, &r, has_cg); 1324 1223 trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", 1325 1224 t_sector(ent), t_sec(ent), 1326 1225 MAJOR(r.device_from), MINOR(r.device_from), 1327 1226 (unsigned long long)r.sector_from); 1328 1227 } 1329 1228 1330 - static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) 1229 + static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1331 1230 { 1332 1231 char cmd[TASK_COMM_LEN]; 1333 1232 ··· 1336 1235 trace_seq_printf(s, "[%s]\n", cmd); 1337 1236 } 1338 1237 1339 - static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) 1238 + static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1340 1239 { 1341 1240 char cmd[TASK_COMM_LEN]; 1342 1241 1343 1242 trace_find_cmdline(ent->pid, cmd); 1344 1243 1345 - trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); 1244 + trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg)); 1346 1245 } 1347 1246 1348 - static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent) 1247 + static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) 1349 1248 { 1350 1249 char cmd[TASK_COMM_LEN]; 1351 1250 1352 1251 trace_find_cmdline(ent->pid, cmd); 1353 1252 1354 1253 trace_seq_printf(s, "%llu / %llu 
[%s]\n", t_sector(ent), 1355 - get_pdu_int(ent), cmd); 1254 + get_pdu_int(ent, has_cg), cmd); 1356 1255 } 1357 1256 1358 - static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) 1257 + static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent, 1258 + bool has_cg) 1359 1259 { 1360 - const struct blk_io_trace *t = te_blk_io_trace(ent); 1361 1260 1362 - trace_seq_putmem(s, t + 1, t->pdu_len); 1261 + trace_seq_putmem(s, pdu_start(ent, has_cg), 1262 + pdu_real_len(ent, has_cg)); 1363 1263 trace_seq_putc(s, '\n'); 1364 1264 } 1365 1265 ··· 1400 1298 1401 1299 static const struct { 1402 1300 const char *act[2]; 1403 - void (*print)(struct trace_seq *s, const struct trace_entry *ent); 1301 + void (*print)(struct trace_seq *s, const struct trace_entry *ent, 1302 + bool has_cg); 1404 1303 } what2act[] = { 1405 1304 [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, 1406 1305 [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, ··· 1429 1326 u16 what; 1430 1327 bool long_act; 1431 1328 blk_log_action_t *log_action; 1329 + bool has_cg; 1432 1330 1433 1331 t = te_blk_io_trace(iter->ent); 1434 - what = t->action & ((1 << BLK_TC_SHIFT) - 1); 1332 + what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP; 1435 1333 long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); 1436 1334 log_action = classic ? &blk_log_action_classic : &blk_log_action; 1335 + has_cg = t->action & __BLK_TA_CGROUP; 1437 1336 1438 - if (t->action == BLK_TN_MESSAGE) { 1439 - log_action(iter, long_act ? "message" : "m"); 1440 - blk_log_msg(s, iter->ent); 1337 + if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) { 1338 + log_action(iter, long_act ? 
"message" : "m", has_cg); 1339 + blk_log_msg(s, iter->ent, has_cg); 1441 1340 return trace_handle_return(s); 1442 1341 } 1443 1342 1444 1343 if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) 1445 1344 trace_seq_printf(s, "Unknown action %x\n", what); 1446 1345 else { 1447 - log_action(iter, what2act[what].act[long_act]); 1448 - what2act[what].print(s, iter->ent); 1346 + log_action(iter, what2act[what].act[long_act], has_cg); 1347 + what2act[what].print(s, iter->ent, has_cg); 1449 1348 } 1450 1349 1451 1350 return trace_handle_return(s);
+9 -8
mm/page_io.c
··· 33 33 34 34 bio = bio_alloc(gfp_flags, nr); 35 35 if (bio) { 36 - bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); 36 + struct block_device *bdev; 37 + 38 + bio->bi_iter.bi_sector = map_swap_page(page, &bdev); 39 + bio_set_dev(bio, bdev); 37 40 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; 38 41 bio->bi_end_io = end_io; 39 42 ··· 63 60 */ 64 61 set_page_dirty(page); 65 62 pr_alert("Write-error on swap-device (%u:%u:%llu)\n", 66 - imajor(bio->bi_bdev->bd_inode), 67 - iminor(bio->bi_bdev->bd_inode), 63 + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), 68 64 (unsigned long long)bio->bi_iter.bi_sector); 69 65 ClearPageReclaim(page); 70 66 } ··· 128 126 SetPageError(page); 129 127 ClearPageUptodate(page); 130 128 pr_alert("Read-error on swap-device (%u:%u:%llu)\n", 131 - imajor(bio->bi_bdev->bd_inode), 132 - iminor(bio->bi_bdev->bd_inode), 129 + MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), 133 130 (unsigned long long)bio->bi_iter.bi_sector); 134 131 goto out; 135 132 } ··· 352 351 int ret = 0; 353 352 struct swap_info_struct *sis = page_swap_info(page); 354 353 blk_qc_t qc; 355 - struct block_device *bdev; 354 + struct gendisk *disk; 356 355 357 356 VM_BUG_ON_PAGE(!PageSwapCache(page), page); 358 357 VM_BUG_ON_PAGE(!PageLocked(page), page); ··· 391 390 ret = -ENOMEM; 392 391 goto out; 393 392 } 394 - bdev = bio->bi_bdev; 393 + disk = bio->bi_disk; 395 394 /* 396 395 * Keep this task valid during swap readpage because the oom killer may 397 396 * attempt to access it in the page fault retry time check. ··· 407 406 if (!READ_ONCE(bio->bi_private)) 408 407 break; 409 408 410 - if (!blk_mq_poll(bdev_get_queue(bdev), qc)) 409 + if (!blk_mq_poll(disk->queue, qc)) 411 410 break; 412 411 } 413 412 __set_current_state(TASK_RUNNING);