/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

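/*
 * Opportunistically grab a reference on q->q_usage_counter. Returns false
 * (without holding a reference) if the queue is frozen, or if it is in
 * pm_only mode and @pm does not allow bypassing that restriction.
 */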
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

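/*
 * Wait for an I/O completion while periodically waking up, so that very
 * long running I/O does not trigger the hung task watchdog.
 */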
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

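/*
 * Two bio_vecs can only be merged into one hardware segment if they are
 * physically contiguous and do not straddle the queue's segment boundary.
 * Xen imposes an additional constraint, and KMSAN disables merging entirely.
 */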
static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

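/*
 * Check whether a request is a candidate for merging at all. Passthrough,
 * flush, write-zeroes and zone-append requests, as well as requests marked
 * with any no-merge flag, are never merged.
 */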
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the request will be a normal read/write request. The ranges
 *     need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

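/*
 * Return the maximum number of sectors the queue accepts for the operation
 * carried by @rq: discard/secure-erase, write-zeroes and atomic writes all
 * have their own limits, everything else uses max_sectors.
 */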
static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

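/* Result of attempting to merge a bio into an existing request. */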
enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio: bio to be split
 * @lim: queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);

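/*
 * Mark @req as no longer mergeable and forget it as the queue's cached
 * last merge candidate.
 */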
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

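/*
 * Bounce buffering is only needed when the queue asks for it
 * (BLK_FEAT_BOUNCE_HIGH) and highmem pages actually exist on this system.
 */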
static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn < max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
		struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * lookup the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
		struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

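/*
 * Return the current time in nanoseconds. Inside a plug the first reading
 * is cached in plug->cur_ktime and reused, so a batch of submissions does
 * not pay for one ktime_get_ns() call per request.
 */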
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

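/*
 * Record the current time and the (truncated) bio size in issue->value
 * while preserving the reserved top bit.
 */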
static inline void bio_issue_init(struct bio_issue *issue,
		sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

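/*
 * Lockdep-only annotations: queue freezing is modelled as acquiring two
 * rwsem lockdep maps, so that invalid lock ordering around freeze/unfreeze
 * can be reported.
 */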
#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */