/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elv_change_ctx;

/*
 * Default upper limit for the software max_sectors limit used for regular I/Os.
 * This can be increased through sysfs.
 *
 * This should not be confused with the max_hw_sectors limit that is entirely
 * controlled by the block device driver, usually based on hardware limits.
 */
#define BLK_DEF_MAX_SECTORS_CAP	(SZ_4M >> SECTOR_SHIFT)

#define BLK_DEV_MAX_SECTORS	(LLONG_MAX >> 9)
#define BLK_MIN_SEGMENT_SIZE	4096

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern const struct kobj_type blk_queue_ktype;
extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
	struct rcu_head		rcu_head;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
		gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
		struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio, bool split);
void bio_await_chain(struct bio *bio);

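/*
 * Try to take a reference on q->q_usage_counter without blocking.  This fails
 * if the queue is frozen, or if the queue is in pm_only mode and the caller is
 * not on the runtime PM resume path.
 */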
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

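/*
 * Enter the queue for @bio, falling back to the blocking slow path if the
 * fast, non-blocking attempt fails.  Returns 0 on success or a negative errno
 * if the queue could not be entered.
 */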
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}

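/*
 * Wait for an I/O completion, waking up periodically so that long-running I/O
 * does not trigger the hung task watchdog.
 */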
static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload);
void blkdev_put_no_open(struct block_device *bdev);

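/* Number of bio_vecs embedded inline in struct bio for small I/Os. */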
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

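/* Return true if @rq is a candidate for merging with bios or other requests. */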
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request is handled as a normal read/write request, so
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

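/* Maximum number of segments allowed for @rq; discards have their own limit. */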
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

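/* Result of an attempt to merge a bio into an existing request. */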
enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

void elv_update_nr_hw_queues(struct request_queue *q,
		struct elv_change_ctx *ctx);
void elevator_set_default(struct request_queue *q);
void elevator_set_none(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0] is
 * always valid if a bio has data. The check might lead to occasional false
 * positives when bios are cloned, but compared to the performance impact of
 * cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	if (lim->chunk_sectors)
		return true;
	if (bio->bi_vcnt != 1)
		return true;
	return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset >
		lim->min_segment_size;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim:   Request queue limits.
 * @paddr: address of the range to add
 * @len:   maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

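/* Mark @req as not mergeable and drop it as @q's cached merge candidate. */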
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
static inline bool blk_req_bio_is_zone_append(struct request *rq,
		struct bio *bio)
{
	return req_op(rq) == REQ_OP_ZONE_APPEND ||
		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
void blk_zone_mgmt_bio_endio(struct bio *bio);
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * Zone management BIOs may impact zone write plugs (e.g. a zone reset
	 * changes a zone write plug's zone write pointer offset), but these
	 * operations do not go through zone write plugging as they may operate
	 * on zones that do not have a zone write plug.
	 * blk_zone_mgmt_bio_endio() handles the potential changes to the zone
	 * write plugs that are present.
	 */
	if (op_is_zone_mgmt(bio_op(bio))) {
		blk_zone_mgmt_bio_endio(bio);
		return;
	}

	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline bool blk_req_bio_is_zone_append(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_append_update_request_bio(struct request *rq,
		struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

int should_fail_bio(struct bio *bio);
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
		unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

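/*
 * Get the current time in nanoseconds.  When called from task context inside
 * a plugged section, the timestamp is cached in the plug and reused so that
 * repeated calls don't keep hitting ktime_get_ns().
 */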
static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

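/*
 * Lockdep annotations that model queue freezing as acquiring and releasing
 * q->io_lockdep_map and q->q_lockdep_map, so that deadlocks between queue
 * freezing and I/O submission can be detected.
 */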
#ifdef CONFIG_LOCKDEP
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
	if (!q->mq_freeze_disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!q->mq_freeze_queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
	if (!q->mq_freeze_queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!q->mq_freeze_disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}
#else
static inline void blk_freeze_acquire_lock(struct request_queue *q)
{
}
static inline void blk_unfreeze_release_lock(struct request_queue *q)
{
}
#endif

#endif /* BLK_INTERNAL_H */