/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_BUFIO_H
#define _LINUX_DM_BUFIO_H

#include <linux/blkdev.h>
#include <linux/types.h>

/*----------------------------------------------------------------*/

struct dm_bufio_client;
struct dm_buffer;

/*
 * Flags for dm_bufio_client_create
 */
#define DM_BUFIO_CLIENT_NO_SLEEP 0x1

/*
 * Create a buffered IO cache on a given device
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
		       unsigned int reserved_buffers, unsigned int aux_size,
		       void (*alloc_callback)(struct dm_buffer *),
		       void (*write_callback)(struct dm_buffer *),
		       unsigned int flags);

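/*
 * A minimal client-creation sketch (illustrative only, not part of this
 * header). "bdev" is assumed to be an already opened block device; error
 * handling follows the ERR_PTR convention used by dm-bufio.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */
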
/*
 * Release a buffered IO cache.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c);

/*
 * Set the sector range.
 * When this function is called, there must be no I/O in progress on the bufio
 * client.
 */
void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);

/*
 * WARNING: to avoid deadlocks, these conditions must be observed:
 *
 * - At most one thread can hold at most "reserved_buffers" simultaneously.
 * - All other threads can hold at most one buffer each.
 * - Threads which call only dm_bufio_get can hold an unlimited number of
 *   buffers.
 */

/*
 * Read a given block from disk. Returns a pointer to the data and, via *bp,
 * a pointer to the dm_buffer that can be used to release the buffer or to
 * mark it dirty.
 */
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp);

/*
 * Like dm_bufio_read, but only return the buffer if it is already in the
 * cache; no disk I/O is issued. If the buffer is not in the cache, return
 * NULL.
 */
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);

/*
 * Like dm_bufio_read, but don't read anything from the disk. It is
 * expected that the caller initializes the buffer and marks it dirty.
 */
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp);

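/*
 * A read-side usage sketch (illustrative only): dm_bufio_read may return an
 * ERR_PTR on I/O error, so the data pointer is checked before use and the
 * buffer reference is dropped afterwards. "c" and "block" are assumed to
 * come from the caller.
 *
 *	struct dm_buffer *b;
 *	void *data;
 *
 *	data = dm_bufio_read(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... examine data ...
 *	dm_bufio_release(b);
 */
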
/*
 * Prefetch the specified blocks into the cache.
 * The function starts reading the blocks and returns without waiting for
 * the I/O to finish.
 */
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned int n_blocks);

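/*
 * A prefetch sketch (illustrative only): start non-blocking reads for a run
 * of blocks that will be needed shortly; a later dm_bufio_read of the same
 * block waits for the already started I/O instead of issuing a new one.
 *
 *	dm_bufio_prefetch(c, first_block, n_blocks);
 *	for (i = 0; i < n_blocks; i++) {
 *		data = dm_bufio_read(c, first_block + i, &b);
 *		if (IS_ERR(data))
 *			return PTR_ERR(data);
 *		...
 *		dm_bufio_release(b);
 *	}
 */
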
/*
 * Release a reference obtained with dm_bufio_{read,get,new}. The data
 * pointer and the dm_buffer pointer are no longer valid after this call.
 */
void dm_bufio_release(struct dm_buffer *b);

/*
 * Mark a buffer dirty. It should be called after the buffer is modified.
 *
 * Under memory pressure, the buffer may be written out after
 * dm_bufio_mark_buffer_dirty but before dm_bufio_write_dirty_buffers is
 * called. So dm_bufio_write_dirty_buffers guarantees that the buffer is
 * on disk, but the actual write may occur earlier.
 */
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);

/*
 * Mark a part of the buffer dirty.
 *
 * The specified part of the buffer is scheduled to be written. dm-bufio may
 * write the specified part of the buffer or it may write a larger superset.
 */
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned int start, unsigned int end);

/*
 * Initiate writing of dirty buffers, without waiting for completion.
 */
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);

/*
 * Write all dirty buffers. Guarantees that all dirty buffers created prior
 * to this call are on disk when this call exits.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);

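/*
 * A write-side sketch (illustrative only): allocate a block without reading
 * it, fill it, mark it dirty and then commit it to disk. "c" and "block" are
 * assumed to come from the caller.
 *
 *	data = dm_bufio_new(c, block, &b);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(b);
 *	dm_bufio_release(b);
 *	r = dm_bufio_write_dirty_buffers(c);
 */
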
/*
 * Send an empty write barrier to the device to flush hardware disk cache.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c);

/*
 * Send a discard request to the underlying device.
 */
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);

/*
 * Like dm_bufio_release but also move the buffer to the new
 * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);

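/*
 * A relocation sketch (illustrative only): hand the cached contents of "b"
 * over to "new_block" and persist them; as with dm_bufio_release, the buffer
 * pointer must not be used after this call.
 *
 *	dm_bufio_release_move(b, new_block);
 *	r = dm_bufio_write_dirty_buffers(c);
 */
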
/*
 * Free the given buffer.
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);

/*
 * Free the given range of buffers.
 * This is just a hint; buffers that are in use or dirty are left in place.
 */
void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);

/*
 * Set the minimum number of buffers before cleanup happens.
 */
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);

unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);

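/*
 * An accessor sketch (illustrative only): if the client was created with a
 * non-zero aux_size, each buffer carries a private aux area that can be
 * reached from a callback. "struct my_aux" and my_fill_aux() are
 * hypothetical.
 *
 *	static void my_fill_aux(struct dm_buffer *b)
 *	{
 *		struct my_aux *aux = dm_bufio_get_aux_data(b);
 *
 *		aux->block = dm_bufio_get_block_number(b);
 *		aux->data = dm_bufio_get_block_data(b);
 *	}
 */
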
/*----------------------------------------------------------------*/

#endif