1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2009-2011 Red Hat, Inc.
4  *
5  * Author: Mikulas Patocka <mpatocka@redhat.com>
6  *
7  * This file is released under the GPL.
8  */
9 
10 #ifndef _LINUX_DM_BUFIO_H
11 #define _LINUX_DM_BUFIO_H
12 
13 #include <linux/blkdev.h>
14 #include <linux/types.h>
15 
16 /*----------------------------------------------------------------*/
17 
18 struct dm_bufio_client;
19 struct dm_buffer;
20 
21 /*
22  * Flags for dm_bufio_client_create
23  */
24 #define DM_BUFIO_CLIENT_NO_SLEEP 0x1
25 
/*
 * Create a buffered IO cache on a given device.
 *
 * @bdev: underlying block device whose blocks are cached
 * @block_size: size of each cached block in bytes
 * @reserved_buffers: number of buffers kept in reserve for the one
 *	privileged holder (see the deadlock-avoidance rules below)
 * @aux_size: size in bytes of per-buffer auxiliary data, retrievable
 *	later via dm_bufio_get_aux_data()
 * @alloc_callback: optional hook called for a buffer on allocation
 *	(NOTE(review): exact invocation point is not visible in this
 *	header — confirm against the dm-bufio implementation)
 * @write_callback: optional hook called for a buffer around writeback
 *	(NOTE(review): confirm timing against the implementation)
 * @flags: bitmask of DM_BUFIO_CLIENT_* flags, e.g.
 *	DM_BUFIO_CLIENT_NO_SLEEP
 *
 * NOTE(review): the failure-return convention (NULL vs. ERR_PTR) is not
 * visible in this header — confirm before checking the result.
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
		       unsigned int reserved_buffers, unsigned int aux_size,
		       void (*alloc_callback)(struct dm_buffer *),
		       void (*write_callback)(struct dm_buffer *),
		       unsigned int flags);
35 
36 /*
37  * Release a buffered IO cache.
38  */
39 void dm_bufio_client_destroy(struct dm_bufio_client *c);
40 
/*
 * Reset the client's cache state.
 * NOTE(review): presumably drops cached buffers so that subsequent reads
 * go back to the device — the exact semantics (and whether I/O may be in
 * flight during the call) are not visible in this header; confirm against
 * the dm-bufio implementation.
 */
void dm_bufio_client_reset(struct dm_bufio_client *c);
42 
43 /*
44  * Set the sector range.
45  * When this function is called, there must be no I/O in progress on the bufio
46  * client.
47  */
48 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
49 
50 /*
51  * WARNING: to avoid deadlocks, these conditions are observed:
52  *
53  * - At most one thread can hold at most "reserved_buffers" simultaneously.
 * - Every other thread can hold at most one buffer.
 * - Threads which call only dm_bufio_get can hold an unlimited number of
 *   buffers.
57  */
58 
59 /*
60  * Read a given block from disk. Returns pointer to data.  Returns a
61  * pointer to dm_buffer that can be used to release the buffer or to make
62  * it dirty.
63  */
64 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
65 		    struct dm_buffer **bp);
66 
67 /*
68  * Like dm_bufio_read, but return buffer from cache, don't read
69  * it. If the buffer is not in the cache, return NULL.
70  */
71 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
72 		   struct dm_buffer **bp);
73 
74 /*
75  * Like dm_bufio_read, but don't read anything from the disk.  It is
76  * expected that the caller initializes the buffer and marks it dirty.
77  */
78 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
79 		   struct dm_buffer **bp);
80 
81 /*
82  * Prefetch the specified blocks to the cache.
83  * The function starts to read the blocks and returns without waiting for
84  * I/O to finish.
85  */
86 void dm_bufio_prefetch(struct dm_bufio_client *c,
87 		       sector_t block, unsigned int n_blocks);
88 
89 /*
 * Release a reference obtained with dm_bufio_{read,get,new}. The data
 * pointer and the dm_buffer pointer are no longer valid after this call.
92  */
93 void dm_bufio_release(struct dm_buffer *b);
94 
95 /*
96  * Mark a buffer dirty. It should be called after the buffer is modified.
97  *
98  * In case of memory pressure, the buffer may be written after
99  * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers.  So
100  * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
101  * the actual writing may occur earlier.
102  */
103 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
104 
105 /*
106  * Mark a part of the buffer dirty.
107  *
108  * The specified part of the buffer is scheduled to be written. dm-bufio may
109  * write the specified part of the buffer or it may write a larger superset.
110  */
111 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
112 					unsigned int start, unsigned int end);
113 
114 /*
115  * Initiate writing of dirty buffers, without waiting for completion.
116  */
117 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
118 
119 /*
120  * Write all dirty buffers. Guarantees that all dirty buffers created prior
121  * to this call are on disk when this call exits.
122  */
123 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
124 
125 /*
126  * Send an empty write barrier to the device to flush hardware disk cache.
127  */
128 int dm_bufio_issue_flush(struct dm_bufio_client *c);
129 
130 /*
131  * Send a discard request to the underlying device.
132  */
133 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
134 
135 /*
136  * Free the given buffer.
137  * This is just a hint, if the buffer is in use or dirty, this function
138  * does nothing.
139  */
140 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
141 
142 /*
143  * Free the given range of buffers.
144  * This is just a hint, if the buffer is in use or dirty, this function
145  * does nothing.
146  */
147 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
148 
149 /*
150  * Set the minimum number of buffers before cleanup happens.
151  */
152 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
153 
/* Block size in bytes, as passed to dm_bufio_client_create(). */
unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
/*
 * Size of the underlying device. NOTE(review): the unit (blocks vs.
 * 512-byte sectors) is not visible in this header — confirm against the
 * implementation before doing arithmetic with it.
 */
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
/* The dm-io client this bufio client uses internally for I/O. */
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
/* The block number this buffer caches. */
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
/* Pointer to the buffer's data (block_size bytes). */
void *dm_bufio_get_block_data(struct dm_buffer *b);
/*
 * Pointer to the buffer's per-buffer auxiliary area (aux_size bytes,
 * see dm_bufio_client_create()).
 */
void *dm_bufio_get_aux_data(struct dm_buffer *b);
/* The client that owns this buffer. */
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
161 
162 /*----------------------------------------------------------------*/
163 
164 #endif
165