// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
			      origin == NETFS_DIO_READ ||
			      origin == NETFS_DIO_WRITE);
	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
	int ret;

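	/* Wait for an object to become available from the mempool, sleeping
	 * briefly between attempts rather than failing the allocation.
	 */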
	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	rreq->start = start;
	rreq->len = len;
	rreq->upper_len = len;
	rreq->origin = origin;
	rreq->netfs_ops = ctx->ops;
	rreq->mapping = mapping;
	rreq->inode = inode;
	rreq->i_size = i_size_read(inode);
	rreq->debug_id = atomic_inc_return(&debug_ids);
	rreq->wsize = INT_MAX;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	INIT_LIST_HEAD(&rreq->subrequests);
	INIT_WORK(&rreq->work, NULL);
	refcount_set(&rreq->ref, 1);

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (cached) {
		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
		if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
			/* Filesystem uses deprecated PG_private_2 marking. */
			__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
	}
	if (file && file->f_flags & O_NONBLOCK)
		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
			return ERR_PTR(ret);
		}
	}

	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}

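/*
 * Get an additional ref on a request and log the new refcount in the
 * tracepoint.
 */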
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

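/*
 * Remove and release all the subrequests still attached to a request, both on
 * the main list and on each I/O stream, dropping the refs they hold.
 */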
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
					  struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		netfs_put_subrequest(subreq, was_async,
				     netfs_sreq_trace_put_clear);
	}

	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, was_async,
					     netfs_sreq_trace_put_clear);
		}
	}
}

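/*
 * RCU callback to return a dead request to its mempool once any RCU-protected
 * readers are done with it.
 */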
static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}

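/*
 * Tear down a request: clear out the subrequests, let the filesystem and the
 * cache clean up, unpin any pinned user pages and then hand the request back
 * to the mempool after an RCU grace period.
 */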
static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq, false);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

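/*
 * Drop a ref on a request and free it on the final put.  If the put is made
 * from an asynchronous completion context, the actual freeing is punted to a
 * workqueue.
 */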
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead) {
			if (was_async) {
				rreq->work.func = netfs_free_request;
				if (!queue_work(system_unbound_wq, &rreq->work))
					BUG();
			} else {
				netfs_free_request(&rreq->work);
			}
		}
	}
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(mempool, GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}

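/*
 * Get an additional ref on a subrequest and log the new refcount in the
 * tracepoint.
 */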
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

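/*
 * Free a subrequest, returning it to its mempool and dropping the ref it
 * holds on its parent request.
 */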
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
				  bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}

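/*
 * Drop a ref on a subrequest and free it on the final put.
 */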
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
			  enum netfs_sreq_ref_trace what)
{
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq, was_async);
}