/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
 * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <crypto/algapi.h>

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

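/*
 * Illustrative example (added here, not part of the original header): build a
 * two-entry scatterlist whose second entry chains onto an existing list, the
 * same pattern scatterwalk_get_sglist() and scatterwalk_ffwd() use
 * internally. The buffer names are hypothetical.
 *
 *	struct scatterlist sg_out[2];
 *
 *	sg_init_table(sg_out, 2);
 *	sg_set_buf(sg_out, first_buf, first_len);
 *	scatterwalk_crypto_chain(sg_out, rest, 2);
 *
 * If @rest were NULL, sg_out[0] would instead be marked as the end of the
 * list.
 */
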
static inline void scatterwalk_start(struct scatter_walk *walk,
				     struct scatterlist *sg)
{
	walk->sg = sg;
	walk->offset = sg->offset;
}

/*
 * This is equivalent to scatterwalk_start(walk, sg) followed by
 * scatterwalk_skip(walk, pos).
 */
static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
					    struct scatterlist *sg,
					    unsigned int pos)
{
	while (pos > sg->length) {
		pos -= sg->length;
		sg = sg_next(sg);
	}
	walk->sg = sg;
	walk->offset = sg->offset + pos;
}

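/*
 * Illustrative example (added here, not part of the original header): begin a
 * walk at a byte offset into a scatterlist, e.g. to skip over the associated
 * data of an AEAD request. "req" is hypothetical.
 *
 *	struct scatter_walk walk;
 *
 *	scatterwalk_start_at_pos(&walk, req->src, req->assoclen);
 *
 * The walk is now positioned at the first byte following the associated
 * data, regardless of how the data is split across scatterlist entries.
 */
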
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_sg;
	unsigned int limit;

	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;

	/*
	 * HIGHMEM case: the page may have to be mapped into memory. To avoid
	 * the complexity of having to map multiple pages at once per sg entry,
	 * clamp the returned length to not cross a page boundary.
	 *
	 * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
	 * already mapped contiguously in the kernel's direct map. For improved
	 * performance, allow the walker to return data segments that cross a
	 * page boundary. Do still cap the length to PAGE_SIZE, since some
	 * users rely on that to avoid disabling preemption for too long when
	 * using SIMD. It's also needed for when skcipher_walk uses a bounce
	 * page due to the data not being aligned to the algorithm's alignmask.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		limit = PAGE_SIZE - offset_in_page(walk->offset);
	else
		limit = PAGE_SIZE;

	return min3(nbytes, len_this_sg, limit);
}

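/*
 * Worked example (added for illustration, assuming 4 KiB pages): with
 * CONFIG_HIGHMEM and walk->offset 4000 bytes into a page, the limit is
 * 4096 - 4000 = 96 bytes, so a caller never has to map more than one page at
 * a time. Without CONFIG_HIGHMEM the limit is a flat 4096 bytes, even when
 * the returned segment crosses into the next page of the sg entry.
 */
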
/*
 * Create a scatterlist that represents the remaining data in a walk. Uses
 * chaining to reference the original scatterlist, so this uses at most two
 * entries in @sg_out regardless of the number of entries in the original list.
 * Assumes that sg_init_table() was already done.
 */
static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
					  struct scatterlist sg_out[2])
{
	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	sg_set_page(sg_out, sg_page(walk->sg),
		    walk->sg->offset + walk->sg->length - walk->offset,
		    walk->offset);
	scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}

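/*
 * Illustrative example (added here, not part of the original header): hand
 * the unprocessed remainder of a walk to code that expects a plain
 * scatterlist, such as a fallback implementation. process_sglist() is
 * hypothetical.
 *
 *	struct scatterlist sg_rem[2];
 *
 *	sg_init_table(sg_rem, 2);
 *	scatterwalk_get_sglist(&walk, sg_rem);
 *	process_sglist(sg_rem, remaining_bytes);
 */
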
static inline void scatterwalk_map(struct scatter_walk *walk)
{
	struct page *base_page = sg_page(walk->sg);
	unsigned int offset = walk->offset;
	void *addr;

	if (IS_ENABLED(CONFIG_HIGHMEM)) {
		struct page *page;

		page = base_page + (offset >> PAGE_SHIFT);
		offset = offset_in_page(offset);
		addr = kmap_local_page(page) + offset;
	} else {
		/*
		 * When !HIGHMEM we allow the walker to return segments that
		 * span a page boundary; see scatterwalk_clamp(). To make it
		 * clear that in this case we're working in the linear buffer of
		 * the whole sg entry in the kernel's direct map rather than
		 * within the mapped buffer of a single page, compute the
		 * address as an offset from the page_address() of the first
		 * page of the sg entry. Either way the result is the address
		 * in the direct map, but this makes it clearer what is really
		 * going on.
		 */
		addr = page_address(base_page) + offset;
	}

	walk->__addr = addr;
}

/**
 * scatterwalk_next() - Get the next data buffer in a scatterlist walk
 * @walk: the scatter_walk
 * @total: the total number of bytes remaining, > 0
 *
 * A virtual address for the next segment of data from the scatterlist will
 * be placed into @walk->addr. The caller must call scatterwalk_done_src()
 * or scatterwalk_done_dst() when it is done using this virtual address.
 *
 * Returns: the next number of bytes available, <= @total
 */
static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
					    unsigned int total)
{
	unsigned int nbytes = scatterwalk_clamp(walk, total);

	scatterwalk_map(walk);
	return nbytes;
}

static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
	if (IS_ENABLED(CONFIG_HIGHMEM))
		kunmap_local(walk->__addr);
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}

/**
 * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address was not written to, i.e. it is source data.
 */
static inline void scatterwalk_done_src(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	scatterwalk_advance(walk, nbytes);
}

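/*
 * Illustrative example (added here, not part of the original header): the
 * canonical loop for reading all data from a scatterlist with this API. Here
 * each segment is fed to a hypothetical consume() function.
 *
 *	struct scatter_walk walk;
 *	unsigned int nbytes = total;
 *
 *	scatterwalk_start(&walk, sg);
 *	while (nbytes) {
 *		unsigned int n = scatterwalk_next(&walk, nbytes);
 *
 *		consume(walk.addr, n);
 *		scatterwalk_done_src(&walk, n);
 *		nbytes -= n;
 *	}
 */
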
/*
 * Flush the dcache of any pages that overlap the region
 * [offset, offset + nbytes) relative to base_page.
 *
 * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
 * that all relevant code (including the call to sg_page() in the caller, if
 * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
 */
static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
						    unsigned int offset,
						    unsigned int nbytes)
{
	unsigned int num_pages;

	base_page += offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * This is an overflow-safe version of
	 * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
	 */
	num_pages = nbytes / PAGE_SIZE;
	num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);

	for (unsigned int i = 0; i < num_pages; i++)
		flush_dcache_page(base_page + i);
}

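/*
 * Worked example (added for illustration, assuming 4 KiB pages): for
 * offset == 100 and nbytes == 8000, both forms give 2 pages:
 *
 *	DIV_ROUND_UP(100 + 8000, 4096)                       == 2
 *	8000 / 4096 + DIV_ROUND_UP(100 + 8000 % 4096, 4096)  == 2
 *
 * The split form matters because offset + nbytes can wrap around for nbytes
 * close to UINT_MAX, whereas offset + (nbytes % PAGE_SIZE) is always less
 * than 2 * PAGE_SIZE, since both terms are less than PAGE_SIZE here.
 */
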
/**
 * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
 * @walk: the scatter_walk
 * @nbytes: the number of bytes processed this step, less than or equal to the
 *	    number of bytes that scatterwalk_next() returned.
 *
 * Use this if the mapped address may have been written to, i.e. it is
 * destination data.
 */
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
					unsigned int nbytes)
{
	scatterwalk_unmap(walk);
	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
		__scatterwalk_flush_dcache_pages(sg_page(walk->sg),
						 walk->offset, nbytes);
	scatterwalk_advance(walk, nbytes);
}

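/*
 * Illustrative example (added here, not part of the original header): the
 * same loop as above, but writing generated data (e.g. a keystream) into a
 * scatterlist. fill_bytes() is hypothetical.
 *
 *	struct scatter_walk walk;
 *	unsigned int nbytes = total;
 *
 *	scatterwalk_start(&walk, sg);
 *	while (nbytes) {
 *		unsigned int n = scatterwalk_next(&walk, nbytes);
 *
 *		fill_bytes(walk.addr, n);
 *		scatterwalk_done_dst(&walk, n);
 *		nbytes -= n;
 *	}
 */
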
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);

void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
			     unsigned int nbytes);

void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
			   unsigned int nbytes);

void memcpy_from_sglist(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes);

void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
		      const void *buf, unsigned int nbytes);

void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes);

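/*
 * Illustrative example (added here, not part of the original header): copy an
 * IV out of, and an authentication tag into, a scatterlist at known byte
 * offsets. The offsets and sizes are hypothetical.
 *
 *	u8 iv[16], tag[16];
 *
 *	memcpy_from_sglist(iv, sg, 0, sizeof(iv));
 *	memcpy_to_sglist(sg, msglen - sizeof(tag), tag, sizeof(tag));
 */
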
/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
					    unsigned int start,
					    unsigned int nbytes, int out)
{
	if (out)
		memcpy_to_sglist(sg, start, buf, nbytes);
	else
		memcpy_from_sglist(buf, sg, start, nbytes);
}

struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);

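/*
 * Illustrative example (added here, not part of the original header):
 * fast-forward past the associated data of an AEAD request to obtain a
 * scatterlist that starts at the ciphertext. "req" is hypothetical; note that
 * the returned pointer may be either the original list or the two-entry @dst
 * array, so use the return value rather than @dst directly.
 *
 *	struct scatterlist sg_buf[2];
 *	struct scatterlist *src;
 *
 *	src = scatterwalk_ffwd(sg_buf, req->src, req->assoclen);
 */
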
#endif /* _CRYPTO_SCATTERWALK_H */