Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: skcipher - Add ablkcipher_walk interfaces

These are akin to the blkcipher_walk helpers.

The main differences in the async variant are:

1) Only physical walking is supported. We can't hold on to
kmap mappings across the async operation anyway, so virtual
ablkcipher_walk operations are not supported.

2) Bounce buffers used for async mode need to be persistent and
freed at a later point in time when the async op completes.
Therefore we maintain a list of writeback buffers and require
that the ablkcipher_walk user call the 'complete' operation
so we can copy the bounce buffers out to the real buffers and
free up the bounce buffer chunks (see the usage sketch after
this list).
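
To make the 'complete' contract concrete, here is a minimal hypothetical
sketch of the expected calling pattern. The n2_* names (n2_request_ctx,
n2_encrypt, n2_submit_block, n2_hw_done) are invented for illustration
and are not the actual Niagara2 driver code; only the ablkcipher_* calls
and the walk fields come from this patch:

	#include <crypto/algapi.h>

	/* The walk lives in the per-request context because its list of
	 * bounce buffers must survive until the async op completes.
	 */
	struct n2_request_ctx {
		struct ablkcipher_walk walk;
	};

	static int n2_encrypt(struct ablkcipher_request *req)
	{
		struct n2_request_ctx *rctx = ablkcipher_request_ctx(req);
		struct ablkcipher_walk *walk = &rctx->walk;
		int err;

		ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
		err = ablkcipher_walk_phys(req, walk);

		while (walk->nbytes && !err) {
			/* Physical walking only: we are handed page/offset
			 * pairs suitable for programming into hardware.
			 */
			err = n2_submit_block(walk->src.page, walk->src.offset,
					      walk->dst.page, walk->dst.offset,
					      walk->nbytes);
			/* The third argument is the number of bytes of this
			 * chunk left unprocessed; 0 means all consumed.
			 */
			if (!err)
				err = ablkcipher_walk_done(req, walk, 0);
		}

		return err ?: -EINPROGRESS;
	}

	/* Hardware completion path: flush the queued bounce buffers out to
	 * the real destination scatterlist, free them, then complete.
	 */
	static void n2_hw_done(struct ablkcipher_request *req, int hw_err)
	{
		struct n2_request_ctx *rctx = ablkcipher_request_ctx(req);

		ablkcipher_walk_complete(&rctx->walk);
		req->base.complete(&req->base, hw_err);
	}

Note that the walk is deliberately not kept on the stack: the bounce
buffers queued on walk->buffers must stay alive until the completion
path runs ablkcipher_walk_complete().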

These interfaces will be used by the new Niagara2 crypto driver.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by David S. Miller, committed by Herbert Xu
bf06099d a8f1a052

+317 lines total

crypto/ablkcipher.c (+277)
···
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 
+#include <crypto/scatterwalk.h>
+
 #include "internal.h"
 
 static const char *skcipher_default_geniv __read_mostly;
+
+struct ablkcipher_buffer {
+	struct list_head entry;
+	struct scatter_walk dst;
+	unsigned int len;
+	void *data;
+};
+
+enum {
+	ABLKCIPHER_WALK_SLOW = 1 << 0,
+};
+
+static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
+{
+	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
+}
+
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	struct ablkcipher_buffer *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+		ablkcipher_buffer_write(p);
+		list_del(&p->entry);
+		kfree(p);
+	}
+}
+EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);
+
+static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
+					  struct ablkcipher_buffer *p)
+{
+	p->dst = walk->out;
+	list_add_tail(&p->entry, &walk->buffers);
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
+{
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+	return max(start, end_page);
+}
+
+static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
+						unsigned int bsize)
+{
+	unsigned int n = bsize;
+
+	for (;;) {
+		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
+
+		if (len_this_page > n)
+			len_this_page = n;
+		scatterwalk_advance(&walk->out, n);
+		if (n == len_this_page)
+			break;
+		n -= len_this_page;
+		scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
+	}
+
+	return bsize;
+}
+
+static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
+						unsigned int n)
+{
+	scatterwalk_advance(&walk->in, n);
+	scatterwalk_advance(&walk->out, n);
+
+	return n;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int nbytes = 0;
+
+	if (likely(err >= 0)) {
+		unsigned int n = walk->nbytes - err;
+
+		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
+			n = ablkcipher_done_fast(walk, n);
+		else if (WARN_ON(err)) {
+			err = -EINVAL;
+			goto err;
+		} else
+			n = ablkcipher_done_slow(walk, n);
+
+		nbytes = walk->total - n;
+		err = 0;
+	}
+
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
+
+err:
+	walk->total = nbytes;
+	walk->nbytes = nbytes;
+
+	if (nbytes) {
+		crypto_yield(req->base.flags);
+		return ablkcipher_walk_next(req, walk);
+	}
+
+	if (walk->iv != req->info)
+		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
+	if (walk->iv_buffer)
+		kfree(walk->iv_buffer);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
+
+static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk,
+				       unsigned int bsize,
+				       unsigned int alignmask,
+				       void **src_p, void **dst_p)
+{
+	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
+	struct ablkcipher_buffer *p;
+	void *src, *dst, *base;
+	unsigned int n;
+
+	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
+	n += (aligned_bsize * 3 - (alignmask + 1) +
+	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));
+
+	p = kmalloc(n, GFP_ATOMIC);
+	if (!p)
+		return ablkcipher_walk_done(req, walk, -ENOMEM);
+
+	base = p + 1;
+
+	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
+	src = dst = ablkcipher_get_spot(dst, bsize);
+
+	p->len = bsize;
+	p->data = dst;
+
+	scatterwalk_copychunks(src, &walk->in, bsize, 0);
+
+	ablkcipher_queue_write(walk, p);
+
+	walk->nbytes = bsize;
+	walk->flags |= ABLKCIPHER_WALK_SLOW;
+
+	*src_p = src;
+	*dst_p = dst;
+
+	return 0;
+}
+
+static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
+				     struct crypto_tfm *tfm,
+				     unsigned int alignmask)
+{
+	unsigned bs = walk->blocksize;
+	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
+	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
+	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
+			    (alignmask + 1);
+	u8 *iv;
+
+	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
+	if (!walk->iv_buffer)
+		return -ENOMEM;
+
+	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
+	iv = ablkcipher_get_spot(iv, ivsize);
+
+	walk->iv = memcpy(iv, walk->iv, ivsize);
+	return 0;
+}
+
+static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
+				       struct ablkcipher_walk *walk)
+{
+	walk->src.page = scatterwalk_page(&walk->in);
+	walk->src.offset = offset_in_page(walk->in.offset);
+	walk->dst.page = scatterwalk_page(&walk->out);
+	walk->dst.offset = offset_in_page(walk->out.offset);
+
+	return 0;
+}
+
+static int ablkcipher_walk_next(struct ablkcipher_request *req,
+				struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask, bsize, n;
+	void *src, *dst;
+	int err;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	n = walk->total;
+	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
+		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		return ablkcipher_walk_done(req, walk, -EINVAL);
+	}
+
+	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
+	src = dst = NULL;
+
+	bsize = min(walk->blocksize, n);
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (n < bsize ||
+	    !scatterwalk_aligned(&walk->in, alignmask) ||
+	    !scatterwalk_aligned(&walk->out, alignmask)) {
+		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
+					   &src, &dst);
+		goto set_phys_lowmem;
+	}
+
+	walk->nbytes = n;
+
+	return ablkcipher_next_fast(req, walk);
+
+set_phys_lowmem:
+	if (err >= 0) {
+		walk->src.page = virt_to_page(src);
+		walk->dst.page = virt_to_page(dst);
+		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
+		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
+	}
+
+	return err;
+}
+
+static int ablkcipher_walk_first(struct ablkcipher_request *req,
+				 struct ablkcipher_walk *walk)
+{
+	struct crypto_tfm *tfm = req->base.tfm;
+	unsigned int alignmask;
+
+	alignmask = crypto_tfm_alg_alignmask(tfm);
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
+	walk->nbytes = walk->total;
+	if (unlikely(!walk->total))
+		return 0;
+
+	walk->iv_buffer = NULL;
+	walk->iv = req->info;
+	if (unlikely(((unsigned long)walk->iv & alignmask))) {
+		int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+		if (err)
+			return err;
+	}
+
+	scatterwalk_start(&walk->in, walk->in.sg);
+	scatterwalk_start(&walk->out, walk->out.sg);
+
+	return ablkcipher_walk_next(req, walk);
+}
+
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk)
+{
+	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
+	return ablkcipher_walk_first(req, walk);
+}
+EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
 
 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
 			    unsigned int keylen)
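
One non-obvious helper above is ablkcipher_get_spot(): given a candidate
start address and a length, it returns start unchanged when
[start, start + len) fits within a single page, and otherwise slides the
spot forward so it begins exactly at the page boundary it would have
straddled. A small user-space model of the arithmetic (assuming 4 KiB
pages, with PAGE_MASK and max() stubbed locally):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))
	#define max(a, b) ((a) > (b) ? (a) : (b))

	/* Same arithmetic as ablkcipher_get_spot(), on plain integers. */
	static unsigned long get_spot(unsigned long start, unsigned long len)
	{
		/* Page containing the last byte of [start, start + len). */
		unsigned long end_page = (start + len - 1) & PAGE_MASK;

		/* If the range would straddle a page boundary, slide the
		 * spot forward to that boundary; otherwise leave it alone.
		 */
		return max(start, end_page);
	}

	int main(void)
	{
		/* Fits within one page: returned unchanged. */
		printf("%#lx\n", get_spot(0x1000, 16)); /* prints 0x1000 */

		/* Would straddle 0x2000: slid forward to the boundary. */
		printf("%#lx\n", get_spot(0x1ff8, 16)); /* prints 0x2000 */

		return 0;
	}

Because the slide can consume up to nearly a full block per spot, the
callers over-allocate; that appears to be the reason for the
aligned_bsize * 3 term in ablkcipher_next_slow() and the aligned_bs * 2
term in ablkcipher_copy_iv().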

include/crypto/algapi.h (+40)
···
 	unsigned int blocksize;
 };
 
+struct ablkcipher_walk {
+	struct {
+		struct page *page;
+		unsigned int offset;
+	} src, dst;
+
+	struct scatter_walk in;
+	unsigned int nbytes;
+	struct scatter_walk out;
+	unsigned int total;
+	struct list_head buffers;
+	u8 *iv_buffer;
+	u8 *iv;
+	int flags;
+	unsigned int blocksize;
+};
+
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_aead_type;
 extern const struct crypto_type crypto_blkcipher_type;
···
 int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
 			      struct blkcipher_walk *walk,
 			      unsigned int blocksize);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk, int err);
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+			 struct ablkcipher_walk *walk);
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
 
 static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
 {
···
 	walk->in.sg = src;
 	walk->out.sg = dst;
 	walk->total = nbytes;
+}
+
+static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
+					struct scatterlist *dst,
+					struct scatterlist *src,
+					unsigned int nbytes)
+{
+	walk->in.sg = src;
+	walk->out.sg = dst;
+	walk->total = nbytes;
+	INIT_LIST_HEAD(&walk->buffers);
+}
+
+static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+	if (unlikely(!list_empty(&walk->buffers)))
+		__ablkcipher_walk_complete(walk);
 }
 
 static inline struct crypto_async_request *crypto_get_backlog(
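
The ablkcipher_walk_init()/ablkcipher_walk_complete() pair added here
frames the whole lifecycle: init starts with an empty buffers list, and
complete flushes whatever the slow path queued on it. Below is a
stripped-down user-space model of that deferred-writeback pattern; a
plain singly linked list stands in for list_head, memcpy() for
scatterwalk_copychunks(), and all names are hypothetical:

	#include <stdlib.h>
	#include <string.h>

	/* Stand-in for struct ablkcipher_buffer: one pending writeback. */
	struct pending_write {
		struct pending_write *next;
		void *real_dst;   /* where the data must finally land */
		void *bounce;     /* aligned scratch the operation wrote to */
		size_t len;
	};

	/* Models ablkcipher_queue_write(): remember a bounce buffer for
	 * later. (The kernel queues at the tail; ordering is irrelevant
	 * here because each entry targets a distinct destination.)
	 */
	static void queue_write(struct pending_write **list,
				struct pending_write *p)
	{
		p->next = *list;
		*list = p;
	}

	/* Models __ablkcipher_walk_complete(): once the async operation
	 * has finished, copy every bounce buffer out to its real
	 * destination and free the bookkeeping entries.
	 */
	static void walk_complete(struct pending_write **list)
	{
		struct pending_write *p = *list, *next;

		for (; p; p = next) {
			next = p->next;
			memcpy(p->real_dst, p->bounce, p->len);
			free(p);
		}
		*list = NULL;
	}

As in the kernel version, the fast path pays nothing: a caller whose
buffers were all aligned never queues an entry, so the inline
ablkcipher_walk_complete() sees an empty list and skips the out-of-line
__ablkcipher_walk_complete() entirely.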