Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: scomp - Add chaining and virtual address support

Add chaining and virtual address support to all scomp algorithms.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+65 -31
crypto/scompress.c
··· 178 178 unsigned int dlen = req->dlen; 179 179 struct page *spage, *dpage; 180 180 unsigned int soff, doff; 181 - void *src, *dst; 182 181 unsigned int n; 182 + const u8 *src; 183 + u8 *dst; 183 184 int ret; 184 185 185 186 if (!req->src || !slen) ··· 189 188 if (!req->dst || !dlen) 190 189 return -EINVAL; 191 190 192 - soff = req->src->offset; 193 - spage = nth_page(sg_page(req->src), soff / PAGE_SIZE); 194 - soff = offset_in_page(soff); 195 - 196 - n = slen / PAGE_SIZE; 197 - n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE; 198 - if (slen <= req->src->length && (!PageHighMem(nth_page(spage, n)) || 199 - size_add(soff, slen) <= PAGE_SIZE)) 200 - src = kmap_local_page(spage) + soff; 201 - else 202 - src = scratch->src; 203 - 204 - doff = req->dst->offset; 205 - dpage = nth_page(sg_page(req->dst), doff / PAGE_SIZE); 206 - doff = offset_in_page(doff); 207 - 208 - n = dlen / PAGE_SIZE; 209 - n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE; 210 - if (dlen <= req->dst->length && (!PageHighMem(nth_page(dpage, n)) || 211 - size_add(doff, dlen) <= PAGE_SIZE)) 212 - dst = kmap_local_page(dpage) + doff; 191 + if (acomp_request_src_isvirt(req)) 192 + src = req->svirt; 213 193 else { 214 - if (dlen > SCOMP_SCRATCH_SIZE) 215 - dlen = SCOMP_SCRATCH_SIZE; 216 - dst = scratch->dst; 194 + soff = req->src->offset; 195 + spage = nth_page(sg_page(req->src), soff / PAGE_SIZE); 196 + soff = offset_in_page(soff); 197 + 198 + n = slen / PAGE_SIZE; 199 + n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE; 200 + if (slen <= req->src->length && 201 + (!PageHighMem(nth_page(spage, n)) || 202 + size_add(soff, slen) <= PAGE_SIZE)) 203 + src = kmap_local_page(spage) + soff; 204 + else 205 + src = scratch->src; 206 + } 207 + 208 + if (acomp_request_dst_isvirt(req)) 209 + dst = req->dvirt; 210 + else { 211 + doff = req->dst->offset; 212 + dpage = nth_page(sg_page(req->dst), doff / PAGE_SIZE); 213 + doff = offset_in_page(doff); 214 + 215 + n = dlen / PAGE_SIZE; 216 + n += 
(offset_in_page(dlen) + doff - 1) / PAGE_SIZE; 217 + if (dlen <= req->dst->length && 218 + (!PageHighMem(nth_page(dpage, n)) || 219 + size_add(doff, dlen) <= PAGE_SIZE)) 220 + dst = kmap_local_page(dpage) + doff; 221 + else { 222 + if (dlen > SCOMP_SCRATCH_SIZE) 223 + dlen = SCOMP_SCRATCH_SIZE; 224 + dst = scratch->dst; 225 + } 217 226 } 218 227 219 228 spin_lock_bh(&scratch->lock); 220 229 221 230 if (src == scratch->src) 222 - memcpy_from_sglist(src, req->src, 0, slen); 231 + memcpy_from_sglist(scratch->src, req->src, 0, slen); 223 232 224 233 stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream); 225 234 spin_lock(&stream->lock); ··· 248 237 249 238 req->dlen = dlen; 250 239 251 - if (dst != scratch->dst) { 240 + if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) { 252 241 kunmap_local(dst); 253 242 dlen += doff; 254 243 for (;;) { ··· 259 248 dpage = nth_page(dpage, 1); 260 249 } 261 250 } 262 - if (src != scratch->src) 251 + if (!acomp_request_src_isvirt(req) && src != scratch->src) 263 252 kunmap_local(src); 264 253 265 254 return ret; 266 255 } 267 256 257 + static int scomp_acomp_chain(struct acomp_req *req, int dir) 258 + { 259 + struct acomp_req *r2; 260 + int err; 261 + 262 + err = scomp_acomp_comp_decomp(req, dir); 263 + req->base.err = err; 264 + 265 + list_for_each_entry(r2, &req->base.list, base.list) 266 + r2->base.err = scomp_acomp_comp_decomp(r2, dir); 267 + 268 + return err; 269 + } 270 + 268 271 static int scomp_acomp_compress(struct acomp_req *req) 269 272 { 270 - return scomp_acomp_comp_decomp(req, 1); 273 + return scomp_acomp_chain(req, 1); 271 274 } 272 275 273 276 static int scomp_acomp_decompress(struct acomp_req *req) 274 277 { 275 - return scomp_acomp_comp_decomp(req, 0); 278 + return scomp_acomp_chain(req, 0); 276 279 } 277 280 278 281 static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) ··· 347 322 .tfmsize = offsetof(struct crypto_scomp, base), 348 323 }; 349 324 350 - int crypto_register_scomp(struct scomp_alg 
*alg) 325 + static void scomp_prepare_alg(struct scomp_alg *alg) 351 326 { 352 327 struct crypto_alg *base = &alg->calg.base; 353 328 354 329 comp_prepare_alg(&alg->calg); 330 + 331 + base->cra_flags |= CRYPTO_ALG_REQ_CHAIN; 332 + } 333 + 334 + int crypto_register_scomp(struct scomp_alg *alg) 335 + { 336 + struct crypto_alg *base = &alg->calg.base; 337 + 338 + scomp_prepare_alg(alg); 355 339 356 340 base->cra_type = &crypto_scomp_type; 357 341 base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;