Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: remove the second argument of k[un]map_atomic()

Signed-off-by: Cong Wang <amwang@redhat.com>

Authored by Cong Wang and committed by Cong Wang
f0dfc0b0 8fd75e12

+23 -45
+2 -2
crypto/ahash.c
··· 46 46 unsigned int nbytes = min(walk->entrylen, 47 47 ((unsigned int)(PAGE_SIZE)) - offset); 48 48 49 - walk->data = crypto_kmap(walk->pg, 0); 49 + walk->data = kmap_atomic(walk->pg); 50 50 walk->data += offset; 51 51 52 52 if (offset & alignmask) { ··· 93 93 return nbytes; 94 94 } 95 95 96 - crypto_kunmap(walk->data, 0); 96 + kunmap_atomic(walk->data); 97 97 crypto_yield(walk->flags); 98 98 99 99 if (err)
+4 -4
crypto/async_tx/async_memcpy.c
··· 79 79 /* wait for any prerequisite operations */ 80 80 async_tx_quiesce(&submit->depend_tx); 81 81 82 - dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset; 83 - src_buf = kmap_atomic(src, KM_USER1) + src_offset; 82 + dest_buf = kmap_atomic(dest) + dest_offset; 83 + src_buf = kmap_atomic(src) + src_offset; 84 84 85 85 memcpy(dest_buf, src_buf, len); 86 86 87 - kunmap_atomic(src_buf, KM_USER1); 88 - kunmap_atomic(dest_buf, KM_USER0); 87 + kunmap_atomic(src_buf); 88 + kunmap_atomic(dest_buf); 89 89 90 90 async_tx_sync_epilog(submit); 91 91 }
+4 -4
crypto/blkcipher.c
··· 43 43 44 44 static inline void blkcipher_map_src(struct blkcipher_walk *walk) 45 45 { 46 - walk->src.virt.addr = scatterwalk_map(&walk->in, 0); 46 + walk->src.virt.addr = scatterwalk_map(&walk->in); 47 47 } 48 48 49 49 static inline void blkcipher_map_dst(struct blkcipher_walk *walk) 50 50 { 51 - walk->dst.virt.addr = scatterwalk_map(&walk->out, 1); 51 + walk->dst.virt.addr = scatterwalk_map(&walk->out); 52 52 } 53 53 54 54 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk) 55 55 { 56 - scatterwalk_unmap(walk->src.virt.addr, 0); 56 + scatterwalk_unmap(walk->src.virt.addr); 57 57 } 58 58 59 59 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) 60 60 { 61 - scatterwalk_unmap(walk->dst.virt.addr, 1); 61 + scatterwalk_unmap(walk->dst.virt.addr); 62 62 } 63 63 64 64 /* Get a spot of the specified length that does not straddle a page.
+2 -2
crypto/ccm.c
··· 216 216 scatterwalk_start(&walk, sg_next(walk.sg)); 217 217 n = scatterwalk_clamp(&walk, len); 218 218 } 219 - data_src = scatterwalk_map(&walk, 0); 219 + data_src = scatterwalk_map(&walk); 220 220 221 221 compute_mac(tfm, data_src, n, pctx); 222 222 len -= n; 223 223 224 - scatterwalk_unmap(data_src, 0); 224 + scatterwalk_unmap(data_src); 225 225 scatterwalk_advance(&walk, n); 226 226 scatterwalk_done(&walk, 0, len); 227 227 if (len)
+4 -4
crypto/scatterwalk.c
··· 40 40 } 41 41 EXPORT_SYMBOL_GPL(scatterwalk_start); 42 42 43 - void *scatterwalk_map(struct scatter_walk *walk, int out) 43 + void *scatterwalk_map(struct scatter_walk *walk) 44 44 { 45 - return crypto_kmap(scatterwalk_page(walk), out) + 45 + return kmap_atomic(scatterwalk_page(walk)) + 46 46 offset_in_page(walk->offset); 47 47 } 48 48 EXPORT_SYMBOL_GPL(scatterwalk_map); ··· 83 83 if (len_this_page > nbytes) 84 84 len_this_page = nbytes; 85 85 86 - vaddr = scatterwalk_map(walk, out); 86 + vaddr = scatterwalk_map(walk); 87 87 memcpy_dir(buf, vaddr, len_this_page, out); 88 - scatterwalk_unmap(vaddr, out); 88 + scatterwalk_unmap(vaddr); 89 89 90 90 scatterwalk_advance(walk, len_this_page); 91 91
+4 -4
crypto/shash.c
··· 281 281 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { 282 282 void *data; 283 283 284 - data = crypto_kmap(sg_page(sg), 0); 284 + data = kmap_atomic(sg_page(sg)); 285 285 err = crypto_shash_digest(desc, data + offset, nbytes, 286 286 req->result); 287 - crypto_kunmap(data, 0); 287 + kunmap_atomic(data); 288 288 crypto_yield(desc->flags); 289 289 } else 290 290 err = crypto_shash_init(desc) ?: ··· 420 420 421 421 desc->flags = hdesc->flags; 422 422 423 - data = crypto_kmap(sg_page(sg), 0); 423 + data = kmap_atomic(sg_page(sg)); 424 424 err = crypto_shash_digest(desc, data + offset, nbytes, out); 425 - crypto_kunmap(data, 0); 425 + kunmap_atomic(data); 426 426 crypto_yield(desc->flags); 427 427 goto out; 428 428 }
+3 -25
include/crypto/scatterwalk.h
··· 25 25 #include <linux/scatterlist.h> 26 26 #include <linux/sched.h> 27 27 28 - static inline enum km_type crypto_kmap_type(int out) 29 - { 30 - enum km_type type; 31 - 32 - if (in_softirq()) 33 - type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0; 34 - else 35 - type = out * (KM_USER1 - KM_USER0) + KM_USER0; 36 - 37 - return type; 38 - } 39 - 40 - static inline void *crypto_kmap(struct page *page, int out) 41 - { 42 - return kmap_atomic(page, crypto_kmap_type(out)); 43 - } 44 - 45 - static inline void crypto_kunmap(void *vaddr, int out) 46 - { 47 - kunmap_atomic(vaddr, crypto_kmap_type(out)); 48 - } 49 - 50 28 static inline void crypto_yield(u32 flags) 51 29 { 52 30 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ··· 99 121 return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); 100 122 } 101 123 102 - static inline void scatterwalk_unmap(void *vaddr, int out) 124 + static inline void scatterwalk_unmap(void *vaddr) 103 125 { 104 - crypto_kunmap(vaddr, out); 126 + kunmap_atomic(vaddr); 105 127 } 106 128 107 129 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); 108 130 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 109 131 size_t nbytes, int out); 110 - void *scatterwalk_map(struct scatter_walk *walk, int out); 132 + void *scatterwalk_map(struct scatter_walk *walk); 111 133 void scatterwalk_done(struct scatter_walk *walk, int out, int more); 112 134 113 135 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,