Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfrm: ipcomp: Use crypto_acomp interface

Replace the legacy compression interface with the new acomp
interface. This is the first user to make full use of the
asynchronous nature of acomp by plugging into the existing xfrm
resume interface.

As a result of SG support by acomp, the linear scratch buffer
in ipcomp can be removed.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+237 -261
+1 -12
include/net/ipcomp.h
··· 3 3 #define _NET_IPCOMP_H 4 4 5 5 #include <linux/skbuff.h> 6 - #include <linux/types.h> 7 - 8 - #define IPCOMP_SCRATCH_SIZE 65400 9 - 10 - struct crypto_comp; 11 - struct ip_comp_hdr; 12 - 13 - struct ipcomp_data { 14 - u16 threshold; 15 - struct crypto_comp * __percpu *tfms; 16 - }; 17 6 18 7 struct ip_comp_hdr; 19 - struct sk_buff; 8 + struct netlink_ext_ack; 20 9 struct xfrm_state; 21 10 22 11 int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
+3 -4
net/xfrm/xfrm_algo.c
··· 5 5 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 6 6 */ 7 7 8 + #include <crypto/acompress.h> 8 9 #include <crypto/aead.h> 9 10 #include <crypto/hash.h> 10 11 #include <crypto/skcipher.h> 11 12 #include <linux/module.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/pfkeyv2.h> 14 - #include <linux/crypto.h> 15 15 #include <linux/scatterlist.h> 16 16 #include <net/xfrm.h> 17 17 #if IS_ENABLED(CONFIG_INET_ESP) || IS_ENABLED(CONFIG_INET6_ESP) ··· 669 669 }; 670 670 671 671 static const struct xfrm_algo_list xfrm_calg_list = { 672 - .find = crypto_has_comp, 672 + .find = crypto_has_acomp, 673 673 .algs = calg_list, 674 674 .entries = ARRAY_SIZE(calg_list), 675 675 }; ··· 828 828 } 829 829 830 830 for (i = 0; i < calg_entries(); i++) { 831 - status = crypto_has_comp(calg_list[i].name, 0, 832 - CRYPTO_ALG_ASYNC); 831 + status = crypto_has_acomp(calg_list[i].name, 0, 0); 833 832 if (calg_list[i].available != status) 834 833 calg_list[i].available = status; 835 834 }
+233 -245
net/xfrm/xfrm_ipcomp.c
··· 3 3 * IP Payload Compression Protocol (IPComp) - RFC3173. 4 4 * 5 5 * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> 6 - * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au> 6 + * Copyright (c) 2003-2025 Herbert Xu <herbert@gondor.apana.org.au> 7 7 * 8 8 * Todo: 9 9 * - Tunable compression parameters. ··· 11 11 * - Adaptive compression. 12 12 */ 13 13 14 - #include <linux/crypto.h> 14 + #include <crypto/acompress.h> 15 15 #include <linux/err.h> 16 - #include <linux/list.h> 17 16 #include <linux/module.h> 18 - #include <linux/mutex.h> 19 - #include <linux/percpu.h> 17 + #include <linux/skbuff_ref.h> 20 18 #include <linux/slab.h> 21 - #include <linux/smp.h> 22 - #include <linux/vmalloc.h> 23 - #include <net/ip.h> 24 19 #include <net/ipcomp.h> 25 20 #include <net/xfrm.h> 26 21 27 - struct ipcomp_tfms { 28 - struct list_head list; 29 - struct crypto_comp * __percpu *tfms; 30 - int users; 22 + #define IPCOMP_SCRATCH_SIZE 65400 23 + 24 + struct ipcomp_skb_cb { 25 + struct xfrm_skb_cb xfrm; 26 + struct acomp_req *req; 31 27 }; 32 28 33 - static DEFINE_MUTEX(ipcomp_resource_mutex); 34 - static void * __percpu *ipcomp_scratches; 35 - static int ipcomp_scratch_users; 36 - static LIST_HEAD(ipcomp_tfms_list); 29 + struct ipcomp_data { 30 + u16 threshold; 31 + struct crypto_acomp *tfm; 32 + }; 37 33 38 - static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) 34 + struct ipcomp_req_extra { 35 + struct xfrm_state *x; 36 + struct scatterlist sg[]; 37 + }; 38 + 39 + static inline struct ipcomp_skb_cb *ipcomp_cb(struct sk_buff *skb) 39 40 { 40 - struct ipcomp_data *ipcd = x->data; 41 - const int plen = skb->len; 42 - int dlen = IPCOMP_SCRATCH_SIZE; 43 - const u8 *start = skb->data; 44 - u8 *scratch = *this_cpu_ptr(ipcomp_scratches); 45 - struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms); 46 - int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); 47 - int len; 41 + struct ipcomp_skb_cb *cb = (void *)skb->cb; 48 42 49 - if 
(err) 50 - return err; 43 + BUILD_BUG_ON(sizeof(*cb) > sizeof(skb->cb)); 44 + return cb; 45 + } 51 46 52 - if (dlen < (plen + sizeof(struct ip_comp_hdr))) 53 - return -EINVAL; 47 + static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen) 48 + { 49 + struct acomp_req *req = ipcomp_cb(skb)->req; 50 + struct ipcomp_req_extra *extra; 51 + const int plen = skb->data_len; 52 + struct scatterlist *dsg; 53 + int len, dlen; 54 54 55 - len = dlen - plen; 56 - if (len > skb_tailroom(skb)) 57 - len = skb_tailroom(skb); 55 + if (unlikely(err)) 56 + goto out_free_req; 58 57 59 - __skb_put(skb, len); 58 + extra = acomp_request_extra(req); 59 + dsg = extra->sg; 60 + dlen = req->dlen; 60 61 61 - len += plen; 62 - skb_copy_to_linear_data(skb, scratch, len); 62 + pskb_trim_unique(skb, 0); 63 + __skb_put(skb, hlen); 63 64 64 - while ((scratch += len, dlen -= len) > 0) { 65 + /* Only update truesize on input. */ 66 + if (!hlen) 67 + skb->truesize += dlen - plen; 68 + skb->data_len = dlen; 69 + skb->len += dlen; 70 + 71 + do { 65 72 skb_frag_t *frag; 66 73 struct page *page; 67 74 68 - if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) 69 - return -EMSGSIZE; 70 - 71 75 frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; 72 - page = alloc_page(GFP_ATOMIC); 73 - 74 - if (!page) 75 - return -ENOMEM; 76 + page = sg_page(dsg); 77 + dsg = sg_next(dsg); 76 78 77 79 len = PAGE_SIZE; 78 80 if (dlen < len) 79 81 len = dlen; 80 82 81 83 skb_frag_fill_page_desc(frag, page, 0, len); 82 - memcpy(skb_frag_address(frag), scratch, len); 83 - 84 - skb->truesize += len; 85 - skb->data_len += len; 86 - skb->len += len; 87 84 88 85 skb_shinfo(skb)->nr_frags++; 89 - } 86 + } while ((dlen -= len)); 90 87 91 - return 0; 88 + for (; dsg; dsg = sg_next(dsg)) 89 + __free_page(sg_page(dsg)); 90 + 91 + out_free_req: 92 + acomp_request_free(req); 93 + return err; 94 + } 95 + 96 + static int ipcomp_input_done2(struct sk_buff *skb, int err) 97 + { 98 + struct ip_comp_hdr *ipch = 
ip_comp_hdr(skb); 99 + const int plen = skb->len; 100 + 101 + skb_reset_transport_header(skb); 102 + 103 + return ipcomp_post_acomp(skb, err, 0) ?: 104 + skb->len < (plen + sizeof(ip_comp_hdr)) ? -EINVAL : 105 + ipch->nexthdr; 106 + } 107 + 108 + static void ipcomp_input_done(void *data, int err) 109 + { 110 + struct sk_buff *skb = data; 111 + 112 + xfrm_input_resume(skb, ipcomp_input_done2(skb, err)); 113 + } 114 + 115 + static struct acomp_req *ipcomp_setup_req(struct xfrm_state *x, 116 + struct sk_buff *skb, int minhead, 117 + int dlen) 118 + { 119 + const int dnfrags = min(MAX_SKB_FRAGS, 16); 120 + struct ipcomp_data *ipcd = x->data; 121 + struct ipcomp_req_extra *extra; 122 + struct scatterlist *sg, *dsg; 123 + const int plen = skb->len; 124 + struct crypto_acomp *tfm; 125 + struct acomp_req *req; 126 + int nfrags; 127 + int total; 128 + int err; 129 + int i; 130 + 131 + ipcomp_cb(skb)->req = NULL; 132 + 133 + do { 134 + struct sk_buff *trailer; 135 + 136 + if (skb->len > PAGE_SIZE) { 137 + if (skb_linearize_cow(skb)) 138 + return ERR_PTR(-ENOMEM); 139 + nfrags = 1; 140 + break; 141 + } 142 + 143 + if (!skb_cloned(skb) && skb_headlen(skb) >= minhead) { 144 + if (!skb_is_nonlinear(skb)) { 145 + nfrags = 1; 146 + break; 147 + } else if (!skb_has_frag_list(skb)) { 148 + nfrags = skb_shinfo(skb)->nr_frags; 149 + nfrags++; 150 + break; 151 + } 152 + } 153 + 154 + nfrags = skb_cow_data(skb, skb_headlen(skb) < minhead ? 
155 + minhead - skb_headlen(skb) : 0, 156 + &trailer); 157 + if (nfrags < 0) 158 + return ERR_PTR(nfrags); 159 + } while (0); 160 + 161 + tfm = ipcd->tfm; 162 + req = acomp_request_alloc_extra( 163 + tfm, sizeof(*extra) + sizeof(*sg) * (nfrags + dnfrags), 164 + GFP_ATOMIC); 165 + ipcomp_cb(skb)->req = req; 166 + if (!req) 167 + return ERR_PTR(-ENOMEM); 168 + 169 + extra = acomp_request_extra(req); 170 + extra->x = x; 171 + 172 + dsg = extra->sg; 173 + sg = dsg + dnfrags; 174 + sg_init_table(sg, nfrags); 175 + err = skb_to_sgvec(skb, sg, 0, plen); 176 + if (unlikely(err < 0)) 177 + return ERR_PTR(err); 178 + 179 + sg_init_table(dsg, dnfrags); 180 + total = 0; 181 + for (i = 0; i < dnfrags && total < dlen; i++) { 182 + struct page *page; 183 + 184 + page = alloc_page(GFP_ATOMIC); 185 + if (!page) 186 + break; 187 + sg_set_page(dsg + i, page, PAGE_SIZE, 0); 188 + total += PAGE_SIZE; 189 + } 190 + if (!i) 191 + return ERR_PTR(-ENOMEM); 192 + sg_mark_end(dsg + i - 1); 193 + dlen = min(dlen, total); 194 + 195 + acomp_request_set_params(req, sg, dsg, plen, dlen); 196 + 197 + return req; 198 + } 199 + 200 + static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) 201 + { 202 + struct acomp_req *req; 203 + int err; 204 + 205 + req = ipcomp_setup_req(x, skb, 0, IPCOMP_SCRATCH_SIZE); 206 + err = PTR_ERR(req); 207 + if (IS_ERR(req)) 208 + goto out; 209 + 210 + acomp_request_set_callback(req, 0, ipcomp_input_done, skb); 211 + err = crypto_acomp_decompress(req); 212 + if (err == -EINPROGRESS) 213 + return err; 214 + 215 + out: 216 + return ipcomp_input_done2(skb, err); 92 217 } 93 218 94 219 int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) 95 220 { 96 - int nexthdr; 97 - int err = -ENOMEM; 98 - struct ip_comp_hdr *ipch; 221 + struct ip_comp_hdr *ipch __maybe_unused; 99 222 100 223 if (!pskb_may_pull(skb, sizeof(*ipch))) 101 224 return -EINVAL; 102 225 103 - if (skb_linearize_cow(skb)) 104 - goto out; 105 - 106 226 skb->ip_summed = CHECKSUM_NONE; 107 227 
108 228 /* Remove ipcomp header and decompress original payload */ 109 - ipch = (void *)skb->data; 110 - nexthdr = ipch->nexthdr; 111 - 112 - skb->transport_header = skb->network_header + sizeof(*ipch); 113 229 __skb_pull(skb, sizeof(*ipch)); 114 - err = ipcomp_decompress(x, skb); 115 - if (err) 116 - goto out; 117 230 118 - err = nexthdr; 119 - 120 - out: 121 - return err; 231 + return ipcomp_decompress(x, skb); 122 232 } 123 233 EXPORT_SYMBOL_GPL(ipcomp_input); 124 234 125 - static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) 235 + static int ipcomp_output_push(struct sk_buff *skb) 126 236 { 127 - struct ipcomp_data *ipcd = x->data; 128 - const int plen = skb->len; 129 - int dlen = IPCOMP_SCRATCH_SIZE; 130 - u8 *start = skb->data; 131 - struct crypto_comp *tfm; 132 - u8 *scratch; 133 - int err; 134 - 135 - local_bh_disable(); 136 - scratch = *this_cpu_ptr(ipcomp_scratches); 137 - tfm = *this_cpu_ptr(ipcd->tfms); 138 - err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); 139 - if (err) 140 - goto out; 141 - 142 - if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) { 143 - err = -EMSGSIZE; 144 - goto out; 145 - } 146 - 147 - memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); 148 - local_bh_enable(); 149 - 150 - pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); 237 + skb_push(skb, -skb_network_offset(skb)); 151 238 return 0; 152 - 153 - out: 154 - local_bh_enable(); 155 - return err; 156 239 } 157 240 158 - int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) 241 + static int ipcomp_output_done2(struct xfrm_state *x, struct sk_buff *skb, 242 + int err) 159 243 { 160 - int err; 161 244 struct ip_comp_hdr *ipch; 162 - struct ipcomp_data *ipcd = x->data; 163 245 164 - if (skb->len < ipcd->threshold) { 165 - /* Don't bother compressing */ 246 + err = ipcomp_post_acomp(skb, err, sizeof(*ipch)); 247 + if (err) 166 248 goto out_ok; 167 - } 168 - 169 - if (skb_linearize_cow(skb)) 170 - goto out_ok; 171 - 172 - err = 
ipcomp_compress(x, skb); 173 - 174 - if (err) { 175 - goto out_ok; 176 - } 177 249 178 250 /* Install ipcomp header, convert into ipcomp datagram. */ 179 251 ipch = ip_comp_hdr(skb); ··· 254 182 ipch->cpi = htons((u16 )ntohl(x->id.spi)); 255 183 *skb_mac_header(skb) = IPPROTO_COMP; 256 184 out_ok: 257 - skb_push(skb, -skb_network_offset(skb)); 258 - return 0; 185 + return ipcomp_output_push(skb); 186 + } 187 + 188 + static void ipcomp_output_done(void *data, int err) 189 + { 190 + struct ipcomp_req_extra *extra; 191 + struct sk_buff *skb = data; 192 + struct acomp_req *req; 193 + 194 + req = ipcomp_cb(skb)->req; 195 + extra = acomp_request_extra(req); 196 + 197 + xfrm_output_resume(skb_to_full_sk(skb), skb, 198 + ipcomp_output_done2(extra->x, skb, err)); 199 + } 200 + 201 + static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) 202 + { 203 + struct ip_comp_hdr *ipch __maybe_unused; 204 + struct acomp_req *req; 205 + int err; 206 + 207 + req = ipcomp_setup_req(x, skb, sizeof(*ipch), 208 + skb->len - sizeof(*ipch)); 209 + err = PTR_ERR(req); 210 + if (IS_ERR(req)) 211 + goto out; 212 + 213 + acomp_request_set_callback(req, 0, ipcomp_output_done, skb); 214 + err = crypto_acomp_compress(req); 215 + if (err == -EINPROGRESS) 216 + return err; 217 + 218 + out: 219 + return ipcomp_output_done2(x, skb, err); 220 + } 221 + 222 + int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) 223 + { 224 + struct ipcomp_data *ipcd = x->data; 225 + 226 + if (skb->len < ipcd->threshold) { 227 + /* Don't bother compressing */ 228 + return ipcomp_output_push(skb); 229 + } 230 + 231 + return ipcomp_compress(x, skb); 259 232 } 260 233 EXPORT_SYMBOL_GPL(ipcomp_output); 261 234 262 - static void ipcomp_free_scratches(void) 263 - { 264 - int i; 265 - void * __percpu *scratches; 266 - 267 - if (--ipcomp_scratch_users) 268 - return; 269 - 270 - scratches = ipcomp_scratches; 271 - if (!scratches) 272 - return; 273 - 274 - for_each_possible_cpu(i) 275 - 
vfree(*per_cpu_ptr(scratches, i)); 276 - 277 - free_percpu(scratches); 278 - ipcomp_scratches = NULL; 279 - } 280 - 281 - static void * __percpu *ipcomp_alloc_scratches(void) 282 - { 283 - void * __percpu *scratches; 284 - int i; 285 - 286 - if (ipcomp_scratch_users++) 287 - return ipcomp_scratches; 288 - 289 - scratches = alloc_percpu(void *); 290 - if (!scratches) 291 - return NULL; 292 - 293 - ipcomp_scratches = scratches; 294 - 295 - for_each_possible_cpu(i) { 296 - void *scratch; 297 - 298 - scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i)); 299 - if (!scratch) 300 - return NULL; 301 - *per_cpu_ptr(scratches, i) = scratch; 302 - } 303 - 304 - return scratches; 305 - } 306 - 307 - static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms) 308 - { 309 - struct ipcomp_tfms *pos; 310 - int cpu; 311 - 312 - list_for_each_entry(pos, &ipcomp_tfms_list, list) { 313 - if (pos->tfms == tfms) 314 - break; 315 - } 316 - 317 - WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list)); 318 - 319 - if (--pos->users) 320 - return; 321 - 322 - list_del(&pos->list); 323 - kfree(pos); 324 - 325 - if (!tfms) 326 - return; 327 - 328 - for_each_possible_cpu(cpu) { 329 - struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 330 - crypto_free_comp(tfm); 331 - } 332 - free_percpu(tfms); 333 - } 334 - 335 - static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name) 336 - { 337 - struct ipcomp_tfms *pos; 338 - struct crypto_comp * __percpu *tfms; 339 - int cpu; 340 - 341 - 342 - list_for_each_entry(pos, &ipcomp_tfms_list, list) { 343 - struct crypto_comp *tfm; 344 - 345 - /* This can be any valid CPU ID so we don't need locking. 
*/ 346 - tfm = this_cpu_read(*pos->tfms); 347 - 348 - if (!strcmp(crypto_comp_name(tfm), alg_name)) { 349 - pos->users++; 350 - return pos->tfms; 351 - } 352 - } 353 - 354 - pos = kmalloc(sizeof(*pos), GFP_KERNEL); 355 - if (!pos) 356 - return NULL; 357 - 358 - pos->users = 1; 359 - INIT_LIST_HEAD(&pos->list); 360 - list_add(&pos->list, &ipcomp_tfms_list); 361 - 362 - pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 363 - if (!tfms) 364 - goto error; 365 - 366 - for_each_possible_cpu(cpu) { 367 - struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 368 - CRYPTO_ALG_ASYNC); 369 - if (IS_ERR(tfm)) 370 - goto error; 371 - *per_cpu_ptr(tfms, cpu) = tfm; 372 - } 373 - 374 - return tfms; 375 - 376 - error: 377 - ipcomp_free_tfms(tfms); 378 - return NULL; 379 - } 380 - 381 235 static void ipcomp_free_data(struct ipcomp_data *ipcd) 382 236 { 383 - if (ipcd->tfms) 384 - ipcomp_free_tfms(ipcd->tfms); 385 - ipcomp_free_scratches(); 237 + crypto_free_acomp(ipcd->tfm); 386 238 } 387 239 388 240 void ipcomp_destroy(struct xfrm_state *x) ··· 315 319 if (!ipcd) 316 320 return; 317 321 xfrm_state_delete_tunnel(x); 318 - mutex_lock(&ipcomp_resource_mutex); 319 322 ipcomp_free_data(ipcd); 320 - mutex_unlock(&ipcomp_resource_mutex); 321 323 kfree(ipcd); 322 324 } 323 325 EXPORT_SYMBOL_GPL(ipcomp_destroy); ··· 342 348 if (!ipcd) 343 349 goto out; 344 350 345 - mutex_lock(&ipcomp_resource_mutex); 346 - if (!ipcomp_alloc_scratches()) 351 + ipcd->tfm = crypto_alloc_acomp(x->calg->alg_name, 0, 0); 352 + if (IS_ERR(ipcd->tfm)) 347 353 goto error; 348 - 349 - ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name); 350 - if (!ipcd->tfms) 351 - goto error; 352 - mutex_unlock(&ipcomp_resource_mutex); 353 354 354 355 calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0); 355 356 BUG_ON(!calg_desc); ··· 356 367 357 368 error: 358 369 ipcomp_free_data(ipcd); 359 - mutex_unlock(&ipcomp_resource_mutex); 360 370 kfree(ipcd); 361 371 goto out; 362 372 }