ipsec: ipcomp - Merge IPComp implementations

This patch merges the IPv4/IPv6 IPComp implementations since most
of the code is identical. As a result, future enhancements will no
longer need to be duplicated.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Herbert Xu and committed by
David S. Miller
6fccab67 cffe1c5d

+377 -606
+6
include/net/ipcomp.h
··· 14 14 15 15 struct ip_comp_hdr; 16 16 struct sk_buff; 17 + struct xfrm_state; 18 + 19 + int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb); 20 + int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb); 21 + void ipcomp_destroy(struct xfrm_state *x); 22 + int ipcomp_init_state(struct xfrm_state *x); 17 23 18 24 static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb) 19 25 {
+1 -3
net/ipv4/Kconfig
··· 356 356 357 357 config INET_IPCOMP 358 358 tristate "IP: IPComp transformation" 359 - select XFRM 360 359 select INET_XFRM_TUNNEL 361 - select CRYPTO 362 - select CRYPTO_DEFLATE 360 + select XFRM_IPCOMP 363 361 ---help--- 364 362 Support for IP Payload Compression Protocol (IPComp) (RFC3173), 365 363 typically needed for IPsec.
+7 -308
net/ipv4/ipcomp.c
··· 14 14 * - Adaptive compression. 15 15 */ 16 16 #include <linux/module.h> 17 - #include <linux/crypto.h> 18 17 #include <linux/err.h> 19 - #include <linux/pfkeyv2.h> 20 - #include <linux/percpu.h> 21 - #include <linux/smp.h> 22 - #include <linux/list.h> 23 - #include <linux/vmalloc.h> 24 18 #include <linux/rtnetlink.h> 25 - #include <linux/mutex.h> 26 19 #include <net/ip.h> 27 20 #include <net/xfrm.h> 28 21 #include <net/icmp.h> 29 22 #include <net/ipcomp.h> 30 23 #include <net/protocol.h> 31 - 32 - struct ipcomp_tfms { 33 - struct list_head list; 34 - struct crypto_comp **tfms; 35 - int users; 36 - }; 37 - 38 - static DEFINE_MUTEX(ipcomp_resource_mutex); 39 - static void **ipcomp_scratches; 40 - static int ipcomp_scratch_users; 41 - static LIST_HEAD(ipcomp_tfms_list); 42 - 43 - static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) 44 - { 45 - struct ipcomp_data *ipcd = x->data; 46 - const int plen = skb->len; 47 - int dlen = IPCOMP_SCRATCH_SIZE; 48 - const u8 *start = skb->data; 49 - const int cpu = get_cpu(); 50 - u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); 51 - struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); 52 - int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); 53 - 54 - if (err) 55 - goto out; 56 - 57 - if (dlen < (plen + sizeof(struct ip_comp_hdr))) { 58 - err = -EINVAL; 59 - goto out; 60 - } 61 - 62 - err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); 63 - if (err) 64 - goto out; 65 - 66 - skb->truesize += dlen - plen; 67 - __skb_put(skb, dlen - plen); 68 - skb_copy_to_linear_data(skb, scratch, dlen); 69 - out: 70 - put_cpu(); 71 - return err; 72 - } 73 - 74 - static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) 75 - { 76 - int nexthdr; 77 - int err = -ENOMEM; 78 - struct ip_comp_hdr *ipch; 79 - 80 - if (skb_linearize_cow(skb)) 81 - goto out; 82 - 83 - skb->ip_summed = CHECKSUM_NONE; 84 - 85 - /* Remove ipcomp header and decompress original payload */ 86 - ipch = (void *)skb->data; 87 
- nexthdr = ipch->nexthdr; 88 - 89 - skb->transport_header = skb->network_header + sizeof(*ipch); 90 - __skb_pull(skb, sizeof(*ipch)); 91 - err = ipcomp_decompress(x, skb); 92 - if (err) 93 - goto out; 94 - 95 - err = nexthdr; 96 - 97 - out: 98 - return err; 99 - } 100 - 101 - static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) 102 - { 103 - struct ipcomp_data *ipcd = x->data; 104 - const int plen = skb->len; 105 - int dlen = IPCOMP_SCRATCH_SIZE; 106 - u8 *start = skb->data; 107 - const int cpu = get_cpu(); 108 - u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); 109 - struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); 110 - int err; 111 - 112 - local_bh_disable(); 113 - err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); 114 - local_bh_enable(); 115 - if (err) 116 - goto out; 117 - 118 - if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) { 119 - err = -EMSGSIZE; 120 - goto out; 121 - } 122 - 123 - memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); 124 - put_cpu(); 125 - 126 - pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); 127 - return 0; 128 - 129 - out: 130 - put_cpu(); 131 - return err; 132 - } 133 - 134 - static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) 135 - { 136 - int err; 137 - struct ip_comp_hdr *ipch; 138 - struct ipcomp_data *ipcd = x->data; 139 - 140 - if (skb->len < ipcd->threshold) { 141 - /* Don't bother compressing */ 142 - goto out_ok; 143 - } 144 - 145 - if (skb_linearize_cow(skb)) 146 - goto out_ok; 147 - 148 - err = ipcomp_compress(x, skb); 149 - 150 - if (err) { 151 - goto out_ok; 152 - } 153 - 154 - /* Install ipcomp header, convert into ipcomp datagram. 
*/ 155 - ipch = ip_comp_hdr(skb); 156 - ipch->nexthdr = *skb_mac_header(skb); 157 - ipch->flags = 0; 158 - ipch->cpi = htons((u16 )ntohl(x->id.spi)); 159 - *skb_mac_header(skb) = IPPROTO_COMP; 160 - out_ok: 161 - skb_push(skb, -skb_network_offset(skb)); 162 - return 0; 163 - } 24 + #include <net/sock.h> 164 25 165 26 static void ipcomp4_err(struct sk_buff *skb, u32 info) 166 27 { ··· 102 241 return err; 103 242 } 104 243 105 - static void ipcomp_free_scratches(void) 106 - { 107 - int i; 108 - void **scratches; 109 - 110 - if (--ipcomp_scratch_users) 111 - return; 112 - 113 - scratches = ipcomp_scratches; 114 - if (!scratches) 115 - return; 116 - 117 - for_each_possible_cpu(i) 118 - vfree(*per_cpu_ptr(scratches, i)); 119 - 120 - free_percpu(scratches); 121 - } 122 - 123 - static void **ipcomp_alloc_scratches(void) 124 - { 125 - int i; 126 - void **scratches; 127 - 128 - if (ipcomp_scratch_users++) 129 - return ipcomp_scratches; 130 - 131 - scratches = alloc_percpu(void *); 132 - if (!scratches) 133 - return NULL; 134 - 135 - ipcomp_scratches = scratches; 136 - 137 - for_each_possible_cpu(i) { 138 - void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); 139 - if (!scratch) 140 - return NULL; 141 - *per_cpu_ptr(scratches, i) = scratch; 142 - } 143 - 144 - return scratches; 145 - } 146 - 147 - static void ipcomp_free_tfms(struct crypto_comp **tfms) 148 - { 149 - struct ipcomp_tfms *pos; 150 - int cpu; 151 - 152 - list_for_each_entry(pos, &ipcomp_tfms_list, list) { 153 - if (pos->tfms == tfms) 154 - break; 155 - } 156 - 157 - BUG_TRAP(pos); 158 - 159 - if (--pos->users) 160 - return; 161 - 162 - list_del(&pos->list); 163 - kfree(pos); 164 - 165 - if (!tfms) 166 - return; 167 - 168 - for_each_possible_cpu(cpu) { 169 - struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 170 - crypto_free_comp(tfm); 171 - } 172 - free_percpu(tfms); 173 - } 174 - 175 - static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 176 - { 177 - struct ipcomp_tfms *pos; 178 - struct crypto_comp 
**tfms; 179 - int cpu; 180 - 181 - /* This can be any valid CPU ID so we don't need locking. */ 182 - cpu = raw_smp_processor_id(); 183 - 184 - list_for_each_entry(pos, &ipcomp_tfms_list, list) { 185 - struct crypto_comp *tfm; 186 - 187 - tfms = pos->tfms; 188 - tfm = *per_cpu_ptr(tfms, cpu); 189 - 190 - if (!strcmp(crypto_comp_name(tfm), alg_name)) { 191 - pos->users++; 192 - return tfms; 193 - } 194 - } 195 - 196 - pos = kmalloc(sizeof(*pos), GFP_KERNEL); 197 - if (!pos) 198 - return NULL; 199 - 200 - pos->users = 1; 201 - INIT_LIST_HEAD(&pos->list); 202 - list_add(&pos->list, &ipcomp_tfms_list); 203 - 204 - pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 205 - if (!tfms) 206 - goto error; 207 - 208 - for_each_possible_cpu(cpu) { 209 - struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 210 - CRYPTO_ALG_ASYNC); 211 - if (IS_ERR(tfm)) 212 - goto error; 213 - *per_cpu_ptr(tfms, cpu) = tfm; 214 - } 215 - 216 - return tfms; 217 - 218 - error: 219 - ipcomp_free_tfms(tfms); 220 - return NULL; 221 - } 222 - 223 - static void ipcomp_free_data(struct ipcomp_data *ipcd) 224 - { 225 - if (ipcd->tfms) 226 - ipcomp_free_tfms(ipcd->tfms); 227 - ipcomp_free_scratches(); 228 - } 229 - 230 - static void ipcomp_destroy(struct xfrm_state *x) 231 - { 232 - struct ipcomp_data *ipcd = x->data; 233 - if (!ipcd) 234 - return; 235 - xfrm_state_delete_tunnel(x); 236 - mutex_lock(&ipcomp_resource_mutex); 237 - ipcomp_free_data(ipcd); 238 - mutex_unlock(&ipcomp_resource_mutex); 239 - kfree(ipcd); 240 - } 241 - 242 - static int ipcomp_init_state(struct xfrm_state *x) 244 + static int ipcomp4_init_state(struct xfrm_state *x) 243 245 { 244 246 int err; 245 247 struct ipcomp_data *ipcd; 246 248 struct xfrm_algo_desc *calg_desc; 247 - 248 - err = -EINVAL; 249 - if (!x->calg) 250 - goto out; 251 - 252 - if (x->encap) 253 - goto out; 254 249 255 250 x->props.header_len = 0; 256 251 switch (x->props.mode) { ··· 119 402 goto out; 120 403 } 121 404 122 - err = -ENOMEM; 123 - ipcd = 
kzalloc(sizeof(*ipcd), GFP_KERNEL); 124 - if (!ipcd) 405 + err = ipcomp_init_state(x); 406 + if (err) 125 407 goto out; 126 - 127 - mutex_lock(&ipcomp_resource_mutex); 128 - if (!ipcomp_alloc_scratches()) 129 - goto error; 130 - 131 - ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name); 132 - if (!ipcd->tfms) 133 - goto error; 134 - mutex_unlock(&ipcomp_resource_mutex); 135 408 136 409 if (x->props.mode == XFRM_MODE_TUNNEL) { 137 410 err = ipcomp_tunnel_attach(x); ··· 129 422 goto error_tunnel; 130 423 } 131 424 132 - calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0); 133 - BUG_ON(!calg_desc); 134 - ipcd->threshold = calg_desc->uinfo.comp.threshold; 135 - x->data = ipcd; 136 425 err = 0; 137 426 out: 138 427 return err; 139 428 140 429 error_tunnel: 141 - mutex_lock(&ipcomp_resource_mutex); 142 - error: 143 - ipcomp_free_data(ipcd); 144 - mutex_unlock(&ipcomp_resource_mutex); 145 - kfree(ipcd); 430 + ipcomp_destroy(x); 146 431 goto out; 147 432 } 148 433 ··· 142 443 .description = "IPCOMP4", 143 444 .owner = THIS_MODULE, 144 445 .proto = IPPROTO_COMP, 145 - .init_state = ipcomp_init_state, 446 + .init_state = ipcomp4_init_state, 146 447 .destructor = ipcomp_destroy, 147 448 .input = ipcomp_input, 148 449 .output = ipcomp_output ··· 180 481 module_exit(ipcomp4_fini); 181 482 182 483 MODULE_LICENSE("GPL"); 183 - MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173"); 484 + MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp/IPv4) - RFC3173"); 184 485 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>"); 185 486 186 487 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);
+1 -3
net/ipv6/Kconfig
··· 96 96 97 97 config INET6_IPCOMP 98 98 tristate "IPv6: IPComp transformation" 99 - select XFRM 100 99 select INET6_XFRM_TUNNEL 101 - select CRYPTO 102 - select CRYPTO_DEFLATE 100 + select XFRM_IPCOMP 103 101 ---help--- 104 102 Support for IP Payload Compression Protocol (IPComp) (RFC3173), 105 103 typically needed for IPsec.
+6 -292
net/ipv6/ipcomp6.c
··· 50 50 #include <linux/icmpv6.h> 51 51 #include <linux/mutex.h> 52 52 53 - struct ipcomp6_tfms { 54 - struct list_head list; 55 - struct crypto_comp **tfms; 56 - int users; 57 - }; 58 - 59 - static DEFINE_MUTEX(ipcomp6_resource_mutex); 60 - static void **ipcomp6_scratches; 61 - static int ipcomp6_scratch_users; 62 - static LIST_HEAD(ipcomp6_tfms_list); 63 - 64 - static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) 65 - { 66 - int nexthdr; 67 - int err = -ENOMEM; 68 - struct ip_comp_hdr *ipch; 69 - int plen, dlen; 70 - struct ipcomp_data *ipcd = x->data; 71 - u8 *start, *scratch; 72 - struct crypto_comp *tfm; 73 - int cpu; 74 - 75 - if (skb_linearize_cow(skb)) 76 - goto out; 77 - 78 - skb->ip_summed = CHECKSUM_NONE; 79 - 80 - /* Remove ipcomp header and decompress original payload */ 81 - ipch = (void *)skb->data; 82 - nexthdr = ipch->nexthdr; 83 - 84 - skb->transport_header = skb->network_header + sizeof(*ipch); 85 - __skb_pull(skb, sizeof(*ipch)); 86 - 87 - /* decompression */ 88 - plen = skb->len; 89 - dlen = IPCOMP_SCRATCH_SIZE; 90 - start = skb->data; 91 - 92 - cpu = get_cpu(); 93 - scratch = *per_cpu_ptr(ipcomp6_scratches, cpu); 94 - tfm = *per_cpu_ptr(ipcd->tfms, cpu); 95 - 96 - err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); 97 - if (err) 98 - goto out_put_cpu; 99 - 100 - if (dlen < (plen + sizeof(*ipch))) { 101 - err = -EINVAL; 102 - goto out_put_cpu; 103 - } 104 - 105 - err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); 106 - if (err) { 107 - goto out_put_cpu; 108 - } 109 - 110 - skb->truesize += dlen - plen; 111 - __skb_put(skb, dlen - plen); 112 - skb_copy_to_linear_data(skb, scratch, dlen); 113 - err = nexthdr; 114 - 115 - out_put_cpu: 116 - put_cpu(); 117 - out: 118 - return err; 119 - } 120 - 121 - static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) 122 - { 123 - int err; 124 - struct ip_comp_hdr *ipch; 125 - struct ipcomp_data *ipcd = x->data; 126 - int plen, dlen; 127 - u8 *start, *scratch; 
128 - struct crypto_comp *tfm; 129 - int cpu; 130 - 131 - /* check whether datagram len is larger than threshold */ 132 - if (skb->len < ipcd->threshold) { 133 - goto out_ok; 134 - } 135 - 136 - if (skb_linearize_cow(skb)) 137 - goto out_ok; 138 - 139 - /* compression */ 140 - plen = skb->len; 141 - dlen = IPCOMP_SCRATCH_SIZE; 142 - start = skb->data; 143 - 144 - cpu = get_cpu(); 145 - scratch = *per_cpu_ptr(ipcomp6_scratches, cpu); 146 - tfm = *per_cpu_ptr(ipcd->tfms, cpu); 147 - 148 - local_bh_disable(); 149 - err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); 150 - local_bh_enable(); 151 - if (err || (dlen + sizeof(*ipch)) >= plen) { 152 - put_cpu(); 153 - goto out_ok; 154 - } 155 - memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); 156 - put_cpu(); 157 - pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); 158 - 159 - /* insert ipcomp header and replace datagram */ 160 - ipch = ip_comp_hdr(skb); 161 - ipch->nexthdr = *skb_mac_header(skb); 162 - ipch->flags = 0; 163 - ipch->cpi = htons((u16 )ntohl(x->id.spi)); 164 - *skb_mac_header(skb) = IPPROTO_COMP; 165 - 166 - out_ok: 167 - skb_push(skb, -skb_network_offset(skb)); 168 - 169 - return 0; 170 - } 171 - 172 53 static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 173 54 int type, int code, int offset, __be32 info) 174 55 { ··· 132 251 return err; 133 252 } 134 253 135 - static void ipcomp6_free_scratches(void) 136 - { 137 - int i; 138 - void **scratches; 139 - 140 - if (--ipcomp6_scratch_users) 141 - return; 142 - 143 - scratches = ipcomp6_scratches; 144 - if (!scratches) 145 - return; 146 - 147 - for_each_possible_cpu(i) { 148 - void *scratch = *per_cpu_ptr(scratches, i); 149 - 150 - vfree(scratch); 151 - } 152 - 153 - free_percpu(scratches); 154 - } 155 - 156 - static void **ipcomp6_alloc_scratches(void) 157 - { 158 - int i; 159 - void **scratches; 160 - 161 - if (ipcomp6_scratch_users++) 162 - return ipcomp6_scratches; 163 - 164 - scratches = alloc_percpu(void *); 165 - if 
(!scratches) 166 - return NULL; 167 - 168 - ipcomp6_scratches = scratches; 169 - 170 - for_each_possible_cpu(i) { 171 - void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); 172 - if (!scratch) 173 - return NULL; 174 - *per_cpu_ptr(scratches, i) = scratch; 175 - } 176 - 177 - return scratches; 178 - } 179 - 180 - static void ipcomp6_free_tfms(struct crypto_comp **tfms) 181 - { 182 - struct ipcomp6_tfms *pos; 183 - int cpu; 184 - 185 - list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 186 - if (pos->tfms == tfms) 187 - break; 188 - } 189 - 190 - BUG_TRAP(pos); 191 - 192 - if (--pos->users) 193 - return; 194 - 195 - list_del(&pos->list); 196 - kfree(pos); 197 - 198 - if (!tfms) 199 - return; 200 - 201 - for_each_possible_cpu(cpu) { 202 - struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 203 - crypto_free_comp(tfm); 204 - } 205 - free_percpu(tfms); 206 - } 207 - 208 - static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name) 209 - { 210 - struct ipcomp6_tfms *pos; 211 - struct crypto_comp **tfms; 212 - int cpu; 213 - 214 - /* This can be any valid CPU ID so we don't need locking. 
*/ 215 - cpu = raw_smp_processor_id(); 216 - 217 - list_for_each_entry(pos, &ipcomp6_tfms_list, list) { 218 - struct crypto_comp *tfm; 219 - 220 - tfms = pos->tfms; 221 - tfm = *per_cpu_ptr(tfms, cpu); 222 - 223 - if (!strcmp(crypto_comp_name(tfm), alg_name)) { 224 - pos->users++; 225 - return tfms; 226 - } 227 - } 228 - 229 - pos = kmalloc(sizeof(*pos), GFP_KERNEL); 230 - if (!pos) 231 - return NULL; 232 - 233 - pos->users = 1; 234 - INIT_LIST_HEAD(&pos->list); 235 - list_add(&pos->list, &ipcomp6_tfms_list); 236 - 237 - pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 238 - if (!tfms) 239 - goto error; 240 - 241 - for_each_possible_cpu(cpu) { 242 - struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 243 - CRYPTO_ALG_ASYNC); 244 - if (IS_ERR(tfm)) 245 - goto error; 246 - *per_cpu_ptr(tfms, cpu) = tfm; 247 - } 248 - 249 - return tfms; 250 - 251 - error: 252 - ipcomp6_free_tfms(tfms); 253 - return NULL; 254 - } 255 - 256 - static void ipcomp6_free_data(struct ipcomp_data *ipcd) 257 - { 258 - if (ipcd->tfms) 259 - ipcomp6_free_tfms(ipcd->tfms); 260 - ipcomp6_free_scratches(); 261 - } 262 - 263 - static void ipcomp6_destroy(struct xfrm_state *x) 264 - { 265 - struct ipcomp_data *ipcd = x->data; 266 - if (!ipcd) 267 - return; 268 - xfrm_state_delete_tunnel(x); 269 - mutex_lock(&ipcomp6_resource_mutex); 270 - ipcomp6_free_data(ipcd); 271 - mutex_unlock(&ipcomp6_resource_mutex); 272 - kfree(ipcd); 273 - 274 - xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); 275 - } 276 - 277 254 static int ipcomp6_init_state(struct xfrm_state *x) 278 255 { 279 256 int err; 280 257 struct ipcomp_data *ipcd; 281 258 struct xfrm_algo_desc *calg_desc; 282 - 283 - err = -EINVAL; 284 - if (!x->calg) 285 - goto out; 286 - 287 - if (x->encap) 288 - goto out; 289 259 290 260 x->props.header_len = 0; 291 261 switch (x->props.mode) { ··· 149 417 goto out; 150 418 } 151 419 152 - err = -ENOMEM; 153 - ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL); 154 - if (!ipcd) 420 + err = 
ipcomp_init_state(x); 421 + if (err) 155 422 goto out; 156 - 157 - mutex_lock(&ipcomp6_resource_mutex); 158 - if (!ipcomp6_alloc_scratches()) 159 - goto error; 160 - 161 - ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name); 162 - if (!ipcd->tfms) 163 - goto error; 164 - mutex_unlock(&ipcomp6_resource_mutex); 165 423 166 424 if (x->props.mode == XFRM_MODE_TUNNEL) { 167 425 err = ipcomp6_tunnel_attach(x); ··· 159 437 goto error_tunnel; 160 438 } 161 439 162 - calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0); 163 - BUG_ON(!calg_desc); 164 - ipcd->threshold = calg_desc->uinfo.comp.threshold; 165 - x->data = ipcd; 166 440 err = 0; 167 441 out: 168 442 return err; 169 443 error_tunnel: 170 - mutex_lock(&ipcomp6_resource_mutex); 171 - error: 172 - ipcomp6_free_data(ipcd); 173 - mutex_unlock(&ipcomp6_resource_mutex); 174 - kfree(ipcd); 444 + ipcomp_destroy(x); 175 445 176 446 goto out; 177 447 } ··· 174 460 .owner = THIS_MODULE, 175 461 .proto = IPPROTO_COMP, 176 462 .init_state = ipcomp6_init_state, 177 - .destructor = ipcomp6_destroy, 178 - .input = ipcomp6_input, 179 - .output = ipcomp6_output, 463 + .destructor = ipcomp_destroy, 464 + .input = ipcomp_input, 465 + .output = ipcomp_output, 180 466 .hdr_offset = xfrm6_find_1stfragopt, 181 467 }; 182 468
+6
net/xfrm/Kconfig
··· 46 46 47 47 If unsure, say N. 48 48 49 + config XFRM_IPCOMP 50 + tristate 51 + select XFRM 52 + select CRYPTO 53 + select CRYPTO_DEFLATE 54 + 49 55 config NET_KEY 50 56 tristate "PF_KEY sockets" 51 57 select XFRM
+1
net/xfrm/Makefile
··· 6 6 xfrm_input.o xfrm_output.o xfrm_algo.o 7 7 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o 8 8 obj-$(CONFIG_XFRM_USER) += xfrm_user.o 9 + obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o 9 10
+349
net/xfrm/xfrm_ipcomp.c
··· 1 + /* 2 + * IP Payload Compression Protocol (IPComp) - RFC3173. 3 + * 4 + * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> 5 + * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms of the GNU General Public License as published by the Free 9 + * Software Foundation; either version 2 of the License, or (at your option) 10 + * any later version. 11 + * 12 + * Todo: 13 + * - Tunable compression parameters. 14 + * - Compression stats. 15 + * - Adaptive compression. 16 + */ 17 + 18 + #include <linux/crypto.h> 19 + #include <linux/err.h> 20 + #include <linux/list.h> 21 + #include <linux/module.h> 22 + #include <linux/mutex.h> 23 + #include <linux/percpu.h> 24 + #include <linux/rtnetlink.h> 25 + #include <linux/smp.h> 26 + #include <linux/vmalloc.h> 27 + #include <net/ip.h> 28 + #include <net/ipcomp.h> 29 + #include <net/xfrm.h> 30 + 31 + struct ipcomp_tfms { 32 + struct list_head list; 33 + struct crypto_comp **tfms; 34 + int users; 35 + }; 36 + 37 + static DEFINE_MUTEX(ipcomp_resource_mutex); 38 + static void **ipcomp_scratches; 39 + static int ipcomp_scratch_users; 40 + static LIST_HEAD(ipcomp_tfms_list); 41 + 42 + static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) 43 + { 44 + struct ipcomp_data *ipcd = x->data; 45 + const int plen = skb->len; 46 + int dlen = IPCOMP_SCRATCH_SIZE; 47 + const u8 *start = skb->data; 48 + const int cpu = get_cpu(); 49 + u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); 50 + struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); 51 + int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); 52 + 53 + if (err) 54 + goto out; 55 + 56 + if (dlen < (plen + sizeof(struct ip_comp_hdr))) { 57 + err = -EINVAL; 58 + goto out; 59 + } 60 + 61 + err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC); 62 + if (err) 63 + goto out; 64 + 65 + skb->truesize += dlen - plen; 66 + 
__skb_put(skb, dlen - plen); 67 + skb_copy_to_linear_data(skb, scratch, dlen); 68 + out: 69 + put_cpu(); 70 + return err; 71 + } 72 + 73 + int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) 74 + { 75 + int nexthdr; 76 + int err = -ENOMEM; 77 + struct ip_comp_hdr *ipch; 78 + 79 + if (skb_linearize_cow(skb)) 80 + goto out; 81 + 82 + skb->ip_summed = CHECKSUM_NONE; 83 + 84 + /* Remove ipcomp header and decompress original payload */ 85 + ipch = (void *)skb->data; 86 + nexthdr = ipch->nexthdr; 87 + 88 + skb->transport_header = skb->network_header + sizeof(*ipch); 89 + __skb_pull(skb, sizeof(*ipch)); 90 + err = ipcomp_decompress(x, skb); 91 + if (err) 92 + goto out; 93 + 94 + err = nexthdr; 95 + 96 + out: 97 + return err; 98 + } 99 + EXPORT_SYMBOL_GPL(ipcomp_input); 100 + 101 + static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) 102 + { 103 + struct ipcomp_data *ipcd = x->data; 104 + const int plen = skb->len; 105 + int dlen = IPCOMP_SCRATCH_SIZE; 106 + u8 *start = skb->data; 107 + const int cpu = get_cpu(); 108 + u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); 109 + struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); 110 + int err; 111 + 112 + local_bh_disable(); 113 + err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); 114 + local_bh_enable(); 115 + if (err) 116 + goto out; 117 + 118 + if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) { 119 + err = -EMSGSIZE; 120 + goto out; 121 + } 122 + 123 + memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); 124 + put_cpu(); 125 + 126 + pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); 127 + return 0; 128 + 129 + out: 130 + put_cpu(); 131 + return err; 132 + } 133 + 134 + int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) 135 + { 136 + int err; 137 + struct ip_comp_hdr *ipch; 138 + struct ipcomp_data *ipcd = x->data; 139 + 140 + if (skb->len < ipcd->threshold) { 141 + /* Don't bother compressing */ 142 + goto out_ok; 143 + } 144 + 145 + if (skb_linearize_cow(skb)) 146 + 
goto out_ok; 147 + 148 + err = ipcomp_compress(x, skb); 149 + 150 + if (err) { 151 + goto out_ok; 152 + } 153 + 154 + /* Install ipcomp header, convert into ipcomp datagram. */ 155 + ipch = ip_comp_hdr(skb); 156 + ipch->nexthdr = *skb_mac_header(skb); 157 + ipch->flags = 0; 158 + ipch->cpi = htons((u16 )ntohl(x->id.spi)); 159 + *skb_mac_header(skb) = IPPROTO_COMP; 160 + out_ok: 161 + skb_push(skb, -skb_network_offset(skb)); 162 + return 0; 163 + } 164 + EXPORT_SYMBOL_GPL(ipcomp_output); 165 + 166 + static void ipcomp_free_scratches(void) 167 + { 168 + int i; 169 + void **scratches; 170 + 171 + if (--ipcomp_scratch_users) 172 + return; 173 + 174 + scratches = ipcomp_scratches; 175 + if (!scratches) 176 + return; 177 + 178 + for_each_possible_cpu(i) 179 + vfree(*per_cpu_ptr(scratches, i)); 180 + 181 + free_percpu(scratches); 182 + } 183 + 184 + static void **ipcomp_alloc_scratches(void) 185 + { 186 + int i; 187 + void **scratches; 188 + 189 + if (ipcomp_scratch_users++) 190 + return ipcomp_scratches; 191 + 192 + scratches = alloc_percpu(void *); 193 + if (!scratches) 194 + return NULL; 195 + 196 + ipcomp_scratches = scratches; 197 + 198 + for_each_possible_cpu(i) { 199 + void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE); 200 + if (!scratch) 201 + return NULL; 202 + *per_cpu_ptr(scratches, i) = scratch; 203 + } 204 + 205 + return scratches; 206 + } 207 + 208 + static void ipcomp_free_tfms(struct crypto_comp **tfms) 209 + { 210 + struct ipcomp_tfms *pos; 211 + int cpu; 212 + 213 + list_for_each_entry(pos, &ipcomp_tfms_list, list) { 214 + if (pos->tfms == tfms) 215 + break; 216 + } 217 + 218 + BUG_TRAP(pos); 219 + 220 + if (--pos->users) 221 + return; 222 + 223 + list_del(&pos->list); 224 + kfree(pos); 225 + 226 + if (!tfms) 227 + return; 228 + 229 + for_each_possible_cpu(cpu) { 230 + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); 231 + crypto_free_comp(tfm); 232 + } 233 + free_percpu(tfms); 234 + } 235 + 236 + static struct crypto_comp **ipcomp_alloc_tfms(const char 
*alg_name) 237 + { 238 + struct ipcomp_tfms *pos; 239 + struct crypto_comp **tfms; 240 + int cpu; 241 + 242 + /* This can be any valid CPU ID so we don't need locking. */ 243 + cpu = raw_smp_processor_id(); 244 + 245 + list_for_each_entry(pos, &ipcomp_tfms_list, list) { 246 + struct crypto_comp *tfm; 247 + 248 + tfms = pos->tfms; 249 + tfm = *per_cpu_ptr(tfms, cpu); 250 + 251 + if (!strcmp(crypto_comp_name(tfm), alg_name)) { 252 + pos->users++; 253 + return tfms; 254 + } 255 + } 256 + 257 + pos = kmalloc(sizeof(*pos), GFP_KERNEL); 258 + if (!pos) 259 + return NULL; 260 + 261 + pos->users = 1; 262 + INIT_LIST_HEAD(&pos->list); 263 + list_add(&pos->list, &ipcomp_tfms_list); 264 + 265 + pos->tfms = tfms = alloc_percpu(struct crypto_comp *); 266 + if (!tfms) 267 + goto error; 268 + 269 + for_each_possible_cpu(cpu) { 270 + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, 271 + CRYPTO_ALG_ASYNC); 272 + if (IS_ERR(tfm)) 273 + goto error; 274 + *per_cpu_ptr(tfms, cpu) = tfm; 275 + } 276 + 277 + return tfms; 278 + 279 + error: 280 + ipcomp_free_tfms(tfms); 281 + return NULL; 282 + } 283 + 284 + static void ipcomp_free_data(struct ipcomp_data *ipcd) 285 + { 286 + if (ipcd->tfms) 287 + ipcomp_free_tfms(ipcd->tfms); 288 + ipcomp_free_scratches(); 289 + } 290 + 291 + void ipcomp_destroy(struct xfrm_state *x) 292 + { 293 + struct ipcomp_data *ipcd = x->data; 294 + if (!ipcd) 295 + return; 296 + xfrm_state_delete_tunnel(x); 297 + mutex_lock(&ipcomp_resource_mutex); 298 + ipcomp_free_data(ipcd); 299 + mutex_unlock(&ipcomp_resource_mutex); 300 + kfree(ipcd); 301 + } 302 + EXPORT_SYMBOL_GPL(ipcomp_destroy); 303 + 304 + int ipcomp_init_state(struct xfrm_state *x) 305 + { 306 + int err; 307 + struct ipcomp_data *ipcd; 308 + struct xfrm_algo_desc *calg_desc; 309 + 310 + err = -EINVAL; 311 + if (!x->calg) 312 + goto out; 313 + 314 + if (x->encap) 315 + goto out; 316 + 317 + err = -ENOMEM; 318 + ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL); 319 + if (!ipcd) 320 + goto out; 321 + 322 
+ mutex_lock(&ipcomp_resource_mutex); 323 + if (!ipcomp_alloc_scratches()) 324 + goto error; 325 + 326 + ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name); 327 + if (!ipcd->tfms) 328 + goto error; 329 + mutex_unlock(&ipcomp_resource_mutex); 330 + 331 + calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0); 332 + BUG_ON(!calg_desc); 333 + ipcd->threshold = calg_desc->uinfo.comp.threshold; 334 + x->data = ipcd; 335 + err = 0; 336 + out: 337 + return err; 338 + 339 + error: 340 + ipcomp_free_data(ipcd); 341 + mutex_unlock(&ipcomp_resource_mutex); 342 + kfree(ipcd); 343 + goto out; 344 + } 345 + EXPORT_SYMBOL_GPL(ipcomp_init_state); 346 + 347 + MODULE_LICENSE("GPL"); 348 + MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173"); 349 + MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");