Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
ipsec: ipcomp - Decompress into frags if necessary
ipsec: ipcomp - Merge IPComp implementations
pkt_sched: Fix locking in shutdown_scheduler_queue()

+415 -608
+6
include/net/ipcomp.h
···
 
 struct ip_comp_hdr;
 struct sk_buff;
+struct xfrm_state;
+
+int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
+int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb);
+void ipcomp_destroy(struct xfrm_state *x);
+int ipcomp_init_state(struct xfrm_state *x);
 
 static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb)
 {
+1 -3
net/ipv4/Kconfig
···
 
 config INET_IPCOMP
     tristate "IP: IPComp transformation"
-    select XFRM
     select INET_XFRM_TUNNEL
-    select CRYPTO
-    select CRYPTO_DEFLATE
+    select XFRM_IPCOMP
     ---help---
       Support for IP Payload Compression Protocol (IPComp) (RFC3173),
       typically needed for IPsec.
+7 -308
net/ipv4/ipcomp.c
···
  * - Adaptive compression.
  */
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <linux/err.h>
-#include <linux/pfkeyv2.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/list.h>
-#include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
-#include <linux/mutex.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/icmp.h>
 #include <net/ipcomp.h>
 #include <net/protocol.h>
-
-struct ipcomp_tfms {
-    struct list_head list;
-    struct crypto_comp **tfms;
-    int users;
-};
-
-static DEFINE_MUTEX(ipcomp_resource_mutex);
-static void **ipcomp_scratches;
-static int ipcomp_scratch_users;
-static LIST_HEAD(ipcomp_tfms_list);
-
-static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
-{
-    struct ipcomp_data *ipcd = x->data;
-    const int plen = skb->len;
-    int dlen = IPCOMP_SCRATCH_SIZE;
-    const u8 *start = skb->data;
-    const int cpu = get_cpu();
-    u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-    struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-    int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
-
-    if (err)
-        goto out;
-
-    if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
-        err = -EINVAL;
-        goto out;
-    }
-
-    err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
-    if (err)
-        goto out;
-
-    skb->truesize += dlen - plen;
-    __skb_put(skb, dlen - plen);
-    skb_copy_to_linear_data(skb, scratch, dlen);
-out:
-    put_cpu();
-    return err;
-}
-
-static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
-{
-    int nexthdr;
-    int err = -ENOMEM;
-    struct ip_comp_hdr *ipch;
-
-    if (skb_linearize_cow(skb))
-        goto out;
-
-    skb->ip_summed = CHECKSUM_NONE;
-
-    /* Remove ipcomp header and decompress original payload */
-    ipch = (void *)skb->data;
-    nexthdr = ipch->nexthdr;
-
-    skb->transport_header = skb->network_header + sizeof(*ipch);
-    __skb_pull(skb, sizeof(*ipch));
-    err = ipcomp_decompress(x, skb);
-    if (err)
-        goto out;
-
-    err = nexthdr;
-
-out:
-    return err;
-}
-
-static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
-{
-    struct ipcomp_data *ipcd = x->data;
-    const int plen = skb->len;
-    int dlen = IPCOMP_SCRATCH_SIZE;
-    u8 *start = skb->data;
-    const int cpu = get_cpu();
-    u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-    struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-    int err;
-
-    local_bh_disable();
-    err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-    local_bh_enable();
-    if (err)
-        goto out;
-
-    if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
-        err = -EMSGSIZE;
-        goto out;
-    }
-
-    memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-    put_cpu();
-
-    pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
-    return 0;
-
-out:
-    put_cpu();
-    return err;
-}
-
-static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
-{
-    int err;
-    struct ip_comp_hdr *ipch;
-    struct ipcomp_data *ipcd = x->data;
-
-    if (skb->len < ipcd->threshold) {
-        /* Don't bother compressing */
-        goto out_ok;
-    }
-
-    if (skb_linearize_cow(skb))
-        goto out_ok;
-
-    err = ipcomp_compress(x, skb);
-
-    if (err) {
-        goto out_ok;
-    }
-
-    /* Install ipcomp header, convert into ipcomp datagram. */
-    ipch = ip_comp_hdr(skb);
-    ipch->nexthdr = *skb_mac_header(skb);
-    ipch->flags = 0;
-    ipch->cpi = htons((u16)ntohl(x->id.spi));
-    *skb_mac_header(skb) = IPPROTO_COMP;
-out_ok:
-    skb_push(skb, -skb_network_offset(skb));
-    return 0;
-}
+#include <net/sock.h>
 
 static void ipcomp4_err(struct sk_buff *skb, u32 info)
 {
···
     return err;
 }
 
-static void ipcomp_free_scratches(void)
-{
-    int i;
-    void **scratches;
-
-    if (--ipcomp_scratch_users)
-        return;
-
-    scratches = ipcomp_scratches;
-    if (!scratches)
-        return;
-
-    for_each_possible_cpu(i)
-        vfree(*per_cpu_ptr(scratches, i));
-
-    free_percpu(scratches);
-}
-
-static void **ipcomp_alloc_scratches(void)
-{
-    int i;
-    void **scratches;
-
-    if (ipcomp_scratch_users++)
-        return ipcomp_scratches;
-
-    scratches = alloc_percpu(void *);
-    if (!scratches)
-        return NULL;
-
-    ipcomp_scratches = scratches;
-
-    for_each_possible_cpu(i) {
-        void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
-        if (!scratch)
-            return NULL;
-        *per_cpu_ptr(scratches, i) = scratch;
-    }
-
-    return scratches;
-}
-
-static void ipcomp_free_tfms(struct crypto_comp **tfms)
-{
-    struct ipcomp_tfms *pos;
-    int cpu;
-
-    list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-        if (pos->tfms == tfms)
-            break;
-    }
-
-    BUG_TRAP(pos);
-
-    if (--pos->users)
-        return;
-
-    list_del(&pos->list);
-    kfree(pos);
-
-    if (!tfms)
-        return;
-
-    for_each_possible_cpu(cpu) {
-        struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
-        crypto_free_comp(tfm);
-    }
-    free_percpu(tfms);
-}
-
-static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
-{
-    struct ipcomp_tfms *pos;
-    struct crypto_comp **tfms;
-    int cpu;
-
-    /* This can be any valid CPU ID so we don't need locking. */
-    cpu = raw_smp_processor_id();
-
-    list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-        struct crypto_comp *tfm;
-
-        tfms = pos->tfms;
-        tfm = *per_cpu_ptr(tfms, cpu);
-
-        if (!strcmp(crypto_comp_name(tfm), alg_name)) {
-            pos->users++;
-            return tfms;
-        }
-    }
-
-    pos = kmalloc(sizeof(*pos), GFP_KERNEL);
-    if (!pos)
-        return NULL;
-
-    pos->users = 1;
-    INIT_LIST_HEAD(&pos->list);
-    list_add(&pos->list, &ipcomp_tfms_list);
-
-    pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
-    if (!tfms)
-        goto error;
-
-    for_each_possible_cpu(cpu) {
-        struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
-                                                    CRYPTO_ALG_ASYNC);
-        if (IS_ERR(tfm))
-            goto error;
-        *per_cpu_ptr(tfms, cpu) = tfm;
-    }
-
-    return tfms;
-
-error:
-    ipcomp_free_tfms(tfms);
-    return NULL;
-}
-
-static void ipcomp_free_data(struct ipcomp_data *ipcd)
-{
-    if (ipcd->tfms)
-        ipcomp_free_tfms(ipcd->tfms);
-    ipcomp_free_scratches();
-}
-
-static void ipcomp_destroy(struct xfrm_state *x)
-{
-    struct ipcomp_data *ipcd = x->data;
-    if (!ipcd)
-        return;
-    xfrm_state_delete_tunnel(x);
-    mutex_lock(&ipcomp_resource_mutex);
-    ipcomp_free_data(ipcd);
-    mutex_unlock(&ipcomp_resource_mutex);
-    kfree(ipcd);
-}
-
-static int ipcomp_init_state(struct xfrm_state *x)
+static int ipcomp4_init_state(struct xfrm_state *x)
 {
     int err;
     struct ipcomp_data *ipcd;
     struct xfrm_algo_desc *calg_desc;
-
-    err = -EINVAL;
-    if (!x->calg)
-        goto out;
-
-    if (x->encap)
-        goto out;
 
     x->props.header_len = 0;
     switch (x->props.mode) {
···
         goto out;
     }
 
-    err = -ENOMEM;
-    ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
-    if (!ipcd)
+    err = ipcomp_init_state(x);
+    if (err)
         goto out;
-
-    mutex_lock(&ipcomp_resource_mutex);
-    if (!ipcomp_alloc_scratches())
-        goto error;
-
-    ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
-    if (!ipcd->tfms)
-        goto error;
-    mutex_unlock(&ipcomp_resource_mutex);
 
     if (x->props.mode == XFRM_MODE_TUNNEL) {
         err = ipcomp_tunnel_attach(x);
···
             goto error_tunnel;
     }
 
-    calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
-    BUG_ON(!calg_desc);
-    ipcd->threshold = calg_desc->uinfo.comp.threshold;
-    x->data = ipcd;
     err = 0;
 out:
     return err;
 
 error_tunnel:
-    mutex_lock(&ipcomp_resource_mutex);
-error:
-    ipcomp_free_data(ipcd);
-    mutex_unlock(&ipcomp_resource_mutex);
-    kfree(ipcd);
+    ipcomp_destroy(x);
     goto out;
 }
···
     .description = "IPCOMP4",
     .owner = THIS_MODULE,
     .proto = IPPROTO_COMP,
-    .init_state = ipcomp_init_state,
+    .init_state = ipcomp4_init_state,
     .destructor = ipcomp_destroy,
     .input = ipcomp_input,
     .output = ipcomp_output
···
 module_exit(ipcomp4_fini);
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
+MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp/IPv4) - RFC3173");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
 
 MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_COMP);
+1 -3
net/ipv6/Kconfig
···
 
 config INET6_IPCOMP
     tristate "IPv6: IPComp transformation"
-    select XFRM
     select INET6_XFRM_TUNNEL
-    select CRYPTO
-    select CRYPTO_DEFLATE
+    select XFRM_IPCOMP
     ---help---
       Support for IP Payload Compression Protocol (IPComp) (RFC3173),
       typically needed for IPsec.
+6 -292
net/ipv6/ipcomp6.c
···
 #include <linux/icmpv6.h>
 #include <linux/mutex.h>
 
-struct ipcomp6_tfms {
-    struct list_head list;
-    struct crypto_comp **tfms;
-    int users;
-};
-
-static DEFINE_MUTEX(ipcomp6_resource_mutex);
-static void **ipcomp6_scratches;
-static int ipcomp6_scratch_users;
-static LIST_HEAD(ipcomp6_tfms_list);
-
-static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
-{
-    int nexthdr;
-    int err = -ENOMEM;
-    struct ip_comp_hdr *ipch;
-    int plen, dlen;
-    struct ipcomp_data *ipcd = x->data;
-    u8 *start, *scratch;
-    struct crypto_comp *tfm;
-    int cpu;
-
-    if (skb_linearize_cow(skb))
-        goto out;
-
-    skb->ip_summed = CHECKSUM_NONE;
-
-    /* Remove ipcomp header and decompress original payload */
-    ipch = (void *)skb->data;
-    nexthdr = ipch->nexthdr;
-
-    skb->transport_header = skb->network_header + sizeof(*ipch);
-    __skb_pull(skb, sizeof(*ipch));
-
-    /* decompression */
-    plen = skb->len;
-    dlen = IPCOMP_SCRATCH_SIZE;
-    start = skb->data;
-
-    cpu = get_cpu();
-    scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
-    tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-
-    err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
-    if (err)
-        goto out_put_cpu;
-
-    if (dlen < (plen + sizeof(*ipch))) {
-        err = -EINVAL;
-        goto out_put_cpu;
-    }
-
-    err = pskb_expand_head(skb, 0, dlen - plen, GFP_ATOMIC);
-    if (err) {
-        goto out_put_cpu;
-    }
-
-    skb->truesize += dlen - plen;
-    __skb_put(skb, dlen - plen);
-    skb_copy_to_linear_data(skb, scratch, dlen);
-    err = nexthdr;
-
-out_put_cpu:
-    put_cpu();
-out:
-    return err;
-}
-
-static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
-{
-    int err;
-    struct ip_comp_hdr *ipch;
-    struct ipcomp_data *ipcd = x->data;
-    int plen, dlen;
-    u8 *start, *scratch;
-    struct crypto_comp *tfm;
-    int cpu;
-
-    /* check whether datagram len is larger than threshold */
-    if (skb->len < ipcd->threshold) {
-        goto out_ok;
-    }
-
-    if (skb_linearize_cow(skb))
-        goto out_ok;
-
-    /* compression */
-    plen = skb->len;
-    dlen = IPCOMP_SCRATCH_SIZE;
-    start = skb->data;
-
-    cpu = get_cpu();
-    scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
-    tfm = *per_cpu_ptr(ipcd->tfms, cpu);
-
-    local_bh_disable();
-    err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-    local_bh_enable();
-    if (err || (dlen + sizeof(*ipch)) >= plen) {
-        put_cpu();
-        goto out_ok;
-    }
-    memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-    put_cpu();
-    pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
-
-    /* insert ipcomp header and replace datagram */
-    ipch = ip_comp_hdr(skb);
-    ipch->nexthdr = *skb_mac_header(skb);
-    ipch->flags = 0;
-    ipch->cpi = htons((u16)ntohl(x->id.spi));
-    *skb_mac_header(skb) = IPPROTO_COMP;
-
-out_ok:
-    skb_push(skb, -skb_network_offset(skb));
-
-    return 0;
-}
-
 static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                         int type, int code, int offset, __be32 info)
 {
···
     return err;
 }
 
-static void ipcomp6_free_scratches(void)
-{
-    int i;
-    void **scratches;
-
-    if (--ipcomp6_scratch_users)
-        return;
-
-    scratches = ipcomp6_scratches;
-    if (!scratches)
-        return;
-
-    for_each_possible_cpu(i) {
-        void *scratch = *per_cpu_ptr(scratches, i);
-
-        vfree(scratch);
-    }
-
-    free_percpu(scratches);
-}
-
-static void **ipcomp6_alloc_scratches(void)
-{
-    int i;
-    void **scratches;
-
-    if (ipcomp6_scratch_users++)
-        return ipcomp6_scratches;
-
-    scratches = alloc_percpu(void *);
-    if (!scratches)
-        return NULL;
-
-    ipcomp6_scratches = scratches;
-
-    for_each_possible_cpu(i) {
-        void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
-        if (!scratch)
-            return NULL;
-        *per_cpu_ptr(scratches, i) = scratch;
-    }
-
-    return scratches;
-}
-
-static void ipcomp6_free_tfms(struct crypto_comp **tfms)
-{
-    struct ipcomp6_tfms *pos;
-    int cpu;
-
-    list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
-        if (pos->tfms == tfms)
-            break;
-    }
-
-    BUG_TRAP(pos);
-
-    if (--pos->users)
-        return;
-
-    list_del(&pos->list);
-    kfree(pos);
-
-    if (!tfms)
-        return;
-
-    for_each_possible_cpu(cpu) {
-        struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
-        crypto_free_comp(tfm);
-    }
-    free_percpu(tfms);
-}
-
-static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
-{
-    struct ipcomp6_tfms *pos;
-    struct crypto_comp **tfms;
-    int cpu;
-
-    /* This can be any valid CPU ID so we don't need locking. */
-    cpu = raw_smp_processor_id();
-
-    list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
-        struct crypto_comp *tfm;
-
-        tfms = pos->tfms;
-        tfm = *per_cpu_ptr(tfms, cpu);
-
-        if (!strcmp(crypto_comp_name(tfm), alg_name)) {
-            pos->users++;
-            return tfms;
-        }
-    }
-
-    pos = kmalloc(sizeof(*pos), GFP_KERNEL);
-    if (!pos)
-        return NULL;
-
-    pos->users = 1;
-    INIT_LIST_HEAD(&pos->list);
-    list_add(&pos->list, &ipcomp6_tfms_list);
-
-    pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
-    if (!tfms)
-        goto error;
-
-    for_each_possible_cpu(cpu) {
-        struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
-                                                    CRYPTO_ALG_ASYNC);
-        if (IS_ERR(tfm))
-            goto error;
-        *per_cpu_ptr(tfms, cpu) = tfm;
-    }
-
-    return tfms;
-
-error:
-    ipcomp6_free_tfms(tfms);
-    return NULL;
-}
-
-static void ipcomp6_free_data(struct ipcomp_data *ipcd)
-{
-    if (ipcd->tfms)
-        ipcomp6_free_tfms(ipcd->tfms);
-    ipcomp6_free_scratches();
-}
-
-static void ipcomp6_destroy(struct xfrm_state *x)
-{
-    struct ipcomp_data *ipcd = x->data;
-    if (!ipcd)
-        return;
-    xfrm_state_delete_tunnel(x);
-    mutex_lock(&ipcomp6_resource_mutex);
-    ipcomp6_free_data(ipcd);
-    mutex_unlock(&ipcomp6_resource_mutex);
-    kfree(ipcd);
-
-    xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
-}
-
 static int ipcomp6_init_state(struct xfrm_state *x)
 {
     int err;
     struct ipcomp_data *ipcd;
     struct xfrm_algo_desc *calg_desc;
-
-    err = -EINVAL;
-    if (!x->calg)
-        goto out;
-
-    if (x->encap)
-        goto out;
 
     x->props.header_len = 0;
     switch (x->props.mode) {
···
         goto out;
     }
 
-    err = -ENOMEM;
-    ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
-    if (!ipcd)
+    err = ipcomp_init_state(x);
+    if (err)
         goto out;
-
-    mutex_lock(&ipcomp6_resource_mutex);
-    if (!ipcomp6_alloc_scratches())
-        goto error;
-
-    ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
-    if (!ipcd->tfms)
-        goto error;
-    mutex_unlock(&ipcomp6_resource_mutex);
 
     if (x->props.mode == XFRM_MODE_TUNNEL) {
         err = ipcomp6_tunnel_attach(x);
···
             goto error_tunnel;
     }
 
-    calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
-    BUG_ON(!calg_desc);
-    ipcd->threshold = calg_desc->uinfo.comp.threshold;
-    x->data = ipcd;
     err = 0;
 out:
     return err;
 error_tunnel:
-    mutex_lock(&ipcomp6_resource_mutex);
-error:
-    ipcomp6_free_data(ipcd);
-    mutex_unlock(&ipcomp6_resource_mutex);
-    kfree(ipcd);
+    ipcomp_destroy(x);
 
     goto out;
 }
···
     .owner = THIS_MODULE,
     .proto = IPPROTO_COMP,
     .init_state = ipcomp6_init_state,
-    .destructor = ipcomp6_destroy,
-    .input = ipcomp6_input,
-    .output = ipcomp6_output,
+    .destructor = ipcomp_destroy,
+    .input = ipcomp_input,
+    .output = ipcomp_output,
     .hdr_offset = xfrm6_find_1stfragopt,
 };
+2 -2
net/sched/sch_generic.c
···
         dev_queue->qdisc = qdisc_default;
         dev_queue->qdisc_sleeping = qdisc_default;
 
-        spin_lock(root_lock);
+        spin_lock_bh(root_lock);
         qdisc_destroy(qdisc);
-        spin_unlock(root_lock);
+        spin_unlock_bh(root_lock);
     }
 }
 
+6
net/xfrm/Kconfig
···
 
       If unsure, say N.
 
+config XFRM_IPCOMP
+    tristate
+    select XFRM
+    select CRYPTO
+    select CRYPTO_DEFLATE
+
 config NET_KEY
     tristate "PF_KEY sockets"
     select XFRM
+1
net/xfrm/Makefile
···
               xfrm_input.o xfrm_output.o xfrm_algo.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
+obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
 
+385
net/xfrm/xfrm_ipcomp.c
···
+/*
+ * IP Payload Compression Protocol (IPComp) - RFC3173.
+ *
+ * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Todo:
+ *   - Tunable compression parameters.
+ *   - Compression stats.
+ *   - Adaptive compression.
+ */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rtnetlink.h>
+#include <linux/smp.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/ipcomp.h>
+#include <net/xfrm.h>
+
+struct ipcomp_tfms {
+    struct list_head list;
+    struct crypto_comp **tfms;
+    int users;
+};
+
+static DEFINE_MUTEX(ipcomp_resource_mutex);
+static void **ipcomp_scratches;
+static int ipcomp_scratch_users;
+static LIST_HEAD(ipcomp_tfms_list);
+
+static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
+{
+    struct ipcomp_data *ipcd = x->data;
+    const int plen = skb->len;
+    int dlen = IPCOMP_SCRATCH_SIZE;
+    const u8 *start = skb->data;
+    const int cpu = get_cpu();
+    u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+    struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+    int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
+    int len;
+
+    if (err)
+        goto out;
+
+    if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
+        err = -EINVAL;
+        goto out;
+    }
+
+    len = dlen - plen;
+    if (len > skb_tailroom(skb))
+        len = skb_tailroom(skb);
+
+    skb->truesize += len;
+    __skb_put(skb, len);
+
+    len += plen;
+    skb_copy_to_linear_data(skb, scratch, len);
+
+    while ((scratch += len, dlen -= len) > 0) {
+        skb_frag_t *frag;
+
+        err = -EMSGSIZE;
+        if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
+            goto out;
+
+        frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
+        frag->page = alloc_page(GFP_ATOMIC);
+
+        err = -ENOMEM;
+        if (!frag->page)
+            goto out;
+
+        len = PAGE_SIZE;
+        if (dlen < len)
+            len = dlen;
+
+        memcpy(page_address(frag->page), scratch, len);
+
+        frag->page_offset = 0;
+        frag->size = len;
+        skb->truesize += len;
+        skb->data_len += len;
+        skb->len += len;
+
+        skb_shinfo(skb)->nr_frags++;
+    }
+
+    err = 0;
+
+out:
+    put_cpu();
+    return err;
+}
+
+int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+    int nexthdr;
+    int err = -ENOMEM;
+    struct ip_comp_hdr *ipch;
+
+    if (skb_linearize_cow(skb))
+        goto out;
+
+    skb->ip_summed = CHECKSUM_NONE;
+
+    /* Remove ipcomp header and decompress original payload */
+    ipch = (void *)skb->data;
+    nexthdr = ipch->nexthdr;
+
+    skb->transport_header = skb->network_header + sizeof(*ipch);
+    __skb_pull(skb, sizeof(*ipch));
+    err = ipcomp_decompress(x, skb);
+    if (err)
+        goto out;
+
+    err = nexthdr;
+
+out:
+    return err;
+}
+EXPORT_SYMBOL_GPL(ipcomp_input);
+
+static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
+{
+    struct ipcomp_data *ipcd = x->data;
+    const int plen = skb->len;
+    int dlen = IPCOMP_SCRATCH_SIZE;
+    u8 *start = skb->data;
+    const int cpu = get_cpu();
+    u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
+    struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+    int err;
+
+    local_bh_disable();
+    err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
+    local_bh_enable();
+    if (err)
+        goto out;
+
+    if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) {
+        err = -EMSGSIZE;
+        goto out;
+    }
+
+    memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
+    put_cpu();
+
+    pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
+    return 0;
+
+out:
+    put_cpu();
+    return err;
+}
+
+int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+    int err;
+    struct ip_comp_hdr *ipch;
+    struct ipcomp_data *ipcd = x->data;
+
+    if (skb->len < ipcd->threshold) {
+        /* Don't bother compressing */
+        goto out_ok;
+    }
+
+    if (skb_linearize_cow(skb))
+        goto out_ok;
+
+    err = ipcomp_compress(x, skb);
+
+    if (err) {
+        goto out_ok;
+    }
+
+    /* Install ipcomp header, convert into ipcomp datagram. */
+    ipch = ip_comp_hdr(skb);
+    ipch->nexthdr = *skb_mac_header(skb);
+    ipch->flags = 0;
+    ipch->cpi = htons((u16)ntohl(x->id.spi));
+    *skb_mac_header(skb) = IPPROTO_COMP;
+out_ok:
+    skb_push(skb, -skb_network_offset(skb));
+    return 0;
+}
+EXPORT_SYMBOL_GPL(ipcomp_output);
+
+static void ipcomp_free_scratches(void)
+{
+    int i;
+    void **scratches;
+
+    if (--ipcomp_scratch_users)
+        return;
+
+    scratches = ipcomp_scratches;
+    if (!scratches)
+        return;
+
+    for_each_possible_cpu(i)
+        vfree(*per_cpu_ptr(scratches, i));
+
+    free_percpu(scratches);
+}
+
+static void **ipcomp_alloc_scratches(void)
+{
+    int i;
+    void **scratches;
+
+    if (ipcomp_scratch_users++)
+        return ipcomp_scratches;
+
+    scratches = alloc_percpu(void *);
+    if (!scratches)
+        return NULL;
+
+    ipcomp_scratches = scratches;
+
+    for_each_possible_cpu(i) {
+        void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+        if (!scratch)
+            return NULL;
+        *per_cpu_ptr(scratches, i) = scratch;
+    }
+
+    return scratches;
+}
+
+static void ipcomp_free_tfms(struct crypto_comp **tfms)
+{
+    struct ipcomp_tfms *pos;
+    int cpu;
+
+    list_for_each_entry(pos, &ipcomp_tfms_list, list) {
+        if (pos->tfms == tfms)
+            break;
+    }
+
+    BUG_TRAP(pos);
+
+    if (--pos->users)
+        return;
+
+    list_del(&pos->list);
+    kfree(pos);
+
+    if (!tfms)
+        return;
+
+    for_each_possible_cpu(cpu) {
+        struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
+        crypto_free_comp(tfm);
+    }
+    free_percpu(tfms);
+}
+
+static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
+{
+    struct ipcomp_tfms *pos;
+    struct crypto_comp **tfms;
+    int cpu;
+
+    /* This can be any valid CPU ID so we don't need locking. */
+    cpu = raw_smp_processor_id();
+
+    list_for_each_entry(pos, &ipcomp_tfms_list, list) {
+        struct crypto_comp *tfm;
+
+        tfms = pos->tfms;
+        tfm = *per_cpu_ptr(tfms, cpu);
+
+        if (!strcmp(crypto_comp_name(tfm), alg_name)) {
+            pos->users++;
+            return tfms;
+        }
+    }
+
+    pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+    if (!pos)
+        return NULL;
+
+    pos->users = 1;
+    INIT_LIST_HEAD(&pos->list);
+    list_add(&pos->list, &ipcomp_tfms_list);
+
+    pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
+    if (!tfms)
+        goto error;
+
+    for_each_possible_cpu(cpu) {
+        struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+                                                    CRYPTO_ALG_ASYNC);
+        if (IS_ERR(tfm))
+            goto error;
+        *per_cpu_ptr(tfms, cpu) = tfm;
+    }
+
+    return tfms;
+
+error:
+    ipcomp_free_tfms(tfms);
+    return NULL;
+}
+
+static void ipcomp_free_data(struct ipcomp_data *ipcd)
+{
+    if (ipcd->tfms)
+        ipcomp_free_tfms(ipcd->tfms);
+    ipcomp_free_scratches();
+}
+
+void ipcomp_destroy(struct xfrm_state *x)
+{
+    struct ipcomp_data *ipcd = x->data;
+    if (!ipcd)
+        return;
+    xfrm_state_delete_tunnel(x);
+    mutex_lock(&ipcomp_resource_mutex);
+    ipcomp_free_data(ipcd);
+    mutex_unlock(&ipcomp_resource_mutex);
+    kfree(ipcd);
+}
+EXPORT_SYMBOL_GPL(ipcomp_destroy);
+
+int ipcomp_init_state(struct xfrm_state *x)
+{
+    int err;
+    struct ipcomp_data *ipcd;
+    struct xfrm_algo_desc *calg_desc;
+
+    err = -EINVAL;
+    if (!x->calg)
+        goto out;
+
+    if (x->encap)
+        goto out;
+
+    err = -ENOMEM;
+    ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
+    if (!ipcd)
+        goto out;
+
+    mutex_lock(&ipcomp_resource_mutex);
+    if (!ipcomp_alloc_scratches())
+        goto error;
+
+    ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
+    if (!ipcd->tfms)
+        goto error;
+    mutex_unlock(&ipcomp_resource_mutex);
+
+    calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
+    BUG_ON(!calg_desc);
+    ipcd->threshold = calg_desc->uinfo.comp.threshold;
+    x->data = ipcd;
+    err = 0;
+out:
+    return err;
+
+error:
+    ipcomp_free_data(ipcd);
+    mutex_unlock(&ipcomp_resource_mutex);
+    kfree(ipcd);
+    goto out;
+}
+EXPORT_SYMBOL_GPL(ipcomp_init_state);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) - RFC3173");
+MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");