Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

+77 -57
+4 -6
drivers/atm/fore200e.c
··· 178 179 180 static void* 181 - fore200e_kmalloc(int size, int flags) 182 { 183 - void* chunk = kmalloc(size, flags); 184 185 - if (chunk) 186 - memset(chunk, 0x00, size); 187 - else 188 - printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); 189 190 return chunk; 191 }
··· 178 179 180 static void* 181 + fore200e_kmalloc(int size, unsigned int __nocast flags) 182 { 183 + void *chunk = kzalloc(size, flags); 184 185 + if (!chunk) 186 + printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); 187 188 return chunk; 189 }
+2 -1
drivers/connector/connector.c
··· 69 * a new message. 70 * 71 */ 72 - int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask) 73 { 74 struct cn_callback_entry *__cbq; 75 unsigned int size;
··· 69 * a new message. 70 * 71 */ 72 + int cn_netlink_send(struct cn_msg *msg, u32 __group, 73 + unsigned int __nocast gfp_mask) 74 { 75 struct cn_callback_entry *__cbq; 76 unsigned int size;
+3 -2
drivers/net/bonding/bond_main.c
··· 1289 /* 1290 * Copy all the Multicast addresses from src to the bonding device dst 1291 */ 1292 - static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag) 1293 { 1294 struct dev_mc_list *dmi, *new_dmi; 1295 1296 for (dmi = mc_list; dmi; dmi = dmi->next) { 1297 - new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag); 1298 1299 if (!new_dmi) { 1300 /* FIXME: Potential memory leak !!! */
··· 1289 /* 1290 * Copy all the Multicast addresses from src to the bonding device dst 1291 */ 1292 + static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, 1293 + unsigned int __nocast gfp_flag) 1294 { 1295 struct dev_mc_list *dmi, *new_dmi; 1296 1297 for (dmi = mc_list; dmi; dmi = dmi->next) { 1298 + new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag); 1299 1300 if (!new_dmi) { 1301 /* FIXME: Potential memory leak !!! */
+1 -1
include/linux/atmdev.h
··· 457 458 int atm_charge(struct atm_vcc *vcc,int truesize); 459 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 460 - int gfp_flags); 461 int atm_pcr_goal(struct atm_trafprm *tp); 462 463 void vcc_release_async(struct atm_vcc *vcc, int reply);
··· 457 458 int atm_charge(struct atm_vcc *vcc,int truesize); 459 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 460 + unsigned int __nocast gfp_flags); 461 int atm_pcr_goal(struct atm_trafprm *tp); 462 463 void vcc_release_async(struct atm_vcc *vcc, int reply);
+1 -1
include/linux/connector.h
··· 149 150 int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); 151 void cn_del_callback(struct cb_id *); 152 - int cn_netlink_send(struct cn_msg *, u32, int); 153 154 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 155 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
··· 149 150 int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); 151 void cn_del_callback(struct cb_id *); 152 + int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast); 153 154 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 155 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+2 -1
include/linux/textsearch.h
··· 158 #define TS_PRIV_ALIGNTO 8 159 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) 160 161 - static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask) 162 { 163 struct ts_config *conf; 164
··· 158 #define TS_PRIV_ALIGNTO 8 159 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) 160 161 + static inline struct ts_config *alloc_ts_config(size_t payload, 162 + unsigned int __nocast gfp_mask) 163 { 164 struct ts_config *conf; 165
+4 -4
include/net/dn_nsp.h
··· 19 extern void dn_nsp_send_oth_ack(struct sock *sk); 20 extern void dn_nsp_delayed_ack(struct sock *sk); 21 extern void dn_send_conn_ack(struct sock *sk); 22 - extern void dn_send_conn_conf(struct sock *sk, int gfp); 23 extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 24 - unsigned short reason, int gfp); 25 extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, 26 unsigned short reason); 27 extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); ··· 29 30 extern void dn_nsp_output(struct sock *sk); 31 extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); 32 - extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oob); 33 extern unsigned long dn_nsp_persist(struct sock *sk); 34 extern int dn_nsp_xmit_timeout(struct sock *sk); 35 36 extern int dn_nsp_rx(struct sk_buff *); 37 extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 38 39 - extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); 40 extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); 41 42 #define NSP_REASON_OK 0 /* No error */
··· 19 extern void dn_nsp_send_oth_ack(struct sock *sk); 20 extern void dn_nsp_delayed_ack(struct sock *sk); 21 extern void dn_send_conn_ack(struct sock *sk); 22 + extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp); 23 extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 24 + unsigned short reason, unsigned int __nocast gfp); 25 extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, 26 unsigned short reason); 27 extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); ··· 29 30 extern void dn_nsp_output(struct sock *sk); 31 extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); 32 + extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob); 33 extern unsigned long dn_nsp_persist(struct sock *sk); 34 extern int dn_nsp_xmit_timeout(struct sock *sk); 35 36 extern int dn_nsp_rx(struct sk_buff *); 37 extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 38 39 + extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); 40 extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); 41 42 #define NSP_REASON_OK 0 /* No error */
+1 -1
include/net/dn_route.h
··· 15 GNU General Public License for more details. 16 *******************************************************************************/ 17 18 - extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); 19 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); 20 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21 extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
··· 15 GNU General Public License for more details. 16 *******************************************************************************/ 17 18 + extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); 19 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); 20 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21 extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+1 -1
include/net/inet_hashtables.h
··· 40 struct inet_ehash_bucket { 41 rwlock_t lock; 42 struct hlist_head chain; 43 - } __attribute__((__aligned__(8))); 44 45 /* There are a few simple rules, which allow for local port reuse by 46 * an application. In essence:
··· 40 struct inet_ehash_bucket { 41 rwlock_t lock; 42 struct hlist_head chain; 43 + }; 44 45 /* There are a few simple rules, which allow for local port reuse by 46 * an application. In essence:
+1 -1
include/net/ip_vs.h
··· 832 833 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); 834 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); 835 - extern int ip_vs_skb_replace(struct sk_buff *skb, int pri, 836 char *o_buf, int o_len, char *n_buf, int n_len); 837 extern int ip_vs_app_init(void); 838 extern void ip_vs_app_cleanup(void);
··· 832 833 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); 834 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); 835 + extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, 836 char *o_buf, int o_len, char *n_buf, int n_len); 837 extern int ip_vs_app_init(void); 838 extern void ip_vs_app_cleanup(void);
+6 -1
include/net/xfrm.h
··· 875 } 876 #endif 877 878 - struct xfrm_policy *xfrm_policy_alloc(int gfp); 879 extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); 880 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 881 struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel, ··· 929 return ipv6_addr_cmp((struct in6_addr *)a, 930 (struct in6_addr *)b); 931 } 932 } 933 934 #endif /* _NET_XFRM_H */
··· 875 } 876 #endif 877 878 + struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp); 879 extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); 880 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 881 struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel, ··· 929 return ipv6_addr_cmp((struct in6_addr *)a, 930 (struct in6_addr *)b); 931 } 932 + } 933 + 934 + static inline int xfrm_policy_id2dir(u32 index) 935 + { 936 + return index & 7; 937 } 938 939 #endif /* _NET_XFRM_H */
+1 -1
include/rxrpc/call.h
··· 203 size_t sioc, 204 struct kvec *siov, 205 uint8_t rxhdr_flags, 206 - int alloc_flags, 207 int dup_data, 208 size_t *size_sent); 209
··· 203 size_t sioc, 204 struct kvec *siov, 205 uint8_t rxhdr_flags, 206 + unsigned int __nocast alloc_flags, 207 int dup_data, 208 size_t *size_sent); 209
+1 -1
include/rxrpc/message.h
··· 63 uint8_t type, 64 int count, 65 struct kvec *diov, 66 - int alloc_flags, 67 struct rxrpc_message **_msg); 68 69 extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
··· 63 uint8_t type, 64 int count, 65 struct kvec *diov, 66 + unsigned int __nocast alloc_flags, 67 struct rxrpc_message **_msg); 68 69 extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
+1 -1
lib/ts_bm.c
··· 127 } 128 129 static struct ts_config *bm_init(const void *pattern, unsigned int len, 130 - int gfp_mask) 131 { 132 struct ts_config *conf; 133 struct ts_bm *bm;
··· 127 } 128 129 static struct ts_config *bm_init(const void *pattern, unsigned int len, 130 + unsigned int __nocast gfp_mask) 131 { 132 struct ts_config *conf; 133 struct ts_bm *bm;
+1 -1
lib/ts_fsm.c
··· 258 } 259 260 static struct ts_config *fsm_init(const void *pattern, unsigned int len, 261 - int gfp_mask) 262 { 263 int i, err = -EINVAL; 264 struct ts_config *conf;
··· 258 } 259 260 static struct ts_config *fsm_init(const void *pattern, unsigned int len, 261 + unsigned int __nocast gfp_mask) 262 { 263 int i, err = -EINVAL; 264 struct ts_config *conf;
+1 -1
lib/ts_kmp.c
··· 87 } 88 89 static struct ts_config *kmp_init(const void *pattern, unsigned int len, 90 - int gfp_mask) 91 { 92 struct ts_config *conf; 93 struct ts_kmp *kmp;
··· 87 } 88 89 static struct ts_config *kmp_init(const void *pattern, unsigned int len, 90 + unsigned int __nocast gfp_mask) 91 { 92 struct ts_config *conf; 93 struct ts_kmp *kmp;
+1 -1
net/atm/atm_misc.c
··· 25 26 27 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 28 - int gfp_flags) 29 { 30 struct sock *sk = sk_atm(vcc); 31 int guess = atm_guess_pdu2truesize(pdu_size);
··· 25 26 27 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 28 + unsigned int __nocast gfp_flags) 29 { 30 struct sock *sk = sk_atm(vcc); 31 int guess = atm_guess_pdu2truesize(pdu_size);
+1 -1
net/ax25/ax25_in.c
··· 123 } 124 125 skb_pull(skb, 1); /* Remove PID */ 126 - skb->h.raw = skb->data; 127 skb->nh.raw = skb->data; 128 skb->dev = ax25->ax25_dev->dev; 129 skb->pkt_type = PACKET_HOST;
··· 123 } 124 125 skb_pull(skb, 1); /* Remove PID */ 126 + skb->mac.raw = skb->nh.raw; 127 skb->nh.raw = skb->data; 128 skb->dev = ax25->ax25_dev->dev; 129 skb->pkt_type = PACKET_HOST;
+4 -2
net/decnet/af_decnet.c
··· 452 .obj_size = sizeof(struct dn_sock), 453 }; 454 455 - static struct sock *dn_alloc_sock(struct socket *sock, int gfp) 456 { 457 struct dn_scp *scp; 458 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); ··· 805 return rv; 806 } 807 808 - static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation) 809 { 810 struct dn_scp *scp = DN_SK(sk); 811 DEFINE_WAIT(wait);
··· 452 .obj_size = sizeof(struct dn_sock), 453 }; 454 455 + static struct sock *dn_alloc_sock(struct socket *sock, 456 + unsigned int __nocast gfp) 457 { 458 struct dn_scp *scp; 459 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); ··· 804 return rv; 805 } 806 807 + static int dn_confirm_accept(struct sock *sk, long *timeo, 808 + unsigned int __nocast allocation) 809 { 810 struct dn_scp *scp = DN_SK(sk); 811 DEFINE_WAIT(wait);
+14 -9
net/decnet/dn_nsp_out.c
··· 117 * The eventual aim is for each socket to have a cached header size 118 * for its outgoing packets, and to set hdr from this when sk != NULL. 119 */ 120 - struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri) 121 { 122 struct sk_buff *skb; 123 int hdr = 64; ··· 211 * 212 * Returns: The number of times the packet has been sent previously 213 */ 214 - static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, int gfp) 215 { 216 struct dn_skb_cb *cb = DN_SKB_CB(skb); 217 struct sk_buff *skb2; ··· 352 return ptr; 353 } 354 355 - void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth) 356 { 357 struct dn_scp *scp = DN_SK(sk); 358 struct dn_skb_cb *cb = DN_SKB_CB(skb); ··· 520 return 0; 521 } 522 523 - void dn_send_conn_conf(struct sock *sk, int gfp) 524 { 525 struct dn_scp *scp = DN_SK(sk); 526 struct sk_buff *skb = NULL; ··· 552 553 554 static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 555 - unsigned short reason, int gfp, struct dst_entry *dst, 556 int ddl, unsigned char *dd, __u16 rem, __u16 loc) 557 { 558 struct sk_buff *skb = NULL; ··· 595 596 597 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 598 - unsigned short reason, int gfp) 599 { 600 struct dn_scp *scp = DN_SK(sk); 601 int ddl = 0; ··· 616 { 617 struct dn_skb_cb *cb = DN_SKB_CB(skb); 618 int ddl = 0; 619 - int gfp = GFP_ATOMIC; 620 621 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 622 NULL, cb->src_port, cb->dst_port); ··· 628 struct dn_scp *scp = DN_SK(sk); 629 struct sk_buff *skb; 630 unsigned char *ptr; 631 - int gfp = GFP_ATOMIC; 632 633 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 634 return; ··· 663 unsigned char menuver; 664 struct dn_skb_cb *cb; 665 unsigned char type = 1; 666 - int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 667 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); 668 669 if (!skb)
··· 117 * The eventual aim is for each socket to have a cached header size 118 * for its outgoing packets, and to set hdr from this when sk != NULL. 119 */ 120 + struct sk_buff *dn_alloc_skb(struct sock *sk, int size, 121 + unsigned int __nocast pri) 122 { 123 struct sk_buff *skb; 124 int hdr = 64; ··· 210 * 211 * Returns: The number of times the packet has been sent previously 212 */ 213 + static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 214 + unsigned int __nocast gfp) 215 { 216 struct dn_skb_cb *cb = DN_SKB_CB(skb); 217 struct sk_buff *skb2; ··· 350 return ptr; 351 } 352 353 + void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, 354 + unsigned int __nocast gfp, int oth) 355 { 356 struct dn_scp *scp = DN_SK(sk); 357 struct dn_skb_cb *cb = DN_SKB_CB(skb); ··· 517 return 0; 518 } 519 520 + void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp) 521 { 522 struct dn_scp *scp = DN_SK(sk); 523 struct sk_buff *skb = NULL; ··· 549 550 551 static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 552 + unsigned short reason, unsigned int __nocast gfp, 553 + struct dst_entry *dst, 554 int ddl, unsigned char *dd, __u16 rem, __u16 loc) 555 { 556 struct sk_buff *skb = NULL; ··· 591 592 593 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 594 + unsigned short reason, unsigned int __nocast gfp) 595 { 596 struct dn_scp *scp = DN_SK(sk); 597 int ddl = 0; ··· 612 { 613 struct dn_skb_cb *cb = DN_SKB_CB(skb); 614 int ddl = 0; 615 + unsigned int __nocast gfp = GFP_ATOMIC; 616 617 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 618 NULL, cb->src_port, cb->dst_port); ··· 624 struct dn_scp *scp = DN_SK(sk); 625 struct sk_buff *skb; 626 unsigned char *ptr; 627 + unsigned int __nocast gfp = GFP_ATOMIC; 628 629 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 630 return; ··· 659 unsigned char menuver; 660 struct dn_skb_cb *cb; 661 unsigned char type = 1; 662 + unsigned int __nocast allocation = 663 + (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 664 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); 665 666 if (!skb)
+1 -1
net/ipv4/ipvs/ip_vs_app.c
··· 604 /* 605 * Replace a segment of data with a new segment 606 */ 607 - int ip_vs_skb_replace(struct sk_buff *skb, int pri, 608 char *o_buf, int o_len, char *n_buf, int n_len) 609 { 610 struct iphdr *iph;
··· 604 /* 605 * Replace a segment of data with a new segment 606 */ 607 + int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, 608 char *o_buf, int o_len, char *n_buf, int n_len) 609 { 610 struct iphdr *iph;
+1 -1
net/ipv4/netfilter/Kconfig
··· 141 tristate 'PPTP protocol support' 142 help 143 This module adds support for PPTP (Point to Point Tunnelling 144 - Protocol, RFC2637) conncection tracking and NAT. 145 146 If you are running PPTP sessions over a stateful firewall or NAT 147 box, you may want to enable this feature.
··· 141 tristate 'PPTP protocol support' 142 help 143 This module adds support for PPTP (Point to Point Tunnelling 144 + Protocol, RFC2637) connection tracking and NAT. 145 146 If you are running PPTP sessions over a stateful firewall or NAT 147 box, you may want to enable this feature.
+1 -1
net/ipv4/tcp_bic.c
··· 136 else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) 137 /* slow start */ 138 ca->cnt = (cwnd * (BICTCP_B-1)) 139 - / cwnd-ca->last_max_cwnd; 140 else 141 /* linear increase */ 142 ca->cnt = cwnd / max_increment;
··· 136 else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) 137 /* slow start */ 138 ca->cnt = (cwnd * (BICTCP_B-1)) 139 + / (cwnd - ca->last_max_cwnd); 140 else 141 /* linear increase */ 142 ca->cnt = cwnd / max_increment;
+1 -1
net/ipv6/mcast.c
··· 1393 1394 static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) 1395 { 1396 - return sizeof(struct mld2_grec) + 4*mld_scount(pmc,type,gdel,sdel); 1397 } 1398 1399 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
··· 1393 1394 static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) 1395 { 1396 + return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel); 1397 } 1398 1399 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+1 -1
net/ipv6/ndisc.c
··· 1450 1451 static void pndisc_redo(struct sk_buff *skb) 1452 { 1453 - ndisc_rcv(skb); 1454 kfree_skb(skb); 1455 } 1456
··· 1450 1451 static void pndisc_redo(struct sk_buff *skb) 1452 { 1453 + ndisc_recv_ns(skb); 1454 kfree_skb(skb); 1455 } 1456
+12 -6
net/key/af_key.c
··· 185 } 186 187 static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 188 - int allocation, struct sock *sk) 189 { 190 int err = -ENOBUFS; 191 ··· 217 #define BROADCAST_ONE 1 218 #define BROADCAST_REGISTERED 2 219 #define BROADCAST_PROMISC_ONLY 4 220 - static int pfkey_broadcast(struct sk_buff *skb, int allocation, 221 int broadcast_flags, struct sock *one_sk) 222 { 223 struct sock *sk; ··· 1416 return 0; 1417 } 1418 1419 - static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, int allocation) 1420 { 1421 struct sk_buff *skb; 1422 struct sadb_msg *hdr; ··· 2154 2155 static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 2156 { 2157 int err; 2158 struct sadb_x_policy *pol; 2159 struct xfrm_policy *xp; ··· 2163 if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) 2164 return -EINVAL; 2165 2166 - xp = xfrm_policy_byid(0, pol->sadb_x_policy_id, 2167 hdr->sadb_msg_type == SADB_X_SPDDELETE2); 2168 if (xp == NULL) 2169 return -ENOENT; ··· 2179 if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { 2180 c.data.byid = 1; 2181 c.event = XFRM_MSG_DELPOLICY; 2182 - km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); 2183 } else { 2184 - err = key_pol_get_resp(sk, xp, hdr, pol->sadb_x_policy_dir-1); 2185 } 2186 2187 xfrm_pol_put(xp);
··· 185 } 186 187 static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 188 + unsigned int __nocast allocation, struct sock *sk) 189 { 190 int err = -ENOBUFS; 191 ··· 217 #define BROADCAST_ONE 1 218 #define BROADCAST_REGISTERED 2 219 #define BROADCAST_PROMISC_ONLY 4 220 + static int pfkey_broadcast(struct sk_buff *skb, unsigned int __nocast allocation, 221 int broadcast_flags, struct sock *one_sk) 222 { 223 struct sock *sk; ··· 1416 return 0; 1417 } 1418 1419 + static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, 1420 + unsigned int __nocast allocation) 1421 { 1422 struct sk_buff *skb; 1423 struct sadb_msg *hdr; ··· 2153 2154 static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 2155 { 2156 + unsigned int dir; 2157 int err; 2158 struct sadb_x_policy *pol; 2159 struct xfrm_policy *xp; ··· 2161 if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) 2162 return -EINVAL; 2163 2164 + dir = xfrm_policy_id2dir(pol->sadb_x_policy_id); 2165 + if (dir >= XFRM_POLICY_MAX) 2166 + return -EINVAL; 2167 + 2168 + xp = xfrm_policy_byid(dir, pol->sadb_x_policy_id, 2169 hdr->sadb_msg_type == SADB_X_SPDDELETE2); 2170 if (xp == NULL) 2171 return -ENOENT; ··· 2173 if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { 2174 c.data.byid = 1; 2175 c.event = XFRM_MSG_DELPOLICY; 2176 + km_policy_notify(xp, dir, &c); 2177 } else { 2178 + err = key_pol_get_resp(sk, xp, hdr, dir); 2179 } 2180 2181 xfrm_pol_put(xp);
+2 -1
net/netfilter/nfnetlink.c
··· 195 196 int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 197 { 198 - int allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 199 int err = 0; 200 201 NETLINK_CB(skb).dst_group = group;
··· 195 196 int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 197 { 198 + unsigned int __nocast allocation = 199 + in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 200 int err = 0; 201 202 NETLINK_CB(skb).dst_group = group;
+1 -1
net/netrom/nr_dev.c
··· 58 59 /* Spoof incoming device */ 60 skb->dev = dev; 61 - skb->h.raw = skb->data; 62 skb->nh.raw = skb->data; 63 skb->pkt_type = PACKET_HOST; 64
··· 58 59 /* Spoof incoming device */ 60 skb->dev = dev; 61 + skb->mac.raw = skb->nh.raw; 62 skb->nh.raw = skb->data; 63 skb->pkt_type = PACKET_HOST; 64
+1 -1
net/rxrpc/call.c
··· 1923 size_t sioc, 1924 struct kvec *siov, 1925 u8 rxhdr_flags, 1926 - int alloc_flags, 1927 int dup_data, 1928 size_t *size_sent) 1929 {
··· 1923 size_t sioc, 1924 struct kvec *siov, 1925 u8 rxhdr_flags, 1926 + unsigned int __nocast alloc_flags, 1927 int dup_data, 1928 size_t *size_sent) 1929 {
+1 -1
net/rxrpc/connection.c
··· 522 uint8_t type, 523 int dcount, 524 struct kvec diov[], 525 - int alloc_flags, 526 struct rxrpc_message **_msg) 527 { 528 struct rxrpc_message *msg;
··· 522 uint8_t type, 523 int dcount, 524 struct kvec diov[], 525 + unsigned int __nocast alloc_flags, 526 struct rxrpc_message **_msg) 527 { 528 struct rxrpc_message *msg;
+1 -1
net/sunrpc/sched.c
··· 719 void * 720 rpc_malloc(struct rpc_task *task, size_t size) 721 { 722 - int gfp; 723 724 if (task->tk_flags & RPC_TASK_SWAPPER) 725 gfp = GFP_ATOMIC;
··· 719 void * 720 rpc_malloc(struct rpc_task *task, size_t size) 721 { 722 + unsigned int __nocast gfp; 723 724 if (task->tk_flags & RPC_TASK_SWAPPER) 725 gfp = GFP_ATOMIC;
+3 -3
net/xfrm/xfrm_policy.c
··· 163 if (xp->dead) 164 goto out; 165 166 - dir = xp->index & 7; 167 168 if (xp->lft.hard_add_expires_seconds) { 169 long tmo = xp->lft.hard_add_expires_seconds + ··· 225 * SPD calls. 226 */ 227 228 - struct xfrm_policy *xfrm_policy_alloc(int gfp) 229 { 230 struct xfrm_policy *policy; 231 ··· 417 struct xfrm_policy *pol, **p; 418 419 write_lock_bh(&xfrm_policy_lock); 420 - for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) { 421 if (pol->index == id) { 422 xfrm_pol_hold(pol); 423 if (delete)
··· 163 if (xp->dead) 164 goto out; 165 166 + dir = xfrm_policy_id2dir(xp->index); 167 168 if (xp->lft.hard_add_expires_seconds) { 169 long tmo = xp->lft.hard_add_expires_seconds + ··· 225 * SPD calls. 226 */ 227 228 + struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp) 229 { 230 struct xfrm_policy *policy; 231 ··· 417 struct xfrm_policy *pol, **p; 418 419 write_lock_bh(&xfrm_policy_lock); 420 + for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) { 421 if (pol->index == id) { 422 xfrm_pol_hold(pol); 423 if (delete)