Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

connector/cn_proc: Add filtering to fix some bugs

The current proc connector code has the following bugs: if there is more
than one listener for the proc connector messages, and one of them
deregisters for listening using PROC_CN_MCAST_IGNORE, it will still get
all proc connector messages, as long as there is another listener.

Another issue is that if one client calls PROC_CN_MCAST_LISTEN, and another
one calls PROC_CN_MCAST_IGNORE, then both end up not getting any messages.

This patch adds filtering and drops the packet if the client has sent
PROC_CN_MCAST_IGNORE. This data is stored in the client socket's
sk_user_data. In addition, we only increment or decrement
proc_event_num_listeners once per client. This fixes the above issues.

cn_release is the release function added for NETLINK_CONNECTOR. It uses
the newly added netlink_release function in netlink_sock. It will
free sk_user_data.

Signed-off-by: Anjali Kulkarni <anjali.k.kulkarni@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Anjali Kulkarni and committed by
David S. Miller
2aa1f7a1 a4c9a56e

+100 -35
+47 -10
drivers/connector/cn_proc.c
··· 48 48 .lock = INIT_LOCAL_LOCK(lock), 49 49 }; 50 50 51 + static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data) 52 + { 53 + enum proc_cn_mcast_op mc_op; 54 + 55 + if (!dsk) 56 + return 0; 57 + 58 + mc_op = ((struct proc_input *)(dsk->sk_user_data))->mcast_op; 59 + 60 + if (mc_op == PROC_CN_MCAST_IGNORE) 61 + return 1; 62 + 63 + return 0; 64 + } 65 + 51 66 static inline void send_msg(struct cn_msg *msg) 52 67 { 53 68 local_lock(&local_event.lock); ··· 76 61 * 77 62 * If cn_netlink_send() fails, the data is not sent. 78 63 */ 79 - cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); 64 + cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT, 65 + cn_filter, NULL); 80 66 81 67 local_unlock(&local_event.lock); 82 68 } ··· 362 346 static void cn_proc_mcast_ctl(struct cn_msg *msg, 363 347 struct netlink_skb_parms *nsp) 364 348 { 365 - enum proc_cn_mcast_op *mc_op = NULL; 366 - int err = 0; 367 - 368 - if (msg->len != sizeof(*mc_op)) 369 - return; 349 + enum proc_cn_mcast_op mc_op = 0, prev_mc_op = 0; 350 + int err = 0, initial = 0; 351 + struct sock *sk = NULL; 370 352 371 353 /* 372 354 * Events are reported with respect to the initial pid ··· 381 367 goto out; 382 368 } 383 369 384 - mc_op = (enum proc_cn_mcast_op *)msg->data; 385 - switch (*mc_op) { 370 + if (msg->len == sizeof(mc_op)) 371 + mc_op = *((enum proc_cn_mcast_op *)msg->data); 372 + else 373 + return; 374 + 375 + if (nsp->sk) { 376 + sk = nsp->sk; 377 + if (sk->sk_user_data == NULL) { 378 + sk->sk_user_data = kzalloc(sizeof(struct proc_input), 379 + GFP_KERNEL); 380 + if (sk->sk_user_data == NULL) { 381 + err = ENOMEM; 382 + goto out; 383 + } 384 + initial = 1; 385 + } else { 386 + prev_mc_op = 387 + ((struct proc_input *)(sk->sk_user_data))->mcast_op; 388 + } 389 + ((struct proc_input *)(sk->sk_user_data))->mcast_op = mc_op; 390 + } 391 + 392 + switch (mc_op) { 386 393 case PROC_CN_MCAST_LISTEN: 387 - atomic_inc(&proc_event_num_listeners); 394 + if (initial || (prev_mc_op != 
PROC_CN_MCAST_LISTEN)) 395 + atomic_inc(&proc_event_num_listeners); 388 396 break; 389 397 case PROC_CN_MCAST_IGNORE: 390 - atomic_dec(&proc_event_num_listeners); 398 + if (!initial && (prev_mc_op != PROC_CN_MCAST_IGNORE)) 399 + atomic_dec(&proc_event_num_listeners); 391 400 break; 392 401 default: 393 402 err = EINVAL;
+17 -4
drivers/connector/connector.c
··· 59 59 * both, or if both are zero then the group is looked up and sent there. 60 60 */ 61 61 int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, 62 - gfp_t gfp_mask) 62 + gfp_t gfp_mask, 63 + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), 64 + void *filter_data) 63 65 { 64 66 struct cn_callback_entry *__cbq; 65 67 unsigned int size; ··· 112 110 NETLINK_CB(skb).dst_group = group; 113 111 114 112 if (group) 115 - return netlink_broadcast(dev->nls, skb, portid, group, 116 - gfp_mask); 113 + return netlink_broadcast_filtered(dev->nls, skb, portid, group, 114 + gfp_mask, filter, 115 + (void *)filter_data); 117 116 return netlink_unicast(dev->nls, skb, portid, 118 117 !gfpflags_allow_blocking(gfp_mask)); 119 118 } ··· 124 121 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, 125 122 gfp_t gfp_mask) 126 123 { 127 - return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); 124 + return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask, 125 + NULL, NULL); 128 126 } 129 127 EXPORT_SYMBOL_GPL(cn_netlink_send); 130 128 ··· 164 160 } 165 161 166 162 return err; 163 + } 164 + 165 + static void cn_release(struct sock *sk, unsigned long *groups) 166 + { 167 + if (groups && test_bit(CN_IDX_PROC - 1, groups)) { 168 + kfree(sk->sk_user_data); 169 + sk->sk_user_data = NULL; 170 + } 167 171 } 168 172 169 173 /* ··· 261 249 struct netlink_kernel_cfg cfg = { 262 250 .groups = CN_NETLINK_USERS + 0xf, 263 251 .input = cn_rx_skb, 252 + .release = cn_release, 264 253 }; 265 254 266 255 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
+4 -2
drivers/w1/w1_netlink.c
··· 65 65 u16 len = w1_reply_len(block); 66 66 if (len) { 67 67 cn_netlink_send_mult(block->first_cn, len, 68 - block->portid, 0, GFP_KERNEL); 68 + block->portid, 0, 69 + GFP_KERNEL, NULL, NULL); 69 70 } 70 71 kfree(block); 71 72 } ··· 84 83 { 85 84 u16 len = w1_reply_len(block); 86 85 if (len + space >= block->maxlen) { 87 - cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL); 86 + cn_netlink_send_mult(block->first_cn, len, block->portid, 87 + 0, GFP_KERNEL, NULL, NULL); 88 88 block->first_cn->len = 0; 89 89 block->cn = NULL; 90 90 block->msg = NULL;
+7 -1
include/linux/connector.h
··· 90 90 * If @group is not zero, then message will be delivered 91 91 * to the specified group. 92 92 * @gfp_mask: GFP mask. 93 + * @filter: Filter function to be used at netlink layer. 94 + * @filter_data:Filter data to be supplied to the filter function 93 95 * 94 96 * It can be safely called from softirq context, but may silently 95 97 * fail under strong memory pressure. 96 98 * 97 99 * If there are no listeners for given group %-ESRCH can be returned. 98 100 */ 99 - int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); 101 + int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, 102 + u32 group, gfp_t gfp_mask, 103 + int (*filter)(struct sock *dsk, struct sk_buff *skb, 104 + void *data), 105 + void *filter_data); 100 106 101 107 /** 102 108 * cn_netlink_send - Sends message to the specified groups.
+25 -18
include/uapi/linux/cn_proc.h
··· 30 30 PROC_CN_MCAST_IGNORE = 2 31 31 }; 32 32 33 + enum proc_cn_event { 34 + /* Use successive bits so the enums can be used to record 35 + * sets of events as well 36 + */ 37 + PROC_EVENT_NONE = 0x00000000, 38 + PROC_EVENT_FORK = 0x00000001, 39 + PROC_EVENT_EXEC = 0x00000002, 40 + PROC_EVENT_UID = 0x00000004, 41 + PROC_EVENT_GID = 0x00000040, 42 + PROC_EVENT_SID = 0x00000080, 43 + PROC_EVENT_PTRACE = 0x00000100, 44 + PROC_EVENT_COMM = 0x00000200, 45 + /* "next" should be 0x00000400 */ 46 + /* "last" is the last process event: exit, 47 + * while "next to last" is coredumping event 48 + */ 49 + PROC_EVENT_COREDUMP = 0x40000000, 50 + PROC_EVENT_EXIT = 0x80000000 51 + }; 52 + 53 + struct proc_input { 54 + enum proc_cn_mcast_op mcast_op; 55 + }; 56 + 33 57 /* 34 58 * From the user's point of view, the process 35 59 * ID is the thread group ID and thread ID is the internal ··· 68 44 */ 69 45 70 46 struct proc_event { 71 - enum what { 72 - /* Use successive bits so the enums can be used to record 73 - * sets of events as well 74 - */ 75 - PROC_EVENT_NONE = 0x00000000, 76 - PROC_EVENT_FORK = 0x00000001, 77 - PROC_EVENT_EXEC = 0x00000002, 78 - PROC_EVENT_UID = 0x00000004, 79 - PROC_EVENT_GID = 0x00000040, 80 - PROC_EVENT_SID = 0x00000080, 81 - PROC_EVENT_PTRACE = 0x00000100, 82 - PROC_EVENT_COMM = 0x00000200, 83 - /* "next" should be 0x00000400 */ 84 - /* "last" is the last process event: exit, 85 - * while "next to last" is coredumping event */ 86 - PROC_EVENT_COREDUMP = 0x40000000, 87 - PROC_EVENT_EXIT = 0x80000000 88 - } what; 47 + enum proc_cn_event what; 89 48 __u32 cpu; 90 49 __u64 __attribute__((aligned(8))) timestamp_ns; 91 50 /* Number of nano seconds since system boot */