Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kcm: Add statistics and proc interfaces

This patch adds various counters for KCM. These include counters for
messages and bytes received or sent, as well as counters for number of
attached/unattached TCP sockets and other error or edge events.

The statistics are exposed via a proc interface. /proc/net/kcm provides
statistics per KCM socket and per psock (attached TCP sockets).
/proc/net/kcm_stats provides aggregate statistics.

Signed-off-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Tom Herbert; committed by David S. Miller.
cd6e111b ab7ac4eb

+597 -1
+94
include/net/kcm.h
··· 17 17 18 18 extern unsigned int kcm_net_id; 19 19 20 + #define KCM_STATS_ADD(stat, count) ((stat) += (count)) 21 + #define KCM_STATS_INCR(stat) ((stat)++) 22 + 23 + struct kcm_psock_stats { 24 + unsigned long long rx_msgs; 25 + unsigned long long rx_bytes; 26 + unsigned long long tx_msgs; 27 + unsigned long long tx_bytes; 28 + unsigned int rx_aborts; 29 + unsigned int rx_mem_fail; 30 + unsigned int rx_need_more_hdr; 31 + unsigned int rx_bad_hdr_len; 32 + unsigned long long reserved; 33 + unsigned long long unreserved; 34 + unsigned int tx_aborts; 35 + }; 36 + 37 + struct kcm_mux_stats { 38 + unsigned long long rx_msgs; 39 + unsigned long long rx_bytes; 40 + unsigned long long tx_msgs; 41 + unsigned long long tx_bytes; 42 + unsigned int rx_ready_drops; 43 + unsigned int tx_retries; 44 + unsigned int psock_attach; 45 + unsigned int psock_unattach_rsvd; 46 + unsigned int psock_unattach; 47 + }; 48 + 49 + struct kcm_stats { 50 + unsigned long long rx_msgs; 51 + unsigned long long rx_bytes; 52 + unsigned long long tx_msgs; 53 + unsigned long long tx_bytes; 54 + }; 55 + 20 56 struct kcm_tx_msg { 21 57 unsigned int sent; 22 58 unsigned int fragidx; ··· 76 40 int index; 77 41 u32 done : 1; 78 42 struct work_struct done_work; 43 + 44 + struct kcm_stats stats; 79 45 80 46 /* Transmit */ 81 47 struct kcm_psock *tx_psock; ··· 115 77 116 78 struct list_head psock_list; 117 79 80 + struct kcm_psock_stats stats; 81 + 118 82 /* Receive */ 119 83 struct sk_buff *rx_skb_head; 120 84 struct sk_buff **rx_skb_nextp; ··· 126 86 struct delayed_work rx_delayed_work; 127 87 struct bpf_prog *bpf_prog; 128 88 struct kcm_sock *rx_kcm; 89 + unsigned long long saved_rx_bytes; 90 + unsigned long long saved_rx_msgs; 129 91 130 92 /* Transmit */ 131 93 struct kcm_sock *tx_kcm; 132 94 struct list_head psock_avail_list; 95 + unsigned long long saved_tx_bytes; 96 + unsigned long long saved_tx_msgs; 133 97 }; 134 98 135 99 /* Per net MUX list */ 136 100 struct kcm_net { 137 101 struct mutex mutex; 
102 + struct kcm_psock_stats aggregate_psock_stats; 103 + struct kcm_mux_stats aggregate_mux_stats; 138 104 struct list_head mux_list; 139 105 int count; 140 106 }; ··· 156 110 struct list_head psocks; /* List of all psocks on MUX */ 157 111 int psocks_cnt; /* Total attached sockets */ 158 112 113 + struct kcm_mux_stats stats; 114 + struct kcm_psock_stats aggregate_psock_stats; 115 + 159 116 /* Receive */ 160 117 spinlock_t rx_lock ____cacheline_aligned_in_smp; 161 118 struct list_head kcm_rx_waiters; /* KCMs waiting for receiving */ ··· 170 121 struct list_head psocks_avail; /* List of available psocks */ 171 122 struct list_head kcm_tx_waiters; /* KCMs waiting for a TX psock */ 172 123 }; 124 + 125 + #ifdef CONFIG_PROC_FS 126 + int kcm_proc_init(void); 127 + void kcm_proc_exit(void); 128 + #else 129 + static int kcm_proc_init(void) { return 0; } 130 + static void kcm_proc_exit(void) { } 131 + #endif 132 + 133 + static inline void aggregate_psock_stats(struct kcm_psock_stats *stats, 134 + struct kcm_psock_stats *agg_stats) 135 + { 136 + /* Save psock statistics in the mux when psock is being unattached. */ 137 + 138 + #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) 139 + SAVE_PSOCK_STATS(rx_msgs); 140 + SAVE_PSOCK_STATS(rx_bytes); 141 + SAVE_PSOCK_STATS(rx_aborts); 142 + SAVE_PSOCK_STATS(rx_mem_fail); 143 + SAVE_PSOCK_STATS(rx_need_more_hdr); 144 + SAVE_PSOCK_STATS(rx_bad_hdr_len); 145 + SAVE_PSOCK_STATS(tx_msgs); 146 + SAVE_PSOCK_STATS(tx_bytes); 147 + SAVE_PSOCK_STATS(reserved); 148 + SAVE_PSOCK_STATS(unreserved); 149 + SAVE_PSOCK_STATS(tx_aborts); 150 + #undef SAVE_PSOCK_STATS 151 + } 152 + 153 + static inline void aggregate_mux_stats(struct kcm_mux_stats *stats, 154 + struct kcm_mux_stats *agg_stats) 155 + { 156 + /* Save psock statistics in the mux when psock is being unattached. 
*/ 157 + 158 + #define SAVE_MUX_STATS(_stat) (agg_stats->_stat += stats->_stat) 159 + SAVE_MUX_STATS(rx_msgs); 160 + SAVE_MUX_STATS(rx_bytes); 161 + SAVE_MUX_STATS(tx_msgs); 162 + SAVE_MUX_STATS(tx_bytes); 163 + SAVE_MUX_STATS(rx_ready_drops); 164 + SAVE_MUX_STATS(psock_attach); 165 + SAVE_MUX_STATS(psock_unattach_rsvd); 166 + SAVE_MUX_STATS(psock_unattach); 167 + #undef SAVE_MUX_STATS 168 + } 173 169 174 170 #endif /* __NET_KCM_H_ */
+1 -1
net/kcm/Makefile
··· 1 1 obj-$(CONFIG_AF_KCM) += kcm.o 2 2 3 - kcm-y := kcmsock.o 3 + kcm-y := kcmsock.o kcmproc.o
+422
net/kcm/kcmproc.c
··· 1 + #include <linux/in.h> 2 + #include <linux/inet.h> 3 + #include <linux/list.h> 4 + #include <linux/module.h> 5 + #include <linux/net.h> 6 + #include <linux/proc_fs.h> 7 + #include <linux/rculist.h> 8 + #include <linux/seq_file.h> 9 + #include <linux/socket.h> 10 + #include <net/inet_sock.h> 11 + #include <net/kcm.h> 12 + #include <net/net_namespace.h> 13 + #include <net/netns/generic.h> 14 + #include <net/tcp.h> 15 + 16 + #ifdef CONFIG_PROC_FS 17 + struct kcm_seq_muxinfo { 18 + char *name; 19 + const struct file_operations *seq_fops; 20 + const struct seq_operations seq_ops; 21 + }; 22 + 23 + static struct kcm_mux *kcm_get_first(struct seq_file *seq) 24 + { 25 + struct net *net = seq_file_net(seq); 26 + struct kcm_net *knet = net_generic(net, kcm_net_id); 27 + 28 + return list_first_or_null_rcu(&knet->mux_list, 29 + struct kcm_mux, kcm_mux_list); 30 + } 31 + 32 + static struct kcm_mux *kcm_get_next(struct kcm_mux *mux) 33 + { 34 + struct kcm_net *knet = mux->knet; 35 + 36 + return list_next_or_null_rcu(&knet->mux_list, &mux->kcm_mux_list, 37 + struct kcm_mux, kcm_mux_list); 38 + } 39 + 40 + static struct kcm_mux *kcm_get_idx(struct seq_file *seq, loff_t pos) 41 + { 42 + struct net *net = seq_file_net(seq); 43 + struct kcm_net *knet = net_generic(net, kcm_net_id); 44 + struct kcm_mux *m; 45 + 46 + list_for_each_entry_rcu(m, &knet->mux_list, kcm_mux_list) { 47 + if (!pos) 48 + return m; 49 + --pos; 50 + } 51 + return NULL; 52 + } 53 + 54 + static void *kcm_seq_next(struct seq_file *seq, void *v, loff_t *pos) 55 + { 56 + void *p; 57 + 58 + if (v == SEQ_START_TOKEN) 59 + p = kcm_get_first(seq); 60 + else 61 + p = kcm_get_next(v); 62 + ++*pos; 63 + return p; 64 + } 65 + 66 + static void *kcm_seq_start(struct seq_file *seq, loff_t *pos) 67 + __acquires(rcu) 68 + { 69 + rcu_read_lock(); 70 + 71 + if (!*pos) 72 + return SEQ_START_TOKEN; 73 + else 74 + return kcm_get_idx(seq, *pos - 1); 75 + } 76 + 77 + static void kcm_seq_stop(struct seq_file *seq, void *v) 78 + 
__releases(rcu) 79 + { 80 + rcu_read_unlock(); 81 + } 82 + 83 + struct kcm_proc_mux_state { 84 + struct seq_net_private p; 85 + int idx; 86 + }; 87 + 88 + static int kcm_seq_open(struct inode *inode, struct file *file) 89 + { 90 + struct kcm_seq_muxinfo *muxinfo = PDE_DATA(inode); 91 + int err; 92 + 93 + err = seq_open_net(inode, file, &muxinfo->seq_ops, 94 + sizeof(struct kcm_proc_mux_state)); 95 + if (err < 0) 96 + return err; 97 + return err; 98 + } 99 + 100 + static void kcm_format_mux_header(struct seq_file *seq) 101 + { 102 + struct net *net = seq_file_net(seq); 103 + struct kcm_net *knet = net_generic(net, kcm_net_id); 104 + 105 + seq_printf(seq, 106 + "*** KCM statistics (%d MUX) ****\n", 107 + knet->count); 108 + 109 + seq_printf(seq, 110 + "%-14s %-10s %-16s %-10s %-16s %-8s %-8s %-8s %-8s %s", 111 + "Object", 112 + "RX-Msgs", 113 + "RX-Bytes", 114 + "TX-Msgs", 115 + "TX-Bytes", 116 + "Recv-Q", 117 + "Rmem", 118 + "Send-Q", 119 + "Smem", 120 + "Status"); 121 + 122 + /* XXX: pdsts header stuff here */ 123 + seq_puts(seq, "\n"); 124 + } 125 + 126 + static void kcm_format_sock(struct kcm_sock *kcm, struct seq_file *seq, 127 + int i, int *len) 128 + { 129 + seq_printf(seq, 130 + " kcm-%-7u %-10llu %-16llu %-10llu %-16llu %-8d %-8d %-8d %-8s ", 131 + kcm->index, 132 + kcm->stats.rx_msgs, 133 + kcm->stats.rx_bytes, 134 + kcm->stats.tx_msgs, 135 + kcm->stats.tx_bytes, 136 + kcm->sk.sk_receive_queue.qlen, 137 + sk_rmem_alloc_get(&kcm->sk), 138 + kcm->sk.sk_write_queue.qlen, 139 + "-"); 140 + 141 + if (kcm->tx_psock) 142 + seq_printf(seq, "Psck-%u ", kcm->tx_psock->index); 143 + 144 + if (kcm->tx_wait) 145 + seq_puts(seq, "TxWait "); 146 + 147 + if (kcm->tx_wait_more) 148 + seq_puts(seq, "WMore "); 149 + 150 + if (kcm->rx_wait) 151 + seq_puts(seq, "RxWait "); 152 + 153 + seq_puts(seq, "\n"); 154 + } 155 + 156 + static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq, 157 + int i, int *len) 158 + { 159 + seq_printf(seq, 160 + " psock-%-5u %-10llu 
%-16llu %-10llu %-16llu %-8d %-8d %-8d %-8d ", 161 + psock->index, 162 + psock->stats.rx_msgs, 163 + psock->stats.rx_bytes, 164 + psock->stats.tx_msgs, 165 + psock->stats.tx_bytes, 166 + psock->sk->sk_receive_queue.qlen, 167 + atomic_read(&psock->sk->sk_rmem_alloc), 168 + psock->sk->sk_write_queue.qlen, 169 + atomic_read(&psock->sk->sk_wmem_alloc)); 170 + 171 + if (psock->done) 172 + seq_puts(seq, "Done "); 173 + 174 + if (psock->tx_stopped) 175 + seq_puts(seq, "TxStop "); 176 + 177 + if (psock->rx_stopped) 178 + seq_puts(seq, "RxStop "); 179 + 180 + if (psock->tx_kcm) 181 + seq_printf(seq, "Rsvd-%d ", psock->tx_kcm->index); 182 + 183 + if (psock->ready_rx_msg) 184 + seq_puts(seq, "RdyRx "); 185 + 186 + seq_puts(seq, "\n"); 187 + } 188 + 189 + static void 190 + kcm_format_mux(struct kcm_mux *mux, loff_t idx, struct seq_file *seq) 191 + { 192 + int i, len; 193 + struct kcm_sock *kcm; 194 + struct kcm_psock *psock; 195 + 196 + /* mux information */ 197 + seq_printf(seq, 198 + "%-6s%-8s %-10llu %-16llu %-10llu %-16llu %-8s %-8s %-8s %-8s ", 199 + "mux", "", 200 + mux->stats.rx_msgs, 201 + mux->stats.rx_bytes, 202 + mux->stats.tx_msgs, 203 + mux->stats.tx_bytes, 204 + "-", "-", "-", "-"); 205 + 206 + seq_printf(seq, "KCMs: %d, Psocks %d\n", 207 + mux->kcm_socks_cnt, mux->psocks_cnt); 208 + 209 + /* kcm sock information */ 210 + i = 0; 211 + spin_lock_bh(&mux->lock); 212 + list_for_each_entry(kcm, &mux->kcm_socks, kcm_sock_list) { 213 + kcm_format_sock(kcm, seq, i, &len); 214 + i++; 215 + } 216 + i = 0; 217 + list_for_each_entry(psock, &mux->psocks, psock_list) { 218 + kcm_format_psock(psock, seq, i, &len); 219 + i++; 220 + } 221 + spin_unlock_bh(&mux->lock); 222 + } 223 + 224 + static int kcm_seq_show(struct seq_file *seq, void *v) 225 + { 226 + struct kcm_proc_mux_state *mux_state; 227 + 228 + mux_state = seq->private; 229 + if (v == SEQ_START_TOKEN) { 230 + mux_state->idx = 0; 231 + kcm_format_mux_header(seq); 232 + } else { 233 + kcm_format_mux(v, mux_state->idx, 
seq); 234 + mux_state->idx++; 235 + } 236 + return 0; 237 + } 238 + 239 + static const struct file_operations kcm_seq_fops = { 240 + .owner = THIS_MODULE, 241 + .open = kcm_seq_open, 242 + .read = seq_read, 243 + .llseek = seq_lseek, 244 + }; 245 + 246 + static struct kcm_seq_muxinfo kcm_seq_muxinfo = { 247 + .name = "kcm", 248 + .seq_fops = &kcm_seq_fops, 249 + .seq_ops = { 250 + .show = kcm_seq_show, 251 + .start = kcm_seq_start, 252 + .next = kcm_seq_next, 253 + .stop = kcm_seq_stop, 254 + } 255 + }; 256 + 257 + static int kcm_proc_register(struct net *net, struct kcm_seq_muxinfo *muxinfo) 258 + { 259 + struct proc_dir_entry *p; 260 + int rc = 0; 261 + 262 + p = proc_create_data(muxinfo->name, S_IRUGO, net->proc_net, 263 + muxinfo->seq_fops, muxinfo); 264 + if (!p) 265 + rc = -ENOMEM; 266 + return rc; 267 + } 268 + EXPORT_SYMBOL(kcm_proc_register); 269 + 270 + static void kcm_proc_unregister(struct net *net, 271 + struct kcm_seq_muxinfo *muxinfo) 272 + { 273 + remove_proc_entry(muxinfo->name, net->proc_net); 274 + } 275 + EXPORT_SYMBOL(kcm_proc_unregister); 276 + 277 + static int kcm_stats_seq_show(struct seq_file *seq, void *v) 278 + { 279 + struct kcm_psock_stats psock_stats; 280 + struct kcm_mux_stats mux_stats; 281 + struct kcm_mux *mux; 282 + struct kcm_psock *psock; 283 + struct net *net = seq->private; 284 + struct kcm_net *knet = net_generic(net, kcm_net_id); 285 + 286 + memset(&mux_stats, 0, sizeof(mux_stats)); 287 + memset(&psock_stats, 0, sizeof(psock_stats)); 288 + 289 + mutex_lock(&knet->mutex); 290 + 291 + aggregate_mux_stats(&knet->aggregate_mux_stats, &mux_stats); 292 + aggregate_psock_stats(&knet->aggregate_psock_stats, 293 + &psock_stats); 294 + 295 + list_for_each_entry_rcu(mux, &knet->mux_list, kcm_mux_list) { 296 + spin_lock_bh(&mux->lock); 297 + aggregate_mux_stats(&mux->stats, &mux_stats); 298 + aggregate_psock_stats(&mux->aggregate_psock_stats, 299 + &psock_stats); 300 + list_for_each_entry(psock, &mux->psocks, psock_list) 301 + 
aggregate_psock_stats(&psock->stats, &psock_stats); 302 + spin_unlock_bh(&mux->lock); 303 + } 304 + 305 + mutex_unlock(&knet->mutex); 306 + 307 + seq_printf(seq, 308 + "%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s\n", 309 + "MUX", 310 + "RX-Msgs", 311 + "RX-Bytes", 312 + "TX-Msgs", 313 + "TX-Bytes", 314 + "TX-Retries", 315 + "Attach", 316 + "Unattach", 317 + "UnattchRsvd", 318 + "RX-RdyDrops"); 319 + 320 + seq_printf(seq, 321 + "%-8s %-10llu %-16llu %-10llu %-16llu %-10u %-10u %-10u %-10u %-10u\n", 322 + "", 323 + mux_stats.rx_msgs, 324 + mux_stats.rx_bytes, 325 + mux_stats.tx_msgs, 326 + mux_stats.tx_bytes, 327 + mux_stats.tx_retries, 328 + mux_stats.psock_attach, 329 + mux_stats.psock_unattach_rsvd, 330 + mux_stats.psock_unattach, 331 + mux_stats.rx_ready_drops); 332 + 333 + seq_printf(seq, 334 + "%-8s %-10s %-16s %-10s %-16s %-10s %-10s %-10s %-10s %-10s %-10s %-10s\n", 335 + "Psock", 336 + "RX-Msgs", 337 + "RX-Bytes", 338 + "TX-Msgs", 339 + "TX-Bytes", 340 + "Reserved", 341 + "Unreserved", 342 + "RX-Aborts", 343 + "RX-MemFail", 344 + "RX-NeedMor", 345 + "RX-BadLen", 346 + "TX-Aborts"); 347 + 348 + seq_printf(seq, 349 + "%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u %-10u %-10u %-10u\n", 350 + "", 351 + psock_stats.rx_msgs, 352 + psock_stats.rx_bytes, 353 + psock_stats.tx_msgs, 354 + psock_stats.tx_bytes, 355 + psock_stats.reserved, 356 + psock_stats.unreserved, 357 + psock_stats.rx_aborts, 358 + psock_stats.rx_mem_fail, 359 + psock_stats.rx_need_more_hdr, 360 + psock_stats.rx_bad_hdr_len, 361 + psock_stats.tx_aborts); 362 + 363 + return 0; 364 + } 365 + 366 + static int kcm_stats_seq_open(struct inode *inode, struct file *file) 367 + { 368 + return single_open_net(inode, file, kcm_stats_seq_show); 369 + } 370 + 371 + static const struct file_operations kcm_stats_seq_fops = { 372 + .owner = THIS_MODULE, 373 + .open = kcm_stats_seq_open, 374 + .read = seq_read, 375 + .llseek = seq_lseek, 376 + .release = single_release_net, 377 + 
}; 378 + 379 + static int kcm_proc_init_net(struct net *net) 380 + { 381 + int err; 382 + 383 + if (!proc_create("kcm_stats", S_IRUGO, net->proc_net, 384 + &kcm_stats_seq_fops)) { 385 + err = -ENOMEM; 386 + goto out_kcm_stats; 387 + } 388 + 389 + err = kcm_proc_register(net, &kcm_seq_muxinfo); 390 + if (err) 391 + goto out_kcm; 392 + 393 + return 0; 394 + 395 + out_kcm: 396 + remove_proc_entry("kcm_stats", net->proc_net); 397 + out_kcm_stats: 398 + return err; 399 + } 400 + 401 + static void kcm_proc_exit_net(struct net *net) 402 + { 403 + kcm_proc_unregister(net, &kcm_seq_muxinfo); 404 + remove_proc_entry("kcm_stats", net->proc_net); 405 + } 406 + 407 + static struct pernet_operations kcm_net_ops = { 408 + .init = kcm_proc_init_net, 409 + .exit = kcm_proc_exit_net, 410 + }; 411 + 412 + int __init kcm_proc_init(void) 413 + { 414 + return register_pernet_subsys(&kcm_net_ops); 415 + } 416 + 417 + void __exit kcm_proc_exit(void) 418 + { 419 + unregister_pernet_subsys(&kcm_net_ops); 420 + } 421 + 422 + #endif /* CONFIG_PROC_FS */
+80
net/kcm/kcmsock.c
··· 59 59 return; 60 60 61 61 psock->rx_stopped = 1; 62 + KCM_STATS_INCR(psock->stats.rx_aborts); 62 63 63 64 /* Report an error on the lower socket */ 64 65 report_csk_error(csk, err); ··· 81 80 } 82 81 83 82 psock->tx_stopped = 1; 83 + KCM_STATS_INCR(psock->stats.tx_aborts); 84 84 85 85 if (!psock->tx_kcm) { 86 86 /* Take off psocks_avail list */ ··· 101 99 102 100 /* Report error on lower socket */ 103 101 report_csk_error(csk, err); 102 + } 103 + 104 + /* RX mux lock held. */ 105 + static void kcm_update_rx_mux_stats(struct kcm_mux *mux, 106 + struct kcm_psock *psock) 107 + { 108 + KCM_STATS_ADD(mux->stats.rx_bytes, 109 + psock->stats.rx_bytes - psock->saved_rx_bytes); 110 + mux->stats.rx_msgs += 111 + psock->stats.rx_msgs - psock->saved_rx_msgs; 112 + psock->saved_rx_msgs = psock->stats.rx_msgs; 113 + psock->saved_rx_bytes = psock->stats.rx_bytes; 114 + } 115 + 116 + static void kcm_update_tx_mux_stats(struct kcm_mux *mux, 117 + struct kcm_psock *psock) 118 + { 119 + KCM_STATS_ADD(mux->stats.tx_bytes, 120 + psock->stats.tx_bytes - psock->saved_tx_bytes); 121 + mux->stats.tx_msgs += 122 + psock->stats.tx_msgs - psock->saved_tx_msgs; 123 + psock->saved_tx_msgs = psock->stats.tx_msgs; 124 + psock->saved_tx_bytes = psock->stats.tx_bytes; 104 125 } 105 126 106 127 static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); ··· 279 254 return psock->rx_kcm; 280 255 } 281 256 257 + kcm_update_rx_mux_stats(mux, psock); 258 + 282 259 if (list_empty(&mux->kcm_rx_waiters)) { 283 260 psock->ready_rx_msg = head; 284 261 list_add_tail(&psock->psock_ready_list, ··· 383 356 */ 384 357 orig_skb = skb_clone(orig_skb, GFP_ATOMIC); 385 358 if (!orig_skb) { 359 + KCM_STATS_INCR(psock->stats.rx_mem_fail); 386 360 desc->error = -ENOMEM; 387 361 return 0; 388 362 } 389 363 if (!pskb_pull(orig_skb, orig_offset)) { 364 + KCM_STATS_INCR(psock->stats.rx_mem_fail); 390 365 kfree_skb(orig_skb); 391 366 desc->error = -ENOMEM; 392 367 return 0; ··· 403 374 */ 404 375 err = 
skb_unclone(head, GFP_ATOMIC); 405 376 if (err) { 377 + KCM_STATS_INCR(psock->stats.rx_mem_fail); 406 378 desc->error = err; 407 379 return 0; 408 380 } ··· 422 392 423 393 skb = alloc_skb(0, GFP_ATOMIC); 424 394 if (!skb) { 395 + KCM_STATS_INCR(psock->stats.rx_mem_fail); 425 396 desc->error = -ENOMEM; 426 397 return 0; 427 398 } ··· 445 414 /* Always clone since we will consume something */ 446 415 skb = skb_clone(orig_skb, GFP_ATOMIC); 447 416 if (!skb) { 417 + KCM_STATS_INCR(psock->stats.rx_mem_fail); 448 418 desc->error = -ENOMEM; 449 419 break; 450 420 } ··· 467 435 */ 468 436 err = skb_unclone(skb, GFP_ATOMIC); 469 437 if (err) { 438 + KCM_STATS_INCR(psock->stats.rx_mem_fail); 470 439 desc->error = err; 471 440 break; 472 441 } ··· 489 456 /* Need more header to determine length */ 490 457 rxm->accum_len += cand_len; 491 458 eaten += cand_len; 459 + KCM_STATS_INCR(psock->stats.rx_need_more_hdr); 492 460 WARN_ON(eaten != orig_len); 493 461 break; 494 462 } else if (len <= (ssize_t)head->len - ··· 497 463 /* Length must be into new skb (and also 498 464 * greater than zero) 499 465 */ 466 + KCM_STATS_INCR(psock->stats.rx_bad_hdr_len); 500 467 desc->error = -EPROTO; 501 468 psock->rx_skb_head = NULL; 502 469 kcm_abort_rx_psock(psock, EPROTO, head); ··· 527 492 528 493 /* Hurray, we have a new message! 
*/ 529 494 psock->rx_skb_head = NULL; 495 + KCM_STATS_INCR(psock->stats.rx_msgs); 530 496 531 497 try_queue: 532 498 kcm = reserve_rx_kcm(psock, head); ··· 545 509 546 510 if (cloned_orig) 547 511 kfree_skb(orig_skb); 512 + 513 + KCM_STATS_ADD(psock->stats.rx_bytes, eaten); 548 514 549 515 return eaten; 550 516 } ··· 709 671 } 710 672 kcm->tx_psock = psock; 711 673 psock->tx_kcm = kcm; 674 + KCM_STATS_INCR(psock->stats.reserved); 712 675 } else if (!kcm->tx_wait) { 713 676 list_add_tail(&kcm->wait_psock_list, 714 677 &mux->kcm_tx_waiters); ··· 744 705 smp_mb(); 745 706 746 707 kcm->tx_psock = psock; 708 + KCM_STATS_INCR(psock->stats.reserved); 747 709 queue_work(kcm_wq, &kcm->tx_work); 748 710 } 749 711 } ··· 766 726 767 727 smp_rmb(); /* Read tx_psock before tx_wait */ 768 728 729 + kcm_update_tx_mux_stats(mux, psock); 730 + 769 731 WARN_ON(kcm->tx_wait); 770 732 771 733 kcm->tx_psock = NULL; 772 734 psock->tx_kcm = NULL; 735 + KCM_STATS_INCR(psock->stats.unreserved); 773 736 774 737 if (unlikely(psock->tx_stopped)) { 775 738 if (psock->done) { ··· 796 753 spin_unlock_bh(&mux->lock); 797 754 } 798 755 756 + static void kcm_report_tx_retry(struct kcm_sock *kcm) 757 + { 758 + struct kcm_mux *mux = kcm->mux; 759 + 760 + spin_lock_bh(&mux->lock); 761 + KCM_STATS_INCR(mux->stats.tx_retries); 762 + spin_unlock_bh(&mux->lock); 763 + } 764 + 799 765 /* Write any messages ready on the kcm socket. Called with kcm sock lock 800 766 * held. Return bytes actually sent or error. 801 767 */ ··· 825 773 * it and we'll retry the message. 
826 774 */ 827 775 unreserve_psock(kcm); 776 + kcm_report_tx_retry(kcm); 828 777 if (skb_queue_empty(&sk->sk_write_queue)) 829 778 return 0; 830 779 ··· 909 856 unreserve_psock(kcm); 910 857 911 858 txm->sent = 0; 859 + kcm_report_tx_retry(kcm); 912 860 ret = 0; 913 861 914 862 goto try_again; ··· 917 863 918 864 sent += ret; 919 865 frag_offset += ret; 866 + KCM_STATS_ADD(psock->stats.tx_bytes, ret); 920 867 if (frag_offset < frag->size) { 921 868 /* Not finished with this frag */ 922 869 goto do_frag; ··· 939 884 kfree_skb(head); 940 885 sk->sk_wmem_queued -= sent; 941 886 total_sent += sent; 887 + KCM_STATS_INCR(psock->stats.tx_msgs); 942 888 } while ((head = skb_peek(&sk->sk_write_queue))); 943 889 out: 944 890 if (!head) { ··· 1117 1061 /* Message complete, queue it on send buffer */ 1118 1062 __skb_queue_tail(&sk->sk_write_queue, head); 1119 1063 kcm->seq_skb = NULL; 1064 + KCM_STATS_INCR(kcm->stats.tx_msgs); 1120 1065 1121 1066 if (msg->msg_flags & MSG_BATCH) { 1122 1067 kcm->tx_wait_more = true; ··· 1139 1082 kcm->seq_skb = head; 1140 1083 kcm_tx_msg(head)->last_skb = skb; 1141 1084 } 1085 + 1086 + KCM_STATS_ADD(kcm->stats.tx_bytes, copied); 1142 1087 1143 1088 release_sock(sk); 1144 1089 return copied; ··· 1203 1144 size_t len, int flags) 1204 1145 { 1205 1146 struct sock *sk = sock->sk; 1147 + struct kcm_sock *kcm = kcm_sk(sk); 1206 1148 int err = 0; 1207 1149 long timeo; 1208 1150 struct kcm_rx_msg *rxm; ··· 1231 1171 1232 1172 copied = len; 1233 1173 if (likely(!(flags & MSG_PEEK))) { 1174 + KCM_STATS_ADD(kcm->stats.rx_bytes, copied); 1234 1175 if (copied < rxm->full_len) { 1235 1176 if (sock->type == SOCK_DGRAM) { 1236 1177 /* Truncated message */ ··· 1244 1183 msg_finished: 1245 1184 /* Finished with message */ 1246 1185 msg->msg_flags |= MSG_EOR; 1186 + KCM_STATS_INCR(kcm->stats.rx_msgs); 1247 1187 skb_unlink(skb, &sk->sk_receive_queue); 1248 1188 kfree_skb(skb); 1249 1189 } ··· 1456 1394 list_add(&psock->psock_list, head); 1457 1395 psock->index = 
index; 1458 1396 1397 + KCM_STATS_INCR(mux->stats.psock_attach); 1459 1398 mux->psocks_cnt++; 1460 1399 psock_now_avail(psock); 1461 1400 spin_unlock_bh(&mux->lock); ··· 1532 1469 list_del(&psock->psock_ready_list); 1533 1470 kfree_skb(psock->ready_rx_msg); 1534 1471 psock->ready_rx_msg = NULL; 1472 + KCM_STATS_INCR(mux->stats.rx_ready_drops); 1535 1473 } 1536 1474 1537 1475 spin_unlock_bh(&mux->rx_lock); ··· 1549 1485 1550 1486 spin_lock_bh(&mux->lock); 1551 1487 1488 + aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats); 1489 + 1490 + KCM_STATS_INCR(mux->stats.psock_unattach); 1491 + 1552 1492 if (psock->tx_kcm) { 1553 1493 /* psock was reserved. Just mark it finished and we will clean 1554 1494 * up in the kcm paths, we need kcm lock which can not be 1555 1495 * acquired here. 1556 1496 */ 1497 + KCM_STATS_INCR(mux->stats.psock_unattach_rsvd); 1557 1498 spin_unlock_bh(&mux->lock); 1558 1499 1559 1500 /* We are unattaching a socket that is reserved. Abort the ··· 1786 1717 __skb_queue_purge(&mux->rx_hold_queue); 1787 1718 1788 1719 mutex_lock(&knet->mutex); 1720 + aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats); 1721 + aggregate_psock_stats(&mux->aggregate_psock_stats, 1722 + &knet->aggregate_psock_stats); 1789 1723 list_del_rcu(&mux->kcm_mux_list); 1790 1724 knet->count--; 1791 1725 mutex_unlock(&knet->mutex); ··· 2051 1979 if (err) 2052 1980 goto net_ops_fail; 2053 1981 1982 + err = kcm_proc_init(); 1983 + if (err) 1984 + goto proc_init_fail; 1985 + 2054 1986 return 0; 1987 + 1988 + proc_init_fail: 1989 + unregister_pernet_device(&kcm_net_ops); 2055 1990 2056 1991 net_ops_fail: 2057 1992 sock_unregister(PF_KCM); ··· 2078 1999 2079 2000 static void __exit kcm_exit(void) 2080 2001 { 2002 + kcm_proc_exit(); 2081 2003 unregister_pernet_device(&kcm_net_ops); 2082 2004 sock_unregister(PF_KCM); 2083 2005 proto_unregister(&kcm_proto);