/* include/linux/netpoll.h — Linux v3.8 (page-viewer residue removed) */
1/* 2 * Common code for low-level network console, dump, and debugger code 3 * 4 * Derived from netconsole, kgdb-over-ethernet, and netdump patches 5 */ 6 7#ifndef _LINUX_NETPOLL_H 8#define _LINUX_NETPOLL_H 9 10#include <linux/netdevice.h> 11#include <linux/interrupt.h> 12#include <linux/rcupdate.h> 13#include <linux/list.h> 14 15struct netpoll { 16 struct net_device *dev; 17 char dev_name[IFNAMSIZ]; 18 const char *name; 19 void (*rx_hook)(struct netpoll *, int, char *, int); 20 21 __be32 local_ip, remote_ip; 22 u16 local_port, remote_port; 23 u8 remote_mac[ETH_ALEN]; 24 25 struct list_head rx; /* rx_np list element */ 26 struct rcu_head rcu; 27}; 28 29struct netpoll_info { 30 atomic_t refcnt; 31 32 int rx_flags; 33 spinlock_t rx_lock; 34 struct list_head rx_np; /* netpolls that registered an rx_hook */ 35 36 struct sk_buff_head arp_tx; /* list of arp requests to reply to */ 37 struct sk_buff_head txq; 38 39 struct delayed_work tx_work; 40 41 struct netpoll *netpoll; 42 struct rcu_head rcu; 43}; 44 45void netpoll_send_udp(struct netpoll *np, const char *msg, int len); 46void netpoll_print_options(struct netpoll *np); 47int netpoll_parse_options(struct netpoll *np, char *opt); 48int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp); 49int netpoll_setup(struct netpoll *np); 50int netpoll_trap(void); 51void netpoll_set_trap(int trap); 52void __netpoll_cleanup(struct netpoll *np); 53void __netpoll_free_rcu(struct netpoll *np); 54void netpoll_cleanup(struct netpoll *np); 55int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo); 56void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, 57 struct net_device *dev); 58static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) 59{ 60 unsigned long flags; 61 local_irq_save(flags); 62 netpoll_send_skb_on_dev(np, skb, np->dev); 63 local_irq_restore(flags); 64} 65 66 67 68#ifdef CONFIG_NETPOLL 69static inline bool netpoll_rx_on(struct sk_buff *skb) 70{ 71 struct 
netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); 72 73 return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); 74} 75 76static inline bool netpoll_rx(struct sk_buff *skb) 77{ 78 struct netpoll_info *npinfo; 79 unsigned long flags; 80 bool ret = false; 81 82 local_irq_save(flags); 83 84 if (!netpoll_rx_on(skb)) 85 goto out; 86 87 npinfo = rcu_dereference_bh(skb->dev->npinfo); 88 spin_lock(&npinfo->rx_lock); 89 /* check rx_flags again with the lock held */ 90 if (npinfo->rx_flags && __netpoll_rx(skb, npinfo)) 91 ret = true; 92 spin_unlock(&npinfo->rx_lock); 93 94out: 95 local_irq_restore(flags); 96 return ret; 97} 98 99static inline int netpoll_receive_skb(struct sk_buff *skb) 100{ 101 if (!list_empty(&skb->dev->napi_list)) 102 return netpoll_rx(skb); 103 return 0; 104} 105 106static inline void *netpoll_poll_lock(struct napi_struct *napi) 107{ 108 struct net_device *dev = napi->dev; 109 110 if (dev && dev->npinfo) { 111 spin_lock(&napi->poll_lock); 112 napi->poll_owner = smp_processor_id(); 113 return napi; 114 } 115 return NULL; 116} 117 118static inline void netpoll_poll_unlock(void *have) 119{ 120 struct napi_struct *napi = have; 121 122 if (napi) { 123 napi->poll_owner = -1; 124 spin_unlock(&napi->poll_lock); 125 } 126} 127 128static inline bool netpoll_tx_running(struct net_device *dev) 129{ 130 return irqs_disabled(); 131} 132 133#else 134static inline bool netpoll_rx(struct sk_buff *skb) 135{ 136 return false; 137} 138static inline bool netpoll_rx_on(struct sk_buff *skb) 139{ 140 return false; 141} 142static inline int netpoll_receive_skb(struct sk_buff *skb) 143{ 144 return 0; 145} 146static inline void *netpoll_poll_lock(struct napi_struct *napi) 147{ 148 return NULL; 149} 150static inline void netpoll_poll_unlock(void *have) 151{ 152} 153static inline void netpoll_netdev_init(struct net_device *dev) 154{ 155} 156static inline bool netpoll_tx_running(struct net_device *dev) 157{ 158 return false; 159} 160#endif 161 162#endif