/* Snapshot of include/linux/netpoll.h as of Linux v2.6.34 (~2.9 kB) */
1/* 2 * Common code for low-level network console, dump, and debugger code 3 * 4 * Derived from netconsole, kgdb-over-ethernet, and netdump patches 5 */ 6 7#ifndef _LINUX_NETPOLL_H 8#define _LINUX_NETPOLL_H 9 10#include <linux/netdevice.h> 11#include <linux/interrupt.h> 12#include <linux/rcupdate.h> 13#include <linux/list.h> 14 15struct netpoll { 16 struct net_device *dev; 17 char dev_name[IFNAMSIZ]; 18 const char *name; 19 void (*rx_hook)(struct netpoll *, int, char *, int); 20 21 __be32 local_ip, remote_ip; 22 u16 local_port, remote_port; 23 u8 remote_mac[ETH_ALEN]; 24 25 struct list_head rx; /* rx_np list element */ 26}; 27 28struct netpoll_info { 29 atomic_t refcnt; 30 31 int rx_flags; 32 spinlock_t rx_lock; 33 struct list_head rx_np; /* netpolls that registered an rx_hook */ 34 35 struct sk_buff_head arp_tx; /* list of arp requests to reply to */ 36 struct sk_buff_head txq; 37 38 struct delayed_work tx_work; 39}; 40 41void netpoll_poll(struct netpoll *np); 42void netpoll_send_udp(struct netpoll *np, const char *msg, int len); 43void netpoll_print_options(struct netpoll *np); 44int netpoll_parse_options(struct netpoll *np, char *opt); 45int netpoll_setup(struct netpoll *np); 46int netpoll_trap(void); 47void netpoll_set_trap(int trap); 48void netpoll_cleanup(struct netpoll *np); 49int __netpoll_rx(struct sk_buff *skb); 50 51 52#ifdef CONFIG_NETPOLL 53static inline int netpoll_rx(struct sk_buff *skb) 54{ 55 struct netpoll_info *npinfo = skb->dev->npinfo; 56 unsigned long flags; 57 int ret = 0; 58 59 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) 60 return 0; 61 62 spin_lock_irqsave(&npinfo->rx_lock, flags); 63 /* check rx_flags again with the lock held */ 64 if (npinfo->rx_flags && __netpoll_rx(skb)) 65 ret = 1; 66 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 67 68 return ret; 69} 70 71static inline int netpoll_rx_on(struct sk_buff *skb) 72{ 73 struct netpoll_info *npinfo = skb->dev->npinfo; 74 75 return npinfo && 
(!list_empty(&npinfo->rx_np) || npinfo->rx_flags); 76} 77 78static inline int netpoll_receive_skb(struct sk_buff *skb) 79{ 80 if (!list_empty(&skb->dev->napi_list)) 81 return netpoll_rx(skb); 82 return 0; 83} 84 85static inline void *netpoll_poll_lock(struct napi_struct *napi) 86{ 87 struct net_device *dev = napi->dev; 88 89 rcu_read_lock(); /* deal with race on ->npinfo */ 90 if (dev && dev->npinfo) { 91 spin_lock(&napi->poll_lock); 92 napi->poll_owner = smp_processor_id(); 93 return napi; 94 } 95 return NULL; 96} 97 98static inline void netpoll_poll_unlock(void *have) 99{ 100 struct napi_struct *napi = have; 101 102 if (napi) { 103 napi->poll_owner = -1; 104 spin_unlock(&napi->poll_lock); 105 } 106 rcu_read_unlock(); 107} 108 109#else 110static inline int netpoll_rx(struct sk_buff *skb) 111{ 112 return 0; 113} 114static inline int netpoll_rx_on(struct sk_buff *skb) 115{ 116 return 0; 117} 118static inline int netpoll_receive_skb(struct sk_buff *skb) 119{ 120 return 0; 121} 122static inline void *netpoll_poll_lock(struct napi_struct *napi) 123{ 124 return NULL; 125} 126static inline void netpoll_poll_unlock(void *have) 127{ 128} 129static inline void netpoll_netdev_init(struct net_device *dev) 130{ 131} 132#endif 133 134#endif