Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at 24d8c0293b04ad207648bb2a0dbfebff8b47d166 371 lines 8.4 kB view raw
/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

/* Garbage-collection interval bounds, in jiffies. */
#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has reference count and sits in some parent list(s).
 * When it is removed from parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to gc list and garbage collector periodically checks the refcnt.
 */

struct sk_buff;

/*
 * Protocol-independent destination cache entry.  Field order matters:
 * __refcnt is deliberately padded onto its own 64-byte-aligned cache
 * line, away from input/output/ops (see comments below and the
 * BUILD_BUG_ON in dst_hold()).
 */
struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	short			error;
	short			obsolete;
	int			flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
	unsigned long		expires;

	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	unsigned int		rate_tokens;
	unsigned long		rate_last;	/* rate limiting for ICMP */

	struct dst_entry	*path;

	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	/* keeps layout identical whether or not CONFIG_XFRM is set */
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff*);
	int			(*output)(struct sk_buff*);

	struct dst_ops		*ops;

	u32			metrics[RTAX_MAX];

#ifdef CONFIG_NET_CLS_ROUTE
	__u32			tclassid;
#else
	__u32			__pad2;
#endif


	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry *next;
		struct rtable __rcu *rt_next;
		struct rt6_info  *rt6_next;
		struct dn_route  *dn_next;
	};
};

#ifdef __KERNEL__

/* Read one routing metric.  RTAX_* ids are 1-based, the array is 0-based. */
static inline u32
dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric-1];
}

/* Test feature bit(s) stored in the RTAX_FEATURES metric. */
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric(dst, RTAX_MTU);
	/*
	 * Alexey put it here, so ask him about it :)
	 */
	barrier();
	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

/* Inverse of dst_metric_rtt(): store a jiffies RTT as milliseconds. */
static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst->metrics[metric-1] = jiffies_to_msecs(rtt);
}

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	/* Yes, _exactly_. This is paranoia. */
	barrier();
	return ret;
}

/* Non-zero if @metric is locked, i.e. its bit is set in the RTAX_LOCK mask. */
static inline int
dst_metric_locked(struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

/* Take a reference on @dst.  Caller must already hold one or otherwise
 * guarantee the entry cannot go away underneath it.
 */
static inline void dst_hold(struct dst_entry * dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

/* Take a reference and update the per-dst usage statistics. */
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

/* Update usage statistics without taking a reference; the caller must
 * keep the dst alive by other means (presumably RCU — see skb_dst_force()).
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

/* NULL-safe reference grab; returns @dst for call-chaining convenience. */
static inline
struct dst_entry * dst_clone(struct dst_entry * dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

/* Drop the reference encoded in an skb->_skb_refdst word, unless the
 * SKB_DST_NOREF flag says no reference was taken.  The pointer itself
 * is recovered by masking with SKB_DST_PTRMASK.
 */
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

/* Copy the dst (and its noref flag) from @oskb to @nskb, taking a new
 * reference only when the source actually held one.
 */
static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}


/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups, and perform accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev);
}

/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 */

/* Detach and return the child dst, dropping the skb's own dst reference. */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = skb_dst(skb)->child;

	skb_dst_drop(skb);
	return child;
}

extern int dst_discard(struct sk_buff *skb);
extern void * dst_alloc(struct dst_ops * ops);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);

/* Free a dst: destroy it immediately if unreferenced, otherwise hand it
 * to the garbage collector via __dst_free().  obsolete > 1 apparently
 * means the entry is already on its way out — TODO confirm against
 * net/core/dst.c.
 */
static inline void dst_free(struct dst_entry * dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

/* RCU callback wrapper around dst_free() (dst is recovered from rcu_head). */
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

/* Confirm reachability of the dst's neighbour entry (NULL-safe on dst). */
static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

/* Report a link failure on the skb's route via the protocol's handler. */
static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

/* Set/advance the expiry time, keeping the earliest non-zero deadline.
 * 0 means "no expiry", so an expiry landing exactly on jiffies 0 is
 * nudged to 1.
 */
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

/* Revalidate an obsolete dst through the protocol's ->check(); may return
 * NULL or a replacement entry.
 */
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

extern void dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,
	XFRM_LOOKUP_ICMP = 1 << 1,
};

struct flowi;
#ifndef CONFIG_XFRM
/* Without CONFIG_XFRM the lookups are no-ops that leave *dst_p untouched. */
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif
#endif

#endif /* _NET_DST_H */