/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address. Nodes contain long-living
 * information about the peer which doesn't depend on routes.
 *
 * Nodes are removed only when their reference counter drops to 0.
 * Once that has happened, a node may be removed when a sufficient amount
 * of time has passed since its last use. A less-recently-used entry may
 * also be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an RB tree.
 * Such an implementation has been chosen not just for fun. It's a way to
 * prevent easy and efficient DoS attacks by creating hash collisions. A huge
 * amount of long-living nodes in a single hash slot would significantly delay
 * lookups performed with BHs disabled.
 *
 * Serialisation issues.
 * 1. Nodes may appear in the tree only with the pool lock held.
 * 2. Nodes may disappear from the tree only with the pool lock held
 *    AND their reference count being 0.
 * 3. The per-base counter base->total is modified under the pool lock.
 * 4. struct inet_peer field modification:
 *        rb_node: pool lock
 *        refcnt: atomically against modifications on other CPUs;
 *                usually under some other lock to prevent node disappearing
 *        daddr: unchangeable
 */
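
/*
 * A minimal usage sketch (illustrative only, not part of this file),
 * modeled on the ICMP rate-limiting path. inet_getpeer_v4() is the IPv4
 * convenience wrapper from <net/inetpeer.h>; the net and skb identifiers
 * are assumed to come from the caller's context:
 *
 *        struct inet_peer *peer;
 *
 *        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
 *                               l3mdev_master_ifindex(skb->dev), 1);
 *        if (peer) {
 *                bool allowed = inet_peer_xrlim_allow(peer, HZ);
 *
 *                inet_putpeer(peer);
 *        }
 */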

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
        bp->rb_root = RB_ROOT;
        seqlock_init(&bp->lock);
        bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly;        /* start to throw entries more
                                               * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;        /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;        /* usual time to live: 10 min */
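
/* The TTL values above are stored in jiffies. Assuming the usual
 * proc_dointvec_jiffies handlers in sysctl_net_ipv4.c, userspace reads
 * and writes them as seconds, e.g.
 * "sysctl -w net.ipv4.inet_peer_maxttl=600" keeps the 10 minute default.
 */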

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
        u64 nr_entries;

        /* 1% of physical memory */
        nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
                              100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

        inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
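
        /* Worked example, with illustrative numbers: on a 4 GiB machine
         * with an L1-aligned struct inet_peer of 128 bytes, nr_entries is
         * (4 << 30) / (100 * 128) ~= 335544, which the clamp above caps
         * at 65536 + 128 = 65664 entries.
         */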

        peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
                                struct inet_peer_base *base,
                                unsigned int seq,
                                struct inet_peer *gc_stack[],
                                unsigned int *gc_cnt,
                                struct rb_node **parent_p,
                                struct rb_node ***pp_p)
{
        struct rb_node **pp, *parent, *next;
        struct inet_peer *p;

        pp = &base->rb_root.rb_node;
        parent = NULL;
        while (1) {
                int cmp;

                next = rcu_dereference_raw(*pp);
                if (!next)
                        break;
                parent = next;
                p = rb_entry(parent, struct inet_peer, rb_node);
                cmp = inetpeer_addr_cmp(daddr, &p->daddr);
                if (cmp == 0) {
                        if (!refcount_inc_not_zero(&p->refcnt))
                                break;
                        return p;
                }
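                /* This walk is used in two modes: the lockless fast path
                 * passes gc_stack == NULL and bails out via read_seqretry()
                 * if a writer moved nodes under us; the locked slow path
                 * records up to PEER_MAX_GC traversed nodes as
                 * garbage-collection candidates.
                 */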
                if (gc_stack) {
                        if (*gc_cnt < PEER_MAX_GC)
                                gc_stack[(*gc_cnt)++] = p;
                } else if (unlikely(read_seqretry(&base->lock, seq))) {
                        break;
                }
                if (cmp == -1)
                        pp = &next->rb_left;
                else
                        pp = &next->rb_right;
        }
        *parent_p = parent;
        *pp_p = pp;
        return NULL;
}

/* Perform garbage collection on all items stacked during a lookup. */
static void inet_peer_gc(struct inet_peer_base *base,
                         struct inet_peer *gc_stack[],
                         unsigned int gc_cnt)
{
        int peer_threshold, peer_maxttl, peer_minttl;
        struct inet_peer *p;
        __u32 delta, ttl;
        int i;

        peer_threshold = READ_ONCE(inet_peer_threshold);
        peer_maxttl = READ_ONCE(inet_peer_maxttl);
        peer_minttl = READ_ONCE(inet_peer_minttl);

        if (base->total >= peer_threshold)
                ttl = 0; /* be aggressive */
        else
                ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
                        base->total / peer_threshold * HZ;
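
        /* The formula above interpolates linearly from peer_maxttl for an
         * empty pool down to peer_minttl as base->total approaches the
         * threshold; dividing by HZ before multiplying keeps the
         * intermediate product from overflowing. E.g. with the 120 s /
         * 600 s defaults and a pool at half the threshold:
         * ttl = 600*HZ - (480 * total / threshold) * HZ = 360*HZ (6 min).
         */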
        for (i = 0; i < gc_cnt; i++) {
                p = gc_stack[i];

                /* The READ_ONCE() pairs with the WRITE_ONCE()
                 * in inet_putpeer()
                 */
                delta = (__u32)jiffies - READ_ONCE(p->dtime);

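                /* refcount_dec_if_one() only succeeds when the tree holds
                 * the last reference, so a node still in use elsewhere is
                 * never unlinked here.
                 */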
                if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
                        gc_stack[i] = NULL;
        }
        for (i = 0; i < gc_cnt; i++) {
                p = gc_stack[i];
                if (p) {
                        rb_erase(&p->rb_node, &base->rb_root);
                        base->total--;
                        kfree_rcu(p, rcu);
                }
        }
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
                               const struct inetpeer_addr *daddr,
                               int create)
{
        struct inet_peer *p, *gc_stack[PEER_MAX_GC];
        struct rb_node **pp, *parent;
        unsigned int gc_cnt, seq;
        int invalidated;

        /* Attempt a lockless lookup first.
         * Because of a concurrent writer, we might not find an existing entry.
         */
        rcu_read_lock();
        seq = read_seqbegin(&base->lock);
        p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
        invalidated = read_seqretry(&base->lock, seq);
        rcu_read_unlock();

        if (p)
                return p;

        /* If no writer made a change during our lookup, we can return early. */
        if (!create && !invalidated)
                return NULL;

        /* Retry an exact lookup, this time taking the lock first.
         * At least the nodes should be hot in our cache.
         */
        parent = NULL;
        write_seqlock_bh(&base->lock);

        gc_cnt = 0;
        p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
        if (!p && create) {
                p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
                if (p) {
                        p->daddr = *daddr;
                        p->dtime = (__u32)jiffies;
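                        /* Two references: one held by the tree, one
                         * returned to the caller.
                         */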
                        refcount_set(&p->refcnt, 2);
                        atomic_set(&p->rid, 0);
                        p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                        p->rate_tokens = 0;
                        p->n_redirects = 0;
                        /* 60*HZ is arbitrary, but chosen high enough that
                         * the first calculation of tokens is at its maximum.
                         */
                        p->rate_last = jiffies - 60*HZ;

                        rb_link_node(&p->rb_node, parent, pp);
                        rb_insert_color(&p->rb_node, &base->rb_root);
                        base->total++;
                }
        }
        if (gc_cnt)
                inet_peer_gc(base, gc_stack, gc_cnt);
        write_sequnlock_bh(&base->lock);

        return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
        /* The WRITE_ONCE() pairs with itself (we run lockless)
         * and the READ_ONCE() in inet_peer_gc()
         */
        WRITE_ONCE(p->dtime, (__u32)jiffies);

        if (refcount_dec_and_test(&p->refcnt))
                kfree_rcu(p, rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 * Check transmit rate limitation for a given message.
 * The rate information is held in the inet_peer entries now.
 * This function is generic and could be used for other purposes
 * too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 * Note that the same inet_peer fields are modified by functions in
 * route.c too, but these work for packet destinations while xrlim_allow
 * works for icmp destinations. This means the rate-limiting information
 * for one "ip object" is shared, and such ICMPs are limited twice:
 * by source and by destination.
 *
 * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *           SHOULD allow setting of rate limits
 *
 * Shared between ICMPv4 and ICMPv6.
 */
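/* A worked example (illustrative): with timeout = HZ, a long-idle peer
 * holds at most XRLIM_BURST_FACTOR * HZ tokens, so the first six calls
 * succeed back to back, each consuming HZ tokens; after that the bucket
 * refills at one token per jiffy, limiting sustained traffic to one
 * message per second.
 */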
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
        unsigned long now, token;
        bool rc = false;

        if (!peer)
                return true;

        token = peer->rate_tokens;
        now = jiffies;
        token += now - peer->rate_last;
        peer->rate_last = now;
        if (token > XRLIM_BURST_FACTOR * timeout)
                token = XRLIM_BURST_FACTOR * timeout;
        if (token >= timeout) {
                token -= timeout;
                rc = true;
        }
        peer->rate_tokens = token;
        return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

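/* Unlink every node and drop the tree's reference on each; nodes still
 * held by other users are freed later, on their final inet_putpeer().
 */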
void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
        struct rb_node *p = rb_first(&base->rb_root);

        while (p) {
                struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

                p = rb_next(p);
                rb_erase(&peer->rb_node, &base->rb_root);
                inet_putpeer(peer);
                cond_resched();
        }

        base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);