/* AFS server record management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
#include "protocol_yfs.h"

static unsigned afs_server_gc_delay = 10;	/* Server record timeout in seconds */
static unsigned afs_server_update_delay = 30;	/* Time till VLDB recheck in secs */

/*
 * Account for one more server record outstanding in this network namespace.
 * The count is dropped again by afs_dec_servers_outstanding(), which wakes
 * anyone waiting in afs_purge_servers() once it reaches zero.
 */
static void afs_inc_servers_outstanding(struct afs_net *net)
{
	atomic_inc(&net->servers_outstanding);
}

static void afs_dec_servers_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->servers_outstanding))
		wake_up_var(&net->servers_outstanding);
}

/*
 * Find a server by one of its addresses.
 */
struct afs_server *afs_find_server(struct afs_net *net,
				   const struct sockaddr_rxrpc *srx)
{
	const struct sockaddr_in6 *a = &srx->transport.sin6, *b;
	const struct afs_addr_list *alist;
	struct afs_server *server = NULL;
	unsigned int i;
	bool ipv6 = true;
	int seq = 0, diff;

	if (srx->transport.sin6.sin6_addr.s6_addr32[0] == 0 ||
	    srx->transport.sin6.sin6_addr.s6_addr32[1] == 0 ||
	    srx->transport.sin6.sin6_addr.s6_addr32[2] == htonl(0xffff))
		ipv6 = false;

	rcu_read_lock();

	do {
		if (server)
			afs_put_server(net, server);
		server = NULL;
		read_seqbegin_or_lock(&net->fs_addr_lock, &seq);

		if (ipv6) {
			hlist_for_each_entry_rcu(server, &net->fs_addresses6, addr6_link) {
				alist = rcu_dereference(server->addresses);
				for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
					b = &alist->addrs[i].transport.sin6;
					diff = ((u16 __force)a->sin6_port -
						(u16 __force)b->sin6_port);
					if (diff == 0)
						diff = memcmp(&a->sin6_addr,
							      &b->sin6_addr,
							      sizeof(struct in6_addr));
					if (diff == 0)
						goto found;
				}
			}
		} else {
			hlist_for_each_entry_rcu(server, &net->fs_addresses4, addr4_link) {
				alist = rcu_dereference(server->addresses);
				for (i = 0; i < alist->nr_ipv4; i++) {
					b = &alist->addrs[i].transport.sin6;
					diff = ((u16 __force)a->sin6_port -
						(u16 __force)b->sin6_port);
					if (diff == 0)
						diff = ((u32 __force)a->sin6_addr.s6_addr32[3] -
							(u32 __force)b->sin6_addr.s6_addr32[3]);
					if (diff == 0)
						goto found;
				}
			}
		}

		server = NULL;
	found:
		if (server && !atomic_inc_not_zero(&server->usage))
			server = NULL;

	} while (need_seqretry(&net->fs_addr_lock, seq));

	done_seqretry(&net->fs_addr_lock, seq);

	rcu_read_unlock();
	return server;
}
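
/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * given the transport address of a peer - for instance when matching an
 * incoming callback RPC to the fileserver that sent it - a caller would use
 * the lookup above and drop the reference when done:
 *
 *	struct afs_server *server;
 *
 *	server = afs_find_server(net, &srx);
 *	if (server) {
 *		// ... use the server record ...
 *		afs_put_server(net, server);
 *	}
 *
 * A NULL return just means that no known server currently advertises that
 * address; it is not an error.
 */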

/*
 * Look up a server by its UUID
 */
struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
{
	struct afs_server *server = NULL;
	struct rb_node *p;
	int diff, seq = 0;

	_enter("%pU", uuid);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (server)
			afs_put_server(net, server);
		server = NULL;

		read_seqbegin_or_lock(&net->fs_lock, &seq);

		p = net->fs_servers.rb_node;
		while (p) {
			server = rb_entry(p, struct afs_server, uuid_rb);

			diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				afs_get_server(server);
				break;
			}

			server = NULL;
		}
	} while (need_seqretry(&net->fs_lock, seq));

	done_seqretry(&net->fs_lock, seq);

	_leave(" = %p", server);
	return server;
}
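
/*
 * Illustrative sketch only: looking a fileserver up by the UUID it is
 * registered under in the VLDB.  The record returned, if any, carries a
 * reference that the caller must drop again:
 *
 *	struct afs_server *server;
 *
 *	server = afs_find_server_by_uuid(net, &uuid);
 *	if (server) {
 *		// ... use the server record ...
 *		afs_put_server(net, server);
 *	}
 *
 * afs_lookup_server() below uses this as its fast path before falling back
 * to a VL server query.
 */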

/*
 * Install a server record in the namespace tree
 */
static struct afs_server *afs_install_server(struct afs_net *net,
					     struct afs_server *candidate)
{
	const struct afs_addr_list *alist;
	struct afs_server *server;
	struct rb_node **pp, *p;
	int ret = -EEXIST, diff;

	_enter("%p", candidate);

	write_seqlock(&net->fs_lock);

	/* Firstly install the server in the UUID lookup tree */
	pp = &net->fs_servers.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		_debug("- consider %p", p);
		server = rb_entry(p, struct afs_server, uuid_rb);
		diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto exists;
	}

	server = candidate;
	rb_link_node(&server->uuid_rb, p, pp);
	rb_insert_color(&server->uuid_rb, &net->fs_servers);
	hlist_add_head_rcu(&server->proc_link, &net->fs_proc);

	write_seqlock(&net->fs_addr_lock);
	alist = rcu_dereference_protected(server->addresses,
					  lockdep_is_held(&net->fs_addr_lock.lock));

	/* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
	 * it in the IPv4 and/or IPv6 reverse-map lists.
	 *
	 * TODO: For speed we want to use something other than a flat list
	 * here; even sorting the list in terms of lowest address would help a
	 * bit, but anything we might want to do gets messy and memory
	 * intensive.
	 */
	if (alist->nr_ipv4 > 0)
		hlist_add_head_rcu(&server->addr4_link, &net->fs_addresses4);
	if (alist->nr_addrs > alist->nr_ipv4)
		hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);

	write_sequnlock(&net->fs_addr_lock);
	ret = 0;

exists:
	afs_get_server(server);
	write_sequnlock(&net->fs_lock);
	return server;
}
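
/*
 * The installation contract above, as exercised by afs_lookup_server()
 * below: the caller allocates a candidate record and afs_install_server()
 * either links that candidate into the trees or finds that another thread
 * got there first.  Either way the returned pointer carries a reference; if
 * it is not the candidate, the caller discards the candidate it allocated:
 *
 *	server = afs_install_server(net, candidate);
 *	if (server != candidate) {
 *		afs_put_addrlist(alist);
 *		kfree(candidate);
 *	}
 */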

/*
 * Allocate a new server record
 */
static struct afs_server *afs_alloc_server(struct afs_net *net,
					   const uuid_t *uuid,
					   struct afs_addr_list *alist)
{
	struct afs_server *server;

	_enter("");

	server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
	if (!server)
		goto enomem;

	atomic_set(&server->usage, 1);
	RCU_INIT_POINTER(server->addresses, alist);
	server->addr_version = alist->version;
	server->uuid = *uuid;
	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	rwlock_init(&server->fs_lock);
	INIT_HLIST_HEAD(&server->cb_volumes);
	rwlock_init(&server->cb_break_lock);
	init_waitqueue_head(&server->probe_wq);
	spin_lock_init(&server->probe_lock);

	afs_inc_servers_outstanding(net);
	_leave(" = %p", server);
	return server;

enomem:
	_leave(" = NULL [nomem]");
	return NULL;
}

/*
 * Look up an address record for a server
 */
static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
						 struct key *key, const uuid_t *uuid)
{
	struct afs_vl_cursor vc;
	struct afs_addr_list *alist = NULL;
	int ret;

	ret = -ERESTARTSYS;
	if (afs_begin_vlserver_operation(&vc, cell, key)) {
		while (afs_select_vlserver(&vc)) {
			if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
				alist = afs_yfsvl_get_endpoints(&vc, uuid);
			else
				alist = afs_vl_get_addrs_u(&vc, uuid);
		}

		ret = afs_end_vlserver_operation(&vc);
	}

	return ret < 0 ? ERR_PTR(ret) : alist;
}

/*
 * Get or create a fileserver record.
 */
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
				     const uuid_t *uuid)
{
	struct afs_addr_list *alist;
	struct afs_server *server, *candidate;

	_enter("%p,%pU", cell->net, uuid);

	server = afs_find_server_by_uuid(cell->net, uuid);
	if (server)
		return server;

	alist = afs_vl_lookup_addrs(cell, key, uuid);
	if (IS_ERR(alist))
		return ERR_CAST(alist);

	candidate = afs_alloc_server(cell->net, uuid, alist);
	if (!candidate) {
		afs_put_addrlist(alist);
		return ERR_PTR(-ENOMEM);
	}

	server = afs_install_server(cell->net, candidate);
	if (server != candidate) {
		afs_put_addrlist(alist);
		kfree(candidate);
	}

	_leave(" = %p{%d}", server, atomic_read(&server->usage));
	return server;
}
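
/*
 * Illustrative usage sketch (hypothetical caller): given a fileserver UUID
 * taken from a volume location record, obtain a usable server record or an
 * error pointer:
 *
 *	struct afs_server *server;
 *
 *	server = afs_lookup_server(cell, key, &uuid);
 *	if (IS_ERR(server))
 *		return PTR_ERR(server);
 *	// ... use the server record ...
 *	afs_put_server(cell->net, server);
 */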

/*
 * Set the server timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 */
static void afs_set_server_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		afs_inc_servers_outstanding(net);
		if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
			afs_dec_servers_outstanding(net);
	}
}
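
/*
 * Note on the pairing above: an extra count is taken on servers_outstanding
 * for the timer, but timer_reduce() only brings an already-pending timer's
 * expiry forward and returns nonzero in that case; the pending timer already
 * accounts for one count, so the extra one is dropped again.
 */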

/*
 * Server management timer.  We have an increment on servers_outstanding that
 * we need to pass along to the work item.
 */
void afs_servers_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_timer);

	_enter("");
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Release a reference on a server record.
 */
void afs_put_server(struct afs_net *net, struct afs_server *server)
{
	unsigned int usage;

	if (!server)
		return;

	server->put_time = ktime_get_real_seconds();

	usage = atomic_dec_return(&server->usage);

	_enter("{%u}", usage);

	if (likely(usage > 0))
		return;

	afs_set_server_timer(net, afs_server_gc_delay);
}
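
/*
 * Note that put_time is stamped on every put, not just the last one, and
 * that destruction is never performed directly here: afs_manage_servers()
 * compares put_time against afs_server_gc_delay to decide when a record that
 * it finds with a usage count of 1 has been idle for long enough to hand to
 * afs_gc_servers().
 */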

/*
 * RCU callback that actually frees a server record once any remaining RCU
 * readers have finished with it.
 */
static void afs_server_rcu(struct rcu_head *rcu)
{
	struct afs_server *server = container_of(rcu, struct afs_server, rcu);

	afs_put_addrlist(rcu_access_pointer(server->addresses));
	kfree(server);
}

/*
 * Destroy a dead server
 */
static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
{
	struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
	struct afs_addr_cursor ac = {
		.alist	= alist,
		.index	= alist->preferred,
		.error	= 0,
	};
	_enter("%p", server);

	if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
		afs_fs_give_up_all_callbacks(net, server, &ac, NULL);

	wait_var_event(&server->probe_outstanding,
		       atomic_read(&server->probe_outstanding) == 0);

	call_rcu(&server->rcu, afs_server_rcu);
	afs_dec_servers_outstanding(net);
}

/*
 * Garbage collect any expired servers.
 */
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
	struct afs_server *server;
	bool deleted;
	int usage;

	while ((server = gc_list)) {
		gc_list = server->gc_next;

		write_seqlock(&net->fs_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg(&server->usage, &usage, 0);
		if (deleted) {
			rb_erase(&server->uuid_rb, &net->fs_servers);
			hlist_del_rcu(&server->proc_link);
		}
		write_sequnlock(&net->fs_lock);

		if (deleted) {
			write_seqlock(&net->fs_addr_lock);
			if (!hlist_unhashed(&server->addr4_link))
				hlist_del_rcu(&server->addr4_link);
			if (!hlist_unhashed(&server->addr6_link))
				hlist_del_rcu(&server->addr6_link);
			write_sequnlock(&net->fs_addr_lock);
			afs_destroy_server(net, server);
		}
	}
}
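
/*
 * Two details of the collector above: the 1->0 cmpxchg only succeeds if
 * nobody regained a reference after the record was put on the gc_list, so a
 * busy record is simply left in the tree; and the hlist_unhashed() checks
 * are needed because a server is only ever placed on the IPv4 and/or IPv6
 * reverse-map lists that match the addresses it actually has.
 */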

/*
 * Manage the records of servers known to be within a network namespace.  This
 * includes garbage collecting unused servers.
 *
 * Note also that we were given an increment on net->servers_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_servers(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, fs_manager);
	struct afs_server *gc_list = NULL;
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the server list looking for servers that have expired from
	 * lack of use.
	 */
	read_seqlock_excl(&net->fs_lock);

	for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
		struct afs_server *server =
			rb_entry(cursor, struct afs_server, uuid_rb);
		int usage = atomic_read(&server->usage);

		_debug("manage %pU %u", &server->uuid, usage);

		ASSERTCMP(usage, >=, 1);
		ASSERTIFCMP(purging, usage, ==, 1);

		if (usage == 1) {
			time64_t expire_at = server->put_time;

			if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
			    !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
				expire_at += afs_server_gc_delay;
			if (purging || expire_at <= now) {
				server->gc_next = gc_list;
				gc_list = server;
			} else if (expire_at < next_manage) {
				next_manage = expire_at;
			}
		}
	}

	read_sequnlock_excl(&net->fs_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * servers_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->fs_manager))
				afs_inc_servers_outstanding(net);
		} else {
			afs_set_server_timer(net, next_manage - now);
		}
	}

	afs_gc_servers(net, gc_list);

	afs_dec_servers_outstanding(net);
	_leave(" [%d]", atomic_read(&net->servers_outstanding));
}
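
/*
 * Worked example of the expiry calculation above, assuming the default
 * afs_server_gc_delay of 10 seconds: a record last put at time T normally
 * becomes eligible for collection at T + 10, but if its VL lookup failed
 * (AFS_SERVER_FL_VL_FAIL or AFS_SERVER_FL_NOT_FOUND) it is eligible at T
 * with no grace period, and when the namespace is being purged every record
 * is collected regardless of age.
 */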

/*
 * Queue the server manager work item, taking a count on servers_outstanding
 * for it to consume; if the work item was already queued, the extra count is
 * dropped again.
 */
static void afs_queue_server_manager(struct afs_net *net)
{
	afs_inc_servers_outstanding(net);
	if (!queue_work(afs_wq, &net->fs_manager))
		afs_dec_servers_outstanding(net);
}

/*
 * Purge list of servers.
 */
void afs_purge_servers(struct afs_net *net)
{
	_enter("");

	if (del_timer_sync(&net->fs_timer))
		atomic_dec(&net->servers_outstanding);

	afs_queue_server_manager(net);

	_debug("wait");
	wait_var_event(&net->servers_outstanding,
		       !atomic_read(&net->servers_outstanding));
	_leave("");
}

/*
 * Get an update for a server's address list.
 */
static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
{
	struct afs_addr_list *alist, *discard;

	_enter("");

	alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
				    &server->uuid);
	if (IS_ERR(alist)) {
		fc->ac.error = PTR_ERR(alist);
		_leave(" = f [%d]", fc->ac.error);
		return false;
	}

	discard = alist;
	if (server->addr_version != alist->version) {
		write_lock(&server->fs_lock);
		discard = rcu_dereference_protected(server->addresses,
						    lockdep_is_held(&server->fs_lock));
		rcu_assign_pointer(server->addresses, alist);
		server->addr_version = alist->version;
		write_unlock(&server->fs_lock);
	}

	server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
	afs_put_addrlist(discard);
	_leave(" = t");
	return true;
}
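
/*
 * Note on the swap above: the replacement list is published with
 * rcu_assign_pointer() under the server's fs_lock, and the superseded list
 * is then handed to afs_put_addrlist(), which is expected to defer the
 * actual free until concurrent RCU readers (such as afs_find_server(), which
 * walks the list under rcu_read_lock()) have finished with it.  If the
 * version number is unchanged, the freshly fetched list is the one discarded
 * instead.
 */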

/*
 * See if a server's address list needs updating.
 */
bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
{
	time64_t now = ktime_get_real_seconds();
	long diff;
	bool success;
	int ret, retries = 0;

	_enter("");

	ASSERT(server);

retry:
	diff = READ_ONCE(server->update_at) - now;
	if (diff > 0) {
		_leave(" = t [not now %ld]", diff);
		return true;
	}

	if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
		success = afs_update_server_record(fc, server);
		clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
		wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
		_leave(" = %d", success);
		return success;
	}

	ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
			  TASK_INTERRUPTIBLE);
	if (ret == -ERESTARTSYS) {
		fc->ac.error = ret;
		_leave(" = f [intr]");
		return false;
	}

	retries++;
	if (retries == 4) {
		_leave(" = f [stale]");
		ret = -ESTALE;
		return false;
	}
	goto retry;
}
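
/*
 * Note on the synchronisation above: AFS_SERVER_FL_UPDATING acts as a simple
 * bit lock.  The thread that wins test_and_set_bit_lock() performs the VL
 * refresh; everyone else sleeps interruptibly in wait_on_bit() and, once
 * woken, loops back to re-check update_at.  A waiter that has been woken
 * four times without the record becoming fresh gives up and reports failure
 * rather than spinning indefinitely.
 */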