// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static bool cache_listeners_exist(struct cache_detail *detail);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp))
				continue;
			tmp = cache_get_rcu(tmp);
			rcu_read_unlock();
			return tmp;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (detail->match(tmp, key)) {
			if (cache_is_expired(detail, tmp)) {
				sunrpc_begin_cache_remove_entry(tmp, detail);
				freeme = tmp;
				break;
			}
			cache_get(tmp);
			spin_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
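
/*
 * For illustration only: a typical cache implementation wraps
 * sunrpc_cache_lookup_rcu() in a type-safe helper that fills a key on
 * the stack and converts the result back with container_of().  The
 * names below ("my_ent", "my_ent_lookup", MY_HASH_BITS) are
 * hypothetical and assume hash_size == (1 << MY_HASH_BITS):
 */
#if 0
struct my_ent {
	struct cache_head	h;
	int			key;
	int			value;
};

static struct my_ent *my_ent_lookup(struct cache_detail *cd, int key)
{
	struct my_ent tmp;
	struct cache_head *ch;

	tmp.key = key;
	ch = sunrpc_cache_lookup_rcu(cd, &tmp.h,
				     hash_32(key, MY_HASH_BITS));
	if (!ch)
		return NULL;	/* allocation failed */
	return container_of(ch, struct my_ent, h);
}
#endif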

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();
	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
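
/*
 * For illustration only: callers usually fill a temporary entry with
 * the new data and let sunrpc_cache_update() either update 'old' in
 * place or replace it.  The reference on 'old' is always consumed.
 * "my_ent" and "my_ent_update" are hypothetical names as above:
 */
#if 0
static struct my_ent *my_ent_update(struct cache_detail *cd,
				    struct my_ent *new, struct my_ent *old)
{
	struct cache_head *ch;
	int hash = hash_32(new->key, MY_HASH_BITS);

	ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
	if (!ch)
		return NULL;	/* alloc failed; 'old' already put */
	return container_of(ch, struct my_ent, h);
}
#endif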

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (cd->cache_upcall)
		return cd->cache_upcall(cd, h);
	return sunrpc_cache_pipe_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else {
			/*
			 * In combination with write barrier in
			 * sunrpc_cache_update, ensures that anyone
			 * using the cache entry after this sees the
			 * updated contents:
			 */
			smp_rmb();
			return 0;
		}
	}
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		set_bit(CACHE_NEGATIVE, &h->flags);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *		   upcall completed but item is still invalid (implying that
 *		   the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%lld, age=%lld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				rv = try_to_negate_entry(detail, h);
				break;
			case -EAGAIN:
				cache_fresh_unlocked(h, detail);
				break;
			}
		} else if (!cache_listeners_exist(detail))
			rv = try_to_negate_entry(detail, h);
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
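
/*
 * For illustration only: a server-side user of cache_check() must
 * remember that any non-zero return means its reference on the entry
 * has already been dropped.  "my_ent_check" is a hypothetical caller:
 */
#if 0
static int my_ent_check(struct cache_detail *cd, struct my_ent *ent,
			struct cache_req *rqstp)
{
	int err = cache_check(cd, &ent->h, rqstp);

	switch (err) {
	case 0:			/* valid; we still hold our reference */
		break;
	case -ENOENT:		/* negative entry; reference dropped */
	case -EAGAIN:		/* deferred; will be revisited later */
	case -ETIMEDOUT:	/* upcall failed or entry was replaced */
		break;
	}
	return err;
}
#endif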

/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	struct hlist_node *tmp = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
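
/*
 * For illustration only: a transport supports deferral by embedding a
 * cache_deferred_req in its own request record and returning it from
 * ->defer.  "my_req" and the helpers it calls are hypothetical; a
 * real provider would also set .owner if it uses
 * cache_clean_deferred():
 */
#if 0
struct my_req {
	struct cache_req		req;
	struct cache_deferred_req	deferred;
	/* ... enough state to replay the request later ... */
};

static void my_req_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct my_req *r = container_of(dreq, struct my_req, deferred);

	if (too_many)
		my_req_drop(r);		/* over DFR_MAX: discard */
	else
		my_req_requeue(r);	/* cache data arrived: replay */
}

static struct cache_deferred_req *my_req_defer(struct cache_req *req)
{
	struct my_req *r = container_of(req, struct my_req, req);

	r->deferred.revisit = my_req_revisit;
	return &r->deferred;
}
#endif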

#define	DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define	DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define	DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}
static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */
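
/*
 * For illustration only: seen from a user-space daemon, the channel
 * file behaves like a record-oriented pipe - each read() returns one
 * whole request line and each write() must carry one whole reply
 * line.  A hypothetical daemon loop, error handling elided
 * ("mycache" and make_reply() are assumed names):
 */
#if 0
	char request[8192], reply[8192];
	int fd = open("/proc/net/rpc/mycache/channel", O_RDWR);
	ssize_t n;

	while ((n = read(fd, request, sizeof(request))) > 0) {
		int rlen = make_reply(reply, sizeof(reply), request, n);

		write(fd, reply, rlen);	/* one '\n'-terminated record */
	}
#endif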

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);
	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}
/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
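
/*
 * For illustration only: a ->cache_request callback builds one record
 * from these helpers; a negative residual length reports overflow to
 * cache_request() above.  "my_request" and "my_ent" are hypothetical:
 */
#if 0
static void my_request(struct cache_detail *cd, struct cache_head *h,
		       char **bpp, int *blen)
{
	struct my_ent *ent = container_of(h, struct my_ent, h);
	char num[16];

	snprintf(num, sizeof(num), "%d", ent->key);
	qword_add(bpp, blen, num);			/* quoted text field */
	qword_addhex(bpp, blen, (char *)&ent->key,
		     sizeof(ent->key));			/* \x-prefixed field */
	(*bpp)[-1] = '\n';	/* replace the trailing space */
}
#endif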

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (!detail->cache_request)
		return -EINVAL;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}
	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
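
/*
 * For illustration only: a ->cache_parse callback walks the reply
 * line with qword_get(), one field at a time.  "my_parse" and the
 * key handling are hypothetical:
 */
#if 0
static int my_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	char word[128];
	int len;

	/* first field: the key */
	len = qword_get(&mesg, word, sizeof(word));
	if (len <= 0)
		return -EINVAL;
	/* ... look up or create the entry for 'word' ... */

	/* second field: the content (may legitimately be empty) */
	len = qword_get(&mesg, word, sizeof(word));
	if (len < 0)
		return -EINVAL;
	/* ... fill a temporary entry and call sunrpc_cache_update() ... */
	return 0;
}
#endif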

/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

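/*
 * For illustration only: a minimal ->cache_show emits a header line
 * when called with a NULL item and one line per entry otherwise.
 * "my_show" and "my_ent" are hypothetical:
 */
#if 0
static int my_show(struct seq_file *m, struct cache_detail *cd,
		   struct cache_head *h)
{
	struct my_ent *ent;

	if (h == NULL) {	/* SEQ_START_TOKEN: print the header */
		seq_puts(m, "#key value\n");
		return 0;
	}
	ent = container_of(h, struct my_ent, h);
	seq_printf(m, "%d %d\n", ent->key, ent->value);
	return 0;
}
#endif
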
static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
					hlist_next_rcu(&ch->cache_list)),
					struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time64_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	if (cd->flush)
		cd->flush();

	*ppos += count;
	return count;
}
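
/*
 * For illustration only: from user space, writing any number to the
 * flush file discards the whole cache; as noted above, the value is
 * validated but otherwise ignored.  "mycache" is a hypothetical name:
 */
#if 0
	int fd = open("/proc/net/rpc/mycache/flush", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1\n", 2);
		close(fd);
	}
#endif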

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct proc_ops cache_channel_proc_ops = {
	.proc_lseek	= no_llseek,
	.proc_read	= cache_read_procfs,
	.proc_write	= cache_write_procfs,
	.proc_poll	= cache_poll_procfs,
	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.proc_open	= cache_open_procfs,
	.proc_release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct proc_ops content_proc_ops = {
	.proc_open	= content_open_procfs,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct proc_ops cache_flush_proc_ops = {
	.proc_open	= open_flush_procfs,
	.proc_read	= read_flush_procfs,
	.proc_write	= write_flush_procfs,
	.proc_release	= release_flush_procfs,
	.proc_lseek	= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_proc_ops, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_channel_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		sunrpc_begin_cache_remove_entry(h, cd);
		spin_unlock(&cd->hash_lock);
		sunrpc_end_cache_remove_entry(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);