1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * net/sunrpc/cache.c
4 *
5 * Generic code for various authentication-related caches
6 * used by sunrpc clients and servers.
7 *
8 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
9 */
10
11 #include <linux/types.h>
12 #include <linux/fs.h>
13 #include <linux/file.h>
14 #include <linux/slab.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/kmod.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/ctype.h>
21 #include <linux/string_helpers.h>
22 #include <linux/uaccess.h>
23 #include <linux/poll.h>
24 #include <linux/seq_file.h>
25 #include <linux/proc_fs.h>
26 #include <linux/net.h>
27 #include <linux/workqueue.h>
28 #include <linux/mutex.h>
29 #include <linux/pagemap.h>
30 #include <asm/ioctls.h>
31 #include <linux/sunrpc/types.h>
32 #include <linux/sunrpc/cache.h>
33 #include <linux/sunrpc/stats.h>
34 #include <linux/sunrpc/rpc_pipe_fs.h>
35 #include <trace/events/sunrpc.h>
36
37 #include "netns.h"
38 #include "fail.h"
39
40 #define RPCDBG_FACILITY RPCDBG_CACHE
41
42 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
43 static void cache_revisit_request(struct cache_head *item);
44
45 static void cache_init(struct cache_head *h, struct cache_detail *detail)
46 {
47 time64_t now = seconds_since_boot();
48 INIT_HLIST_NODE(&h->cache_list);
49 h->flags = 0;
50 kref_init(&h->ref);
51 h->expiry_time = now + CACHE_NEW_EXPIRY;
52 if (now <= detail->flush_time)
53 /* ensure it isn't already expired */
54 now = detail->flush_time + 1;
55 h->last_refresh = now;
56 }
57
58 static void cache_fresh_unlocked(struct cache_head *head,
59 struct cache_detail *detail);
60
61 static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
62 struct cache_head *key,
63 int hash)
64 {
65 struct hlist_head *head = &detail->hash_table[hash];
66 struct cache_head *tmp;
67
68 rcu_read_lock();
69 hlist_for_each_entry_rcu(tmp, head, cache_list) {
70 if (!detail->match(tmp, key))
71 continue;
72 if (test_bit(CACHE_VALID, &tmp->flags) &&
73 cache_is_expired(detail, tmp))
74 continue;
75 tmp = cache_get_rcu(tmp);
76 rcu_read_unlock();
77 return tmp;
78 }
79 rcu_read_unlock();
80 return NULL;
81 }
82
83 static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
84 struct cache_detail *cd)
85 {
86 /* Must be called under cd->hash_lock */
87 hlist_del_init_rcu(&ch->cache_list);
88 set_bit(CACHE_CLEANED, &ch->flags);
89 cd->entries --;
90 }
91
92 static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
93 struct cache_detail *cd)
94 {
95 cache_fresh_unlocked(ch, cd);
96 cache_put(ch, cd);
97 }
98
99 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
100 struct cache_head *key,
101 int hash)
102 {
103 struct cache_head *new, *tmp, *freeme = NULL;
104 struct hlist_head *head = &detail->hash_table[hash];
105
106 new = detail->alloc();
107 if (!new)
108 return NULL;
109 /* must fully initialise 'new', else
110 * we might lose it if we need to
111 * cache_put it soon.
112 */
113 cache_init(new, detail);
114 detail->init(new, key);
115
116 spin_lock(&detail->hash_lock);
117
118 /* check if entry appeared while we slept */
119 hlist_for_each_entry_rcu(tmp, head, cache_list,
120 lockdep_is_held(&detail->hash_lock)) {
121 if (!detail->match(tmp, key))
122 continue;
123 if (test_bit(CACHE_VALID, &tmp->flags) &&
124 cache_is_expired(detail, tmp)) {
125 sunrpc_begin_cache_remove_entry(tmp, detail);
126 trace_cache_entry_expired(detail, tmp);
127 freeme = tmp;
128 break;
129 }
130 cache_get(tmp);
131 spin_unlock(&detail->hash_lock);
132 cache_put(new, detail);
133 return tmp;
134 }
135
136 hlist_add_head_rcu(&new->cache_list, head);
137 detail->entries++;
138 cache_get(new);
139 spin_unlock(&detail->hash_lock);
140
141 if (freeme)
142 sunrpc_end_cache_remove_entry(freeme, detail);
143 return new;
144 }
145
146 struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
147 struct cache_head *key, int hash)
148 {
149 struct cache_head *ret;
150
151 ret = sunrpc_cache_find_rcu(detail, key, hash);
152 if (ret)
153 return ret;
154 /* Didn't find anything, insert an empty entry */
155 return sunrpc_cache_add_entry(detail, key, hash);
156 }
157 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
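/*
 * Minimal usage sketch (illustrative only; the my_* names are hypothetical,
 * not an in-tree cache): callers embed a cache_head in their own entry type,
 * provide alloc/init/match callbacks in the cache_detail, and look entries up
 * with a key filled in on the stack.
 *
 *	struct my_ent {
 *		struct cache_head	h;
 *		u32			key;
 *	};
 *
 *	struct my_ent key = { .key = 42 };
 *	struct cache_head *ch;
 *
 *	ch = sunrpc_cache_lookup_rcu(&my_cache, &key.h,
 *				     hash_32(key.key, MY_HASH_BITS));
 *	if (ch) {
 *		// a reference is held; drop it with cache_put() when done
 *	}
 */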
158
159 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
160
161 static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
162 struct cache_detail *detail)
163 {
164 time64_t now = seconds_since_boot();
165 if (now <= detail->flush_time)
166 /* ensure it isn't immediately treated as expired */
167 now = detail->flush_time + 1;
168 head->expiry_time = expiry;
169 head->last_refresh = now;
170 smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
171 set_bit(CACHE_VALID, &head->flags);
172 }
173
174 static void cache_fresh_unlocked(struct cache_head *head,
175 struct cache_detail *detail)
176 {
177 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
178 cache_revisit_request(head);
179 cache_dequeue(detail, head);
180 }
181 }
182
183 static void cache_make_negative(struct cache_detail *detail,
184 struct cache_head *h)
185 {
186 set_bit(CACHE_NEGATIVE, &h->flags);
187 trace_cache_entry_make_negative(detail, h);
188 }
189
190 static void cache_entry_update(struct cache_detail *detail,
191 struct cache_head *h,
192 struct cache_head *new)
193 {
194 if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
195 detail->update(h, new);
196 trace_cache_entry_update(detail, h);
197 } else {
198 cache_make_negative(detail, h);
199 }
200 }
201
202 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
203 struct cache_head *new, struct cache_head *old, int hash)
204 {
205 /* The 'old' entry is to be replaced by 'new'.
206 * If 'old' is not VALID, we update it directly,
207 * otherwise we need to replace it
208 */
209 struct cache_head *tmp;
210
211 if (!test_bit(CACHE_VALID, &old->flags)) {
212 spin_lock(&detail->hash_lock);
213 if (!test_bit(CACHE_VALID, &old->flags)) {
214 cache_entry_update(detail, old, new);
215 cache_fresh_locked(old, new->expiry_time, detail);
216 spin_unlock(&detail->hash_lock);
217 cache_fresh_unlocked(old, detail);
218 return old;
219 }
220 spin_unlock(&detail->hash_lock);
221 }
222 /* We need to insert a new entry */
223 tmp = detail->alloc();
224 if (!tmp) {
225 cache_put(old, detail);
226 return NULL;
227 }
228 cache_init(tmp, detail);
229 detail->init(tmp, old);
230
231 spin_lock(&detail->hash_lock);
232 cache_entry_update(detail, tmp, new);
233 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
234 detail->entries++;
235 cache_get(tmp);
236 cache_fresh_locked(tmp, new->expiry_time, detail);
237 cache_fresh_locked(old, 0, detail);
238 spin_unlock(&detail->hash_lock);
239 cache_fresh_unlocked(tmp, detail);
240 cache_fresh_unlocked(old, detail);
241 cache_put(old, detail);
242 return tmp;
243 }
244 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
245
246 static inline int cache_is_valid(struct cache_head *h)
247 {
248 if (!test_bit(CACHE_VALID, &h->flags))
249 return -EAGAIN;
250 else {
251 /* entry is valid */
252 if (test_bit(CACHE_NEGATIVE, &h->flags))
253 return -ENOENT;
254 else {
255 /*
256 * In combination with write barrier in
257 * sunrpc_cache_update, ensures that anyone
258 * using the cache entry after this sees the
259 * updated contents:
260 */
261 smp_rmb();
262 return 0;
263 }
264 }
265 }
266
267 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
268 {
269 int rv;
270
271 spin_lock(&detail->hash_lock);
272 rv = cache_is_valid(h);
273 if (rv == -EAGAIN) {
274 cache_make_negative(detail, h);
275 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
276 detail);
277 rv = -ENOENT;
278 }
279 spin_unlock(&detail->hash_lock);
280 cache_fresh_unlocked(h, detail);
281 return rv;
282 }
283
284 /*
285 * This is the generic cache management routine for all
286 * the authentication caches.
287 * It checks the currency of a cache item and will (later)
288 * initiate an upcall to fill it if needed.
289 *
290 *
291 * Returns 0 if the cache_head can be used, or cache_puts it and returns
292 * -EAGAIN if upcall is pending and request has been queued
293 * -ETIMEDOUT if upcall failed or request could not be queued or
294 * upcall completed but item is still invalid (implying that
295 * the cache item has been replaced with a newer one).
296 * -ENOENT if cache entry was negative
297 */
298 int cache_check(struct cache_detail *detail,
299 struct cache_head *h, struct cache_req *rqstp)
300 {
301 int rv;
302 time64_t refresh_age, age;
303
304 /* First decide return status as best we can */
305 rv = cache_is_valid(h);
306
307 /* now see if we want to start an upcall */
308 refresh_age = (h->expiry_time - h->last_refresh);
309 age = seconds_since_boot() - h->last_refresh;
310
311 if (rqstp == NULL) {
312 if (rv == -EAGAIN)
313 rv = -ENOENT;
314 } else if (rv == -EAGAIN ||
315 (h->expiry_time != 0 && age > refresh_age/2)) {
316 dprintk("RPC: Want update, refage=%lld, age=%lld\n",
317 refresh_age, age);
318 switch (detail->cache_upcall(detail, h)) {
319 case -EINVAL:
320 rv = try_to_negate_entry(detail, h);
321 break;
322 case -EAGAIN:
323 cache_fresh_unlocked(h, detail);
324 break;
325 }
326 }
327
328 if (rv == -EAGAIN) {
329 if (!cache_defer_req(rqstp, h)) {
330 /*
331 * Request was not deferred; handle it as best
332 * we can ourselves:
333 */
334 rv = cache_is_valid(h);
335 if (rv == -EAGAIN)
336 rv = -ETIMEDOUT;
337 }
338 }
339 if (rv)
340 cache_put(h, detail);
341 return rv;
342 }
343 EXPORT_SYMBOL_GPL(cache_check);
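/*
 * Typical calling pattern (sketch only; use_the_entry() is a hypothetical
 * consumer): the reference obtained from the lookup is consumed by
 * cache_check() on any non-zero return, so only the success path needs an
 * explicit cache_put().
 *
 *	h = sunrpc_cache_lookup_rcu(cd, &key, hash);
 *	if (!h)
 *		return -ENOMEM;
 *	switch (cache_check(cd, h, &rqstp->rq_chandle)) {
 *	case 0:			// entry is valid and fresh enough
 *		use_the_entry(h);
 *		cache_put(h, cd);
 *		break;
 *	case -EAGAIN:		// upcall pending, request deferred
 *	case -ETIMEDOUT:	// upcall failed or timed out
 *	case -ENOENT:		// negative entry
 *		break;		// reference already dropped by cache_check()
 *	}
 */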
344
345 /*
346 * caches need to be periodically cleaned.
347 * For this we maintain a list of cache_detail and
348 * a current pointer into that list and into the table
349 * for that entry.
350 *
351 * Each time cache_clean is called it finds the next non-empty entry
352 * in the current table and walks the list in that entry
353 * looking for entries that can be removed.
354 *
355 * An entry gets removed if:
356 * - The expiry is before current time
357 * - The last_refresh time is before the flush_time for that cache
358 *
359 * later we might drop old entries with non-NEVER expiry if that table
360 * is getting 'full' for some definition of 'full'
361 *
362 * The question of "how often to scan a table" is an interesting one
363 * and is answered in part by the use of the "nextcheck" field in the
364 * cache_detail.
365 * When a scan of a table begins, the nextcheck field is set to a time
366 * that is well into the future.
367 * While scanning, if an expiry time is found that is earlier than the
368 * current nextcheck time, nextcheck is set to that expiry time.
369 * If the flush_time is ever set to a time earlier than the nextcheck
370 * time, the nextcheck time is then set to that flush_time.
371 *
372 * A table is then only scanned if the current time is at least
373 * the nextcheck time.
374 *
375 */
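/*
 * Worked example of the nextcheck logic above (times are illustrative):
 * a table scanned at t=1000 whose surviving entries expire at t=1020 and
 * t=1300 first gets nextcheck = 1000 + 30*60; the scan then pulls that down
 * to 1021 on seeing the first entry.  cache_clean() will skip this table
 * until seconds_since_boot() reaches 1021, unless a flush lowers nextcheck
 * sooner.
 */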
376
377 static LIST_HEAD(cache_list);
378 static DEFINE_SPINLOCK(cache_list_lock);
379 static struct cache_detail *current_detail;
380 static int current_index;
381
382 static void do_cache_clean(struct work_struct *work);
383 static struct delayed_work cache_cleaner;
384
385 void sunrpc_init_cache_detail(struct cache_detail *cd)
386 {
387 spin_lock_init(&cd->hash_lock);
388 INIT_LIST_HEAD(&cd->queue);
389 spin_lock(&cache_list_lock);
390 cd->nextcheck = 0;
391 cd->entries = 0;
392 atomic_set(&cd->writers, 0);
393 cd->last_close = 0;
394 cd->last_warn = -1;
395 list_add(&cd->others, &cache_list);
396 spin_unlock(&cache_list_lock);
397
398 /* start the cleaning process */
399 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
400 }
401 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
402
403 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
404 {
405 cache_purge(cd);
406 spin_lock(&cache_list_lock);
407 spin_lock(&cd->hash_lock);
408 if (current_detail == cd)
409 current_detail = NULL;
410 list_del_init(&cd->others);
411 spin_unlock(&cd->hash_lock);
412 spin_unlock(&cache_list_lock);
413 if (list_empty(&cache_list)) {
414 /* module must be being unloaded so it's safe to kill the worker */
415 cancel_delayed_work_sync(&cache_cleaner);
416 }
417 }
418 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
419
420 /* clean cache tries to find something to clean
421 * and cleans it.
422 * It returns 1 if it cleaned something,
423 * 0 if it didn't find anything this time
424 * -1 if it fell off the end of the list.
425 */
426 static int cache_clean(void)
427 {
428 int rv = 0;
429 struct list_head *next;
430
431 spin_lock(&cache_list_lock);
432
433 /* find a suitable table if we don't already have one */
434 while (current_detail == NULL ||
435 current_index >= current_detail->hash_size) {
436 if (current_detail)
437 next = current_detail->others.next;
438 else
439 next = cache_list.next;
440 if (next == &cache_list) {
441 current_detail = NULL;
442 spin_unlock(&cache_list_lock);
443 return -1;
444 }
445 current_detail = list_entry(next, struct cache_detail, others);
446 if (current_detail->nextcheck > seconds_since_boot())
447 current_index = current_detail->hash_size;
448 else {
449 current_index = 0;
450 current_detail->nextcheck = seconds_since_boot()+30*60;
451 }
452 }
453
454 /* find a non-empty bucket in the table */
455 while (current_detail &&
456 current_index < current_detail->hash_size &&
457 hlist_empty(&current_detail->hash_table[current_index]))
458 current_index++;
459
460 /* find a cleanable entry in the bucket and clean it, or set to next bucket */
461
462 if (current_detail && current_index < current_detail->hash_size) {
463 struct cache_head *ch = NULL;
464 struct cache_detail *d;
465 struct hlist_head *head;
466 struct hlist_node *tmp;
467
468 spin_lock(&current_detail->hash_lock);
469
470 /* Ok, now to clean this strand */
471
472 head = &current_detail->hash_table[current_index];
473 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
474 if (current_detail->nextcheck > ch->expiry_time)
475 current_detail->nextcheck = ch->expiry_time+1;
476 if (!cache_is_expired(current_detail, ch))
477 continue;
478
479 sunrpc_begin_cache_remove_entry(ch, current_detail);
480 trace_cache_entry_expired(current_detail, ch);
481 rv = 1;
482 break;
483 }
484
485 spin_unlock(&current_detail->hash_lock);
486 d = current_detail;
487 if (!ch)
488 current_index ++;
489 spin_unlock(&cache_list_lock);
490 if (ch)
491 sunrpc_end_cache_remove_entry(ch, d);
492 } else
493 spin_unlock(&cache_list_lock);
494
495 return rv;
496 }
497
498 /*
499 * We want to regularly clean the cache, so we need to schedule some work ...
500 */
501 static void do_cache_clean(struct work_struct *work)
502 {
503 int delay;
504
505 if (list_empty(&cache_list))
506 return;
507
508 if (cache_clean() == -1)
509 delay = round_jiffies_relative(30*HZ);
510 else
511 delay = 5;
512
513 queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay);
514 }
515
516
517 /*
518 * Clean all caches promptly. This just calls cache_clean
519 * repeatedly until we are sure that every cache has had a chance to
520 * be fully cleaned
521 */
522 void cache_flush(void)
523 {
524 while (cache_clean() != -1)
525 cond_resched();
526 while (cache_clean() != -1)
527 cond_resched();
528 }
529 EXPORT_SYMBOL_GPL(cache_flush);
530
531 void cache_purge(struct cache_detail *detail)
532 {
533 struct cache_head *ch = NULL;
534 struct hlist_head *head = NULL;
535 int i = 0;
536
537 spin_lock(&detail->hash_lock);
538 if (!detail->entries) {
539 spin_unlock(&detail->hash_lock);
540 return;
541 }
542
543 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
544 for (i = 0; i < detail->hash_size; i++) {
545 head = &detail->hash_table[i];
546 while (!hlist_empty(head)) {
547 ch = hlist_entry(head->first, struct cache_head,
548 cache_list);
549 sunrpc_begin_cache_remove_entry(ch, detail);
550 spin_unlock(&detail->hash_lock);
551 sunrpc_end_cache_remove_entry(ch, detail);
552 spin_lock(&detail->hash_lock);
553 }
554 }
555 spin_unlock(&detail->hash_lock);
556 }
557 EXPORT_SYMBOL_GPL(cache_purge);
558
559
560 /*
561 * Deferral and Revisiting of Requests.
562 *
563 * If a cache lookup finds a pending entry, we
564 * need to defer the request and revisit it later.
565 * All deferred requests are stored in a hash table,
566 * indexed by "struct cache_head *".
567 * As it may be wasteful to store a whole request
568 * structure, we allow the request to provide a
569 * deferred form, which must contain a
570 * 'struct cache_deferred_req'
571 * This cache_deferred_req contains a method to allow
572 * it to be revisited when cache info is available
573 */
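/*
 * Sketch of a deferrable request provider (hypothetical names): the caller
 * wraps whatever it needs to replay the request around a cache_deferred_req
 * and supplies a ->revisit callback; this file only ever sees the embedded
 * handle.
 *
 *	struct my_deferred {
 *		struct cache_deferred_req	handle;
 *		// ... saved request state ...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred *md =
 *			container_of(dreq, struct my_deferred, handle);
 *
 *		if (too_many)
 *			;	// drop the saved request
 *		else
 *			;	// requeue it for processing
 *	}
 */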
574
575 #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
576 #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
577
578 #define DFR_MAX 300 /* ??? */
579
580 static DEFINE_SPINLOCK(cache_defer_lock);
581 static LIST_HEAD(cache_defer_list);
582 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
583 static int cache_defer_cnt;
584
585 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
586 {
587 hlist_del_init(&dreq->hash);
588 if (!list_empty(&dreq->recent)) {
589 list_del_init(&dreq->recent);
590 cache_defer_cnt--;
591 }
592 }
593
594 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
595 {
596 int hash = DFR_HASH(item);
597
598 INIT_LIST_HEAD(&dreq->recent);
599 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
600 }
601
602 static void setup_deferral(struct cache_deferred_req *dreq,
603 struct cache_head *item,
604 int count_me)
605 {
606
607 dreq->item = item;
608
609 spin_lock(&cache_defer_lock);
610
611 __hash_deferred_req(dreq, item);
612
613 if (count_me) {
614 cache_defer_cnt++;
615 list_add(&dreq->recent, &cache_defer_list);
616 }
617
618 spin_unlock(&cache_defer_lock);
619
620 }
621
622 struct thread_deferred_req {
623 struct cache_deferred_req handle;
624 struct completion completion;
625 };
626
627 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
628 {
629 struct thread_deferred_req *dr =
630 container_of(dreq, struct thread_deferred_req, handle);
631 complete(&dr->completion);
632 }
633
634 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
635 {
636 struct thread_deferred_req sleeper;
637 struct cache_deferred_req *dreq = &sleeper.handle;
638
639 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
640 dreq->revisit = cache_restart_thread;
641
642 setup_deferral(dreq, item, 0);
643
644 if (!test_bit(CACHE_PENDING, &item->flags) ||
645 wait_for_completion_interruptible_timeout(
646 &sleeper.completion, req->thread_wait) <= 0) {
647 /* The completion wasn't completed, so we need
648 * to clean up
649 */
650 spin_lock(&cache_defer_lock);
651 if (!hlist_unhashed(&sleeper.handle.hash)) {
652 __unhash_deferred_req(&sleeper.handle);
653 spin_unlock(&cache_defer_lock);
654 } else {
655 /* cache_revisit_request already removed
656 * this from the hash table, but hasn't
657 * called ->revisit yet. It will very soon
658 * and we need to wait for it.
659 */
660 spin_unlock(&cache_defer_lock);
661 wait_for_completion(&sleeper.completion);
662 }
663 }
664 }
665
666 static void cache_limit_defers(void)
667 {
668 /* Make sure we haven't exceeded the limit of allowed deferred
669 * requests.
670 */
671 struct cache_deferred_req *discard = NULL;
672
673 if (cache_defer_cnt <= DFR_MAX)
674 return;
675
676 spin_lock(&cache_defer_lock);
677
678 /* Consider removing either the first or the last */
679 if (cache_defer_cnt > DFR_MAX) {
680 if (get_random_u32_below(2))
681 discard = list_entry(cache_defer_list.next,
682 struct cache_deferred_req, recent);
683 else
684 discard = list_entry(cache_defer_list.prev,
685 struct cache_deferred_req, recent);
686 __unhash_deferred_req(discard);
687 }
688 spin_unlock(&cache_defer_lock);
689 if (discard)
690 discard->revisit(discard, 1);
691 }
692
693 #if IS_ENABLED(CONFIG_FAIL_SUNRPC)
694 static inline bool cache_defer_immediately(void)
695 {
696 return !fail_sunrpc.ignore_cache_wait &&
697 should_fail(&fail_sunrpc.attr, 1);
698 }
699 #else
700 static inline bool cache_defer_immediately(void)
701 {
702 return false;
703 }
704 #endif
705
706 /* Return true if and only if a deferred request is queued. */
707 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
708 {
709 struct cache_deferred_req *dreq;
710
711 if (!cache_defer_immediately()) {
712 cache_wait_req(req, item);
713 if (!test_bit(CACHE_PENDING, &item->flags))
714 return false;
715 }
716
717 dreq = req->defer(req);
718 if (dreq == NULL)
719 return false;
720 setup_deferral(dreq, item, 1);
721 if (!test_bit(CACHE_PENDING, &item->flags))
722 /* Bit could have been cleared before we managed to
723 * set up the deferral, so need to revisit just in case
724 */
725 cache_revisit_request(item);
726
727 cache_limit_defers();
728 return true;
729 }
730
731 static void cache_revisit_request(struct cache_head *item)
732 {
733 struct cache_deferred_req *dreq;
734 struct hlist_node *tmp;
735 int hash = DFR_HASH(item);
736 LIST_HEAD(pending);
737
738 spin_lock(&cache_defer_lock);
739
740 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
741 if (dreq->item == item) {
742 __unhash_deferred_req(dreq);
743 list_add(&dreq->recent, &pending);
744 }
745
746 spin_unlock(&cache_defer_lock);
747
748 while (!list_empty(&pending)) {
749 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
750 list_del_init(&dreq->recent);
751 dreq->revisit(dreq, 0);
752 }
753 }
754
755 void cache_clean_deferred(void *owner)
756 {
757 struct cache_deferred_req *dreq, *tmp;
758 LIST_HEAD(pending);
759
760 spin_lock(&cache_defer_lock);
761
762 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
763 if (dreq->owner == owner) {
764 __unhash_deferred_req(dreq);
765 list_add(&dreq->recent, &pending);
766 }
767 }
768 spin_unlock(&cache_defer_lock);
769
770 while (!list_empty(&pending)) {
771 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
772 list_del_init(&dreq->recent);
773 dreq->revisit(dreq, 1);
774 }
775 }
776
777 /*
778 * communicate with user-space
779 *
780 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
781 * On read, you get a full request, or block.
782 * On write, an update request is processed.
783 * Poll works if anything to read, and always allows write.
784 *
785 * Implemented by linked list of requests. Each open file has
786 * a ->private that also exists in this list. New requests are added
787 * to the end and may wake up any preceding readers.
788 * New readers are added to the head. If, on read, an item is found with
789 * CACHE_UPCALLING clear, we free it from the list.
790 *
791 */
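/*
 * A user-space daemon therefore runs a simple loop against the channel file
 * (sketch only; the actual request/reply line formats are defined by each
 * cache's cache_request/cache_parse methods):
 *
 *	int fd = open("/proc/net/rpc/<cachename>/channel", O_RDWR);
 *	char buf[8192];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0) {
 *		// parse the request line, resolve it, then write back
 *		// one complete reply line in a single write():
 *		write(fd, reply, reply_len);
 *	}
 */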
792
793 static DEFINE_SPINLOCK(queue_lock);
794
795 struct cache_queue {
796 struct list_head list;
797 int reader; /* if 0, then request */
798 };
799 struct cache_request {
800 struct cache_queue q;
801 struct cache_head *item;
802 char * buf;
803 int len;
804 int readers;
805 };
806 struct cache_reader {
807 struct cache_queue q;
808 int offset; /* if non-0, we have a refcnt on next request */
809 };
810
811 static int cache_request(struct cache_detail *detail,
812 struct cache_request *crq)
813 {
814 char *bp = crq->buf;
815 int len = PAGE_SIZE;
816
817 detail->cache_request(detail, crq->item, &bp, &len);
818 if (len < 0)
819 return -E2BIG;
820 return PAGE_SIZE - len;
821 }
822
823 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
824 loff_t *ppos, struct cache_detail *cd)
825 {
826 struct cache_reader *rp = filp->private_data;
827 struct cache_request *rq;
828 struct inode *inode = file_inode(filp);
829 int err;
830
831 if (count == 0)
832 return 0;
833
834 inode_lock(inode); /* protect against multiple concurrent
835 * readers on this file */
836 again:
837 spin_lock(&queue_lock);
838 /* need to find next request */
839 while (rp->q.list.next != &cd->queue &&
840 list_entry(rp->q.list.next, struct cache_queue, list)
841 ->reader) {
842 struct list_head *next = rp->q.list.next;
843 list_move(&rp->q.list, next);
844 }
845 if (rp->q.list.next == &cd->queue) {
846 spin_unlock(&queue_lock);
847 inode_unlock(inode);
848 WARN_ON_ONCE(rp->offset);
849 return 0;
850 }
851 rq = container_of(rp->q.list.next, struct cache_request, q.list);
852 WARN_ON_ONCE(rq->q.reader);
853 if (rp->offset == 0)
854 rq->readers++;
855 spin_unlock(&queue_lock);
856
857 if (rq->len == 0) {
858 err = cache_request(cd, rq);
859 if (err < 0)
860 goto out;
861 rq->len = err;
862 }
863
864 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
865 err = -EAGAIN;
866 spin_lock(&queue_lock);
867 list_move(&rp->q.list, &rq->q.list);
868 spin_unlock(&queue_lock);
869 } else {
870 if (rp->offset + count > rq->len)
871 count = rq->len - rp->offset;
872 err = -EFAULT;
873 if (copy_to_user(buf, rq->buf + rp->offset, count))
874 goto out;
875 rp->offset += count;
876 if (rp->offset >= rq->len) {
877 rp->offset = 0;
878 spin_lock(&queue_lock);
879 list_move(&rp->q.list, &rq->q.list);
880 spin_unlock(&queue_lock);
881 }
882 err = 0;
883 }
884 out:
885 if (rp->offset == 0) {
886 /* need to release rq */
887 spin_lock(&queue_lock);
888 rq->readers--;
889 if (rq->readers == 0 &&
890 !test_bit(CACHE_PENDING, &rq->item->flags)) {
891 list_del(&rq->q.list);
892 spin_unlock(&queue_lock);
893 cache_put(rq->item, cd);
894 kfree(rq->buf);
895 kfree(rq);
896 } else
897 spin_unlock(&queue_lock);
898 }
899 if (err == -EAGAIN)
900 goto again;
901 inode_unlock(inode);
902 return err ? err : count;
903 }
904
905 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
906 size_t count, struct cache_detail *cd)
907 {
908 ssize_t ret;
909
910 if (count == 0)
911 return -EINVAL;
912 if (copy_from_user(kaddr, buf, count))
913 return -EFAULT;
914 kaddr[count] = '\0';
915 ret = cd->cache_parse(cd, kaddr, count);
916 if (!ret)
917 ret = count;
918 return ret;
919 }
920
921 static ssize_t cache_downcall(struct address_space *mapping,
922 const char __user *buf,
923 size_t count, struct cache_detail *cd)
924 {
925 char *write_buf;
926 ssize_t ret = -ENOMEM;
927
928 if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
929 ret = -EINVAL;
930 goto out;
931 }
932
933 write_buf = kvmalloc(count + 1, GFP_KERNEL);
934 if (!write_buf)
935 goto out;
936
937 ret = cache_do_downcall(write_buf, buf, count, cd);
938 kvfree(write_buf);
939 out:
940 return ret;
941 }
942
943 static ssize_t cache_write(struct file *filp, const char __user *buf,
944 size_t count, loff_t *ppos,
945 struct cache_detail *cd)
946 {
947 struct address_space *mapping = filp->f_mapping;
948 struct inode *inode = file_inode(filp);
949 ssize_t ret = -EINVAL;
950
951 if (!cd->cache_parse)
952 goto out;
953
954 inode_lock(inode);
955 ret = cache_downcall(mapping, buf, count, cd);
956 inode_unlock(inode);
957 out:
958 return ret;
959 }
960
961 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
962
963 static __poll_t cache_poll(struct file *filp, poll_table *wait,
964 struct cache_detail *cd)
965 {
966 __poll_t mask;
967 struct cache_reader *rp = filp->private_data;
968 struct cache_queue *cq;
969
970 poll_wait(filp, &queue_wait, wait);
971
972 /* always allow write */
973 mask = EPOLLOUT | EPOLLWRNORM;
974
975 if (!rp)
976 return mask;
977
978 spin_lock(&queue_lock);
979
980 for (cq= &rp->q; &cq->list != &cd->queue;
981 cq = list_entry(cq->list.next, struct cache_queue, list))
982 if (!cq->reader) {
983 mask |= EPOLLIN | EPOLLRDNORM;
984 break;
985 }
986 spin_unlock(&queue_lock);
987 return mask;
988 }
989
990 static int cache_ioctl(struct inode *ino, struct file *filp,
991 unsigned int cmd, unsigned long arg,
992 struct cache_detail *cd)
993 {
994 int len = 0;
995 struct cache_reader *rp = filp->private_data;
996 struct cache_queue *cq;
997
998 if (cmd != FIONREAD || !rp)
999 return -EINVAL;
1000
1001 spin_lock(&queue_lock);
1002
1003 /* only find the length remaining in current request,
1004 * or the length of the next request
1005 */
1006 for (cq= &rp->q; &cq->list != &cd->queue;
1007 cq = list_entry(cq->list.next, struct cache_queue, list))
1008 if (!cq->reader) {
1009 struct cache_request *cr =
1010 container_of(cq, struct cache_request, q);
1011 len = cr->len - rp->offset;
1012 break;
1013 }
1014 spin_unlock(&queue_lock);
1015
1016 return put_user(len, (int __user *)arg);
1017 }
1018
1019 static int cache_open(struct inode *inode, struct file *filp,
1020 struct cache_detail *cd)
1021 {
1022 struct cache_reader *rp = NULL;
1023
1024 if (!cd || !try_module_get(cd->owner))
1025 return -EACCES;
1026 nonseekable_open(inode, filp);
1027 if (filp->f_mode & FMODE_READ) {
1028 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1029 if (!rp) {
1030 module_put(cd->owner);
1031 return -ENOMEM;
1032 }
1033 rp->offset = 0;
1034 rp->q.reader = 1;
1035
1036 spin_lock(&queue_lock);
1037 list_add(&rp->q.list, &cd->queue);
1038 spin_unlock(&queue_lock);
1039 }
1040 if (filp->f_mode & FMODE_WRITE)
1041 atomic_inc(&cd->writers);
1042 filp->private_data = rp;
1043 return 0;
1044 }
1045
1046 static int cache_release(struct inode *inode, struct file *filp,
1047 struct cache_detail *cd)
1048 {
1049 struct cache_reader *rp = filp->private_data;
1050
1051 if (rp) {
1052 spin_lock(&queue_lock);
1053 if (rp->offset) {
1054 struct cache_queue *cq;
1055 for (cq= &rp->q; &cq->list != &cd->queue;
1056 cq = list_entry(cq->list.next, struct cache_queue, list))
1057 if (!cq->reader) {
1058 container_of(cq, struct cache_request, q)
1059 ->readers--;
1060 break;
1061 }
1062 rp->offset = 0;
1063 }
1064 list_del(&rp->q.list);
1065 spin_unlock(&queue_lock);
1066
1067 filp->private_data = NULL;
1068 kfree(rp);
1069
1070 }
1071 if (filp->f_mode & FMODE_WRITE) {
1072 atomic_dec(&cd->writers);
1073 cd->last_close = seconds_since_boot();
1074 }
1075 module_put(cd->owner);
1076 return 0;
1077 }
1078
1079
1080
1081 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1082 {
1083 struct cache_queue *cq, *tmp;
1084 struct cache_request *cr;
1085 LIST_HEAD(dequeued);
1086
1087 spin_lock(&queue_lock);
1088 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1089 if (!cq->reader) {
1090 cr = container_of(cq, struct cache_request, q);
1091 if (cr->item != ch)
1092 continue;
1093 if (test_bit(CACHE_PENDING, &ch->flags))
1094 /* Lost a race and it is pending again */
1095 break;
1096 if (cr->readers != 0)
1097 continue;
1098 list_move(&cr->q.list, &dequeued);
1099 }
1100 spin_unlock(&queue_lock);
1101 while (!list_empty(&dequeued)) {
1102 cr = list_entry(dequeued.next, struct cache_request, q.list);
1103 list_del(&cr->q.list);
1104 cache_put(cr->item, detail);
1105 kfree(cr->buf);
1106 kfree(cr);
1107 }
1108 }
1109
1110 /*
1111 * Support routines for text-based upcalls.
1112 * Fields are separated by spaces.
1113 * Fields are either mangled to quote space tab newline slosh with slosh
1114 * or hexified with a leading \x
1115 * Record is terminated with newline.
1116 *
1117 */
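/*
 * For example (illustrative values): qword_add() emits the string
 * "host one" as "host\040one " -- the embedded space is octal-escaped and a
 * separating blank is appended -- while qword_addhex() emits the two bytes
 * { 0xde, 0xad } as "\xdead ".
 */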
1118
1119 void qword_add(char **bpp, int *lp, char *str)
1120 {
1121 char *bp = *bpp;
1122 int len = *lp;
1123 int ret;
1124
1125 if (len < 0) return;
1126
1127 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1128 if (ret >= len) {
1129 bp += len;
1130 len = -1;
1131 } else {
1132 bp += ret;
1133 len -= ret;
1134 *bp++ = ' ';
1135 len--;
1136 }
1137 *bpp = bp;
1138 *lp = len;
1139 }
1140 EXPORT_SYMBOL_GPL(qword_add);
1141
1142 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1143 {
1144 char *bp = *bpp;
1145 int len = *lp;
1146
1147 if (len < 0) return;
1148
1149 if (len > 2) {
1150 *bp++ = '\\';
1151 *bp++ = 'x';
1152 len -= 2;
1153 while (blen && len >= 2) {
1154 bp = hex_byte_pack(bp, *buf++);
1155 len -= 2;
1156 blen--;
1157 }
1158 }
1159 if (blen || len<1) len = -1;
1160 else {
1161 *bp++ = ' ';
1162 len--;
1163 }
1164 *bpp = bp;
1165 *lp = len;
1166 }
1167 EXPORT_SYMBOL_GPL(qword_addhex);
1168
1169 static void warn_no_listener(struct cache_detail *detail)
1170 {
1171 if (detail->last_warn != detail->last_close) {
1172 detail->last_warn = detail->last_close;
1173 if (detail->warn_no_listener)
1174 detail->warn_no_listener(detail, detail->last_close != 0);
1175 }
1176 }
1177
1178 static bool cache_listeners_exist(struct cache_detail *detail)
1179 {
1180 if (atomic_read(&detail->writers))
1181 return true;
1182 if (detail->last_close == 0)
1183 /* This cache was never opened */
1184 return false;
1185 if (detail->last_close < seconds_since_boot() - 30)
1186 /*
1187 * We allow for the possibility that someone might
1188 * restart a userspace daemon without restarting the
1189 * server; but after 30 seconds, we give up.
1190 */
1191 return false;
1192 return true;
1193 }
1194
1195 /*
1196 * register an upcall request to user-space and queue it up for read() by the
1197 * upcall daemon.
1198 *
1199 * Each request is at most one page long.
1200 */
1201 static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1202 {
1203 char *buf;
1204 struct cache_request *crq;
1205 int ret = 0;
1206
1207 if (test_bit(CACHE_CLEANED, &h->flags))
1208 /* Too late to make an upcall */
1209 return -EAGAIN;
1210
1211 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1212 if (!buf)
1213 return -EAGAIN;
1214
1215 crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1216 if (!crq) {
1217 kfree(buf);
1218 return -EAGAIN;
1219 }
1220
1221 crq->q.reader = 0;
1222 crq->buf = buf;
1223 crq->len = 0;
1224 crq->readers = 0;
1225 spin_lock(&queue_lock);
1226 if (test_bit(CACHE_PENDING, &h->flags)) {
1227 crq->item = cache_get(h);
1228 list_add_tail(&crq->q.list, &detail->queue);
1229 trace_cache_entry_upcall(detail, h);
1230 } else
1231 /* Lost a race, no longer PENDING, so don't enqueue */
1232 ret = -EAGAIN;
1233 spin_unlock(&queue_lock);
1234 wake_up(&queue_wait);
1235 if (ret == -EAGAIN) {
1236 kfree(buf);
1237 kfree(crq);
1238 }
1239 return ret;
1240 }
1241
1242 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1243 {
1244 if (test_and_set_bit(CACHE_PENDING, &h->flags))
1245 return 0;
1246 return cache_pipe_upcall(detail, h);
1247 }
1248 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1249
1250 int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
1251 struct cache_head *h)
1252 {
1253 if (!cache_listeners_exist(detail)) {
1254 warn_no_listener(detail);
1255 trace_cache_entry_no_listener(detail, h);
1256 return -EINVAL;
1257 }
1258 return sunrpc_cache_pipe_upcall(detail, h);
1259 }
1260 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);
1261
1262 /*
1263 * parse a message from user-space and pass it
1264 * to an appropriate cache
1265 * Messages are, like requests, separated into fields by
1266 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
1267 *
1268 * Message is
1269 * reply cachename expiry key ... content....
1270 *
1271 * key and content are both parsed by cache
1272 */
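/*
 * A cache_parse() implementation typically peels the fields off with
 * qword_get() (sketch with hypothetical buffer sizes and locals):
 *
 *	char class[50], key[50];
 *
 *	if (qword_get(&mesg, class, sizeof(class)) <= 0)
 *		return -EINVAL;
 *	if (qword_get(&mesg, key, sizeof(key)) <= 0)
 *		return -EINVAL;
 *	// remaining fields (expiry, content) are read the same way, then the
 *	// entry is looked up and updated via sunrpc_cache_update().
 */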
1273
1274 int qword_get(char **bpp, char *dest, int bufsize)
1275 {
1276 /* return bytes copied, or -1 on error */
1277 char *bp = *bpp;
1278 int len = 0;
1279
1280 while (*bp == ' ') bp++;
1281
1282 if (bp[0] == '\\' && bp[1] == 'x') {
1283 /* HEX STRING */
1284 bp += 2;
1285 while (len < bufsize - 1) {
1286 int h, l;
1287
1288 h = hex_to_bin(bp[0]);
1289 if (h < 0)
1290 break;
1291
1292 l = hex_to_bin(bp[1]);
1293 if (l < 0)
1294 break;
1295
1296 *dest++ = (h << 4) | l;
1297 bp += 2;
1298 len++;
1299 }
1300 } else {
1301 /* text with \nnn octal quoting */
1302 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1303 if (*bp == '\\' &&
1304 isodigit(bp[1]) && (bp[1] <= '3') &&
1305 isodigit(bp[2]) &&
1306 isodigit(bp[3])) {
1307 int byte = (*++bp -'0');
1308 bp++;
1309 byte = (byte << 3) | (*bp++ - '0');
1310 byte = (byte << 3) | (*bp++ - '0');
1311 *dest++ = byte;
1312 len++;
1313 } else {
1314 *dest++ = *bp++;
1315 len++;
1316 }
1317 }
1318 }
1319
1320 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1321 return -1;
1322 while (*bp == ' ') bp++;
1323 *bpp = bp;
1324 *dest = '\0';
1325 return len;
1326 }
1327 EXPORT_SYMBOL_GPL(qword_get);
1328
1329
1330 /*
1331 * support /proc/net/rpc/$CACHENAME/content
1332 * as a seqfile.
1333 * We call ->cache_show passing NULL for the item to
1334 * get a header, then pass each real item in the cache
1335 */
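/*
 * The seq_file position is packed with the hash bucket in the upper 32 bits
 * and the index within that chain (offset by one for the header) in the
 * lower 32 bits.  Position 0 yields SEQ_START_TOKEN, and, for example,
 * *pos == (2ULL << 32) | 1 resumes at the first entry of bucket 2.
 */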
1336
1337 static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1338 {
1339 loff_t n = *pos;
1340 unsigned int hash, entry;
1341 struct cache_head *ch;
1342 struct cache_detail *cd = m->private;
1343
1344 if (!n--)
1345 return SEQ_START_TOKEN;
1346 hash = n >> 32;
1347 entry = n & ((1LL<<32) - 1);
1348
1349 hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1350 if (!entry--)
1351 return ch;
1352 n &= ~((1LL<<32) - 1);
1353 do {
1354 hash++;
1355 n += 1LL<<32;
1356 } while(hash < cd->hash_size &&
1357 hlist_empty(&cd->hash_table[hash]));
1358 if (hash >= cd->hash_size)
1359 return NULL;
1360 *pos = n+1;
1361 return hlist_entry_safe(rcu_dereference_raw(
1362 hlist_first_rcu(&cd->hash_table[hash])),
1363 struct cache_head, cache_list);
1364 }
1365
1366 static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1367 {
1368 struct cache_head *ch = p;
1369 int hash = (*pos >> 32);
1370 struct cache_detail *cd = m->private;
1371
1372 if (p == SEQ_START_TOKEN)
1373 hash = 0;
1374 else if (ch->cache_list.next == NULL) {
1375 hash++;
1376 *pos += 1LL<<32;
1377 } else {
1378 ++*pos;
1379 return hlist_entry_safe(rcu_dereference_raw(
1380 hlist_next_rcu(&ch->cache_list)),
1381 struct cache_head, cache_list);
1382 }
1383 *pos &= ~((1LL<<32) - 1);
1384 while (hash < cd->hash_size &&
1385 hlist_empty(&cd->hash_table[hash])) {
1386 hash++;
1387 *pos += 1LL<<32;
1388 }
1389 if (hash >= cd->hash_size)
1390 return NULL;
1391 ++*pos;
1392 return hlist_entry_safe(rcu_dereference_raw(
1393 hlist_first_rcu(&cd->hash_table[hash])),
1394 struct cache_head, cache_list);
1395 }
1396
1397 void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1398 __acquires(RCU)
1399 {
1400 rcu_read_lock();
1401 return __cache_seq_start(m, pos);
1402 }
1403 EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1404
1405 void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1406 {
1407 return cache_seq_next(file, p, pos);
1408 }
1409 EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1410
1411 void cache_seq_stop_rcu(struct seq_file *m, void *p)
1412 __releases(RCU)
1413 {
1414 rcu_read_unlock();
1415 }
1416 EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1417
1418 static int c_show(struct seq_file *m, void *p)
1419 {
1420 struct cache_head *cp = p;
1421 struct cache_detail *cd = m->private;
1422
1423 if (p == SEQ_START_TOKEN)
1424 return cd->cache_show(m, cd, NULL);
1425
1426 ifdebug(CACHE)
1427 seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
1428 convert_to_wallclock(cp->expiry_time),
1429 kref_read(&cp->ref), cp->flags);
1430 cache_get(cp);
1431 if (cache_check(cd, cp, NULL))
1432 /* cache_check does a cache_put on failure */
1433 seq_puts(m, "# ");
1434 else {
1435 if (cache_is_expired(cd, cp))
1436 seq_puts(m, "# ");
1437 cache_put(cp, cd);
1438 }
1439
1440 return cd->cache_show(m, cd, cp);
1441 }
1442
1443 static const struct seq_operations cache_content_op = {
1444 .start = cache_seq_start_rcu,
1445 .next = cache_seq_next_rcu,
1446 .stop = cache_seq_stop_rcu,
1447 .show = c_show,
1448 };
1449
1450 static int content_open(struct inode *inode, struct file *file,
1451 struct cache_detail *cd)
1452 {
1453 struct seq_file *seq;
1454 int err;
1455
1456 if (!cd || !try_module_get(cd->owner))
1457 return -EACCES;
1458
1459 err = seq_open(file, &cache_content_op);
1460 if (err) {
1461 module_put(cd->owner);
1462 return err;
1463 }
1464
1465 seq = file->private_data;
1466 seq->private = cd;
1467 return 0;
1468 }
1469
1470 static int content_release(struct inode *inode, struct file *file,
1471 struct cache_detail *cd)
1472 {
1473 int ret = seq_release(inode, file);
1474 module_put(cd->owner);
1475 return ret;
1476 }
1477
1478 static int open_flush(struct inode *inode, struct file *file,
1479 struct cache_detail *cd)
1480 {
1481 if (!cd || !try_module_get(cd->owner))
1482 return -EACCES;
1483 return nonseekable_open(inode, file);
1484 }
1485
1486 static int release_flush(struct inode *inode, struct file *file,
1487 struct cache_detail *cd)
1488 {
1489 module_put(cd->owner);
1490 return 0;
1491 }
1492
1493 static ssize_t read_flush(struct file *file, char __user *buf,
1494 size_t count, loff_t *ppos,
1495 struct cache_detail *cd)
1496 {
1497 char tbuf[22];
1498 size_t len;
1499
1500 len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
1501 convert_to_wallclock(cd->flush_time));
1502 return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1503 }
1504
1505 static ssize_t write_flush(struct file *file, const char __user *buf,
1506 size_t count, loff_t *ppos,
1507 struct cache_detail *cd)
1508 {
1509 char tbuf[20];
1510 char *ep;
1511 time64_t now;
1512
1513 if (*ppos || count > sizeof(tbuf)-1)
1514 return -EINVAL;
1515 if (copy_from_user(tbuf, buf, count))
1516 return -EFAULT;
1517 tbuf[count] = 0;
1518 simple_strtoul(tbuf, &ep, 0);
1519 if (*ep && *ep != '\n')
1520 return -EINVAL;
1521 /* Note that while we check that 'buf' holds a valid number,
1522 * we always ignore the value and just flush everything.
1523 * Making use of the number leads to races.
1524 */
1525
1526 now = seconds_since_boot();
1527 /* Always flush everything, so behave like cache_purge()
1528 * Do this by advancing flush_time to the current time,
1529 * or by one second if it has already reached the current time.
1530 * Newly added cache entries will always have ->last_refresh greater
1531 * than ->flush_time, so they don't get flushed prematurely.
1532 */
1533
1534 if (cd->flush_time >= now)
1535 now = cd->flush_time + 1;
1536
1537 cd->flush_time = now;
1538 cd->nextcheck = now;
1539 cache_flush();
1540
1541 if (cd->flush)
1542 cd->flush();
1543
1544 *ppos += count;
1545 return count;
1546 }
1547
1548 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1549 size_t count, loff_t *ppos)
1550 {
1551 struct cache_detail *cd = pde_data(file_inode(filp));
1552
1553 return cache_read(filp, buf, count, ppos, cd);
1554 }
1555
1556 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1557 size_t count, loff_t *ppos)
1558 {
1559 struct cache_detail *cd = pde_data(file_inode(filp));
1560
1561 return cache_write(filp, buf, count, ppos, cd);
1562 }
1563
1564 static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1565 {
1566 struct cache_detail *cd = pde_data(file_inode(filp));
1567
1568 return cache_poll(filp, wait, cd);
1569 }
1570
1571 static long cache_ioctl_procfs(struct file *filp,
1572 unsigned int cmd, unsigned long arg)
1573 {
1574 struct inode *inode = file_inode(filp);
1575 struct cache_detail *cd = pde_data(inode);
1576
1577 return cache_ioctl(inode, filp, cmd, arg, cd);
1578 }
1579
1580 static int cache_open_procfs(struct inode *inode, struct file *filp)
1581 {
1582 struct cache_detail *cd = pde_data(inode);
1583
1584 return cache_open(inode, filp, cd);
1585 }
1586
1587 static int cache_release_procfs(struct inode *inode, struct file *filp)
1588 {
1589 struct cache_detail *cd = pde_data(inode);
1590
1591 return cache_release(inode, filp, cd);
1592 }
1593
1594 static const struct proc_ops cache_channel_proc_ops = {
1595 .proc_read = cache_read_procfs,
1596 .proc_write = cache_write_procfs,
1597 .proc_poll = cache_poll_procfs,
1598 .proc_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1599 .proc_open = cache_open_procfs,
1600 .proc_release = cache_release_procfs,
1601 };
1602
1603 static int content_open_procfs(struct inode *inode, struct file *filp)
1604 {
1605 struct cache_detail *cd = pde_data(inode);
1606
1607 return content_open(inode, filp, cd);
1608 }
1609
1610 static int content_release_procfs(struct inode *inode, struct file *filp)
1611 {
1612 struct cache_detail *cd = pde_data(inode);
1613
1614 return content_release(inode, filp, cd);
1615 }
1616
1617 static const struct proc_ops content_proc_ops = {
1618 .proc_open = content_open_procfs,
1619 .proc_read = seq_read,
1620 .proc_lseek = seq_lseek,
1621 .proc_release = content_release_procfs,
1622 };
1623
1624 static int open_flush_procfs(struct inode *inode, struct file *filp)
1625 {
1626 struct cache_detail *cd = pde_data(inode);
1627
1628 return open_flush(inode, filp, cd);
1629 }
1630
1631 static int release_flush_procfs(struct inode *inode, struct file *filp)
1632 {
1633 struct cache_detail *cd = pde_data(inode);
1634
1635 return release_flush(inode, filp, cd);
1636 }
1637
1638 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1639 size_t count, loff_t *ppos)
1640 {
1641 struct cache_detail *cd = pde_data(file_inode(filp));
1642
1643 return read_flush(filp, buf, count, ppos, cd);
1644 }
1645
1646 static ssize_t write_flush_procfs(struct file *filp,
1647 const char __user *buf,
1648 size_t count, loff_t *ppos)
1649 {
1650 struct cache_detail *cd = pde_data(file_inode(filp));
1651
1652 return write_flush(filp, buf, count, ppos, cd);
1653 }
1654
1655 static const struct proc_ops cache_flush_proc_ops = {
1656 .proc_open = open_flush_procfs,
1657 .proc_read = read_flush_procfs,
1658 .proc_write = write_flush_procfs,
1659 .proc_release = release_flush_procfs,
1660 };
1661
1662 static void remove_cache_proc_entries(struct cache_detail *cd)
1663 {
1664 if (cd->procfs) {
1665 proc_remove(cd->procfs);
1666 cd->procfs = NULL;
1667 }
1668 }
1669
1670 #ifdef CONFIG_PROC_FS
1671 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1672 {
1673 struct proc_dir_entry *p;
1674 struct sunrpc_net *sn;
1675
1676 sn = net_generic(net, sunrpc_net_id);
1677 cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1678 if (cd->procfs == NULL)
1679 goto out_nomem;
1680
1681 p = proc_create_data("flush", S_IFREG | 0600,
1682 cd->procfs, &cache_flush_proc_ops, cd);
1683 if (p == NULL)
1684 goto out_nomem;
1685
1686 if (cd->cache_request || cd->cache_parse) {
1687 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1688 &cache_channel_proc_ops, cd);
1689 if (p == NULL)
1690 goto out_nomem;
1691 }
1692 if (cd->cache_show) {
1693 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1694 &content_proc_ops, cd);
1695 if (p == NULL)
1696 goto out_nomem;
1697 }
1698 return 0;
1699 out_nomem:
1700 remove_cache_proc_entries(cd);
1701 return -ENOMEM;
1702 }
1703 #else /* CONFIG_PROC_FS */
1704 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1705 {
1706 return 0;
1707 }
1708 #endif
1709
1710 void __init cache_initialize(void)
1711 {
1712 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1713 }
1714
1715 int cache_register_net(struct cache_detail *cd, struct net *net)
1716 {
1717 int ret;
1718
1719 sunrpc_init_cache_detail(cd);
1720 ret = create_cache_proc_entries(cd, net);
1721 if (ret)
1722 sunrpc_destroy_cache_detail(cd);
1723 return ret;
1724 }
1725 EXPORT_SYMBOL_GPL(cache_register_net);
1726
1727 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1728 {
1729 remove_cache_proc_entries(cd);
1730 sunrpc_destroy_cache_detail(cd);
1731 }
1732 EXPORT_SYMBOL_GPL(cache_unregister_net);
1733
1734 struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1735 {
1736 struct cache_detail *cd;
1737 int i;
1738
1739 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1740 if (cd == NULL)
1741 return ERR_PTR(-ENOMEM);
1742
1743 cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1744 GFP_KERNEL);
1745 if (cd->hash_table == NULL) {
1746 kfree(cd);
1747 return ERR_PTR(-ENOMEM);
1748 }
1749
1750 for (i = 0; i < cd->hash_size; i++)
1751 INIT_HLIST_HEAD(&cd->hash_table[i]);
1752 cd->net = net;
1753 return cd;
1754 }
1755 EXPORT_SYMBOL_GPL(cache_create_net);
1756
1757 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1758 {
1759 kfree(cd->hash_table);
1760 kfree(cd);
1761 }
1762 EXPORT_SYMBOL_GPL(cache_destroy_net);
1763
1764 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1765 size_t count, loff_t *ppos)
1766 {
1767 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1768
1769 return cache_read(filp, buf, count, ppos, cd);
1770 }
1771
1772 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1773 size_t count, loff_t *ppos)
1774 {
1775 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1776
1777 return cache_write(filp, buf, count, ppos, cd);
1778 }
1779
1780 static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1781 {
1782 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1783
1784 return cache_poll(filp, wait, cd);
1785 }
1786
1787 static long cache_ioctl_pipefs(struct file *filp,
1788 unsigned int cmd, unsigned long arg)
1789 {
1790 struct inode *inode = file_inode(filp);
1791 struct cache_detail *cd = RPC_I(inode)->private;
1792
1793 return cache_ioctl(inode, filp, cmd, arg, cd);
1794 }
1795
1796 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1797 {
1798 struct cache_detail *cd = RPC_I(inode)->private;
1799
1800 return cache_open(inode, filp, cd);
1801 }
1802
1803 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1804 {
1805 struct cache_detail *cd = RPC_I(inode)->private;
1806
1807 return cache_release(inode, filp, cd);
1808 }
1809
1810 const struct file_operations cache_file_operations_pipefs = {
1811 .owner = THIS_MODULE,
1812 .read = cache_read_pipefs,
1813 .write = cache_write_pipefs,
1814 .poll = cache_poll_pipefs,
1815 .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1816 .open = cache_open_pipefs,
1817 .release = cache_release_pipefs,
1818 };
1819
1820 static int content_open_pipefs(struct inode *inode, struct file *filp)
1821 {
1822 struct cache_detail *cd = RPC_I(inode)->private;
1823
1824 return content_open(inode, filp, cd);
1825 }
1826
1827 static int content_release_pipefs(struct inode *inode, struct file *filp)
1828 {
1829 struct cache_detail *cd = RPC_I(inode)->private;
1830
1831 return content_release(inode, filp, cd);
1832 }
1833
1834 const struct file_operations content_file_operations_pipefs = {
1835 .open = content_open_pipefs,
1836 .read = seq_read,
1837 .llseek = seq_lseek,
1838 .release = content_release_pipefs,
1839 };
1840
1841 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1842 {
1843 struct cache_detail *cd = RPC_I(inode)->private;
1844
1845 return open_flush(inode, filp, cd);
1846 }
1847
1848 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1849 {
1850 struct cache_detail *cd = RPC_I(inode)->private;
1851
1852 return release_flush(inode, filp, cd);
1853 }
1854
1855 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1856 size_t count, loff_t *ppos)
1857 {
1858 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1859
1860 return read_flush(filp, buf, count, ppos, cd);
1861 }
1862
1863 static ssize_t write_flush_pipefs(struct file *filp,
1864 const char __user *buf,
1865 size_t count, loff_t *ppos)
1866 {
1867 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1868
1869 return write_flush(filp, buf, count, ppos, cd);
1870 }
1871
1872 const struct file_operations cache_flush_operations_pipefs = {
1873 .open = open_flush_pipefs,
1874 .read = read_flush_pipefs,
1875 .write = write_flush_pipefs,
1876 .release = release_flush_pipefs,
1877 };
1878
1879 int sunrpc_cache_register_pipefs(struct dentry *parent,
1880 const char *name, umode_t umode,
1881 struct cache_detail *cd)
1882 {
1883 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1884 if (IS_ERR(dir))
1885 return PTR_ERR(dir);
1886 cd->pipefs = dir;
1887 return 0;
1888 }
1889 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1890
1891 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1892 {
1893 if (cd->pipefs) {
1894 rpc_remove_cache_dir(cd->pipefs);
1895 cd->pipefs = NULL;
1896 }
1897 }
1898 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1899
1900 void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1901 {
1902 spin_lock(&cd->hash_lock);
1903 if (!hlist_unhashed(&h->cache_list)){
1904 sunrpc_begin_cache_remove_entry(h, cd);
1905 spin_unlock(&cd->hash_lock);
1906 sunrpc_end_cache_remove_entry(h, cd);
1907 } else
1908 spin_unlock(&cd->hash_lock);
1909 }
1910 EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
1911