1 /*
2 * ProFTPD: mod_statcache -- a module implementing caching of stat(2),
3 * fstat(2), and lstat(2) calls
4 * Copyright (c) 2013-2018 TJ Saunders
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
19 *
20 * As a special exemption, TJ Saunders and other respective copyright holders
21 * give permission to link this program with OpenSSL, and distribute the
22 * resulting executable, without including the source code for OpenSSL in the
23 * source distribution.
24 *
25 * This is mod_statcache, contrib software for proftpd 1.3.x.
26 * For more information contact TJ Saunders <tj@castaglia.org>.
27 */
28
29 #include "conf.h"
30 #include "privs.h"
31 #ifdef PR_USE_CTRLS
32 # include "mod_ctrls.h"
33 #endif /* PR_USE_CTRLS */
34
35 #include <signal.h>
36 #include <sys/ipc.h>
37 #include <sys/shm.h>
38
39 #if HAVE_SYS_MMAN_H
40 # include <sys/mman.h>
41 #endif
42
43 #if HAVE_SYS_UIO_H
44 # include <sys/uio.h>
45 #endif
46
47 #define MOD_STATCACHE_VERSION "mod_statcache/0.2"
48
49 /* Make sure the version of proftpd is as necessary. */
50 #if PROFTPD_VERSION_NUMBER < 0x0001030402
51 # error "ProFTPD 1.3.4rc2 or later required"
52 #endif
53
54 /* On some platforms, this may not be defined. On AIX, for example, this
55 * symbol is only defined when _NO_PROTO is defined, and _XOPEN_SOURCE is 500.
56 * How annoying.
57 */
58 #ifndef MAP_FAILED
59 # define MAP_FAILED ((void *) -1)
60 #endif
61
62 #define STATCACHE_DEFAULT_CAPACITY 5000
63 #define STATCACHE_DEFAULT_MAX_AGE 5
64
65 /* A path is hashed, and that hash % ncols indicates the row index. For
66 * each row, there can be N columns. This value indicates the number of
67 * columns for a row; it controls how many collisions can be handled.
68 */
69 #define STATCACHE_COLS_PER_ROW 10
70
71 /* Max number of lock attempts */
72 #define STATCACHE_MAX_LOCK_ATTEMPTS 10
73
74 /* Subpool size */
75 #define STATCACHE_POOL_SIZE 256
76
77 /* From src/main.c */
78 extern pid_t mpid;
79
80 module statcache_module;
81
82 #ifdef PR_USE_CTRLS
83 static ctrls_acttab_t statcache_acttab[];
84 #endif
85
86 /* Pool for this module's use */
87 static pool *statcache_pool = NULL;
88
89 /* Copied from src/fsio.c. */
/* Copied from src/fsio.c. */
struct statcache_entry {
  uint32_t sce_hash;                     /* Hash of sce_path */
  char sce_path[PR_TUNABLE_PATH_MAX+1];  /* Cached path, NUL-terminated */
  size_t sce_pathlen;                    /* Length of sce_path, sans NUL */
  struct stat sce_stat;                  /* Cached stat(2) data (valid only
                                          * when sce_errno is zero) */
  int sce_errno;                         /* Nonzero for a negative entry */
  unsigned char sce_op;                  /* FSIO_FILE_STAT or FSIO_FILE_LSTAT */
  time_t sce_ts;                         /* Entry timestamp; 0 = empty slot */
};
99
100 /* Storage structure:
101 *
102 * Header (stats):
103 * uint32_t count
104 * uint32_t highest
105 * uint32_t hits
106 * uint32_t misses
107 * uint32_t expires
108 * uint32_t rejects
109 *
110 * Data (entries):
111 * nrows = capacity / STATCACHE_COLS_PER_ROW
112 * row_len = sizeof(struct statcache_entry) * STATCACHE_COLS_PER_ROW
113 * row_start = ((hash % nrows) * row_len) + data_start
114 */
115
116 static int statcache_engine = FALSE;
117 static unsigned int statcache_max_positive_age = STATCACHE_DEFAULT_MAX_AGE;
118 static unsigned int statcache_max_negative_age = 1;
119 static unsigned int statcache_capacity = STATCACHE_DEFAULT_CAPACITY;
120 static unsigned int statcache_nrows = 0;
121 static size_t statcache_rowlen = 0;
122
123 static char *statcache_table_path = NULL;
124 static pr_fh_t *statcache_tabfh = NULL;
125
126 static void *statcache_table = NULL;
127 static size_t statcache_tablesz = 0;
128 static void *statcache_table_stats = NULL;
129 static struct statcache_entry *statcache_table_data = NULL;
130
131 static const char *trace_channel = "statcache";
132
133 static int statcache_wlock_row(int fd, uint32_t hash);
134 static int statcache_unlock_row(int fd, uint32_t hash);
135
136 #ifdef PR_USE_CTRLS
137 static int statcache_rlock_stats(int fd);
138 static int statcache_rlock_table(int fd);
139 static int statcache_unlock_table(int fd);
140 #endif /* PR_USE_CTRLS */
141 static int statcache_wlock_stats(int fd);
142 static int statcache_unlock_stats(int fd);
143
144 static void statcache_fs_statcache_clear_ev(const void *event_data,
145 void *user_data);
146 static int statcache_sess_init(void);
147
148 /* Functions for marshalling key/value data to/from local cache (SysV shm). */
/* Allocates and zeroes the shared memory region backing the StatCacheTable.
 *
 * The table file is truncated, then extended out to datasz bytes (by
 * seeking to the last byte and writing a single NUL), so that there is
 * filesystem backing store for the ensuing mmap(2) call.  The fd is kept
 * open for later fcntl(2) byte-range locking even when an anonymous
 * mapping is used.
 *
 * Returns the mapped region of datasz bytes, or NULL (with errno set) on
 * failure.
 */
static void *statcache_get_shm(pr_fh_t *tabfh, size_t datasz) {
  void *data;
  int fd, mmap_flags, res, xerrno;
#if defined(MADV_RANDOM) || defined(MADV_ACCESS_MANY)
  int advice = 0;
#endif

  /* A zero-length table cannot be mapped (and datasz-1 below would
   * underflow).
   */
  if (datasz == 0) {
    errno = EINVAL;
    return NULL;
  }

  fd = tabfh->fh_fd;

  /* Truncate the table first; any existing data should be deleted. */
  res = ftruncate(fd, 0);
  if (res < 0) {
    xerrno = errno;

    pr_log_debug(DEBUG0, MOD_STATCACHE_VERSION
      ": error truncating StatCacheTable '%s' to size 0: %s", tabfh->fh_path,
      strerror(xerrno));

    errno = xerrno;
    return NULL;
  }

  /* Seek to one byte less than the desired size and write a single byte,
   * so that there's enough allocated backing store on the filesystem to
   * support the ensuing mmap() call.  (Bug fix: previously we seeked to
   * datasz, not datasz-1, making the file one byte larger than intended
   * and contradicting both this comment and the log message below.)
   */
  if (lseek(fd, datasz - 1, SEEK_SET) == (off_t) -1) {
    xerrno = errno;

    pr_log_debug(DEBUG0, MOD_STATCACHE_VERSION
      ": error seeking to offset %lu in StatCacheTable '%s': %s",
      (unsigned long) datasz-1, tabfh->fh_path, strerror(xerrno));

    errno = xerrno;
    return NULL;
  }

  res = write(fd, "", 1);
  if (res != 1) {
    xerrno = errno;

    pr_log_debug(DEBUG0, MOD_STATCACHE_VERSION
      ": error writing single byte to StatCacheTable '%s': %s",
      tabfh->fh_path, strerror(xerrno));

    errno = xerrno;
    return NULL;
  }

  mmap_flags = MAP_SHARED;

  /* Make sure to set the fd to -1 if MAP_ANON(YMOUS) is used. By definition,
   * anonymous mapped memory does not need (or want) a valid file backing
   * store; some implementations will not do what is expected when anonymous
   * memory is requested AND a valid fd is passed in.
   *
   * However, we want to keep a valid fd open anyway, for later use by
   * fcntl(2) for byte range locking; we simply don't use the valid fd for
   * the mmap(2) call.
   */

#if defined(MAP_ANONYMOUS)
  /* Linux */
  mmap_flags |= MAP_ANONYMOUS;
  fd = -1;

#elif defined(MAP_ANON)
  /* FreeBSD, MacOSX, Solaris, others? */
  mmap_flags |= MAP_ANON;
  fd = -1;

#else
  pr_log_debug(DEBUG8, MOD_STATCACHE_VERSION
    ": mmap(2) MAP_ANONYMOUS and MAP_ANON flags not defined");
#endif

  data = mmap(NULL, datasz, PROT_READ|PROT_WRITE, mmap_flags, fd, 0);
  if (data == MAP_FAILED) {
    xerrno = errno;

    pr_log_debug(DEBUG0, MOD_STATCACHE_VERSION
      ": error mapping StatCacheTable '%s' fd %d size %lu into memory: %s",
      tabfh->fh_path, fd, (unsigned long) datasz, strerror(xerrno));

    errno = xerrno;
    return NULL;
  }

  /* Make sure the data are zeroed. */
  memset(data, 0, datasz);

#if defined(MADV_RANDOM) || defined(MADV_ACCESS_MANY)
  /* Provide some hints to the kernel, for hopefully better handling of
   * this buffer.
   */
# if defined(MADV_RANDOM)
  advice = MADV_RANDOM;
# elif defined(MADV_ACCESS_MANY)
  /* Oracle-ism? */
  advice = MADV_ACCESS_MANY;
# endif /* Random access pattern memory advice */

  res = madvise(data, datasz, advice);
  if (res < 0) {
    /* Bug fix: the old message always claimed MADV_RANDOM, even when
     * MADV_ACCESS_MANY was the advice actually given.
     */
    pr_log_debug(DEBUG5, MOD_STATCACHE_VERSION
      ": madvise(2) error: %s", strerror(errno));
  }
#endif

  return data;
}
260
/* Returns a human-readable name ("read", "write", "unlock") for the lock
 * type carried in the given flock; "[UNKNOWN]" for anything else.
 */
static const char *get_lock_type(struct flock *lock) {
  const char *name = "[UNKNOWN]";

  if (lock->l_type == F_RDLCK) {
    name = "read";

  } else if (lock->l_type == F_WRLCK) {
    name = "write";

  } else if (lock->l_type == F_UNLCK) {
    name = "unlock";
  }

  return name;
}
283
284 /* Header locking routines */
lock_table(int fd,int lock_type,off_t lock_len)285 static int lock_table(int fd, int lock_type, off_t lock_len) {
286 struct flock lock;
287 unsigned int nattempts = 1;
288
289 lock.l_type = lock_type;
290 lock.l_whence = 0;
291 lock.l_start = 0;
292 lock.l_len = (6 * sizeof(uint32_t));
293
294 pr_trace_msg(trace_channel, 15,
295 "attempt #%u to acquire %s lock on StatCacheTable fd %d (off %lu, len %lu)",
296 nattempts, get_lock_type(&lock), fd, (unsigned long) lock.l_start,
297 (unsigned long) lock.l_len);
298
299 while (fcntl(fd, F_SETLK, &lock) < 0) {
300 int xerrno = errno;
301
302 if (xerrno == EINTR) {
303 pr_signals_handle();
304 continue;
305 }
306
307 pr_trace_msg(trace_channel, 3,
308 "%s lock (attempt #%u) of StatCacheTable fd %d failed: %s",
309 get_lock_type(&lock), nattempts, fd, strerror(xerrno));
310 if (xerrno == EACCES) {
311 struct flock locker;
312
313 /* Get the PID of the process blocking this lock. */
314 if (fcntl(fd, F_GETLK, &locker) == 0) {
315 pr_trace_msg(trace_channel, 3, "process ID %lu has blocking %s lock on "
316 "StatCacheTable fd %d", (unsigned long) locker.l_pid,
317 get_lock_type(&locker), fd);
318 }
319 }
320
321 if (xerrno == EAGAIN ||
322 xerrno == EACCES) {
323 /* Treat this as an interrupted call, call pr_signals_handle() (which
324 * will delay for a few msecs because of EINTR), and try again.
325 * After MAX_LOCK_ATTEMPTS attempts, give up altogether.
326 */
327
328 nattempts++;
329 if (nattempts <= STATCACHE_MAX_LOCK_ATTEMPTS) {
330 errno = EINTR;
331
332 pr_signals_handle();
333
334 errno = 0;
335 pr_trace_msg(trace_channel, 15,
336 "attempt #%u to acquire %s lock on StatCacheTable fd %d", nattempts,
337 get_lock_type(&lock), fd);
338 continue;
339 }
340
341 pr_trace_msg(trace_channel, 15, "unable to acquire %s lock on "
342 "StatCacheTable fd %d after %u attempts: %s", get_lock_type(&lock),
343 nattempts, fd, strerror(xerrno));
344 }
345
346 errno = xerrno;
347 return -1;
348 }
349
350 pr_trace_msg(trace_channel, 15,
351 "acquired %s lock of StatCacheTable fd %d successfully",
352 get_lock_type(&lock), fd);
353 return 0;
354 }
355
356 #ifdef PR_USE_CTRLS
/* Read-locks the stats header (six uint32_t counters) of the table. */
static int statcache_rlock_stats(int fd) {
  return lock_table(fd, F_RDLCK, (6 * sizeof(uint32_t)));
}
360
/* Requests a read lock over the whole table; a length of zero means
 * "to end of file", per fcntl(2).
 */
static int statcache_rlock_table(int fd) {
  return lock_table(fd, F_RDLCK, 0);
}
364
/* Releases the whole-table lock (a length of zero covers the entire table,
 * per fcntl(2)).  Bug fix: this previously passed F_RDLCK, which
 * (re)acquired a read lock instead of releasing the held lock.
 */
static int statcache_unlock_table(int fd) {
  return lock_table(fd, F_UNLCK, 0);
}
368 #endif /* PR_USE_CTRLS */
369
/* Write-locks the stats header (six uint32_t counters) of the table. */
static int statcache_wlock_stats(int fd) {
  return lock_table(fd, F_WRLCK, (6 * sizeof(uint32_t)));
}
373
/* Releases the lock held on the stats header of the table. */
static int statcache_unlock_stats(int fd) {
  return lock_table(fd, F_UNLCK, (6 * sizeof(uint32_t)));
}
377
378 #ifdef PR_USE_CTRLS
statcache_stats_get_count(void)379 static uint32_t statcache_stats_get_count(void) {
380 uint32_t count = 0;
381
382 /* count = statcache_table_stats + (0 * sizeof(uint32_t)) */
383 count = *((uint32_t *) statcache_table_stats);
384 return count;
385 }
386
statcache_stats_get_highest(void)387 static uint32_t statcache_stats_get_highest(void) {
388 uint32_t highest = 0;
389
390 /* highest = statcache_table_stats + (1 * sizeof(uint32_t)) */
391 highest = *((uint32_t *) ((char *) statcache_table_stats +
392 (1 * sizeof(uint32_t))));
393 return highest;
394 }
395
statcache_stats_get_hits(void)396 static uint32_t statcache_stats_get_hits(void) {
397 uint32_t hits = 0;
398
399 /* hits = statcache_table_stats + (2 * sizeof(uint32_t)) */
400 hits = *((uint32_t *) ((char *) statcache_table_stats +
401 (2 * sizeof(uint32_t))));
402 return hits;
403 }
404
statcache_stats_get_misses(void)405 static uint32_t statcache_stats_get_misses(void) {
406 uint32_t misses = 0;
407
408 /* misses = statcache_table_stats + (3 * sizeof(uint32_t)) */
409 misses = *((uint32_t *) ((char *) statcache_table_stats +
410 (3 * sizeof(uint32_t))));
411 return misses;
412 }
413
statcache_stats_get_expires(void)414 static uint32_t statcache_stats_get_expires(void) {
415 uint32_t expires = 0;
416
417 /* expires = statcache_table_stats + (4 * sizeof(uint32_t)) */
418 expires = *((uint32_t *) ((char *) statcache_table_stats +
419 (4 * sizeof(uint32_t))));
420 return expires;
421 }
422
statcache_stats_get_rejects(void)423 static uint32_t statcache_stats_get_rejects(void) {
424 uint32_t rejects = 0;
425
426 /* rejects = statcache_table_stats + (5 * sizeof(uint32_t)) */
427 rejects = *((uint32_t *) ((char *) statcache_table_stats +
428 (5 * sizeof(uint32_t))));
429 return rejects;
430 }
431 #endif /* PR_USE_CTRLS */
432
statcache_stats_incr_count(int32_t incr)433 static int statcache_stats_incr_count(int32_t incr) {
434 uint32_t *count = NULL, *highest = NULL;
435
436 if (incr == 0) {
437 return 0;
438 }
439
440 /* count = statcache_table_stats + (0 * sizeof(uint32_t)) */
441 count = ((uint32_t *) statcache_table_stats);
442
443 /* highest = statcache_table_stats + (1 * sizeof(uint32_t)) */
444 highest = ((uint32_t *) ((char *) statcache_table_stats) +
445 (1 * sizeof(uint32_t)));
446
447 if (incr < 0) {
448 /* Prevent underflow. */
449 if (*count <= incr) {
450 *count = 0;
451
452 } else {
453 *count += incr;
454 }
455
456 } else {
457 *count += incr;
458
459 if (*count > *highest) {
460 *highest = *count;
461 }
462 }
463
464 return 0;
465 }
466
statcache_stats_incr_hits(int32_t incr)467 static int statcache_stats_incr_hits(int32_t incr) {
468 uint32_t *hits = NULL;
469
470 if (incr == 0) {
471 return 0;
472 }
473
474 /* hits = statcache_table_stats + (2 * sizeof(uint32_t)) */
475 hits = ((uint32_t *) ((char *) statcache_table_stats) +
476 (2 * sizeof(uint32_t)));
477
478 /* Prevent underflow. */
479 if (incr < 0 &&
480 *hits <= incr) {
481 *hits = 0;
482
483 } else {
484 *hits += incr;
485 }
486
487 return 0;
488 }
489
statcache_stats_incr_misses(int32_t incr)490 static int statcache_stats_incr_misses(int32_t incr) {
491 uint32_t *misses = NULL;
492
493 if (incr == 0) {
494 return 0;
495 }
496
497 /* misses = statcache_table_stats + (3 * sizeof(uint32_t)) */
498 misses = ((uint32_t *) ((char *) statcache_table_stats) +
499 (3 * sizeof(uint32_t)));
500
501 /* Prevent underflow. */
502 if (incr < 0 &&
503 *misses <= incr) {
504 *misses = 0;
505
506 } else {
507 *misses += incr;
508 }
509
510 return 0;
511 }
512
statcache_stats_incr_expires(int32_t incr)513 static int statcache_stats_incr_expires(int32_t incr) {
514 uint32_t *expires = NULL;
515
516 if (incr == 0) {
517 return 0;
518 }
519
520 /* expires = statcache_table_stats + (4 * sizeof(uint32_t)) */
521 expires = ((uint32_t *) ((char *) statcache_table_stats) +
522 (4 * sizeof(uint32_t)));
523
524 /* Prevent underflow. */
525 if (incr < 0 &&
526 *expires <= incr) {
527 *expires = 0;
528
529 } else {
530 *expires += incr;
531 }
532
533 return 0;
534 }
535
statcache_stats_incr_rejects(int32_t incr)536 static int statcache_stats_incr_rejects(int32_t incr) {
537 uint32_t *rejects = NULL;
538
539 if (incr == 0) {
540 return 0;
541 }
542
543 /* rejects = statcache_table_stats + (5 * sizeof(uint32_t)) */
544 rejects = ((uint32_t *) ((char *) statcache_table_stats) +
545 (5 * sizeof(uint32_t)));
546
547 /* Prevent underflow. */
548 if (incr < 0 &&
549 *rejects <= incr) {
550 *rejects = 0;
551
552 } else {
553 *rejects += incr;
554 }
555
556 return 0;
557 }
558
559 /* Data locking routines */
560
get_row_range(uint32_t hash,off_t * row_start,off_t * row_len)561 static int get_row_range(uint32_t hash, off_t *row_start, off_t *row_len) {
562 uint32_t row_idx;
563
564 row_idx = hash % statcache_nrows;
565 *row_start = (row_idx * statcache_rowlen);
566 *row_len = statcache_rowlen;
567
568 return 0;
569 }
570
/* Acquires (F_RDLCK/F_WRLCK) or releases (F_UNLCK) an advisory fcntl(2)
 * lock covering just the table row to which the given hash maps (see
 * get_row_range).  Non-blocking F_SETLK is used; EAGAIN/EACCES contention
 * is retried up to STATCACHE_MAX_LOCK_ATTEMPTS times.  Returns 0 on
 * success, -1 (with errno set) on failure.
 */
static int lock_row(int fd, int lock_type, uint32_t hash) {
  struct flock lock;
  unsigned int nattempts = 1;

  lock.l_type = lock_type;
  lock.l_whence = 0;
  /* Row offset/length are derived from the hash. */
  get_row_range(hash, &lock.l_start, &lock.l_len);

  pr_trace_msg(trace_channel, 15,
    "attempt #%u to acquire row %s lock on StatCacheTable fd %d "
    "(off %lu, len %lu)", nattempts, get_lock_type(&lock), fd,
    (unsigned long) lock.l_start,
    (unsigned long) lock.l_len);

  while (fcntl(fd, F_SETLK, &lock) < 0) {
    int xerrno = errno;

    /* A plain signal interruption is not counted as an attempt. */
    if (xerrno == EINTR) {
      pr_signals_handle();
      continue;
    }

    pr_trace_msg(trace_channel, 3,
      "%s lock (attempt #%u) of StatCacheTable fd %d failed: %s",
      get_lock_type(&lock), nattempts, fd, strerror(xerrno));
    if (xerrno == EACCES) {
      struct flock locker;

      /* Get the PID of the process blocking this lock. */
      if (fcntl(fd, F_GETLK, &locker) == 0) {
        pr_trace_msg(trace_channel, 3, "process ID %lu has blocking %s lock on "
          "StatCacheTable fd %d", (unsigned long) locker.l_pid,
          get_lock_type(&locker), fd);
      }
    }

    if (xerrno == EAGAIN ||
        xerrno == EACCES) {
      /* Treat this as an interrupted call, call pr_signals_handle() (which
       * will delay for a few msecs because of EINTR), and try again.
       * After MAX_LOCK_ATTEMPTS attempts, give up altogether.
       */

      nattempts++;
      if (nattempts <= STATCACHE_MAX_LOCK_ATTEMPTS) {
        /* Set EINTR so that pr_signals_handle() applies its delay. */
        errno = EINTR;

        pr_signals_handle();

        errno = 0;
        pr_trace_msg(trace_channel, 15,
          "attempt #%u to acquire %s row lock on StatCacheTable fd %d",
          nattempts, get_lock_type(&lock), fd);
        continue;
      }

      pr_trace_msg(trace_channel, 15, "unable to acquire %s row lock on "
        "StatCacheTable fd %d after %u attempts: %s", get_lock_type(&lock),
        nattempts, fd, strerror(xerrno));
    }

    errno = xerrno;
    return -1;
  }

  pr_trace_msg(trace_channel, 15,
    "acquired %s row lock of StatCacheTable fd %d successfully",
    get_lock_type(&lock), fd);
  return 0;
}
641
/* Write-locks the table row to which the given hash maps. */
static int statcache_wlock_row(int fd, uint32_t hash) {
  return lock_row(fd, F_WRLCK, hash);
}
645
/* Releases the lock held on the table row to which the given hash maps. */
static int statcache_unlock_row(int fd, uint32_t hash) {
  return lock_row(fd, F_UNLCK, hash);
}
649
650 /* Table manipulation routines */
651
652 /* See http://www.cse.yorku.ca/~oz/hash.html */
/* Computes the DJB2 hash (see http://www.cse.yorku.ca/~oz/hash.html) of the
 * first pathlen bytes of path, then clears the high bit so that the result
 * is always a 31-bit value.
 */
static uint32_t statcache_hash(const char *path, size_t pathlen) {
  register size_t i;
  uint32_t h = 5381;

  for (i = 0; i < pathlen; i++) {
    h = ((h << 5) + h) + path[i];
  }

  /* Strip off the high bit.  Bug fix: use an unsigned constant here; the
   * previous (1 << 31) left-shifts 1 into the sign bit of a signed int,
   * which is undefined behavior in C.
   */
  h &= ~(1U << 31);

  return h;
}
666
667 /* Add an entry to the table. */
/* Add an entry to the table.
 *
 * Stores the stat results (or, when st is NULL, the negative result in
 * xerrno) for path in the row selected by hash, taking the first empty or
 * expired column slot.  Returns 0 on success; -1 with errno set to EPERM
 * when the table is not attached, or ENOSPC when the row has no free slot
 * (counted as a "reject" in the stats).
 *
 * NOTE: callers acquire the row write lock (statcache_wlock_row) before
 * calling this; only the stats header is locked here.
 */
static int statcache_table_add(int fd, const char *path, size_t pathlen,
    struct stat *st, int xerrno, uint32_t hash, unsigned char op) {
  register unsigned int i;
  uint32_t row_idx, row_start;
  int found_slot = FALSE, expired_entries = 0;
  time_t now;
  struct statcache_entry *sce = NULL;

  if (statcache_table == NULL) {
    errno = EPERM;
    return -1;
  }

  /* Find an open slot in the list for this new entry. */
  now = time(NULL);

  row_idx = hash % statcache_nrows;
  row_start = (row_idx * statcache_rowlen);

  for (i = 0; i < STATCACHE_COLS_PER_ROW; i++) {
    uint32_t col_start;

    pr_signals_handle();

    /* Each column is one statcache_entry within the row's byte range. */
    col_start = (row_start + (i * sizeof(struct statcache_entry)));
    sce = (((char *) statcache_table_data) + col_start);
    if (sce->sce_ts == 0) {
      /* Empty slot */
      found_slot = TRUE;
      break;
    }

    /* If existing item is too old, use this slot. Note that there
     * are different expiry rules for negative cache entries (i.e.
     * errors) than for positive cache entries.
     */
    if (sce->sce_errno == 0) {
      if (now > (sce->sce_ts + statcache_max_positive_age)) {
        found_slot = TRUE;
        expired_entries++;
        break;
      }

    } else {
      if (now > (sce->sce_ts + statcache_max_negative_age)) {
        found_slot = TRUE;
        expired_entries++;
        break;
      }
    }
  }

  if (found_slot == FALSE) {
    /* Row is full of live entries; record the rejection and bail. */
    if (statcache_wlock_stats(fd) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error write-locking shared memory: %s", strerror(errno));
    }

    statcache_stats_incr_rejects(1);

    if (statcache_unlock_stats(fd) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error un-locking shared memory: %s", strerror(errno));
    }

    errno = ENOSPC;
    return -1;
  }

  if (st != NULL) {
    pr_trace_msg(trace_channel, 9,
      "adding entry for path '%s' (hash %lu) at row %lu, col %u "
      "(op %s, type %s)", path,
      (unsigned long) hash, (unsigned long) row_idx + 1, i + 1,
      op == FSIO_FILE_LSTAT ? "LSTAT" : "STAT",
      S_ISLNK(st->st_mode) ? "symlink" :
        S_ISDIR(st->st_mode) ? "dir" : "file");

  } else {
    /* st of NULL means a negative (error-caching) entry. */
    pr_trace_msg(trace_channel, 9,
      "adding entry for path '%s' (hash %lu) at row %lu, col %u "
      "(op %s, errno %d)", path,
      (unsigned long) hash, (unsigned long) row_idx + 1, i + 1,
      op == FSIO_FILE_LSTAT ? "LSTAT" : "STAT", xerrno);
  }

  /* Populate the slot; setting sce_ts last-but-one marks it live. */
  sce->sce_hash = hash;
  sce->sce_pathlen = pathlen;

  /* Include trailing NUL. */
  memcpy(sce->sce_path, path, pathlen + 1);
  if (st != NULL) {
    memcpy(&(sce->sce_stat), st, sizeof(struct stat));
  }
  sce->sce_errno = xerrno;
  sce->sce_ts = now;
  sce->sce_op = op;

  /* Update the shared counters under the stats lock. */
  if (statcache_wlock_stats(fd) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  statcache_stats_incr_count(1);
  if (expired_entries > 0) {
    statcache_stats_incr_count(-expired_entries);
    statcache_stats_incr_expires(expired_entries);
  }

  if (statcache_unlock_stats(fd) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error un-locking shared memory: %s", strerror(errno));
  }

  return 0;
}
784
/* Looks up the cached entry for path (and operation op) in the row
 * selected by hash.  On a hit, the cached errno is stored in *xerrno and,
 * when that errno is zero, the cached stat data is copied into st; returns
 * 0.  On a miss, returns -1 with errno set to ENOENT (or EPERM when the
 * table is not attached).  Expired entries encountered during the scan are
 * cleared as a side effect, and hit/miss/expire counters are updated.
 *
 * NOTE: callers acquire the row write lock (statcache_wlock_row) before
 * calling this; only the stats header is locked here.
 */
static int statcache_table_get(int fd, const char *path, size_t pathlen,
    struct stat *st, int *xerrno, uint32_t hash, unsigned char op) {
  register unsigned int i;
  int expired_entries = 0, res = -1;
  uint32_t row_idx, row_start;

  if (statcache_table == NULL) {
    errno = EPERM;
    return -1;
  }

  row_idx = hash % statcache_nrows;
  row_start = (row_idx * statcache_rowlen);

  /* Find the matching entry for this path. */
  for (i = 0; i < STATCACHE_COLS_PER_ROW; i++) {
    uint32_t col_start;
    struct statcache_entry *sce;

    pr_signals_handle();

    col_start = (row_start + (i * sizeof(struct statcache_entry)));
    sce = (((char *) statcache_table_data) + col_start);
    /* sce_ts of zero marks an empty slot. */
    if (sce->sce_ts > 0) {
      if (sce->sce_hash == hash) {
        /* Possible collision; check paths. */
        if (sce->sce_pathlen == pathlen) {

          /* Include the trailing NUL in the comparison... */
          if (strncmp(sce->sce_path, path, pathlen + 1) == 0) {
            time_t now;

            now = time(NULL);

            /* Check the age. If it's aged out, clear it now, for later use. */
            if (sce->sce_errno == 0) {
              if (now > (sce->sce_ts + statcache_max_positive_age)) {
                pr_trace_msg(trace_channel, 17,
                  "clearing expired cache entry for path '%s' (hash %lu) "
                  "at row %lu, col %u: aged %lu secs",
                  sce->sce_path, (unsigned long) hash,
                  (unsigned long) row_idx + 1, i + 1,
                  (unsigned long) (now - sce->sce_ts));
                sce->sce_ts = 0;
                expired_entries++;
                continue;
              }

            } else {
              /* Negative (error-caching) entries use the shorter max age. */
              if (now > (sce->sce_ts + statcache_max_negative_age)) {
                pr_trace_msg(trace_channel, 17,
                  "clearing expired negative cache entry for path '%s' "
                  "(hash %lu) at row %lu, col %u: aged %lu secs",
                  sce->sce_path, (unsigned long) hash,
                  (unsigned long) row_idx + 1, i + 1,
                  (unsigned long) (now - sce->sce_ts));
                sce->sce_ts = 0;
                expired_entries++;
                continue;
              }
            }

            /* If the ops match, OR if the entry is from a LSTAT AND the entry
             * is NOT a symlink, we can use it.
             */
            if (sce->sce_op == op ||
                (sce->sce_op == FSIO_FILE_LSTAT &&
                 S_ISLNK(sce->sce_stat.st_mode) == FALSE)) {
              /* Found matching entry. */
              pr_trace_msg(trace_channel, 9,
                "found entry for path '%s' (hash %lu) at row %lu, col %u",
                path, (unsigned long) hash, (unsigned long) row_idx + 1, i + 1);

              *xerrno = sce->sce_errno;
              if (sce->sce_errno == 0) {
                memcpy(st, &(sce->sce_stat), sizeof(struct stat));
              }

              res = 0;
              break;
            }
          }
        }
      }
    }
  }

  /* Update the shared counters under the stats lock. */
  if (statcache_wlock_stats(fd) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  if (res == 0) {
    statcache_stats_incr_hits(1);

  } else {
    statcache_stats_incr_misses(1);
  }

  if (expired_entries > 0) {
    statcache_stats_incr_count(-expired_entries);
    statcache_stats_incr_expires(expired_entries);
  }

  if (statcache_unlock_stats(fd) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error un-locking shared memory: %s", strerror(errno));
  }

  if (res < 0) {
    errno = ENOENT;
  }

  return res;
}
900
/* Removes all entries for path from the row selected by hash (there may be
 * more than one, e.g. one for LSTAT and another for STAT), by zeroing each
 * matching entry's timestamp field.  Returns 0 if at least one entry was
 * removed; otherwise -1 with errno set to ENOENT (or EPERM when the table
 * is not attached).
 *
 * NOTE: callers are expected to hold the row lock; only the stats header
 * is locked here.
 */
static int statcache_table_remove(int fd, const char *path, size_t pathlen,
    uint32_t hash) {
  register unsigned int i;
  uint32_t row_idx, row_start;
  int removed_entries = 0, res = -1;

  if (statcache_table == NULL) {
    errno = EPERM;
    return -1;
  }

  row_idx = hash % statcache_nrows;
  row_start = (row_idx * statcache_rowlen);

  /* Find the matching entry for this path. */
  for (i = 0; i < STATCACHE_COLS_PER_ROW; i++) {
    uint32_t col_start;
    struct statcache_entry *sce;

    pr_signals_handle();

    col_start = (row_start + (i * sizeof(struct statcache_entry)));
    sce = (((char *) statcache_table_data) + col_start);
    /* sce_ts of zero marks an empty slot. */
    if (sce->sce_ts > 0) {
      if (sce->sce_hash == hash) {
        /* Possible collision; check paths. */
        if (sce->sce_pathlen == pathlen) {

          /* Include the trailing NUL in the comparison... */
          if (strncmp(sce->sce_path, path, pathlen + 1) == 0) {
            /* Found matching entry. Clear it by zeroing timestamp field. */

            pr_trace_msg(trace_channel, 9,
              "removing entry for path '%s' (hash %lu) at row %lu, col %u",
              path, (unsigned long) hash, (unsigned long) row_idx + 1, i + 1);

            sce->sce_ts = 0;
            removed_entries++;
            res = 0;

            /* Rather than returning now, we finish iterating through
             * the bucket, in order to clear out multiple entries for
             * the same path (e.g. one for LSTAT, and another for STAT).
             */
          }
        }
      }
    }
  }

  if (res == 0) {
    /* Update the shared count under the stats lock. */
    if (statcache_wlock_stats(fd) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error write-locking shared memory: %s", strerror(errno));
    }

    if (removed_entries > 0) {
      statcache_stats_incr_count(-removed_entries);
    }

    if (statcache_unlock_stats(fd) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error un-locking shared memory: %s", strerror(errno));
    }

  } else {
    errno = ENOENT;
  }

  return res;
}
972
/* Produces the canonical (absolute, '~'-interpolated) form of path,
 * allocated from pool p, storing its length in *pathlen.  Returns NULL
 * (with errno set) on failure.
 */
static const char *statcache_get_canon_path(pool *p, const char *path,
    size_t *pathlen) {
  char *full_path, *resolved;
  size_t bufsz = PR_TUNABLE_PATH_MAX + 1;

  /* Handle any '~' interpolation needed.  A NULL result happens when the
   * '~' was just that, and did NOT refer to any known user; fall back to
   * the path as given.
   */
  resolved = dir_interpolate(p, path);
  if (resolved == NULL) {
    resolved = (char *) path;
  }

  full_path = palloc(p, bufsz);
  if (pr_fs_dircat(full_path, bufsz, pr_fs_getcwd(), resolved) < 0) {
    errno = ENOMEM;
    return NULL;
  }

  *pathlen = strlen(full_path);
  return full_path;
}
998
999 /* FSIO callbacks
1000 */
1001
/* FSIO stat(2) handler: consults the shared cache first (keyed by the
 * canonicalized path), falling back to the real stat(2) and caching the
 * result -- positively, or negatively (the errno) when the call failed and
 * negative caching is enabled.  Returns the stat(2) result/errno, cached
 * or fresh.
 */
static int statcache_fsio_stat(pr_fs_t *fs, const char *path,
    struct stat *st) {
  int res, tab_fd, xerrno = 0;
  const char *canon_path = NULL;
  size_t canon_pathlen = 0;
  pool *p;
  uint32_t hash;

  p = make_sub_pool(statcache_pool);
  pr_pool_tag(p, "statcache_fsio_stat sub-pool");
  canon_path = statcache_get_canon_path(p, path, &canon_pathlen);
  if (canon_path == NULL) {
    xerrno = errno;

    destroy_pool(p);
    errno = xerrno;
    return -1;
  }

  hash = statcache_hash(canon_path, canon_pathlen);
  tab_fd = statcache_tabfh->fh_fd;

  if (statcache_wlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  res = statcache_table_get(tab_fd, canon_path, canon_pathlen, st, &xerrno,
    hash, FSIO_FILE_STAT);

  if (statcache_unlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  if (res == 0) {
    /* Cache hit; a nonzero cached errno means a negative entry. */
    if (xerrno != 0) {
      res = -1;

    } else {
      pr_trace_msg(trace_channel, 11,
        "using cached stat for path '%s'", canon_path);
    }

    destroy_pool(p);
    errno = xerrno;
    return res;
  }

  /* Cache miss: do the real stat(2), then cache the outcome. */
  res = stat(path, st);
  xerrno = errno;

  /* Bug fix: the row write lock was previously acquired twice in a row
   * here (with only a single unlock below); acquire it exactly once.
   */
  if (statcache_wlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  if (res < 0) {
    if (statcache_max_negative_age > 0) {
      /* Negatively cache the failed stat(2). */
      if (statcache_table_add(tab_fd, canon_path, canon_pathlen, NULL, xerrno,
          hash, FSIO_FILE_STAT) < 0) {
        pr_trace_msg(trace_channel, 3, "error adding entry for path '%s': %s",
          canon_path, strerror(errno));
      }
    }

  } else {
    if (statcache_table_add(tab_fd, canon_path, canon_pathlen, st, 0, hash,
        FSIO_FILE_STAT) < 0) {
      pr_trace_msg(trace_channel, 3, "error adding entry for path '%s': %s",
        canon_path, strerror(errno));
    }
  }

  if (statcache_unlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  destroy_pool(p);
  errno = xerrno;
  return res;
}
1091
/* FSIO fstat(2) handler: consults the shared cache (keyed by the handle's
 * fh_path, under the STAT op), falling back to the real fstat(2) and
 * caching the result -- positively, or negatively (the errno) when the
 * call failed and negative caching is enabled.
 */
static int statcache_fsio_fstat(pr_fh_t *fh, int fd, struct stat *st) {
  int res, tab_fd, xerrno = 0;
  size_t pathlen = 0;
  uint32_t hash;

  /* XXX Core FSIO API should have an fh_pathlen member.
   *
   * XXX Core FSIO API should have an fh_notes table, so that e.g.
   * mod_statcache could generate its hash for this handle only once, and
   * stash it in the table.
   */

  pathlen = strlen(fh->fh_path);
  hash = statcache_hash(fh->fh_path, pathlen);
  tab_fd = statcache_tabfh->fh_fd;

  if (statcache_wlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  res = statcache_table_get(tab_fd, fh->fh_path, pathlen, st, &xerrno, hash,
    FSIO_FILE_STAT);

  if (statcache_unlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  if (res == 0) {
    /* Cache hit; a nonzero cached errno means a negative entry. */
    if (xerrno != 0) {
      res = -1;

    } else {
      pr_trace_msg(trace_channel, 11,
        "using cached stat for path '%s'", fh->fh_path);
    }

    errno = xerrno;
    return res;
  }

  /* Cache miss: do the real fstat(2), then cache the outcome. */
  res = fstat(fd, st);
  xerrno = errno;

  if (statcache_wlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  if (res < 0) {
    if (statcache_max_negative_age > 0) {
      /* Negatively cache the failed fstat(2). */
      if (statcache_table_add(tab_fd, fh->fh_path, pathlen, NULL, xerrno,
          hash, FSIO_FILE_STAT) < 0) {
        pr_trace_msg(trace_channel, 3, "error adding entry for path '%s': %s",
          fh->fh_path, strerror(errno));
      }
    }

  } else {
    if (statcache_table_add(tab_fd, fh->fh_path, pathlen, st, 0, hash,
        FSIO_FILE_STAT) < 0) {
      pr_trace_msg(trace_channel, 3, "error adding entry for path '%s': %s",
        fh->fh_path, strerror(errno));
    }
  }

  if (statcache_unlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  errno = xerrno;
  return res;
}
1168
/* FSIO lstat(2) handler.  Checks the shared-memory cache, keyed by the
 * canonicalized path, before falling back to the real lstat(2); the result
 * (or, when negative caching is enabled, the failure errno) is then cached.
 *
 * Note: the real lstat(2) is invoked on the original path, while cache
 * entries are keyed by the canonicalized path.
 */
static int statcache_fsio_lstat(pr_fs_t *fs, const char *path,
    struct stat *st) {
  int res, tab_fd, xerrno = 0;
  const char *canon_path = NULL;
  size_t canon_pathlen = 0;
  pool *p;
  uint32_t hash;

  p = make_sub_pool(statcache_pool);
  pr_pool_tag(p, "statcache_fsio_lstat sub-pool");
  canon_path = statcache_get_canon_path(p, path, &canon_pathlen);
  if (canon_path == NULL) {
    /* Canonicalization failed; propagate its errno. */
    xerrno = errno;

    destroy_pool(p);
    errno = xerrno;
    return -1;
  }

  hash = statcache_hash(canon_path, canon_pathlen);
  tab_fd = statcache_tabfh->fh_fd;

  /* Lock failures are logged but non-fatal; we proceed regardless. */
  if (statcache_wlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  res = statcache_table_get(tab_fd, canon_path, canon_pathlen, st, &xerrno,
    hash, FSIO_FILE_LSTAT);

  if (statcache_unlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  if (res == 0) {
    /* Cache hit.  A nonzero cached errno indicates a negatively-cached
     * failure; report it to the caller as such.
     */
    if (xerrno != 0) {
      res = -1;

    } else {
      pr_trace_msg(trace_channel, 11,
        "using cached lstat for path '%s'", canon_path);
    }

    destroy_pool(p);
    errno = xerrno;
    return res;
  }

  /* Cache miss: perform the real lstat(2), then cache the outcome. */
  res = lstat(path, st);
  xerrno = errno;

  if (statcache_wlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  if (res < 0) {
    if (statcache_max_negative_age > 0) {
      /* Negatively cache the failed lstat(2). */
      if (statcache_table_add(tab_fd, canon_path, canon_pathlen, NULL, xerrno,
          hash, FSIO_FILE_LSTAT) < 0) {
        pr_trace_msg(trace_channel, 3, "error adding entry for path '%s': %s",
          canon_path, strerror(errno));
      }
    }

  } else {
    if (statcache_table_add(tab_fd, canon_path, canon_pathlen, st, 0, hash,
        FSIO_FILE_LSTAT) < 0) {
      pr_trace_msg(trace_channel, 3, "error adding entry for path '%s': %s",
        canon_path, strerror(errno));
    }
  }

  if (statcache_unlock_row(tab_fd, hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  destroy_pool(p);
  errno = xerrno;
  return res;
}
1253
/* FSIO rename(2) handler.  On success, evicts any cached stat data for
 * BOTH the source and destination paths (each under its own row lock),
 * since both are now stale.
 */
static int statcache_fsio_rename(pr_fs_t *fs, const char *rnfm,
    const char *rnto) {
  int res, xerrno;

  res = rename(rnfm, rnto);
  xerrno = errno;

  if (res == 0) {
    int tab_fd;
    const char *canon_rnfm = NULL, *canon_rnto = NULL;
    size_t canon_rnfmlen = 0, canon_rntolen = 0;
    pool *p;
    uint32_t hash_rnfm, hash_rnto;

    p = make_sub_pool(statcache_pool);
    pr_pool_tag(p, "statcache_fsio_rename sub-pool");

    canon_rnfm = statcache_get_canon_path(p, rnfm, &canon_rnfmlen);
    if (canon_rnfm == NULL) {
      /* Canonicalization failed; rename(2) itself succeeded (res == 0),
       * but the canonicalization errno is propagated.
       */
      xerrno = errno;

      destroy_pool(p);
      errno = xerrno;
      return res;
    }

    canon_rnto = statcache_get_canon_path(p, rnto, &canon_rntolen);
    if (canon_rnto == NULL) {
      xerrno = errno;

      destroy_pool(p);
      errno = xerrno;
      return res;
    }

    hash_rnfm = statcache_hash(canon_rnfm, canon_rnfmlen);
    hash_rnto = statcache_hash(canon_rnto, canon_rntolen);
    tab_fd = statcache_tabfh->fh_fd;

    /* Evict the source path entry.  Lock failures are logged but
     * non-fatal.
     */
    if (statcache_wlock_row(tab_fd, hash_rnfm) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error write-locking shared memory: %s", strerror(errno));
    }

    (void) statcache_table_remove(tab_fd, canon_rnfm, canon_rnfmlen, hash_rnfm);

    if (statcache_unlock_row(tab_fd, hash_rnfm) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error unlocking shared memory: %s", strerror(errno));
    }

    /* Then evict the destination path entry, under its own row lock. */
    if (statcache_wlock_row(tab_fd, hash_rnto) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error write-locking shared memory: %s", strerror(errno));
    }

    (void) statcache_table_remove(tab_fd, canon_rnto, canon_rntolen, hash_rnto);

    if (statcache_unlock_row(tab_fd, hash_rnto) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error unlocking shared memory: %s", strerror(errno));
    }

    destroy_pool(p);
  }

  errno = xerrno;
  return res;
}
1323
/* FSIO unlink(2) handler.  A successful unlink(2) invalidates any cached
 * stat data for the canonicalized path.
 */
static int statcache_fsio_unlink(pr_fs_t *fs, const char *path) {
  int res, xerrno;

  res = unlink(path);
  xerrno = errno;

  if (res == 0) {
    const char *cpath = NULL;
    size_t cpathlen = 0;
    pool *sub_pool;

    sub_pool = make_sub_pool(statcache_pool);
    pr_pool_tag(sub_pool, "statcache_fsio_unlink sub-pool");

    cpath = statcache_get_canon_path(sub_pool, path, &cpathlen);
    if (cpath != NULL) {
      int shm_fd;
      uint32_t row_hash;

      row_hash = statcache_hash(cpath, cpathlen);
      shm_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      (void) statcache_table_remove(shm_fd, cpath, cpathlen, row_hash);

      if (statcache_unlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

    } else {
      /* Canonicalization failed; preserve its errno for the caller. */
      xerrno = errno;
    }

    destroy_pool(sub_pool);
  }

  errno = xerrno;
  return res;
}
1369
/* FSIO open(2) handler.  When the open may create or truncate the file
 * (O_CREAT/O_TRUNC), any cached stat data for the path is evicted, since
 * the file's metadata may have just changed.
 */
static int statcache_fsio_open(pr_fh_t *fh, const char *path, int flags) {
  int res, xerrno;

  res = open(path, flags, PR_OPEN_MODE);
  xerrno = errno;

  if (res >= 0) {
    /* Clear the cache for this path, but only if O_CREAT or O_TRUNC are
     * present.
     */
    if ((flags & O_CREAT) ||
        (flags & O_TRUNC)) {
      int tab_fd;
      const char *canon_path = NULL;
      size_t canon_pathlen = 0;
      pool *p;
      uint32_t hash;

      p = make_sub_pool(statcache_pool);
      pr_pool_tag(p, "statcache_fsio_open sub-pool");
      canon_path = statcache_get_canon_path(p, path, &canon_pathlen);
      if (canon_path == NULL) {
        /* Canonicalization failed; open(2) itself succeeded, but the
         * canonicalization errno is propagated.
         */
        xerrno = errno;

        destroy_pool(p);
        errno = xerrno;
        return res;
      }

      hash = statcache_hash(canon_path, canon_pathlen);
      tab_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(tab_fd, hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      pr_trace_msg(trace_channel, 14,
        "removing entry for path '%s' due to open(2) flags", canon_path);
      (void) statcache_table_remove(tab_fd, canon_path, canon_pathlen, hash);

      if (statcache_unlock_row(tab_fd, hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

      destroy_pool(p);
    }
  }

  errno = xerrno;
  return res;
}
1423
/* FSIO write(2) handler.  Any successful write changes the file's size
 * and/or timestamps, so the cached stat data for the handle's path is
 * evicted.
 */
static int statcache_fsio_write(pr_fh_t *fh, int fd, const char *buf,
    size_t buflen) {
  ssize_t nwritten;
  int xerrno;

  /* write(2) returns ssize_t; keep the full-width result rather than
   * narrowing to int before the sign/zero test.
   */
  nwritten = write(fd, buf, buflen);
  xerrno = errno;

  if (nwritten > 0) {
    int tab_fd;
    size_t pathlen = 0;
    uint32_t hash;

    pathlen = strlen(fh->fh_path);
    hash = statcache_hash(fh->fh_path, pathlen);
    tab_fd = statcache_tabfh->fh_fd;

    /* Lock failures are logged but non-fatal. */
    if (statcache_wlock_row(tab_fd, hash) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error write-locking shared memory: %s", strerror(errno));
    }

    (void) statcache_table_remove(tab_fd, fh->fh_path, pathlen, hash);

    if (statcache_unlock_row(tab_fd, hash) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error unlocking shared memory: %s", strerror(errno));
    }
  }

  errno = xerrno;

  /* The FSIO API declares this callback as returning int. */
  return (int) nwritten;
}
1456
/* FSIO truncate(2) handler.  A successful truncation changes the file's
 * size, so any cached stat data for the canonicalized path is evicted.
 */
static int statcache_fsio_truncate(pr_fs_t *fs, const char *path, off_t len) {
  int res, xerrno;

  res = truncate(path, len);
  xerrno = errno;

  if (res == 0) {
    const char *cpath = NULL;
    size_t cpathlen = 0;
    pool *sub_pool;

    sub_pool = make_sub_pool(statcache_pool);
    pr_pool_tag(sub_pool, "statcache_fsio_truncate sub-pool");

    cpath = statcache_get_canon_path(sub_pool, path, &cpathlen);
    if (cpath != NULL) {
      int shm_fd;
      uint32_t row_hash;

      row_hash = statcache_hash(cpath, cpathlen);
      shm_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      (void) statcache_table_remove(shm_fd, cpath, cpathlen, row_hash);

      if (statcache_unlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

    } else {
      /* Canonicalization failed; preserve its errno for the caller. */
      xerrno = errno;
    }

    destroy_pool(sub_pool);
  }

  errno = xerrno;
  return res;
}
1502
/* FSIO ftruncate(2) handler.  A successful truncation changes the file's
 * size, so any cached stat data for the handle's path is evicted.
 */
static int statcache_fsio_ftruncate(pr_fh_t *fh, int fd, off_t len) {
  int res, xerrno, shm_fd;
  size_t plen;
  uint32_t row_hash;

  res = ftruncate(fd, len);
  xerrno = errno;

  if (res < 0) {
    /* ftruncate(2) failed; the cache is unaffected. */
    errno = xerrno;
    return res;
  }

  plen = strlen(fh->fh_path);
  row_hash = statcache_hash(fh->fh_path, plen);
  shm_fd = statcache_tabfh->fh_fd;

  /* Lock failures are logged but non-fatal. */
  if (statcache_wlock_row(shm_fd, row_hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  (void) statcache_table_remove(shm_fd, fh->fh_path, plen, row_hash);

  if (statcache_unlock_row(shm_fd, row_hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  errno = xerrno;
  return res;
}
1534
/* FSIO chmod(2) handler.  A successful mode change invalidates any cached
 * stat data for the canonicalized path.
 */
static int statcache_fsio_chmod(pr_fs_t *fs, const char *path, mode_t mode) {
  int res, xerrno;

  res = chmod(path, mode);
  xerrno = errno;

  if (res == 0) {
    const char *cpath = NULL;
    size_t cpathlen = 0;
    pool *sub_pool;

    sub_pool = make_sub_pool(statcache_pool);
    pr_pool_tag(sub_pool, "statcache_fsio_chmod sub-pool");

    cpath = statcache_get_canon_path(sub_pool, path, &cpathlen);
    if (cpath != NULL) {
      int shm_fd;
      uint32_t row_hash;

      row_hash = statcache_hash(cpath, cpathlen);
      shm_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      (void) statcache_table_remove(shm_fd, cpath, cpathlen, row_hash);

      if (statcache_unlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

    } else {
      /* Canonicalization failed; preserve its errno for the caller. */
      xerrno = errno;
    }

    destroy_pool(sub_pool);
  }

  errno = xerrno;
  return res;
}
1580
/* FSIO fchmod(2) handler.  A successful mode change invalidates any cached
 * stat data for the handle's path.
 */
static int statcache_fsio_fchmod(pr_fh_t *fh, int fd, mode_t mode) {
  int res, xerrno, shm_fd;
  size_t plen;
  uint32_t row_hash;

  res = fchmod(fd, mode);
  xerrno = errno;

  if (res < 0) {
    /* fchmod(2) failed; the cache is unaffected. */
    errno = xerrno;
    return res;
  }

  plen = strlen(fh->fh_path);
  row_hash = statcache_hash(fh->fh_path, plen);
  shm_fd = statcache_tabfh->fh_fd;

  /* Lock failures are logged but non-fatal. */
  if (statcache_wlock_row(shm_fd, row_hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  (void) statcache_table_remove(shm_fd, fh->fh_path, plen, row_hash);

  if (statcache_unlock_row(shm_fd, row_hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  errno = xerrno;
  return res;
}
1612
/* FSIO chown(2) handler.  A successful ownership change invalidates any
 * cached stat data for the canonicalized path.
 */
static int statcache_fsio_chown(pr_fs_t *fs, const char *path, uid_t uid,
    gid_t gid) {
  int res, xerrno;

  res = chown(path, uid, gid);
  xerrno = errno;

  if (res == 0) {
    const char *cpath = NULL;
    size_t cpathlen = 0;
    pool *sub_pool;

    sub_pool = make_sub_pool(statcache_pool);
    pr_pool_tag(sub_pool, "statcache_fsio_chown sub-pool");

    cpath = statcache_get_canon_path(sub_pool, path, &cpathlen);
    if (cpath != NULL) {
      int shm_fd;
      uint32_t row_hash;

      row_hash = statcache_hash(cpath, cpathlen);
      shm_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      (void) statcache_table_remove(shm_fd, cpath, cpathlen, row_hash);

      if (statcache_unlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

    } else {
      /* Canonicalization failed; preserve its errno for the caller. */
      xerrno = errno;
    }

    destroy_pool(sub_pool);
  }

  errno = xerrno;
  return res;
}
1659
/* FSIO fchown(2) handler.  A successful ownership change invalidates any
 * cached stat data for the handle's path.
 */
static int statcache_fsio_fchown(pr_fh_t *fh, int fd, uid_t uid, gid_t gid) {
  int res, xerrno, shm_fd;
  size_t plen;
  uint32_t row_hash;

  res = fchown(fd, uid, gid);
  xerrno = errno;

  if (res < 0) {
    /* fchown(2) failed; the cache is unaffected. */
    errno = xerrno;
    return res;
  }

  plen = strlen(fh->fh_path);
  row_hash = statcache_hash(fh->fh_path, plen);
  shm_fd = statcache_tabfh->fh_fd;

  /* Lock failures are logged but non-fatal. */
  if (statcache_wlock_row(shm_fd, row_hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error write-locking shared memory: %s", strerror(errno));
  }

  (void) statcache_table_remove(shm_fd, fh->fh_path, plen, row_hash);

  if (statcache_unlock_row(shm_fd, row_hash) < 0) {
    pr_trace_msg(trace_channel, 3,
      "error unlocking shared memory: %s", strerror(errno));
  }

  errno = xerrno;
  return res;
}
1691
1692 #if PROFTPD_VERSION_NUMBER >= 0x0001030407
/* FSIO lchown(2) handler.  A successful ownership change invalidates any
 * cached stat data for the canonicalized path.
 */
static int statcache_fsio_lchown(pr_fs_t *fs, const char *path, uid_t uid,
    gid_t gid) {
  int res, xerrno;

  res = lchown(path, uid, gid);
  xerrno = errno;

  if (res == 0) {
    const char *cpath = NULL;
    size_t cpathlen = 0;
    pool *sub_pool;

    sub_pool = make_sub_pool(statcache_pool);
    pr_pool_tag(sub_pool, "statcache_fsio_lchown sub-pool");

    cpath = statcache_get_canon_path(sub_pool, path, &cpathlen);
    if (cpath != NULL) {
      int shm_fd;
      uint32_t row_hash;

      row_hash = statcache_hash(cpath, cpathlen);
      shm_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      (void) statcache_table_remove(shm_fd, cpath, cpathlen, row_hash);

      if (statcache_unlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

    } else {
      /* Canonicalization failed; preserve its errno for the caller. */
      xerrno = errno;
    }

    destroy_pool(sub_pool);
  }

  errno = xerrno;
  return res;
}
1739 #endif /* ProFTPD 1.3.4c or later */
1740
/* FSIO utimes(2) handler.  A successful timestamp change invalidates any
 * cached stat data for the canonicalized path.
 */
static int statcache_fsio_utimes(pr_fs_t *fs, const char *path,
    struct timeval *tvs) {
  int res, xerrno;

  res = utimes(path, tvs);
  xerrno = errno;

  if (res == 0) {
    const char *cpath = NULL;
    size_t cpathlen = 0;
    pool *sub_pool;

    sub_pool = make_sub_pool(statcache_pool);
    pr_pool_tag(sub_pool, "statcache_fsio_utimes sub-pool");

    cpath = statcache_get_canon_path(sub_pool, path, &cpathlen);
    if (cpath != NULL) {
      int shm_fd;
      uint32_t row_hash;

      row_hash = statcache_hash(cpath, cpathlen);
      shm_fd = statcache_tabfh->fh_fd;

      /* Lock failures are logged but non-fatal. */
      if (statcache_wlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error write-locking shared memory: %s", strerror(errno));
      }

      (void) statcache_table_remove(shm_fd, cpath, cpathlen, row_hash);

      if (statcache_unlock_row(shm_fd, row_hash) < 0) {
        pr_trace_msg(trace_channel, 3,
          "error unlocking shared memory: %s", strerror(errno));
      }

    } else {
      /* Canonicalization failed; preserve its errno for the caller. */
      xerrno = errno;
    }

    destroy_pool(sub_pool);
  }

  errno = xerrno;
  return res;
}
1787
/* FSIO futimes(2) handler.  On platforms without futimes(2), or when the
 * platform's futimes(2) is a non-functional stub (ENOSYS), falls back to
 * the path-based utimes handler.  A successful timestamp change evicts any
 * cached stat data for the handle's path.
 */
static int statcache_fsio_futimes(pr_fh_t *fh, int fd, struct timeval *tvs) {
#ifdef HAVE_FUTIMES
  int res, xerrno;

  /* Check for an ENOSYS errno; if so, fallback to using fsio_utimes. Some
   * platforms will provide a futimes(2) stub which does not actually do
   * anything.
   */
  res = futimes(fd, tvs);
  xerrno = errno;

  if (res < 0 &&
      xerrno == ENOSYS) {
    return statcache_fsio_utimes(fh->fh_fs, fh->fh_path, tvs);
  }

  if (res == 0) {
    int tab_fd;
    size_t pathlen = 0;
    uint32_t hash;

    pathlen = strlen(fh->fh_path);
    hash = statcache_hash(fh->fh_path, pathlen);
    tab_fd = statcache_tabfh->fh_fd;

    /* Lock failures are logged but non-fatal. */
    if (statcache_wlock_row(tab_fd, hash) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error write-locking shared memory: %s", strerror(errno));
    }

    (void) statcache_table_remove(tab_fd, fh->fh_path, pathlen, hash);

    if (statcache_unlock_row(tab_fd, hash) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error unlocking shared memory: %s", strerror(errno));
    }
  }

  errno = xerrno;
  return res;
#else
  return statcache_fsio_utimes(fh->fh_fs, fh->fh_path, tvs);
#endif /* HAVE_FUTIMES */
}
1832
1833 #ifdef PR_USE_CTRLS
1834 /* Controls handlers
1835 */
1836
/* Controls handler for the 'statcache' control action.  Supports the
 * "info" command (statistics summary) and the "dump" command (listing of
 * every table cell).  Returns 0 on success, -1 on error (access denied,
 * missing/unknown parameters, module disabled, or lock failure).
 */
static int statcache_handle_statcache(pr_ctrls_t *ctrl, int reqargc,
    char **reqargv) {

  /* Check the statcache ACL. */
  if (!pr_ctrls_check_acl(ctrl, statcache_acttab, "statcache")) {

    /* Access denied */
    pr_ctrls_add_response(ctrl, "access denied");
    return -1;
  }

  /* Sanity check */
  if (reqargv == NULL) {
    pr_ctrls_add_response(ctrl, "missing parameters");
    return -1;
  }

  if (statcache_engine != TRUE) {
    pr_ctrls_add_response(ctrl, MOD_STATCACHE_VERSION " not enabled");
    return -1;
  }

  /* Check for options. */
  pr_getopt_reset();

  if (strcmp(reqargv[0], "info") == 0) {
    uint32_t count, highest, hits, misses, expires, rejects;
    float current_usage = 0.0, highest_usage = 0.0, hit_rate = 0.0;

    /* Snapshot all counters under a single read lock. */
    if (statcache_rlock_stats(statcache_tabfh->fh_fd) < 0) {
      pr_ctrls_add_response(ctrl, "error locking shared memory: %s",
        strerror(errno));
      return -1;
    }

    count = statcache_stats_get_count();
    highest = statcache_stats_get_highest();
    hits = statcache_stats_get_hits();
    misses = statcache_stats_get_misses();
    expires = statcache_stats_get_expires();
    rejects = statcache_stats_get_rejects();

    if (statcache_unlock_stats(statcache_tabfh->fh_fd) < 0) {
      pr_trace_msg(trace_channel, 3,
        "error un-locking shared memory: %s", strerror(errno));
    }

    current_usage = (((float) count / (float) statcache_capacity) * 100.0);
    highest_usage = (((float) highest / (float) statcache_capacity) * 100.0);
    if ((hits + misses) > 0) {
      hit_rate = (((float) hits / (float) (hits + misses)) * 100.0);
    }

    pr_log_debug(DEBUG7, MOD_STATCACHE_VERSION
      ": showing statcache statistics");

    pr_ctrls_add_response(ctrl,
      " hits %lu, misses %lu: %02.1f%% hit rate",
      (unsigned long) hits, (unsigned long) misses, hit_rate);
    pr_ctrls_add_response(ctrl,
      " expires %lu, rejects %lu", (unsigned long) expires,
      (unsigned long) rejects);
    pr_ctrls_add_response(ctrl, " current count: %lu (of %lu) (%02.1f%% usage)",
      (unsigned long) count, (unsigned long) statcache_capacity, current_usage);
    pr_ctrls_add_response(ctrl, " highest count: %lu (of %lu) (%02.1f%% usage)",
      (unsigned long) highest, (unsigned long) statcache_capacity,
      highest_usage);

  } else if (strcmp(reqargv[0], "dump") == 0) {
    register unsigned int i;
    time_t now;

    if (statcache_rlock_table(statcache_tabfh->fh_fd) < 0) {
      pr_ctrls_add_response(ctrl, "error locking shared memory: %s",
        strerror(errno));
      return -1;
    }

    pr_log_debug(DEBUG7, MOD_STATCACHE_VERSION ": dumping statcache");

    pr_ctrls_add_response(ctrl, "StatCache Contents:");
    now = time(NULL);

    for (i = 0; i < statcache_nrows; i++) {
      register unsigned int j;
      unsigned long row_start;

      pr_ctrls_add_response(ctrl, " Row %u:", i + 1);
      row_start = (i * statcache_rowlen);

      for (j = 0; j < STATCACHE_COLS_PER_ROW; j++) {
        unsigned long col_start;
        struct statcache_entry *sce;

        pr_signals_handle();

        col_start = (row_start + (j * sizeof(struct statcache_entry)));

        /* Cast the byte-offset pointer back to the entry type explicitly;
         * char * does not implicitly convert to struct statcache_entry *
         * in C.
         */
        sce = (struct statcache_entry *)
          (((char *) statcache_table_data) + col_start);
        if (sce->sce_ts > 0) {
          if (sce->sce_errno == 0) {
            pr_ctrls_add_response(ctrl, "  Col %u: '%s' (%u secs old)",
              j + 1, sce->sce_path, (unsigned int) (now - sce->sce_ts));

          } else {
            pr_ctrls_add_response(ctrl, "  Col %u: '%s' (error: %s)",
              j + 1, sce->sce_path, strerror(sce->sce_errno));
          }

        } else {
          pr_ctrls_add_response(ctrl, "  Col %u: <empty>", j + 1);
        }
      }
    }

    statcache_unlock_table(statcache_tabfh->fh_fd);

  } else {
    pr_ctrls_add_response(ctrl, "unknown statcache action requested: '%s'",
      reqargv[0]);
    return -1;
  }

  return 0;
}
1960
1961 #endif /* PR_USE_CTRLS */
1962
1963 /* Configuration handlers
1964 */
1965
1966 /* usage: StatCacheCapacity count */
set_statcachecapacity(cmd_rec * cmd)1967 MODRET set_statcachecapacity(cmd_rec *cmd) {
1968 int capacity;
1969
1970 CHECK_ARGS(cmd, 1);
1971 CHECK_CONF(cmd, CONF_ROOT);
1972
1973 capacity = atoi(cmd->argv[1]);
1974 if (capacity < STATCACHE_COLS_PER_ROW) {
1975 char str[32];
1976
1977 memset(str, '\0', sizeof(str));
1978 pr_snprintf(str, sizeof(str), "%d", (int) STATCACHE_COLS_PER_ROW);
1979 CONF_ERROR(cmd, pstrcat(cmd->tmp_pool, "parameter must be ", str,
1980 " or greater", NULL));
1981 }
1982
1983 /* Always round UP to the nearest multiple of STATCACHE_COLS_PER_ROW. */
1984 if (capacity % STATCACHE_COLS_PER_ROW != 0) {
1985 int factor;
1986
1987 factor = (capacity / (int) STATCACHE_COLS_PER_ROW);
1988 capacity = ((factor * (int) STATCACHE_COLS_PER_ROW) +
1989 (int) STATCACHE_COLS_PER_ROW);
1990 }
1991
1992 statcache_capacity = capacity;
1993 return PR_HANDLED(cmd);
1994 }
1995
1996 /* usage: StatCacheControlsACLs actions|all allow|deny user|group list */
/* Configuration handler for
 * "StatCacheControlsACLs actions|all allow|deny user|group list".
 * Requires Controls support; configures which users/groups may invoke the
 * module's ctrls actions.
 */
MODRET set_statcachectrlsacls(cmd_rec *cmd) {
#ifdef PR_USE_CTRLS
  char *bad_action = NULL, **actions = NULL;

  CHECK_ARGS(cmd, 4);
  CHECK_CONF(cmd, CONF_ROOT);

  /* We can cheat here, and use the ctrls_parse_acl() routine to
   * separate the given string...
   */
  actions = ctrls_parse_acl(cmd->tmp_pool, cmd->argv[1]);

  /* Check the second parameter to make sure it is "allow" or "deny" */
  if (strcmp(cmd->argv[2], "allow") != 0 &&
      strcmp(cmd->argv[2], "deny") != 0) {
    CONF_ERROR(cmd, "second parameter must be 'allow' or 'deny'");
  }

  /* Check the third parameter to make sure it is "user" or "group" */
  if (strcmp(cmd->argv[3], "user") != 0 &&
      strcmp(cmd->argv[3], "group") != 0) {
    CONF_ERROR(cmd, "third parameter must be 'user' or 'group'");
  }

  /* Apply the parsed ACL; a non-NULL return names the first unknown
   * action in the list.
   */
  bad_action = pr_ctrls_set_module_acls(statcache_acttab, statcache_pool,
    actions, cmd->argv[2], cmd->argv[3], cmd->argv[4]);
  if (bad_action != NULL) {
    CONF_ERROR(cmd, pstrcat(cmd->tmp_pool, ": unknown action: '",
      bad_action, "'", NULL));
  }

  return PR_HANDLED(cmd);
#else
  CONF_ERROR(cmd, "requires Controls support (use --enable-ctrls)");
#endif /* PR_USE_CTRLS */
}
2033
2034 /* usage: StatCacheEngine on|off */
set_statcacheengine(cmd_rec * cmd)2035 MODRET set_statcacheengine(cmd_rec *cmd) {
2036 int engine = -1;
2037 config_rec *c;
2038
2039 CHECK_ARGS(cmd, 1);
2040 CHECK_CONF(cmd, CONF_ROOT|CONF_VIRTUAL|CONF_GLOBAL);
2041
2042 engine = get_boolean(cmd, 1);
2043 if (engine == -1) {
2044 CONF_ERROR(cmd, "expected Boolean parameter");
2045 }
2046
2047 statcache_engine = engine;
2048
2049 c = add_config_param(cmd->argv[0], 1, NULL);
2050 c->argv[0] = palloc(c->pool, sizeof(int));
2051 *((int *) c->argv[0]) = engine;
2052
2053 return PR_HANDLED(cmd);
2054 }
2055
2056 /* usage: StatCacheMaxAge secs */
set_statcachemaxage(cmd_rec * cmd)2057 MODRET set_statcachemaxage(cmd_rec *cmd) {
2058 int positive_age;
2059
2060 if (cmd->argc < 2 ||
2061 cmd->argc > 3) {
2062 CONF_ERROR(cmd, "wrong number of parameters");
2063 }
2064
2065 CHECK_CONF(cmd, CONF_ROOT);
2066
2067 positive_age = atoi(cmd->argv[1]);
2068 if (positive_age <= 0) {
2069 CONF_ERROR(cmd, "positive-age parameter must be 1 or greater");
2070 }
2071
2072 if (cmd->argc == 2) {
2073 statcache_max_positive_age = statcache_max_negative_age = positive_age;
2074
2075 } else {
2076 int negative_age;
2077
2078 negative_age = atoi(cmd->argv[2]);
2079 if (negative_age < 0) {
2080 negative_age = 0;
2081 }
2082
2083 statcache_max_positive_age = positive_age;
2084 statcache_max_negative_age = negative_age;
2085 }
2086
2087 return PR_HANDLED(cmd);
2088 }
2089
2090 /* usage: StatCacheTable path */
/* Configuration handler for "StatCacheTable path".  The path must be
 * absolute; it is copied into the module's long-lived pool via pstrdup.
 */
MODRET set_statcachetable(cmd_rec *cmd) {
  CHECK_ARGS(cmd, 1);
  CHECK_CONF(cmd, CONF_ROOT);

  if (pr_fs_valid_path(cmd->argv[1]) < 0) {
    CONF_ERROR(cmd, "must be an absolute path");
  }

  statcache_table_path = pstrdup(statcache_pool, cmd->argv[1]);
  return PR_HANDLED(cmd);
}
2102
2103 /* Command handlers
2104 */
2105
statcache_post_pass(cmd_rec * cmd)2106 MODRET statcache_post_pass(cmd_rec *cmd) {
2107 pr_fs_t *fs;
2108 const char *proto;
2109
2110 if (statcache_engine == FALSE) {
2111 return PR_DECLINED(cmd);
2112 }
2113
2114 /* Unmount the default/system FS, so that our FS is used for relative
2115 * paths, too.
2116 */
2117 (void) pr_unmount_fs("/", NULL);
2118
2119 fs = pr_register_fs(statcache_pool, "statcache", "/");
2120 if (fs == NULL) {
2121 pr_log_debug(DEBUG3, MOD_STATCACHE_VERSION
2122 ": error registering 'statcache' fs: %s", strerror(errno));
2123 statcache_engine = FALSE;
2124 return PR_DECLINED(cmd);
2125 }
2126
2127 /* Add the module's custom FS callbacks here. */
2128 fs->stat = statcache_fsio_stat;
2129 fs->fstat = statcache_fsio_fstat;
2130 fs->lstat = statcache_fsio_lstat;
2131 fs->rename = statcache_fsio_rename;
2132 fs->unlink = statcache_fsio_unlink;
2133 fs->open = statcache_fsio_open;;
2134 fs->truncate = statcache_fsio_truncate;
2135 fs->ftruncate = statcache_fsio_ftruncate;
2136 fs->write = statcache_fsio_write;
2137 fs->chmod = statcache_fsio_chmod;
2138 fs->fchmod = statcache_fsio_fchmod;
2139 fs->chown = statcache_fsio_chown;
2140 fs->fchown = statcache_fsio_fchown;
2141 #if PROFTPD_VERSION_NUMBER >= 0x0001030407
2142 fs->lchown = statcache_fsio_lchown;
2143 #endif /* ProFTPD 1.3.4c or later */
2144 fs->utimes = statcache_fsio_utimes;
2145 fs->futimes = statcache_fsio_futimes;
2146
2147 pr_fs_setcwd(pr_fs_getvwd());
2148 pr_fs_clear_cache();
2149
2150 pr_event_register(&statcache_module, "fs.statcache.clear",
2151 statcache_fs_statcache_clear_ev, NULL);
2152
2153 /* If we are handling an SSH2 session, then we need to disable all
2154 * negative caching; something about ProFTPD's stat caching interacting
2155 * with mod_statcache's caching, AND mod_sftp's dispatching through
2156 * the main FTP handlers, causes unexpected behavior.
2157 */
2158
2159 proto = pr_session_get_protocol(0);
2160 if (strncmp(proto, "ssh2", 5) == 0 ||
2161 strncmp(proto, "sftp", 5) == 0 ||
2162 strncmp(proto, "scp", 4) == 0) {
2163 pr_trace_msg(trace_channel, 9,
2164 "disabling negative caching for %s protocol", proto);
2165 statcache_max_negative_age = 0;
2166 }
2167
2168 return PR_DECLINED(cmd);
2169 }
2170
2171 #ifdef MADV_WILLNEED
statcache_pre_list(cmd_rec * cmd)2172 MODRET statcache_pre_list(cmd_rec *cmd) {
2173 int res;
2174
2175 if (statcache_engine == FALSE) {
2176 return PR_DECLINED(cmd);
2177 }
2178
2179 res = madvise(statcache_table, statcache_tablesz, MADV_WILLNEED);
2180 if (res < 0) {
2181 pr_log_debug(DEBUG5, MOD_STATCACHE_VERSION
2182 ": madvise(2) error with MADV_WILLNEED: %s", strerror(errno));
2183 }
2184
2185 return PR_DECLINED(cmd);
2186 }
2187 #endif /* MADV_WILLNEED */
2188
2189 /* Event handlers
2190 */
2191
statcache_fs_statcache_clear_ev(const void * event_data,void * user_data)2192 static void statcache_fs_statcache_clear_ev(const void *event_data,
2193 void *user_data) {
2194 int tab_fd;
2195 const char *canon_path = NULL, *path;
2196 size_t canon_pathlen = 0;
2197 pool *p;
2198 uint32_t hash;
2199
2200 path = event_data;
2201 if (path == NULL) {
2202 return;
2203 }
2204
2205 p = make_sub_pool(statcache_pool);
2206 pr_pool_tag(p, "statcache_clear_ev sub-pool");
2207 canon_path = statcache_get_canon_path(p, path, &canon_pathlen);
2208 if (canon_path == NULL) {
2209 destroy_pool(p);
2210 return;
2211 }
2212
2213 hash = statcache_hash(canon_path, canon_pathlen);
2214 tab_fd = statcache_tabfh->fh_fd;
2215
2216 if (statcache_wlock_row(tab_fd, hash) < 0) {
2217 pr_trace_msg(trace_channel, 3,
2218 "error write-locking shared memory: %s", strerror(errno));
2219 }
2220
2221 pr_trace_msg(trace_channel, 14,
2222 "removing entry for path '%s' due to event", canon_path);
2223 (void) statcache_table_remove(tab_fd, canon_path, canon_pathlen, hash);
2224
2225 if (statcache_unlock_row(tab_fd, hash) < 0) {
2226 pr_trace_msg(trace_channel, 3,
2227 "error unlocking shared memory: %s", strerror(errno));
2228 }
2229
2230 destroy_pool(p);
2231 }
2232
statcache_sess_reinit_ev(const void * event_data,void * user_data)2233 static void statcache_sess_reinit_ev(const void *event_data, void *user_data) {
2234 int res;
2235
2236 /* A HOST command changed the main_server pointer; reinitialize ourselves. */
2237
2238 pr_event_unregister(&statcache_module, "core.session-reinit",
2239 statcache_sess_reinit_ev);
2240
2241 /* Restore defaults */
2242 statcache_engine = FALSE;
2243
2244 res = statcache_sess_init();
2245 if (res < 0) {
2246 pr_session_disconnect(&statcache_module,
2247 PR_SESS_DISCONNECT_SESSION_INIT_FAILED, NULL);
2248 }
2249 }
2250
statcache_shutdown_ev(const void * event_data,void * user_data)2251 static void statcache_shutdown_ev(const void *event_data, void *user_data) {
2252
2253 /* Remove the mmap from the system. We can only do this reliably
2254 * when the standalone daemon process exits; if it's an inetd process,
2255 * there many be other proftpd processes still running.
2256 */
2257
2258 if (getpid() == mpid &&
2259 ServerType == SERVER_STANDALONE &&
2260 (statcache_table != NULL && statcache_tabfh->fh_fd >= 0)) {
2261 int res;
2262
2263 res = munmap(statcache_table, statcache_tablesz);
2264 if (res < 0) {
2265 pr_log_debug(DEBUG1, MOD_STATCACHE_VERSION
2266 ": error detaching shared memory: %s", strerror(errno));
2267
2268 } else {
2269 pr_log_debug(DEBUG7, MOD_STATCACHE_VERSION
2270 ": detached %lu bytes of shared memory for StatCacheTable '%s'",
2271 (unsigned long) statcache_tablesz, statcache_table_path);
2272 }
2273
2274 res = pr_fsio_close(statcache_tabfh);
2275 if (res < 0) {
2276 pr_log_debug(DEBUG1, MOD_STATCACHE_VERSION
2277 ": error closing StatCacheTable '%s': %s", statcache_table_path,
2278 strerror(errno));
2279 }
2280 }
2281 }
2282
2283 #if defined(PR_SHARED_MODULE)
statcache_mod_unload_ev(const void * event_data,void * user_data)2284 static void statcache_mod_unload_ev(const void *event_data, void *user_data) {
2285 if (strcmp("mod_statcache.c", (const char *) event_data) == 0) {
2286 #ifdef PR_USE_CTRLS
2287 register unsigned int i;
2288
2289 for (i = 0; statcache_acttab[i].act_action; i++) {
2290 (void) pr_ctrls_unregister(&statcache_module,
2291 statcache_acttab[i].act_action);
2292 }
2293 #endif /* PR_USE_CTRLS */
2294
2295 pr_event_unregister(&statcache_module, NULL, NULL);
2296
2297 if (statcache_tabfh) {
2298 (void) pr_fsio_close(statcache_tabfh);
2299 statcache_tabfh = NULL;
2300 }
2301
2302 if (statcache_pool) {
2303 destroy_pool(statcache_pool);
2304 statcache_pool = NULL;
2305 }
2306
2307 statcache_engine = FALSE;
2308 }
2309 }
2310 #endif /* PR_SHARED_MODULE */
2311
statcache_postparse_ev(const void * event_data,void * user_data)2312 static void statcache_postparse_ev(const void *event_data, void *user_data) {
2313 size_t tablesz;
2314 void *table;
2315 int xerrno;
2316 struct stat st;
2317
2318 if (statcache_engine == FALSE) {
2319 return;
2320 }
2321
2322 /* Make sure the StatCacheTable exists. */
2323 if (statcache_table_path == NULL) {
2324 pr_log_pri(PR_LOG_NOTICE, MOD_STATCACHE_VERSION
2325 ": missing required StatCacheTable configuration");
2326 pr_session_disconnect(&statcache_module, PR_SESS_DISCONNECT_BAD_CONFIG,
2327 NULL);
2328 }
2329
2330 PRIVS_ROOT
2331 statcache_tabfh = pr_fsio_open(statcache_table_path, O_RDWR|O_CREAT);
2332 xerrno = errno;
2333 PRIVS_RELINQUISH
2334
2335 if (statcache_tabfh == NULL) {
2336 pr_log_pri(PR_LOG_NOTICE, MOD_STATCACHE_VERSION
2337 ": unable to open StatCacheTable '%s': %s", statcache_table_path,
2338 strerror(xerrno));
2339 pr_session_disconnect(&statcache_module, PR_SESS_DISCONNECT_BAD_CONFIG,
2340 NULL);
2341 }
2342
2343 if (pr_fsio_fstat(statcache_tabfh, &st) < 0) {
2344 xerrno = errno;
2345
2346 pr_log_pri(PR_LOG_NOTICE, MOD_STATCACHE_VERSION
2347 ": unable to stat StatCacheTable '%s': %s", statcache_table_path,
2348 strerror(xerrno));
2349 pr_fsio_close(statcache_tabfh);
2350 statcache_tabfh = NULL;
2351 pr_session_disconnect(&statcache_module, PR_SESS_DISCONNECT_BAD_CONFIG,
2352 NULL);
2353 }
2354
2355 if (S_ISDIR(st.st_mode)) {
2356 xerrno = EISDIR;
2357
2358 pr_log_pri(PR_LOG_NOTICE, MOD_STATCACHE_VERSION
2359 ": unable to stat StatCacheTable '%s': %s", statcache_table_path,
2360 strerror(xerrno));
2361 pr_fsio_close(statcache_tabfh);
2362 statcache_tabfh = NULL;
2363 pr_session_disconnect(&statcache_module, PR_SESS_DISCONNECT_BAD_CONFIG,
2364 NULL);
2365 }
2366
2367 if (statcache_tabfh->fh_fd <= STDERR_FILENO) {
2368 int usable_fd;
2369
2370 usable_fd = pr_fs_get_usable_fd(statcache_tabfh->fh_fd);
2371 if (usable_fd < 0) {
2372 pr_log_debug(DEBUG0, MOD_STATCACHE_VERSION
2373 "warning: unable to find good fd for StatCacheTable %s: %s",
2374 statcache_table_path, strerror(errno));
2375
2376 } else {
2377 close(statcache_tabfh->fh_fd);
2378 statcache_tabfh->fh_fd = usable_fd;
2379 }
2380 }
2381
2382 /* The size of the table, in bytes, is:
2383 *
2384 * sizeof(header) + sizeof(data)
2385 *
2386 * thus:
2387 *
2388 * header = 6 * sizeof(uint32_t)
2389 * data = capacity * sizeof(struct statcache_entry)
2390 */
2391
2392 tablesz = (6 * sizeof(uint32_t)) +
2393 (statcache_capacity * sizeof(struct statcache_entry));
2394
2395 /* Get the shm for storing all of our stat info. */
2396 table = statcache_get_shm(statcache_tabfh, tablesz);
2397 if (table == NULL) {
2398 pr_log_pri(PR_LOG_NOTICE, MOD_STATCACHE_VERSION
2399 ": unable to get shared memory for StatCacheTable '%s': %s",
2400 statcache_table_path, strerror(errno));
2401 pr_session_disconnect(&statcache_module, PR_SESS_DISCONNECT_BAD_CONFIG,
2402 NULL);
2403 }
2404
2405 pr_trace_msg(trace_channel, 9,
2406 "allocated %lu bytes of shared memory for %u cache entries",
2407 (unsigned long) tablesz, statcache_capacity);
2408
2409 statcache_table = table;
2410 statcache_tablesz = tablesz;
2411 statcache_table_stats = statcache_table;
2412 statcache_table_data = (struct statcache_entry *) (statcache_table + (6 * sizeof(uint32_t)));
2413
2414 statcache_nrows = (statcache_capacity / STATCACHE_COLS_PER_ROW);
2415 statcache_rowlen = (STATCACHE_COLS_PER_ROW * sizeof(struct statcache_entry));
2416
2417 return;
2418 }
2419
statcache_restart_ev(const void * event_data,void * user_data)2420 static void statcache_restart_ev(const void *event_data, void *user_data) {
2421 #ifdef PR_USE_CTRLS
2422 register unsigned int i;
2423 #endif /* PR_USE_CTRLS */
2424
2425 if (statcache_pool) {
2426 destroy_pool(statcache_pool);
2427 statcache_pool = NULL;
2428 }
2429
2430 statcache_pool = make_sub_pool(permanent_pool);
2431 pr_pool_tag(statcache_pool, MOD_STATCACHE_VERSION);
2432
2433 #ifdef PR_USE_CTRLS
2434 /* Register the control handlers */
2435 for (i = 0; statcache_acttab[i].act_action; i++) {
2436
2437 /* Allocate and initialize the ACL for this control. */
2438 statcache_acttab[i].act_acl = pcalloc(statcache_pool, sizeof(ctrls_acl_t));
2439 pr_ctrls_init_acl(statcache_acttab[i].act_acl);
2440 }
2441 #endif /* PR_USE_CTRLS */
2442
2443 /* Close the StatCacheTable file descriptor; it will be reopened by the
2444 * postparse event listener.
2445 */
2446 if (statcache_tabfh != NULL) {
2447 pr_fsio_close(statcache_tabfh);
2448 statcache_tabfh = NULL;
2449 }
2450
2451 return;
2452 }
2453
2454 /* Initialization routines
2455 */
2456
statcache_init(void)2457 static int statcache_init(void) {
2458 #ifdef PR_USE_CTRLS
2459 register unsigned int i = 0;
2460 #endif /* PR_USE_CTRLS */
2461
2462 /* Allocate the pool for this module's use. */
2463 statcache_pool = make_sub_pool(permanent_pool);
2464 pr_pool_tag(statcache_pool, MOD_STATCACHE_VERSION);
2465
2466 #ifdef PR_USE_CTRLS
2467 /* Register the control handlers */
2468 for (i = 0; statcache_acttab[i].act_action; i++) {
2469
2470 /* Allocate and initialize the ACL for this control. */
2471 statcache_acttab[i].act_acl = pcalloc(statcache_pool, sizeof(ctrls_acl_t));
2472 pr_ctrls_init_acl(statcache_acttab[i].act_acl);
2473
2474 if (pr_ctrls_register(&statcache_module, statcache_acttab[i].act_action,
2475 statcache_acttab[i].act_desc, statcache_acttab[i].act_cb) < 0) {
2476 pr_log_pri(PR_LOG_INFO, MOD_STATCACHE_VERSION
2477 ": error registering '%s' control: %s",
2478 statcache_acttab[i].act_action, strerror(errno));
2479 }
2480 }
2481 #endif /* PR_USE_CTRLS */
2482
2483 #if defined(PR_SHARED_MODULE)
2484 pr_event_register(&statcache_module, "core.module-unload",
2485 statcache_mod_unload_ev, NULL);
2486 #endif /* PR_SHARED_MODULE */
2487 pr_event_register(&statcache_module, "core.postparse",
2488 statcache_postparse_ev, NULL);
2489 pr_event_register(&statcache_module, "core.restart",
2490 statcache_restart_ev, NULL);
2491 pr_event_register(&statcache_module, "core.shutdown",
2492 statcache_shutdown_ev, NULL);
2493
2494 return 0;
2495 }
2496
/* Session initialization: reads the per-vhost StatCacheEngine setting.
 * Always returns 0.
 */
static int statcache_sess_init(void) {
  config_rec *c;

  /* Listen for HOST-triggered session reinits, so the configuration can be
   * re-read for the new vhost.
   */
  pr_event_register(&statcache_module, "core.session-reinit",
    statcache_sess_reinit_ev, NULL);

  /* Check to see if the StatCacheEngine directive is set to 'off'. */
  c = find_config(main_server->conf, CONF_PARAM, "StatCacheEngine", FALSE);
  if (c != NULL) {
    statcache_engine = *((int *) c->argv[0]);
  }

  return 0;
}
2511
2512 #ifdef PR_USE_CTRLS
2513
2514 /* Controls table
2515 */
static ctrls_acttab_t statcache_acttab[] = {
  /* Action name, description, ACL (allocated at init/restart), handler. */
  { "statcache", "display cache stats", NULL,
    statcache_handle_statcache },

  /* Sentinel terminating the table. */
  { NULL, NULL, NULL, NULL }
};
2522 #endif /* PR_USE_CTRLS */
2523
2524 /* Module API tables
2525 */
2526
static conftable statcache_conftab[] = {
  /* Configuration directives handled by this module; the set_* handlers
   * are defined earlier in this file.
   */
  { "StatCacheCapacity", set_statcachecapacity, NULL },
  { "StatCacheControlsACLs", set_statcachectrlsacls, NULL },
  { "StatCacheEngine", set_statcacheengine, NULL },
  { "StatCacheMaxAge", set_statcachemaxage, NULL },
  { "StatCacheTable", set_statcachetable, NULL },
  { NULL }
};
2535
static cmdtable statcache_cmdtab[] = {
  /* After successful authentication, install the caching FS callbacks. */
  { POST_CMD, C_PASS, G_NONE, statcache_post_pass, FALSE, FALSE },

#ifdef MADV_WILLNEED
  /* If the necessary madvise(2) flag is present, register a PRE_CMD
   * handler for directory listings, to suggest to the kernel that
   * it read in some pages of the mmap()'d region.
   */
  { PRE_CMD, C_LIST, G_NONE, statcache_pre_list, FALSE, FALSE },
  { PRE_CMD, C_MLSD, G_NONE, statcache_pre_list, FALSE, FALSE },
  { PRE_CMD, C_NLST, G_NONE, statcache_pre_list, FALSE, FALSE },
#endif /* MADV_WILLNEED */

  { 0, NULL }
};
2551
module statcache_module = {
  /* Always NULL for ProFTPD modules. */
  NULL, NULL,

  /* Module API version 2.0 */
  0x20,

  /* Module name */
  "statcache",

  /* Module configuration handler table */
  statcache_conftab,

  /* Module command handler table */
  statcache_cmdtab,

  /* Module authentication handler table (none for this module) */
  NULL,

  /* Module initialization function */
  statcache_init,

  /* Session initialization function */
  statcache_sess_init,

  /* Module version */
  MOD_STATCACHE_VERSION
};
2579