/*
   BAREOS® - Backup Archiving REcovery Open Sourced

   Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
   Copyright (C) 2011-2012 Planets Communications B.V.
   Copyright (C) 2013-2019 Bareos GmbH & Co. KG

   This program is Free Software; you can redistribute it and/or
   modify it under the terms of version three of the GNU Affero General Public
   License as published by the Free Software Foundation and included
   in the file LICENSE.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.
*/

/**
 * This code implements a cache of the currently mounted filesystems. It uses
 * the (mostly in-kernel) mount information of the OS and exports the
 * different OS specific interfaces through one generic interface. We use a
 * linked list cache which is accessed using a binary search on the device id,
 * and we remember the previous cache hit because most of the time we are
 * called repeatedly for the same device, which gives us a very fast code
 * path for that case.
 *
 * This interface is implemented for the following OSes:
 *
 * - Linux
 * - HPUX
 * - DARWIN (OSX)
 * - IRIX
 * - AIX
 * - OSF1 (Tru64)
 * - Solaris
 *
 * Currently we only use this code for fstype determination on Linux and
 * OSF1. On the other OSes we can use the fstype already present in the stat
 * structure.
 *
 * This code replaces the big switch we used before, which was based on the
 * SUPER_MAGIC value in the statfs(2) structure and needed extra code for
 * each new filesystem added to the OS; for Linux that happens quite often,
 * as it has a lot of different filesystems. This new implementation avoids
 * that, as we use the in-kernel data from /proc/mounts on Linux, which
 * automatically includes any filesystem newly added to the kernel.
 */
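
/*
 * Illustrative usage sketch (not part of this translation unit): a caller
 * resolves the mount entry of a file via its device id and releases the
 * reference when done. The helper name IsOnNfs is hypothetical and only
 * demonstrates the find/release pairing this cache expects.
 *
 *   #include "include/bareos.h"
 *   #include "mntent_cache.h"
 *   #include <sys/stat.h>
 *
 *   static bool IsOnNfs(const char* path)
 *   {
 *     struct stat st;
 *
 *     if (stat(path, &st) != 0) { return false; }
 *
 *     // Takes a reference on the cache entry (and builds the cache lazily).
 *     mntent_cache_entry_t* mce = find_mntent_mapping(st.st_dev);
 *     if (!mce) { return false; }
 *
 *     bool retval = bstrcmp(mce->fstype, "nfs");
 *
 *     // Drop the reference so a later cache repopulation can free the entry.
 *     ReleaseMntentMapping(mce);
 *     return retval;
 *   }
 */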

/*
 * Marco van Wieringen, August 2009
 */

#include "include/bareos.h"
#include "mntent_cache.h"
#include "lib/dlist.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>

#if defined(HAVE_GETMNTENT)
#  if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_AIX_OS)
#    include <mntent.h>
#  elif defined(HAVE_SUN_OS)
#    include <sys/mnttab.h>
#  elif defined(HAVE_HURD_OS)
#    include <hurd/paths.h>
#    include <mntent.h>
#  endif /* HAVE_HURD_OS */
#elif defined(HAVE_GETMNTINFO)
#  if defined(HAVE_OPENBSD_OS)
#    include <sys/param.h>
#    include <sys/mount.h>
#  elif defined(HAVE_NETBSD_OS)
#    include <sys/types.h>
#    include <sys/statvfs.h>
#  else
#    include <sys/param.h>
#    include <sys/ucred.h>
#    include <sys/mount.h>
#  endif
#elif defined(HAVE_AIX_OS)
#  include <fshelp.h>
#  include <sys/vfs.h>
#elif defined(HAVE_OSF1_OS)
#  include <sys/mount.h>
#endif

/*
 * Data protected by the mutex lock.
 */
static pthread_mutex_t mntent_cache_lock = PTHREAD_MUTEX_INITIALIZER;
static mntent_cache_entry_t* previous_cache_hit = NULL;
static dlist* mntent_cache_entries = NULL;

/*
 * Last time a rescan of the mountlist took place.
 */
static time_t last_rescan = 0;

static const char* skipped_fs_types[] = {
#if defined(HAVE_LINUX_OS)
    "rootfs",
#endif
    NULL};

/**
 * Simple comparison function for binary search and insert.
 */
static int CompareMntentMapping(void* e1, void* e2)
{
  mntent_cache_entry_t *mce1, *mce2;

  mce1 = (mntent_cache_entry_t*)e1;
  mce2 = (mntent_cache_entry_t*)e2;

  if (mce1->dev == mce2->dev) {
    return 0;
  } else {
    return (mce1->dev < mce2->dev) ? -1 : 1;
  }
}

/**
 * Free the members of the mntent_cache structure, not the structure itself.
 */
static inline void DestroyMntentCacheEntry(mntent_cache_entry_t* mce)
{
  if (mce->mntopts) { free(mce->mntopts); }
  free(mce->fstype);
  free(mce->mountpoint);
  free(mce->special);
}

/**
 * Add a new entry to the cache.
 * This function should be called with a write lock on the mntent_cache.
 */
static mntent_cache_entry_t* add_mntent_mapping(uint32_t dev,
                                                const char* special,
                                                const char* mountpoint,
                                                const char* fstype,
                                                const char* mntopts)
{
  mntent_cache_entry_t* mce;

  mce = (mntent_cache_entry_t*)malloc(sizeof(mntent_cache_entry_t));
  /*
   * Reset all fields by copying a value-initialized entry.
   */
  mntent_cache_entry_t empty_mntent_cache_entry{};
  *mce = empty_mntent_cache_entry;
  mce->dev = dev;
  mce->special = strdup(special);
  mce->mountpoint = strdup(mountpoint);
  mce->fstype = strdup(fstype);
  if (mntopts) { mce->mntopts = strdup(mntopts); }

  mntent_cache_entries->binary_insert(mce, CompareMntentMapping);

  return mce;
}

/**
 * Update an entry in the cache.
 * This function should be called with a write lock on the mntent_cache.
 */
static mntent_cache_entry_t* update_mntent_mapping(uint32_t dev,
                                                   const char* special,
                                                   const char* mountpoint,
                                                   const char* fstype,
                                                   const char* mntopts)
{
  mntent_cache_entry_t lookup, *mce;

  lookup.dev = dev;
  mce = (mntent_cache_entry_t*)mntent_cache_entries->binary_search(
      &lookup, CompareMntentMapping);
  if (mce) {
    /*
     * See if the info changed.
     */
    if (!bstrcmp(mce->special, special)) {
      free(mce->special);
      mce->special = strdup(special);
    }

    if (!bstrcmp(mce->mountpoint, mountpoint)) {
      free(mce->mountpoint);
      mce->mountpoint = strdup(mountpoint);
    }

    if (!bstrcmp(mce->fstype, fstype)) {
      free(mce->fstype);
      mce->fstype = strdup(fstype);
    }

    /*
     * mntopts can be NULL (e.g. on the getmntinfo() based platforms).
     */
    if (mntopts && (!mce->mntopts || !bstrcmp(mce->mntopts, mntopts))) {
      if (mce->mntopts) { free(mce->mntopts); }
      mce->mntopts = strdup(mntopts);
    }
  } else {
    mce = add_mntent_mapping(dev, special, mountpoint, fstype, mntopts);
  }

  mce->validated = true;
  return mce;
}

static inline bool SkipFstype(const char* fstype)
{
  int i;

  for (i = 0; skipped_fs_types[i]; i++) {
    if (bstrcmp(fstype, skipped_fs_types[i])) return true;
  }

  return false;
}

/**
 * OS specific function to load the different mntents into the cache.
 * This function should be called with a write lock on the mntent_cache.
 */
static void refresh_mount_cache(
    mntent_cache_entry_t* handle_entry(uint32_t dev,
                                       const char* special,
                                       const char* mountpoint,
                                       const char* fstype,
                                       const char* mntopts))
{
#if defined(HAVE_GETMNTENT)
  FILE* fp;
  struct stat st;
#  if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_IRIX_OS) \
      || defined(HAVE_AIX_OS) || defined(HAVE_HURD_OS)
  struct mntent* mnt;

#    if defined(HAVE_LINUX_OS)
  if ((fp = setmntent("/proc/mounts", "r")) == (FILE*)NULL) {
    if ((fp = setmntent(_PATH_MOUNTED, "r")) == (FILE*)NULL) { return; }
  }
#    elif defined(HAVE_HPUX_OS)
  if ((fp = fopen(MNT_MNTTAB, "r")) == (FILE*)NULL) { return; }
#    elif defined(HAVE_IRIX_OS)
  if ((fp = setmntent(MOUNTED, "r")) == (FILE*)NULL) { return; }
#    elif defined(HAVE_AIX_OS)
  if ((fp = setmntent(MNTTAB, "r")) == (FILE*)NULL) { return; }
#    elif defined(HAVE_HURD_OS)
  if ((fp = setmntent(_PATH_MNTTAB, "r")) == (FILE*)NULL) { return; }
#    endif

  while ((mnt = getmntent(fp)) != (struct mntent*)NULL) {
    if (SkipFstype(mnt->mnt_type)) { continue; }

    if (stat(mnt->mnt_dir, &st) < 0) { continue; }

    handle_entry(st.st_dev, mnt->mnt_fsname, mnt->mnt_dir, mnt->mnt_type,
                 mnt->mnt_opts);
  }

  endmntent(fp);
#  elif defined(HAVE_SUN_OS)
  struct mnttab mnt;

  if ((fp = fopen(MNTTAB, "r")) == (FILE*)NULL) return;

  while (getmntent(fp, &mnt) == 0) {
    if (SkipFstype(mnt.mnt_fstype)) { continue; }

    if (stat(mnt.mnt_mountp, &st) < 0) { continue; }

    handle_entry(st.st_dev, mnt.mnt_special, mnt.mnt_mountp, mnt.mnt_fstype,
                 mnt.mnt_mntopts);
  }

  fclose(fp);
#  endif /* HAVE_SUN_OS */
#elif defined(HAVE_GETMNTINFO)
  int cnt;
  struct stat st;
#  if defined(HAVE_NETBSD_OS)
  struct statvfs* mntinfo;
#  else
  struct statfs* mntinfo;
#  endif
#  if defined(ST_NOWAIT)
  int flags = ST_NOWAIT;
#  elif defined(MNT_NOWAIT)
  int flags = MNT_NOWAIT;
#  else
  int flags = 0;
#  endif

  if ((cnt = getmntinfo(&mntinfo, flags)) > 0) {
    while (cnt > 0) {
      if (!SkipFstype(mntinfo->f_fstypename)
          && stat(mntinfo->f_mntonname, &st) == 0) {
        handle_entry(st.st_dev, mntinfo->f_mntfromname, mntinfo->f_mntonname,
                     mntinfo->f_fstypename, NULL);
      }
      mntinfo++;
      cnt--;
    }
  }
#elif defined(HAVE_AIX_OS)
  int bufsize;
  char *entries, *current;
  struct vmount* vmp;
  struct stat st;
  struct vfs_ent* ve;
  int n_entries, cnt;

  /*
   * First call with a too small buffer: mntctl() stores the required
   * buffer size in bufsize and returns 0.
   */
  if (mntctl(MCTL_QUERY, sizeof(bufsize), (struct vmount*)&bufsize) != 0) {
    return;
  }

  entries = (char*)malloc(bufsize);
  if ((n_entries = mntctl(MCTL_QUERY, bufsize, (struct vmount*)entries)) < 0) {
    free(entries);
    return;
  }

  cnt = 0;
  current = entries;
  while (cnt < n_entries) {
    vmp = (struct vmount*)current;

    /*
     * Resolve the filesystem name before using it and always advance to the
     * next entry, also when we skip this one.
     */
    ve = getvfsbytype(vmp->vmt_gfstype);
    if (ve && ve->vfsent_name && !SkipFstype(ve->vfsent_name)
        && stat(current + vmp->vmt_data[VMT_STUB].vmt_off, &st) == 0) {
      handle_entry(st.st_dev, current + vmp->vmt_data[VMT_OBJECT].vmt_off,
                   current + vmp->vmt_data[VMT_STUB].vmt_off, ve->vfsent_name,
                   current + vmp->vmt_data[VMT_ARGS].vmt_off);
    }

    current = current + vmp->vmt_length;
    cnt++;
  }
  free(entries);
#elif defined(HAVE_OSF1_OS)
  struct statfs *entries, *current;
  struct stat st;
  int n_entries, cnt;
  int size;

  if ((n_entries = getfsstat((struct statfs*)0, 0L, MNT_NOWAIT)) < 0) {
    return;
  }

  size = (n_entries + 1) * sizeof(struct statfs);
  entries = (struct statfs*)malloc(size);

  if ((n_entries = getfsstat(entries, size, MNT_NOWAIT)) < 0) {
    free(entries);
    return;
  }

  cnt = 0;
  current = entries;
  while (cnt < n_entries) {
    /*
     * Always advance to the next entry, also when we skip this one.
     */
    if (!SkipFstype(current->f_fstypename)
        && stat(current->f_mntonname, &st) == 0) {
      handle_entry(st.st_dev, current->f_mntfromname, current->f_mntonname,
                   current->f_fstypename, NULL);
    }
    current++;
    cnt++;
  }
  free(entries);
#endif
}

/**
 * Initialize the cache for use.
 * This function should be called with a write lock on the mntent_cache.
 */
static inline void InitializeMntentCache(void)
{
  mntent_cache_entry_t* mce = NULL;

  mntent_cache_entries = new dlist(mce, &mce->link);

  /**
   * Refresh the cache.
   */
  refresh_mount_cache(add_mntent_mapping);
}

/**
 * Repopulate the cache with new data.
 * This function should be called with a write lock on the mntent_cache.
 */
static void RepopulateMntentCache(void)
{
  mntent_cache_entry_t *mce, *next_mce;

  /**
   * Reset the validated flag on all entries in the cache.
   */
  foreach_dlist (mce, mntent_cache_entries) {
    mce->validated = false;
  }

  /**
   * Refresh the cache.
   */
  refresh_mount_cache(update_mntent_mapping);

  /**
   * Remove any entry that was not validated
   * in the refresh run above.
   */
  mce = (mntent_cache_entry_t*)mntent_cache_entries->first();
  while (mce) {
    next_mce = (mntent_cache_entry_t*)mntent_cache_entries->next(mce);
    if (!mce->validated) {
      /**
       * Invalidate the previous cache hit if we are removing it.
       */
      if (previous_cache_hit == mce) { previous_cache_hit = NULL; }

      /**
       * See if this is an outstanding entry,
       * i.e. when reference_count > 0: mark
       * the entry as destroyed and remove it
       * from the list, but don't free the data
       * yet. The ReleaseMntentMapping function
       * will handle these dangling entries.
       */
      if (mce->reference_count == 0) {
        mntent_cache_entries->remove(mce);
        DestroyMntentCacheEntry(mce);
        free(mce);
      } else {
        mce->destroyed = true;
        mntent_cache_entries->remove(mce);
      }
    }
    mce = next_mce;
  }
}

/**
 * Flush the current content from the cache.
 */
void FlushMntentCache(void)
{
  mntent_cache_entry_t* mce;

  /**
   * Lock the cache.
   */
  P(mntent_cache_lock);

  if (mntent_cache_entries) {
    previous_cache_hit = NULL;
    foreach_dlist (mce, mntent_cache_entries) {
      DestroyMntentCacheEntry(mce);
    }
    mntent_cache_entries->destroy();
    delete mntent_cache_entries;
    mntent_cache_entries = NULL;
  }

  V(mntent_cache_lock);
}

/**
 * Release a mntent mapping reference returned
 * by a successful call to find_mntent_mapping.
 */
void ReleaseMntentMapping(mntent_cache_entry_t* mce)
{
  /**
   * Lock the cache.
   */
  P(mntent_cache_lock);

  mce->reference_count--;

  /**
   * See if this entry is a dangling entry.
   */
  if (mce->reference_count == 0 && mce->destroyed) {
    DestroyMntentCacheEntry(mce);
    free(mce);
  }

  V(mntent_cache_lock);
}

/**
 * Find a mapping in the cache.
 */
mntent_cache_entry_t* find_mntent_mapping(uint32_t dev)
{
  mntent_cache_entry_t lookup, *mce = NULL;
  time_t now;

  /**
   * Lock the cache.
   */
  P(mntent_cache_lock);

  /**
   * Shortcut when we get a request for the same device again.
   */
  if (previous_cache_hit && previous_cache_hit->dev == dev) {
    mce = previous_cache_hit;
    mce->reference_count++;
    goto ok_out;
  }

  /**
   * Initialize the cache if that was not done before.
   */
  if (!mntent_cache_entries) {
    InitializeMntentCache();
    last_rescan = time(NULL);
  } else {
    /**
     * We rescan the mountlist when we are called more than
     * MNTENT_RESCAN_INTERVAL seconds after the last rescan.
     * This way we never work with data older than
     * MNTENT_RESCAN_INTERVAL seconds.
     */
    now = time(NULL);
    if ((now - last_rescan) > MNTENT_RESCAN_INTERVAL) {
      RepopulateMntentCache();
      last_rescan = time(NULL);
    }
  }

  lookup.dev = dev;
  mce = (mntent_cache_entry_t*)mntent_cache_entries->binary_search(
      &lookup, CompareMntentMapping);

  /**
   * If we fail to look up the mountpoint, it is probably a mountpoint
   * added after we did our initial scan. Rescan the mountlist and try
   * the lookup again.
   */
  if (!mce) {
    RepopulateMntentCache();
    mce = (mntent_cache_entry_t*)mntent_cache_entries->binary_search(
        &lookup, CompareMntentMapping);
  }

  /**
   * Store the last successful lookup as the previous_cache_hit
   * and increment the reference count.
   */
  if (mce) {
    previous_cache_hit = mce;
    mce->reference_count++;
  }

ok_out:
  V(mntent_cache_lock);
  return mce;
}