/*-
 * Copyright (c) 1999-2008 Apple Inc.
 * Copyright (c) 2006-2008 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/ipc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/sx.h>
#include <sys/sysproto.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/ucred.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <sys/unistd.h>
#include <sys/vnode.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>
#include <bsm/audit_kevents.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <vm/uma.h>

/*
 * Worker thread that will schedule disk I/O, etc.
 */
static struct proc		*audit_thread;

/*
 * audit_cred and audit_vp are the stored credential and vnode to use for
 * the active audit trail.  They are protected by the audit worker lock,
 * which will be held across all I/O and all rotation to prevent them from
 * being replaced (rotated) while in use.  The audit_file_rotate_wait flag
 * is set when the kernel has delivered a trigger to auditd to rotate the
 * trail, and is cleared when the next rotation takes place.  It is also
 * protected by the audit worker lock.
 */
static int		 audit_file_rotate_wait;
static struct ucred	*audit_cred;
static struct vnode	*audit_vp;
static struct sx	 audit_worker_lock;

#define	AUDIT_WORKER_LOCK_INIT()	sx_init(&audit_worker_lock, \
					    "audit_worker_lock")
#define	AUDIT_WORKER_LOCK_ASSERT()	sx_assert(&audit_worker_lock, \
					    SA_XLOCKED)
#define	AUDIT_WORKER_LOCK()		sx_xlock(&audit_worker_lock)
#define	AUDIT_WORKER_UNLOCK()		sx_xunlock(&audit_worker_lock)

/*
 * Write an audit record to a file, performed as the last stage after both
 * preselection and BSM conversion.  Both space management and write failures
 * are handled in this function.
 *
 * No attempt is made to deal with possible failure to deliver a trigger to
 * the audit daemon, since the message is asynchronous anyway.
 */
static void
audit_record_write(struct vnode *vp, struct ucred *cred, void *data,
    size_t len)
{
	static struct timeval last_lowspace_trigger;
	static struct timeval last_fail;
	static int cur_lowspace_trigger;
	struct statfs *mnt_stat;
	int error, vfslocked;
	static int cur_fail;
	struct vattr vattr;
	long temp;

	AUDIT_WORKER_LOCK_ASSERT();

	if (vp == NULL)
		return;

	mnt_stat = &vp->v_mount->mnt_stat;
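	/*
	 * The trail may live on a non-MPSAFE file system, in which case
	 * VFS_LOCK_GIANT() acquires Giant around the vnode operations
	 * below; VFS_UNLOCK_GIANT() drops it again on the way out.
	 */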
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);

	/*
	 * First, gather statistics on the audit log file and file system so
	 * that we know how we're doing on space.  Consider failure of these
	 * operations to indicate a future inability to write to the file.
	 */
	error = VFS_STATFS(vp->v_mount, mnt_stat, curthread);
	if (error)
		goto fail;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		goto fail;
	audit_fstat.af_currsz = vattr.va_size;

	/*
	 * We handle four different space-related limits:
	 *
	 * - A fixed (hard) limit on the minimum number of free blocks we
	 *   require on the file system, which, when violated, results in
	 *   record loss, a trigger, and a possible fail stop to avoid
	 *   violating invariants.
	 *
	 * - An administrative (soft) limit, which, when fallen below,
	 *   results in the kernel notifying the audit daemon of low space.
	 *
	 * - An audit trail size limit, which, when exceeded, results in the
	 *   kernel notifying the audit daemon that rotation is desired.
	 *
	 * - The total size of the pending kernel audit record queue
	 *   exceeding free space, which can lead to a possible fail stop
	 *   (with drain), in order to prevent violating invariants.
	 *   Failure here doesn't halt immediately, but prevents new records
	 *   from being generated.
	 *
	 * Possibly, the last of these should be handled differently, always
	 * allowing a full queue to be lost, rather than trying to prevent
	 * loss.
	 *
	 * First, handle the hard limit, which generates a trigger and may
	 * fail stop.  This is handled in the same manner as ENOSPC from
	 * VOP_WRITE, and results in record loss.
	 */
	if (mnt_stat->f_bfree < AUDIT_HARD_LIMIT_FREE_BLOCKS) {
		error = ENOSPC;
		goto fail_enospc;
	}

	/*
	 * Second, handle falling below the soft limit, if defined; we send
	 * the daemon a trigger and continue processing the record.  Triggers
	 * are limited to 1/sec.
	 */
	if (audit_qctrl.aq_minfree != 0) {
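		/*
		 * aq_minfree is a percentage; the threshold below is
		 * computed with integer arithmetic, so 100 / aq_minfree
		 * truncates and the effective cutoff can sit slightly above
		 * the configured percentage.
		 */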
		temp = mnt_stat->f_blocks / (100 / audit_qctrl.aq_minfree);
		if (mnt_stat->f_bfree < temp) {
			if (ppsratecheck(&last_lowspace_trigger,
			    &cur_lowspace_trigger, 1)) {
				(void)audit_send_trigger(
				    AUDIT_TRIGGER_LOW_SPACE);
				printf("Warning: disk space low (< %d%% free) "
				    "on audit log file-system\n",
				    audit_qctrl.aq_minfree);
			}
		}
	}

	/*
	 * If the current file is getting full, generate a rotation trigger
	 * to the daemon.  This is only approximate, which is fine as more
	 * records may be generated before the daemon rotates the file.
	 */
	if ((audit_fstat.af_filesz != 0) && (audit_file_rotate_wait == 0) &&
	    (vattr.va_size >= audit_fstat.af_filesz)) {
		AUDIT_WORKER_LOCK_ASSERT();

		audit_file_rotate_wait = 1;
		(void)audit_send_trigger(AUDIT_TRIGGER_ROTATE_KERNEL);
	}

	/*
	 * If the estimated amount of audit data in the audit event queue
	 * (plus records allocated but not yet queued) has reached the amount
	 * of free space on the disk, then we need to go into an audit fail
	 * stop state, in which we do not permit the allocation/committing of
	 * any new audit records.  We continue to process records but don't
	 * allow any activities that might generate new records.  In the
	 * future, we might want to detect when space is available again and
	 * allow operation to continue, but this behavior is sufficient to
	 * meet fail stop requirements in CAPP.
	 */
	if (audit_fail_stop) {
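		/*
		 * Estimate the worst-case on-disk footprint of the queued
		 * records plus those allocated but not yet queued (and the
		 * current one): each is assumed to take up to
		 * MAX_AUDIT_RECORD_SIZE bytes, converted to file system
		 * blocks for comparison against f_bfree.
		 */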
		if ((unsigned long)((audit_q_len + audit_pre_q_len + 1) *
		    MAX_AUDIT_RECORD_SIZE) / mnt_stat->f_bsize >=
		    (unsigned long)(mnt_stat->f_bfree)) {
			if (ppsratecheck(&last_fail, &cur_fail, 1))
				printf("audit_record_write: free space "
				    "below size of audit queue, failing "
				    "stop\n");
			audit_in_failure = 1;
		} else if (audit_in_failure) {
			/*
			 * Note: if we want to handle recovery, this is the
			 * spot to do it: unset audit_in_failure, and issue a
			 * wakeup on the cv.
			 */
		}
	}

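	/*
	 * Append the record to the trail; IO_APPEND writes at end-of-file
	 * and IO_UNIT requests that the write be performed as a single
	 * unit, so a failed write should not leave a partial record behind.
	 */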
	error = vn_rdwr(UIO_WRITE, vp, data, len, (off_t)0, UIO_SYSSPACE,
	    IO_APPEND|IO_UNIT, cred, NULL, NULL, curthread);
	if (error == ENOSPC)
		goto fail_enospc;
	else if (error)
		goto fail;

	/*
	 * Catch completion of a queue drain here; if we're draining and the
	 * queue is now empty, fail stop.  Note that audit_fail_stop is
	 * implicitly true here, since audit_in_failure can only be set if
	 * audit_fail_stop is set.
	 *
	 * Note: if we handle recovery from audit_in_failure, then we need to
	 * make the panic here conditional.
	 */
	if (audit_in_failure) {
		if (audit_q_len == 0 && audit_pre_q_len == 0) {
			VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_FSYNC(vp, MNT_WAIT, curthread);
			VOP_UNLOCK(vp, 0);
			panic("Audit store overflow; record queue drained.");
		}
	}

	VFS_UNLOCK_GIANT(vfslocked);
	return;

fail_enospc:
	/*
	 * ENOSPC is considered a special case with respect to failures, as
	 * this can reflect either our preemptive detection of insufficient
	 * space, or ENOSPC returned by the vnode write call.
	 */
	if (audit_fail_stop) {
		VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
		(void)VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		panic("Audit log space exhausted and fail-stop set.");
	}
	(void)audit_send_trigger(AUDIT_TRIGGER_NO_SPACE);
	audit_suspended = 1;

	/* FALLTHROUGH */
fail:
	/*
	 * We have failed to write to the file, so the current record is
	 * lost, which may require an immediate system halt.
	 */
	if (audit_panic_on_write_fail) {
		VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
		(void)VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		panic("audit_worker: write error %d\n", error);
	} else if (ppsratecheck(&last_fail, &cur_fail, 1))
		printf("audit_worker: write error %d\n", error);
	VFS_UNLOCK_GIANT(vfslocked);
}

/*
 * Given a kernel audit record, process as required.  Kernel audit records
 * are converted to one, or possibly two, BSM records, depending on whether
 * a user audit record is also present.  Kernel records need to be converted
 * to BSM before they can be written out.  Both types will be written to
 * disk and to audit pipes.
 */
static void
audit_worker_process_record(struct kaudit_record *ar)
{
	struct au_record *bsm;
	au_class_t class;
	au_event_t event;
	au_id_t auid;
	int error, sorf;
	int locked;

	/*
	 * We hold the audit worker lock over both writes, if there are two,
	 * so that the two records won't be split across a rotation and end
	 * up in two different trail files.
	 */
	if (((ar->k_ar_commit & AR_COMMIT_USER) &&
	    (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) ||
	    (ar->k_ar_commit & AR_PRESELECT_TRAIL)) {
		AUDIT_WORKER_LOCK();
		locked = 1;
	} else
		locked = 0;

	/*
	 * First, handle the user record, if any: commit to the system trail
	 * and audit pipes as selected.
	 */
	if ((ar->k_ar_commit & AR_COMMIT_USER) &&
	    (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) {
		AUDIT_WORKER_LOCK_ASSERT();
		audit_record_write(audit_vp, audit_cred, ar->k_udata,
		    ar->k_ulen);
	}

	if ((ar->k_ar_commit & AR_COMMIT_USER) &&
	    (ar->k_ar_commit & AR_PRESELECT_USER_PIPE))
		audit_pipe_submit_user(ar->k_udata, ar->k_ulen);

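	/*
	 * If there is no kernel record, or it was preselected for neither
	 * the trail nor any audit pipe, there is no need to convert it to
	 * BSM format.
	 */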
	if (!(ar->k_ar_commit & AR_COMMIT_KERNEL) ||
	    ((ar->k_ar_commit & AR_PRESELECT_PIPE) == 0 &&
	    (ar->k_ar_commit & AR_PRESELECT_TRAIL) == 0))
		goto out;

	auid = ar->k_ar.ar_subj_auid;
	event = ar->k_ar.ar_event;
	class = au_event_class(event);
	if (ar->k_ar.ar_errno == 0)
		sorf = AU_PRS_SUCCESS;
	else
		sorf = AU_PRS_FAILURE;

	error = kaudit_to_bsm(ar, &bsm);
	switch (error) {
	case BSM_NOAUDIT:
		goto out;

	case BSM_FAILURE:
		printf("audit_worker_process_record: BSM_FAILURE\n");
		goto out;

	case BSM_SUCCESS:
		break;

	default:
		panic("kaudit_to_bsm returned %d", error);
	}

	if (ar->k_ar_commit & AR_PRESELECT_TRAIL) {
		AUDIT_WORKER_LOCK_ASSERT();
		audit_record_write(audit_vp, audit_cred, bsm->data, bsm->len);
	}

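	/*
	 * Hand the BSM record to the audit pipe code, which applies
	 * per-pipe preselection; pass along whether the record was also
	 * selected for the trail.
	 */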
	if (ar->k_ar_commit & AR_PRESELECT_PIPE)
		audit_pipe_submit(auid, event, class, sorf,
		    ar->k_ar_commit & AR_PRESELECT_TRAIL, bsm->data,
		    bsm->len);

	kau_free(bsm);
out:
	if (locked)
		AUDIT_WORKER_UNLOCK();
}

/*
 * The audit_worker thread is responsible for watching the event queue,
 * dequeueing records, converting them to BSM format, and committing them to
 * disk.  In order to minimize lock thrashing, records are dequeued in sets
 * to a thread-local work queue.
 *
 * Note: this means that the effective bound on the size of the pending
 * record queue is 2x the length of the global queue.
 */
static void
audit_worker(void *arg)
{
	struct kaudit_queue ar_worklist;
	struct kaudit_record *ar;
	int lowater_signal;

	TAILQ_INIT(&ar_worklist);
	mtx_lock(&audit_mtx);
	while (1) {
		mtx_assert(&audit_mtx, MA_OWNED);

		/*
		 * Wait for a record.
		 */
		while (TAILQ_EMPTY(&audit_q))
			cv_wait(&audit_worker_cv, &audit_mtx);

		/*
		 * If there are records in the global audit record queue,
		 * transfer them to a thread-local queue and process them
		 * one by one.  If we cross the low watermark threshold,
		 * signal any waiting processes that they may wake up and
		 * continue generating records.
		 */
		lowater_signal = 0;
		while ((ar = TAILQ_FIRST(&audit_q))) {
			TAILQ_REMOVE(&audit_q, ar, k_q);
			audit_q_len--;
			if (audit_q_len == audit_qctrl.aq_lowater)
				lowater_signal++;
			TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q);
		}
		if (lowater_signal)
			cv_broadcast(&audit_watermark_cv);

		mtx_unlock(&audit_mtx);
		while ((ar = TAILQ_FIRST(&ar_worklist))) {
			TAILQ_REMOVE(&ar_worklist, ar, k_q);
			audit_worker_process_record(ar);
			audit_free(ar);
		}
		mtx_lock(&audit_mtx);
	}
}

/*
 * audit_rotate_vnode() is called by a user or kernel thread to configure or
 * de-configure auditing on a vnode.  The arguments are the replacement
 * credential (referenced) and vnode (referenced and opened) to substitute
 * for the current credential and vnode, if any.  If either is set to NULL,
 * both should be NULL, and this is used to indicate that audit is being
 * disabled.  Any previous cred/vnode will be closed and freed.  We re-enable
 * generating rotation requests to auditd.
 */
void
audit_rotate_vnode(struct ucred *cred, struct vnode *vp)
{
	struct ucred *old_audit_cred;
	struct vnode *old_audit_vp;
	int vfslocked;

	KASSERT((cred != NULL && vp != NULL) || (cred == NULL && vp == NULL),
	    ("audit_rotate_vnode: cred %p vp %p", cred, vp));

	/*
	 * Rotate the vnode/cred, and clear the rotate flag so that we will
	 * send a rotate trigger if the new file fills.
	 */
	AUDIT_WORKER_LOCK();
	old_audit_cred = audit_cred;
	old_audit_vp = audit_vp;
	audit_cred = cred;
	audit_vp = vp;
	audit_file_rotate_wait = 0;
	audit_enabled = (audit_vp != NULL);
	AUDIT_WORKER_UNLOCK();

	/*
	 * If there was an old vnode/credential, close and free.
	 */
	if (old_audit_vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(old_audit_vp->v_mount);
		vn_close(old_audit_vp, AUDIT_CLOSE_FLAGS, old_audit_cred,
		    curthread);
		VFS_UNLOCK_GIANT(vfslocked);
		crfree(old_audit_cred);
	}
}

void
audit_worker_init(void)
{
	int error;

	AUDIT_WORKER_LOCK_INIT();
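	/*
	 * Start the audit worker as its own kernel process; auditing cannot
	 * proceed without it, so treat failure as fatal.
	 */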
	error = kproc_create(audit_worker, NULL, &audit_thread, RFHIGHPID,
	    0, "audit");
	if (error)
		panic("audit_worker_init: kproc_create returned %d", error);
}