xref: /freebsd/contrib/openbsm/libbsm/bsm_mask.c (revision 3b97a967)
/*-
 * Copyright (c) 2004 Apple Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#ifdef HAVE_PTHREAD_MUTEX_LOCK
#include <pthread.h>
#endif
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
#ifdef HAVE_PTHREAD_MUTEX_LOCK
static pthread_mutex_t	mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static int		firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
struct audit_event_map {
	char				 ev_name[AU_EVENT_NAME_MAX];
	char				 ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		 ev;
	LIST_ENTRY(audit_event_map)	 ev_list;
};
static LIST_HEAD(, audit_event_map)	ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (aemp);
	bzero(aemp, sizeof(*aemp));
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to beginning of entries. */
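	/*
	 * Walk the event database, caching each entry until getauevent_r()
	 * indicates that no further entries remain.
	 */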
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check if the audit event is preselected against the preselection mask.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;

	if (mask_p == NULL)
		return (-1);

#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_lock(&mutex);
#endif
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
	}
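	/*
	 * AU_PRS_REREAD discards the cached copy and reloads the event
	 * table via getauevent_r(); AU_PRS_USECACHE looks the event up in
	 * the table already loaded above.
	 */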
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
		pthread_mutex_unlock(&mutex);
#endif
		return (-1);
	}
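	/*
	 * The effective mask is the event's class bits intersected with the
	 * success and/or failure preselection masks, as selected by sorf.
	 */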
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_unlock(&mutex);
#endif
	if (effmask != 0)
		return (1);
	return (0);
}
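
/*
 * Illustrative usage sketch (not part of this file and not compiled): one
 * way a caller might ask whether a successful event would be audited
 * against a process preselection mask.  The class mask value below is a
 * hypothetical placeholder.
 */
#if 0
static int
example_would_audit(au_event_t event)
{
	au_mask_t mask;

	/*
	 * Hypothetical mask: audit the same (made-up) class bit on both
	 * success and failure.
	 */
	mask.am_success = 0x00000001;
	mask.am_failure = 0x00000001;

	/*
	 * Use the cached event table; passing AU_PRS_REREAD instead would
	 * force a reload from the event database.
	 */
	return (au_preselect(event, &mask, AU_PRS_SUCCESS, AU_PRS_USECACHE));
}
#endif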