xref: /openbsd/usr.bin/systat/cache.c (revision 404b540a)
1 /* $Id: cache.c,v 1.3 2008/12/07 02:56:06 canacar Exp $ */
2 /*
3  * Copyright (c) 2001, 2007 Can Erkin Acar <canacar@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <netinet/in.h>

#include <netinet/tcp_fsm.h>
#ifdef TEST_COMPAT
#include "pfvarmux.h"
#else
#include <net/pfvar.h>
#endif
#include <arpa/inet.h>

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "cache.h"
39 
40 /* prototypes */
41 void update_state(struct sc_ent *, struct pfsync_state *, double);
42 struct sc_ent *cache_state(struct pfsync_state *);
43 static __inline int sc_cmp(struct sc_ent *s1, struct sc_ent *s2);
44 
45 /* initialize the tree and queue */
46 RB_HEAD(sc_tree, sc_ent) sctree;
47 TAILQ_HEAD(sc_queue, sc_ent) scq1, scq2, scq_free;
48 RB_GENERATE(sc_tree, sc_ent, tlink, sc_cmp)
49 
50 struct sc_queue *scq_act = NULL;
51 struct sc_queue *scq_exp = NULL;
52 
53 int cache_max = 0;
54 int cache_size = 0;
55 
56 struct sc_ent *sc_store = NULL;
57 
58 /* preallocate the cache and insert into the 'free' queue */
59 int
60 cache_init(int max)
61 {
62 	int n;
63 	static int initialized = 0;
64 
65 	if (max < 0 || initialized)
66 		return (1);
67 
68 	if (max == 0) {
69 		sc_store = NULL;
70 	} else {
71 		sc_store = malloc(max * sizeof(struct sc_ent));
72 		if (sc_store == NULL)
73 			return (1);
74 	}
75 
76 	RB_INIT(&sctree);
77 	TAILQ_INIT(&scq1);
78 	TAILQ_INIT(&scq2);
79 	TAILQ_INIT(&scq_free);
80 
81 	scq_act = &scq1;
82 	scq_exp = &scq2;
83 
84 	for (n = 0; n < max; n++)
85 		TAILQ_INSERT_HEAD(&scq_free, sc_store + n, qlink);
86 
87 	cache_size = cache_max = max;
88 	initialized++;
89 
90 	return (0);
91 }
92 
93 void
94 update_state(struct sc_ent *prev, struct pfsync_state *new, double rate)
95 {
96 	assert (prev != NULL && new != NULL);
97 	prev->t = time(NULL);
98 	prev->rate = rate;
99 	prev->bytes = COUNTER(new->bytes[0]) + COUNTER(new->bytes[1]);
100 	if (prev->peak < rate)
101 		prev->peak = rate;
102 }
103 
104 void
105 add_state(struct pfsync_state *st)
106 {
107 	struct sc_ent *ent;
108 	assert(st != NULL);
109 
110 	if (cache_max == 0)
111 		return;
112 
113 	if (TAILQ_EMPTY(&scq_free))
114 		return;
115 
116 	ent = TAILQ_FIRST(&scq_free);
117 	TAILQ_REMOVE(&scq_free, ent, qlink);
118 
119 	cache_size--;
120 
121 	ent->id[0] = st->id[0];
122 	ent->id[1] = st->id[1];
123 	ent->bytes = COUNTER(st->bytes[0]) + COUNTER(st->bytes[1]);
124 	ent->peak = 0;
125 	ent->rate = 0;
126 	ent->t = time(NULL);
127 
128 	RB_INSERT(sc_tree, &sctree, ent);
129 	TAILQ_INSERT_HEAD(scq_act, ent, qlink);
130 }
131 
132 /* must be called only once for each state before cache_endupdate */
133 struct sc_ent *
134 cache_state(struct pfsync_state *st)
135 {
136 	struct sc_ent ent, *old;
137 	double sd, td, r;
138 
139 	if (cache_max == 0)
140 		return (NULL);
141 
142 	ent.id[0] = st->id[0];
143 	ent.id[1] = st->id[1];
144 	old = RB_FIND(sc_tree, &sctree, &ent);
145 
146 	if (old == NULL) {
147 		add_state(st);
148 		return (NULL);
149 	}
150 
151 	if (COUNTER(st->bytes[0]) + COUNTER(st->bytes[1]) < old->bytes)
152 		return (NULL);
153 
154 	sd = COUNTER(st->bytes[0]) + COUNTER(st->bytes[1]) - old->bytes;
155 	td = time(NULL) - old->t;
156 
157 	if (td > 0) {
158 		r = sd/td;
159 		update_state(old, st, r);
160 	}
161 
162 	/* move to active queue */
163 	TAILQ_REMOVE(scq_exp, old, qlink);
164 	TAILQ_INSERT_HEAD(scq_act, old, qlink);
165 
166 	return (old);
167 }
168 
169 /* remove the states that are not updated in this cycle */
170 void
171 cache_endupdate(void)
172 {
173 	struct sc_queue *tmp;
174 	struct sc_ent *ent;
175 
176 	while (! TAILQ_EMPTY(scq_exp)) {
177 		ent = TAILQ_FIRST(scq_exp);
178 		TAILQ_REMOVE(scq_exp, ent, qlink);
179 		RB_REMOVE(sc_tree, &sctree, ent);
180 		TAILQ_INSERT_HEAD(&scq_free, ent, qlink);
181 		cache_size++;
182 	}
183 
184 	tmp = scq_act;
185 	scq_act = scq_exp;
186 	scq_exp = tmp;
187 }
188 
189 static __inline int
190 sc_cmp(struct sc_ent *a, struct sc_ent *b)
191 {
192 	if (a->id[0] > b->id[0])
193 		return (1);
194 	if (a->id[0] < b->id[0])
195 		return (-1);
196 	if (a->id[1] > b->id[1])
197 		return (1);
198 	if (a->id[1] < b->id[1])
199 		return (-1);
200 	return (0);
201 }
202