/*
 * Copyright (c) 2016-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ptunit_threads.h"

#include "pt_block_cache.h"

#include <string.h>


/* A test fixture optionally providing a block cache and automatically freeing
 * the cache.
 */
struct bcache_fixture {
	/* Threading support. */
	struct ptunit_thrd_fixture thrd;

	/* The cache - it will be freed automatically. */
	struct pt_block_cache *bcache;

	/* The test fixture initialization and finalization functions. */
	struct ptunit_result (*init)(struct bcache_fixture *);
	struct ptunit_result (*fini)(struct bcache_fixture *);
};

enum {
	/* The number of entries in fixture-provided caches. */
	bfix_nentries = 0x10000,

#if defined(FEATURE_THREADS)

	/* The number of additional threads to use for stress testing. */
	bfix_threads = 3,

#endif /* defined(FEATURE_THREADS) */

	/* The number of iterations in stress testing. */
	bfix_iterations = 0x10
};

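/* Initialize the fixture without allocating a cache. */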
static struct ptunit_result cfix_init(struct bcache_fixture *bfix)
{
	ptu_test(ptunit_thrd_init, &bfix->thrd);

	bfix->bcache = NULL;

	return ptu_passed();
}

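/* Initialize the fixture and allocate a cache with bfix_nentries entries. */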
static struct ptunit_result bfix_init(struct bcache_fixture *bfix)
{
	ptu_test(cfix_init, bfix);

	bfix->bcache = pt_bcache_alloc(bfix_nentries);
	ptu_ptr(bfix->bcache);

	return ptu_passed();
}

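/* Finalize the fixture: finish worker threads, check their results, and free
 * the cache.
 */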
static struct ptunit_result bfix_fini(struct bcache_fixture *bfix)
{
	int thrd;

	ptu_test(ptunit_thrd_fini, &bfix->thrd);

	for (thrd = 0; thrd < bfix->thrd.nthreads; ++thrd)
		ptu_int_eq(bfix->thrd.result[thrd], 0);

	pt_bcache_free(bfix->bcache);

	return ptu_passed();
}

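/* Test that a block cache entry fits into a 32-bit integer. */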
static struct ptunit_result bcache_entry_size(void)
{
	ptu_uint_eq(sizeof(struct pt_bcache_entry), sizeof(uint32_t));

	return ptu_passed();
}

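/* Test that the block cache struct is no bigger than two cache entries. */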
static struct ptunit_result bcache_size(void)
{
	ptu_uint_le(sizeof(struct pt_block_cache),
		    2 * sizeof(struct pt_bcache_entry));

	return ptu_passed();
}

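/* Test that freeing a NULL cache does not crash. */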
static struct ptunit_result free_null(void)
{
	pt_bcache_free(NULL);

	return ptu_passed();
}

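/* Test that adding to a NULL cache fails with -pte_internal. */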
static struct ptunit_result add_null(void)
{
	struct pt_bcache_entry bce;
	int errcode;

	memset(&bce, 0, sizeof(bce));

	errcode = pt_bcache_add(NULL, 0ull, bce);
	ptu_int_eq(errcode, -pte_internal);

	return ptu_passed();
}

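/* Test that lookups with a NULL cache or a NULL entry fail with
 * -pte_internal.
 */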
static struct ptunit_result lookup_null(void)
{
	struct pt_bcache_entry bce;
	struct pt_block_cache bcache;
	int errcode;

	errcode = pt_bcache_lookup(&bce, NULL, 0ull);
	ptu_int_eq(errcode, -pte_internal);

	errcode = pt_bcache_lookup(NULL, &bcache, 0ull);
	ptu_int_eq(errcode, -pte_internal);

	return ptu_passed();
}

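/* Test allocating a cache with a typical number of entries. */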
static struct ptunit_result alloc(struct bcache_fixture *bfix)
{
	bfix->bcache = pt_bcache_alloc(0x10000ull);
	ptu_ptr(bfix->bcache);

	return ptu_passed();
}

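/* Test allocating a cache with the minimal number of entries. */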
static struct ptunit_result alloc_min(struct bcache_fixture *bfix)
{
	bfix->bcache = pt_bcache_alloc(1ull);
	ptu_ptr(bfix->bcache);

	return ptu_passed();
}

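/* Test that allocating a cache with too many entries fails. */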
static struct ptunit_result alloc_too_big(struct bcache_fixture *bfix)
{
	bfix->bcache = pt_bcache_alloc(UINT32_MAX + 1ull);
	ptu_null(bfix->bcache);

	return ptu_passed();
}

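/* Test that allocating a cache with zero entries fails. */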
static struct ptunit_result alloc_zero(struct bcache_fixture *bfix)
{
	bfix->bcache = pt_bcache_alloc(0ull);
	ptu_null(bfix->bcache);

	return ptu_passed();
}

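/* Test that all entries of a newly allocated cache are invalid. */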
static struct ptunit_result initially_empty(struct bcache_fixture *bfix)
{
	uint64_t index;

	for (index = 0; index < bfix_nentries; ++index) {
		struct pt_bcache_entry bce;
		int status;

		memset(&bce, 0xff, sizeof(bce));

		status = pt_bcache_lookup(&bce, bfix->bcache, index);
		ptu_int_eq(status, 0);

		status = pt_bce_is_valid(bce);
		ptu_int_eq(status, 0);
	}

	return ptu_passed();
}

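/* Test that adding an entry at an out-of-bounds index fails. */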
static struct ptunit_result add_bad_index(struct bcache_fixture *bfix)
{
	struct pt_bcache_entry bce;
	int errcode;

	memset(&bce, 0, sizeof(bce));

	errcode = pt_bcache_add(bfix->bcache, bfix_nentries, bce);
	ptu_int_eq(errcode, -pte_internal);

	return ptu_passed();
}

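/* Test that looking up an entry at an out-of-bounds index fails. */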
static struct ptunit_result lookup_bad_index(struct bcache_fixture *bfix)
{
	struct pt_bcache_entry bce;
	int errcode;

	errcode = pt_bcache_lookup(&bce, bfix->bcache, bfix_nentries);
	ptu_int_eq(errcode, -pte_internal);

	return ptu_passed();
}

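/* Add an entry at @index and check that looking it up yields the same
 * entry.
 */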
static struct ptunit_result add(struct bcache_fixture *bfix, uint64_t index)
{
	struct pt_bcache_entry bce, exp;
	int errcode;

	memset(&bce, 0xff, sizeof(bce));
	memset(&exp, 0x00, sizeof(exp));

	exp.ninsn = 1;
	exp.displacement = 7;
	exp.mode = ptem_64bit;
	exp.qualifier = ptbq_decode;
	exp.isize = 7;

	errcode = pt_bcache_add(bfix->bcache, index, exp);
	ptu_int_eq(errcode, 0);

	errcode = pt_bcache_lookup(&bce, bfix->bcache, index);
	ptu_int_eq(errcode, 0);

	ptu_uint_eq(bce.ninsn, exp.ninsn);
	ptu_int_eq(bce.displacement, exp.displacement);
	ptu_uint_eq(pt_bce_exec_mode(bce), pt_bce_exec_mode(exp));
	ptu_uint_eq(pt_bce_qualifier(bce), pt_bce_qualifier(exp));
	ptu_uint_eq(bce.isize, exp.isize);

	return ptu_passed();
}

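/* A stress test worker: look up each entry, add it if it is not yet valid,
 * and check that subsequent lookups yield the expected entry.
 */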
static int worker(void *arg)
{
	struct pt_bcache_entry exp;
	struct pt_block_cache *bcache;
	uint64_t iter, index;

	bcache = arg;
	if (!bcache)
		return -pte_internal;

	memset(&exp, 0x00, sizeof(exp));
	exp.ninsn = 5;
	exp.displacement = 28;
	exp.mode = ptem_64bit;
	exp.qualifier = ptbq_again;
	exp.isize = 3;

	for (index = 0; index < bfix_nentries; ++index) {
		for (iter = 0; iter < bfix_iterations; ++iter) {
			struct pt_bcache_entry bce;
			int errcode;

			memset(&bce, 0xff, sizeof(bce));

			errcode = pt_bcache_lookup(&bce, bcache, index);
			if (errcode < 0)
				return errcode;

			if (!pt_bce_is_valid(bce)) {
				errcode = pt_bcache_add(bcache, index, exp);
				if (errcode < 0)
					return errcode;
			}

			errcode = pt_bcache_lookup(&bce, bcache, index);
			if (errcode < 0)
				return errcode;

			if (!pt_bce_is_valid(bce))
				return -pte_nosync;

			if (bce.ninsn != exp.ninsn)
				return -pte_nosync;

			if (bce.displacement != exp.displacement)
				return -pte_nosync;

			if (pt_bce_exec_mode(bce) != pt_bce_exec_mode(exp))
				return -pte_nosync;

			if (pt_bce_qualifier(bce) != pt_bce_qualifier(exp))
				return -pte_nosync;

			if (bce.isize != exp.isize)
				return -pte_nosync;
		}
	}

	return 0;
}

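/* Stress-test concurrent lookups and additions, using additional threads if
 * FEATURE_THREADS is defined.
 */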
static struct ptunit_result stress(struct bcache_fixture *bfix)
{
	int errcode;

#if defined(FEATURE_THREADS)
	{
		int thrd;

		for (thrd = 0; thrd < bfix_threads; ++thrd)
			ptu_test(ptunit_thrd_create, &bfix->thrd, worker,
				 bfix->bcache);
	}
#endif /* defined(FEATURE_THREADS) */

	errcode = worker(bfix->bcache);
	ptu_int_eq(errcode, 0);

	return ptu_passed();
}

int main(int argc, char **argv)
{
	struct bcache_fixture bfix, cfix;
	struct ptunit_suite suite;

	bfix.init = bfix_init;
	bfix.fini = bfix_fini;

	cfix.init = cfix_init;
	cfix.fini = bfix_fini;

	suite = ptunit_mk_suite(argc, argv);

	ptu_run(suite, bcache_entry_size);
	ptu_run(suite, bcache_size);

	ptu_run(suite, free_null);
	ptu_run(suite, add_null);
	ptu_run(suite, lookup_null);

	ptu_run_f(suite, alloc, cfix);
	ptu_run_f(suite, alloc_min, cfix);
	ptu_run_f(suite, alloc_too_big, cfix);
	ptu_run_f(suite, alloc_zero, cfix);

	ptu_run_f(suite, initially_empty, bfix);

	ptu_run_f(suite, add_bad_index, bfix);
	ptu_run_f(suite, lookup_bad_index, bfix);

	ptu_run_fp(suite, add, bfix, 0ull);
	ptu_run_fp(suite, add, bfix, bfix_nentries - 1ull);
	ptu_run_f(suite, stress, bfix);

	return ptunit_report(&suite);
}