// SPDX-License-Identifier: GPL-2.0-only
/*
 * KUnit tests for cpumask.
 *
 * Author: Sander Vanheule <sander@svanheule.net>
 */

#include <kunit/test.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

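/*
 * Expand to a printf-style KUnit failure message that names the mask and
 * lists the CPUs it contains (prefixed with "no " when the mask is empty).
 */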
#define MASK_MSG(m) \
	"%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
	nr_cpumask_bits, cpumask_bits(m)

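/* Expect for_each_cpu() to visit exactly cpumask_weight(mask) CPUs. */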
#define EXPECT_FOR_EACH_CPU_EQ(test, mask)			\
	do {							\
		const cpumask_t *m = (mask);			\
		int mask_weight = cpumask_weight(m);		\
		int cpu, iter = 0;				\
		for_each_cpu(cpu, m)				\
			iter++;					\
		KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask));	\
	} while (0)

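/*
 * Expect a two-mask iterator such as for_each_cpu_and() to visit exactly as
 * many CPUs as the matching cpumask_##op() operation leaves set in mask_tmp.
 */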
#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2)			\
	do {									\
		const cpumask_t *m1 = (mask1);					\
		const cpumask_t *m2 = (mask2);					\
		int weight;							\
		int cpu, iter = 0;						\
		cpumask_##op(&mask_tmp, m1, m2);				\
		weight = cpumask_weight(&mask_tmp);				\
		for_each_cpu_##op(cpu, mask1, mask2)				\
			iter++;							\
		KUNIT_EXPECT_EQ((test), weight, iter);				\
	} while (0)

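/*
 * Expect for_each_cpu_wrap(), starting halfway through the CPU ID space, to
 * still visit every CPU in the mask exactly once.
 */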
#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask)			\
	do {							\
		const cpumask_t *m = (mask);			\
		int mask_weight = cpumask_weight(m);		\
		int cpu, iter = 0;				\
		for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2)	\
			iter++;					\
		KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask));	\
	} while (0)

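/* Expect for_each_<name>_cpu() to visit exactly num_<name>_cpus() CPUs. */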
#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name)		\
	do {							\
		int mask_weight = num_##name##_cpus();		\
		int cpu, iter = 0;				\
		for_each_##name##_cpu(cpu)			\
			iter++;					\
		KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask));	\
	} while (0)

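/*
 * Fixture masks: mask_empty and mask_all are reset by test_cpumask_init()
 * before each test case; mask_tmp is scratch space for
 * EXPECT_FOR_EACH_CPU_OP_EQ().
 */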
static cpumask_t mask_empty;
static cpumask_t mask_all;
static cpumask_t mask_tmp;

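/* Weights: 0 for the empty mask, nr_cpu_ids for mask_all and cpu_possible_mask. */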
static void test_cpumask_weight(struct kunit *test)
{
	KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));

	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
	KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
}

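/*
 * cpumask_first() and cpumask_first_zero() must return a value >= nr_cpu_ids
 * when no matching bit exists, and 0 when the very first bit matches.
 */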
static void test_cpumask_first(struct kunit *test)
{
	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));

	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
}

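/*
 * cpumask_last() must return a value >= nr_cpumask_bits for an empty mask and
 * nr_cpu_ids - 1 for cpu_possible_mask.
 */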
static void test_cpumask_last(struct kunit *test)
{
	KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
			    MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
}

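/*
 * Starting from -1, cpumask_next() and cpumask_next_zero() must find bit 0
 * when it matches and return a value >= nr_cpu_ids when nothing matches.
 */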
static void test_cpumask_next(struct kunit *test)
{
	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));

	KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
			    MASK_MSG(&mask_empty));
	KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
			    MASK_MSG(cpu_possible_mask));
}

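/* Run the iterator checks on an empty mask and on cpu_possible_mask. */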
static void test_cpumask_iterators(struct kunit *test)
{
	EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
	EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);

	EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
	EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
	EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
}

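/*
 * Run the iterator checks on the built-in possible, online and present masks.
 */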
static void test_cpumask_iterators_builtin(struct kunit *test)
{
	EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);

	/* Ensure the dynamic masks are stable while running the tests */
	cpu_hotplug_disable();

	EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
	EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);

	cpu_hotplug_enable();
}

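/* Suite init: runs before each test case to reset the fixture masks. */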
static int test_cpumask_init(struct kunit *test)
{
	cpumask_clear(&mask_empty);
	cpumask_setall(&mask_all);

	return 0;
}

static struct kunit_case test_cpumask_cases[] = {
	KUNIT_CASE(test_cpumask_weight),
	KUNIT_CASE(test_cpumask_first),
	KUNIT_CASE(test_cpumask_last),
	KUNIT_CASE(test_cpumask_next),
	KUNIT_CASE(test_cpumask_iterators),
	KUNIT_CASE(test_cpumask_iterators_builtin),
	{}
};

static struct kunit_suite test_cpumask_suite = {
	.name = "cpumask",
	.init = test_cpumask_init,
	.test_cases = test_cpumask_cases,
};
kunit_test_suite(test_cpumask_suite);

MODULE_LICENSE("GPL");