// SPDX-License-Identifier: GPL-2.0

#include "eytzinger.h"

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
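
/*
 * For example, with align == 8: size == 24 on an 8-byte-aligned base passes
 * (the low three bits of lsbits stay clear), while size == 12 fails no
 * matter where base points, so the caller falls back to narrower swaps.
 */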

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}
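
/*
 * For instance, a 12-byte element is exchanged in three iterations, swapping
 * the 32-bit words at offsets 8, 4 and 0 as n counts down to zero.
 */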

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap_func;
};

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap_func(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}

static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
			 cmp_r_func_t cmp_func, const void *priv,
			 size_t l, size_t r)
{
	return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
		      base + inorder_to_eytzinger0(r, n) * size,
		      cmp_func, priv);
}

static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
			   swap_r_func_t swap_func, const void *priv,
			   size_t l, size_t r)
{
	do_swap(base + inorder_to_eytzinger0(l, n) * size,
		base + inorder_to_eytzinger0(r, n) * size,
		size, swap_func, priv);
}
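
/*
 * The sort below runs an ordinary heapsort over in-order indices 0..n-1 and
 * lets the two helpers above translate every access into the corresponding
 * eytzinger0 (breadth-first) slot, so the array ends up sorted in eytzinger
 * order rather than linearly.
 *
 * As a sketch, for n = 7 the in-order indices map to eytzinger0 slots
 *
 *	inorder:    0 1 2 3 4 5 6
 *	eytzinger0: 3 1 4 0 5 2 6
 *
 * i.e. the sorted values {10,20,30,40,50,60,70} end up stored as
 * {40,20,60,10,30,50,70}: slot 0 holds the median, and the children of
 * slot i live at slots 2*i + 1 and 2*i + 2.
 */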

void eytzinger0_sort_r(void *base, size_t n, size_t size,
		       cmp_r_func_t cmp_func,
		       swap_r_func_t swap_func,
		       const void *priv)
{
	int i, j, k;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/* heapify */
	for (i = n / 2 - 1; i >= 0; --i) {
		/* Find the sift-down path all the way to the leaves. */
		for (j = i; k = j * 2 + 1, k + 1 < n;)
			j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 + 2 == n)
			j = j * 2 + 1;

		/* Backtrack to the correct location. */
		while (j != i && eytzinger0_do_cmp(base, n, size, cmp_func, priv, i, j) >= 0)
			j = (j - 1) / 2;

		/* Shift the element into its correct place. */
		for (k = j; j != i;) {
			j = (j - 1) / 2;
			eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
		}
	}

	/* sort */
	for (i = n - 1; i > 0; --i) {
		eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);

		/* Find the sift-down path all the way to the leaves. */
		for (j = 0; k = j * 2 + 1, k + 1 < i;)
			j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 + 2 == i)
			j = j * 2 + 1;

		/* Backtrack to the correct location. */
		while (j && eytzinger0_do_cmp(base, n, size, cmp_func, priv, 0, j) >= 0)
			j = (j - 1) / 2;

		/* Shift the element into its correct place. */
		for (k = j; j;) {
			j = (j - 1) / 2;
			eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
		}
	}
}

void eytzinger0_sort(void *base, size_t n, size_t size,
		     cmp_func_t cmp_func,
		     swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp  = cmp_func,
		.swap_func = swap_func,
	};

	return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
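
/*
 * Example usage (a minimal, hypothetical sketch; struct foo and its fields
 * are made up): sort an array into eytzinger order with the plain cmp_func_t
 * entry point, then walk it in sorted order with eytzinger0_for_each(), the
 * same iterator the disabled test below uses.
 */
#if 0
struct foo {
	u32	key;
	u32	val;
};

static int foo_cmp_key(const void *_l, const void *_r)
{
	const struct foo *l = _l;
	const struct foo *r = _r;

	if (l->key < r->key)
		return -1;
	if (l->key > r->key)
		return 1;
	return 0;
}

static u64 foo_sort_and_sum(struct foo *arr, size_t nr)
{
	u64 sum = 0;
	size_t i;

	/* arr ends up in eytzinger (breadth-first) order, not linear order */
	eytzinger0_sort(arr, nr, sizeof(arr[0]), foo_cmp_key, NULL);

	/* eytzinger0_for_each() visits entries in increasing key order */
	eytzinger0_for_each(i, nr)
		sum += arr[i].val;

	return sum;
}
#endif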

#if 0
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/ktime.h>

static u64 cmp_count;

static int mycmp(const void *a, const void *b)
{
	u32 _a = *(u32 *)a;
	u32 _b = *(u32 *)b;

	cmp_count++;
	if (_a < _b)
		return -1;
	else if (_a > _b)
		return 1;
	else
		return 0;
}

static int test(void)
{
	size_t N, i;
	ktime_t start, end;
	s64 delta;
	u32 *arr;

	for (N = 10000; N <= 100000; N += 10000) {
		arr = kmalloc_array(N, sizeof(u32), GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		cmp_count = 0;

		for (i = 0; i < N; i++)
			arr[i] = get_random_u32();

		start = ktime_get();
		eytzinger0_sort(arr, N, sizeof(u32), mycmp, NULL);
		end = ktime_get();

		delta = ktime_us_delta(end, start);
		printk(KERN_INFO "time: %lld us\n", delta);
		printk(KERN_INFO "comparisons: %llu\n", cmp_count);

		u32 prev = 0;

		eytzinger0_for_each(i, N) {
			if (prev > arr[i])
				goto err;
			prev = arr[i];
		}

		kfree(arr);
	}
	return 0;

err:
	kfree(arr);
	return -1;
}
#endif