// SPDX-License-Identifier: GPL-2.0

#include "eytzinger.h"

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be aligned as well if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
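
/*
 * Illustrative sketch, not part of the original file: the hand-combined
 * mask test described above, written out for two raw addresses.  The
 * function name is hypothetical.
 */
static __maybe_unused bool both_aligned(uintptr_t a, uintptr_t b,
					unsigned char align)
{
	/* One branch instead of two: "(a | b) & mask". */
	return ((a | b) & (align - 1)) == 0;
}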
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory. This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}
/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory. This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible. If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not. If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI). Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}
/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}
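
/*
 * Illustrative sketch, not part of the original file: the three swap
 * helpers are interchangeable in effect; the access width is purely a
 * performance choice gated by is_aligned().  The function name is
 * hypothetical.
 */
static __maybe_unused void swap_demo(u64 *x, u64 *y)
{
	if (is_aligned(x, sizeof(*x), 8) && is_aligned(y, sizeof(*y), 8))
		swap_words_64(x, y, sizeof(*x));	/* 8-byte chunks */
	else
		swap_bytes(x, y, sizeof(*x));		/* bytewise fallback */
}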
/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3

struct wrapper {
	cmp_func_t cmp;
	swap_func_t swap_func;
};
/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap_func(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}
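
/*
 * Illustrative sketch, not part of the original file: how the sentinel
 * "function pointers" select a built-in swap routine.  priv is only
 * dereferenced on the SWAP_WRAPPER path, so NULL is fine here.  The
 * function name and buffer are hypothetical.
 */
static __maybe_unused void do_swap_demo(void)
{
	static u32 buf[2] = { 1, 2 };

	/* u32 elements are 4-byte aligned, so the 32-bit path applies. */
	do_swap(&buf[0], &buf[1], sizeof(buf[0]), SWAP_WORDS_32, NULL);
}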
#define _CMP_WRAPPER ((cmp_r_func_t)0L)

static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((const struct wrapper *)priv)->cmp(a, b);
	return cmp(a, b, priv);
}
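
/*
 * Illustrative sketch, not part of the original file: with _CMP_WRAPPER,
 * priv must point to a struct wrapper whose ->cmp takes only (a, b);
 * any other cmp is called directly with priv as its third argument.
 * cmp_u32() and do_cmp_demo() are hypothetical names.
 */
static __maybe_unused int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : x > y;
}

static __maybe_unused int do_cmp_demo(const u32 *a, const u32 *b)
{
	struct wrapper w = { .cmp = cmp_u32 };

	return do_cmp(a, b, _CMP_WRAPPER, &w);
}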
static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
			 cmp_r_func_t cmp_func, const void *priv,
			 size_t l, size_t r)
{
	return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
		      base + inorder_to_eytzinger0(r, n) * size,
		      cmp_func, priv);
}
static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
			   swap_r_func_t swap_func, const void *priv,
			   size_t l, size_t r)
{
	do_swap(base + inorder_to_eytzinger0(l, n) * size,
		base + inorder_to_eytzinger0(r, n) * size,
		size, swap_func, priv);
}
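
/*
 * Illustrative sketch, not part of the original file: both _do_ helpers
 * take l and r as in-order ranks and let inorder_to_eytzinger0() map the
 * rank to the element's physical slot, so the heapsort below can reason
 * in plain sorted order while permuting the array into eytzinger layout.
 * The helper name is hypothetical.
 */
static __maybe_unused void *eytzinger0_elem(void *base, size_t n, size_t size,
					    size_t inorder_idx)
{
	return base + inorder_to_eytzinger0(inorder_idx, n) * size;
}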
void eytzinger0_sort_r(void *base, size_t n, size_t size,
		       cmp_r_func_t cmp_func,
		       swap_r_func_t swap_func,
		       const void *priv)
{
	int i, c, r;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/* heapify */
	for (i = n / 2 - 1; i >= 0; --i) {
		/* sift element r down to its place among its children */
		for (r = i; r * 2 + 1 < n; r = c) {
			c = r * 2 + 1;

			if (c + 1 < n &&
			    eytzinger0_do_cmp(base, n, size, cmp_func, priv, c, c + 1) < 0)
				c++;

			if (eytzinger0_do_cmp(base, n, size, cmp_func, priv, r, c) >= 0)
				break;

			eytzinger0_do_swap(base, n, size, swap_func, priv, r, c);
		}
	}

	/* sort: pop the max to the end, then re-sift the new root */
	for (i = n - 1; i > 0; --i) {
		eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);

		for (r = 0; r * 2 + 1 < i; r = c) {
			c = r * 2 + 1;

			if (c + 1 < i &&
			    eytzinger0_do_cmp(base, n, size, cmp_func, priv, c, c + 1) < 0)
				c++;

			if (eytzinger0_do_cmp(base, n, size, cmp_func, priv, r, c) >= 0)
				break;

			eytzinger0_do_swap(base, n, size, swap_func, priv, r, c);
		}
	}
}
void eytzinger0_sort(void *base, size_t n, size_t size,
		     cmp_func_t cmp_func,
		     swap_func_t swap_func)
{
	struct wrapper w = {
		.cmp = cmp_func,
		.swap_func = swap_func,
	};

	return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
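
/*
 * Caller sketch, not part of the original file: sort an array into
 * eytzinger (BFS) order using the default swap selection (NULL swap_func
 * lets eytzinger0_sort_r pick a word-wide swap).  Reuses the hypothetical
 * cmp_u32() from above; assumes ARRAY_SIZE() is available here.
 */
static __maybe_unused void eytzinger0_sort_demo(void)
{
	static u32 keys[8] = { 5, 3, 7, 1, 8, 2, 6, 4 };

	eytzinger0_sort(keys, ARRAY_SIZE(keys), sizeof(keys[0]),
			cmp_u32, NULL);
}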