// SPDX-License-Identifier: LGPL-2.1
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "../kselftest.h"
#include "rseq.h"
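/*
 * Basic percpu operations test: exercises a per-cpu spinlock and a
 * per-cpu linked list, both built on restartable sequences (rseq).
 */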
#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
static int get_current_cpu_id(void)
{
	return rseq_current_mm_cid();
}

static bool rseq_validate_cpu_id(void)
{
	return rseq_mm_cid_available();
}

static bool rseq_use_cpu_index(void)
{
	return false;	/* Use mm_cid */
}
#else
# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
static int get_current_cpu_id(void)
{
	return rseq_cpu_start();
}

static bool rseq_validate_cpu_id(void)
{
	return rseq_current_cpu_raw() >= 0;
}

static bool rseq_use_cpu_index(void)
{
	return true;	/* Use cpu_id as index. */
}
#endif
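/*
 * The helpers above choose, at build time, how per-cpu data is indexed:
 * either by mm_cid (a compact concurrency id allocated within the
 * memory map, not tied to a specific cpu, hence rseq_use_cpu_index()
 * returning false) or by the raw cpu number, in which case the cpu
 * affinity mask can meaningfully be used to skip unusable indexes.
 */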
/* Per-entry alignment avoids false sharing between adjacent per-cpu data. */
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};

struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};
struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};
/* A simple percpu spinlock. Returns the cpu lock was acquired on. */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = get_current_cpu_id();
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 &lock->c[cpu].v, 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
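/*
 * Note on the primitive above: rseq_cmpeqv_storev() compares
 * lock->c[cpu].v against 0 and, only if equal, stores 1. Both steps run
 * in a restartable sequence bound to "cpu", so the sequence aborts (and
 * the caller retries) if the thread is preempted or migrated in between.
 */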
void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	return NULL;
}
/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock. Obviously real applications might prefer to simply use a
 * per-cpu increment (see the sketch following this function); however,
 * this is reasonable for a test, and the lock can be extended to
 * synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);
	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;
	assert(sum == (uint64_t)data.reps * num_threads);
}
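/*
 * For illustration only (not used by the test): the per-cpu increment
 * alternative mentioned above could look like the sketch below, assuming
 * the rseq_addv() helper provided by this selftest's rseq.h.
 */
__attribute__((unused))
static void this_cpu_counter_inc(struct spinlock_test_data *data)
{
	for (;;) {
		int cpu = get_current_cpu_id();

		/* Add 1 to this cpu's counter within a restartable sequence. */
		if (rseq_likely(!rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					   &data->c[cpu].count, 1, cpu)))
			break;
		/* Retry if rseq aborts. */
	}
}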
void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = get_current_cpu_id();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					 targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
/*
 * Unlike a traditional lock-less linked list, the availability of an
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		long offset;
		int ret, cpu;

		cpu = get_current_cpu_id();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
						 targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;	/* List is empty. */
		/* Retry if rseq aborts. */
	}
}
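/*
 * Note on the primitive above: rseq_cmpnev_storeoffp_load() checks that
 * the head is not NULL (returning > 0 if it is), loads the old head into
 * *load, and stores the word at "offset" within the old head (its next
 * pointer) back into *targetptr, all within a restartable sequence.
 */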
/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}
void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();	/* encourage shuffling */
		if (node)	/* Pop returns NULL when this cpu's list is empty. */
			this_cpu_list_push(list, node, NULL);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	return NULL;
}
/* Simultaneous modification to a per-cpu linked list from many threads. */
void test_percpu_list(void)
{
	const int num_threads = 200;
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[num_threads];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);
	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
			continue;
		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}
int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	if (!rseq_validate_cpu_id()) {
		fprintf(stderr, "Error: cpu id getter unavailable\n");
		goto error;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	return 0;

error:
	return -1;
}