/*
 *	Definitions for the 'struct ptr_ring' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This is a limited-size FIFO maintaining pointers in FIFO order, with
 *	one CPU producing entries and another consuming entries from the ring.
 *
 *	This implementation tries to minimize cache contention when there is a
 *	single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer ____cacheline_aligned_in_smp;
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	void **queue;
};

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax().  If the ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full.  Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}
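
/* Illustrative sketch, not part of the ptr_ring API: a producer that
 * polls for a free slot, holding producer_lock as the note above
 * requires.  cpu_relax() supplies the barrier while waiting; the name
 * below is hypothetical.
 */
static inline void example_ptr_ring_produce_spin(struct ptr_ring *r, void *ptr)
{
	spin_lock(&r->producer_lock);
	/* __ptr_ring_produce returns -ENOSPC while the slot is occupied */
	while (__ptr_ring_produce(r, ptr))
		cpu_relax();
	spin_unlock(&r->producer_lock);
}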

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * calling this.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If the ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return r->queue[r->consumer];
	return NULL;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if the ring is ever resized - see e.g. ptr_ring_empty.
 */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	r->queue[r->consumer++] = NULL;
	if (unlikely(r->consumer >= r->size))
		r->consumer = 0;
}

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	return ptr;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}
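
/* Illustrative sketch, not part of the ptr_ring API: drain every entry
 * currently in the ring through a caller-supplied handler.  handle()
 * stands in for whatever the caller does with each pointer.
 */
static inline void example_ptr_ring_drain(struct ptr_ring *r,
					  void (*handle)(void *))
{
	void *ptr;

	/* ptr_ring_consume returns NULL once the ring is empty */
	while ((ptr = ptr_ring_consume(r)))
		handle(ptr);
}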

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
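
/* Illustrative sketch, not part of the ptr_ring API: peek at the head
 * entry without consuming it.  example_entry_len() is hypothetical and,
 * like any function passed to PTR_RING_PEEK_CALL, must tolerate the NULL
 * that __ptr_ring_peek returns when the ring is empty.
 */
static inline int example_entry_len(void *ptr)
{
	return ptr ? 1 : 0;	/* stand-in for reading a real length field */
}

static inline int example_peek_len(struct ptr_ring *r)
{
	/* takes and releases consumer_lock around the peek */
	return PTR_RING_PEEK_CALL(r, example_entry_len);
}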

static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
{
	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	r->size = size;
	r->producer = r->consumer = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}

static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	r->size = size;
	r->producer = producer;
	r->consumer = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular, if you consume the ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kfree(old);

	return 0;
}
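
/* Illustrative sketch, not part of the ptr_ring API: double a ring's
 * capacity.  Reading r->size unlocked assumes no concurrent resize;
 * example_destroy() is a hypothetical destructor for displaced entries
 * (none are displaced when growing).
 */
static inline void example_destroy(void *ptr)
{
	kfree(ptr);
}

static inline int example_ptr_ring_grow(struct ptr_ring *r)
{
	return ptr_ring_resize(r, r->size * 2, GFP_KERNEL, example_destroy);
}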

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular, if you consume the ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
					   int size,
					   gfp_t gfp, void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc(nrings * sizeof *queues, gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}

static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
	void *ptr;

	if (destroy)
		while ((ptr = ptr_ring_consume(r)))
			destroy(ptr);
	kfree(r->queue);
}
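
/* Illustrative sketch, not part of the ptr_ring API: the full lifecycle.
 * Real callers pick lock variants matching their producer/consumer
 * contexts; the (void *)1UL token merely stands in for a real pointer,
 * since NULL entries cannot be stored.
 */
static inline int example_ptr_ring_lifecycle(void)
{
	struct ptr_ring r;
	int err = ptr_ring_init(&r, 16, GFP_KERNEL);

	if (err)
		return err;

	if (!ptr_ring_produce(&r, (void *)1UL))	/* enqueue one entry */
		(void)ptr_ring_consume(&r);	/* and dequeue it again */

	ptr_ring_cleanup(&r, NULL);
	return 0;
}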

#endif /* _LINUX_PTR_RING_H */