#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};
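
/*
 * How the SMP counter works: each CPU accumulates deltas in its private
 * ->counters slot and only folds them into the shared ->count, under ->lock,
 * once the local value crosses the batch threshold.  ->count is therefore
 * approximate; it may deviate from the true value by up to roughly
 * num_online_cpus() * batch at any instant.
 */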

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
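
/*
 * Note on the macro above: the static lock_class_key gives every
 * percpu_counter_init() call site its own lockdep class for ->lock, so
 * lockdep can tell unrelated counters apart.  The gfp flags are forwarded
 * to the per-cpu allocation, so GFP_KERNEL is the usual choice outside of
 * atomic context.
 */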

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);

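/*
 * percpu_counter_compare() first compares rhs against ->count using the
 * worst-case per-cpu error margin and only falls back to the exact (and
 * expensive) __percpu_counter_sum() when the two are too close to call.
 * A typical use is a cheap limit check; a minimal sketch, where both
 * counter and limit names are hypothetical:
 *
 *	if (percpu_counter_compare(&nr_objects, max_objects) >= 0)
 *		return -ENOSPC;
 */
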
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
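
/*
 * percpu_counter_batch scales with the number of online CPUs.  Callers that
 * tolerate more drift in exchange for fewer ->lock acquisitions can pass
 * their own threshold via __percpu_counter_add().  Sketch only; the counter
 * name and the value 1024 are arbitrary examples, not from this header:
 *
 *	__percpu_counter_add(&nr_dirty_pages, 1, 1024);
 */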

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}
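
/*
 * percpu_counter_read() and the _positive variant are racy, O(1) peeks at
 * ->count; percpu_counter_sum() takes ->lock and adds back every CPU's
 * unfolded delta for a far more accurate (but more expensive) answer.
 * Sketch, with a hypothetical counter:
 *
 *	s64 approx = percpu_counter_read(&nr_objects);
 *	s64 exact  = percpu_counter_sum(&nr_objects);
 */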

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

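/*
 * End-to-end usage sketch (all names hypothetical; error handling trimmed):
 *
 *	struct percpu_counter nr_objects;
 *
 *	if (percpu_counter_init(&nr_objects, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	percpu_counter_inc(&nr_objects);	(one object created)
 *	percpu_counter_sub(&nr_objects, 2);	(two objects released)
 *	pr_info("~%lld objects\n", percpu_counter_read(&nr_objects));
 *
 *	percpu_counter_destroy(&nr_objects);
 */
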
#endif /* _LINUX_PERCPU_COUNTER_H */