#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = ACCESS_ONCE(lockref->lock_count);                      \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
                                                   old.lock_count,              \
                                                   new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
                cpu_relax_lowlatency();                                         \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

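/*
 * Illustrative note (not part of the original file): CMPXCHG_LOOP() is the
 * lockless fast path used by all the helpers below.  A "struct lockref"
 * packs the spinlock and the reference count into one 64-bit word, so the
 * count can be updated with a single cmpxchg as long as the lock is seen
 * to be unlocked.  Roughly, lockref_get() behaves like this sketch:
 *
 *        old = lockref->lock_count;              // 64-bit snapshot of lock + count
 *        while (spinlock part of old is unlocked) {
 *                new = old, prev = old;
 *                new.count++;                    // the CODE argument
 *                old = cmpxchg64_relaxed(&lockref->lock_count,
 *                                        old.lock_count, new.lock_count);
 *                if (old == prev)                // nobody raced with us
 *                        return;                 // the SUCCESS argument
 *                cpu_relax_lowlatency();         // raced: retry with reloaded old
 *        }
 *        // lock was (or became) held: fall back to the spinlocked slow path
 */
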
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

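/*
 * Illustrative example (not part of the original file): taking an extra
 * reference on an object the caller already holds a reference to.  The
 * "struct foo" type and foo_dup() are hypothetical.
 *
 *        struct foo {
 *                struct lockref ref;
 *        };
 *
 *        static void foo_dup(struct foo *foo)
 *        {
 *                lockref_get(&foo->ref);        // count is known to be non-zero
 *        }
 */
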
/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

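/*
 * Illustrative example (not part of the original file): a lookup path that
 * must not take a new reference on an object whose count has already hit
 * zero (i.e. one that is on its way to being freed).  Names are hypothetical.
 *
 *        static struct foo *foo_grab(struct foo *foo)
 *        {
 *                if (!lockref_get_not_zero(&foo->ref))
 *                        return NULL;        // count was zero, don't touch it
 *                return foo;
 *        }
 */
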
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

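/*
 * Illustrative example (not part of the original file): a caller that wants
 * the fast-path reference when the object is live, but needs the spinlock
 * held to deal with a zero-count object itself.  Names are hypothetical.
 *
 *        if (!lockref_get_or_lock(&foo->ref)) {
 *                // count was zero and foo->ref.lock is now held
 *                foo_handle_zero_count(foo);        // must drop the lock
 *        }
 */
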
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

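/*
 * Illustrative example (not part of the original file): a typical release
 * path.  The common case drops a reference without touching the lock; only
 * when the count would reach zero does the caller end up with the lock held
 * and has to handle teardown.  Names are hypothetical, and a real user would
 * normally mark the object dead (see lockref_mark_dead() below) before
 * freeing it.
 *
 *        static void foo_put(struct foo *foo)
 *        {
 *                if (lockref_put_or_lock(&foo->ref))
 *                        return;                // fast path: count was > 1
 *                // slow path: foo->ref.lock is held and count is <= 1,
 *                // so this was the last reference
 *                spin_unlock(&foo->ref.lock);
 *                kfree(foo);
 *        }
 */
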
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if ((int)old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if ((int) lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
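
/*
 * Illustrative example (not part of the original file): how lockref_mark_dead()
 * and lockref_get_not_dead() pair up.  Teardown marks the lockref dead while
 * holding the lock, so lockless lookups that use lockref_get_not_dead() are
 * guaranteed to fail rather than resurrect the object.  Names are hypothetical.
 *
 *        // teardown path
 *        spin_lock(&foo->ref.lock);
 *        lockref_mark_dead(&foo->ref);
 *        spin_unlock(&foo->ref.lock);
 *        foo_free(foo);
 *
 *        // lookup path racing with teardown
 *        if (!lockref_get_not_dead(&foo->ref))
 *                return NULL;        // object is dead, do not touch it
 */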