/* Imported from Samba trunk (svn.samba.org): source/tdb/spinlock.c */
1 /* 
2    Unix SMB/CIFS implementation.
3
4    trivial database library 
5
6    Copyright (C) Anton Blanchard                   2001
7    
8      ** NOTE! The following LGPL license applies to the tdb
9      ** library. This does NOT imply that all of Samba is released
10      ** under the LGPL
11    
12    This library is free software; you can redistribute it and/or
13    modify it under the terms of the GNU Lesser General Public
14    License as published by the Free Software Foundation; either
15    version 2 of the License, or (at your option) any later version.
16
17    This library is distributed in the hope that it will be useful,
18    but WITHOUT ANY WARRANTY; without even the implied warranty of
19    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20    Lesser General Public License for more details.
21    
22    You should have received a copy of the GNU Lesser General Public
23    License along with this library; if not, write to the Free Software
24    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25 */
26 #if HAVE_CONFIG_H
27 #include <config.h>
28 #endif
29
30 #if STANDALONE
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <string.h>
35 #include <fcntl.h>
36 #include <errno.h>
37 #include <sys/stat.h>
38 #include <time.h>
39 #include <signal.h>
40 #include "tdb.h"
41 #include "spinlock.h"
42
43 #define DEBUG
44 #else
45 #include "includes.h"
46 #endif
47
48 #ifdef USE_SPINLOCKS
49
50 /*
51  * ARCH SPECIFIC
52  */
53
54 #if defined(SPARC_SPINLOCKS)
55
/*
 * Try to acquire the lock without blocking.
 * Uses the SPARC "ldstub" atomic instruction, which reads the lock byte
 * and sets it to 0xff in one indivisible operation.
 * Returns 0 if the lock was acquired, EBUSY if it was already held.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        /* result receives the previous lock value; 0 means it was free. */
        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

/*
 * Release the lock.  The empty asm with a "memory" clobber is a compiler
 * barrier so stores inside the critical section are not reordered past
 * the unlocking store.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

/* Initialise the lock to the unlocked state (0 == unlocked on SPARC). */
static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

/* Return non-zero if the lock is currently held. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}
83
84 #elif defined(POWERPC_SPINLOCKS) 
85
/*
 * Try to acquire the lock without blocking.
 * Classic PowerPC lwarx/stwcx. (load-reserved / store-conditional) loop:
 * if the word is already non-zero we bail out with result 0; otherwise we
 * attempt to store 1, retrying if the reservation was lost.  The trailing
 * isync acts as the acquire barrier after a successful store.
 * Returns 0 if the lock was acquired, EBUSY otherwise.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        /* result is 1 only when our stwcx. succeeded on a free lock. */
        return (result == 1) ? 0 : EBUSY;
}

/*
 * Release the lock.  "eieio" orders the critical-section stores before
 * the unlocking store; the "memory" clobber stops compiler reordering.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

/* Initialise the lock to the unlocked state (0 == unlocked on PowerPC). */
static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

/* Return non-zero if the lock is currently held. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}
121
122 #elif defined(INTEL_SPINLOCKS) 
123
/*
 * Try to acquire the lock without blocking.
 * NOTE: the x86 variant uses an inverted convention — 1 means unlocked,
 * 0 means locked (see __spin_lock_init/__spin_is_locked below).
 * "xchgl" atomically swaps 0 into *lock and yields the previous value;
 * a previous value > 0 means the lock was free and is now ours.
 * Returns 0 if the lock was acquired, EBUSY otherwise.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        /* xchg on memory is implicitly locked on x86 */
        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

/*
 * Release the lock by restoring the unlocked value (1).  The empty asm
 * with a "memory" clobber is a compiler barrier.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

/* Initialise the lock to the unlocked state (1 == unlocked on x86 here). */
static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

/* Return non-zero if the lock is currently held (anything but 1). */
static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}
151
152 #elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
153
154 /* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
155  * sync(3) for the details of the intrinsic operations.
156  *
157  * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
158  */
159
160 #if defined(STANDALONE)
161
162 /* MIPSPro 7.3 has "__inline" as an extension, but not "inline. */
163 #define inline __inline
164
165 #endif /* STANDALONE */
166
/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int val;
        /* Atomic test-and-set via the MIPSPro intrinsic; returns the
         * previous value, so 0 means the lock was free. */
        val = __lock_test_and_set(lock, 1);
        return val == 0 ? 0 : EBUSY;
}

/* Release the lock using the matching MIPSPro intrinsic. */
static inline void __spin_unlock(spinlock_t *lock)
{
        __lock_release(lock);
}

/* Initialise to unlocked; __lock_release also clears the lock word. */
static inline void __spin_lock_init(spinlock_t *lock)
{
        __lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        unsigned int val;
        /* Adding 0 atomically fetches the current value of the lock word. */
        val = __add_and_fetch(lock, 0);
        return val;
}
192
193 #elif defined(MIPS_SPINLOCKS) 
194
/*
 * Load-linked: read *addr and set the hardware reservation for a later
 * store-conditional on the same address.
 * NOTE(review): callers pass a spinlock_t * where an unsigned long is
 * expected — an implicit pointer-to-integer conversion that historical
 * MIPS compilers accepted; verify against the target toolchain.
 */
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

/*
 * Store-conditional: store value to *addr only if the reservation from
 * the preceding load-linked is still intact.  Returns non-zero on
 * success, 0 if the reservation was lost and the store did not happen.
 */
static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

/*
 * Try to acquire the lock without blocking.
 * Returns EBUSY immediately if the lock word is non-zero; otherwise
 * retries the ll/sc pair until the conditional store of 1 succeeds.
 * Returns 0 once the lock is acquired.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked(lock);
                if (mw) 
                        return EBUSY;
        } while (!store_conditional(lock, 1));

        /* Compiler barrier so critical-section accesses stay after the
         * acquiring store. */
        asm volatile("":::"memory");

        return 0;
}

/*
 * Release the lock.  The empty asm with a "memory" clobber is a compiler
 * barrier preceding the unlocking store.
 */
static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

/* Initialise the lock to the unlocked state (0 == unlocked on MIPS). */
static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

/* Return non-zero if the lock is currently held. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}
246
247 #else
248 #error Need to implement spinlock code in spinlock.c
249 #endif
250
251 /*
252  * OS SPECIFIC
253  */
254
/*
 * Give up the CPU so the current lock holder can make progress.
 * With USE_SCHED_YIELD this is sched_yield(); otherwise we nanosleep()
 * for just over 2ms, because Linux busy-loops for shorter delays on
 * real-time tasks.
 *
 * Fix: the timespec is now declared only in the branch that uses it,
 * so the USE_SCHED_YIELD build no longer carries an unused variable.
 */
static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks */
        struct timespec tm;

        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}
268
/*
 * Return 1 if more than one processor is online, 0 otherwise.
 * Falls back to 0 (assume uniprocessor) when sysconf(_SC_NPROC_ONLN)
 * is not available on this platform.
 */
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
        return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
        return 0;
#endif
}
277
278 /*
279  * GENERIC
280  */
281
282 static int smp_machine = 0;
283
284 static inline void __spin_lock(spinlock_t *lock)
285 {
286         int ntries = 0;
287
288         while(__spin_trylock(lock)) {
289                 while(__spin_is_locked(lock)) {
290                         if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
291                                 continue;
292                         yield_cpu();
293                 }
294         }
295 }
296
297 static void __read_lock(tdb_rwlock_t *rwlock)
298 {
299         int ntries = 0;
300
301         while(1) {
302                 __spin_lock(&rwlock->lock);
303
304                 if (!(rwlock->count & RWLOCK_BIAS)) {
305                         rwlock->count++;
306                         __spin_unlock(&rwlock->lock);
307                         return;
308                 }
309         
310                 __spin_unlock(&rwlock->lock);
311
312                 while(rwlock->count & RWLOCK_BIAS) {
313                         if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
314                                 continue;
315                         yield_cpu();
316                 }
317         }
318 }
319
320 static void __write_lock(tdb_rwlock_t *rwlock)
321 {
322         int ntries = 0;
323
324         while(1) {
325                 __spin_lock(&rwlock->lock);
326
327                 if (rwlock->count == 0) {
328                         rwlock->count |= RWLOCK_BIAS;
329                         __spin_unlock(&rwlock->lock);
330                         return;
331                 }
332
333                 __spin_unlock(&rwlock->lock);
334
335                 while(rwlock->count != 0) {
336                         if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
337                                 continue;
338                         yield_cpu();
339                 }
340         }
341 }
342
/* Release a writer lock by clearing RWLOCK_BIAS from the count. */
static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        /* Sanity check: a writer (RWLOCK_BIAS set) must hold the lock. */
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}
355
/* Release a reader lock by decrementing the reader count. */
static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        /* Sanity check: at least one reader must be registered... */
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock\n");

        /* ...and a writer must not hold the lock.  (Both checks emit the
         * same message, so the output does not distinguish them.) */
        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}
371
372 /* TDB SPECIFIC */
373
374 /* lock a list in the database. list -1 is the alloc list */
375 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
376 {
377         tdb_rwlock_t *rwlocks;
378
379         if (!tdb->map_ptr) return -1;
380         rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
381
382         switch(rw_type) {
383         case F_RDLCK:
384                 __read_lock(&rwlocks[list+1]);
385                 break;
386
387         case F_WRLCK:
388                 __write_lock(&rwlocks[list+1]);
389                 break;
390
391         default:
392                 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
393         }
394         return 0;
395 }
396
397 /* unlock the database. */
398 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
399 {
400         tdb_rwlock_t *rwlocks;
401
402         if (!tdb->map_ptr) return -1;
403         rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
404
405         switch(rw_type) {
406         case F_RDLCK:
407                 __read_unlock(&rwlocks[list+1]);
408                 break;
409
410         case F_WRLCK:
411                 __write_unlock(&rwlocks[list+1]);
412                 break;
413
414         default:
415                 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
416         }
417
418         return 0;
419 }
420
421 int tdb_create_rwlocks(int fd, unsigned int hash_size)
422 {
423         unsigned size, i;
424         tdb_rwlock_t *rwlocks;
425
426         size = TDB_SPINLOCK_SIZE(hash_size);
427         rwlocks = malloc(size);
428         if (!rwlocks)
429                 return -1;
430
431         for(i = 0; i < hash_size+1; i++) {
432                 __spin_lock_init(&rwlocks[i].lock);
433                 rwlocks[i].count = 0;
434         }
435
436         /* Write it out (appending to end) */
437         if (write(fd, rwlocks, size) != size) {
438                 free(rwlocks);
439                 return -1;
440         }
441         smp_machine = this_is_smp();
442         free(rwlocks);
443         return 0;
444 }
445
446 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
447 {
448         tdb_rwlock_t *rwlocks;
449         unsigned i;
450
451         if (tdb->header.rwlocks == 0) return 0;
452         if (!tdb->map_ptr) return -1;
453
454         /* We're mmapped here */
455         rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
456         for(i = 0; i < tdb->header.hash_size+1; i++) {
457                 __spin_lock_init(&rwlocks[i].lock);
458                 rwlocks[i].count = 0;
459         }
460         return 0;
461 }
462 #else
/* Spinlock support compiled out: creating the lock region is a no-op
 * and the lock/unlock entry points always return -1. */
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
466
467 /* Non-spinlock version: remove spinlock pointer */
/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        /* Byte offset of the rwlocks field within the on-disk header,
         * computed from the in-memory header layout. */
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        /* Zero the in-memory field, then persist just that field by
         * seeking to its offset and writing it back.  Returns -1 if the
         * seek or the write fails (including a short write). */
        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks)) 
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
481 #endif