/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

#ifndef _SAMBA_BUILD_
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#include "tdb.h"

#define DEBUG
#else
#include "includes.h"
#include "lib/tdb/include/tdb.h"
#include "system/filesys.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

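/* ldstub atomically loads the byte at [lock] and stores 0xff into it, so a
 * result of 0 means the lock was free and is now held by us. */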
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

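/* lwarx/stwcx. form a load-reserved/store-conditional pair: the store only
 * succeeds if nothing modified the word since the load, and the trailing
 * isync keeps later accesses from being executed before the lock is held. */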
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

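/* Note the inverted sense on x86: the lock word is 1 when free and 0 when
 * held. xchgl with a memory operand is implicitly locked and swaps 0 in,
 * so a positive old value means we acquired the lock. */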
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#ifndef _SAMBA_BUILD_

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* _SAMBA_BUILD_ */

/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int val;
        val = __lock_test_and_set(lock, 1);
        return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        __lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        __lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        unsigned int val;
        val = __add_and_fetch(lock, 0);
        return val;
}

#elif defined(MIPS_SPINLOCKS)

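/* ll/sc are the MIPS load-linked/store-conditional pair: sc stores its value
 * only if the linked location is untouched since the ll, and returns 0 on
 * failure, so the trylock loop below retries until the store is clean. */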
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked(lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional(lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

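/* Back off when a lock is contended: prefer sched_yield() where available,
 * otherwise sleep for just over 2ms so nanosleep really blocks instead of
 * hitting the busy-loop path mentioned below. */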
static void yield_cpu(void)
{
        struct timespec tm;

#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks */
        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
        return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
        return 0;
#endif
}

/*
 * GENERIC
 */

static int smp_machine = 0;

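/* Spin briefly on SMP machines, where another CPU may release the lock at
 * any moment, but yield immediately on uniprocessors where spinning can
 * never help. */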
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while(__spin_trylock(lock)) {
                while(__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

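/* Reader/writer locks built on the spinlock above: rwlock->count holds the
 * number of active readers, and a writer claims exclusive ownership by
 * setting the RWLOCK_BIAS bit, which blocks new readers and writers. */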
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock\n");

        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

/* lock a list in the database. list -1 is the alloc list, which lives at
   index 0 of the rwlock array (hence the list+1 below) */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}

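/* Allocate and initialise one rwlock per hash chain plus one for the alloc
 * list, then append the whole array to the database file; it is addressed
 * later through the mmap via tdb->header.rwlocks. */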
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned int size, i;
        tdb_rwlock_t *rwlocks;

        size = TDB_SPINLOCK_SIZE(hash_size);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for(i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

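/* Force every rwlock in the mapped file back to its initial state, e.g.
 * after a crashed process left locks held. */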
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned int i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for(i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
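/* The rwlocks header field is zeroed and that single field is written back
 * to the file at its offset within the header, so other openers of the
 * database see that spinlocks are disabled. */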
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif