/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

   ** NOTE! The following LGPL license applies to the tdb
   ** library. This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
45 #include "lib/tdb/include/tdb.h"
46 #include "system/filesys.h"
55 #if defined(SPARC_SPINLOCKS)
57 static inline int __spin_trylock(spinlock_t *lock)
61 asm volatile("ldstub [%1], %0"
66 return (result == 0) ? 0 : EBUSY;
69 static inline void __spin_unlock(spinlock_t *lock)
71 asm volatile("":::"memory");
75 static inline void __spin_lock_init(spinlock_t *lock)
80 static inline int __spin_is_locked(spinlock_t *lock)
85 #elif defined(POWERPC_SPINLOCKS)
87 static inline int __spin_trylock(spinlock_t *lock)
104 return (result == 1) ? 0 : EBUSY;
107 static inline void __spin_unlock(spinlock_t *lock)
109 asm volatile("eieio":::"memory");
113 static inline void __spin_lock_init(spinlock_t *lock)
118 static inline int __spin_is_locked(spinlock_t *lock)
123 #elif defined(INTEL_SPINLOCKS)
125 static inline int __spin_trylock(spinlock_t *lock)
129 asm volatile("xchgl %0,%1"
130 : "=r" (oldval), "=m" (*lock)
134 return oldval > 0 ? 0 : EBUSY;
137 static inline void __spin_unlock(spinlock_t *lock)
139 asm volatile("":::"memory");
143 static inline void __spin_lock_init(spinlock_t *lock)
148 static inline int __spin_is_locked(spinlock_t *lock)
153 #elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
155 /* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
156 * sync(3) for the details of the intrinsic operations.
158 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
161 #ifndef _SAMBA_BUILD_
163 /* MIPSPro 7.3 has "__inline" as an extension, but not "inline. */
164 #define inline __inline
166 #endif /* _SAMBA_BUILD_ */
168 /* Returns 0 if the lock is acquired, EBUSY otherwise. */
169 static inline int __spin_trylock(spinlock_t *lock)
172 val = __lock_test_and_set(lock, 1);
173 return val == 0 ? 0 : EBUSY;
176 static inline void __spin_unlock(spinlock_t *lock)
178 __lock_release(lock);
181 static inline void __spin_lock_init(spinlock_t *lock)
183 __lock_release(lock);
186 /* Returns 1 if the lock is held, 0 otherwise. */
187 static inline int __spin_is_locked(spinlock_t *lock)
190 val = __add_and_fetch(lock, 0);
194 #elif defined(MIPS_SPINLOCKS)
196 static inline unsigned int load_linked(unsigned long addr)
200 __asm__ __volatile__("ll\t%0,(%1)"
207 static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
211 __asm__ __volatile__("sc\t%0,(%2)"
213 : "0" (value), "r" (addr));
217 static inline int __spin_trylock(spinlock_t *lock)
222 mw = load_linked(lock);
225 } while (!store_conditional(lock, 1));
227 asm volatile("":::"memory");
232 static inline void __spin_unlock(spinlock_t *lock)
234 asm volatile("":::"memory");
238 static inline void __spin_lock_init(spinlock_t *lock)
243 static inline int __spin_is_locked(spinlock_t *lock)
249 #error Need to implement spinlock code in spinlock.c
256 static void yield_cpu(void)
260 #ifdef USE_SCHED_YIELD
263 /* Linux will busy loop for delays < 2ms on real time tasks */
265 tm.tv_nsec = 2000000L + 1;
266 nanosleep(&tm, NULL);
270 static int this_is_smp(void)
272 #if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
273 return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
283 static int smp_machine = 0;
285 static inline void __spin_lock(spinlock_t *lock)
289 while(__spin_trylock(lock)) {
290 while(__spin_is_locked(lock)) {
291 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
298 static void __read_lock(tdb_rwlock_t *rwlock)
303 __spin_lock(&rwlock->lock);
305 if (!(rwlock->count & RWLOCK_BIAS)) {
307 __spin_unlock(&rwlock->lock);
311 __spin_unlock(&rwlock->lock);
313 while(rwlock->count & RWLOCK_BIAS) {
314 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
321 static void __write_lock(tdb_rwlock_t *rwlock)
326 __spin_lock(&rwlock->lock);
328 if (rwlock->count == 0) {
329 rwlock->count |= RWLOCK_BIAS;
330 __spin_unlock(&rwlock->lock);
334 __spin_unlock(&rwlock->lock);
336 while(rwlock->count != 0) {
337 if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
344 static void __write_unlock(tdb_rwlock_t *rwlock)
346 __spin_lock(&rwlock->lock);
349 if (!(rwlock->count & RWLOCK_BIAS))
350 fprintf(stderr, "bug: write_unlock\n");
353 rwlock->count &= ~RWLOCK_BIAS;
354 __spin_unlock(&rwlock->lock);
357 static void __read_unlock(tdb_rwlock_t *rwlock)
359 __spin_lock(&rwlock->lock);
363 fprintf(stderr, "bug: read_unlock\n");
365 if (rwlock->count & RWLOCK_BIAS)
366 fprintf(stderr, "bug: read_unlock\n");
370 __spin_unlock(&rwlock->lock);
375 /* lock a list in the database. list -1 is the alloc list */
376 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
378 tdb_rwlock_t *rwlocks;
380 if (!tdb->map_ptr) return -1;
381 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
385 __read_lock(&rwlocks[list+1]);
389 __write_lock(&rwlocks[list+1]);
393 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
398 /* unlock the database. */
399 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
401 tdb_rwlock_t *rwlocks;
403 if (!tdb->map_ptr) return -1;
404 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
408 __read_unlock(&rwlocks[list+1]);
412 __write_unlock(&rwlocks[list+1]);
416 return TDB_ERRCODE(TDB_ERR_LOCK, -1);
422 int tdb_create_rwlocks(int fd, unsigned int hash_size)
424 unsigned int size, i;
425 tdb_rwlock_t *rwlocks;
427 size = TDB_SPINLOCK_SIZE(hash_size);
428 rwlocks = malloc(size);
432 for(i = 0; i < hash_size+1; i++) {
433 __spin_lock_init(&rwlocks[i].lock);
434 rwlocks[i].count = 0;
437 /* Write it out (appending to end) */
438 if (write(fd, rwlocks, size) != size) {
442 smp_machine = this_is_smp();
447 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
449 tdb_rwlock_t *rwlocks;
452 if (tdb->header.rwlocks == 0) return 0;
453 if (!tdb->map_ptr) return -1;
455 /* We're mmapped here */
456 rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
457 for(i = 0; i < tdb->header.hash_size+1; i++) {
458 __spin_lock_init(&rwlocks[i].lock);
459 rwlocks[i].count = 0;
464 int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
465 int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
466 int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
468 /* Non-spinlock version: remove spinlock pointer */
469 int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
471 tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
472 - (char *)&tdb->header);
474 tdb->header.rwlocks = 0;
475 if (lseek(tdb->fd, off, SEEK_SET) != off
476 || write(tdb->fd, (void *)&tdb->header.rwlocks,
477 sizeof(tdb->header.rwlocks))
478 != sizeof(tdb->header.rwlocks))