/* 
   Unix SMB/Netbios implementation.
   Version 1.9.
   Shared memory functions - SYSV IPC implementation
   Copyright (C) Erik Devriendt 1996-1997
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#include "includes.h"


#ifdef USE_SYSV_IPC

extern int DEBUGLEVEL;

#define SHMEM_KEY ((key_t)0x280267)
#define SEMAPHORE_KEY (SHMEM_KEY+2)

#define SHM_MAGIC 0x53484100
#define SHM_VERSION 2

#define IPC_PERMS ((SHM_R | SHM_W) | (SHM_R>>3) | (SHM_R>>6))

static int shm_id;
static int sem_id;
static int shm_size;
static int hash_size;
static int global_lock_count;

struct ShmHeader {
        int shm_magic;
        int shm_version;
        int total_size;         /* in bytes */
        BOOL consistent;
        int first_free_off;
        int userdef_off;        /* a user-defined offset; can be used to store
                                   the root of a tree or list */
        struct {                /* a cell is a range of bytes of sizeof(struct
                                   ShmBlockDesc) size */
                int cells_free;
                int cells_used;
                int cells_system;  /* number of cells used as allocated
                                      block descriptors */
        } statistics;
};

#define SHM_NOT_FREE_OFF (-1)
struct ShmBlockDesc
{
        int next;       /* offset of the next block in the free list, or
                           SHM_NOT_FREE_OFF when the block is in use */
        int size;       /* user size in BlockDescSize units */
};

#define EOList_Addr     (struct ShmBlockDesc *)( 0 )
#define EOList_Off      (NULL_OFFSET)

#define CellSize        sizeof(struct ShmBlockDesc)

/* HeaderSize aligned on an 8 byte boundary */
#define AlignedHeaderSize       ((sizeof(struct ShmHeader)+7) & ~7)
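
/*
 * Layout sketch (an illustration of the arithmetic above, not part of the
 * original code): the segment starts with the header padded out to
 * AlignedHeaderSize, and everything after it is carved into cells of
 * CellSize bytes, where CellSize == sizeof(struct ShmBlockDesc) -- typically
 * 8 bytes when int is 4 bytes.  Every allocation is one descriptor cell
 * followed by its data cells, so a request for 20 bytes costs
 * (20 + CellSize - 1) / CellSize == 3 data cells plus 1 descriptor cell.
 *
 *   | ShmHeader (padded) | desc | data ... | desc | data ... |
 *   offset 0             ^ first_free_off points here after shm_initialize()
 */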

static struct ShmHeader *shm_header_p = (struct ShmHeader *)0;

static BOOL shm_initialize_called = False;

static int read_only;

static BOOL sem_lock(int i)
{
        struct sembuf sb;
        if (read_only) return True;

        sb.sem_num = i;
        sb.sem_op = -1;
        sb.sem_flg = SEM_UNDO;

        if (semop(sem_id, &sb, 1) != 0) {
                DEBUG(0,("ERROR: IPC lock failed on semaphore %d\n", i));
                return False;
        }

        return True;
}

static BOOL sem_unlock(int i)
{
        struct sembuf sb;
        if (read_only) return True;

        sb.sem_num = i;
        sb.sem_op = 1;
        sb.sem_flg = SEM_UNDO;

        if (semop(sem_id, &sb, 1) != 0) {
                DEBUG(0,("ERROR: IPC unlock failed on semaphore %d\n", i));
                return False;
        }

        return True;
}
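
/*
 * Semaphore conventions in this file: the set created in sysv_shm_open()
 * has lp_shmem_hash_size()+1 members, each initialised to 1.  Semaphore 0
 * is the global allocator lock (global_lock()/global_unlock() below), and
 * semaphores 1..hash_size protect the individual hash buckets
 * (shm_lock_hash_entry()/shm_unlock_hash_entry()).  sem_lock()/sem_unlock()
 * therefore act as a simple binary mutex: semop(-1) to take it, semop(+1)
 * to release it, with SEM_UNDO so the kernel releases it if we die.
 */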

static BOOL global_lock(void)
{
        global_lock_count++;
        if (global_lock_count == 1)
                return sem_lock(0);
        return True;
}

static BOOL global_unlock(void)
{
        global_lock_count--;
        if (global_lock_count == 0)
                return sem_unlock(0);
        return True;
}

static void *shm_offset2addr(int offset)
{
        if (offset == NULL_OFFSET)
                return (void *)(0);

        if (!shm_header_p)
                return (void *)(0);

        return (void *)((char *)shm_header_p + offset);
}

static int shm_addr2offset(void *addr)
{
        if (!addr)
                return NULL_OFFSET;

        if (!shm_header_p)
                return NULL_OFFSET;

        return (int)((char *)addr - (char *)shm_header_p);
}
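
/*
 * Offsets rather than raw pointers are stored inside the segment because
 * every process may attach it at a different virtual address.  A rough
 * round-trip sketch (illustrative only; "p" is a hypothetical pointer into
 * the mapped segment):
 *
 *      int off = shm_addr2offset(p);
 *      struct ShmBlockDesc *q = shm_offset2addr(off);
 *
 * Within one process q equals p; another process converting the same offset
 * gets the address that is valid in its own mapping.
 */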


static int shm_alloc(int size)
{
        unsigned num_cells;
        struct ShmBlockDesc *scanner_p;
        struct ShmBlockDesc *prev_p;
        struct ShmBlockDesc *new_p;
        int result_offset;


        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_alloc : shmem not mapped\n"));
                return NULL_OFFSET;
        }

        global_lock();

        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_alloc : shmem not consistent\n"));
                global_unlock();
                return NULL_OFFSET;
        }

        /* calculate the number of cells */
        num_cells = (size + CellSize - 1) / CellSize;

        /* set the start of the scan */
        prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        scanner_p = prev_p;

        /* scan the free list to find a matching free space */
        while ((scanner_p != EOList_Addr) && (scanner_p->size < num_cells)) {
                prev_p = scanner_p;
                scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
        }

        /* at this point scanner_p points to a block header or to the end of
           the list */
        if (scanner_p == EOList_Addr) {
                DEBUG(0,("ERROR shm_alloc : alloc of %d bytes failed, no free space found\n",size));
                global_unlock();
                return (NULL_OFFSET);
        }

        /* going to modify shared mem */
        shm_header_p->consistent = False;

        /* if we found a good one: scanner_p == the good one */
        if (scanner_p->size <= num_cells + 2) {
                /* there is no use in making a new block, it would be too
                 * small anyway; we will link out scanner_p */
                if (prev_p == scanner_p) {
                        shm_header_p->first_free_off = scanner_p->next;
                } else {
                        prev_p->next = scanner_p->next;
                }
                shm_header_p->statistics.cells_free -= scanner_p->size;
                shm_header_p->statistics.cells_used += scanner_p->size;
        } else {
                /* make a new free block out of the remainder */
                new_p = scanner_p + 1 + num_cells;
                new_p->size = scanner_p->size - num_cells - 1;
                new_p->next = scanner_p->next;
                scanner_p->size = num_cells;
                scanner_p->next = shm_addr2offset(new_p);

                if (prev_p != scanner_p) {
                        prev_p->next = shm_addr2offset(new_p);
                } else {
                        shm_header_p->first_free_off = shm_addr2offset(new_p);
                }
                shm_header_p->statistics.cells_free -= num_cells+1;
                shm_header_p->statistics.cells_used += num_cells;
                shm_header_p->statistics.cells_system += 1;
        }

        result_offset = shm_addr2offset(&(scanner_p[1]));
        scanner_p->next = SHM_NOT_FREE_OFF;

        /* end modification of shared mem */
        shm_header_p->consistent = True;

        DEBUG(6,("shm_alloc : request for %d bytes, allocated %d bytes at offset %d\n",size,scanner_p->size*CellSize,result_offset));

        global_unlock();
        return result_offset;
}
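
/*
 * Worked example (illustrative only; it assumes 4-byte ints, so CellSize is
 * 8 bytes): shm_alloc(20) needs (20+7)/8 == 3 data cells.  If the first
 * suitable free block holds 100 cells, it is split: its descriptor now
 * describes the 3-cell allocation, and a new descriptor for the remaining
 * 100 - 3 - 1 == 96 cells is linked into the free list in its place.  If
 * the free block held only 3, 4 or 5 cells, splitting would leave no usable
 * remainder, so the whole block is handed out instead (the
 * "size <= num_cells + 2" case above).
 */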


/* 
 * Function to create the hash table for the share mode entries. Called
 * while the smb shared memory is globally locked.
 */
static BOOL shm_create_hash_table(unsigned int size)
{
        size *= sizeof(int);

        global_lock();
        shm_header_p->userdef_off = shm_alloc(size);

        if (shm_header_p->userdef_off == NULL_OFFSET) {
                DEBUG(0,("shm_create_hash_table: Failed to create hash table of size %d\n",size));
                global_unlock();
                return False;
        }

        /* Clear hash buckets. */
        memset(shm_offset2addr(shm_header_p->userdef_off), '\0', size);
        global_unlock();
        return True;
}
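
/*
 * The "hash table" is simply an array of lp_shmem_hash_size() ints hanging
 * off userdef_off; each entry is 0 after the memset above and, presumably,
 * later holds the offset of the first share mode record in its bucket (that
 * interpretation belongs to the calling share mode code, not this file).
 * Illustrative access sketch, with "hash_val" a hypothetical bucket index:
 *
 *      int *buckets = (int *)shm_offset2addr(shm_get_userdef_off());
 *      int first_off = buckets[hash_val];
 */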

static BOOL shm_validate_header(int size)
{
        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_validate_header : shmem not mapped\n"));
                return False;
        }

        if (shm_header_p->shm_magic != SHM_MAGIC) {
                DEBUG(0,("ERROR shm_validate_header : bad magic\n"));
                return False;
        }

        if (shm_header_p->shm_version != SHM_VERSION) {
                DEBUG(0,("ERROR shm_validate_header : bad version %X\n",shm_header_p->shm_version));
                return False;
        }

        if (shm_header_p->total_size != size) {
                DEBUG(0,("ERROR shm_validate_header : shmem size mismatch (old = %d, new = %d)\n",shm_header_p->total_size,size));
                return False;
        }

        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_validate_header : shmem not consistent\n"));
                return False;
        }
        return True;
}

static BOOL shm_initialize(int size)
{
        struct ShmBlockDesc *first_free_block_p;

        DEBUG(5,("shm_initialize : initializing shmem file of size %d\n",size));

        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_initialize : shmem not mapped\n"));
                return False;
        }

        shm_header_p->shm_magic = SHM_MAGIC;
        shm_header_p->shm_version = SHM_VERSION;
        shm_header_p->total_size = size;
        shm_header_p->first_free_off = AlignedHeaderSize;
        shm_header_p->userdef_off = NULL_OFFSET;

        first_free_block_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        first_free_block_p->next = EOList_Off;
        first_free_block_p->size = (size - AlignedHeaderSize - CellSize) / CellSize;

        shm_header_p->statistics.cells_free = first_free_block_p->size;
        shm_header_p->statistics.cells_used = 0;
        shm_header_p->statistics.cells_system = 1;

        shm_header_p->consistent = True;

        shm_initialize_called = True;

        return True;
}

static void shm_solve_neighbors(struct ShmBlockDesc *head_p)
{
        struct ShmBlockDesc *next_p;

        /* Check if head_p and head_p->next are neighbors and if so
           join them */
        if (head_p == EOList_Addr) return;
        if (head_p->next == EOList_Off) return;

        next_p = (struct ShmBlockDesc *)shm_offset2addr(head_p->next);
        if ((head_p + head_p->size + 1) == next_p) {
                head_p->size += next_p->size + 1;       /* adapt size */
                head_p->next = next_p->next;            /* link out */

                shm_header_p->statistics.cells_free += 1;
                shm_header_p->statistics.cells_system -= 1;
        }
}
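
/*
 * Coalescing example (illustrative): if a free block has its descriptor in
 * cell 10 with size 3 (data in cells 11-13) and the next free block's
 * descriptor sits in cell 14 with size 5, then head_p + 3 + 1 == next_p, so
 * the two merge into a single block of size 3 + 5 + 1 == 9 and the second
 * descriptor cell returns to the free pool (hence cells_free++ and
 * cells_system--).
 */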



static BOOL shm_close(void)
{
        return True;
}


static BOOL shm_free(int offset)
{
        struct ShmBlockDesc *header_p;  /* pointer to the header of the
                                           block to free */
        struct ShmBlockDesc *scanner_p; /* used to scan the list */
        struct ShmBlockDesc *prev_p;    /* holds the previous block in the
                                           list */

        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
                return False;
        }

        global_lock();

        if (!shm_header_p->consistent) {
                DEBUG(0,("ERROR shm_free : shmem not consistent\n"));
                global_unlock();
                return False;
        }

        /* make a pointer to the header of the block */
        header_p = ((struct ShmBlockDesc *)shm_offset2addr(offset) - 1); 

        if (header_p->next != SHM_NOT_FREE_OFF) {
                DEBUG(0,("ERROR shm_free : bad offset (%d)\n",offset));
                global_unlock();
                return False;
        }

        /* find a place in the free_list to put the header in */

        /* set scanner and previous pointer to the start of the list */
        prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
        scanner_p = prev_p;

        while ((scanner_p != EOList_Addr) && 
               (scanner_p < header_p)) { 
                /* while we didn't scan past its position */
                prev_p = scanner_p;
                scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
        }

        shm_header_p->consistent = False;

        DEBUG(6,("shm_free : freeing %d bytes at offset %d\n",
                 header_p->size*CellSize,offset));

        if (scanner_p == prev_p) {
                shm_header_p->statistics.cells_free += header_p->size;
                shm_header_p->statistics.cells_used -= header_p->size;

                /* we must free it at the beginning of the list, so point
                   the free list at this block header */
                shm_header_p->first_free_off = shm_addr2offset(header_p);

                /* scanner is the one that was first in the list */
                header_p->next = shm_addr2offset(scanner_p);
                shm_solve_neighbors(header_p);  /* join neighboring blocks */

                shm_header_p->consistent = True;
        } else {
                shm_header_p->statistics.cells_free += header_p->size;
                shm_header_p->statistics.cells_used -= header_p->size;

                prev_p->next = shm_addr2offset(header_p);
                header_p->next = shm_addr2offset(scanner_p);
                shm_solve_neighbors(header_p);
                shm_solve_neighbors(prev_p);

                shm_header_p->consistent = True;
        }

        global_unlock();
        return True;
}


static int shm_get_userdef_off(void)
{
        if (!shm_header_p)
                return NULL_OFFSET;
        else
                return shm_header_p->userdef_off;
}

/*******************************************************************
  Lock a particular hash bucket entry.
  ******************************************************************/
static BOOL shm_lock_hash_entry(unsigned int entry)
{
        DEBUG(0,("hash lock %d\n", entry));
        return sem_lock(entry+1);
}

/*******************************************************************
  Unlock a particular hash bucket entry.
  ******************************************************************/
static BOOL shm_unlock_hash_entry(unsigned int entry)
{
        DEBUG(0,("hash unlock %d\n", entry));
        return sem_unlock(entry+1);
}


/*******************************************************************
  Gather statistics on shared memory usage.
  ******************************************************************/
static BOOL shm_get_usage(int *bytes_free,
                          int *bytes_used,
                          int *bytes_overhead)
{
        if (!shm_header_p) {
                /* not mapped yet */
                DEBUG(0,("ERROR shm_get_usage : shmem not mapped\n"));
                return False;
        }

        *bytes_free = shm_header_p->statistics.cells_free * CellSize;
        *bytes_used = shm_header_p->statistics.cells_used * CellSize;
        *bytes_overhead = shm_header_p->statistics.cells_system * CellSize + AlignedHeaderSize;

        return True;
}

static struct shmem_ops shmops = {
        shm_close,
        shm_alloc,
        shm_free,
        shm_get_userdef_off,
        shm_offset2addr,
        shm_addr2offset,
        shm_lock_hash_entry,
        shm_unlock_hash_entry,
        shm_get_usage,
};

/*******************************************************************
  open the shared memory
  ******************************************************************/
struct shmem_ops *sysv_shm_open(int size, int ronly)
{
        BOOL created_new = False;
        BOOL other_processes;
        struct shmid_ds shm_ds;
        struct semid_ds sem_ds;
        union semun su;
        int i;

        read_only = ronly;

        shm_size = size;

        DEBUG(4,("Trying sysv shmem open of size %d\n", size));

        /* first the semaphore */
        sem_id = semget(SEMAPHORE_KEY, 0, 0);
        if (sem_id == -1) {
                if (read_only) return NULL;

                sem_id = semget(SEMAPHORE_KEY, lp_shmem_hash_size()+1, 
                                IPC_CREAT | IPC_EXCL | IPC_PERMS);

                if (sem_id == -1) {
                        DEBUG(0,("Can't create or use semaphore %s\n", 
                                 strerror(errno)));
                }   

                if (sem_id != -1) {
                        su.val = 1;
                        for (i=0;i<lp_shmem_hash_size()+1;i++) {
                                if (semctl(sem_id, i, SETVAL, su) != 0) {
                                        DEBUG(1,("Failed to init semaphore %d\n", i));
                                }
                        }
                }
        }
        if (sem_id == -1) {
                /* creation may have raced with another process; retry the
                   plain get of the existing set */
                sem_id = semget(SEMAPHORE_KEY, 0, 0);
        }
        if (sem_id == -1) {
                DEBUG(0,("Can't create or use semaphore %s\n", 
                         strerror(errno)));
                return NULL;
        }   

        su.buf = &sem_ds;
        if (semctl(sem_id, 0, IPC_STAT, su) != 0) {
                DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
        }
        hash_size = sem_ds.sem_nsems;
        if (hash_size != lp_shmem_hash_size()+1) {
                DEBUG(0,("WARNING: nsems=%d\n", hash_size));
        }

        if (!global_lock())
                return NULL;

        /* try to use an existing key */
        shm_id = shmget(SHMEM_KEY, shm_size, 0);

        /* if that failed then create one */
        if (shm_id == -1) {
                if (read_only) return NULL;
                shm_id = shmget(SHMEM_KEY, shm_size, IPC_CREAT | IPC_EXCL);
                created_new = (shm_id != -1);
        }

        if (shm_id == -1) {
                DEBUG(0,("Can't create or use IPC area\n"));
                global_unlock();
                return NULL;
        }   


        shm_header_p = (struct ShmHeader *)shmat(shm_id, 0, 
                                                 read_only?SHM_RDONLY:0);
        if (shm_header_p == (struct ShmHeader *)-1) {
                DEBUG(0,("Can't attach to IPC area\n"));
                global_unlock();
                return NULL;
        }

        /* to find out if some other process is already mapping the IPC area,
           we look at the attach count returned by IPC_STAT */
        if (shmctl(shm_id, IPC_STAT, &shm_ds) != 0) {
                DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
        }

        /* set the permissions */
        if (!read_only) {
                shm_ds.shm_perm.mode = IPC_PERMS;
                shmctl(shm_id, IPC_SET, &shm_ds);
        }

        other_processes = (shm_ds.shm_nattch > 1);

        if (!read_only && !other_processes) {
                memset((char *)shm_header_p, 0, shm_size);
                shm_initialize(shm_size);
                shm_create_hash_table(lp_shmem_hash_size());
                DEBUG(1,("Initialised IPC area of size %d\n", shm_size));
        } else if (!shm_validate_header(shm_size)) {
                /* existing area is corrupt, the samba admin should remove
                   it by hand */
                DEBUG(0,("ERROR shm_open : corrupt IPC area - remove it!\n"));
                global_unlock();
                return NULL;
        }

        global_unlock();
        return &shmops;
}
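
/*
 * Rough usage sketch (illustrative only; the struct member names below are
 * assumptions -- the real ones are defined by struct shmem_ops in the Samba
 * headers -- and the actual caller is the locking/share mode startup code):
 *
 *      struct shmem_ops *ops = sysv_shm_open(wanted_size, False);
 *      if (ops) {
 *              int off = ops->shm_alloc(100);
 *              void *p = ops->offset2addr(off);
 *              ops->shm_free(off);
 *              ops->shm_close();
 *      }
 *
 * "wanted_size" is a hypothetical value; Samba normally takes the size and
 * the read-only flag from its configuration and role.
 */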


#else
int ipc_dummy_procedure(void)
{
        return 0;
}
#endif /* USE_SYSV_IPC */