fix for broken sunos4 includes (doesn't have SHM_R)
[kai/samba.git] / source3 / locking / shmem_sysv.c
1 /* 
2    Unix SMB/Netbios implementation.
3    Version 1.9.
4    Shared memory functions - SYSV IPC implementation
5    Copyright (C) Erik Devriendt 1996-1997
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21 */
22
23 #include "includes.h"
24
25
26 #ifdef USE_SYSV_IPC
27
extern int DEBUGLEVEL;

/* Well-known IPC keys shared by all smbd processes on this host. */
#define SHMEM_KEY ((key_t)0x280267)
#define SEMAPHORE_KEY (SHMEM_KEY+2)

/* Magic ("SHA\0") and layout version stamped into the shared header. */
#define SHM_MAGIC 0x53484100
#define SHM_VERSION 2

/* Owner read/write, group/other read.  Some systems (e.g. SunOS 4)
   lack SHM_R/SHM_W, so fall back to the octal equivalent. */
#ifdef SHM_R
#define IPC_PERMS ((SHM_R | SHM_W) | (SHM_R>>3) | (SHM_R>>6))
#else
#define IPC_PERMS 0644
#endif

static int shm_id;            /* SysV shared memory segment id */
static int sem_id;            /* SysV semaphore set id */
static int shm_size;          /* size of the mapped area in bytes */
static int hash_size;         /* number of semaphores in the set */
static int global_lock_count; /* nesting depth of the global lock */
47
/* Header stored at the very start of the shared memory area.  All
   offsets are byte offsets relative to this header, so that every
   attaching process can use them regardless of its mapping address. */
struct ShmHeader {
   int shm_magic;       /* must equal SHM_MAGIC */
   int shm_version;     /* must equal SHM_VERSION */
   int total_size;	/* in bytes */
   BOOL consistent;     /* False while a writer is mutating the area */
   int first_free_off;  /* offset of the first free-list block */
   int userdef_off;    /* a userdefined offset. can be used to store
			  root of tree or list */
   struct {		/* a cell is a range of bytes of sizeof(struct
			   ShmBlockDesc) size */
	   int cells_free;
	   int cells_used;
	   int cells_system; /* number of cells used as allocated
				block descriptors */
   } statistics;
};
64
/* Block descriptor stored immediately before each allocation.  The
   `next' field doubles as an in-use marker (SHM_NOT_FREE_OFF). */
#define SHM_NOT_FREE_OFF (-1)
struct ShmBlockDesc
{
   int next;	/* offset of next block in the free list or
		   SHM_NOT_FREE_OFF when block in use */
   int          size;   /* user size in BlockDescSize units */
};
72
/* End-of-free-list sentinels, as a local address and as an offset. */
#define EOList_Addr	(struct ShmBlockDesc *)( 0 )
#define EOList_Off      (NULL_OFFSET)

/* Allocation granularity: one cell == one block descriptor. */
#define CellSize	sizeof(struct ShmBlockDesc)

/* HeaderSize aligned on 8 byte boundary */
#define AlignedHeaderSize  	((sizeof(struct ShmHeader)+7) & ~7)

/* Local address of the mapped shared header; NULL until shmat(). */
static struct ShmHeader *shm_header_p = (struct ShmHeader *)0;

/* True once this process has initialised a freshly created area. */
static BOOL shm_initialize_called = False;

/* Non-zero when attached read-only; semaphore locking is skipped. */
static int read_only;
86
87 static BOOL sem_lock(int i)
88 {
89         struct sembuf sb;
90         if (read_only) return True;
91         
92         sb.sem_num = i;
93         sb.sem_op = -1;
94         sb.sem_flg = SEM_UNDO;
95
96         if (semop(sem_id, &sb, 1) != 0) {
97                 DEBUG(0,("ERROR: IPC lock failed on semaphore %d\n", i));
98                 return False;
99         }
100
101         return True;
102 }
103
104 static BOOL sem_unlock(int i)
105 {
106         struct sembuf sb;
107         if (read_only) return True;
108
109         sb.sem_num = i;
110         sb.sem_op = 1;
111         sb.sem_flg = SEM_UNDO;
112
113         if (semop(sem_id, &sb, 1) != 0) {
114                 DEBUG(0,("ERROR: IPC unlock failed on semaphore %d\n", i));
115                 return False;
116         }
117
118         return True;
119 }
120
121 static BOOL global_lock(void)
122 {
123         global_lock_count++;
124         if (global_lock_count == 1)
125                 return sem_lock(0);
126         return True;
127 }
128
129 static BOOL global_unlock(void)
130 {
131         global_lock_count--;
132         if (global_lock_count == 0)
133                 return sem_unlock(0);
134         return True;
135 }
136
137 static void *shm_offset2addr(int offset)
138 {
139    if (offset == NULL_OFFSET )
140       return (void *)(0);
141    
142    if (!shm_header_p)
143       return (void *)(0);
144    
145    return (void *)((char *)shm_header_p + offset );
146 }
147
148 static int shm_addr2offset(void *addr)
149 {
150    if (!addr)
151       return NULL_OFFSET;
152    
153    if (!shm_header_p)
154       return NULL_OFFSET;
155    
156    return (int)((char *)addr - (char *)shm_header_p);
157 }
158
159
/*
 * Allocate `size' bytes from the shared area and return the offset of
 * the usable memory, or NULL_OFFSET on failure.  Takes the global
 * lock.  The free list is searched first-fit; sizes are rounded up to
 * whole cells (struct ShmBlockDesc units) and every allocation is
 * preceded by a one-cell descriptor.
 */
static int shm_alloc(int size)
{
	unsigned num_cells ;
	struct ShmBlockDesc *scanner_p;
	struct ShmBlockDesc *prev_p;
	struct ShmBlockDesc *new_p;
	int result_offset;
   
   
	if (!shm_header_p) {
		/* not mapped yet */
		DEBUG(0,("ERROR shm_alloc : shmem not mapped\n"));
		return NULL_OFFSET;
	}
	
	global_lock();
	
	if (!shm_header_p->consistent) {
		DEBUG(0,("ERROR shm_alloc : shmem not consistent\n"));
		global_unlock();
		return NULL_OFFSET;
	}
	
	/* calculate	the number of cells */
	num_cells = (size + CellSize -1) / CellSize;
	
	/* set start	of scan */
	prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
	scanner_p =	prev_p ;
	
	/* scan the free list to find a matching free space */
	while ((scanner_p != EOList_Addr) && (scanner_p->size < num_cells)) {
		prev_p = scanner_p;
		scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
	}
   
	/* at this point scanner point to a block header or to the end of
	   the list */
	if (scanner_p == EOList_Addr) {
		DEBUG(0,("ERROR shm_alloc : alloc of %d bytes failed, no free space found\n",size));
		global_unlock();
		return (NULL_OFFSET);
	}
   
	/* going to modify shared mem */
	shm_header_p->consistent = False;
	
	/* if we found a good one : scanner == the good one */
	if (scanner_p->size <= num_cells + 2) {
		/* there is no use in making a new one, it will be too small anyway 
		 *	 we will link out scanner
		 */
		if ( prev_p == scanner_p ) {
			/* scanner was the head of the free list */
			shm_header_p->first_free_off = scanner_p->next ;
		} else {
			prev_p->next = scanner_p->next ;
		}
		shm_header_p->statistics.cells_free -= scanner_p->size;
		shm_header_p->statistics.cells_used += scanner_p->size;
	} else {
		/* Make a new one: split the found block; the tail becomes a
		   new free block whose own descriptor costs one extra cell */
		new_p = scanner_p + 1 + num_cells;
		new_p->size = scanner_p->size - num_cells - 1;
		new_p->next = scanner_p->next;
		scanner_p->size = num_cells;
		scanner_p->next = shm_addr2offset(new_p);
		
		if (prev_p != scanner_p) {
			prev_p->next	   = shm_addr2offset(new_p)  ;
		} else {
			shm_header_p->first_free_off = shm_addr2offset(new_p);
		}
		shm_header_p->statistics.cells_free -= num_cells+1;
		shm_header_p->statistics.cells_used += num_cells;
		shm_header_p->statistics.cells_system += 1;
	}

	/* the caller's data starts one cell past the descriptor */
	result_offset = shm_addr2offset( &(scanner_p[1]) );
	scanner_p->next =	SHM_NOT_FREE_OFF ;

	/* end modification of shared mem */
	shm_header_p->consistent = True;
	
	DEBUG(6,("shm_alloc : request for %d bytes, allocated %d bytes at offset %d\n",size,scanner_p->size*CellSize,result_offset ));

	global_unlock();
	return result_offset;
}   
248
249
250
251 /* 
252  * Function to create the hash table for the share mode entries. Called
253  * when smb shared memory is global locked.
254  */
255 static BOOL shm_create_hash_table( unsigned int size )
256 {
257         size *= sizeof(int);
258
259         global_lock();
260         shm_header_p->userdef_off = shm_alloc( size );
261
262         if(shm_header_p->userdef_off == NULL_OFFSET) {
263                 DEBUG(0,("shm_create_hash_table: Failed to create hash table of size %d\n",size));
264                 global_unlock();
265                 return False;
266         }
267
268         /* Clear hash buckets. */
269         memset( shm_offset2addr(shm_header_p->userdef_off), '\0', size);
270         global_unlock();
271         return True;
272 }
273
274 static BOOL shm_validate_header(int size)
275 {
276         if( !shm_header_p ) {
277                 /* not mapped yet */
278                 DEBUG(0,("ERROR shm_validate_header : shmem not mapped\n"));
279                 return False;
280         }
281    
282         if(shm_header_p->shm_magic != SHM_MAGIC) {
283                 DEBUG(0,("ERROR shm_validate_header : bad magic\n"));
284                 return False;
285         }
286
287         if(shm_header_p->shm_version != SHM_VERSION) {
288                 DEBUG(0,("ERROR shm_validate_header : bad version %X\n",shm_header_p->shm_version));
289                 return False;
290         }
291    
292         if(shm_header_p->total_size != size) {
293                 DEBUG(0,("ERROR shm_validate_header : shmem size mismatch (old = %d, new = %d)\n",shm_header_p->total_size,size));
294                 return False;
295         }
296
297         if(!shm_header_p->consistent) {
298                 DEBUG(0,("ERROR shm_validate_header : shmem not consistent\n"));
299                 return False;
300         }
301         return True;
302 }
303
/*
 * One-time initialisation of a freshly created (zeroed) shared area:
 * fill in the header and set up a single free block covering
 * everything after the aligned header.  Caller holds the global lock.
 */
static BOOL shm_initialize(int size)
{
	struct ShmBlockDesc * first_free_block_p;
	
	DEBUG(5,("shm_initialize : initializing shmem file of size %d\n",size));
   
	if( !shm_header_p ) {
		/* not mapped yet */
		DEBUG(0,("ERROR shm_initialize : shmem not mapped\n"));
		return False;
	}
   
	shm_header_p->shm_magic = SHM_MAGIC;
	shm_header_p->shm_version = SHM_VERSION;
	shm_header_p->total_size = size;
	shm_header_p->first_free_off = AlignedHeaderSize;
	shm_header_p->userdef_off = NULL_OFFSET;
	
	/* one free block spans the whole area minus the header and the
	   block's own descriptor cell */
	first_free_block_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
	first_free_block_p->next = EOList_Off;
	first_free_block_p->size = ( size - AlignedHeaderSize - CellSize ) / CellSize ;
   
	shm_header_p->statistics.cells_free = first_free_block_p->size;
	shm_header_p->statistics.cells_used = 0;
	shm_header_p->statistics.cells_system = 1;	/* the free block's descriptor */
   
	shm_header_p->consistent = True;
   
	shm_initialize_called = True;

	return True;
}
336    
/*
 * Coalesce a free block with its successor in the free list when the
 * two are physically adjacent (head + descriptor + size cells == next).
 * Caller holds the global lock and head_p is already on the free list.
 */
static void shm_solve_neighbors(struct ShmBlockDesc *head_p )
{
	struct ShmBlockDesc *next_p;
   
	/* Check if head_p and head_p->next are neighbors and if so
	   join them */
	if ( head_p == EOList_Addr ) return ;
	if ( head_p->next == EOList_Off ) return ;
   
	next_p = (struct ShmBlockDesc *)shm_offset2addr(head_p->next);
	if ( ( head_p + head_p->size + 1 ) == next_p) {
		head_p->size += next_p->size +1 ;	/* adapt size */
		head_p->next = next_p->next	  ; /* link out */
      
		/* next_p's descriptor cell becomes ordinary free space */
		shm_header_p->statistics.cells_free += 1;
		shm_header_p->statistics.cells_system -= 1;
	}
}
355
356
357
358
359 static BOOL shm_close( void )
360 {
361         return True;
362 }
363
364
/*
 * Return the block at `offset' (as returned by shm_alloc) to the free
 * list, which is kept sorted by address so that adjacent free blocks
 * can be coalesced.  Takes the global lock.  Returns False if the
 * offset does not denote an in-use block.
 */
static BOOL shm_free(int offset)
{
	struct ShmBlockDesc *header_p; /* pointer to header of
						block to free */
	struct ShmBlockDesc *scanner_p; /* used to scan the list */
	struct ShmBlockDesc *prev_p; /* holds previous in the
					    list */
   
	if (!shm_header_p) {
		/* not mapped yet */
		DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
		return False;
	}
	
	global_lock();
	
	if (!shm_header_p->consistent) {
		DEBUG(0,("ERROR shm_free : shmem not consistent\n"));
		global_unlock();
		return False;
	}
	
	/* make pointer to header of block */
	header_p = ((struct ShmBlockDesc *)shm_offset2addr(offset) - 1); 
	
	/* an allocated block must still carry the in-use marker */
	if (header_p->next != SHM_NOT_FREE_OFF) {
		DEBUG(0,("ERROR shm_free : bad offset (%d)\n",offset));
		global_unlock();
		return False;
	}
	
	/* find a place in the free_list to put the header in */
	
	/* set scanner and previous pointer to start of list */
	prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
	scanner_p = prev_p ;
	
	while ((scanner_p != EOList_Addr) && 
	       (scanner_p < header_p)) { 
		/* while we didn't scan past its position */
		prev_p = scanner_p ;
		scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
	}
	
	shm_header_p->consistent = False;
	
	DEBUG(6,("shm_free : freeing %d bytes at offset %d\n",
		 header_p->size*CellSize,offset));
	
	if (scanner_p == prev_p) {
		shm_header_p->statistics.cells_free += header_p->size;
		shm_header_p->statistics.cells_used -= header_p->size;
		
		/* we must free it at the beginning of the list */
		shm_header_p->first_free_off = shm_addr2offset(header_p);						 /*	set	the free_list_pointer to this block_header */
		
		/* scanner is the one that was first in the list */
		header_p->next = shm_addr2offset(scanner_p);
		shm_solve_neighbors( header_p ); /* if neighbors then link them */
		
		shm_header_p->consistent = True;
	} else {
		shm_header_p->statistics.cells_free += header_p->size;
		shm_header_p->statistics.cells_used -= header_p->size;
		
		/* link in between prev and scanner, then try to merge with
		   both neighbours */
		prev_p->next = shm_addr2offset(header_p);
		header_p->next = shm_addr2offset(scanner_p);
		shm_solve_neighbors(header_p) ;
		shm_solve_neighbors(prev_p) ;
	   
		shm_header_p->consistent = True;
	}

	global_unlock();
	return True;
}
441
442
443 static int shm_get_userdef_off(void)
444 {
445    if (!shm_header_p)
446       return NULL_OFFSET;
447    else
448       return shm_header_p->userdef_off;
449 }
450
451 /*******************************************************************
452   Lock a particular hash bucket entry.
453   ******************************************************************/
454 static BOOL shm_lock_hash_entry(unsigned int entry)
455 {
456         DEBUG(0,("hash lock %d\n", entry));
457         return sem_lock(entry+1);
458 }
459
460 /*******************************************************************
461   Unlock a particular hash bucket entry.
462   ******************************************************************/
463 static BOOL shm_unlock_hash_entry(unsigned int entry)
464 {
465         DEBUG(0,("hash unlock %d\n", entry));
466         return sem_unlock(entry+1);
467 }
468
469
470 /*******************************************************************
471   Gather statistics on shared memory usage.
472   ******************************************************************/
473 static BOOL shm_get_usage(int *bytes_free,
474                           int *bytes_used,
475                           int *bytes_overhead)
476 {
477         if(!shm_header_p) {
478                 /* not mapped yet */
479                 DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
480                 return False;
481         }
482
483         *bytes_free = shm_header_p->statistics.cells_free * CellSize;
484         *bytes_used = shm_header_p->statistics.cells_used * CellSize;
485         *bytes_overhead = shm_header_p->statistics.cells_system * CellSize + AlignedHeaderSize;
486         
487         return True;
488 }
489
/* Dispatch table exported to the generic shared-memory layer via
   sysv_shm_open(). */
static struct shmem_ops shmops = {
	shm_close,
	shm_alloc,
	shm_free,
	shm_get_userdef_off,
	shm_offset2addr,
	shm_addr2offset,
	shm_lock_hash_entry,
	shm_unlock_hash_entry,
	shm_get_usage,
};
501
502 /*******************************************************************
503   open the shared memory
504   ******************************************************************/
505 struct shmem_ops *sysv_shm_open(int size, int ronly)
506 {
507         BOOL created_new = False;
508         BOOL other_processes;
509         struct shmid_ds shm_ds;
510         struct semid_ds sem_ds;
511         union semun su;
512         int i;
513
514         read_only = ronly;
515
516         shm_size = size;
517
518         DEBUG(4,("Trying sysv shmem open of size %d\n", size));
519
520         /* first the semaphore */
521         sem_id = semget(SEMAPHORE_KEY, 0, 0);
522         if (sem_id == -1) {
523                 if (read_only) return NULL;
524
525                 sem_id = semget(SEMAPHORE_KEY, lp_shmem_hash_size()+1, 
526                                 IPC_CREAT | IPC_EXCL | IPC_PERMS);
527
528                 if (sem_id == -1) {
529                         DEBUG(0,("Can't create or use semaphore %s\n", 
530                                  strerror(errno)));
531                 }   
532
533                 if (sem_id != -1) {
534                         su.val = 1;
535                         for (i=0;i<lp_shmem_hash_size()+1;i++) {
536                                 if (semctl(sem_id, i, SETVAL, su) != 0) {
537                                         DEBUG(1,("Failed to init semaphore %d\n", i));
538                                 }
539                         }
540                 }
541         }
542         if (shm_id == -1) {
543                 sem_id = semget(SEMAPHORE_KEY, 0, 0);
544         }
545         if (sem_id == -1) {
546                 DEBUG(0,("Can't create or use semaphore %s\n", 
547                          strerror(errno)));
548                 return NULL;
549         }   
550
551         su.buf = &sem_ds;
552         if (semctl(sem_id, 0, IPC_STAT, su) != 0) {
553                 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
554         }
555         hash_size = sem_ds.sem_nsems;
556         if (hash_size != lp_shmem_hash_size()+1) {
557                 DEBUG(0,("WARNING: nsems=%d\n", hash_size));
558         }
559         
560         if (!global_lock())
561                 return NULL;
562         
563         /* try to use an existing key */
564         shm_id = shmget(SHMEM_KEY, shm_size, 0);
565         
566         /* if that failed then create one */
567         if (shm_id == -1) {
568                 if (read_only) return NULL;
569                 shm_id = shmget(SHMEM_KEY, shm_size, IPC_CREAT | IPC_EXCL);
570                 created_new = (shm_id != -1);
571         }
572         
573         if (shm_id == -1) {
574                 DEBUG(0,("Can't create or use IPC area\n"));
575                 global_unlock();
576                 return NULL;
577         }   
578         
579         
580         shm_header_p = (struct ShmHeader *)shmat(shm_id, 0, 
581                                                  read_only?SHM_RDONLY:0);
582         if ((int)shm_header_p == -1) {
583                 DEBUG(0,("Can't attach to IPC area\n"));
584                 global_unlock();
585                 return NULL;
586         }
587
588         /* to find out if some other process is already mapping the file,
589            we use a registration file containing the processids of the file
590            mapping processes */
591         if (shmctl(shm_id, IPC_STAT, &shm_ds) != 0) {
592                 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
593         }
594
595         /* set the permissions */
596         if (!read_only) {
597                 shm_ds.shm_perm.mode = IPC_PERMS;
598                 shmctl(shm_id, IPC_SET, &shm_ds);
599         }
600
601         other_processes = (shm_ds.shm_nattch > 1);
602
603         if (!read_only && !other_processes) {
604                 memset((char *)shm_header_p, 0, shm_size);
605                 shm_initialize(shm_size);
606                 shm_create_hash_table(lp_shmem_hash_size());
607                 DEBUG(1,("Initialised IPC area of size %d\n", shm_size));
608         } else if (!shm_validate_header(shm_size)) {
609                 /* existing file is corrupt, samba admin should remove
610                    it by hand */
611                 DEBUG(0,("ERROR shm_open : corrupt IPC area - remove it!\n"));
612                 global_unlock();
613                 return NULL;
614         }
615    
616         global_unlock();
617         return &shmops;
618 }
619
620
621
622 #else 
/* Placeholder so the translation unit is never empty when the SYSV
   IPC backend is not compiled in. */
 int ipc_dummy_procedure(void)
{
	return 0;
}
625 #endif