damn. We need root privileges to do semaphore operations even if we
[kai/samba.git] / source3 / locking / shmem_sysv.c
1 /* 
2    Unix SMB/Netbios implementation.
3    Version 1.9.
4    Shared memory functions - SYSV IPC implementation
5    Copyright (C) Erik Devriendt 1996-1997
6    
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 2 of the License, or
10    (at your option) any later version.
11    
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    
17    You should have received a copy of the GNU General Public License
18    along with this program; if not, write to the Free Software
19    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21 */
22
23 #include "includes.h"
24
25
26 #ifdef USE_SYSV_IPC
27
28 extern int DEBUGLEVEL;
29
30 #define SHMEM_KEY ((key_t)0x280267)
31 #define SEMAPHORE_KEY (SHMEM_KEY+2)
32
33 #define SHM_MAGIC 0x53484100
34 #define SHM_VERSION 2
35
36 #ifdef SHM_R
37 #define IPC_PERMS ((SHM_R | SHM_W) | (SHM_R>>3) | (SHM_R>>6))
38 #else
39 #define IPC_PERMS 0644
40 #endif
41
42 #ifdef SECURE_SEMAPHORES
43 /* secure semaphores are slow because we have to do a become_root()
44    on every call! */
45 #define SEMAPHORE_PERMS IPC_PERMS
46 #else
47 #define SEMAPHORE_PERMS 0666
48 #endif
49
50 #ifdef SEMMSL
51 #define SHMEM_HASH_SIZE (SEMMSL-1)
52 #else
53 #define SHMEM_HASH_SIZE 63
54 #endif
55
56 #define MIN_SHM_SIZE 10240
57
58 static int shm_id;
59 static int sem_id;
60 static int shm_size;
61 static int hash_size;
62 static int global_lock_count;
63
64 struct ShmHeader {
65    int shm_magic;
66    int shm_version;
67    int total_size;      /* in bytes */
68    BOOL consistent;
69    int first_free_off;
70    int userdef_off;    /* a userdefined offset. can be used to store
71                           root of tree or list */
72    struct {             /* a cell is a range of bytes of sizeof(struct
73                            ShmBlockDesc) size */
74            int cells_free;
75            int cells_used;
76            int cells_system; /* number of cells used as allocated
77                                 block descriptors */
78    } statistics;
79 };
80
81 #define SHM_NOT_FREE_OFF (-1)
82 struct ShmBlockDesc
83 {
84    int next;    /* offset of next block in the free list or
85                    SHM_NOT_FREE_OFF when block in use */
86    int          size;   /* user size in BlockDescSize units */
87 };
88
89 #define EOList_Addr     (struct ShmBlockDesc *)( 0 )
90 #define EOList_Off      (NULL_OFFSET)
91
92 #define CellSize        sizeof(struct ShmBlockDesc)
93
94 /* HeaderSize aligned on 8 byte boundary */
95 #define AlignedHeaderSize       ((sizeof(struct ShmHeader)+7) & ~7)
96
97 static struct ShmHeader *shm_header_p = (struct ShmHeader *)0;
98
99 static BOOL shm_initialize_called = False;
100
101 static int read_only;
102
103 static BOOL sem_change(int i, int op)
104 {
105 #ifdef SECURE_SEMAPHORES
106         extern struct current_user current_user;
107         int became_root=0;
108 #endif
109         struct sembuf sb;
110         int ret;
111
112         if (read_only) return True;
113
114 #ifdef SECURE_SEMAPHORES
115         if (current_user.uid != 0) {
116                 become_root(0);
117                 became_root = 1;
118         }
119 #endif
120
121         sb.sem_num = i;
122         sb.sem_op = op;
123         sb.sem_flg = SEM_UNDO;
124
125         ret = semop(sem_id, &sb, 1);
126
127         if (ret != 0) {
128                 DEBUG(0,("ERROR: sem_change(%d,%d) failed (%s)\n", 
129                          i, op, strerror(errno)));
130         }
131
132 #ifdef SECURE_SEMAPHORES
133         if (became_root) {
134                 unbecome_root(0);
135         }
136 #endif
137
138         return ret == 0;
139 }
140
141 static BOOL global_lock(void)
142 {
143         global_lock_count++;
144         if (global_lock_count == 1)
145                 return sem_change(0, -1);
146         return True;
147 }
148
149 static BOOL global_unlock(void)
150 {
151         global_lock_count--;
152         if (global_lock_count == 0)
153                 return sem_change(0, 1);
154         return True;
155 }
156
157 static void *shm_offset2addr(int offset)
158 {
159    if (offset == NULL_OFFSET )
160       return (void *)(0);
161    
162    if (!shm_header_p)
163       return (void *)(0);
164    
165    return (void *)((char *)shm_header_p + offset );
166 }
167
168 static int shm_addr2offset(void *addr)
169 {
170    if (!addr)
171       return NULL_OFFSET;
172    
173    if (!shm_header_p)
174       return NULL_OFFSET;
175    
176    return (int)((char *)addr - (char *)shm_header_p);
177 }
178
179
/*
 * Allocate "size" bytes from the shared memory area under the global
 * lock.  Returns the offset of the user data, or NULL_OFFSET on failure
 * (unmapped, inconsistent, or no sufficiently large free block).
 *
 * The allocator is a first-fit scan of a singly linked free list whose
 * units are cells of CellSize (= sizeof(struct ShmBlockDesc)) bytes.
 * Each block is preceded by one descriptor cell; while a block is in
 * use its descriptor's "next" field holds SHM_NOT_FREE_OFF.
 */
static int shm_alloc(int size)
{
	unsigned num_cells ;
	struct ShmBlockDesc *scanner_p;
	struct ShmBlockDesc *prev_p;
	struct ShmBlockDesc *new_p;
	int result_offset;
   
   
	if (!shm_header_p) {
		/* not mapped yet */
		DEBUG(0,("ERROR shm_alloc : shmem not mapped\n"));
		return NULL_OFFSET;
	}
	
	global_lock();
	
	if (!shm_header_p->consistent) {
		DEBUG(0,("ERROR shm_alloc : shmem not consistent\n"));
		global_unlock();
		return NULL_OFFSET;
	}
	
	/* calculate	the number of cells (round the byte count up) */
	num_cells = (size + CellSize -1) / CellSize;
	
	/* set start	of scan */
	prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
	scanner_p =	prev_p ;
	
	/* scan the free list to find a matching free space (first fit) */
	while ((scanner_p != EOList_Addr) && (scanner_p->size < num_cells)) {
		prev_p = scanner_p;
		scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
	}
   
	/* at this point scanner point to a block header or to the end of
	   the list */
	if (scanner_p == EOList_Addr) {
		DEBUG(0,("ERROR shm_alloc : alloc of %d bytes failed, no free space found\n",size));
		global_unlock();
		return (NULL_OFFSET);
	}
   
	/* going to modify shared mem - drop the consistent flag so a
	   crash mid-update is detectable by other processes */
	shm_header_p->consistent = False;
	
	/* if we found a good one : scanner == the good one */
	if (scanner_p->size <= num_cells + 2) {
		/* there is no use in making a new one, it will be too small anyway 
		 *	 we will link out scanner
		 */
		/* hand out the whole block: link scanner out of the free list */
		if ( prev_p == scanner_p ) {
			shm_header_p->first_free_off = scanner_p->next ;
		} else {
			prev_p->next = scanner_p->next ;
		}
		shm_header_p->statistics.cells_free -= scanner_p->size;
		shm_header_p->statistics.cells_used += scanner_p->size;
	} else {
		/* Make a new one: split the block - the tail (past our
		   cells plus one new descriptor cell) stays on the free
		   list in scanner's place */
		new_p = scanner_p + 1 + num_cells;
		new_p->size = scanner_p->size - num_cells - 1;
		new_p->next = scanner_p->next;
		scanner_p->size = num_cells;
		scanner_p->next = shm_addr2offset(new_p);
		
		if (prev_p != scanner_p) {
			prev_p->next	   = shm_addr2offset(new_p)  ;
		} else {
			shm_header_p->first_free_off = shm_addr2offset(new_p);
		}
		shm_header_p->statistics.cells_free -= num_cells+1;
		shm_header_p->statistics.cells_used += num_cells;
		shm_header_p->statistics.cells_system += 1;
	}

	/* user data starts right after the descriptor cell */
	result_offset = shm_addr2offset( &(scanner_p[1]) );
	scanner_p->next =	SHM_NOT_FREE_OFF ;	/* mark block as in use */

	/* end modification of shared mem */
	shm_header_p->consistent = True;
	
	DEBUG(6,("shm_alloc : request for %d bytes, allocated %d bytes at offset %d\n",size,scanner_p->size*CellSize,result_offset ));

	global_unlock();
	return result_offset;
}   
268
269
270
271 /* 
272  * Function to create the hash table for the share mode entries. Called
273  * when smb shared memory is global locked.
274  */
275 static BOOL shm_create_hash_table( unsigned int size )
276 {
277         size *= sizeof(int);
278
279         global_lock();
280         shm_header_p->userdef_off = shm_alloc( size );
281
282         if(shm_header_p->userdef_off == NULL_OFFSET) {
283                 DEBUG(0,("shm_create_hash_table: Failed to create hash table of size %d\n",size));
284                 global_unlock();
285                 return False;
286         }
287
288         /* Clear hash buckets. */
289         memset( shm_offset2addr(shm_header_p->userdef_off), '\0', size);
290         global_unlock();
291         return True;
292 }
293
294 static BOOL shm_validate_header(int size)
295 {
296         if( !shm_header_p ) {
297                 /* not mapped yet */
298                 DEBUG(0,("ERROR shm_validate_header : shmem not mapped\n"));
299                 return False;
300         }
301    
302         if(shm_header_p->shm_magic != SHM_MAGIC) {
303                 DEBUG(0,("ERROR shm_validate_header : bad magic\n"));
304                 return False;
305         }
306
307         if(shm_header_p->shm_version != SHM_VERSION) {
308                 DEBUG(0,("ERROR shm_validate_header : bad version %X\n",shm_header_p->shm_version));
309                 return False;
310         }
311    
312         if(shm_header_p->total_size != size) {
313                 DEBUG(0,("ERROR shm_validate_header : shmem size mismatch (old = %d, new = %d)\n",shm_header_p->total_size,size));
314                 return False;
315         }
316
317         if(!shm_header_p->consistent) {
318                 DEBUG(0,("ERROR shm_validate_header : shmem not consistent\n"));
319                 return False;
320         }
321         return True;
322 }
323
324 static BOOL shm_initialize(int size)
325 {
326         struct ShmBlockDesc * first_free_block_p;
327         
328         DEBUG(5,("shm_initialize : initializing shmem file of size %d\n",size));
329    
330         if( !shm_header_p ) {
331                 /* not mapped yet */
332                 DEBUG(0,("ERROR shm_initialize : shmem not mapped\n"));
333                 return False;
334         }
335    
336         shm_header_p->shm_magic = SHM_MAGIC;
337         shm_header_p->shm_version = SHM_VERSION;
338         shm_header_p->total_size = size;
339         shm_header_p->first_free_off = AlignedHeaderSize;
340         shm_header_p->userdef_off = NULL_OFFSET;
341         
342         first_free_block_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
343         first_free_block_p->next = EOList_Off;
344         first_free_block_p->size = ( size - AlignedHeaderSize - CellSize ) / CellSize ;
345    
346         shm_header_p->statistics.cells_free = first_free_block_p->size;
347         shm_header_p->statistics.cells_used = 0;
348         shm_header_p->statistics.cells_system = 1;
349    
350         shm_header_p->consistent = True;
351    
352         shm_initialize_called = True;
353
354         return True;
355 }
356    
357 static void shm_solve_neighbors(struct ShmBlockDesc *head_p )
358 {
359         struct ShmBlockDesc *next_p;
360    
361         /* Check if head_p and head_p->next are neighbors and if so
362            join them */
363         if ( head_p == EOList_Addr ) return ;
364         if ( head_p->next == EOList_Off ) return ;
365    
366         next_p = (struct ShmBlockDesc *)shm_offset2addr(head_p->next);
367         if ( ( head_p + head_p->size + 1 ) == next_p) {
368                 head_p->size += next_p->size +1 ;       /* adapt size */
369                 head_p->next = next_p->next       ; /* link out */
370       
371                 shm_header_p->statistics.cells_free += 1;
372                 shm_header_p->statistics.cells_system -= 1;
373         }
374 }
375
376
377
378
/* Close hook for the shmem_ops table.  Nothing to tear down here: the
   SYSV segment stays attached until process exit. */
static BOOL shm_close( void )
{
	return True;
}
383
384
/*
 * Return a block previously handed out by shm_alloc() (identified by
 * the offset of its user data) to the free list, under the global lock.
 * The free list is kept sorted by address so physically adjacent free
 * blocks can be coalesced by shm_solve_neighbors().
 * Returns False for an unmapped segment, an inconsistent area, or an
 * offset whose descriptor is not marked in-use.
 */
static BOOL shm_free(int offset)
{
	struct ShmBlockDesc *header_p; /* pointer to header of
						block to free */
	struct ShmBlockDesc *scanner_p; /* used to scan the list */
	struct ShmBlockDesc *prev_p; /* holds previous in the
					    list */
   
	if (!shm_header_p) {
		/* not mapped yet */
		DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
		return False;
	}
	
	global_lock();
	
	if (!shm_header_p->consistent) {
		DEBUG(0,("ERROR shm_free : shmem not consistent\n"));
		global_unlock();
		return False;
	}
	
	/* make pointer to header of block (descriptor sits one cell
	   before the user data) */
	header_p = ((struct ShmBlockDesc *)shm_offset2addr(offset) - 1); 
	
	/* an in-use block's descriptor must carry SHM_NOT_FREE_OFF */
	if (header_p->next != SHM_NOT_FREE_OFF) {
		DEBUG(0,("ERROR shm_free : bad offset (%d)\n",offset));
		global_unlock();
		return False;
	}
	
	/* find a place in the free_list to put the header in */
	
	/* set scanner and previous pointer to start of list */
	prev_p = (struct ShmBlockDesc *)shm_offset2addr(shm_header_p->first_free_off);
	scanner_p = prev_p ;
	
	while ((scanner_p != EOList_Addr) && 
	       (scanner_p < header_p)) { 
		/* while we didn't scan past its position */
		prev_p = scanner_p ;
		scanner_p = (struct ShmBlockDesc *)shm_offset2addr(scanner_p->next);
	}
	
	/* going to modify shared mem */
	shm_header_p->consistent = False;
	
	DEBUG(6,("shm_free : freeing %d bytes at offset %d\n",
		 header_p->size*CellSize,offset));
	
	if (scanner_p == prev_p) {
		/* block goes before the current list head (also covers an
		   empty free list, where both pointers are EOList_Addr) */
		shm_header_p->statistics.cells_free += header_p->size;
		shm_header_p->statistics.cells_used -= header_p->size;
		
		/* we must free it at the beginning of the list */
		shm_header_p->first_free_off = shm_addr2offset(header_p);						 /*	set	the free_list_pointer to this block_header */
		
		/* scanner is the one that was first in the list */
		header_p->next = shm_addr2offset(scanner_p);
		shm_solve_neighbors( header_p ); /* if neighbors then link them */
		
		shm_header_p->consistent = True;
	} else {
		/* insert between prev_p and scanner_p, then try to merge
		   with both sides */
		shm_header_p->statistics.cells_free += header_p->size;
		shm_header_p->statistics.cells_used -= header_p->size;
		
		prev_p->next = shm_addr2offset(header_p);
		header_p->next = shm_addr2offset(scanner_p);
		shm_solve_neighbors(header_p) ;
		shm_solve_neighbors(prev_p) ;
	   
		shm_header_p->consistent = True;
	}

	global_unlock();
	return True;
}
461
462
463 static int shm_get_userdef_off(void)
464 {
465    if (!shm_header_p)
466       return NULL_OFFSET;
467    else
468       return shm_header_p->userdef_off;
469 }
470
471 /*******************************************************************
472   Lock a particular hash bucket entry.
473   ******************************************************************/
474 static BOOL shm_lock_hash_entry(unsigned int entry)
475 {
476         return sem_change(entry+1, -1);
477 }
478
479 /*******************************************************************
480   Unlock a particular hash bucket entry.
481   ******************************************************************/
482 static BOOL shm_unlock_hash_entry(unsigned int entry)
483 {
484         return sem_change(entry+1, 1);
485 }
486
487
488 /*******************************************************************
489   Gather statistics on shared memory usage.
490   ******************************************************************/
491 static BOOL shm_get_usage(int *bytes_free,
492                           int *bytes_used,
493                           int *bytes_overhead)
494 {
495         if(!shm_header_p) {
496                 /* not mapped yet */
497                 DEBUG(0,("ERROR shm_free : shmem not mapped\n"));
498                 return False;
499         }
500
501         *bytes_free = shm_header_p->statistics.cells_free * CellSize;
502         *bytes_used = shm_header_p->statistics.cells_used * CellSize;
503         *bytes_overhead = shm_header_p->statistics.cells_system * CellSize + AlignedHeaderSize;
504         
505         return True;
506 }
507
508
/*******************************************************************
  Return the number of hash buckets (one lock semaphore per bucket
  was allocated at sysv_shm_open time).
  ******************************************************************/
static unsigned shm_hash_size(void)
{
	return hash_size;
}
516
517
/* Dispatch table handed back to the generic locking code by
   sysv_shm_open(). */
static struct shmem_ops shmops = {
	shm_close,
	shm_alloc,
	shm_free,
	shm_get_userdef_off,
	shm_offset2addr,
	shm_addr2offset,
	shm_lock_hash_entry,
	shm_unlock_hash_entry,
	shm_get_usage,
	shm_hash_size,
};
530
531 /*******************************************************************
532   open the shared memory
533   ******************************************************************/
534 struct shmem_ops *sysv_shm_open(int ronly)
535 {
536         BOOL created_new = False;
537         BOOL other_processes;
538         struct shmid_ds shm_ds;
539         struct semid_ds sem_ds;
540         union semun su;
541         int i;
542
543         read_only = ronly;
544
545         shm_size = lp_shmem_size();
546
547         DEBUG(4,("Trying sysv shmem open of size %d\n", shm_size));
548
549         /* first the semaphore */
550         sem_id = semget(SEMAPHORE_KEY, 0, 0);
551         if (sem_id == -1) {
552                 if (read_only) return NULL;
553
554                 hash_size = SHMEM_HASH_SIZE;
555
556                 while (hash_size > 1) {
557                         sem_id = semget(SEMAPHORE_KEY, hash_size+1, 
558                                         IPC_CREAT|IPC_EXCL| SEMAPHORE_PERMS);
559                         if (sem_id != -1 || errno != EINVAL) break;
560                         hash_size--;
561                 }
562
563                 if (sem_id == -1) {
564                         DEBUG(0,("Can't create or use semaphore %s\n", 
565                                  strerror(errno)));
566                 }   
567
568                 if (sem_id != -1) {
569                         su.val = 1;
570                         for (i=0;i<hash_size+1;i++) {
571                                 if (semctl(sem_id, i, SETVAL, su) != 0) {
572                                         DEBUG(1,("Failed to init semaphore %d\n", i));
573                                 }
574                         }
575                 }
576         }
577         if (shm_id == -1) {
578                 sem_id = semget(SEMAPHORE_KEY, 0, 0);
579         }
580         if (sem_id == -1) {
581                 DEBUG(0,("Can't create or use semaphore %s\n", 
582                          strerror(errno)));
583                 return NULL;
584         }   
585
586         su.buf = &sem_ds;
587         if (semctl(sem_id, 0, IPC_STAT, su) != 0) {
588                 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
589         }
590         hash_size = sem_ds.sem_nsems-1;
591
592         if (!read_only) {
593                 if (sem_ds.sem_perm.cuid != 0 || sem_ds.sem_perm.cgid != 0) {
594                         DEBUG(0,("ERROR: root did not create the semaphore\n"));
595                         return NULL;
596                 }
597
598                 sem_ds.sem_perm.mode = SEMAPHORE_PERMS;
599                 if (semctl(sem_id, 0, IPC_SET, su) != 0) {
600                         DEBUG(0,("ERROR shm_open : can't IPC_SET\n"));
601                 }
602         }
603
604         
605         
606         if (!global_lock())
607                 return NULL;
608         
609         /* try to use an existing key */
610         shm_id = shmget(SHMEM_KEY, shm_size, 0);
611         
612         /* if that failed then create one */
613         if (shm_id == -1) {
614                 if (read_only) return NULL;
615                 while (shm_size > MIN_SHM_SIZE) {
616                         shm_id = shmget(SHMEM_KEY, shm_size, 
617                                         IPC_CREAT | IPC_EXCL | IPC_PERMS);
618                         if (shm_id != -1 || errno != EINVAL) break;
619                         shm_size *= 0.9;
620                 }
621                 created_new = (shm_id != -1);
622         }
623         
624         if (shm_id == -1) {
625                 DEBUG(0,("Can't create or use IPC area\n"));
626                 global_unlock();
627                 return NULL;
628         }   
629         
630         
631         shm_header_p = (struct ShmHeader *)shmat(shm_id, 0, 
632                                                  read_only?SHM_RDONLY:0);
633         if ((int)shm_header_p == -1) {
634                 DEBUG(0,("Can't attach to IPC area\n"));
635                 global_unlock();
636                 return NULL;
637         }
638
639         /* to find out if some other process is already mapping the file,
640            we use a registration file containing the processids of the file
641            mapping processes */
642         if (shmctl(shm_id, IPC_STAT, &shm_ds) != 0) {
643                 DEBUG(0,("ERROR shm_open : can't IPC_STAT\n"));
644         }
645
646         if (!read_only) {
647                 if (shm_ds.shm_perm.cuid != 0 || shm_ds.shm_perm.cgid != 0) {
648                         DEBUG(0,("ERROR: root did not create the shmem\n"));
649                         global_unlock();
650                         return NULL;
651                 }
652         }
653
654         shm_size = shm_ds.shm_segsz;
655
656         other_processes = (shm_ds.shm_nattch > 1);
657
658         if (!read_only && !other_processes) {
659                 memset((char *)shm_header_p, 0, shm_size);
660                 shm_initialize(shm_size);
661                 shm_create_hash_table(hash_size);
662                 DEBUG(3,("Initialised IPC area of size %d\n", shm_size));
663         } else if (!shm_validate_header(shm_size)) {
664                 /* existing file is corrupt, samba admin should remove
665                    it by hand */
666                 DEBUG(0,("ERROR shm_open : corrupt IPC area - remove it!\n"));
667                 global_unlock();
668                 return NULL;
669         }
670    
671         global_unlock();
672         return &shmops;
673 }
674
675
676
677 #else 
678  int ipc_dummy_procedure(void)
679 {return 0;}
680 #endif